e5c69f5de625729cad58ecd99c21fdc6f34dbc3a
[deliverable/binutils-gdb.git] / bfd / elf32-arm.c
1 /* 32-bit ELF support for ARM
2 Copyright (C) 1998-2016 Free Software Foundation, Inc.
3
4 This file is part of BFD, the Binary File Descriptor library.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
20
21 #include "sysdep.h"
22 #include <limits.h>
23
24 #include "bfd.h"
25 #include "bfd_stdint.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-nacl.h"
30 #include "elf-vxworks.h"
31 #include "elf/arm.h"
32
33 /* Return the relocation section associated with NAME.  HTAB is the
34 bfd's elf32_arm_link_hash_table -- the holder of the use_rel flag
(NOTE(review): the old wording "hash_entry" looks like a typo --
confirm).  Selects the ".rel" or ".rela" section-name prefix
according to whether REL or RELA relocations are in use.  */
35 #define RELOC_SECTION(HTAB, NAME) \
36 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
37
38 /* Return size of an external relocation entry.  HTAB is the bfd's
39 elf32_arm_link_hash_table (NOTE(review): "hash_entry" in the old
comment looks like a typo -- confirm); its use_rel flag picks
between the REL and RELA on-disk record sizes.  */
40 #define RELOC_SIZE(HTAB) \
41 ((HTAB)->use_rel \
42 ? sizeof (Elf32_External_Rel) \
43 : sizeof (Elf32_External_Rela))
44
45 /* Return the function used to swap relocations in (external to
46 internal form).  HTAB is the bfd's elf32_arm_link_hash_table
(NOTE(review): "hash_entry" in the old comment looks like a typo --
confirm); use_rel picks the REL or RELA swapper.  */
47 #define SWAP_RELOC_IN(HTAB) \
48 ((HTAB)->use_rel \
49 ? bfd_elf32_swap_reloc_in \
50 : bfd_elf32_swap_reloca_in)
51
52 /* Return the function used to swap relocations out (internal to
53 external form).  HTAB is the bfd's elf32_arm_link_hash_table
(NOTE(review): "hash_entry" in the old comment looks like a typo --
confirm); use_rel picks the REL or RELA swapper, mirroring
SWAP_RELOC_IN above.  */
54 #define SWAP_RELOC_OUT(HTAB) \
55 ((HTAB)->use_rel \
56 ? bfd_elf32_swap_reloc_out \
57 : bfd_elf32_swap_reloca_out)
58
/* Only the REL flavour of info-to-howto conversion is provided; the
plain (RELA) hook is deliberately 0.  NOTE(review): presumably the
generic ELF backend tolerates a null elf_info_to_howto when the
_rel variant is set -- confirm against elfxx-target.h.  */
59 #define elf_info_to_howto 0
60 #define elf_info_to_howto_rel elf32_arm_info_to_howto
61
/* Values emitted in the ELF identification header: ABI version 0 and
the ARM OS/ABI code.  NOTE(review): consumed when the ELF header is
written -- verify where these are referenced.  */
62 #define ARM_ELF_ABI_VERSION 0
63 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
64
65 /* The Adjusted Place, as defined by AAELF: clears the low two bits
of X, i.e. rounds the address down to a 4-byte (ARM word)
boundary.  */
66 #define Pa(X) ((X) & 0xfffffffc)
67
/* Forward declaration; the definition appears later in this file
(outside this chunk).  Takes the output bfd, the link info, the
section being written and its contents buffer.  */
68 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
69 struct bfd_link_info *link_info,
70 asection *sec,
71 bfd_byte *contents);
72
73 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
74 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
75 in that slot. */
76
77 static reloc_howto_type elf32_arm_howto_table_1[] =
78 {
79 /* No relocation. */
80 HOWTO (R_ARM_NONE, /* type */
81 0, /* rightshift */
82 3, /* size (0 = byte, 1 = short, 2 = long) */
83 0, /* bitsize */
84 FALSE, /* pc_relative */
85 0, /* bitpos */
86 complain_overflow_dont,/* complain_on_overflow */
87 bfd_elf_generic_reloc, /* special_function */
88 "R_ARM_NONE", /* name */
89 FALSE, /* partial_inplace */
90 0, /* src_mask */
91 0, /* dst_mask */
92 FALSE), /* pcrel_offset */
93
94 HOWTO (R_ARM_PC24, /* type */
95 2, /* rightshift */
96 2, /* size (0 = byte, 1 = short, 2 = long) */
97 24, /* bitsize */
98 TRUE, /* pc_relative */
99 0, /* bitpos */
100 complain_overflow_signed,/* complain_on_overflow */
101 bfd_elf_generic_reloc, /* special_function */
102 "R_ARM_PC24", /* name */
103 FALSE, /* partial_inplace */
104 0x00ffffff, /* src_mask */
105 0x00ffffff, /* dst_mask */
106 TRUE), /* pcrel_offset */
107
108 /* 32 bit absolute */
109 HOWTO (R_ARM_ABS32, /* type */
110 0, /* rightshift */
111 2, /* size (0 = byte, 1 = short, 2 = long) */
112 32, /* bitsize */
113 FALSE, /* pc_relative */
114 0, /* bitpos */
115 complain_overflow_bitfield,/* complain_on_overflow */
116 bfd_elf_generic_reloc, /* special_function */
117 "R_ARM_ABS32", /* name */
118 FALSE, /* partial_inplace */
119 0xffffffff, /* src_mask */
120 0xffffffff, /* dst_mask */
121 FALSE), /* pcrel_offset */
122
123 /* standard 32bit pc-relative reloc */
124 HOWTO (R_ARM_REL32, /* type */
125 0, /* rightshift */
126 2, /* size (0 = byte, 1 = short, 2 = long) */
127 32, /* bitsize */
128 TRUE, /* pc_relative */
129 0, /* bitpos */
130 complain_overflow_bitfield,/* complain_on_overflow */
131 bfd_elf_generic_reloc, /* special_function */
132 "R_ARM_REL32", /* name */
133 FALSE, /* partial_inplace */
134 0xffffffff, /* src_mask */
135 0xffffffff, /* dst_mask */
136 TRUE), /* pcrel_offset */
137
138 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
139 HOWTO (R_ARM_LDR_PC_G0, /* type */
140 0, /* rightshift */
141 0, /* size (0 = byte, 1 = short, 2 = long) */
142 32, /* bitsize */
143 TRUE, /* pc_relative */
144 0, /* bitpos */
145 complain_overflow_dont,/* complain_on_overflow */
146 bfd_elf_generic_reloc, /* special_function */
147 "R_ARM_LDR_PC_G0", /* name */
148 FALSE, /* partial_inplace */
149 0xffffffff, /* src_mask */
150 0xffffffff, /* dst_mask */
151 TRUE), /* pcrel_offset */
152
153 /* 16 bit absolute */
154 HOWTO (R_ARM_ABS16, /* type */
155 0, /* rightshift */
156 1, /* size (0 = byte, 1 = short, 2 = long) */
157 16, /* bitsize */
158 FALSE, /* pc_relative */
159 0, /* bitpos */
160 complain_overflow_bitfield,/* complain_on_overflow */
161 bfd_elf_generic_reloc, /* special_function */
162 "R_ARM_ABS16", /* name */
163 FALSE, /* partial_inplace */
164 0x0000ffff, /* src_mask */
165 0x0000ffff, /* dst_mask */
166 FALSE), /* pcrel_offset */
167
168 /* 12 bit absolute */
169 HOWTO (R_ARM_ABS12, /* type */
170 0, /* rightshift */
171 2, /* size (0 = byte, 1 = short, 2 = long) */
172 12, /* bitsize */
173 FALSE, /* pc_relative */
174 0, /* bitpos */
175 complain_overflow_bitfield,/* complain_on_overflow */
176 bfd_elf_generic_reloc, /* special_function */
177 "R_ARM_ABS12", /* name */
178 FALSE, /* partial_inplace */
179 0x00000fff, /* src_mask */
180 0x00000fff, /* dst_mask */
181 FALSE), /* pcrel_offset */
182
183 HOWTO (R_ARM_THM_ABS5, /* type */
184 6, /* rightshift */
185 1, /* size (0 = byte, 1 = short, 2 = long) */
186 5, /* bitsize */
187 FALSE, /* pc_relative */
188 0, /* bitpos */
189 complain_overflow_bitfield,/* complain_on_overflow */
190 bfd_elf_generic_reloc, /* special_function */
191 "R_ARM_THM_ABS5", /* name */
192 FALSE, /* partial_inplace */
193 0x000007e0, /* src_mask */
194 0x000007e0, /* dst_mask */
195 FALSE), /* pcrel_offset */
196
197 /* 8 bit absolute */
198 HOWTO (R_ARM_ABS8, /* type */
199 0, /* rightshift */
200 0, /* size (0 = byte, 1 = short, 2 = long) */
201 8, /* bitsize */
202 FALSE, /* pc_relative */
203 0, /* bitpos */
204 complain_overflow_bitfield,/* complain_on_overflow */
205 bfd_elf_generic_reloc, /* special_function */
206 "R_ARM_ABS8", /* name */
207 FALSE, /* partial_inplace */
208 0x000000ff, /* src_mask */
209 0x000000ff, /* dst_mask */
210 FALSE), /* pcrel_offset */
211
212 HOWTO (R_ARM_SBREL32, /* type */
213 0, /* rightshift */
214 2, /* size (0 = byte, 1 = short, 2 = long) */
215 32, /* bitsize */
216 FALSE, /* pc_relative */
217 0, /* bitpos */
218 complain_overflow_dont,/* complain_on_overflow */
219 bfd_elf_generic_reloc, /* special_function */
220 "R_ARM_SBREL32", /* name */
221 FALSE, /* partial_inplace */
222 0xffffffff, /* src_mask */
223 0xffffffff, /* dst_mask */
224 FALSE), /* pcrel_offset */
225
226 HOWTO (R_ARM_THM_CALL, /* type */
227 1, /* rightshift */
228 2, /* size (0 = byte, 1 = short, 2 = long) */
229 24, /* bitsize */
230 TRUE, /* pc_relative */
231 0, /* bitpos */
232 complain_overflow_signed,/* complain_on_overflow */
233 bfd_elf_generic_reloc, /* special_function */
234 "R_ARM_THM_CALL", /* name */
235 FALSE, /* partial_inplace */
236 0x07ff2fff, /* src_mask */
237 0x07ff2fff, /* dst_mask */
238 TRUE), /* pcrel_offset */
239
240 HOWTO (R_ARM_THM_PC8, /* type */
241 1, /* rightshift */
242 1, /* size (0 = byte, 1 = short, 2 = long) */
243 8, /* bitsize */
244 TRUE, /* pc_relative */
245 0, /* bitpos */
246 complain_overflow_signed,/* complain_on_overflow */
247 bfd_elf_generic_reloc, /* special_function */
248 "R_ARM_THM_PC8", /* name */
249 FALSE, /* partial_inplace */
250 0x000000ff, /* src_mask */
251 0x000000ff, /* dst_mask */
252 TRUE), /* pcrel_offset */
253
254 HOWTO (R_ARM_BREL_ADJ, /* type */
255 1, /* rightshift */
256 1, /* size (0 = byte, 1 = short, 2 = long) */
257 32, /* bitsize */
258 FALSE, /* pc_relative */
259 0, /* bitpos */
260 complain_overflow_signed,/* complain_on_overflow */
261 bfd_elf_generic_reloc, /* special_function */
262 "R_ARM_BREL_ADJ", /* name */
263 FALSE, /* partial_inplace */
264 0xffffffff, /* src_mask */
265 0xffffffff, /* dst_mask */
266 FALSE), /* pcrel_offset */
267
268 HOWTO (R_ARM_TLS_DESC, /* type */
269 0, /* rightshift */
270 2, /* size (0 = byte, 1 = short, 2 = long) */
271 32, /* bitsize */
272 FALSE, /* pc_relative */
273 0, /* bitpos */
274 complain_overflow_bitfield,/* complain_on_overflow */
275 bfd_elf_generic_reloc, /* special_function */
276 "R_ARM_TLS_DESC", /* name */
277 FALSE, /* partial_inplace */
278 0xffffffff, /* src_mask */
279 0xffffffff, /* dst_mask */
280 FALSE), /* pcrel_offset */
281
282 HOWTO (R_ARM_THM_SWI8, /* type */
283 0, /* rightshift */
284 0, /* size (0 = byte, 1 = short, 2 = long) */
285 0, /* bitsize */
286 FALSE, /* pc_relative */
287 0, /* bitpos */
288 complain_overflow_signed,/* complain_on_overflow */
289 bfd_elf_generic_reloc, /* special_function */
290 "R_ARM_SWI8", /* name */
291 FALSE, /* partial_inplace */
292 0x00000000, /* src_mask */
293 0x00000000, /* dst_mask */
294 FALSE), /* pcrel_offset */
295
296 /* BLX instruction for the ARM. */
297 HOWTO (R_ARM_XPC25, /* type */
298 2, /* rightshift */
299 2, /* size (0 = byte, 1 = short, 2 = long) */
300 24, /* bitsize */
301 TRUE, /* pc_relative */
302 0, /* bitpos */
303 complain_overflow_signed,/* complain_on_overflow */
304 bfd_elf_generic_reloc, /* special_function */
305 "R_ARM_XPC25", /* name */
306 FALSE, /* partial_inplace */
307 0x00ffffff, /* src_mask */
308 0x00ffffff, /* dst_mask */
309 TRUE), /* pcrel_offset */
310
311 /* BLX instruction for the Thumb. */
312 HOWTO (R_ARM_THM_XPC22, /* type */
313 2, /* rightshift */
314 2, /* size (0 = byte, 1 = short, 2 = long) */
315 24, /* bitsize */
316 TRUE, /* pc_relative */
317 0, /* bitpos */
318 complain_overflow_signed,/* complain_on_overflow */
319 bfd_elf_generic_reloc, /* special_function */
320 "R_ARM_THM_XPC22", /* name */
321 FALSE, /* partial_inplace */
322 0x07ff2fff, /* src_mask */
323 0x07ff2fff, /* dst_mask */
324 TRUE), /* pcrel_offset */
325
326 /* Dynamic TLS relocations. */
327
328 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
329 0, /* rightshift */
330 2, /* size (0 = byte, 1 = short, 2 = long) */
331 32, /* bitsize */
332 FALSE, /* pc_relative */
333 0, /* bitpos */
334 complain_overflow_bitfield,/* complain_on_overflow */
335 bfd_elf_generic_reloc, /* special_function */
336 "R_ARM_TLS_DTPMOD32", /* name */
337 TRUE, /* partial_inplace */
338 0xffffffff, /* src_mask */
339 0xffffffff, /* dst_mask */
340 FALSE), /* pcrel_offset */
341
342 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
343 0, /* rightshift */
344 2, /* size (0 = byte, 1 = short, 2 = long) */
345 32, /* bitsize */
346 FALSE, /* pc_relative */
347 0, /* bitpos */
348 complain_overflow_bitfield,/* complain_on_overflow */
349 bfd_elf_generic_reloc, /* special_function */
350 "R_ARM_TLS_DTPOFF32", /* name */
351 TRUE, /* partial_inplace */
352 0xffffffff, /* src_mask */
353 0xffffffff, /* dst_mask */
354 FALSE), /* pcrel_offset */
355
356 HOWTO (R_ARM_TLS_TPOFF32, /* type */
357 0, /* rightshift */
358 2, /* size (0 = byte, 1 = short, 2 = long) */
359 32, /* bitsize */
360 FALSE, /* pc_relative */
361 0, /* bitpos */
362 complain_overflow_bitfield,/* complain_on_overflow */
363 bfd_elf_generic_reloc, /* special_function */
364 "R_ARM_TLS_TPOFF32", /* name */
365 TRUE, /* partial_inplace */
366 0xffffffff, /* src_mask */
367 0xffffffff, /* dst_mask */
368 FALSE), /* pcrel_offset */
369
370 /* Relocs used in ARM Linux */
371
372 HOWTO (R_ARM_COPY, /* type */
373 0, /* rightshift */
374 2, /* size (0 = byte, 1 = short, 2 = long) */
375 32, /* bitsize */
376 FALSE, /* pc_relative */
377 0, /* bitpos */
378 complain_overflow_bitfield,/* complain_on_overflow */
379 bfd_elf_generic_reloc, /* special_function */
380 "R_ARM_COPY", /* name */
381 TRUE, /* partial_inplace */
382 0xffffffff, /* src_mask */
383 0xffffffff, /* dst_mask */
384 FALSE), /* pcrel_offset */
385
386 HOWTO (R_ARM_GLOB_DAT, /* type */
387 0, /* rightshift */
388 2, /* size (0 = byte, 1 = short, 2 = long) */
389 32, /* bitsize */
390 FALSE, /* pc_relative */
391 0, /* bitpos */
392 complain_overflow_bitfield,/* complain_on_overflow */
393 bfd_elf_generic_reloc, /* special_function */
394 "R_ARM_GLOB_DAT", /* name */
395 TRUE, /* partial_inplace */
396 0xffffffff, /* src_mask */
397 0xffffffff, /* dst_mask */
398 FALSE), /* pcrel_offset */
399
400 HOWTO (R_ARM_JUMP_SLOT, /* type */
401 0, /* rightshift */
402 2, /* size (0 = byte, 1 = short, 2 = long) */
403 32, /* bitsize */
404 FALSE, /* pc_relative */
405 0, /* bitpos */
406 complain_overflow_bitfield,/* complain_on_overflow */
407 bfd_elf_generic_reloc, /* special_function */
408 "R_ARM_JUMP_SLOT", /* name */
409 TRUE, /* partial_inplace */
410 0xffffffff, /* src_mask */
411 0xffffffff, /* dst_mask */
412 FALSE), /* pcrel_offset */
413
414 HOWTO (R_ARM_RELATIVE, /* type */
415 0, /* rightshift */
416 2, /* size (0 = byte, 1 = short, 2 = long) */
417 32, /* bitsize */
418 FALSE, /* pc_relative */
419 0, /* bitpos */
420 complain_overflow_bitfield,/* complain_on_overflow */
421 bfd_elf_generic_reloc, /* special_function */
422 "R_ARM_RELATIVE", /* name */
423 TRUE, /* partial_inplace */
424 0xffffffff, /* src_mask */
425 0xffffffff, /* dst_mask */
426 FALSE), /* pcrel_offset */
427
428 HOWTO (R_ARM_GOTOFF32, /* type */
429 0, /* rightshift */
430 2, /* size (0 = byte, 1 = short, 2 = long) */
431 32, /* bitsize */
432 FALSE, /* pc_relative */
433 0, /* bitpos */
434 complain_overflow_bitfield,/* complain_on_overflow */
435 bfd_elf_generic_reloc, /* special_function */
436 "R_ARM_GOTOFF32", /* name */
437 TRUE, /* partial_inplace */
438 0xffffffff, /* src_mask */
439 0xffffffff, /* dst_mask */
440 FALSE), /* pcrel_offset */
441
442 HOWTO (R_ARM_GOTPC, /* type */
443 0, /* rightshift */
444 2, /* size (0 = byte, 1 = short, 2 = long) */
445 32, /* bitsize */
446 TRUE, /* pc_relative */
447 0, /* bitpos */
448 complain_overflow_bitfield,/* complain_on_overflow */
449 bfd_elf_generic_reloc, /* special_function */
450 "R_ARM_GOTPC", /* name */
451 TRUE, /* partial_inplace */
452 0xffffffff, /* src_mask */
453 0xffffffff, /* dst_mask */
454 TRUE), /* pcrel_offset */
455
456 HOWTO (R_ARM_GOT32, /* type */
457 0, /* rightshift */
458 2, /* size (0 = byte, 1 = short, 2 = long) */
459 32, /* bitsize */
460 FALSE, /* pc_relative */
461 0, /* bitpos */
462 complain_overflow_bitfield,/* complain_on_overflow */
463 bfd_elf_generic_reloc, /* special_function */
464 "R_ARM_GOT32", /* name */
465 TRUE, /* partial_inplace */
466 0xffffffff, /* src_mask */
467 0xffffffff, /* dst_mask */
468 FALSE), /* pcrel_offset */
469
470 HOWTO (R_ARM_PLT32, /* type */
471 2, /* rightshift */
472 2, /* size (0 = byte, 1 = short, 2 = long) */
473 24, /* bitsize */
474 TRUE, /* pc_relative */
475 0, /* bitpos */
476 complain_overflow_bitfield,/* complain_on_overflow */
477 bfd_elf_generic_reloc, /* special_function */
478 "R_ARM_PLT32", /* name */
479 FALSE, /* partial_inplace */
480 0x00ffffff, /* src_mask */
481 0x00ffffff, /* dst_mask */
482 TRUE), /* pcrel_offset */
483
484 HOWTO (R_ARM_CALL, /* type */
485 2, /* rightshift */
486 2, /* size (0 = byte, 1 = short, 2 = long) */
487 24, /* bitsize */
488 TRUE, /* pc_relative */
489 0, /* bitpos */
490 complain_overflow_signed,/* complain_on_overflow */
491 bfd_elf_generic_reloc, /* special_function */
492 "R_ARM_CALL", /* name */
493 FALSE, /* partial_inplace */
494 0x00ffffff, /* src_mask */
495 0x00ffffff, /* dst_mask */
496 TRUE), /* pcrel_offset */
497
498 HOWTO (R_ARM_JUMP24, /* type */
499 2, /* rightshift */
500 2, /* size (0 = byte, 1 = short, 2 = long) */
501 24, /* bitsize */
502 TRUE, /* pc_relative */
503 0, /* bitpos */
504 complain_overflow_signed,/* complain_on_overflow */
505 bfd_elf_generic_reloc, /* special_function */
506 "R_ARM_JUMP24", /* name */
507 FALSE, /* partial_inplace */
508 0x00ffffff, /* src_mask */
509 0x00ffffff, /* dst_mask */
510 TRUE), /* pcrel_offset */
511
512 HOWTO (R_ARM_THM_JUMP24, /* type */
513 1, /* rightshift */
514 2, /* size (0 = byte, 1 = short, 2 = long) */
515 24, /* bitsize */
516 TRUE, /* pc_relative */
517 0, /* bitpos */
518 complain_overflow_signed,/* complain_on_overflow */
519 bfd_elf_generic_reloc, /* special_function */
520 "R_ARM_THM_JUMP24", /* name */
521 FALSE, /* partial_inplace */
522 0x07ff2fff, /* src_mask */
523 0x07ff2fff, /* dst_mask */
524 TRUE), /* pcrel_offset */
525
526 HOWTO (R_ARM_BASE_ABS, /* type */
527 0, /* rightshift */
528 2, /* size (0 = byte, 1 = short, 2 = long) */
529 32, /* bitsize */
530 FALSE, /* pc_relative */
531 0, /* bitpos */
532 complain_overflow_dont,/* complain_on_overflow */
533 bfd_elf_generic_reloc, /* special_function */
534 "R_ARM_BASE_ABS", /* name */
535 FALSE, /* partial_inplace */
536 0xffffffff, /* src_mask */
537 0xffffffff, /* dst_mask */
538 FALSE), /* pcrel_offset */
539
540 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
541 0, /* rightshift */
542 2, /* size (0 = byte, 1 = short, 2 = long) */
543 12, /* bitsize */
544 TRUE, /* pc_relative */
545 0, /* bitpos */
546 complain_overflow_dont,/* complain_on_overflow */
547 bfd_elf_generic_reloc, /* special_function */
548 "R_ARM_ALU_PCREL_7_0", /* name */
549 FALSE, /* partial_inplace */
550 0x00000fff, /* src_mask */
551 0x00000fff, /* dst_mask */
552 TRUE), /* pcrel_offset */
553
554 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
555 0, /* rightshift */
556 2, /* size (0 = byte, 1 = short, 2 = long) */
557 12, /* bitsize */
558 TRUE, /* pc_relative */
559 8, /* bitpos */
560 complain_overflow_dont,/* complain_on_overflow */
561 bfd_elf_generic_reloc, /* special_function */
562 "R_ARM_ALU_PCREL_15_8",/* name */
563 FALSE, /* partial_inplace */
564 0x00000fff, /* src_mask */
565 0x00000fff, /* dst_mask */
566 TRUE), /* pcrel_offset */
567
568 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
569 0, /* rightshift */
570 2, /* size (0 = byte, 1 = short, 2 = long) */
571 12, /* bitsize */
572 TRUE, /* pc_relative */
573 16, /* bitpos */
574 complain_overflow_dont,/* complain_on_overflow */
575 bfd_elf_generic_reloc, /* special_function */
576 "R_ARM_ALU_PCREL_23_15",/* name */
577 FALSE, /* partial_inplace */
578 0x00000fff, /* src_mask */
579 0x00000fff, /* dst_mask */
580 TRUE), /* pcrel_offset */
581
582 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
583 0, /* rightshift */
584 2, /* size (0 = byte, 1 = short, 2 = long) */
585 12, /* bitsize */
586 FALSE, /* pc_relative */
587 0, /* bitpos */
588 complain_overflow_dont,/* complain_on_overflow */
589 bfd_elf_generic_reloc, /* special_function */
590 "R_ARM_LDR_SBREL_11_0",/* name */
591 FALSE, /* partial_inplace */
592 0x00000fff, /* src_mask */
593 0x00000fff, /* dst_mask */
594 FALSE), /* pcrel_offset */
595
596 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
597 0, /* rightshift */
598 2, /* size (0 = byte, 1 = short, 2 = long) */
599 8, /* bitsize */
600 FALSE, /* pc_relative */
601 12, /* bitpos */
602 complain_overflow_dont,/* complain_on_overflow */
603 bfd_elf_generic_reloc, /* special_function */
604 "R_ARM_ALU_SBREL_19_12",/* name */
605 FALSE, /* partial_inplace */
606 0x000ff000, /* src_mask */
607 0x000ff000, /* dst_mask */
608 FALSE), /* pcrel_offset */
609
610 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
611 0, /* rightshift */
612 2, /* size (0 = byte, 1 = short, 2 = long) */
613 8, /* bitsize */
614 FALSE, /* pc_relative */
615 20, /* bitpos */
616 complain_overflow_dont,/* complain_on_overflow */
617 bfd_elf_generic_reloc, /* special_function */
618 "R_ARM_ALU_SBREL_27_20",/* name */
619 FALSE, /* partial_inplace */
620 0x0ff00000, /* src_mask */
621 0x0ff00000, /* dst_mask */
622 FALSE), /* pcrel_offset */
623
624 HOWTO (R_ARM_TARGET1, /* type */
625 0, /* rightshift */
626 2, /* size (0 = byte, 1 = short, 2 = long) */
627 32, /* bitsize */
628 FALSE, /* pc_relative */
629 0, /* bitpos */
630 complain_overflow_dont,/* complain_on_overflow */
631 bfd_elf_generic_reloc, /* special_function */
632 "R_ARM_TARGET1", /* name */
633 FALSE, /* partial_inplace */
634 0xffffffff, /* src_mask */
635 0xffffffff, /* dst_mask */
636 FALSE), /* pcrel_offset */
637
638 HOWTO (R_ARM_ROSEGREL32, /* type */
639 0, /* rightshift */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
641 32, /* bitsize */
642 FALSE, /* pc_relative */
643 0, /* bitpos */
644 complain_overflow_dont,/* complain_on_overflow */
645 bfd_elf_generic_reloc, /* special_function */
646 "R_ARM_ROSEGREL32", /* name */
647 FALSE, /* partial_inplace */
648 0xffffffff, /* src_mask */
649 0xffffffff, /* dst_mask */
650 FALSE), /* pcrel_offset */
651
652 HOWTO (R_ARM_V4BX, /* type */
653 0, /* rightshift */
654 2, /* size (0 = byte, 1 = short, 2 = long) */
655 32, /* bitsize */
656 FALSE, /* pc_relative */
657 0, /* bitpos */
658 complain_overflow_dont,/* complain_on_overflow */
659 bfd_elf_generic_reloc, /* special_function */
660 "R_ARM_V4BX", /* name */
661 FALSE, /* partial_inplace */
662 0xffffffff, /* src_mask */
663 0xffffffff, /* dst_mask */
664 FALSE), /* pcrel_offset */
665
666 HOWTO (R_ARM_TARGET2, /* type */
667 0, /* rightshift */
668 2, /* size (0 = byte, 1 = short, 2 = long) */
669 32, /* bitsize */
670 FALSE, /* pc_relative */
671 0, /* bitpos */
672 complain_overflow_signed,/* complain_on_overflow */
673 bfd_elf_generic_reloc, /* special_function */
674 "R_ARM_TARGET2", /* name */
675 FALSE, /* partial_inplace */
676 0xffffffff, /* src_mask */
677 0xffffffff, /* dst_mask */
678 TRUE), /* pcrel_offset */
679
680 HOWTO (R_ARM_PREL31, /* type */
681 0, /* rightshift */
682 2, /* size (0 = byte, 1 = short, 2 = long) */
683 31, /* bitsize */
684 TRUE, /* pc_relative */
685 0, /* bitpos */
686 complain_overflow_signed,/* complain_on_overflow */
687 bfd_elf_generic_reloc, /* special_function */
688 "R_ARM_PREL31", /* name */
689 FALSE, /* partial_inplace */
690 0x7fffffff, /* src_mask */
691 0x7fffffff, /* dst_mask */
692 TRUE), /* pcrel_offset */
693
694 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
695 0, /* rightshift */
696 2, /* size (0 = byte, 1 = short, 2 = long) */
697 16, /* bitsize */
698 FALSE, /* pc_relative */
699 0, /* bitpos */
700 complain_overflow_dont,/* complain_on_overflow */
701 bfd_elf_generic_reloc, /* special_function */
702 "R_ARM_MOVW_ABS_NC", /* name */
703 FALSE, /* partial_inplace */
704 0x000f0fff, /* src_mask */
705 0x000f0fff, /* dst_mask */
706 FALSE), /* pcrel_offset */
707
708 HOWTO (R_ARM_MOVT_ABS, /* type */
709 0, /* rightshift */
710 2, /* size (0 = byte, 1 = short, 2 = long) */
711 16, /* bitsize */
712 FALSE, /* pc_relative */
713 0, /* bitpos */
714 complain_overflow_bitfield,/* complain_on_overflow */
715 bfd_elf_generic_reloc, /* special_function */
716 "R_ARM_MOVT_ABS", /* name */
717 FALSE, /* partial_inplace */
718 0x000f0fff, /* src_mask */
719 0x000f0fff, /* dst_mask */
720 FALSE), /* pcrel_offset */
721
722 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
723 0, /* rightshift */
724 2, /* size (0 = byte, 1 = short, 2 = long) */
725 16, /* bitsize */
726 TRUE, /* pc_relative */
727 0, /* bitpos */
728 complain_overflow_dont,/* complain_on_overflow */
729 bfd_elf_generic_reloc, /* special_function */
730 "R_ARM_MOVW_PREL_NC", /* name */
731 FALSE, /* partial_inplace */
732 0x000f0fff, /* src_mask */
733 0x000f0fff, /* dst_mask */
734 TRUE), /* pcrel_offset */
735
736 HOWTO (R_ARM_MOVT_PREL, /* type */
737 0, /* rightshift */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
739 16, /* bitsize */
740 TRUE, /* pc_relative */
741 0, /* bitpos */
742 complain_overflow_bitfield,/* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 "R_ARM_MOVT_PREL", /* name */
745 FALSE, /* partial_inplace */
746 0x000f0fff, /* src_mask */
747 0x000f0fff, /* dst_mask */
748 TRUE), /* pcrel_offset */
749
750 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
751 0, /* rightshift */
752 2, /* size (0 = byte, 1 = short, 2 = long) */
753 16, /* bitsize */
754 FALSE, /* pc_relative */
755 0, /* bitpos */
756 complain_overflow_dont,/* complain_on_overflow */
757 bfd_elf_generic_reloc, /* special_function */
758 "R_ARM_THM_MOVW_ABS_NC",/* name */
759 FALSE, /* partial_inplace */
760 0x040f70ff, /* src_mask */
761 0x040f70ff, /* dst_mask */
762 FALSE), /* pcrel_offset */
763
764 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
765 0, /* rightshift */
766 2, /* size (0 = byte, 1 = short, 2 = long) */
767 16, /* bitsize */
768 FALSE, /* pc_relative */
769 0, /* bitpos */
770 complain_overflow_bitfield,/* complain_on_overflow */
771 bfd_elf_generic_reloc, /* special_function */
772 "R_ARM_THM_MOVT_ABS", /* name */
773 FALSE, /* partial_inplace */
774 0x040f70ff, /* src_mask */
775 0x040f70ff, /* dst_mask */
776 FALSE), /* pcrel_offset */
777
778 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
779 0, /* rightshift */
780 2, /* size (0 = byte, 1 = short, 2 = long) */
781 16, /* bitsize */
782 TRUE, /* pc_relative */
783 0, /* bitpos */
784 complain_overflow_dont,/* complain_on_overflow */
785 bfd_elf_generic_reloc, /* special_function */
786 "R_ARM_THM_MOVW_PREL_NC",/* name */
787 FALSE, /* partial_inplace */
788 0x040f70ff, /* src_mask */
789 0x040f70ff, /* dst_mask */
790 TRUE), /* pcrel_offset */
791
792 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
793 0, /* rightshift */
794 2, /* size (0 = byte, 1 = short, 2 = long) */
795 16, /* bitsize */
796 TRUE, /* pc_relative */
797 0, /* bitpos */
798 complain_overflow_bitfield,/* complain_on_overflow */
799 bfd_elf_generic_reloc, /* special_function */
800 "R_ARM_THM_MOVT_PREL", /* name */
801 FALSE, /* partial_inplace */
802 0x040f70ff, /* src_mask */
803 0x040f70ff, /* dst_mask */
804 TRUE), /* pcrel_offset */
805
806 HOWTO (R_ARM_THM_JUMP19, /* type */
807 1, /* rightshift */
808 2, /* size (0 = byte, 1 = short, 2 = long) */
809 19, /* bitsize */
810 TRUE, /* pc_relative */
811 0, /* bitpos */
812 complain_overflow_signed,/* complain_on_overflow */
813 bfd_elf_generic_reloc, /* special_function */
814 "R_ARM_THM_JUMP19", /* name */
815 FALSE, /* partial_inplace */
816 0x043f2fff, /* src_mask */
817 0x043f2fff, /* dst_mask */
818 TRUE), /* pcrel_offset */
819
820 HOWTO (R_ARM_THM_JUMP6, /* type */
821 1, /* rightshift */
822 1, /* size (0 = byte, 1 = short, 2 = long) */
823 6, /* bitsize */
824 TRUE, /* pc_relative */
825 0, /* bitpos */
826 complain_overflow_unsigned,/* complain_on_overflow */
827 bfd_elf_generic_reloc, /* special_function */
828 "R_ARM_THM_JUMP6", /* name */
829 FALSE, /* partial_inplace */
830 0x02f8, /* src_mask */
831 0x02f8, /* dst_mask */
832 TRUE), /* pcrel_offset */
833
834 /* These are declared as 13-bit signed relocations because we can
835 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
836 versa. */
837 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
838 0, /* rightshift */
839 2, /* size (0 = byte, 1 = short, 2 = long) */
840 13, /* bitsize */
841 TRUE, /* pc_relative */
842 0, /* bitpos */
843 complain_overflow_dont,/* complain_on_overflow */
844 bfd_elf_generic_reloc, /* special_function */
845 "R_ARM_THM_ALU_PREL_11_0",/* name */
846 FALSE, /* partial_inplace */
847 0xffffffff, /* src_mask */
848 0xffffffff, /* dst_mask */
849 TRUE), /* pcrel_offset */
850
851 HOWTO (R_ARM_THM_PC12, /* type */
852 0, /* rightshift */
853 2, /* size (0 = byte, 1 = short, 2 = long) */
854 13, /* bitsize */
855 TRUE, /* pc_relative */
856 0, /* bitpos */
857 complain_overflow_dont,/* complain_on_overflow */
858 bfd_elf_generic_reloc, /* special_function */
859 "R_ARM_THM_PC12", /* name */
860 FALSE, /* partial_inplace */
861 0xffffffff, /* src_mask */
862 0xffffffff, /* dst_mask */
863 TRUE), /* pcrel_offset */
864
865 HOWTO (R_ARM_ABS32_NOI, /* type */
866 0, /* rightshift */
867 2, /* size (0 = byte, 1 = short, 2 = long) */
868 32, /* bitsize */
869 FALSE, /* pc_relative */
870 0, /* bitpos */
871 complain_overflow_dont,/* complain_on_overflow */
872 bfd_elf_generic_reloc, /* special_function */
873 "R_ARM_ABS32_NOI", /* name */
874 FALSE, /* partial_inplace */
875 0xffffffff, /* src_mask */
876 0xffffffff, /* dst_mask */
877 FALSE), /* pcrel_offset */
878
879 HOWTO (R_ARM_REL32_NOI, /* type */
880 0, /* rightshift */
881 2, /* size (0 = byte, 1 = short, 2 = long) */
882 32, /* bitsize */
883 TRUE, /* pc_relative */
884 0, /* bitpos */
885 complain_overflow_dont,/* complain_on_overflow */
886 bfd_elf_generic_reloc, /* special_function */
887 "R_ARM_REL32_NOI", /* name */
888 FALSE, /* partial_inplace */
889 0xffffffff, /* src_mask */
890 0xffffffff, /* dst_mask */
891 FALSE), /* pcrel_offset */
892
893 /* Group relocations. */
894
895 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
896 0, /* rightshift */
897 2, /* size (0 = byte, 1 = short, 2 = long) */
898 32, /* bitsize */
899 TRUE, /* pc_relative */
900 0, /* bitpos */
901 complain_overflow_dont,/* complain_on_overflow */
902 bfd_elf_generic_reloc, /* special_function */
903 "R_ARM_ALU_PC_G0_NC", /* name */
904 FALSE, /* partial_inplace */
905 0xffffffff, /* src_mask */
906 0xffffffff, /* dst_mask */
907 TRUE), /* pcrel_offset */
908
909 HOWTO (R_ARM_ALU_PC_G0, /* type */
910 0, /* rightshift */
911 2, /* size (0 = byte, 1 = short, 2 = long) */
912 32, /* bitsize */
913 TRUE, /* pc_relative */
914 0, /* bitpos */
915 complain_overflow_dont,/* complain_on_overflow */
916 bfd_elf_generic_reloc, /* special_function */
917 "R_ARM_ALU_PC_G0", /* name */
918 FALSE, /* partial_inplace */
919 0xffffffff, /* src_mask */
920 0xffffffff, /* dst_mask */
921 TRUE), /* pcrel_offset */
922
923 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
924 0, /* rightshift */
925 2, /* size (0 = byte, 1 = short, 2 = long) */
926 32, /* bitsize */
927 TRUE, /* pc_relative */
928 0, /* bitpos */
929 complain_overflow_dont,/* complain_on_overflow */
930 bfd_elf_generic_reloc, /* special_function */
931 "R_ARM_ALU_PC_G1_NC", /* name */
932 FALSE, /* partial_inplace */
933 0xffffffff, /* src_mask */
934 0xffffffff, /* dst_mask */
935 TRUE), /* pcrel_offset */
936
937 HOWTO (R_ARM_ALU_PC_G1, /* type */
938 0, /* rightshift */
939 2, /* size (0 = byte, 1 = short, 2 = long) */
940 32, /* bitsize */
941 TRUE, /* pc_relative */
942 0, /* bitpos */
943 complain_overflow_dont,/* complain_on_overflow */
944 bfd_elf_generic_reloc, /* special_function */
945 "R_ARM_ALU_PC_G1", /* name */
946 FALSE, /* partial_inplace */
947 0xffffffff, /* src_mask */
948 0xffffffff, /* dst_mask */
949 TRUE), /* pcrel_offset */
950
951 HOWTO (R_ARM_ALU_PC_G2, /* type */
952 0, /* rightshift */
953 2, /* size (0 = byte, 1 = short, 2 = long) */
954 32, /* bitsize */
955 TRUE, /* pc_relative */
956 0, /* bitpos */
957 complain_overflow_dont,/* complain_on_overflow */
958 bfd_elf_generic_reloc, /* special_function */
959 "R_ARM_ALU_PC_G2", /* name */
960 FALSE, /* partial_inplace */
961 0xffffffff, /* src_mask */
962 0xffffffff, /* dst_mask */
963 TRUE), /* pcrel_offset */
964
965 HOWTO (R_ARM_LDR_PC_G1, /* type */
966 0, /* rightshift */
967 2, /* size (0 = byte, 1 = short, 2 = long) */
968 32, /* bitsize */
969 TRUE, /* pc_relative */
970 0, /* bitpos */
971 complain_overflow_dont,/* complain_on_overflow */
972 bfd_elf_generic_reloc, /* special_function */
973 "R_ARM_LDR_PC_G1", /* name */
974 FALSE, /* partial_inplace */
975 0xffffffff, /* src_mask */
976 0xffffffff, /* dst_mask */
977 TRUE), /* pcrel_offset */
978
979 HOWTO (R_ARM_LDR_PC_G2, /* type */
980 0, /* rightshift */
981 2, /* size (0 = byte, 1 = short, 2 = long) */
982 32, /* bitsize */
983 TRUE, /* pc_relative */
984 0, /* bitpos */
985 complain_overflow_dont,/* complain_on_overflow */
986 bfd_elf_generic_reloc, /* special_function */
987 "R_ARM_LDR_PC_G2", /* name */
988 FALSE, /* partial_inplace */
989 0xffffffff, /* src_mask */
990 0xffffffff, /* dst_mask */
991 TRUE), /* pcrel_offset */
992
993 HOWTO (R_ARM_LDRS_PC_G0, /* type */
994 0, /* rightshift */
995 2, /* size (0 = byte, 1 = short, 2 = long) */
996 32, /* bitsize */
997 TRUE, /* pc_relative */
998 0, /* bitpos */
999 complain_overflow_dont,/* complain_on_overflow */
1000 bfd_elf_generic_reloc, /* special_function */
1001 "R_ARM_LDRS_PC_G0", /* name */
1002 FALSE, /* partial_inplace */
1003 0xffffffff, /* src_mask */
1004 0xffffffff, /* dst_mask */
1005 TRUE), /* pcrel_offset */
1006
1007 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1008 0, /* rightshift */
1009 2, /* size (0 = byte, 1 = short, 2 = long) */
1010 32, /* bitsize */
1011 TRUE, /* pc_relative */
1012 0, /* bitpos */
1013 complain_overflow_dont,/* complain_on_overflow */
1014 bfd_elf_generic_reloc, /* special_function */
1015 "R_ARM_LDRS_PC_G1", /* name */
1016 FALSE, /* partial_inplace */
1017 0xffffffff, /* src_mask */
1018 0xffffffff, /* dst_mask */
1019 TRUE), /* pcrel_offset */
1020
1021 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1022 0, /* rightshift */
1023 2, /* size (0 = byte, 1 = short, 2 = long) */
1024 32, /* bitsize */
1025 TRUE, /* pc_relative */
1026 0, /* bitpos */
1027 complain_overflow_dont,/* complain_on_overflow */
1028 bfd_elf_generic_reloc, /* special_function */
1029 "R_ARM_LDRS_PC_G2", /* name */
1030 FALSE, /* partial_inplace */
1031 0xffffffff, /* src_mask */
1032 0xffffffff, /* dst_mask */
1033 TRUE), /* pcrel_offset */
1034
1035 HOWTO (R_ARM_LDC_PC_G0, /* type */
1036 0, /* rightshift */
1037 2, /* size (0 = byte, 1 = short, 2 = long) */
1038 32, /* bitsize */
1039 TRUE, /* pc_relative */
1040 0, /* bitpos */
1041 complain_overflow_dont,/* complain_on_overflow */
1042 bfd_elf_generic_reloc, /* special_function */
1043 "R_ARM_LDC_PC_G0", /* name */
1044 FALSE, /* partial_inplace */
1045 0xffffffff, /* src_mask */
1046 0xffffffff, /* dst_mask */
1047 TRUE), /* pcrel_offset */
1048
1049 HOWTO (R_ARM_LDC_PC_G1, /* type */
1050 0, /* rightshift */
1051 2, /* size (0 = byte, 1 = short, 2 = long) */
1052 32, /* bitsize */
1053 TRUE, /* pc_relative */
1054 0, /* bitpos */
1055 complain_overflow_dont,/* complain_on_overflow */
1056 bfd_elf_generic_reloc, /* special_function */
1057 "R_ARM_LDC_PC_G1", /* name */
1058 FALSE, /* partial_inplace */
1059 0xffffffff, /* src_mask */
1060 0xffffffff, /* dst_mask */
1061 TRUE), /* pcrel_offset */
1062
1063 HOWTO (R_ARM_LDC_PC_G2, /* type */
1064 0, /* rightshift */
1065 2, /* size (0 = byte, 1 = short, 2 = long) */
1066 32, /* bitsize */
1067 TRUE, /* pc_relative */
1068 0, /* bitpos */
1069 complain_overflow_dont,/* complain_on_overflow */
1070 bfd_elf_generic_reloc, /* special_function */
1071 "R_ARM_LDC_PC_G2", /* name */
1072 FALSE, /* partial_inplace */
1073 0xffffffff, /* src_mask */
1074 0xffffffff, /* dst_mask */
1075 TRUE), /* pcrel_offset */
1076
1077 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1078 0, /* rightshift */
1079 2, /* size (0 = byte, 1 = short, 2 = long) */
1080 32, /* bitsize */
1081 TRUE, /* pc_relative */
1082 0, /* bitpos */
1083 complain_overflow_dont,/* complain_on_overflow */
1084 bfd_elf_generic_reloc, /* special_function */
1085 "R_ARM_ALU_SB_G0_NC", /* name */
1086 FALSE, /* partial_inplace */
1087 0xffffffff, /* src_mask */
1088 0xffffffff, /* dst_mask */
1089 TRUE), /* pcrel_offset */
1090
1091 HOWTO (R_ARM_ALU_SB_G0, /* type */
1092 0, /* rightshift */
1093 2, /* size (0 = byte, 1 = short, 2 = long) */
1094 32, /* bitsize */
1095 TRUE, /* pc_relative */
1096 0, /* bitpos */
1097 complain_overflow_dont,/* complain_on_overflow */
1098 bfd_elf_generic_reloc, /* special_function */
1099 "R_ARM_ALU_SB_G0", /* name */
1100 FALSE, /* partial_inplace */
1101 0xffffffff, /* src_mask */
1102 0xffffffff, /* dst_mask */
1103 TRUE), /* pcrel_offset */
1104
1105 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1106 0, /* rightshift */
1107 2, /* size (0 = byte, 1 = short, 2 = long) */
1108 32, /* bitsize */
1109 TRUE, /* pc_relative */
1110 0, /* bitpos */
1111 complain_overflow_dont,/* complain_on_overflow */
1112 bfd_elf_generic_reloc, /* special_function */
1113 "R_ARM_ALU_SB_G1_NC", /* name */
1114 FALSE, /* partial_inplace */
1115 0xffffffff, /* src_mask */
1116 0xffffffff, /* dst_mask */
1117 TRUE), /* pcrel_offset */
1118
1119 HOWTO (R_ARM_ALU_SB_G1, /* type */
1120 0, /* rightshift */
1121 2, /* size (0 = byte, 1 = short, 2 = long) */
1122 32, /* bitsize */
1123 TRUE, /* pc_relative */
1124 0, /* bitpos */
1125 complain_overflow_dont,/* complain_on_overflow */
1126 bfd_elf_generic_reloc, /* special_function */
1127 "R_ARM_ALU_SB_G1", /* name */
1128 FALSE, /* partial_inplace */
1129 0xffffffff, /* src_mask */
1130 0xffffffff, /* dst_mask */
1131 TRUE), /* pcrel_offset */
1132
1133 HOWTO (R_ARM_ALU_SB_G2, /* type */
1134 0, /* rightshift */
1135 2, /* size (0 = byte, 1 = short, 2 = long) */
1136 32, /* bitsize */
1137 TRUE, /* pc_relative */
1138 0, /* bitpos */
1139 complain_overflow_dont,/* complain_on_overflow */
1140 bfd_elf_generic_reloc, /* special_function */
1141 "R_ARM_ALU_SB_G2", /* name */
1142 FALSE, /* partial_inplace */
1143 0xffffffff, /* src_mask */
1144 0xffffffff, /* dst_mask */
1145 TRUE), /* pcrel_offset */
1146
1147 HOWTO (R_ARM_LDR_SB_G0, /* type */
1148 0, /* rightshift */
1149 2, /* size (0 = byte, 1 = short, 2 = long) */
1150 32, /* bitsize */
1151 TRUE, /* pc_relative */
1152 0, /* bitpos */
1153 complain_overflow_dont,/* complain_on_overflow */
1154 bfd_elf_generic_reloc, /* special_function */
1155 "R_ARM_LDR_SB_G0", /* name */
1156 FALSE, /* partial_inplace */
1157 0xffffffff, /* src_mask */
1158 0xffffffff, /* dst_mask */
1159 TRUE), /* pcrel_offset */
1160
1161 HOWTO (R_ARM_LDR_SB_G1, /* type */
1162 0, /* rightshift */
1163 2, /* size (0 = byte, 1 = short, 2 = long) */
1164 32, /* bitsize */
1165 TRUE, /* pc_relative */
1166 0, /* bitpos */
1167 complain_overflow_dont,/* complain_on_overflow */
1168 bfd_elf_generic_reloc, /* special_function */
1169 "R_ARM_LDR_SB_G1", /* name */
1170 FALSE, /* partial_inplace */
1171 0xffffffff, /* src_mask */
1172 0xffffffff, /* dst_mask */
1173 TRUE), /* pcrel_offset */
1174
1175 HOWTO (R_ARM_LDR_SB_G2, /* type */
1176 0, /* rightshift */
1177 2, /* size (0 = byte, 1 = short, 2 = long) */
1178 32, /* bitsize */
1179 TRUE, /* pc_relative */
1180 0, /* bitpos */
1181 complain_overflow_dont,/* complain_on_overflow */
1182 bfd_elf_generic_reloc, /* special_function */
1183 "R_ARM_LDR_SB_G2", /* name */
1184 FALSE, /* partial_inplace */
1185 0xffffffff, /* src_mask */
1186 0xffffffff, /* dst_mask */
1187 TRUE), /* pcrel_offset */
1188
1189 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1190 0, /* rightshift */
1191 2, /* size (0 = byte, 1 = short, 2 = long) */
1192 32, /* bitsize */
1193 TRUE, /* pc_relative */
1194 0, /* bitpos */
1195 complain_overflow_dont,/* complain_on_overflow */
1196 bfd_elf_generic_reloc, /* special_function */
1197 "R_ARM_LDRS_SB_G0", /* name */
1198 FALSE, /* partial_inplace */
1199 0xffffffff, /* src_mask */
1200 0xffffffff, /* dst_mask */
1201 TRUE), /* pcrel_offset */
1202
1203 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1204 0, /* rightshift */
1205 2, /* size (0 = byte, 1 = short, 2 = long) */
1206 32, /* bitsize */
1207 TRUE, /* pc_relative */
1208 0, /* bitpos */
1209 complain_overflow_dont,/* complain_on_overflow */
1210 bfd_elf_generic_reloc, /* special_function */
1211 "R_ARM_LDRS_SB_G1", /* name */
1212 FALSE, /* partial_inplace */
1213 0xffffffff, /* src_mask */
1214 0xffffffff, /* dst_mask */
1215 TRUE), /* pcrel_offset */
1216
1217 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1218 0, /* rightshift */
1219 2, /* size (0 = byte, 1 = short, 2 = long) */
1220 32, /* bitsize */
1221 TRUE, /* pc_relative */
1222 0, /* bitpos */
1223 complain_overflow_dont,/* complain_on_overflow */
1224 bfd_elf_generic_reloc, /* special_function */
1225 "R_ARM_LDRS_SB_G2", /* name */
1226 FALSE, /* partial_inplace */
1227 0xffffffff, /* src_mask */
1228 0xffffffff, /* dst_mask */
1229 TRUE), /* pcrel_offset */
1230
1231 HOWTO (R_ARM_LDC_SB_G0, /* type */
1232 0, /* rightshift */
1233 2, /* size (0 = byte, 1 = short, 2 = long) */
1234 32, /* bitsize */
1235 TRUE, /* pc_relative */
1236 0, /* bitpos */
1237 complain_overflow_dont,/* complain_on_overflow */
1238 bfd_elf_generic_reloc, /* special_function */
1239 "R_ARM_LDC_SB_G0", /* name */
1240 FALSE, /* partial_inplace */
1241 0xffffffff, /* src_mask */
1242 0xffffffff, /* dst_mask */
1243 TRUE), /* pcrel_offset */
1244
1245 HOWTO (R_ARM_LDC_SB_G1, /* type */
1246 0, /* rightshift */
1247 2, /* size (0 = byte, 1 = short, 2 = long) */
1248 32, /* bitsize */
1249 TRUE, /* pc_relative */
1250 0, /* bitpos */
1251 complain_overflow_dont,/* complain_on_overflow */
1252 bfd_elf_generic_reloc, /* special_function */
1253 "R_ARM_LDC_SB_G1", /* name */
1254 FALSE, /* partial_inplace */
1255 0xffffffff, /* src_mask */
1256 0xffffffff, /* dst_mask */
1257 TRUE), /* pcrel_offset */
1258
1259 HOWTO (R_ARM_LDC_SB_G2, /* type */
1260 0, /* rightshift */
1261 2, /* size (0 = byte, 1 = short, 2 = long) */
1262 32, /* bitsize */
1263 TRUE, /* pc_relative */
1264 0, /* bitpos */
1265 complain_overflow_dont,/* complain_on_overflow */
1266 bfd_elf_generic_reloc, /* special_function */
1267 "R_ARM_LDC_SB_G2", /* name */
1268 FALSE, /* partial_inplace */
1269 0xffffffff, /* src_mask */
1270 0xffffffff, /* dst_mask */
1271 TRUE), /* pcrel_offset */
1272
1273 /* End of group relocations. */
1274
1275 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1276 0, /* rightshift */
1277 2, /* size (0 = byte, 1 = short, 2 = long) */
1278 16, /* bitsize */
1279 FALSE, /* pc_relative */
1280 0, /* bitpos */
1281 complain_overflow_dont,/* complain_on_overflow */
1282 bfd_elf_generic_reloc, /* special_function */
1283 "R_ARM_MOVW_BREL_NC", /* name */
1284 FALSE, /* partial_inplace */
1285 0x0000ffff, /* src_mask */
1286 0x0000ffff, /* dst_mask */
1287 FALSE), /* pcrel_offset */
1288
1289 HOWTO (R_ARM_MOVT_BREL, /* type */
1290 0, /* rightshift */
1291 2, /* size (0 = byte, 1 = short, 2 = long) */
1292 16, /* bitsize */
1293 FALSE, /* pc_relative */
1294 0, /* bitpos */
1295 complain_overflow_bitfield,/* complain_on_overflow */
1296 bfd_elf_generic_reloc, /* special_function */
1297 "R_ARM_MOVT_BREL", /* name */
1298 FALSE, /* partial_inplace */
1299 0x0000ffff, /* src_mask */
1300 0x0000ffff, /* dst_mask */
1301 FALSE), /* pcrel_offset */
1302
1303 HOWTO (R_ARM_MOVW_BREL, /* type */
1304 0, /* rightshift */
1305 2, /* size (0 = byte, 1 = short, 2 = long) */
1306 16, /* bitsize */
1307 FALSE, /* pc_relative */
1308 0, /* bitpos */
1309 complain_overflow_dont,/* complain_on_overflow */
1310 bfd_elf_generic_reloc, /* special_function */
1311 "R_ARM_MOVW_BREL", /* name */
1312 FALSE, /* partial_inplace */
1313 0x0000ffff, /* src_mask */
1314 0x0000ffff, /* dst_mask */
1315 FALSE), /* pcrel_offset */
1316
1317 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1318 0, /* rightshift */
1319 2, /* size (0 = byte, 1 = short, 2 = long) */
1320 16, /* bitsize */
1321 FALSE, /* pc_relative */
1322 0, /* bitpos */
1323 complain_overflow_dont,/* complain_on_overflow */
1324 bfd_elf_generic_reloc, /* special_function */
1325 "R_ARM_THM_MOVW_BREL_NC",/* name */
1326 FALSE, /* partial_inplace */
1327 0x040f70ff, /* src_mask */
1328 0x040f70ff, /* dst_mask */
1329 FALSE), /* pcrel_offset */
1330
1331 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1332 0, /* rightshift */
1333 2, /* size (0 = byte, 1 = short, 2 = long) */
1334 16, /* bitsize */
1335 FALSE, /* pc_relative */
1336 0, /* bitpos */
1337 complain_overflow_bitfield,/* complain_on_overflow */
1338 bfd_elf_generic_reloc, /* special_function */
1339 "R_ARM_THM_MOVT_BREL", /* name */
1340 FALSE, /* partial_inplace */
1341 0x040f70ff, /* src_mask */
1342 0x040f70ff, /* dst_mask */
1343 FALSE), /* pcrel_offset */
1344
1345 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1346 0, /* rightshift */
1347 2, /* size (0 = byte, 1 = short, 2 = long) */
1348 16, /* bitsize */
1349 FALSE, /* pc_relative */
1350 0, /* bitpos */
1351 complain_overflow_dont,/* complain_on_overflow */
1352 bfd_elf_generic_reloc, /* special_function */
1353 "R_ARM_THM_MOVW_BREL", /* name */
1354 FALSE, /* partial_inplace */
1355 0x040f70ff, /* src_mask */
1356 0x040f70ff, /* dst_mask */
1357 FALSE), /* pcrel_offset */
1358
1359 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1360 0, /* rightshift */
1361 2, /* size (0 = byte, 1 = short, 2 = long) */
1362 32, /* bitsize */
1363 FALSE, /* pc_relative */
1364 0, /* bitpos */
1365 complain_overflow_bitfield,/* complain_on_overflow */
1366 NULL, /* special_function */
1367 "R_ARM_TLS_GOTDESC", /* name */
1368 TRUE, /* partial_inplace */
1369 0xffffffff, /* src_mask */
1370 0xffffffff, /* dst_mask */
1371 FALSE), /* pcrel_offset */
1372
1373 HOWTO (R_ARM_TLS_CALL, /* type */
1374 0, /* rightshift */
1375 2, /* size (0 = byte, 1 = short, 2 = long) */
1376 24, /* bitsize */
1377 FALSE, /* pc_relative */
1378 0, /* bitpos */
1379 complain_overflow_dont,/* complain_on_overflow */
1380 bfd_elf_generic_reloc, /* special_function */
1381 "R_ARM_TLS_CALL", /* name */
1382 FALSE, /* partial_inplace */
1383 0x00ffffff, /* src_mask */
1384 0x00ffffff, /* dst_mask */
1385 FALSE), /* pcrel_offset */
1386
1387 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1388 0, /* rightshift */
1389 2, /* size (0 = byte, 1 = short, 2 = long) */
1390 0, /* bitsize */
1391 FALSE, /* pc_relative */
1392 0, /* bitpos */
1393 complain_overflow_bitfield,/* complain_on_overflow */
1394 bfd_elf_generic_reloc, /* special_function */
1395 "R_ARM_TLS_DESCSEQ", /* name */
1396 FALSE, /* partial_inplace */
1397 0x00000000, /* src_mask */
1398 0x00000000, /* dst_mask */
1399 FALSE), /* pcrel_offset */
1400
1401 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1402 0, /* rightshift */
1403 2, /* size (0 = byte, 1 = short, 2 = long) */
1404 24, /* bitsize */
1405 FALSE, /* pc_relative */
1406 0, /* bitpos */
1407 complain_overflow_dont,/* complain_on_overflow */
1408 bfd_elf_generic_reloc, /* special_function */
1409 "R_ARM_THM_TLS_CALL", /* name */
1410 FALSE, /* partial_inplace */
1411 0x07ff07ff, /* src_mask */
1412 0x07ff07ff, /* dst_mask */
1413 FALSE), /* pcrel_offset */
1414
1415 HOWTO (R_ARM_PLT32_ABS, /* type */
1416 0, /* rightshift */
1417 2, /* size (0 = byte, 1 = short, 2 = long) */
1418 32, /* bitsize */
1419 FALSE, /* pc_relative */
1420 0, /* bitpos */
1421 complain_overflow_dont,/* complain_on_overflow */
1422 bfd_elf_generic_reloc, /* special_function */
1423 "R_ARM_PLT32_ABS", /* name */
1424 FALSE, /* partial_inplace */
1425 0xffffffff, /* src_mask */
1426 0xffffffff, /* dst_mask */
1427 FALSE), /* pcrel_offset */
1428
1429 HOWTO (R_ARM_GOT_ABS, /* type */
1430 0, /* rightshift */
1431 2, /* size (0 = byte, 1 = short, 2 = long) */
1432 32, /* bitsize */
1433 FALSE, /* pc_relative */
1434 0, /* bitpos */
1435 complain_overflow_dont,/* complain_on_overflow */
1436 bfd_elf_generic_reloc, /* special_function */
1437 "R_ARM_GOT_ABS", /* name */
1438 FALSE, /* partial_inplace */
1439 0xffffffff, /* src_mask */
1440 0xffffffff, /* dst_mask */
1441 FALSE), /* pcrel_offset */
1442
1443 HOWTO (R_ARM_GOT_PREL, /* type */
1444 0, /* rightshift */
1445 2, /* size (0 = byte, 1 = short, 2 = long) */
1446 32, /* bitsize */
1447 TRUE, /* pc_relative */
1448 0, /* bitpos */
1449 complain_overflow_dont, /* complain_on_overflow */
1450 bfd_elf_generic_reloc, /* special_function */
1451 "R_ARM_GOT_PREL", /* name */
1452 FALSE, /* partial_inplace */
1453 0xffffffff, /* src_mask */
1454 0xffffffff, /* dst_mask */
1455 TRUE), /* pcrel_offset */
1456
1457 HOWTO (R_ARM_GOT_BREL12, /* type */
1458 0, /* rightshift */
1459 2, /* size (0 = byte, 1 = short, 2 = long) */
1460 12, /* bitsize */
1461 FALSE, /* pc_relative */
1462 0, /* bitpos */
1463 complain_overflow_bitfield,/* complain_on_overflow */
1464 bfd_elf_generic_reloc, /* special_function */
1465 "R_ARM_GOT_BREL12", /* name */
1466 FALSE, /* partial_inplace */
1467 0x00000fff, /* src_mask */
1468 0x00000fff, /* dst_mask */
1469 FALSE), /* pcrel_offset */
1470
1471 HOWTO (R_ARM_GOTOFF12, /* type */
1472 0, /* rightshift */
1473 2, /* size (0 = byte, 1 = short, 2 = long) */
1474 12, /* bitsize */
1475 FALSE, /* pc_relative */
1476 0, /* bitpos */
1477 complain_overflow_bitfield,/* complain_on_overflow */
1478 bfd_elf_generic_reloc, /* special_function */
1479 "R_ARM_GOTOFF12", /* name */
1480 FALSE, /* partial_inplace */
1481 0x00000fff, /* src_mask */
1482 0x00000fff, /* dst_mask */
1483 FALSE), /* pcrel_offset */
1484
1485 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1486
1487 /* GNU extension to record C++ vtable member usage */
1488 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1489 0, /* rightshift */
1490 2, /* size (0 = byte, 1 = short, 2 = long) */
1491 0, /* bitsize */
1492 FALSE, /* pc_relative */
1493 0, /* bitpos */
1494 complain_overflow_dont, /* complain_on_overflow */
1495 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1496 "R_ARM_GNU_VTENTRY", /* name */
1497 FALSE, /* partial_inplace */
1498 0, /* src_mask */
1499 0, /* dst_mask */
1500 FALSE), /* pcrel_offset */
1501
1502 /* GNU extension to record C++ vtable hierarchy */
1503 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1504 0, /* rightshift */
1505 2, /* size (0 = byte, 1 = short, 2 = long) */
1506 0, /* bitsize */
1507 FALSE, /* pc_relative */
1508 0, /* bitpos */
1509 complain_overflow_dont, /* complain_on_overflow */
1510 NULL, /* special_function */
1511 "R_ARM_GNU_VTINHERIT", /* name */
1512 FALSE, /* partial_inplace */
1513 0, /* src_mask */
1514 0, /* dst_mask */
1515 FALSE), /* pcrel_offset */
1516
1517 HOWTO (R_ARM_THM_JUMP11, /* type */
1518 1, /* rightshift */
1519 1, /* size (0 = byte, 1 = short, 2 = long) */
1520 11, /* bitsize */
1521 TRUE, /* pc_relative */
1522 0, /* bitpos */
1523 complain_overflow_signed, /* complain_on_overflow */
1524 bfd_elf_generic_reloc, /* special_function */
1525 "R_ARM_THM_JUMP11", /* name */
1526 FALSE, /* partial_inplace */
1527 0x000007ff, /* src_mask */
1528 0x000007ff, /* dst_mask */
1529 TRUE), /* pcrel_offset */
1530
1531 HOWTO (R_ARM_THM_JUMP8, /* type */
1532 1, /* rightshift */
1533 1, /* size (0 = byte, 1 = short, 2 = long) */
1534 8, /* bitsize */
1535 TRUE, /* pc_relative */
1536 0, /* bitpos */
1537 complain_overflow_signed, /* complain_on_overflow */
1538 bfd_elf_generic_reloc, /* special_function */
1539 "R_ARM_THM_JUMP8", /* name */
1540 FALSE, /* partial_inplace */
1541 0x000000ff, /* src_mask */
1542 0x000000ff, /* dst_mask */
1543 TRUE), /* pcrel_offset */
1544
1545 /* TLS relocations */
1546 HOWTO (R_ARM_TLS_GD32, /* type */
1547 0, /* rightshift */
1548 2, /* size (0 = byte, 1 = short, 2 = long) */
1549 32, /* bitsize */
1550 FALSE, /* pc_relative */
1551 0, /* bitpos */
1552 complain_overflow_bitfield,/* complain_on_overflow */
1553 NULL, /* special_function */
1554 "R_ARM_TLS_GD32", /* name */
1555 TRUE, /* partial_inplace */
1556 0xffffffff, /* src_mask */
1557 0xffffffff, /* dst_mask */
1558 FALSE), /* pcrel_offset */
1559
1560 HOWTO (R_ARM_TLS_LDM32, /* type */
1561 0, /* rightshift */
1562 2, /* size (0 = byte, 1 = short, 2 = long) */
1563 32, /* bitsize */
1564 FALSE, /* pc_relative */
1565 0, /* bitpos */
1566 complain_overflow_bitfield,/* complain_on_overflow */
1567 bfd_elf_generic_reloc, /* special_function */
1568 "R_ARM_TLS_LDM32", /* name */
1569 TRUE, /* partial_inplace */
1570 0xffffffff, /* src_mask */
1571 0xffffffff, /* dst_mask */
1572 FALSE), /* pcrel_offset */
1573
1574 HOWTO (R_ARM_TLS_LDO32, /* type */
1575 0, /* rightshift */
1576 2, /* size (0 = byte, 1 = short, 2 = long) */
1577 32, /* bitsize */
1578 FALSE, /* pc_relative */
1579 0, /* bitpos */
1580 complain_overflow_bitfield,/* complain_on_overflow */
1581 bfd_elf_generic_reloc, /* special_function */
1582 "R_ARM_TLS_LDO32", /* name */
1583 TRUE, /* partial_inplace */
1584 0xffffffff, /* src_mask */
1585 0xffffffff, /* dst_mask */
1586 FALSE), /* pcrel_offset */
1587
1588 HOWTO (R_ARM_TLS_IE32, /* type */
1589 0, /* rightshift */
1590 2, /* size (0 = byte, 1 = short, 2 = long) */
1591 32, /* bitsize */
1592 FALSE, /* pc_relative */
1593 0, /* bitpos */
1594 complain_overflow_bitfield,/* complain_on_overflow */
1595 NULL, /* special_function */
1596 "R_ARM_TLS_IE32", /* name */
1597 TRUE, /* partial_inplace */
1598 0xffffffff, /* src_mask */
1599 0xffffffff, /* dst_mask */
1600 FALSE), /* pcrel_offset */
1601
1602 HOWTO (R_ARM_TLS_LE32, /* type */
1603 0, /* rightshift */
1604 2, /* size (0 = byte, 1 = short, 2 = long) */
1605 32, /* bitsize */
1606 FALSE, /* pc_relative */
1607 0, /* bitpos */
1608 complain_overflow_bitfield,/* complain_on_overflow */
1609 NULL, /* special_function */
1610 "R_ARM_TLS_LE32", /* name */
1611 TRUE, /* partial_inplace */
1612 0xffffffff, /* src_mask */
1613 0xffffffff, /* dst_mask */
1614 FALSE), /* pcrel_offset */
1615
1616 HOWTO (R_ARM_TLS_LDO12, /* type */
1617 0, /* rightshift */
1618 2, /* size (0 = byte, 1 = short, 2 = long) */
1619 12, /* bitsize */
1620 FALSE, /* pc_relative */
1621 0, /* bitpos */
1622 complain_overflow_bitfield,/* complain_on_overflow */
1623 bfd_elf_generic_reloc, /* special_function */
1624 "R_ARM_TLS_LDO12", /* name */
1625 FALSE, /* partial_inplace */
1626 0x00000fff, /* src_mask */
1627 0x00000fff, /* dst_mask */
1628 FALSE), /* pcrel_offset */
1629
1630 HOWTO (R_ARM_TLS_LE12, /* type */
1631 0, /* rightshift */
1632 2, /* size (0 = byte, 1 = short, 2 = long) */
1633 12, /* bitsize */
1634 FALSE, /* pc_relative */
1635 0, /* bitpos */
1636 complain_overflow_bitfield,/* complain_on_overflow */
1637 bfd_elf_generic_reloc, /* special_function */
1638 "R_ARM_TLS_LE12", /* name */
1639 FALSE, /* partial_inplace */
1640 0x00000fff, /* src_mask */
1641 0x00000fff, /* dst_mask */
1642 FALSE), /* pcrel_offset */
1643
1644 HOWTO (R_ARM_TLS_IE12GP, /* type */
1645 0, /* rightshift */
1646 2, /* size (0 = byte, 1 = short, 2 = long) */
1647 12, /* bitsize */
1648 FALSE, /* pc_relative */
1649 0, /* bitpos */
1650 complain_overflow_bitfield,/* complain_on_overflow */
1651 bfd_elf_generic_reloc, /* special_function */
1652 "R_ARM_TLS_IE12GP", /* name */
1653 FALSE, /* partial_inplace */
1654 0x00000fff, /* src_mask */
1655 0x00000fff, /* dst_mask */
1656 FALSE), /* pcrel_offset */
1657
1658 /* 112-127 private relocations. */
1659 EMPTY_HOWTO (112),
1660 EMPTY_HOWTO (113),
1661 EMPTY_HOWTO (114),
1662 EMPTY_HOWTO (115),
1663 EMPTY_HOWTO (116),
1664 EMPTY_HOWTO (117),
1665 EMPTY_HOWTO (118),
1666 EMPTY_HOWTO (119),
1667 EMPTY_HOWTO (120),
1668 EMPTY_HOWTO (121),
1669 EMPTY_HOWTO (122),
1670 EMPTY_HOWTO (123),
1671 EMPTY_HOWTO (124),
1672 EMPTY_HOWTO (125),
1673 EMPTY_HOWTO (126),
1674 EMPTY_HOWTO (127),
1675
1676 /* R_ARM_ME_TOO, obsolete. */
1677 EMPTY_HOWTO (128),
1678
1679 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1680 0, /* rightshift */
1681 1, /* size (0 = byte, 1 = short, 2 = long) */
1682 0, /* bitsize */
1683 FALSE, /* pc_relative */
1684 0, /* bitpos */
1685 complain_overflow_bitfield,/* complain_on_overflow */
1686 bfd_elf_generic_reloc, /* special_function */
1687 "R_ARM_THM_TLS_DESCSEQ",/* name */
1688 FALSE, /* partial_inplace */
1689 0x00000000, /* src_mask */
1690 0x00000000, /* dst_mask */
1691 FALSE), /* pcrel_offset */
1692 EMPTY_HOWTO (130),
1693 EMPTY_HOWTO (131),
1694 HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type. */
1695 0, /* rightshift. */
1696 1, /* size (0 = byte, 1 = short, 2 = long). */
1697 16, /* bitsize. */
1698 FALSE, /* pc_relative. */
1699 0, /* bitpos. */
1700 complain_overflow_bitfield,/* complain_on_overflow. */
1701 bfd_elf_generic_reloc, /* special_function. */
1702 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1703 FALSE, /* partial_inplace. */
1704 0x00000000, /* src_mask. */
1705 0x00000000, /* dst_mask. */
1706 FALSE), /* pcrel_offset. */
1707 HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type. */
1708 0, /* rightshift. */
1709 1, /* size (0 = byte, 1 = short, 2 = long). */
1710 16, /* bitsize. */
1711 FALSE, /* pc_relative. */
1712 0, /* bitpos. */
1713 complain_overflow_bitfield,/* complain_on_overflow. */
1714 bfd_elf_generic_reloc, /* special_function. */
1715 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1716 FALSE, /* partial_inplace. */
1717 0x00000000, /* src_mask. */
1718 0x00000000, /* dst_mask. */
1719 FALSE), /* pcrel_offset. */
1720 HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type. */
1721 0, /* rightshift. */
1722 1, /* size (0 = byte, 1 = short, 2 = long). */
1723 16, /* bitsize. */
1724 FALSE, /* pc_relative. */
1725 0, /* bitpos. */
1726 complain_overflow_bitfield,/* complain_on_overflow. */
1727 bfd_elf_generic_reloc, /* special_function. */
1728 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1729 FALSE, /* partial_inplace. */
1730 0x00000000, /* src_mask. */
1731 0x00000000, /* dst_mask. */
1732 FALSE), /* pcrel_offset. */
1733 HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type. */
1734 0, /* rightshift. */
1735 1, /* size (0 = byte, 1 = short, 2 = long). */
1736 16, /* bitsize. */
1737 FALSE, /* pc_relative. */
1738 0, /* bitpos. */
1739 complain_overflow_bitfield,/* complain_on_overflow. */
1740 bfd_elf_generic_reloc, /* special_function. */
1741 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1742 FALSE, /* partial_inplace. */
1743 0x00000000, /* src_mask. */
1744 0x00000000, /* dst_mask. */
1745 FALSE), /* pcrel_offset. */
1746 };
1747
/* 160 onwards: */
/* Second howto table, holding the single relocation numbered 160.
   elf32_arm_howto_from_type indexes it with r_type - R_ARM_IRELATIVE
   after an exact match on R_ARM_IRELATIVE, so only one slot is needed.  */
static reloc_howto_type elf32_arm_howto_table_2[1] =
{
  HOWTO (R_ARM_IRELATIVE,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_IRELATIVE",	/* name */
	 TRUE,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE)			/* pcrel_offset */
};
1765
/* 249-255 extended, currently unused, relocations: */
/* Third howto table; elf32_arm_howto_from_type indexes it with
   r_type - R_ARM_RREL32.  NOTE(review): despite the "249-255" comment
   above, the table has only four entries (R_ARM_RREL32 .. R_ARM_RBASE)
   — confirm the numbering against elf/arm.h if extending it.  All four
   are placeholders: zero size, zero masks, no overflow checking.  */
static reloc_howto_type elf32_arm_howto_table_3[4] =
{
  HOWTO (R_ARM_RREL32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RREL32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RABS32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RABS32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RPC24,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RPC24",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RBASE,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RBASE",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE)			/* pcrel_offset */
};
1825
1826 static reloc_howto_type *
1827 elf32_arm_howto_from_type (unsigned int r_type)
1828 {
1829 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1830 return &elf32_arm_howto_table_1[r_type];
1831
1832 if (r_type == R_ARM_IRELATIVE)
1833 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1834
1835 if (r_type >= R_ARM_RREL32
1836 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1837 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1838
1839 return NULL;
1840 }
1841
1842 static void
1843 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1844 Elf_Internal_Rela * elf_reloc)
1845 {
1846 unsigned int r_type;
1847
1848 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1849 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1850 }
1851
/* One entry of the BFD-to-ELF relocation translation table below,
   pairing a BFD relocation code with its ARM ELF relocation number.  */
struct elf32_arm_reloc_map
{
  bfd_reloc_code_real_type bfd_reloc_val;	/* BFD-internal reloc code.  */
  unsigned char elf_reloc_val;			/* ELF R_ARM_* number (fits in a byte).  */
};
1857
1858 /* All entries in this list must also be present in elf32_arm_howto_table. */
1859 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1860 {
1861 {BFD_RELOC_NONE, R_ARM_NONE},
1862 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1863 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1864 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1865 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1866 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1867 {BFD_RELOC_32, R_ARM_ABS32},
1868 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1869 {BFD_RELOC_8, R_ARM_ABS8},
1870 {BFD_RELOC_16, R_ARM_ABS16},
1871 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1872 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1873 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1874 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1875 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1876 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1877 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1878 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1879 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1880 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1881 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1882 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1883 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1884 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1885 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1886 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1887 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1888 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1889 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1890 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1891 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1892 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1893 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
1894 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
1895 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
1896 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
1897 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
1898 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
1899 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1900 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1901 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1902 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1903 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1904 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1905 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1906 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1907 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
1908 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1909 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1910 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1911 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1912 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1913 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1914 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1915 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1916 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1917 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1918 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1919 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1920 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1921 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1922 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1923 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1924 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1925 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1926 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1927 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1928 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1929 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1930 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1931 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1932 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1933 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1934 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1935 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1936 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1937 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1938 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1939 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1940 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1941 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1942 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1943 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1944 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1945 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1946 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX},
1947 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
1948 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
1949 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
1950 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC}
1951 };
1952
1953 static reloc_howto_type *
1954 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1955 bfd_reloc_code_real_type code)
1956 {
1957 unsigned int i;
1958
1959 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1960 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1961 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1962
1963 return NULL;
1964 }
1965
1966 static reloc_howto_type *
1967 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1968 const char *r_name)
1969 {
1970 unsigned int i;
1971
1972 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1973 if (elf32_arm_howto_table_1[i].name != NULL
1974 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1975 return &elf32_arm_howto_table_1[i];
1976
1977 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1978 if (elf32_arm_howto_table_2[i].name != NULL
1979 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1980 return &elf32_arm_howto_table_2[i];
1981
1982 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
1983 if (elf32_arm_howto_table_3[i].name != NULL
1984 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
1985 return &elf32_arm_howto_table_3[i];
1986
1987 return NULL;
1988 }
1989
1990 /* Support for core dump NOTE sections. */
1991
/* Parse a Linux/ARM NT_PRSTATUS core-dump note: record the current
   signal and LWP id, and expose the register dump as a ".reg/<pid>"
   pseudosection.  The note layout is identified solely by the size of
   its descriptor; unknown sizes are rejected.  Returns FALSE when the
   layout is unrecognized or the pseudosection cannot be made.  */

static bfd_boolean
elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
{
  int offset;
  size_t size;

  switch (note->descsz)
    {
    default:
      return FALSE;

    case 148:		/* Linux/ARM 32-bit.  */
      /* pr_cursig */
      elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);

      /* pr_pid */
      elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);

      /* pr_reg: 72 bytes of general registers starting 72 bytes into
	 the descriptor.  These offsets are mirrored by
	 elf32_arm_nabi_write_core_note below.  */
      offset = 72;
      size = 72;

      break;
    }

  /* Make a ".reg/999" section.  */
  return _bfd_elfcore_make_pseudosection (abfd, ".reg",
					  size, note->descpos + offset);
}
2021
2022 static bfd_boolean
2023 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
2024 {
2025 switch (note->descsz)
2026 {
2027 default:
2028 return FALSE;
2029
2030 case 124: /* Linux/ARM elf_prpsinfo. */
2031 elf_tdata (abfd)->core->pid
2032 = bfd_get_32 (abfd, note->descdata + 12);
2033 elf_tdata (abfd)->core->program
2034 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
2035 elf_tdata (abfd)->core->command
2036 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
2037 }
2038
2039 /* Note that for some reason, a spurious space is tacked
2040 onto the end of the args in some (at least one anyway)
2041 implementations, so strip it off if it exists. */
2042 {
2043 char *command = elf_tdata (abfd)->core->command;
2044 int n = strlen (command);
2045
2046 if (0 < n && command[n - 1] == ' ')
2047 command[n - 1] = '\0';
2048 }
2049
2050 return TRUE;
2051 }
2052
/* Write a Linux/ARM core-dump note of NOTE_TYPE into BUF (of size
   *BUFSIZ).  The variadic arguments depend on the note type:
     NT_PRPSINFO: const char *fname, const char *psargs;
     NT_PRSTATUS: long pid, int cursig, const void *gregs (72 bytes).
   The fixed offsets used below mirror the layouts decoded by
   elf32_arm_nabi_grok_psinfo and elf32_arm_nabi_grok_prstatus.
   Returns the updated note buffer, or NULL for unsupported types.  */

static char *
elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
				int note_type, ...)
{
  switch (note_type)
    {
    default:
      return NULL;

    case NT_PRPSINFO:
      {
	char data[124];		/* Linux/ARM elf_prpsinfo size (see grok_psinfo).  */
	va_list ap;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	/* pr_fname and pr_psargs are fixed-width fields; strncpy's
	   zero-padding (without guaranteed termination) is the
	   intended behavior here.  */
	strncpy (data + 28, va_arg (ap, const char *), 16);
	strncpy (data + 44, va_arg (ap, const char *), 80);
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }

    case NT_PRSTATUS:
      {
	char data[148];		/* Linux/ARM elf_prstatus size (see grok_prstatus).  */
	va_list ap;
	long pid;
	int cursig;
	const void *greg;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	pid = va_arg (ap, long);
	bfd_put_32 (abfd, pid, data + 24);	/* pr_pid.  */
	cursig = va_arg (ap, int);
	bfd_put_16 (abfd, cursig, data + 12);	/* pr_cursig.  */
	greg = va_arg (ap, const void *);
	memcpy (data + 72, greg, 72);		/* pr_reg.  */
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }
    }
}
2100
/* bfd target vectors and names for little- and big-endian ARM ELF.  */
#define TARGET_LITTLE_SYM		arm_elf32_le_vec
#define TARGET_LITTLE_NAME		"elf32-littlearm"
#define TARGET_BIG_SYM			arm_elf32_be_vec
#define TARGET_BIG_NAME			"elf32-bigarm"

/* Hook the core-note parsers/writer defined above into the generic
   ELF backend.  */
#define elf_backend_grok_prstatus	elf32_arm_nabi_grok_prstatus
#define elf_backend_grok_psinfo		elf32_arm_nabi_grok_psinfo
#define elf_backend_write_core_note	elf32_arm_nabi_write_core_note

/* Container types for 32-bit ARM and 16-bit Thumb instruction
   encodings.  */
typedef unsigned long int insn32;
typedef unsigned short int insn16;

/* In lieu of proper flags, assume all EABIv4 or later objects are
   interworkable.  */
#define INTERWORK_FLAG(abfd)  \
  (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
  || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
  || ((abfd)->flags & BFD_LINKER_CREATED))

/* The linker script knows the section names for placement.
   The entry_names are used to do simple name mangling on the stubs.
   Given a function name, and its type, the stub can be found. The
   name can be changed. The only requirement is the %s be present.  */
#define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
#define THUMB2ARM_GLUE_ENTRY_NAME   "__%s_from_thumb"

#define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
#define ARM2THUMB_GLUE_ENTRY_NAME   "__%s_from_arm"

#define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
#define VFP11_ERRATUM_VENEER_ENTRY_NAME   "__vfp11_veneer_%x"

#define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
#define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"

#define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
#define ARM_BX_GLUE_ENTRY_NAME   "__bx_r%d"

#define STUB_ENTRY_NAME   "__%s_veneer"

/* The name of the dynamic interpreter.  This is put in the .interp
   section.  */
#define ELF_DYNAMIC_INTERPRETER     "/usr/lib/ld.so.1"
2144
/* TLS trampoline: add the offset held in r0 to lr, load the function
   pointer stored one word past the computed address, and jump to it.  */
static const unsigned long tls_trampoline [] =
{
  0xe08e0000,		/* add r0, lr, r0 */
  0xe5901004,		/* ldr r1, [r0,#4] */
  0xe12fff11,		/* bx  r1 */
};

/* Lazy-resolution trampoline for TLS descriptors: the two trailing
   data words are GOT-relative offsets used to locate the dynamic
   linker's lazy resolver, which is then tail-called.  */
static const unsigned long dl_tlsdesc_lazy_trampoline [] =
{
  0xe52d2004, /* push {r2} */
  0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
  0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
  0xe79f2002, /* 1: ldr r2, [pc, r2] */
  0xe081100f, /* 2: add r1, pc */
  0xe12fff12, /* bx r2 */
  0x00000014, /* 3: .word  _GLOBAL_OFFSET_TABLE_ - 1b - 8
			   + dl_tlsdesc_lazy_resolver(GOT)   */
  0x00000018, /* 4: .word  _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
};
2164
/* FOUR_WORD_PLT selects fixed four-word PLT entries; otherwise the
   default is a three-word "short" entry with an optional four-word
   "long" form (selected at link time via elf32_arm_use_long_plt_entry)
   for displacements that do not fit the short encoding.  */
#ifdef FOUR_WORD_PLT

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
{
  0xe52de004,		/* str   lr, [sp, #-4]! */
  0xe59fe010,		/* ldr   lr, [pc, #16]  */
  0xe08fe00e,		/* add   lr, pc, lr     */
  0xe5bef008,		/* ldr   pc, [lr, #8]!  */
};

/* Subsequent entries in a procedure linkage table look like
   this.  */
static const bfd_vma elf32_arm_plt_entry [] =
{
  0xe28fc600,		/* add   ip, pc, #NN	*/
  0xe28cca00,		/* add	 ip, ip, #NN	*/
  0xe5bcf000,		/* ldr	 pc, [ip, #NN]! */
  0x00000000,		/* unused		*/
};

#else /* not FOUR_WORD_PLT */

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
{
  0xe52de004,		/* str	 lr, [sp, #-4]! */
  0xe59fe004,		/* ldr	 lr, [pc, #4]	*/
  0xe08fe00e,		/* add	 lr, pc, lr	*/
  0xe5bef008,		/* ldr	 pc, [lr, #8]!	*/
  0x00000000,		/* &GOT[0] - .		*/
};

/* By default subsequent entries in a procedure linkage table look like
   this. Offsets that don't fit into 28 bits will cause link error.  */
static const bfd_vma elf32_arm_plt_entry_short [] =
{
  0xe28fc600,		/* add   ip, pc, #0xNN00000 */
  0xe28cca00,		/* add	 ip, ip, #0xNN000   */
  0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]! */
};

/* When explicitly asked, we'll use this "long" entry format
   which can cope with arbitrary displacements.  */
static const bfd_vma elf32_arm_plt_entry_long [] =
{
  0xe28fc200,		/* add	 ip, pc, #0xN0000000 */
  0xe28cc600,		/* add	 ip, ip, #0xNN00000  */
  0xe28cca00,		/* add	 ip, ip, #0xNN000    */
  0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!   */
};

/* TRUE once any offset has required the long PLT form; all entries
   then use it for a uniform entry size.  */
static bfd_boolean elf32_arm_use_long_plt_entry = FALSE;

#endif /* not FOUR_WORD_PLT */
2226
/* The first entry in a procedure linkage table looks like this.
   It is set up so that any shared library function that is called before the
   relocation has been set up calls the dynamic linker first.  */
static const bfd_vma elf32_thumb2_plt0_entry [] =
{
  /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
     an instruction maybe encoded to one or two array elements.  */
  0xf8dfb500,		/* push	   {lr}		 */
			/* ldr.w   lr, [pc, #8]	 */
  0x44fee008,		/* add	   lr, pc	 */
  0xff08f85e,		/* ldr.w   pc, [lr, #8]! */
  0x00000000,		/* &GOT[0] - .		 */
};

/* Subsequent entries in a procedure linkage table for thumb only target
   look like this.  */
static const bfd_vma elf32_thumb2_plt_entry [] =
{
  /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
     an instruction maybe encoded to one or two array elements.  */
  0x0c00f240,		/* movw    ip, #0xNNNN	  */
  0x0c00f2c0,		/* movt    ip, #0xNNNN	  */
  0xf8dc44fc,		/* add	   ip, pc	  */
			/* ldr.w   pc, [ip]	  */
  0xbf00f000		/* nop			  */
};

/* The format of the first entry in the procedure linkage table
   for a VxWorks executable.  */
static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
{
  0xe52dc008,		/* str	  ip,[sp,#-8]!			*/
  0xe59fc000,		/* ldr	  ip,[pc]			*/
  0xe59cf008,		/* ldr	  pc,[ip,#8]			*/
  0x00000000,		/* .long  _GLOBAL_OFFSET_TABLE_		*/
};

/* The format of subsequent entries in a VxWorks executable.
   Two halves: the first loads through the GOT, the second (entered on
   the first, lazy call) branches back to the PLT head with the reloc
   index.  */
static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
{
  0xe59fc000,		/* ldr	  ip,[pc]			*/
  0xe59cf000,		/* ldr	  pc,[ip]			*/
  0x00000000,		/* .long  @got				*/
  0xe59fc000,		/* ldr	  ip,[pc]			*/
  0xea000000,		/* b	  _PLT				*/
  0x00000000,		/* .long  @pltindex*sizeof(Elf32_Rela)	*/
};

/* The format of entries in a VxWorks shared library.  GOT accesses go
   through r9 (the PIC base register) rather than absolute addresses.  */
static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
{
  0xe59fc000,		/* ldr	  ip,[pc]			*/
  0xe79cf009,		/* ldr	  pc,[ip,r9]			*/
  0x00000000,		/* .long  @got				*/
  0xe59fc000,		/* ldr	  ip,[pc]			*/
  0xe599f008,		/* ldr	  pc,[r9,#8]			*/
  0x00000000,		/* .long  @pltindex*sizeof(Elf32_Rela)	*/
};

/* An initial stub used if the PLT entry is referenced from Thumb code.  */
#define PLT_THUMB_STUB_SIZE 4
static const bfd_vma elf32_arm_plt_thumb_stub [] =
{
  0x4778,		/* bx pc */
  0x46c0		/* nop   */
};

/* The entries in a PLT when using a DLL-based target with multiple
   address spaces.  */
static const bfd_vma elf32_arm_symbian_plt_entry [] =
{
  0xe51ff004,	    /* ldr   pc, [pc, #-4] */
  0x00000000,	    /* dcd   R_ARM_GLOB_DAT(X) */
};
2301
/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  Instructions are grouped into 16-byte bundles and
   indirect branches are masked with bic, as NaCl sandboxing requires.  */
static const bfd_vma elf32_arm_nacl_plt0_entry [] =
{
  /* First bundle: */
  0xe300c000,		/* movw	ip, #:lower16:&GOT[2]-.+8	*/
  0xe340c000,		/* movt	ip, #:upper16:&GOT[2]-.+8	*/
  0xe08cc00f,		/* add	ip, ip, pc			*/
  0xe52dc008,		/* str	ip, [sp, #-8]!			*/
  /* Second bundle: */
  0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
  0xe59cc000,		/* ldr	ip, [ip]			*/
  0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
  0xe12fff1c,		/* bx	ip				*/
  /* Third bundle: */
  0xe320f000,		/* nop					*/
  0xe320f000,		/* nop					*/
  0xe320f000,		/* nop					*/
  /* .Lplt_tail: */
  0xe50dc004,		/* str	ip, [sp, #-4]			*/
  /* Fourth bundle: */
  0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
  0xe59cc000,		/* ldr	ip, [ip]			*/
  0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
  0xe12fff1c,		/* bx	ip				*/
};

/* Byte offset of the shared .Lplt_tail code within the entry above.  */
#define ARM_NACL_PLT_TAIL_OFFSET	(11 * 4)

/* Subsequent entries in a procedure linkage table look like this.  */
static const bfd_vma elf32_arm_nacl_plt_entry [] =
{
  0xe300c000,		/* movw	ip, #:lower16:&GOT[n]-.+8	*/
  0xe340c000,		/* movt	ip, #:upper16:&GOT[n]-.+8	*/
  0xe08cc00f,		/* add	ip, ip, pc			*/
  0xea000000,		/* b	.Lplt_tail			*/
};
2340
/* Maximum reachable displacements for each branch encoding: ARM B/BL
   carry a 24-bit word-scaled immediate, Thumb-1 BL a 22-bit range,
   Thumb-2 B.W/BL.W a 24-bit range, and Thumb-2 conditional B.W a
   20-bit range.  The trailing +8 (ARM) and +4 (Thumb) account for the
   PC reading ahead of the branch instruction.  */
#define ARM_MAX_FWD_BRANCH_OFFSET  ((((1 << 23) - 1) << 2) + 8)
#define ARM_MAX_BWD_BRANCH_OFFSET  ((-((1 << 23) << 2)) + 8)
#define THM_MAX_FWD_BRANCH_OFFSET  ((1 << 22) -2 + 4)
#define THM_MAX_BWD_BRANCH_OFFSET  (-(1 << 22) + 4)
#define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
#define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
#define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
#define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)

/* Encoding class of one entry in a stub template.  */
enum stub_insn_type
{
  THUMB16_TYPE = 1,
  THUMB32_TYPE,
  ARM_TYPE,
  DATA_TYPE
};

/* Constructors for insn_sequence entries in the stub templates below:
   instruction value, encoding class, reloc to apply (if any), addend.  */
#define THUMB16_INSN(X)		{(X), THUMB16_TYPE, R_ARM_NONE, 0}
/* A bit of a hack.  A Thumb conditional branch, in which the proper condition
   is inserted in arm_build_one_stub().  */
#define THUMB16_BCOND_INSN(X)	{(X), THUMB16_TYPE, R_ARM_NONE, 1}
#define THUMB32_INSN(X)		{(X), THUMB32_TYPE, R_ARM_NONE, 0}
#define THUMB32_B_INSN(X, Z)	{(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
#define ARM_INSN(X)		{(X), ARM_TYPE, R_ARM_NONE, 0}
#define ARM_REL_INSN(X, Z)	{(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
#define DATA_WORD(X,Y,Z)	{(X), DATA_TYPE, (Y), (Z)}

/* One element of a stub template: the instruction or data word, its
   encoding class, and an optional relocation (with addend) resolved
   when the stub is instantiated.  */
typedef struct
{
  bfd_vma data;
  enum stub_insn_type type;
  unsigned int r_type;
  int reloc_addend;
}  insn_sequence;
2375
/* Non-PIC branch stub templates.  Each DATA_WORD holds the branch
   destination, fixed up via the recorded relocation when the stub is
   built.  */

/* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
   to reach the stub if necessary.  */
static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
{
  ARM_INSN (0xe51ff004),	    /* ldr   pc, [pc, #-4] */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
};

/* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
   available.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
{
  ARM_INSN (0xe59fc000),	    /* ldr   ip, [pc, #0] */
  ARM_INSN (0xe12fff1c),	    /* bx    ip */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
};

/* Thumb -> Thumb long branch stub. Used on M-profile architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
{
  THUMB16_INSN (0xb401),	     /* push {r0} */
  THUMB16_INSN (0x4802),	     /* ldr  r0, [pc, #8] */
  THUMB16_INSN (0x4684),	     /* mov  ip, r0 */
  THUMB16_INSN (0xbc01),	     /* pop  {r0} */
  THUMB16_INSN (0x4760),	     /* bx   ip */
  THUMB16_INSN (0xbf00),	     /* nop */
  DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(X) */
};

/* V4T Thumb -> Thumb long branch stub. Using the stack is not
   allowed.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop */
  ARM_INSN (0xe59fc000),	     /* ldr  ip, [pc, #0] */
  ARM_INSN (0xe12fff1c),	     /* bx   ip */
  DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(X) */
};

/* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
   available.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop   */
  ARM_INSN (0xe51ff004),	     /* ldr   pc, [pc, #-4] */
  DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd   R_ARM_ABS32(X) */
};

/* V4T Thumb -> ARM short branch stub. Shorter variant of the above
   one, when the destination is close enough.  */
static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop   */
  ARM_REL_INSN (0xea000000, -8),     /* b    (X-8) */
};
2434
/* ARM/Thumb -> ARM long branch stub, PIC.  On V5T and above, use
   blx to reach the stub if necessary.  */
static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
{
  ARM_INSN (0xe59fc000),	     /* ldr   ip, [pc] */
  ARM_INSN (0xe08ff00c),	     /* add   pc, pc, ip */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd   R_ARM_REL32(X-4) */
};

/* ARM/Thumb -> Thumb long branch stub, PIC.  On V5T and above, use
   blx to reach the stub if necessary.  We can not add into pc;
   it is not guaranteed to mode switch (different in ARMv6 and
   ARMv7).  */
static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
{
  ARM_INSN (0xe59fc004),	     /* ldr   ip, [pc, #4] */
  ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
  ARM_INSN (0xe12fff1c),	     /* bx    ip */
  DATA_WORD (0, R_ARM_REL32, 0),     /* dcd   R_ARM_REL32(X) */
};

/* V4T ARM -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
{
  ARM_INSN (0xe59fc004),	     /* ldr   ip, [pc, #4] */
  ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
  ARM_INSN (0xe12fff1c),	     /* bx    ip */
  DATA_WORD (0, R_ARM_REL32, 0),     /* dcd   R_ARM_REL32(X) */
};

/* V4T Thumb -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop  */
  ARM_INSN (0xe59fc000),	     /* ldr  ip, [pc, #0] */
  ARM_INSN (0xe08cf00f),	     /* add  pc, ip, pc */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd  R_ARM_REL32(X-4) */
};

/* Thumb -> Thumb long branch stub, PIC. Used on M-profile
   architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
{
  THUMB16_INSN (0xb401),	     /* push {r0} */
  THUMB16_INSN (0x4802),	     /* ldr  r0, [pc, #8] */
  THUMB16_INSN (0x46fc),	     /* mov  ip, pc */
  THUMB16_INSN (0x4484),	     /* add  ip, r0 */
  THUMB16_INSN (0xbc01),	     /* pop  {r0} */
  THUMB16_INSN (0x4760),	     /* bx   ip */
  DATA_WORD (0, R_ARM_REL32, 4),     /* dcd  R_ARM_REL32(X+4) */
};

/* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
   allowed.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop */
  ARM_INSN (0xe59fc004),	     /* ldr  ip, [pc, #4] */
  ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
  ARM_INSN (0xe12fff1c),	     /* bx   ip */
  DATA_WORD (0, R_ARM_REL32, 0),     /* dcd  R_ARM_REL32(X) */
};
2499
/* Thumb2/ARM -> TLS trampoline.  Lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
{
  ARM_INSN (0xe59f1000),	     /* ldr   r1, [pc] */
  ARM_INSN (0xe08ff001),	     /* add   pc, pc, r1 */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd   R_ARM_REL32(X-4) */
};

/* V4T Thumb -> TLS trampoline.  lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop */
  ARM_INSN (0xe59f1000),	     /* ldr  r1, [pc, #0] */
  ARM_INSN (0xe081f00f),	     /* add  pc, r1, pc */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd  R_ARM_REL32(X-4) */
};

/* NaCl ARM -> ARM long branch stub.  The bic masks and bundle padding
   satisfy NaCl sandboxing constraints on indirect branches.  */
static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
{
  ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
  ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
  ARM_INSN (0xe12fff1c),		/* bx	ip */
  ARM_INSN (0xe320f000),		/* nop */
  ARM_INSN (0xe125be70),		/* bkpt	0x5be0 */
  DATA_WORD (0, R_ARM_ABS32, 0),	/* dcd	R_ARM_ABS32(X) */
  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
};

/* NaCl ARM -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
{
  ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
  ARM_INSN (0xe08cc00f),		/* add	ip, ip, pc */
  ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
  ARM_INSN (0xe12fff1c),		/* bx	ip */
  ARM_INSN (0xe125be70),		/* bkpt	0x5be0 */
  DATA_WORD (0, R_ARM_REL32, 8),	/* dcd	R_ARM_REL32(X+8) */
  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
};
2545
2546
/* Cortex-A8 erratum-workaround stubs.  */

/* Stub used for conditional branches (which may be beyond +/-1MB away, so we
   can't use a conditional branch to reach this stub).  */

static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
{
  THUMB16_BCOND_INSN (0xd001),	       /* b<cond>.n true.  */
  THUMB32_B_INSN (0xf000b800, -4),     /* b.w insn_after_original_branch.  */
  THUMB32_B_INSN (0xf000b800, -4)      /* true: b.w original_branch_dest.  */
};

/* Stub used for b.w and bl.w instructions.  */

static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
{
  THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
};

static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
{
  THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
};

/* Stub used for Thumb-2 blx.w instructions.  We modified the original blx.w
   instruction (which switches to ARM mode) to point to this stub.  Jump to the
   real destination using an ARM-mode branch.  */

static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
{
  ARM_REL_INSN (0xea000000, -8)	/* b original_branch_dest.  */
};

/* For each section group there can be a specially created linker section
   to hold the stubs for that group.  The name of the stub section is based
   upon the name of another section within that group with the suffix below
   applied.

   PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
   create what appeared to be a linker stub section when it actually
   contained user code/data.  For example, consider this fragment:

     const char * stubborn_problems[] = { "np" };

   If this is compiled with "-fPIC -fdata-sections" then gcc produces a
   section called:

     .data.rel.local.stubborn_problems

   This then causes problems in arm32_arm_build_stubs() as it triggers:

      // Ignore non-stub sections.
      if (!strstr (stub_sec->name, STUB_SUFFIX))
	continue;

   And so the section would be ignored instead of being processed.  Hence
   the change in definition of STUB_SUFFIX to a name that cannot be a valid
   C identifier.  */
#define STUB_SUFFIX ".__stub"
2606
/* One entry per long/short branch stub defined above.  DEF_STUBS is an
   X-macro: it is expanded once (below) to build the
   elf32_arm_stub_type enum and again to build the parallel
   stub_definitions table, keeping the two in sync by construction.  */
#define DEF_STUBS \
  DEF_STUB(long_branch_any_any)	\
  DEF_STUB(long_branch_v4t_arm_thumb) \
  DEF_STUB(long_branch_thumb_only) \
  DEF_STUB(long_branch_v4t_thumb_thumb)	\
  DEF_STUB(long_branch_v4t_thumb_arm) \
  DEF_STUB(short_branch_v4t_thumb_arm) \
  DEF_STUB(long_branch_any_arm_pic) \
  DEF_STUB(long_branch_any_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
  DEF_STUB(long_branch_v4t_arm_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_arm_pic) \
  DEF_STUB(long_branch_thumb_only_pic) \
  DEF_STUB(long_branch_any_tls_pic) \
  DEF_STUB(long_branch_v4t_thumb_tls_pic) \
  DEF_STUB(long_branch_arm_nacl) \
  DEF_STUB(long_branch_arm_nacl_pic) \
  DEF_STUB(a8_veneer_b_cond) \
  DEF_STUB(a8_veneer_b) \
  DEF_STUB(a8_veneer_bl) \
  DEF_STUB(a8_veneer_blx)

#define DEF_STUB(x) arm_stub_##x,
enum elf32_arm_stub_type
{
  arm_stub_none,
  DEF_STUBS
  max_stub_type
};
#undef DEF_STUB

/* Note the first a8_veneer type.  */
const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;

/* A stub template and its length, indexed by elf32_arm_stub_type.  */
typedef struct
{
  const insn_sequence* template_sequence;
  int template_size;
} stub_def;

#define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
static const stub_def stub_definitions[] =
{
  {NULL, 0},		/* Slot for arm_stub_none.  */
  DEF_STUBS
};
2654
/* Hash table entry describing a single linker-generated branch stub.  */
struct elf32_arm_stub_hash_entry
{
  /* Base hash table entry structure.  */
  struct bfd_hash_entry root;

  /* The stub section.  */
  asection *stub_sec;

  /* Offset within stub_sec of the beginning of this stub.  */
  bfd_vma stub_offset;

  /* Given the symbol's value and its section we can determine its final
     value when building the stubs (so the stub knows where to jump).  */
  bfd_vma target_value;
  asection *target_section;

  /* Same as above but for the source of the branch to the stub.  Used for
     Cortex-A8 erratum workaround to patch it to branch to the stub.  As
     such, source section does not need to be recorded since Cortex-A8 erratum
     workaround stubs are only generated when both source and target are in the
     same section.  */
  bfd_vma source_value;

  /* The instruction which caused this stub to be generated (only valid for
     Cortex-A8 erratum workaround stubs at present).  */
  unsigned long orig_insn;

  /* The stub type.  */
  enum elf32_arm_stub_type stub_type;
  /* Its encoding size in bytes.  */
  int stub_size;
  /* Its template.  */
  const insn_sequence *stub_template;
  /* The size of the template (number of entries).  */
  int stub_template_size;

  /* The symbol table entry, if any, that this was derived from.  */
  struct elf32_arm_link_hash_entry *h;

  /* Type of branch.  */
  enum arm_st_branch_type branch_type;

  /* Where this stub is being called from, or, in the case of combined
     stub sections, the first input section in the group.  */
  asection *id_sec;

  /* The name for the local symbol at the start of this stub.  The
     stub name in the hash table has to be unique; this does not, so
     it can be friendlier.  */
  char *output_name;
};

/* Used to build a map of a section.  This is required for mixed-endian
   code/data.  */

typedef struct elf32_elf_section_map
{
  /* Address from which this mapping applies.  */
  bfd_vma vma;
  /* Mapping symbol class character — NOTE(review): presumably the
     AAELF $a/$t/$d convention; confirm against the mapping-symbol
     parser.  */
  char type;
}
elf32_arm_section_map;
2716
/* Information about a VFP11 erratum veneer, or a branch to such a veneer.  */

typedef enum
{
  VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
  VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
  VFP11_ERRATUM_ARM_VENEER,
  VFP11_ERRATUM_THUMB_VENEER
}
elf32_vfp11_erratum_type;

/* Singly-linked per-section list of VFP11 erratum sites and their
   veneers; each branch entry and its veneer entry cross-link via the
   union below.  */
typedef struct elf32_vfp11_erratum_list
{
  /* Next entry in the per-section list.  */
  struct elf32_vfp11_erratum_list *next;
  /* Location this entry applies to.  */
  bfd_vma vma;
  union
  {
    struct
    {
      struct elf32_vfp11_erratum_list *veneer;
      unsigned int vfp_insn;
    } b;
    struct
    {
      struct elf32_vfp11_erratum_list *branch;
      unsigned int id;
    } v;
  } u;
  elf32_vfp11_erratum_type type;
}
elf32_vfp11_erratum_list;

/* Information about a STM32L4XX erratum veneer, or a branch to such a
   veneer.  */
typedef enum
{
  STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
  STM32L4XX_ERRATUM_VENEER
}
elf32_stm32l4xx_erratum_type;

/* Same list structure as the VFP11 case above, for the STM32L4XX
   erratum.  */
typedef struct elf32_stm32l4xx_erratum_list
{
  struct elf32_stm32l4xx_erratum_list *next;
  bfd_vma vma;
  union
  {
    struct
    {
      struct elf32_stm32l4xx_erratum_list *veneer;
      unsigned int insn;
    } b;
    struct
    {
      struct elf32_stm32l4xx_erratum_list *branch;
      unsigned int id;
    } v;
  } u;
  elf32_stm32l4xx_erratum_type type;
}
elf32_stm32l4xx_erratum_list;
2778
/* Kinds of edit that can be applied to an .ARM.exidx unwind table.  */
typedef enum
{
  DELETE_EXIDX_ENTRY,
  INSERT_EXIDX_CANTUNWIND_AT_END
}
arm_unwind_edit_type;

/* A (sorted) list of edits to apply to an unwind table.  */
typedef struct arm_unwind_table_edit
{
  arm_unwind_edit_type type;
  /* Note: we sometimes want to insert an unwind entry corresponding to a
     section different from the one we're currently writing out, so record the
     (text) section this edit relates to here.  */
  asection *linked_section;
  unsigned int index;
  struct arm_unwind_table_edit *next;
}
arm_unwind_table_edit;

/* ARM-specific per-section data, extending the generic ELF section
   data (accessed via the elf32_arm_section_data macro below).  */
typedef struct _arm_elf_section_data
{
  /* Information about mapping symbols.  */
  struct bfd_elf_section_data elf;
  unsigned int mapcount;
  unsigned int mapsize;
  elf32_arm_section_map *map;
  /* Information about CPU errata.  */
  unsigned int erratumcount;
  elf32_vfp11_erratum_list *erratumlist;
  unsigned int stm32l4xx_erratumcount;
  elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
  unsigned int additional_reloc_count;
  /* Information about unwind tables.  */
  union
  {
    /* Unwind info attached to a text section.  */
    struct
    {
      asection *arm_exidx_sec;
    } text;

    /* Unwind info attached to an .ARM.exidx section.  */
    struct
    {
      arm_unwind_table_edit *unwind_edit_list;
      arm_unwind_table_edit *unwind_edit_tail;
    } exidx;
  } u;
}
_arm_elf_section_data;

/* Retrieve the ARM-specific section data for SEC.  */
#define elf32_arm_section_data(sec) \
  ((_arm_elf_section_data *) elf_section_data (sec))
2833
/* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
   These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
   so may be created multiple times: we use an array of these entries whilst
   relaxing which we can refresh easily, then create stubs for each potentially
   erratum-triggering instruction once we've settled on a solution.  */

struct a8_erratum_fix
{
  bfd *input_bfd;
  asection *section;
  /* Offset of the branch within SECTION, and the offset it targets.  */
  bfd_vma offset;
  bfd_vma target_offset;
  /* The original (unpatched) branch instruction.  */
  unsigned long orig_insn;
  char *stub_name;
  enum elf32_arm_stub_type stub_type;
  enum arm_st_branch_type branch_type;
};

/* A table of relocs applied to branches which might trigger Cortex-A8
   erratum.  */

struct a8_erratum_reloc
{
  bfd_vma from;
  bfd_vma destination;
  struct elf32_arm_link_hash_entry *hash;
  const char *sym_name;
  unsigned int r_type;
  enum arm_st_branch_type branch_type;
  /* TRUE if a non-A8 stub already covers this branch.  */
  bfd_boolean non_a8_stub;
};
2865
2866 /* The size of the thread control block. */
2867 #define TCB_SIZE 8
2868
/* ARM-specific information about a PLT entry, over and above the usual
   gotplt_union.  */
struct arm_plt_info
{
  /* We reference count Thumb references to a PLT entry separately,
     so that we can emit the Thumb trampoline only if needed.  */
  bfd_signed_vma thumb_refcount;

  /* Some references from Thumb code may be eliminated by BL->BLX
     conversion, so record them separately.  */
  bfd_signed_vma maybe_thumb_refcount;

  /* How many of the recorded PLT accesses were from non-call relocations.
     This information is useful when deciding whether anything takes the
     address of an STT_GNU_IFUNC PLT.  A value of 0 means that all
     non-call references to the function should resolve directly to the
     real runtime target.  */
  unsigned int noncall_refcount;

  /* Since PLT entries have variable size if the Thumb prologue is
     used, we need to record the index into .got.plt instead of
     recomputing it from the PLT offset.  Initialized to -1 (see
     elf32_arm_link_hash_newfunc) until an index is assigned.  */
  bfd_signed_vma got_offset;
};
2893
/* Information about an .iplt entry for a local STT_GNU_IFUNC symbol.
   Local symbols have no elf_link_hash_entry, so this mirrors the
   relevant fields of one.  */
struct arm_local_iplt_info
{
  /* The information that is usually found in the generic ELF part of
     the hash table entry.  */
  union gotplt_union root;

  /* The information that is usually found in the ARM-specific part of
     the hash table entry.  */
  struct arm_plt_info arm;

  /* A list of all potential dynamic relocations against this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;
};
2908
/* ARM-specific per-BFD data, extending the generic ELF tdata.  */
struct elf_arm_obj_tdata
{
  /* The generic tdata; must be the first field.  */
  struct elf_obj_tdata root;

  /* tls_type for each local got entry; one element per local symbol
     (see elf32_arm_allocate_local_sym_info).  */
  char *local_got_tls_type;

  /* GOTPLT entries for TLS descriptors; indexed like the array above.  */
  bfd_vma *local_tlsdesc_gotent;

  /* Information for local symbols that need entries in .iplt.  */
  struct arm_local_iplt_info **local_iplt;

  /* Zero to warn when linking objects with incompatible enum sizes.  */
  int no_enum_size_warning;

  /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
  int no_wchar_size_warning;
};
2928
/* Accessor for BFD's ARM-specific tdata.  */
#define elf_arm_tdata(bfd) \
  ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)

/* Accessors for the per-local-symbol arrays in the ARM tdata.  */
#define elf32_arm_local_got_tls_type(bfd) \
  (elf_arm_tdata (bfd)->local_got_tls_type)

#define elf32_arm_local_tlsdesc_gotent(bfd) \
  (elf_arm_tdata (bfd)->local_tlsdesc_gotent)

#define elf32_arm_local_iplt(bfd) \
  (elf_arm_tdata (bfd)->local_iplt)

/* Nonzero if BFD is an ELF object handled by this (ARM) backend.  */
#define is_arm_elf(bfd) \
  (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
   && elf_tdata (bfd) != NULL \
   && elf_object_id (bfd) == ARM_ELF_DATA)
2945
2946 static bfd_boolean
2947 elf32_arm_mkobject (bfd *abfd)
2948 {
2949 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2950 ARM_ELF_DATA);
2951 }
2952
/* Cast a generic hash entry to the ARM-specific variant.  */
#define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))

/* Arm ELF linker hash entry.  */
struct elf32_arm_link_hash_entry
{
  /* The generic entry; must be the first field.  */
  struct elf_link_hash_entry root;

  /* Track dynamic relocs copied for this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;

  /* ARM-specific PLT information.  */
  struct arm_plt_info plt;

  /* Kinds of GOT entry needed for this symbol.  The TLS values are
     bit flags and may be combined (see GOT_TLS_GD_ANY_P).  */
#define GOT_UNKNOWN 0
#define GOT_NORMAL 1
#define GOT_TLS_GD 2
#define GOT_TLS_IE 4
#define GOT_TLS_GDESC 8
#define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
  unsigned int tls_type : 8;

  /* True if the symbol's PLT entry is in .iplt rather than .plt.  */
  unsigned int is_iplt : 1;

  /* Spare bits.  */
  unsigned int unused : 23;

  /* Offset of the GOTPLT entry reserved for the TLS descriptor,
     starting at the end of the jump table.  */
  bfd_vma tlsdesc_got;

  /* The symbol marking the real symbol location for exported thumb
     symbols with Arm stubs.  */
  struct elf_link_hash_entry *export_glue;

  /* A pointer to the most recently used stub hash entry against this
     symbol.  */
  struct elf32_arm_stub_hash_entry *stub_cache;
};
2991
/* Traverse an arm ELF linker hash table.  Calls FUNC on every entry,
   passing INFO through as the second argument.  */
#define elf32_arm_link_hash_traverse(table, func, info) \
  (elf_link_hash_traverse \
   (&(table)->root, \
    (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
    (info)))

/* Get the ARM elf linker hash table from a link_info structure.
   Yields NULL if the hash table belongs to a different backend.  */
#define elf32_arm_hash_table(info) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
  == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)

/* Look up (and optionally create or copy) STRING in the stub hash table.  */
#define arm_stub_hash_lookup(table, string, create, copy) \
  ((struct elf32_arm_stub_hash_entry *) \
   bfd_hash_lookup ((table), (string), (create), (copy)))
3007
/* Array to keep track of which stub sections have been created, and
   information on stub grouping.  One element per section group.  */
struct map_stub
{
  /* This is the section to which stubs in the group will be
     attached.  */
  asection *link_sec;
  /* The stub section holding the group's stubs.  */
  asection *stub_sec;
};
3018
/* Size in bytes of the jump-table part of .got.plt: one 4-byte slot per
   R_ARM_TLS_DESC index allocated so far.  */
#define elf32_arm_compute_jump_table_size(htab) \
  ((htab)->next_tls_desc_index * 4)

/* ARM ELF linker hash table.  */
struct elf32_arm_link_hash_table
{
  /* The main hash table.  */
  struct elf_link_hash_table root;

  /* The size in bytes of the section containing the Thumb-to-ARM glue.  */
  bfd_size_type thumb_glue_size;

  /* The size in bytes of the section containing the ARM-to-Thumb glue.  */
  bfd_size_type arm_glue_size;

  /* The size in bytes of section containing the ARMv4 BX veneers.  */
  bfd_size_type bx_glue_size;

  /* Offsets of ARMv4 BX veneers.  Bit1 set if present, and Bit0 set when
     veneer has been populated.  */
  bfd_vma bx_glue_offset[15];

  /* The size in bytes of the section containing glue for VFP11 erratum
     veneers.  */
  bfd_size_type vfp11_erratum_glue_size;

  /* The size in bytes of the section containing glue for STM32L4XX erratum
     veneers.  */
  bfd_size_type stm32l4xx_erratum_glue_size;

  /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum.  This
     holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
     elf32_arm_write_section().  */
  struct a8_erratum_fix *a8_erratum_fixes;
  unsigned int num_a8_erratum_fixes;

  /* An arbitrary input BFD chosen to hold the glue sections.  */
  bfd * bfd_of_glue_owner;

  /* Nonzero to output a BE8 image.  */
  int byteswap_code;

  /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
     Nonzero if R_ARM_TARGET1 means R_ARM_REL32.  */
  int target1_is_rel;

  /* The relocation to use for R_ARM_TARGET2 relocations.  */
  int target2_reloc;

  /* 0 = Ignore R_ARM_V4BX.
     1 = Convert BX to MOV PC.
     2 = Generate v4 interworking stubs.  */
  int fix_v4bx;

  /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum.  */
  int fix_cortex_a8;

  /* Whether we should fix the ARM1176 BLX immediate issue.  */
  int fix_arm1176;

  /* Nonzero if the ARM/Thumb BLX instructions are available for use.  */
  int use_blx;

  /* What sort of code sequences we should look for which may trigger the
     VFP11 denorm erratum.  */
  bfd_arm_vfp11_fix vfp11_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_vfp11_fixes;

  /* What sort of code sequences we should look for which may trigger the
     STM32L4XX erratum.  */
  bfd_arm_stm32l4xx_fix stm32l4xx_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_stm32l4xx_fixes;

  /* Nonzero to force PIC branch veneers.  */
  int pic_veneer;

  /* The number of bytes in the initial entry in the PLT.  */
  bfd_size_type plt_header_size;

  /* The number of bytes in the subsequent PLT entries.  */
  bfd_size_type plt_entry_size;

  /* True if the target system is VxWorks.  */
  int vxworks_p;

  /* True if the target system is Symbian OS.  */
  int symbian_p;

  /* True if the target system is Native Client.  */
  int nacl_p;

  /* True if the target uses REL relocations.  */
  int use_rel;

  /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt.  */
  bfd_vma next_tls_desc_index;

  /* How many R_ARM_TLS_DESC relocations were generated so far.  */
  bfd_vma num_tls_desc;

  /* Short-cuts to get to dynamic linker sections.  */
  asection *sdynbss;
  asection *srelbss;

  /* The (unloaded but important) VxWorks .rela.plt.unloaded section.  */
  asection *srelplt2;

  /* The offset into splt of the PLT entry for the TLS descriptor
     resolver.  Special values are 0, if not necessary (or not found
     to be necessary yet), and -1 if needed but not determined
     yet.  */
  bfd_vma dt_tlsdesc_plt;

  /* The offset into sgot of the GOT entry used by the PLT entry
     above.  */
  bfd_vma dt_tlsdesc_got;

  /* Offset in .plt section of tls_arm_trampoline.  */
  bfd_vma tls_trampoline;

  /* Data for R_ARM_TLS_LDM32 relocations.  */
  union
  {
    bfd_signed_vma refcount;
    bfd_vma offset;
  } tls_ldm_got;

  /* Small local sym cache.  */
  struct sym_cache sym_cache;

  /* For convenience in allocate_dynrelocs.  */
  bfd * obfd;

  /* The amount of space used by the reserved portion of the sgotplt
     section, plus whatever space is used by the jump slots.  */
  bfd_vma sgotplt_jump_table_size;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* Linker stub bfd.  */
  bfd *stub_bfd;

  /* Linker call-backs.  */
  asection * (*add_stub_section) (const char *, asection *, asection *,
				  unsigned int);
  void (*layout_sections_again) (void);

  /* Array to keep track of which stub sections have been created, and
     information on stub grouping.  */
  struct map_stub *stub_group;

  /* Number of elements in stub_group.  */
  unsigned int top_id;

  /* Assorted information used by elf32_arm_size_stubs.  */
  unsigned int bfd_count;
  unsigned int top_index;
  asection **input_list;
};
3183
/* Count trailing zero bits in MASK.  The fallback loop yields the bit
   width of MASK when MASK is zero; the builtin's result is undefined
   in that case.  */

static inline int
ctz (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_ctz (mask);
#else
  unsigned int bit;

  for (bit = 0; bit < 8 * sizeof (mask); bit++, mask >>= 1)
    if (mask & 0x1)
      break;

  return bit;
#endif
}
3201
/* Count the number of set bits in MASK.  */

static inline int
popcount (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_popcount (mask);
#else
  unsigned int count = 0;

  while (mask != 0)
    {
      count += mask & 0x1;
      mask >>= 1;
    }

  return count;
#endif
}
3219
3220 /* Create an entry in an ARM ELF linker hash table. */
3221
3222 static struct bfd_hash_entry *
3223 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
3224 struct bfd_hash_table * table,
3225 const char * string)
3226 {
3227 struct elf32_arm_link_hash_entry * ret =
3228 (struct elf32_arm_link_hash_entry *) entry;
3229
3230 /* Allocate the structure if it has not already been allocated by a
3231 subclass. */
3232 if (ret == NULL)
3233 ret = (struct elf32_arm_link_hash_entry *)
3234 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
3235 if (ret == NULL)
3236 return (struct bfd_hash_entry *) ret;
3237
3238 /* Call the allocation method of the superclass. */
3239 ret = ((struct elf32_arm_link_hash_entry *)
3240 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
3241 table, string));
3242 if (ret != NULL)
3243 {
3244 ret->dyn_relocs = NULL;
3245 ret->tls_type = GOT_UNKNOWN;
3246 ret->tlsdesc_got = (bfd_vma) -1;
3247 ret->plt.thumb_refcount = 0;
3248 ret->plt.maybe_thumb_refcount = 0;
3249 ret->plt.noncall_refcount = 0;
3250 ret->plt.got_offset = -1;
3251 ret->is_iplt = FALSE;
3252 ret->export_glue = NULL;
3253
3254 ret->stub_cache = NULL;
3255 }
3256
3257 return (struct bfd_hash_entry *) ret;
3258 }
3259
/* Ensure that we have allocated bookkeeping structures for ABFD's local
   symbols.  */

static bfd_boolean
elf32_arm_allocate_local_sym_info (bfd *abfd)
{
  if (elf_local_got_refcounts (abfd) == NULL)
    {
      bfd_size_type num_syms;
      bfd_size_type size;
      char *data;

      /* One slot per local symbol in each of the four arrays below.  */
      num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
      size = num_syms * (sizeof (bfd_signed_vma)
			 + sizeof (struct arm_local_iplt_info *)
			 + sizeof (bfd_vma)
			 + sizeof (char));
      data = bfd_zalloc (abfd, size);
      if (data == NULL)
	return FALSE;

      /* Carve the single zeroed allocation up into the four arrays.
	 The order here must match the sizes summed above, widest
	 members first.  */
      elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
      data += num_syms * sizeof (bfd_signed_vma);

      elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
      data += num_syms * sizeof (struct arm_local_iplt_info *);

      elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
      data += num_syms * sizeof (bfd_vma);

      elf32_arm_local_got_tls_type (abfd) = data;
    }
  return TRUE;
}
3294
3295 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3296 to input bfd ABFD. Create the information if it doesn't already exist.
3297 Return null if an allocation fails. */
3298
3299 static struct arm_local_iplt_info *
3300 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3301 {
3302 struct arm_local_iplt_info **ptr;
3303
3304 if (!elf32_arm_allocate_local_sym_info (abfd))
3305 return NULL;
3306
3307 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3308 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3309 if (*ptr == NULL)
3310 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3311 return *ptr;
3312 }
3313
3314 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3315 in ABFD's symbol table. If the symbol is global, H points to its
3316 hash table entry, otherwise H is null.
3317
3318 Return true if the symbol does have PLT information. When returning
3319 true, point *ROOT_PLT at the target-independent reference count/offset
3320 union and *ARM_PLT at the ARM-specific information. */
3321
3322 static bfd_boolean
3323 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_entry *h,
3324 unsigned long r_symndx, union gotplt_union **root_plt,
3325 struct arm_plt_info **arm_plt)
3326 {
3327 struct arm_local_iplt_info *local_iplt;
3328
3329 if (h != NULL)
3330 {
3331 *root_plt = &h->root.plt;
3332 *arm_plt = &h->plt;
3333 return TRUE;
3334 }
3335
3336 if (elf32_arm_local_iplt (abfd) == NULL)
3337 return FALSE;
3338
3339 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3340 if (local_iplt == NULL)
3341 return FALSE;
3342
3343 *root_plt = &local_iplt->root;
3344 *arm_plt = &local_iplt->arm;
3345 return TRUE;
3346 }
3347
3348 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3349 before it. */
3350
3351 static bfd_boolean
3352 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3353 struct arm_plt_info *arm_plt)
3354 {
3355 struct elf32_arm_link_hash_table *htab;
3356
3357 htab = elf32_arm_hash_table (info);
3358 return (arm_plt->thumb_refcount != 0
3359 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0));
3360 }
3361
/* Return a pointer to the head of the dynamic reloc list that should
   be used for local symbol ISYM, which is symbol number R_SYMNDX in
   ABFD's symbol table.  Return null if an error occurs.  */

static struct elf_dyn_relocs **
elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
				   Elf_Internal_Sym *isym)
{
  if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
    {
      struct arm_local_iplt_info *local_iplt;

      /* Relocations against a local ifunc are tracked in its .iplt
	 information, which is created here on demand.  */
      local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
      if (local_iplt == NULL)
	return NULL;
      return &local_iplt->dyn_relocs;
    }
  else
    {
      /* Track dynamic relocs needed for local syms too.
	 We really need local syms available to do this
	 easily.  Oh well.  */
      asection *s;
      void *vpp;

      s = bfd_section_from_elf_index (abfd, isym->st_shndx);
      if (s == NULL)
	abort ();

      /* Non-ifunc local relocs are tracked per section.  */
      vpp = &elf_section_data (s)->local_dynrel;
      return (struct elf_dyn_relocs **) vpp;
    }
}
3395
3396 /* Initialize an entry in the stub hash table. */
3397
3398 static struct bfd_hash_entry *
3399 stub_hash_newfunc (struct bfd_hash_entry *entry,
3400 struct bfd_hash_table *table,
3401 const char *string)
3402 {
3403 /* Allocate the structure if it has not already been allocated by a
3404 subclass. */
3405 if (entry == NULL)
3406 {
3407 entry = (struct bfd_hash_entry *)
3408 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3409 if (entry == NULL)
3410 return entry;
3411 }
3412
3413 /* Call the allocation method of the superclass. */
3414 entry = bfd_hash_newfunc (entry, table, string);
3415 if (entry != NULL)
3416 {
3417 struct elf32_arm_stub_hash_entry *eh;
3418
3419 /* Initialize the local fields. */
3420 eh = (struct elf32_arm_stub_hash_entry *) entry;
3421 eh->stub_sec = NULL;
3422 eh->stub_offset = 0;
3423 eh->source_value = 0;
3424 eh->target_value = 0;
3425 eh->target_section = NULL;
3426 eh->orig_insn = 0;
3427 eh->stub_type = arm_stub_none;
3428 eh->stub_size = 0;
3429 eh->stub_template = NULL;
3430 eh->stub_template_size = 0;
3431 eh->h = NULL;
3432 eh->id_sec = NULL;
3433 eh->output_name = NULL;
3434 }
3435
3436 return entry;
3437 }
3438
3439 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3440 shortcuts to them in our hash table. */
3441
3442 static bfd_boolean
3443 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3444 {
3445 struct elf32_arm_link_hash_table *htab;
3446
3447 htab = elf32_arm_hash_table (info);
3448 if (htab == NULL)
3449 return FALSE;
3450
3451 /* BPABI objects never have a GOT, or associated sections. */
3452 if (htab->symbian_p)
3453 return TRUE;
3454
3455 if (! _bfd_elf_create_got_section (dynobj, info))
3456 return FALSE;
3457
3458 return TRUE;
3459 }
3460
/* Create the .iplt, .rel(a).iplt and .igot.plt sections.  */

static bfd_boolean
create_ifunc_sections (struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;
  const struct elf_backend_data *bed;
  bfd *dynobj;
  asection *s;
  flagword flags;

  htab = elf32_arm_hash_table (info);
  dynobj = htab->root.dynobj;
  bed = get_elf_backend_data (dynobj);
  flags = bed->dynamic_sec_flags;

  /* .iplt holds code, so it is created read-only and with PLT
     alignment.  */
  if (htab->root.iplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
					      flags | SEC_READONLY | SEC_CODE);
      if (s == NULL
	  || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment))
	return FALSE;
      htab->root.iplt = s;
    }

  /* .rel(a).iplt holds the relocations applied to .iplt entries.  */
  if (htab->root.irelplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj,
					      RELOC_SECTION (htab, ".iplt"),
					      flags | SEC_READONLY);
      if (s == NULL
	  || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
	return FALSE;
      htab->root.irelplt = s;
    }

  /* .igot.plt is the GOT counterpart of .iplt.  */
  if (htab->root.igotplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
      if (s == NULL
	  || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
	return FALSE;
      htab->root.igotplt = s;
    }
  return TRUE;
}
3508
3509 /* Determine if we're dealing with a Thumb only architecture. */
3510
3511 static bfd_boolean
3512 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3513 {
3514 int arch;
3515 int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3516 Tag_CPU_arch_profile);
3517
3518 if (profile)
3519 return profile == 'M';
3520
3521 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3522
3523 if (arch == TAG_CPU_ARCH_V6_M
3524 || arch == TAG_CPU_ARCH_V6S_M
3525 || arch == TAG_CPU_ARCH_V7E_M
3526 || arch == TAG_CPU_ARCH_V8M_BASE
3527 || arch == TAG_CPU_ARCH_V8M_MAIN)
3528 return TRUE;
3529
3530 return FALSE;
3531 }
3532
3533 /* Determine if we're dealing with a Thumb-2 object. */
3534
3535 static bfd_boolean
3536 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3537 {
3538 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3539 Tag_CPU_arch);
3540 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
3541 }
3542
/* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
   .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
   hash table.  */

static bfd_boolean
elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  if (!htab->root.sgot && !create_got_section (dynobj, info))
    return FALSE;

  if (!_bfd_elf_create_dynamic_sections (dynobj, info))
    return FALSE;

  /* Record short-cuts to the sections the generic code just created.
     .rel(a).bss is only recorded when not producing a shared object.  */
  htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
  if (!bfd_link_pic (info))
    htab->srelbss = bfd_get_linker_section (dynobj,
					    RELOC_SECTION (htab, ".bss"));

  if (htab->vxworks_p)
    {
      if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
	return FALSE;

      /* VxWorks PLT sizing: shared-library PLTs have no header entry.  */
      if (bfd_link_pic (info))
	{
	  htab->plt_header_size = 0;
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
	}
      else
	{
	  htab->plt_header_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
	}

      if (elf_elfheader (dynobj))
	elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
    }
  else
    {
      /* PR ld/16017
	 Test for thumb only architectures.  Note - we cannot just call
	 using_thumb_only() as the attributes in the output bfd have not been
	 initialised at this point, so instead we use the input bfd.  */
      bfd * saved_obfd = htab->obfd;

      htab->obfd = dynobj;
      if (using_thumb_only (htab))
	{
	  /* Thumb-only targets use the Thumb-2 PLT sequences.  */
	  htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
	  htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
	}
      htab->obfd = saved_obfd;
    }

  /* Sanity check: the generic code must have created all of these.  */
  if (!htab->root.splt
      || !htab->root.srelplt
      || !htab->sdynbss
      || (!bfd_link_pic (info) && !htab->srelbss))
    abort ();

  return TRUE;
}
3614
/* Copy the extra info we tack onto an elf_link_hash_entry.  */

static void
elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
				struct elf_link_hash_entry *dir,
				struct elf_link_hash_entry *ind)
{
  struct elf32_arm_link_hash_entry *edir, *eind;

  edir = (struct elf32_arm_link_hash_entry *) dir;
  eind = (struct elf32_arm_link_hash_entry *) ind;

  if (eind->dyn_relocs != NULL)
    {
      if (edir->dyn_relocs != NULL)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  /* Add reloc counts against the indirect sym to the direct sym
	     list.  Merge any entries against the same section.  */
	  for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
	    {
	      struct elf_dyn_relocs *q;

	      /* Look for an entry in EDIR's list for the same section;
		 if found, fold P's counts into it and unlink P.  */
	      for (q = edir->dyn_relocs; q != NULL; q = q->next)
		if (q->sec == p->sec)
		  {
		    q->pc_count += p->pc_count;
		    q->count += p->count;
		    *pp = p->next;
		    break;
		  }
	      if (q == NULL)
		pp = &p->next;
	    }
	  /* Chain EDIR's existing list onto the tail of the merged
	     list.  */
	  *pp = edir->dyn_relocs;
	}

      edir->dyn_relocs = eind->dyn_relocs;
      eind->dyn_relocs = NULL;
    }

  if (ind->root.type == bfd_link_hash_indirect)
    {
      /* Copy over PLT info.  */
      edir->plt.thumb_refcount += eind->plt.thumb_refcount;
      eind->plt.thumb_refcount = 0;
      edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
      eind->plt.maybe_thumb_refcount = 0;
      edir->plt.noncall_refcount += eind->plt.noncall_refcount;
      eind->plt.noncall_refcount = 0;

      /* We should only allocate a function to .iplt once the final
	 symbol information is known.  */
      BFD_ASSERT (!eind->is_iplt);

      /* Only take the indirect symbol's GOT TLS type when the direct
	 symbol has no GOT references of its own.  */
      if (dir->got.refcount <= 0)
	{
	  edir->tls_type = eind->tls_type;
	  eind->tls_type = GOT_UNKNOWN;
	}
    }

  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
}
3681
3682 /* Destroy an ARM elf linker hash table. */
3683
3684 static void
3685 elf32_arm_link_hash_table_free (bfd *obfd)
3686 {
3687 struct elf32_arm_link_hash_table *ret
3688 = (struct elf32_arm_link_hash_table *) obfd->link.hash;
3689
3690 bfd_hash_table_free (&ret->stub_hash_table);
3691 _bfd_elf_link_hash_table_free (obfd);
3692 }
3693
3694 /* Create an ARM elf linker hash table. */
3695
3696 static struct bfd_link_hash_table *
3697 elf32_arm_link_hash_table_create (bfd *abfd)
3698 {
3699 struct elf32_arm_link_hash_table *ret;
3700 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
3701
3702 ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
3703 if (ret == NULL)
3704 return NULL;
3705
3706 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
3707 elf32_arm_link_hash_newfunc,
3708 sizeof (struct elf32_arm_link_hash_entry),
3709 ARM_ELF_DATA))
3710 {
3711 free (ret);
3712 return NULL;
3713 }
3714
3715 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
3716 ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
3717 #ifdef FOUR_WORD_PLT
3718 ret->plt_header_size = 16;
3719 ret->plt_entry_size = 16;
3720 #else
3721 ret->plt_header_size = 20;
3722 ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
3723 #endif
3724 ret->use_rel = 1;
3725 ret->obfd = abfd;
3726
3727 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
3728 sizeof (struct elf32_arm_stub_hash_entry)))
3729 {
3730 _bfd_elf_link_hash_table_free (abfd);
3731 return NULL;
3732 }
3733 ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;
3734
3735 return &ret->root.root;
3736 }
3737
3738 /* Determine what kind of NOPs are available. */
3739
3740 static bfd_boolean
3741 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
3742 {
3743 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3744 Tag_CPU_arch);
3745 return arch == TAG_CPU_ARCH_V6T2
3746 || arch == TAG_CPU_ARCH_V6K
3747 || arch == TAG_CPU_ARCH_V7
3748 || arch == TAG_CPU_ARCH_V7E_M;
3749 }
3750
3751 static bfd_boolean
3752 arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
3753 {
3754 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3755 Tag_CPU_arch);
3756 return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7
3757 || arch == TAG_CPU_ARCH_V7E_M);
3758 }
3759
3760 static bfd_boolean
3761 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3762 {
3763 switch (stub_type)
3764 {
3765 case arm_stub_long_branch_thumb_only:
3766 case arm_stub_long_branch_v4t_thumb_arm:
3767 case arm_stub_short_branch_v4t_thumb_arm:
3768 case arm_stub_long_branch_v4t_thumb_arm_pic:
3769 case arm_stub_long_branch_v4t_thumb_tls_pic:
3770 case arm_stub_long_branch_thumb_only_pic:
3771 return TRUE;
3772 case arm_stub_none:
3773 BFD_FAIL ();
3774 return FALSE;
3775 break;
3776 default:
3777 return FALSE;
3778 }
3779 }
3780
3781 /* Determine the type of stub needed, if any, for a call. */
3782
3783 static enum elf32_arm_stub_type
3784 arm_type_of_stub (struct bfd_link_info *info,
3785 asection *input_sec,
3786 const Elf_Internal_Rela *rel,
3787 unsigned char st_type,
3788 enum arm_st_branch_type *actual_branch_type,
3789 struct elf32_arm_link_hash_entry *hash,
3790 bfd_vma destination,
3791 asection *sym_sec,
3792 bfd *input_bfd,
3793 const char *name)
3794 {
3795 bfd_vma location;
3796 bfd_signed_vma branch_offset;
3797 unsigned int r_type;
3798 struct elf32_arm_link_hash_table * globals;
3799 int thumb2;
3800 int thumb_only;
3801 enum elf32_arm_stub_type stub_type = arm_stub_none;
3802 int use_plt = 0;
3803 enum arm_st_branch_type branch_type = *actual_branch_type;
3804 union gotplt_union *root_plt;
3805 struct arm_plt_info *arm_plt;
3806
3807 if (branch_type == ST_BRANCH_LONG)
3808 return stub_type;
3809
3810 globals = elf32_arm_hash_table (info);
3811 if (globals == NULL)
3812 return stub_type;
3813
3814 thumb_only = using_thumb_only (globals);
3815
3816 thumb2 = using_thumb2 (globals);
3817
3818 /* Determine where the call point is. */
3819 location = (input_sec->output_offset
3820 + input_sec->output_section->vma
3821 + rel->r_offset);
3822
3823 r_type = ELF32_R_TYPE (rel->r_info);
3824
3825 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
3826 are considering a function call relocation. */
3827 if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
3828 || r_type == R_ARM_THM_JUMP19)
3829 && branch_type == ST_BRANCH_TO_ARM)
3830 branch_type = ST_BRANCH_TO_THUMB;
3831
3832 /* For TLS call relocs, it is the caller's responsibility to provide
3833 the address of the appropriate trampoline. */
3834 if (r_type != R_ARM_TLS_CALL
3835 && r_type != R_ARM_THM_TLS_CALL
3836 && elf32_arm_get_plt_info (input_bfd, hash, ELF32_R_SYM (rel->r_info),
3837 &root_plt, &arm_plt)
3838 && root_plt->offset != (bfd_vma) -1)
3839 {
3840 asection *splt;
3841
3842 if (hash == NULL || hash->is_iplt)
3843 splt = globals->root.iplt;
3844 else
3845 splt = globals->root.splt;
3846 if (splt != NULL)
3847 {
3848 use_plt = 1;
3849
3850 /* Note when dealing with PLT entries: the main PLT stub is in
3851 ARM mode, so if the branch is in Thumb mode, another
3852 Thumb->ARM stub will be inserted later just before the ARM
3853 PLT stub. We don't take this extra distance into account
3854 here, because if a long branch stub is needed, we'll add a
3855 Thumb->Arm one and branch directly to the ARM PLT entry
3856 because it avoids spreading offset corrections in several
3857 places. */
3858
3859 destination = (splt->output_section->vma
3860 + splt->output_offset
3861 + root_plt->offset);
3862 st_type = STT_FUNC;
3863 branch_type = ST_BRANCH_TO_ARM;
3864 }
3865 }
3866 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. */
3867 BFD_ASSERT (st_type != STT_GNU_IFUNC);
3868
3869 branch_offset = (bfd_signed_vma)(destination - location);
3870
3871 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
3872 || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
3873 {
3874 /* Handle cases where:
3875 - this call goes too far (different Thumb/Thumb2 max
3876 distance)
3877 - it's a Thumb->Arm call and blx is not available, or it's a
3878 Thumb->Arm branch (not bl). A stub is needed in this case,
3879 but only if this call is not through a PLT entry. Indeed,
3880 PLT stubs handle mode switching already.
3881 */
3882 if ((!thumb2
3883 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3884 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3885 || (thumb2
3886 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3887 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3888 || (thumb2
3889 && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
3890 || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
3891 && (r_type == R_ARM_THM_JUMP19))
3892 || (branch_type == ST_BRANCH_TO_ARM
3893 && (((r_type == R_ARM_THM_CALL
3894 || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
3895 || (r_type == R_ARM_THM_JUMP24)
3896 || (r_type == R_ARM_THM_JUMP19))
3897 && !use_plt))
3898 {
3899 if (branch_type == ST_BRANCH_TO_THUMB)
3900 {
3901 /* Thumb to thumb. */
3902 if (!thumb_only)
3903 {
3904 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
3905 /* PIC stubs. */
3906 ? ((globals->use_blx
3907 && (r_type == R_ARM_THM_CALL))
3908 /* V5T and above. Stub starts with ARM code, so
3909 we must be able to switch mode before
3910 reaching it, which is only possible for 'bl'
3911 (ie R_ARM_THM_CALL relocation). */
3912 ? arm_stub_long_branch_any_thumb_pic
3913 /* On V4T, use Thumb code only. */
3914 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3915
3916 /* non-PIC stubs. */
3917 : ((globals->use_blx
3918 && (r_type == R_ARM_THM_CALL))
3919 /* V5T and above. */
3920 ? arm_stub_long_branch_any_any
3921 /* V4T. */
3922 : arm_stub_long_branch_v4t_thumb_thumb);
3923 }
3924 else
3925 {
3926 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
3927 /* PIC stub. */
3928 ? arm_stub_long_branch_thumb_only_pic
3929 /* non-PIC stub. */
3930 : arm_stub_long_branch_thumb_only;
3931 }
3932 }
3933 else
3934 {
3935 /* Thumb to arm. */
3936 if (sym_sec != NULL
3937 && sym_sec->owner != NULL
3938 && !INTERWORK_FLAG (sym_sec->owner))
3939 {
3940 (*_bfd_error_handler)
3941 (_("%B(%s): warning: interworking not enabled.\n"
3942 " first occurrence: %B: Thumb call to ARM"),
3943 sym_sec->owner, input_bfd, name);
3944 }
3945
3946 stub_type =
3947 (bfd_link_pic (info) | globals->pic_veneer)
3948 /* PIC stubs. */
3949 ? (r_type == R_ARM_THM_TLS_CALL
3950 /* TLS PIC stubs. */
3951 ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
3952 : arm_stub_long_branch_v4t_thumb_tls_pic)
3953 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
3954 /* V5T PIC and above. */
3955 ? arm_stub_long_branch_any_arm_pic
3956 /* V4T PIC stub. */
3957 : arm_stub_long_branch_v4t_thumb_arm_pic))
3958
3959 /* non-PIC stubs. */
3960 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
3961 /* V5T and above. */
3962 ? arm_stub_long_branch_any_any
3963 /* V4T. */
3964 : arm_stub_long_branch_v4t_thumb_arm);
3965
3966 /* Handle v4t short branches. */
3967 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3968 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3969 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3970 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3971 }
3972 }
3973 }
3974 else if (r_type == R_ARM_CALL
3975 || r_type == R_ARM_JUMP24
3976 || r_type == R_ARM_PLT32
3977 || r_type == R_ARM_TLS_CALL)
3978 {
3979 if (branch_type == ST_BRANCH_TO_THUMB)
3980 {
3981 /* Arm to thumb. */
3982
3983 if (sym_sec != NULL
3984 && sym_sec->owner != NULL
3985 && !INTERWORK_FLAG (sym_sec->owner))
3986 {
3987 (*_bfd_error_handler)
3988 (_("%B(%s): warning: interworking not enabled.\n"
3989 " first occurrence: %B: ARM call to Thumb"),
3990 sym_sec->owner, input_bfd, name);
3991 }
3992
3993 /* We have an extra 2-bytes reach because of
3994 the mode change (bit 24 (H) of BLX encoding). */
3995 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3996 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3997 || (r_type == R_ARM_CALL && !globals->use_blx)
3998 || (r_type == R_ARM_JUMP24)
3999 || (r_type == R_ARM_PLT32))
4000 {
4001 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4002 /* PIC stubs. */
4003 ? ((globals->use_blx)
4004 /* V5T and above. */
4005 ? arm_stub_long_branch_any_thumb_pic
4006 /* V4T stub. */
4007 : arm_stub_long_branch_v4t_arm_thumb_pic)
4008
4009 /* non-PIC stubs. */
4010 : ((globals->use_blx)
4011 /* V5T and above. */
4012 ? arm_stub_long_branch_any_any
4013 /* V4T. */
4014 : arm_stub_long_branch_v4t_arm_thumb);
4015 }
4016 }
4017 else
4018 {
4019 /* Arm to arm. */
4020 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
4021 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
4022 {
4023 stub_type =
4024 (bfd_link_pic (info) | globals->pic_veneer)
4025 /* PIC stubs. */
4026 ? (r_type == R_ARM_TLS_CALL
4027 /* TLS PIC Stub. */
4028 ? arm_stub_long_branch_any_tls_pic
4029 : (globals->nacl_p
4030 ? arm_stub_long_branch_arm_nacl_pic
4031 : arm_stub_long_branch_any_arm_pic))
4032 /* non-PIC stubs. */
4033 : (globals->nacl_p
4034 ? arm_stub_long_branch_arm_nacl
4035 : arm_stub_long_branch_any_any);
4036 }
4037 }
4038 }
4039
4040 /* If a stub is needed, record the actual destination type. */
4041 if (stub_type != arm_stub_none)
4042 *actual_branch_type = branch_type;
4043
4044 return stub_type;
4045 }
4046
4047 /* Build a name for an entry in the stub hash table. */
4048
4049 static char *
4050 elf32_arm_stub_name (const asection *input_section,
4051 const asection *sym_sec,
4052 const struct elf32_arm_link_hash_entry *hash,
4053 const Elf_Internal_Rela *rel,
4054 enum elf32_arm_stub_type stub_type)
4055 {
4056 char *stub_name;
4057 bfd_size_type len;
4058
4059 if (hash)
4060 {
4061 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
4062 stub_name = (char *) bfd_malloc (len);
4063 if (stub_name != NULL)
4064 sprintf (stub_name, "%08x_%s+%x_%d",
4065 input_section->id & 0xffffffff,
4066 hash->root.root.root.string,
4067 (int) rel->r_addend & 0xffffffff,
4068 (int) stub_type);
4069 }
4070 else
4071 {
4072 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
4073 stub_name = (char *) bfd_malloc (len);
4074 if (stub_name != NULL)
4075 sprintf (stub_name, "%08x_%x:%x+%x_%d",
4076 input_section->id & 0xffffffff,
4077 sym_sec->id & 0xffffffff,
4078 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
4079 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
4080 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
4081 (int) rel->r_addend & 0xffffffff,
4082 (int) stub_type);
4083 }
4084
4085 return stub_name;
4086 }
4087
4088 /* Look up an entry in the stub hash. Stub entries are cached because
4089 creating the stub name takes a bit of time. */
4090
4091 static struct elf32_arm_stub_hash_entry *
4092 elf32_arm_get_stub_entry (const asection *input_section,
4093 const asection *sym_sec,
4094 struct elf_link_hash_entry *hash,
4095 const Elf_Internal_Rela *rel,
4096 struct elf32_arm_link_hash_table *htab,
4097 enum elf32_arm_stub_type stub_type)
4098 {
4099 struct elf32_arm_stub_hash_entry *stub_entry;
4100 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
4101 const asection *id_sec;
4102
4103 if ((input_section->flags & SEC_CODE) == 0)
4104 return NULL;
4105
4106 /* If this input section is part of a group of sections sharing one
4107 stub section, then use the id of the first section in the group.
4108 Stub names need to include a section id, as there may well be
4109 more than one stub used to reach say, printf, and we need to
4110 distinguish between them. */
4111 id_sec = htab->stub_group[input_section->id].link_sec;
4112
4113 if (h != NULL && h->stub_cache != NULL
4114 && h->stub_cache->h == h
4115 && h->stub_cache->id_sec == id_sec
4116 && h->stub_cache->stub_type == stub_type)
4117 {
4118 stub_entry = h->stub_cache;
4119 }
4120 else
4121 {
4122 char *stub_name;
4123
4124 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
4125 if (stub_name == NULL)
4126 return NULL;
4127
4128 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
4129 stub_name, FALSE, FALSE);
4130 if (h != NULL)
4131 h->stub_cache = stub_entry;
4132
4133 free (stub_name);
4134 }
4135
4136 return stub_entry;
4137 }
4138
4139 /* Whether veneers of type STUB_TYPE require to be in a dedicated output
4140 section. */
4141
4142 static bfd_boolean
4143 arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
4144 {
4145 if (stub_type >= max_stub_type)
4146 abort (); /* Should be unreachable. */
4147
4148 return FALSE;
4149 }
4150
4151 /* Required alignment (as a power of 2) for the dedicated section holding
4152 veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
4153 with input sections. */
4154
4155 static int
4156 arm_dedicated_stub_output_section_required_alignment
4157 (enum elf32_arm_stub_type stub_type)
4158 {
4159 if (stub_type >= max_stub_type)
4160 abort (); /* Should be unreachable. */
4161
4162 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4163 return 0;
4164 }
4165
4166 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
4167 NULL if veneers of this type are interspersed with input sections. */
4168
4169 static const char *
4170 arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
4171 {
4172 if (stub_type >= max_stub_type)
4173 abort (); /* Should be unreachable. */
4174
4175 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4176 return NULL;
4177 }
4178
4179 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4180 returns the address of the hash table field in HTAB holding a pointer to the
4181 corresponding input section. Otherwise, returns NULL. */
4182
4183 static asection **
4184 arm_dedicated_stub_input_section_ptr
4185 (struct elf32_arm_link_hash_table *htab ATTRIBUTE_UNUSED,
4186 enum elf32_arm_stub_type stub_type)
4187 {
4188 if (stub_type >= max_stub_type)
4189 abort (); /* Should be unreachable. */
4190
4191 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4192 return NULL;
4193 }
4194
/* Find or create a stub section to contain a stub of type STUB_TYPE.  SECTION
   is the section that branch into veneer and can be NULL if stub should go in
   a dedicated output section.  Returns a pointer to the stub section, and the
   section to which the stub section will be attached (in *LINK_SEC_P).
   LINK_SEC_P may be NULL.  Returns NULL on failure (allocation failure, or
   a required dedicated output section that has no address assigned).  */

static asection *
elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
				   struct elf32_arm_link_hash_table *htab,
				   enum elf32_arm_stub_type stub_type)
{
  asection *link_sec, *out_sec, **stub_sec_p;
  const char *stub_sec_prefix;
  bfd_boolean dedicated_output_section =
    arm_dedicated_stub_output_section_required (stub_type);
  int align;

  if (dedicated_output_section)
    {
      /* This veneer type gets its own, linker-script-provided output
	 section; the script must have assigned it an address.  */
      bfd *output_bfd = htab->obfd;
      const char *out_sec_name =
	arm_dedicated_stub_output_section_name (stub_type);
      link_sec = NULL;
      stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
      stub_sec_prefix = out_sec_name;
      align = arm_dedicated_stub_output_section_required_alignment (stub_type);
      out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
      if (out_sec == NULL)
	{
	  (*_bfd_error_handler) (_("No address assigned to the veneers output "
				   "section %s"), out_sec_name);
	  return NULL;
	}
    }
  else
    {
      /* Ordinary veneer: it lives in the stub section of SECTION's
	 stub group, falling back to the group leader's stub section
	 if this section has none of its own yet.  */
      link_sec = htab->stub_group[section->id].link_sec;
      BFD_ASSERT (link_sec != NULL);
      stub_sec_p = &htab->stub_group[section->id].stub_sec;
      if (*stub_sec_p == NULL)
	stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
      stub_sec_prefix = link_sec->name;
      out_sec = link_sec->output_section;
      /* NaCl requires 16-byte (2^4) stub bundles; otherwise 8 bytes.  */
      align = htab->nacl_p ? 4 : 3;
    }

  if (*stub_sec_p == NULL)
    {
      size_t namelen;
      bfd_size_type len;
      char *s_name;

      /* First stub for this group: create the input stub section,
	 named after the prefix plus STUB_SUFFIX.  */
      namelen = strlen (stub_sec_prefix);
      len = namelen + sizeof (STUB_SUFFIX);
      s_name = (char *) bfd_alloc (htab->stub_bfd, len);
      if (s_name == NULL)
	return NULL;

      memcpy (s_name, stub_sec_prefix, namelen);
      memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
      *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
					       align);
      if (*stub_sec_p == NULL)
	return NULL;

      out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
			| SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
			| SEC_KEEP;
    }

  /* Record the (possibly shared) stub section for this section too.  */
  if (!dedicated_output_section)
    htab->stub_group[section->id].stub_sec = *stub_sec_p;

  if (link_sec_p)
    *link_sec_p = link_sec;

  return *stub_sec_p;
}
4273
4274 /* Add a new stub entry to the stub hash. Not all fields of the new
4275 stub entry are initialised. */
4276
4277 static struct elf32_arm_stub_hash_entry *
4278 elf32_arm_add_stub (const char *stub_name, asection *section,
4279 struct elf32_arm_link_hash_table *htab,
4280 enum elf32_arm_stub_type stub_type)
4281 {
4282 asection *link_sec;
4283 asection *stub_sec;
4284 struct elf32_arm_stub_hash_entry *stub_entry;
4285
4286 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
4287 stub_type);
4288 if (stub_sec == NULL)
4289 return NULL;
4290
4291 /* Enter this entry into the linker stub hash table. */
4292 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4293 TRUE, FALSE);
4294 if (stub_entry == NULL)
4295 {
4296 if (section == NULL)
4297 section = stub_sec;
4298 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
4299 section->owner,
4300 stub_name);
4301 return NULL;
4302 }
4303
4304 stub_entry->stub_sec = stub_sec;
4305 stub_entry->stub_offset = 0;
4306 stub_entry->id_sec = link_sec;
4307
4308 return stub_entry;
4309 }
4310
4311 /* Store an Arm insn into an output section not processed by
4312 elf32_arm_write_section. */
4313
4314 static void
4315 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4316 bfd * output_bfd, bfd_vma val, void * ptr)
4317 {
4318 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4319 bfd_putl32 (val, ptr);
4320 else
4321 bfd_putb32 (val, ptr);
4322 }
4323
4324 /* Store a 16-bit Thumb insn into an output section not processed by
4325 elf32_arm_write_section. */
4326
4327 static void
4328 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4329 bfd * output_bfd, bfd_vma val, void * ptr)
4330 {
4331 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4332 bfd_putl16 (val, ptr);
4333 else
4334 bfd_putb16 (val, ptr);
4335 }
4336
4337 /* Store a Thumb2 insn into an output section not processed by
4338 elf32_arm_write_section. */
4339
4340 static void
4341 put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
4342 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
4343 {
4344 /* T2 instructions are 16-bit streamed. */
4345 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4346 {
4347 bfd_putl16 ((val >> 16) & 0xffff, ptr);
4348 bfd_putl16 ((val & 0xffff), ptr + 2);
4349 }
4350 else
4351 {
4352 bfd_putb16 ((val >> 16) & 0xffff, ptr);
4353 bfd_putb16 ((val & 0xffff), ptr + 2);
4354 }
4355 }
4356
4357 /* If it's possible to change R_TYPE to a more efficient access
4358 model, return the new reloc type. */
4359
4360 static unsigned
4361 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4362 struct elf_link_hash_entry *h)
4363 {
4364 int is_local = (h == NULL);
4365
4366 if (bfd_link_pic (info)
4367 || (h && h->root.type == bfd_link_hash_undefweak))
4368 return r_type;
4369
4370 /* We do not support relaxations for Old TLS models. */
4371 switch (r_type)
4372 {
4373 case R_ARM_TLS_GOTDESC:
4374 case R_ARM_TLS_CALL:
4375 case R_ARM_THM_TLS_CALL:
4376 case R_ARM_TLS_DESCSEQ:
4377 case R_ARM_THM_TLS_DESCSEQ:
4378 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4379 }
4380
4381 return r_type;
4382 }
4383
4384 static bfd_reloc_status_type elf32_arm_final_link_relocate
4385 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4386 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4387 const char *, unsigned char, enum arm_st_branch_type,
4388 struct elf_link_hash_entry *, bfd_boolean *, char **);
4389
/* Return the required byte alignment of stubs of type STUB_TYPE:
   2 for the Thumb-only Cortex-A8 erratum branch veneers, 16 for NaCl
   bundled stubs, and 4 for everything else.  Aborts on an unknown
   stub type.  */

static unsigned int
arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
{
  switch (stub_type)
    {
    /* Cortex-A8 erratum veneers start with 16-bit Thumb code.  */
    case arm_stub_a8_veneer_b_cond:
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_bl:
      return 2;

    /* Long/short branch stubs containing 32-bit (ARM or Thumb-2)
       instructions and/or literal data.  */
    case arm_stub_long_branch_any_any:
    case arm_stub_long_branch_v4t_arm_thumb:
    case arm_stub_long_branch_thumb_only:
    case arm_stub_long_branch_v4t_thumb_thumb:
    case arm_stub_long_branch_v4t_thumb_arm:
    case arm_stub_short_branch_v4t_thumb_arm:
    case arm_stub_long_branch_any_arm_pic:
    case arm_stub_long_branch_any_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_thumb_pic:
    case arm_stub_long_branch_v4t_arm_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_arm_pic:
    case arm_stub_long_branch_thumb_only_pic:
    case arm_stub_long_branch_any_tls_pic:
    case arm_stub_long_branch_v4t_thumb_tls_pic:
    case arm_stub_a8_veneer_blx:
      return 4;

    /* NaCl stubs must fit the 16-byte bundle alignment.  */
    case arm_stub_long_branch_arm_nacl:
    case arm_stub_long_branch_arm_nacl_pic:
      return 16;

    default:
      abort ();  /* Should be unreachable.  */
    }
}
4425
4426 /* Returns whether stubs of type STUB_TYPE take over the symbol they are
4427 veneering (TRUE) or have their own symbol (FALSE). */
4428
4429 static bfd_boolean
4430 arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
4431 {
4432 if (stub_type >= max_stub_type)
4433 abort (); /* Should be unreachable. */
4434
4435 return FALSE;
4436 }
4437
/* Build one stub, described by GEN_ENTRY, into its stub section:
   emit the template instructions and apply the stub's relocations.
   Called via bfd_hash_traverse; IN_ARG is the struct bfd_link_info.
   Returns FALSE on failure, which aborts the traversal.  */

static bfd_boolean
arm_build_one_stub (struct bfd_hash_entry *gen_entry,
		    void * in_arg)
{
#define MAXRELOCS 3
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct elf32_arm_link_hash_table *globals;
  struct bfd_link_info *info;
  asection *stub_sec;
  bfd *stub_bfd;
  bfd_byte *loc;
  bfd_vma sym_value;
  int template_size;
  int size;
  const insn_sequence *template_sequence;
  int i;
  /* Template index and stub-relative offset of each relocation to
     apply; elements beyond the initializers are zero-filled.  */
  int stub_reloc_idx[MAXRELOCS] = {-1, -1};
  int stub_reloc_offset[MAXRELOCS] = {0, 0};
  int nrelocs = 0;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  info = (struct bfd_link_info *) in_arg;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  stub_sec = stub_entry->stub_sec;

  /* The traversal runs twice when Cortex-A8 fixes are enabled; skip
     stubs belonging to the other pass.  */
  if ((globals->fix_cortex_a8 < 0)
      != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
    /* We have to do less-strictly-aligned fixes last.  */
    return TRUE;

  /* Make a note of the offset within the stubs for this entry.  */
  stub_entry->stub_offset = stub_sec->size;
  loc = stub_sec->contents + stub_entry->stub_offset;

  stub_bfd = stub_sec->owner;

  /* This is the address of the stub destination.  */
  sym_value = (stub_entry->target_value
	       + stub_entry->target_section->output_offset
	       + stub_entry->target_section->output_section->vma);

  template_sequence = stub_entry->stub_template;
  template_size = stub_entry->stub_template_size;

  /* Emit each template entry, recording which ones carry relocations
     to be applied afterwards.  */
  size = 0;
  for (i = 0; i < template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case THUMB16_TYPE:
	  {
	    bfd_vma data = (bfd_vma) template_sequence[i].data;
	    if (template_sequence[i].reloc_addend != 0)
	      {
		/* We've borrowed the reloc_addend field to mean we should
		   insert a condition code into this (Thumb-1 branch)
		   instruction.  See THUMB16_BCOND_INSN.  */
		BFD_ASSERT ((data & 0xff00) == 0xd000);
		data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
	      }
	    bfd_put_16 (stub_bfd, data, loc + size);
	    size += 2;
	  }
	  break;

	case THUMB32_TYPE:
	  /* Thumb-2: two halfwords, most significant first.  */
	  bfd_put_16 (stub_bfd,
		      (template_sequence[i].data >> 16) & 0xffff,
		      loc + size);
	  bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
		      loc + size + 2);
	  if (template_sequence[i].r_type != R_ARM_NONE)
	    {
	      stub_reloc_idx[nrelocs] = i;
	      stub_reloc_offset[nrelocs++] = size;
	    }
	  size += 4;
	  break;

	case ARM_TYPE:
	  bfd_put_32 (stub_bfd, template_sequence[i].data,
		      loc + size);
	  /* Handle cases where the target is encoded within the
	     instruction.  */
	  if (template_sequence[i].r_type == R_ARM_JUMP24)
	    {
	      stub_reloc_idx[nrelocs] = i;
	      stub_reloc_offset[nrelocs++] = size;
	    }
	  size += 4;
	  break;

	case DATA_TYPE:
	  /* Literal pool word: always relocated.  */
	  bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
	  stub_reloc_idx[nrelocs] = i;
	  stub_reloc_offset[nrelocs++] = size;
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}
    }

  stub_sec->size += size;

  /* Stub size has already been computed in arm_size_one_stub.  Check
     consistency.  */
  BFD_ASSERT (size == stub_entry->stub_size);

  /* Destination is Thumb.  Force bit 0 to 1 to reflect this.  */
  if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
    sym_value |= 1;

  /* Assume there is at least one and at most MAXRELOCS entries to relocate
     in each stub.  */
  BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);

  for (i = 0; i < nrelocs; i++)
    {
      Elf_Internal_Rela rel;
      bfd_boolean unresolved_reloc;
      char *error_message;
      bfd_vma points_to =
	sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;

      rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
      rel.r_info = ELF32_R_INFO (0,
				 template_sequence[stub_reloc_idx[i]].r_type);
      rel.r_addend = 0;

      if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
	/* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
	   template should refer back to the instruction after the original
	   branch.  We use target_section as Cortex-A8 erratum workaround stubs
	   are only generated when both source and target are in the same
	   section.  */
	points_to = stub_entry->target_section->output_section->vma
		    + stub_entry->target_section->output_offset
		    + stub_entry->source_value;

      elf32_arm_final_link_relocate (elf32_arm_howto_from_type
	  (template_sequence[stub_reloc_idx[i]].r_type),
	   stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
	   points_to, info, stub_entry->target_section, "", STT_FUNC,
	   stub_entry->branch_type,
	   (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
	   &error_message);
    }

  return TRUE;
#undef MAXRELOCS
}
4597
4598 /* Calculate the template, template size and instruction size for a stub.
4599 Return value is the instruction size. */
4600
4601 static unsigned int
4602 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
4603 const insn_sequence **stub_template,
4604 int *stub_template_size)
4605 {
4606 const insn_sequence *template_sequence = NULL;
4607 int template_size = 0, i;
4608 unsigned int size;
4609
4610 template_sequence = stub_definitions[stub_type].template_sequence;
4611 if (stub_template)
4612 *stub_template = template_sequence;
4613
4614 template_size = stub_definitions[stub_type].template_size;
4615 if (stub_template_size)
4616 *stub_template_size = template_size;
4617
4618 size = 0;
4619 for (i = 0; i < template_size; i++)
4620 {
4621 switch (template_sequence[i].type)
4622 {
4623 case THUMB16_TYPE:
4624 size += 2;
4625 break;
4626
4627 case ARM_TYPE:
4628 case THUMB32_TYPE:
4629 case DATA_TYPE:
4630 size += 4;
4631 break;
4632
4633 default:
4634 BFD_FAIL ();
4635 return 0;
4636 }
4637 }
4638
4639 return size;
4640 }
4641
4642 /* As above, but don't actually build the stub. Just bump offset so
4643 we know stub section sizes. */
4644
4645 static bfd_boolean
4646 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
4647 void *in_arg ATTRIBUTE_UNUSED)
4648 {
4649 struct elf32_arm_stub_hash_entry *stub_entry;
4650 const insn_sequence *template_sequence;
4651 int template_size, size;
4652
4653 /* Massage our args to the form they really have. */
4654 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4655
4656 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
4657 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
4658
4659 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
4660 &template_size);
4661
4662 stub_entry->stub_size = size;
4663 stub_entry->stub_template = template_sequence;
4664 stub_entry->stub_template_size = template_size;
4665
4666 size = (size + 7) & ~7;
4667 stub_entry->stub_sec->size += size;
4668
4669 return TRUE;
4670 }
4671
4672 /* External entry points for sizing and building linker stubs. */
4673
/* Set up various things so that we can make a list of input sections
   for each output section included in the link.  Returns -1 on error,
   0 when no stubs will be needed, and 1 on success.  */

int
elf32_arm_setup_section_lists (bfd *output_bfd,
			       struct bfd_link_info *info)
{
  bfd *input_bfd;
  unsigned int bfd_count;
  unsigned int top_id, top_index;
  asection *section;
  asection **input_list, **list;
  bfd_size_type amt;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return 0;
  if (! is_elf_hash_table (htab))
    return 0;

  /* Count the number of input BFDs and find the top input section id.  */
  for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
       input_bfd != NULL;
       input_bfd = input_bfd->link.next)
    {
      bfd_count += 1;
      for (section = input_bfd->sections;
	   section != NULL;
	   section = section->next)
	{
	  if (top_id < section->id)
	    top_id = section->id;
	}
    }
  htab->bfd_count = bfd_count;

  /* stub_group is indexed by input section id; zero-initialised so
     every link_sec/stub_sec pointer starts out NULL.  */
  amt = sizeof (struct map_stub) * (top_id + 1);
  htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
  if (htab->stub_group == NULL)
    return -1;
  htab->top_id = top_id;

  /* We can't use output_bfd->section_count here to find the top output
     section index as some sections may have been removed, and
     _bfd_strip_section_from_output doesn't renumber the indices.  */
  for (section = output_bfd->sections, top_index = 0;
       section != NULL;
       section = section->next)
    {
      if (top_index < section->index)
	top_index = section->index;
    }

  /* input_list is indexed by output section index and collects, for
     each code output section, the list of its input sections (built
     later by elf32_arm_next_input_section).  */
  htab->top_index = top_index;
  amt = sizeof (asection *) * (top_index + 1);
  input_list = (asection **) bfd_malloc (amt);
  htab->input_list = input_list;
  if (input_list == NULL)
    return -1;

  /* For sections we aren't interested in, mark their entries with a
     value we can check later.  */
  list = input_list + top_index;
  do
    *list = bfd_abs_section_ptr;
  while (list-- != input_list);

  /* A NULL entry marks a code output section whose list may receive
     input sections (and hence stubs).  */
  for (section = output_bfd->sections;
       section != NULL;
       section = section->next)
    {
      if ((section->flags & SEC_CODE) != 0)
	input_list[section->index] = NULL;
    }

  return 1;
}
4752
/* The linker repeatedly calls this function for each input section,
   in the order that input sections are linked into output sections.
   Build lists of input sections to determine groupings between which
   we may insert linker stubs.  The lists are threaded through the
   stub_group link_sec fields (see PREV_SEC below) and are consumed
   and torn down by group_sections.  */

void
elf32_arm_next_input_section (struct bfd_link_info *info,
			      asection *isec)
{
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return;

  if (isec->output_section->index <= htab->top_index)
    {
      asection **list = htab->input_list + isec->output_section->index;

      /* bfd_abs_section_ptr marks output sections we are not
	 interested in (set up in elf32_arm_setup_section_lists).  */
      if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
	{
	  /* Steal the link_sec pointer for our list.  */
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
	  /* This happens to make the list in reverse order,
	     which we reverse later.  */
	  PREV_SEC (isec) = *list;
	  *list = isec;
	}
    }
}
4782
/* See whether we can group stub sections together.  Grouping stub
   sections may result in fewer stubs.  More importantly, we need to
   put all .init* and .fini* stubs at the end of the .init or
   .fini output sections respectively, because glibc splits the
   _init and _fini functions into multiple parts.  Putting a stub in
   the middle of a function is not a good idea.

   Consumes the per-output-section input lists built by
   elf32_arm_next_input_section, records each section's group leader in
   htab->stub_group[id].link_sec, and frees htab->input_list.
   STUB_GROUP_SIZE bounds the span a single stub section may serve;
   STUBS_ALWAYS_AFTER_BRANCH forbids reaching a stub section from
   sections placed after it.  */

static void
group_sections (struct elf32_arm_link_hash_table *htab,
		bfd_size_type stub_group_size,
		bfd_boolean stubs_always_after_branch)
{
  asection **list = htab->input_list;

  do
    {
      asection *tail = *list;
      asection *head;

      /* Skip output sections we are not interested in.  */
      if (tail == bfd_abs_section_ptr)
	continue;

      /* Reverse the list: we must avoid placing stubs at the
	 beginning of the section because the beginning of the text
	 section may be required for an interrupt vector in bare metal
	 code.  */
#define NEXT_SEC PREV_SEC
      head = NULL;
      while (tail != NULL)
	{
	  /* Pop from tail.  */
	  asection *item = tail;
	  tail = PREV_SEC (item);

	  /* Push on head.  */
	  NEXT_SEC (item) = head;
	  head = item;
	}

      while (head != NULL)
	{
	  asection *curr;
	  asection *next;
	  bfd_vma stub_group_start = head->output_offset;
	  bfd_vma end_of_next;

	  /* Extend the group as far as stub_group_size allows; CURR
	     ends up being the last section of the group, which is
	     where the stub section will be attached.  */
	  curr = head;
	  while (NEXT_SEC (curr) != NULL)
	    {
	      next = NEXT_SEC (curr);
	      end_of_next = next->output_offset + next->size;
	      if (end_of_next - stub_group_start >= stub_group_size)
		/* End of NEXT is too far from start, so stop.  */
		break;
	      /* Add NEXT to the group.  */
	      curr = next;
	    }

	  /* OK, the size from the start to the start of CURR is less
	     than stub_group_size and thus can be handled by one stub
	     section.  (Or the head section is itself larger than
	     stub_group_size, in which case we may be toast.)
	     We should really be keeping track of the total size of
	     stubs added here, as stubs contribute to the final output
	     section size.  */
	  do
	    {
	      next = NEXT_SEC (head);
	      /* Set up this stub group.  */
	      htab->stub_group[head->id].link_sec = curr;
	    }
	  while (head != curr && (head = next) != NULL);

	  /* But wait, there's more!  Input sections up to stub_group_size
	     bytes after the stub section can be handled by it too.  */
	  if (!stubs_always_after_branch)
	    {
	      stub_group_start = curr->output_offset + curr->size;

	      while (next != NULL)
		{
		  end_of_next = next->output_offset + next->size;
		  if (end_of_next - stub_group_start >= stub_group_size)
		    /* End of NEXT is too far from stubs, so stop.  */
		    break;
		  /* Add NEXT to the stub group.  */
		  head = next;
		  next = NEXT_SEC (head);
		  htab->stub_group[head->id].link_sec = curr;
		}
	    }
	  head = next;
	}
    }
  while (list++ != htab->input_list + htab->top_index);

  free (htab->input_list);
#undef PREV_SEC
#undef NEXT_SEC
}
4883
4884 /* Comparison function for sorting/searching relocations relating to Cortex-A8
4885 erratum fix. */
4886
4887 static int
4888 a8_reloc_compare (const void *a, const void *b)
4889 {
4890 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
4891 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
4892
4893 if (ra->from < rb->from)
4894 return -1;
4895 else if (ra->from > rb->from)
4896 return 1;
4897 else
4898 return 0;
4899 }
4900
4901 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
4902 const char *, char **);
4903
4904 /* Helper function to scan code for sequences which might trigger the Cortex-A8
4905 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
4906 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
4907 otherwise. */
4908
static bfd_boolean
cortex_a8_erratum_scan (bfd *input_bfd,
			struct bfd_link_info *info,
			struct a8_erratum_fix **a8_fixes_p,
			unsigned int *num_a8_fixes_p,
			unsigned int *a8_fix_table_size_p,
			struct a8_erratum_reloc *a8_relocs,
			unsigned int num_a8_relocs,
			unsigned prev_num_a8_fixes,
			bfd_boolean *stub_changed_p)
{
  asection *section;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
  /* Work on local copies of the fix table state; the updated values are
     written back through the *_p out-parameters on success.  */
  struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
  unsigned int num_a8_fixes = *num_a8_fixes_p;
  unsigned int a8_fix_table_size = *a8_fix_table_size_p;

  if (htab == NULL)
    return FALSE;

  /* Scan every executable PROGBITS section of INPUT_BFD that will appear
     in the output.  */
  for (section = input_bfd->sections;
       section != NULL;
       section = section->next)
    {
      bfd_byte *contents = NULL;
      struct _arm_elf_section_data *sec_data;
      unsigned int span;
      bfd_vma base_vma;

      if (elf_section_type (section) != SHT_PROGBITS
	  || (elf_section_flags (section) & SHF_EXECINSTR) == 0
	  || (section->flags & SEC_EXCLUDE) != 0
	  || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
	  || (section->output_section == bfd_abs_section_ptr))
	continue;

      base_vma = section->output_section->vma + section->output_offset;

      /* Reuse cached contents when available; note a TRUE return from this
	 function signals an error to the caller.  */
      if (elf_section_data (section)->this_hdr.contents != NULL)
	contents = elf_section_data (section)->this_hdr.contents;
      else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
	return TRUE;

      sec_data = elf32_arm_section_data (section);

      /* Walk the mapping-symbol spans of the section; only Thumb code
	 spans (state 't') can contain the offending sequence.  */
      for (span = 0; span < sec_data->mapcount; span++)
	{
	  unsigned int span_start = sec_data->map[span].vma;
	  unsigned int span_end = (span == sec_data->mapcount - 1)
	    ? section->size : sec_data->map[span + 1].vma;
	  unsigned int i;
	  char span_type = sec_data->map[span].type;
	  bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;

	  if (span_type != 't')
	    continue;

	  /* Span is entirely within a single 4KB region: skip scanning.  */
	  if (((base_vma + span_start) & ~0xfff)
	      == ((base_vma + span_end) & ~0xfff))
	    continue;

	  /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:

	     * The opcode is BLX.W, BL.W, B.W, Bcc.W
	     * The branch target is in the same 4KB region as the
	       first half of the branch.
	     * The instruction before the branch is a 32-bit
	       length non-branch instruction.  */
	  for (i = span_start; i < span_end;)
	    {
	      /* NOTE(review): contents are read little-endian here; confirm
		 big-endian inputs are handled before reaching this scan.  */
	      unsigned int insn = bfd_getl16 (&contents[i]);
	      bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
	      bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;

	      /* First halfword 0b111xx... with xx != 00 marks a 32-bit
		 Thumb-2 encoding.  */
	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
		insn_32bit = TRUE;

	      if (insn_32bit)
		{
		  /* Load the rest of the insn (in manual-friendly order).  */
		  insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);

		  /* Encoding T4: B<c>.W.  */
		  is_b = (insn & 0xf800d000) == 0xf0009000;
		  /* Encoding T1: BL<c>.W.  */
		  is_bl = (insn & 0xf800d000) == 0xf000d000;
		  /* Encoding T2: BLX<c>.W.  */
		  is_blx = (insn & 0xf800d000) == 0xf000c000;
		  /* Encoding T3: B<c>.W (not permitted in IT block).  */
		  is_bcc = (insn & 0xf800d000) == 0xf0008000
			   && (insn & 0x07f00000) != 0x03800000;
		}

	      is_32bit_branch = is_b || is_bl || is_blx || is_bcc;

	      /* Erratum candidate: a 32-bit branch whose first halfword is
		 the last halfword of a 4KB page, preceded by a 32-bit
		 non-branch instruction.  */
	      if (((base_vma + i) & 0xfff) == 0xffe
		  && insn_32bit
		  && is_32bit_branch
		  && last_was_32bit
		  && ! last_was_branch)
		{
		  bfd_signed_vma offset = 0;
		  bfd_boolean force_target_arm = FALSE;
		  bfd_boolean force_target_thumb = FALSE;
		  bfd_vma target;
		  enum elf32_arm_stub_type stub_type = arm_stub_none;
		  struct a8_erratum_reloc key, *found;
		  bfd_boolean use_plt = FALSE;

		  /* Look for a relocation applied at this address; the
		     A8_RELOCS table is sorted by 'from' (a8_reloc_compare).  */
		  key.from = base_vma + i;
		  found = (struct a8_erratum_reloc *)
		      bsearch (&key, a8_relocs, num_a8_relocs,
			       sizeof (struct a8_erratum_reloc),
			       &a8_reloc_compare);

		  if (found)
		    {
		      char *error_message = NULL;
		      struct elf_link_hash_entry *entry;

		      /* We don't care about the error returned from this
			 function, only if there is glue or not.  */
		      entry = find_thumb_glue (info, found->sym_name,
					       &error_message);

		      if (entry)
			found->non_a8_stub = TRUE;

		      /* Keep a simpler condition, for the sake of clarity.  */
		      if (htab->root.splt != NULL && found->hash != NULL
			  && found->hash->root.plt.offset != (bfd_vma) -1)
			use_plt = TRUE;

		      /* A Thumb call that resolves to ARM code (or to a PLT
			 entry, which is ARM) needs the ARM-mode veneer.  */
		      if (found->r_type == R_ARM_THM_CALL)
			{
			  if (found->branch_type == ST_BRANCH_TO_ARM
			      || use_plt)
			    force_target_arm = TRUE;
			  else
			    force_target_thumb = TRUE;
			}
		    }

		  /* Check if we have an offending branch instruction.  */

		  if (found && found->non_a8_stub)
		    /* We've already made a stub for this instruction, e.g.
		       it's a long branch or a Thumb->ARM stub.  Assume that
		       stub will suffice to work around the A8 erratum (see
		       setting of always_after_branch above).  */
		    ;
		  else if (is_bcc)
		    {
		      /* Decode the Bcc.W (T3) signed 21-bit offset.  */
		      offset = (insn & 0x7ff) << 1;
		      offset |= (insn & 0x3f0000) >> 4;
		      offset |= (insn & 0x2000) ? 0x40000 : 0;
		      offset |= (insn & 0x800) ? 0x80000 : 0;
		      offset |= (insn & 0x4000000) ? 0x100000 : 0;
		      if (offset & 0x100000)
			offset |= ~ ((bfd_signed_vma) 0xfffff);
		      stub_type = arm_stub_a8_veneer_b_cond;
		    }
		  else if (is_b || is_bl || is_blx)
		    {
		      /* Decode the B/BL/BLX (T1/T2/T4) signed 25-bit offset;
			 I1/I2 are derived from J1/J2 XOR S per the ARM ARM.  */
		      int s = (insn & 0x4000000) != 0;
		      int j1 = (insn & 0x2000) != 0;
		      int j2 = (insn & 0x800) != 0;
		      int i1 = !(j1 ^ s);
		      int i2 = !(j2 ^ s);

		      offset = (insn & 0x7ff) << 1;
		      offset |= (insn & 0x3ff0000) >> 4;
		      offset |= i2 << 22;
		      offset |= i1 << 23;
		      offset |= s << 24;
		      if (offset & 0x1000000)
			offset |= ~ ((bfd_signed_vma) 0xffffff);

		      /* BLX targets are 4-byte aligned.  */
		      if (is_blx)
			offset &= ~ ((bfd_signed_vma) 3);

		      stub_type = is_blx ? arm_stub_a8_veneer_blx :
			is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
		    }

		  if (stub_type != arm_stub_none)
		    {
		      bfd_vma pc_for_insn = base_vma + i + 4;

		      /* The original instruction is a BL, but the target is
			 an ARM instruction.  If we were not making a stub,
			 the BL would have been converted to a BLX.  Use the
			 BLX stub instead in that case.  */
		      if (htab->use_blx && force_target_arm
			  && stub_type == arm_stub_a8_veneer_bl)
			{
			  stub_type = arm_stub_a8_veneer_blx;
			  is_blx = TRUE;
			  is_bl = FALSE;
			}
		      /* Conversely, if the original instruction was
			 BLX but the target is Thumb mode, use the BL
			 stub.  */
		      else if (force_target_thumb
			       && stub_type == arm_stub_a8_veneer_blx)
			{
			  stub_type = arm_stub_a8_veneer_bl;
			  is_blx = FALSE;
			  is_bl = TRUE;
			}

		      if (is_blx)
			pc_for_insn &= ~ ((bfd_vma) 3);

		      /* If we found a relocation, use the proper destination,
			 not the offset in the (unrelocated) instruction.
			 Note this is always done if we switched the stub type
			 above.  */
		      if (found)
			offset =
			  (bfd_signed_vma) (found->destination - pc_for_insn);

		      /* If the stub will use a Thumb-mode branch to a
			 PLT target, redirect it to the preceding Thumb
			 entry point.  */
		      if (stub_type != arm_stub_a8_veneer_blx && use_plt)
			offset -= PLT_THUMB_STUB_SIZE;

		      target = pc_for_insn + offset;

		      /* The BLX stub is ARM-mode code.  Adjust the offset to
			 take the different PC value (+8 instead of +4) into
			 account.  */
		      if (stub_type == arm_stub_a8_veneer_blx)
			offset += 4;

		      /* Only branches back into the same 4KB page trigger
			 the erratum; record a fix for those.  */
		      if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
			{
			  char *stub_name = NULL;

			  /* Grow the fix table by doubling when full.
			     NOTE(review): bfd_realloc result is not checked
			     here; an OOM would dereference NULL below.  */
			  if (num_a8_fixes == a8_fix_table_size)
			    {
			      a8_fix_table_size *= 2;
			      a8_fixes = (struct a8_erratum_fix *)
				  bfd_realloc (a8_fixes,
					       sizeof (struct a8_erratum_fix)
					       * a8_fix_table_size);
			    }

			  if (num_a8_fixes < prev_num_a8_fixes)
			    {
			      /* If we're doing a subsequent scan,
				 check if we've found the same fix as
				 before, and try and reuse the stub
				 name.  */
			      stub_name = a8_fixes[num_a8_fixes].stub_name;
			      if ((a8_fixes[num_a8_fixes].section != section)
				  || (a8_fixes[num_a8_fixes].offset != i))
				{
				  free (stub_name);
				  stub_name = NULL;
				  *stub_changed_p = TRUE;
				}
			    }

			  if (!stub_name)
			    {
			      /* Room for two 8-hex-digit values, ':' and
				 the terminating NUL ("%x:%x" below).  */
			      stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
			      if (stub_name != NULL)
				sprintf (stub_name, "%x:%x", section->id, i);
			    }

			  a8_fixes[num_a8_fixes].input_bfd = input_bfd;
			  a8_fixes[num_a8_fixes].section = section;
			  a8_fixes[num_a8_fixes].offset = i;
			  a8_fixes[num_a8_fixes].target_offset =
			    target - base_vma;
			  a8_fixes[num_a8_fixes].orig_insn = insn;
			  a8_fixes[num_a8_fixes].stub_name = stub_name;
			  a8_fixes[num_a8_fixes].stub_type = stub_type;
			  a8_fixes[num_a8_fixes].branch_type =
			    is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;

			  num_a8_fixes++;
			}
		    }
		}

	      i += insn_32bit ? 4 : 2;
	      last_was_32bit = insn_32bit;
	      last_was_branch = is_32bit_branch;
	    }
	}

      /* Only free contents we allocated ourselves; cached contents belong
	 to the section data.  */
      if (elf_section_data (section)->this_hdr.contents == NULL)
	free (contents);
    }

  /* Publish the (possibly reallocated) table back to the caller.  */
  *a8_fixes_p = a8_fixes;
  *num_a8_fixes_p = num_a8_fixes;
  *a8_fix_table_size_p = a8_fix_table_size;

  return FALSE;
}
5214
5215 /* Create or update a stub entry depending on whether the stub can already be
5216 found in HTAB. The stub is identified by:
5217 - its type STUB_TYPE
5218 - its source branch (note that several can share the same stub) whose
5219 section and relocation (if any) are given by SECTION and IRELA
5220 respectively
5221 - its target symbol whose input section, hash, name, value and branch type
5222 are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
5223 respectively
5224
5225 If found, the value of the stub's target symbol is updated from SYM_VALUE
5226 and *NEW_STUB is set to FALSE. Otherwise, *NEW_STUB is set to
5227 TRUE and the stub entry is initialized.
5228
5229 Returns whether the stub could be successfully created or updated, or FALSE
5230 if an error occured. */
5231
static bfd_boolean
elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
		       enum elf32_arm_stub_type stub_type, asection *section,
		       Elf_Internal_Rela *irela, asection *sym_sec,
		       struct elf32_arm_link_hash_entry *hash, char *sym_name,
		       bfd_vma sym_value, enum arm_st_branch_type branch_type,
		       bfd_boolean *new_stub)
{
  const asection *id_sec;
  char *stub_name;
  struct elf32_arm_stub_hash_entry *stub_entry;
  unsigned int r_type;
  bfd_boolean sym_claimed = arm_stub_sym_claimed (stub_type);

  BFD_ASSERT (stub_type != arm_stub_none);
  *new_stub = FALSE;

  /* For stub types that claim the symbol, the symbol name itself is the
     stub's key; otherwise derive a name from the stub group, target and
     relocation.  Note ownership: a derived STUB_NAME must be freed on the
     early-return paths below, a claimed SYM_NAME must not.  */
  if (sym_claimed)
    stub_name = sym_name;
  else
    {
      BFD_ASSERT (irela);
      BFD_ASSERT (section);

      /* Support for grouping stub sections.  */
      id_sec = htab->stub_group[section->id].link_sec;

      /* Get the name of this stub.  */
      stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
				       stub_type);
      if (!stub_name)
	return FALSE;
    }

  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, FALSE,
				     FALSE);
  /* The proper stub has already been created, just update its value.  */
  if (stub_entry != NULL)
    {
      if (!sym_claimed)
	free (stub_name);
      stub_entry->target_value = sym_value;
      return TRUE;
    }

  stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
  if (stub_entry == NULL)
    {
      if (!sym_claimed)
	free (stub_name);
      return FALSE;
    }

  /* Fill in the new entry's target description.  */
  stub_entry->target_value = sym_value;
  stub_entry->target_section = sym_sec;
  stub_entry->stub_type = stub_type;
  stub_entry->h = hash;
  stub_entry->branch_type = branch_type;

  if (sym_claimed)
    stub_entry->output_name = sym_name;
  else
    {
      if (sym_name == NULL)
	sym_name = "unnamed";
      /* THUMB2ARM_GLUE_ENTRY_NAME is the longest of the three name
	 templates used below, so this allocation fits all cases.  */
      stub_entry->output_name = (char *)
	  bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
				     + strlen (sym_name));
      if (stub_entry->output_name == NULL)
	{
	  free (stub_name);
	  return FALSE;
	}

      /* For historical reasons, use the existing names for ARM-to-Thumb and
	 Thumb-to-ARM stubs.  */
      r_type = ELF32_R_TYPE (irela->r_info);
      if ((r_type == (unsigned int) R_ARM_THM_CALL
	   || r_type == (unsigned int) R_ARM_THM_JUMP24
	   || r_type == (unsigned int) R_ARM_THM_JUMP19)
	  && branch_type == ST_BRANCH_TO_ARM)
	sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
      else if ((r_type == (unsigned int) R_ARM_CALL
	       || r_type == (unsigned int) R_ARM_JUMP24)
	       && branch_type == ST_BRANCH_TO_THUMB)
	sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
      else
	sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
    }

  *new_stub = TRUE;
  return TRUE;
}
5325
5326 /* Determine and set the size of the stub section for a final link.
5327
5328 The basic idea here is to examine all the relocations looking for
5329 PC-relative calls to a target that is unreachable with a "bl"
5330 instruction. */
5331
5332 bfd_boolean
5333 elf32_arm_size_stubs (bfd *output_bfd,
5334 bfd *stub_bfd,
5335 struct bfd_link_info *info,
5336 bfd_signed_vma group_size,
5337 asection * (*add_stub_section) (const char *, asection *,
5338 asection *,
5339 unsigned int),
5340 void (*layout_sections_again) (void))
5341 {
5342 bfd_size_type stub_group_size;
5343 bfd_boolean stubs_always_after_branch;
5344 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5345 struct a8_erratum_fix *a8_fixes = NULL;
5346 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
5347 struct a8_erratum_reloc *a8_relocs = NULL;
5348 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
5349
5350 if (htab == NULL)
5351 return FALSE;
5352
5353 if (htab->fix_cortex_a8)
5354 {
5355 a8_fixes = (struct a8_erratum_fix *)
5356 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
5357 a8_relocs = (struct a8_erratum_reloc *)
5358 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
5359 }
5360
5361 /* Propagate mach to stub bfd, because it may not have been
5362 finalized when we created stub_bfd. */
5363 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
5364 bfd_get_mach (output_bfd));
5365
5366 /* Stash our params away. */
5367 htab->stub_bfd = stub_bfd;
5368 htab->add_stub_section = add_stub_section;
5369 htab->layout_sections_again = layout_sections_again;
5370 stubs_always_after_branch = group_size < 0;
5371
5372 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
5373 as the first half of a 32-bit branch straddling two 4K pages. This is a
5374 crude way of enforcing that. */
5375 if (htab->fix_cortex_a8)
5376 stubs_always_after_branch = 1;
5377
5378 if (group_size < 0)
5379 stub_group_size = -group_size;
5380 else
5381 stub_group_size = group_size;
5382
5383 if (stub_group_size == 1)
5384 {
5385 /* Default values. */
5386 /* Thumb branch range is +-4MB has to be used as the default
5387 maximum size (a given section can contain both ARM and Thumb
5388 code, so the worst case has to be taken into account).
5389
5390 This value is 24K less than that, which allows for 2025
5391 12-byte stubs. If we exceed that, then we will fail to link.
5392 The user will have to relink with an explicit group size
5393 option. */
5394 stub_group_size = 4170000;
5395 }
5396
5397 group_sections (htab, stub_group_size, stubs_always_after_branch);
5398
5399 /* If we're applying the cortex A8 fix, we need to determine the
5400 program header size now, because we cannot change it later --
5401 that could alter section placements. Notice the A8 erratum fix
5402 ends up requiring the section addresses to remain unchanged
5403 modulo the page size. That's something we cannot represent
5404 inside BFD, and we don't want to force the section alignment to
5405 be the page size. */
5406 if (htab->fix_cortex_a8)
5407 (*htab->layout_sections_again) ();
5408
5409 while (1)
5410 {
5411 bfd *input_bfd;
5412 unsigned int bfd_indx;
5413 asection *stub_sec;
5414 bfd_boolean stub_changed = FALSE;
5415 unsigned prev_num_a8_fixes = num_a8_fixes;
5416
5417 num_a8_fixes = 0;
5418 for (input_bfd = info->input_bfds, bfd_indx = 0;
5419 input_bfd != NULL;
5420 input_bfd = input_bfd->link.next, bfd_indx++)
5421 {
5422 Elf_Internal_Shdr *symtab_hdr;
5423 asection *section;
5424 Elf_Internal_Sym *local_syms = NULL;
5425
5426 if (!is_arm_elf (input_bfd))
5427 continue;
5428
5429 num_a8_relocs = 0;
5430
5431 /* We'll need the symbol table in a second. */
5432 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
5433 if (symtab_hdr->sh_info == 0)
5434 continue;
5435
5436 /* Walk over each section attached to the input bfd. */
5437 for (section = input_bfd->sections;
5438 section != NULL;
5439 section = section->next)
5440 {
5441 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
5442
5443 /* If there aren't any relocs, then there's nothing more
5444 to do. */
5445 if ((section->flags & SEC_RELOC) == 0
5446 || section->reloc_count == 0
5447 || (section->flags & SEC_CODE) == 0)
5448 continue;
5449
5450 /* If this section is a link-once section that will be
5451 discarded, then don't create any stubs. */
5452 if (section->output_section == NULL
5453 || section->output_section->owner != output_bfd)
5454 continue;
5455
5456 /* Get the relocs. */
5457 internal_relocs
5458 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
5459 NULL, info->keep_memory);
5460 if (internal_relocs == NULL)
5461 goto error_ret_free_local;
5462
5463 /* Now examine each relocation. */
5464 irela = internal_relocs;
5465 irelaend = irela + section->reloc_count;
5466 for (; irela < irelaend; irela++)
5467 {
5468 unsigned int r_type, r_indx;
5469 enum elf32_arm_stub_type stub_type;
5470 asection *sym_sec;
5471 bfd_vma sym_value;
5472 bfd_vma destination;
5473 struct elf32_arm_link_hash_entry *hash;
5474 const char *sym_name;
5475 unsigned char st_type;
5476 enum arm_st_branch_type branch_type;
5477 bfd_boolean created_stub = FALSE;
5478
5479 r_type = ELF32_R_TYPE (irela->r_info);
5480 r_indx = ELF32_R_SYM (irela->r_info);
5481
5482 if (r_type >= (unsigned int) R_ARM_max)
5483 {
5484 bfd_set_error (bfd_error_bad_value);
5485 error_ret_free_internal:
5486 if (elf_section_data (section)->relocs == NULL)
5487 free (internal_relocs);
5488 /* Fall through. */
5489 error_ret_free_local:
5490 if (local_syms != NULL
5491 && (symtab_hdr->contents
5492 != (unsigned char *) local_syms))
5493 free (local_syms);
5494 return FALSE;
5495 }
5496
5497 hash = NULL;
5498 if (r_indx >= symtab_hdr->sh_info)
5499 hash = elf32_arm_hash_entry
5500 (elf_sym_hashes (input_bfd)
5501 [r_indx - symtab_hdr->sh_info]);
5502
5503 /* Only look for stubs on branch instructions, or
5504 non-relaxed TLSCALL */
5505 if ((r_type != (unsigned int) R_ARM_CALL)
5506 && (r_type != (unsigned int) R_ARM_THM_CALL)
5507 && (r_type != (unsigned int) R_ARM_JUMP24)
5508 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
5509 && (r_type != (unsigned int) R_ARM_THM_XPC22)
5510 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
5511 && (r_type != (unsigned int) R_ARM_PLT32)
5512 && !((r_type == (unsigned int) R_ARM_TLS_CALL
5513 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5514 && r_type == elf32_arm_tls_transition
5515 (info, r_type, &hash->root)
5516 && ((hash ? hash->tls_type
5517 : (elf32_arm_local_got_tls_type
5518 (input_bfd)[r_indx]))
5519 & GOT_TLS_GDESC) != 0))
5520 continue;
5521
5522 /* Now determine the call target, its name, value,
5523 section. */
5524 sym_sec = NULL;
5525 sym_value = 0;
5526 destination = 0;
5527 sym_name = NULL;
5528
5529 if (r_type == (unsigned int) R_ARM_TLS_CALL
5530 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5531 {
5532 /* A non-relaxed TLS call. The target is the
5533 plt-resident trampoline and nothing to do
5534 with the symbol. */
5535 BFD_ASSERT (htab->tls_trampoline > 0);
5536 sym_sec = htab->root.splt;
5537 sym_value = htab->tls_trampoline;
5538 hash = 0;
5539 st_type = STT_FUNC;
5540 branch_type = ST_BRANCH_TO_ARM;
5541 }
5542 else if (!hash)
5543 {
5544 /* It's a local symbol. */
5545 Elf_Internal_Sym *sym;
5546
5547 if (local_syms == NULL)
5548 {
5549 local_syms
5550 = (Elf_Internal_Sym *) symtab_hdr->contents;
5551 if (local_syms == NULL)
5552 local_syms
5553 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
5554 symtab_hdr->sh_info, 0,
5555 NULL, NULL, NULL);
5556 if (local_syms == NULL)
5557 goto error_ret_free_internal;
5558 }
5559
5560 sym = local_syms + r_indx;
5561 if (sym->st_shndx == SHN_UNDEF)
5562 sym_sec = bfd_und_section_ptr;
5563 else if (sym->st_shndx == SHN_ABS)
5564 sym_sec = bfd_abs_section_ptr;
5565 else if (sym->st_shndx == SHN_COMMON)
5566 sym_sec = bfd_com_section_ptr;
5567 else
5568 sym_sec =
5569 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
5570
5571 if (!sym_sec)
5572 /* This is an undefined symbol. It can never
5573 be resolved. */
5574 continue;
5575
5576 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
5577 sym_value = sym->st_value;
5578 destination = (sym_value + irela->r_addend
5579 + sym_sec->output_offset
5580 + sym_sec->output_section->vma);
5581 st_type = ELF_ST_TYPE (sym->st_info);
5582 branch_type =
5583 ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
5584 sym_name
5585 = bfd_elf_string_from_elf_section (input_bfd,
5586 symtab_hdr->sh_link,
5587 sym->st_name);
5588 }
5589 else
5590 {
5591 /* It's an external symbol. */
5592 while (hash->root.root.type == bfd_link_hash_indirect
5593 || hash->root.root.type == bfd_link_hash_warning)
5594 hash = ((struct elf32_arm_link_hash_entry *)
5595 hash->root.root.u.i.link);
5596
5597 if (hash->root.root.type == bfd_link_hash_defined
5598 || hash->root.root.type == bfd_link_hash_defweak)
5599 {
5600 sym_sec = hash->root.root.u.def.section;
5601 sym_value = hash->root.root.u.def.value;
5602
5603 struct elf32_arm_link_hash_table *globals =
5604 elf32_arm_hash_table (info);
5605
5606 /* For a destination in a shared library,
5607 use the PLT stub as target address to
5608 decide whether a branch stub is
5609 needed. */
5610 if (globals != NULL
5611 && globals->root.splt != NULL
5612 && hash != NULL
5613 && hash->root.plt.offset != (bfd_vma) -1)
5614 {
5615 sym_sec = globals->root.splt;
5616 sym_value = hash->root.plt.offset;
5617 if (sym_sec->output_section != NULL)
5618 destination = (sym_value
5619 + sym_sec->output_offset
5620 + sym_sec->output_section->vma);
5621 }
5622 else if (sym_sec->output_section != NULL)
5623 destination = (sym_value + irela->r_addend
5624 + sym_sec->output_offset
5625 + sym_sec->output_section->vma);
5626 }
5627 else if ((hash->root.root.type == bfd_link_hash_undefined)
5628 || (hash->root.root.type == bfd_link_hash_undefweak))
5629 {
5630 /* For a shared library, use the PLT stub as
5631 target address to decide whether a long
5632 branch stub is needed.
5633 For absolute code, they cannot be handled. */
5634 struct elf32_arm_link_hash_table *globals =
5635 elf32_arm_hash_table (info);
5636
5637 if (globals != NULL
5638 && globals->root.splt != NULL
5639 && hash != NULL
5640 && hash->root.plt.offset != (bfd_vma) -1)
5641 {
5642 sym_sec = globals->root.splt;
5643 sym_value = hash->root.plt.offset;
5644 if (sym_sec->output_section != NULL)
5645 destination = (sym_value
5646 + sym_sec->output_offset
5647 + sym_sec->output_section->vma);
5648 }
5649 else
5650 continue;
5651 }
5652 else
5653 {
5654 bfd_set_error (bfd_error_bad_value);
5655 goto error_ret_free_internal;
5656 }
5657 st_type = hash->root.type;
5658 branch_type =
5659 ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
5660 sym_name = hash->root.root.root.string;
5661 }
5662
5663 do
5664 {
5665 bfd_boolean new_stub;
5666
5667 /* Determine what (if any) linker stub is needed. */
5668 stub_type = arm_type_of_stub (info, section, irela,
5669 st_type, &branch_type,
5670 hash, destination, sym_sec,
5671 input_bfd, sym_name);
5672 if (stub_type == arm_stub_none)
5673 break;
5674
5675 /* We've either created a stub for this reloc already,
5676 or we are about to. */
5677 created_stub =
5678 elf32_arm_create_stub (htab, stub_type, section, irela,
5679 sym_sec, hash,
5680 (char *) sym_name, sym_value,
5681 branch_type, &new_stub);
5682
5683 if (!created_stub)
5684 goto error_ret_free_internal;
5685 else if (!new_stub)
5686 break;
5687 else
5688 stub_changed = TRUE;
5689 }
5690 while (0);
5691
5692 /* Look for relocations which might trigger Cortex-A8
5693 erratum. */
5694 if (htab->fix_cortex_a8
5695 && (r_type == (unsigned int) R_ARM_THM_JUMP24
5696 || r_type == (unsigned int) R_ARM_THM_JUMP19
5697 || r_type == (unsigned int) R_ARM_THM_CALL
5698 || r_type == (unsigned int) R_ARM_THM_XPC22))
5699 {
5700 bfd_vma from = section->output_section->vma
5701 + section->output_offset
5702 + irela->r_offset;
5703
5704 if ((from & 0xfff) == 0xffe)
5705 {
5706 /* Found a candidate. Note we haven't checked the
5707 destination is within 4K here: if we do so (and
5708 don't create an entry in a8_relocs) we can't tell
5709 that a branch should have been relocated when
5710 scanning later. */
5711 if (num_a8_relocs == a8_reloc_table_size)
5712 {
5713 a8_reloc_table_size *= 2;
5714 a8_relocs = (struct a8_erratum_reloc *)
5715 bfd_realloc (a8_relocs,
5716 sizeof (struct a8_erratum_reloc)
5717 * a8_reloc_table_size);
5718 }
5719
5720 a8_relocs[num_a8_relocs].from = from;
5721 a8_relocs[num_a8_relocs].destination = destination;
5722 a8_relocs[num_a8_relocs].r_type = r_type;
5723 a8_relocs[num_a8_relocs].branch_type = branch_type;
5724 a8_relocs[num_a8_relocs].sym_name = sym_name;
5725 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
5726 a8_relocs[num_a8_relocs].hash = hash;
5727
5728 num_a8_relocs++;
5729 }
5730 }
5731 }
5732
5733 /* We're done with the internal relocs, free them. */
5734 if (elf_section_data (section)->relocs == NULL)
5735 free (internal_relocs);
5736 }
5737
5738 if (htab->fix_cortex_a8)
5739 {
5740 /* Sort relocs which might apply to Cortex-A8 erratum. */
5741 qsort (a8_relocs, num_a8_relocs,
5742 sizeof (struct a8_erratum_reloc),
5743 &a8_reloc_compare);
5744
5745 /* Scan for branches which might trigger Cortex-A8 erratum. */
5746 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
5747 &num_a8_fixes, &a8_fix_table_size,
5748 a8_relocs, num_a8_relocs,
5749 prev_num_a8_fixes, &stub_changed)
5750 != 0)
5751 goto error_ret_free_local;
5752 }
5753
5754 if (local_syms != NULL
5755 && symtab_hdr->contents != (unsigned char *) local_syms)
5756 {
5757 if (!info->keep_memory)
5758 free (local_syms);
5759 else
5760 symtab_hdr->contents = (unsigned char *) local_syms;
5761 }
5762 }
5763
5764 if (prev_num_a8_fixes != num_a8_fixes)
5765 stub_changed = TRUE;
5766
5767 if (!stub_changed)
5768 break;
5769
5770 /* OK, we've added some stubs. Find out the new size of the
5771 stub sections. */
5772 for (stub_sec = htab->stub_bfd->sections;
5773 stub_sec != NULL;
5774 stub_sec = stub_sec->next)
5775 {
5776 /* Ignore non-stub sections. */
5777 if (!strstr (stub_sec->name, STUB_SUFFIX))
5778 continue;
5779
5780 stub_sec->size = 0;
5781 }
5782
5783 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
5784
5785 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
5786 if (htab->fix_cortex_a8)
5787 for (i = 0; i < num_a8_fixes; i++)
5788 {
5789 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
5790 a8_fixes[i].section, htab, a8_fixes[i].stub_type);
5791
5792 if (stub_sec == NULL)
5793 return FALSE;
5794
5795 stub_sec->size
5796 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
5797 NULL);
5798 }
5799
5800
5801 /* Ask the linker to do its stuff. */
5802 (*htab->layout_sections_again) ();
5803 }
5804
5805 /* Add stubs for Cortex-A8 erratum fixes now. */
5806 if (htab->fix_cortex_a8)
5807 {
5808 for (i = 0; i < num_a8_fixes; i++)
5809 {
5810 struct elf32_arm_stub_hash_entry *stub_entry;
5811 char *stub_name = a8_fixes[i].stub_name;
5812 asection *section = a8_fixes[i].section;
5813 unsigned int section_id = a8_fixes[i].section->id;
5814 asection *link_sec = htab->stub_group[section_id].link_sec;
5815 asection *stub_sec = htab->stub_group[section_id].stub_sec;
5816 const insn_sequence *template_sequence;
5817 int template_size, size = 0;
5818
5819 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
5820 TRUE, FALSE);
5821 if (stub_entry == NULL)
5822 {
5823 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
5824 section->owner,
5825 stub_name);
5826 return FALSE;
5827 }
5828
5829 stub_entry->stub_sec = stub_sec;
5830 stub_entry->stub_offset = 0;
5831 stub_entry->id_sec = link_sec;
5832 stub_entry->stub_type = a8_fixes[i].stub_type;
5833 stub_entry->source_value = a8_fixes[i].offset;
5834 stub_entry->target_section = a8_fixes[i].section;
5835 stub_entry->target_value = a8_fixes[i].target_offset;
5836 stub_entry->orig_insn = a8_fixes[i].orig_insn;
5837 stub_entry->branch_type = a8_fixes[i].branch_type;
5838
5839 size = find_stub_size_and_template (a8_fixes[i].stub_type,
5840 &template_sequence,
5841 &template_size);
5842
5843 stub_entry->stub_size = size;
5844 stub_entry->stub_template = template_sequence;
5845 stub_entry->stub_template_size = template_size;
5846 }
5847
5848 /* Stash the Cortex-A8 erratum fix array for use later in
5849 elf32_arm_write_section(). */
5850 htab->a8_erratum_fixes = a8_fixes;
5851 htab->num_a8_erratum_fixes = num_a8_fixes;
5852 }
5853 else
5854 {
5855 htab->a8_erratum_fixes = NULL;
5856 htab->num_a8_erratum_fixes = 0;
5857 }
5858 return TRUE;
5859 }
5860
5861 /* Build all the stubs associated with the current output file. The
5862 stubs are kept in a hash table attached to the main linker hash
5863 table. We also set up the .plt entries for statically linked PIC
5864 functions here. This function is called via arm_elf_finish in the
5865 linker. */
5866
5867 bfd_boolean
5868 elf32_arm_build_stubs (struct bfd_link_info *info)
5869 {
5870 asection *stub_sec;
5871 struct bfd_hash_table *table;
5872 struct elf32_arm_link_hash_table *htab;
5873
5874 htab = elf32_arm_hash_table (info);
5875 if (htab == NULL)
5876 return FALSE;
5877
5878 for (stub_sec = htab->stub_bfd->sections;
5879 stub_sec != NULL;
5880 stub_sec = stub_sec->next)
5881 {
5882 bfd_size_type size;
5883
5884 /* Ignore non-stub sections. */
5885 if (!strstr (stub_sec->name, STUB_SUFFIX))
5886 continue;
5887
5888 /* Allocate memory to hold the linker stubs. */
5889 size = stub_sec->size;
5890 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
5891 if (stub_sec->contents == NULL && size != 0)
5892 return FALSE;
5893 stub_sec->size = 0;
5894 }
5895
5896 /* Build the stubs as directed by the stub hash table. */
5897 table = &htab->stub_hash_table;
5898 bfd_hash_traverse (table, arm_build_one_stub, info);
5899 if (htab->fix_cortex_a8)
5900 {
5901 /* Place the cortex a8 stubs last. */
5902 htab->fix_cortex_a8 = -1;
5903 bfd_hash_traverse (table, arm_build_one_stub, info);
5904 }
5905
5906 return TRUE;
5907 }
5908
5909 /* Locate the Thumb encoded calling stub for NAME. */
5910
5911 static struct elf_link_hash_entry *
5912 find_thumb_glue (struct bfd_link_info *link_info,
5913 const char *name,
5914 char **error_message)
5915 {
5916 char *tmp_name;
5917 struct elf_link_hash_entry *hash;
5918 struct elf32_arm_link_hash_table *hash_table;
5919
5920 /* We need a pointer to the armelf specific hash table. */
5921 hash_table = elf32_arm_hash_table (link_info);
5922 if (hash_table == NULL)
5923 return NULL;
5924
5925 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5926 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
5927
5928 BFD_ASSERT (tmp_name);
5929
5930 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
5931
5932 hash = elf_link_hash_lookup
5933 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
5934
5935 if (hash == NULL
5936 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
5937 tmp_name, name) == -1)
5938 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
5939
5940 free (tmp_name);
5941
5942 return hash;
5943 }
5944
5945 /* Locate the ARM encoded calling stub for NAME. */
5946
5947 static struct elf_link_hash_entry *
5948 find_arm_glue (struct bfd_link_info *link_info,
5949 const char *name,
5950 char **error_message)
5951 {
5952 char *tmp_name;
5953 struct elf_link_hash_entry *myh;
5954 struct elf32_arm_link_hash_table *hash_table;
5955
5956 /* We need a pointer to the elfarm specific hash table. */
5957 hash_table = elf32_arm_hash_table (link_info);
5958 if (hash_table == NULL)
5959 return NULL;
5960
5961 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5962 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5963
5964 BFD_ASSERT (tmp_name);
5965
5966 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5967
5968 myh = elf_link_hash_lookup
5969 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
5970
5971 if (myh == NULL
5972 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
5973 tmp_name, name) == -1)
5974 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
5975
5976 free (tmp_name);
5977
5978 return myh;
5979 }
5980
/* ARM->Thumb glue (static images):

   .arm
   __func_from_arm:
   ldr r12, __func_addr
   bx  r12
   __func_addr:
   .word func    @ behave as if you saw a ARM_32 reloc.

   (v5t static images)
   .arm
   __func_from_arm:
   ldr pc, __func_addr
   __func_addr:
   .word func    @ behave as if you saw a ARM_32 reloc.

   (relocatable images)
   .arm
   __func_from_arm:
   ldr r12, __func_offset
   add r12, r12, pc
   bx  r12
   __func_offset:
   .word func - .  */

#define ARM2THUMB_STATIC_GLUE_SIZE 12
static const insn32 a2t1_ldr_insn = 0xe59fc000;       /* ldr  r12, [pc, #0]  */
static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;    /* bx   r12  */
static const insn32 a2t3_func_addr_insn = 0x00000001; /* __func_addr word placeholder  */

#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
static const insn32 a2t1v5_ldr_insn = 0xe51ff004;       /* ldr  pc, [pc, #-4]  */
static const insn32 a2t2v5_func_addr_insn = 0x00000001; /* __func_addr word placeholder  */

#define ARM2THUMB_PIC_GLUE_SIZE 16
static const insn32 a2t1p_ldr_insn = 0xe59fc004;    /* ldr  r12, [pc, #4]  */
static const insn32 a2t2p_add_pc_insn = 0xe08cc00f; /* add  r12, r12, pc  */
static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c; /* bx   r12  */

/* Thumb->ARM: Thumb->(non-interworking aware) ARM

   .thumb                          .thumb
   .align 2                        .align 2
   __func_from_thumb:              __func_from_thumb:
   bx pc                           push {r6, lr}
   nop                             ldr  r6, __func_addr
   .arm                            mov  lr, pc
   b func                          bx   r6
				   .arm
				   ;; back_to_thumb
				   ldmia r13! {r6, lr}
				   bx    lr
				   __func_addr:
				   .word func  */

#define THUMB2ARM_GLUE_SIZE 8
static const insn16 t2a1_bx_pc_insn = 0x4778; /* bx   pc  */
static const insn16 t2a2_noop_insn = 0x46c0;  /* nop  (mov r8, r8)  */
static const insn32 t2a3_b_insn = 0xea000000; /* b    <target> (offset patched in)  */

/* Sizes of the per-erratum veneers emitted into the glue sections.  */
#define VFP11_ERRATUM_VENEER_SIZE 8
#define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
#define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24

/* ARMv4 BX veneer: branch-exchange emulation.  Register fields are 0 in
   the templates below and are filled in for the register actually used.  */
#define ARM_BX_VENEER_SIZE 12
static const insn32 armbx1_tst_insn = 0xe3100001;   /* tst   rN, #1  */
static const insn32 armbx2_moveq_insn = 0x01a0f000; /* moveq pc, rN  */
static const insn32 armbx3_bx_insn = 0xe12fff10;    /* bx    rN  */
6049
6050 #ifndef ELFARM_NABI_C_INCLUDED
6051 static void
6052 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
6053 {
6054 asection * s;
6055 bfd_byte * contents;
6056
6057 if (size == 0)
6058 {
6059 /* Do not include empty glue sections in the output. */
6060 if (abfd != NULL)
6061 {
6062 s = bfd_get_linker_section (abfd, name);
6063 if (s != NULL)
6064 s->flags |= SEC_EXCLUDE;
6065 }
6066 return;
6067 }
6068
6069 BFD_ASSERT (abfd != NULL);
6070
6071 s = bfd_get_linker_section (abfd, name);
6072 BFD_ASSERT (s != NULL);
6073
6074 contents = (bfd_byte *) bfd_alloc (abfd, size);
6075
6076 BFD_ASSERT (s->size == size);
6077 s->contents = contents;
6078 }
6079
6080 bfd_boolean
6081 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
6082 {
6083 struct elf32_arm_link_hash_table * globals;
6084
6085 globals = elf32_arm_hash_table (info);
6086 BFD_ASSERT (globals != NULL);
6087
6088 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6089 globals->arm_glue_size,
6090 ARM2THUMB_GLUE_SECTION_NAME);
6091
6092 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6093 globals->thumb_glue_size,
6094 THUMB2ARM_GLUE_SECTION_NAME);
6095
6096 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6097 globals->vfp11_erratum_glue_size,
6098 VFP11_ERRATUM_VENEER_SECTION_NAME);
6099
6100 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6101 globals->stm32l4xx_erratum_glue_size,
6102 STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
6103
6104 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6105 globals->bx_glue_size,
6106 ARM_BX_GLUE_SECTION_NAME);
6107
6108 return TRUE;
6109 }
6110
6111 /* Allocate space and symbols for calling a Thumb function from Arm mode.
6112 returns the symbol identifying the stub. */
6113
static struct elf_link_hash_entry *
record_arm_to_thumb_glue (struct bfd_link_info * link_info,
			  struct elf_link_hash_entry * h)
{
  const char * name = h->root.root.string;
  asection * s;
  char * tmp_name;
  struct elf_link_hash_entry * myh;
  struct bfd_link_hash_entry * bh;
  struct elf32_arm_link_hash_table * globals;
  bfd_vma val;
  bfd_size_type size;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  /* Compose the decorated stub symbol name for H.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);

  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);

  if (myh != NULL)
    {
      /* We've already seen this guy: reuse the existing stub symbol.  */
      free (tmp_name);
      return myh;
    }

  /* The only trick here is using hash_table->arm_glue_size as the value.
     Even though the section isn't allocated yet, this is where we will be
     putting it.  The +1 on the value marks that the stub has not been
     output yet - not that it is a Thumb function.  */
  bh = NULL;
  val = globals->arm_glue_size + 1;
  /* The name is copied by the hash table (COPY is TRUE), so tmp_name can
     be freed below.  */
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_GLOBAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Pick the stub flavour: PIC glue for shared/relocatable output or when
     PIC veneers were requested, the short BLX-era glue when BLX is
     available, otherwise the full static glue.  */
  if (bfd_link_pic (link_info)
      || globals->root.is_relocatable_executable
      || globals->pic_veneer)
    size = ARM2THUMB_PIC_GLUE_SIZE;
  else if (globals->use_blx)
    size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
  else
    size = ARM2THUMB_STATIC_GLUE_SIZE;

  /* Reserve room for the stub in the glue section.  */
  s->size += size;
  globals->arm_glue_size += size;

  return myh;
}
6183
6184 /* Allocate space for ARMv4 BX veneers. */
6185
6186 static void
6187 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
6188 {
6189 asection * s;
6190 struct elf32_arm_link_hash_table *globals;
6191 char *tmp_name;
6192 struct elf_link_hash_entry *myh;
6193 struct bfd_link_hash_entry *bh;
6194 bfd_vma val;
6195
6196 /* BX PC does not need a veneer. */
6197 if (reg == 15)
6198 return;
6199
6200 globals = elf32_arm_hash_table (link_info);
6201 BFD_ASSERT (globals != NULL);
6202 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6203
6204 /* Check if this veneer has already been allocated. */
6205 if (globals->bx_glue_offset[reg])
6206 return;
6207
6208 s = bfd_get_linker_section
6209 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
6210
6211 BFD_ASSERT (s != NULL);
6212
6213 /* Add symbol for veneer. */
6214 tmp_name = (char *)
6215 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
6216
6217 BFD_ASSERT (tmp_name);
6218
6219 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
6220
6221 myh = elf_link_hash_lookup
6222 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
6223
6224 BFD_ASSERT (myh == NULL);
6225
6226 bh = NULL;
6227 val = globals->bx_glue_size;
6228 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
6229 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
6230 NULL, TRUE, FALSE, &bh);
6231
6232 myh = (struct elf_link_hash_entry *) bh;
6233 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6234 myh->forced_local = 1;
6235
6236 s->size += ARM_BX_VENEER_SIZE;
6237 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
6238 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
6239 }
6240
6241
6242 /* Add an entry to the code/data map for section SEC. */
6243
static void
elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
{
  struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
  unsigned int newidx;

  /* Lazily create the map with room for a single entry.  */
  if (sec_data->map == NULL)
    {
      sec_data->map = (elf32_arm_section_map *)
	  bfd_malloc (sizeof (elf32_arm_section_map));
      sec_data->mapcount = 0;
      sec_data->mapsize = 1;
    }

  newidx = sec_data->mapcount++;

  /* Grow geometrically when full.  bfd_realloc_or_free releases the old
     buffer on failure, so map may legitimately become NULL here.  */
  if (sec_data->mapcount > sec_data->mapsize)
    {
      sec_data->mapsize *= 2;
      sec_data->map = (elf32_arm_section_map *)
	  bfd_realloc_or_free (sec_data->map, sec_data->mapsize
			       * sizeof (elf32_arm_section_map));
    }

  /* On allocation failure the entry is silently dropped; a later call
     re-initialises the map from scratch (map == NULL above).  */
  if (sec_data->map)
    {
      sec_data->map[newidx].vma = vma;
      sec_data->map[newidx].type = type;
    }
}
6274
6275
6276 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
6277 veneers are handled for now. */
6278
6279 static bfd_vma
6280 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
6281 elf32_vfp11_erratum_list *branch,
6282 bfd *branch_bfd,
6283 asection *branch_sec,
6284 unsigned int offset)
6285 {
6286 asection *s;
6287 struct elf32_arm_link_hash_table *hash_table;
6288 char *tmp_name;
6289 struct elf_link_hash_entry *myh;
6290 struct bfd_link_hash_entry *bh;
6291 bfd_vma val;
6292 struct _arm_elf_section_data *sec_data;
6293 elf32_vfp11_erratum_list *newerr;
6294
6295 hash_table = elf32_arm_hash_table (link_info);
6296 BFD_ASSERT (hash_table != NULL);
6297 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
6298
6299 s = bfd_get_linker_section
6300 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
6301
6302 sec_data = elf32_arm_section_data (s);
6303
6304 BFD_ASSERT (s != NULL);
6305
6306 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6307 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6308
6309 BFD_ASSERT (tmp_name);
6310
6311 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6312 hash_table->num_vfp11_fixes);
6313
6314 myh = elf_link_hash_lookup
6315 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
6316
6317 BFD_ASSERT (myh == NULL);
6318
6319 bh = NULL;
6320 val = hash_table->vfp11_erratum_glue_size;
6321 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
6322 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
6323 NULL, TRUE, FALSE, &bh);
6324
6325 myh = (struct elf_link_hash_entry *) bh;
6326 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6327 myh->forced_local = 1;
6328
6329 /* Link veneer back to calling location. */
6330 sec_data->erratumcount += 1;
6331 newerr = (elf32_vfp11_erratum_list *)
6332 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6333
6334 newerr->type = VFP11_ERRATUM_ARM_VENEER;
6335 newerr->vma = -1;
6336 newerr->u.v.branch = branch;
6337 newerr->u.v.id = hash_table->num_vfp11_fixes;
6338 branch->u.b.veneer = newerr;
6339
6340 newerr->next = sec_data->erratumlist;
6341 sec_data->erratumlist = newerr;
6342
6343 /* A symbol for the return from the veneer. */
6344 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6345 hash_table->num_vfp11_fixes);
6346
6347 myh = elf_link_hash_lookup
6348 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
6349
6350 if (myh != NULL)
6351 abort ();
6352
6353 bh = NULL;
6354 val = offset + 4;
6355 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
6356 branch_sec, val, NULL, TRUE, FALSE, &bh);
6357
6358 myh = (struct elf_link_hash_entry *) bh;
6359 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6360 myh->forced_local = 1;
6361
6362 free (tmp_name);
6363
6364 /* Generate a mapping symbol for the veneer section, and explicitly add an
6365 entry for that symbol to the code/data map for the section. */
6366 if (hash_table->vfp11_erratum_glue_size == 0)
6367 {
6368 bh = NULL;
6369 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
6370 ever requires this erratum fix. */
6371 _bfd_generic_link_add_one_symbol (link_info,
6372 hash_table->bfd_of_glue_owner, "$a",
6373 BSF_LOCAL, s, 0, NULL,
6374 TRUE, FALSE, &bh);
6375
6376 myh = (struct elf_link_hash_entry *) bh;
6377 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
6378 myh->forced_local = 1;
6379
6380 /* The elf32_arm_init_maps function only cares about symbols from input
6381 BFDs. We must make a note of this generated mapping symbol
6382 ourselves so that code byteswapping works properly in
6383 elf32_arm_write_section. */
6384 elf32_arm_section_map_add (s, 'a', 0);
6385 }
6386
6387 s->size += VFP11_ERRATUM_VENEER_SIZE;
6388 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
6389 hash_table->num_vfp11_fixes++;
6390
6391 /* The offset of the veneer. */
6392 return val;
6393 }
6394
6395 /* Record information about a STM32L4XX STM erratum veneer. Only THUMB-mode
6396 veneers need to be handled because used only in Cortex-M. */
6397
static bfd_vma
record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
				 elf32_stm32l4xx_erratum_list *branch,
				 bfd *branch_bfd,
				 asection *branch_sec,
				 unsigned int offset,
				 bfd_size_type veneer_size)
{
  asection *s;
  struct elf32_arm_link_hash_table *hash_table;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;
  struct _arm_elf_section_data *sec_data;
  elf32_stm32l4xx_erratum_list *newerr;

  hash_table = elf32_arm_hash_table (link_info);
  BFD_ASSERT (hash_table != NULL);
  BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  sec_data = elf32_arm_section_data (s);

  /* Build the unique, numbered veneer symbol name; 10 extra bytes leave
     room for the decimal counter.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  BFD_ASSERT (myh == NULL);

  /* Define the veneer symbol at the current end of the veneer section.  */
  bh = NULL;
  val = hash_table->stm32l4xx_erratum_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Link veneer back to calling location.  */
  sec_data->stm32l4xx_erratumcount += 1;
  newerr = (elf32_stm32l4xx_erratum_list *)
      bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));

  newerr->type = STM32L4XX_ERRATUM_VENEER;
  newerr->vma = -1;
  newerr->u.v.branch = branch;
  newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
  branch->u.b.veneer = newerr;

  newerr->next = sec_data->stm32l4xx_erratumlist;
  sec_data->stm32l4xx_erratumlist = newerr;

  /* A symbol for the return from the veneer.  */
  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  if (myh != NULL)
    abort ();

  bh = NULL;
  val = offset + 4;
  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
				    branch_sec, val, NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Generate a mapping symbol for the veneer section, and explicitly add an
     entry for that symbol to the code/data map for the section.  */
  if (hash_table->stm32l4xx_erratum_glue_size == 0)
    {
      bh = NULL;
      /* Creates a THUMB symbol since there is no other choice.  */
      _bfd_generic_link_add_one_symbol (link_info,
					hash_table->bfd_of_glue_owner, "$t",
					BSF_LOCAL, s, 0, NULL,
					TRUE, FALSE, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
      myh->forced_local = 1;

      /* The elf32_arm_init_maps function only cares about symbols from input
	 BFDs.  We must make a note of this generated mapping symbol
	 ourselves so that code byteswapping works properly in
	 elf32_arm_write_section.  */
      elf32_arm_section_map_add (s, 't', 0);
    }

  s->size += veneer_size;
  hash_table->stm32l4xx_erratum_glue_size += veneer_size;
  hash_table->num_stm32l4xx_fixes++;

  /* The offset of the veneer.  NOTE(review): VAL was reassigned above to
     OFFSET + 4 (the return symbol's value), so that is what is returned —
     confirm callers expect this rather than the veneer's section offset.  */
  return val;
}
6513
/* Section flags for the glue sections created by arm_make_glue_section:
   loadable, read-only code, created by the linker, contents in memory.  */
#define ARM_GLUE_SECTION_FLAGS \
  (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
   | SEC_READONLY | SEC_LINKER_CREATED)
6517
6518 /* Create a fake section for use by the ARM backend of the linker. */
6519
6520 static bfd_boolean
6521 arm_make_glue_section (bfd * abfd, const char * name)
6522 {
6523 asection * sec;
6524
6525 sec = bfd_get_linker_section (abfd, name);
6526 if (sec != NULL)
6527 /* Already made. */
6528 return TRUE;
6529
6530 sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
6531
6532 if (sec == NULL
6533 || !bfd_set_section_alignment (abfd, sec, 2))
6534 return FALSE;
6535
6536 /* Set the gc mark to prevent the section from being removed by garbage
6537 collection, despite the fact that no relocs refer to this section. */
6538 sec->gc_mark = 1;
6539
6540 return TRUE;
6541 }
6542
6543 /* Set size of .plt entries. This function is called from the
6544 linker scripts in ld/emultempl/{armelf}.em. */
6545
void
bfd_elf32_arm_use_long_plt (void)
{
  /* Record that long-format PLT entries were requested; the flag is
     consulted elsewhere when PLT entries are created (sizing details are
     not visible in this function).  */
  elf32_arm_use_long_plt_entry = TRUE;
}
6551
6552 /* Add the glue sections to ABFD. This function is called from the
6553 linker scripts in ld/emultempl/{armelf}.em. */
6554
6555 bfd_boolean
6556 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
6557 struct bfd_link_info *info)
6558 {
6559 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
6560 bfd_boolean dostm32l4xx = globals
6561 && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
6562 bfd_boolean addglue;
6563
6564 /* If we are only performing a partial
6565 link do not bother adding the glue. */
6566 if (bfd_link_relocatable (info))
6567 return TRUE;
6568
6569 addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
6570 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
6571 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
6572 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
6573
6574 if (!dostm32l4xx)
6575 return addglue;
6576
6577 return addglue
6578 && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
6579 }
6580
6581 /* Mark output sections of veneers needing a dedicated one with SEC_KEEP. This
6582 ensures they are not marked for deletion by
6583 strip_excluded_output_sections () when veneers are going to be created
6584 later. Not doing so would trigger assert on empty section size in
6585 lang_size_sections_1 (). */
6586
6587 void
6588 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
6589 {
6590 enum elf32_arm_stub_type stub_type;
6591
6592 /* If we are only performing a partial
6593 link do not bother adding the glue. */
6594 if (bfd_link_relocatable (info))
6595 return;
6596
6597 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
6598 {
6599 asection *out_sec;
6600 const char *out_sec_name;
6601
6602 if (!arm_dedicated_stub_output_section_required (stub_type))
6603 continue;
6604
6605 out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
6606 out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
6607 if (out_sec != NULL)
6608 out_sec->flags |= SEC_KEEP;
6609 }
6610 }
6611
6612 /* Select a BFD to be used to hold the sections used by the glue code.
6613 This function is called from the linker scripts in ld/emultempl/
6614 {armelf/pe}.em. */
6615
6616 bfd_boolean
6617 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
6618 {
6619 struct elf32_arm_link_hash_table *globals;
6620
6621 /* If we are only performing a partial link
6622 do not bother getting a bfd to hold the glue. */
6623 if (bfd_link_relocatable (info))
6624 return TRUE;
6625
6626 /* Make sure we don't attach the glue sections to a dynamic object. */
6627 BFD_ASSERT (!(abfd->flags & DYNAMIC));
6628
6629 globals = elf32_arm_hash_table (info);
6630 BFD_ASSERT (globals != NULL);
6631
6632 if (globals->bfd_of_glue_owner != NULL)
6633 return TRUE;
6634
6635 /* Save the bfd for later use. */
6636 globals->bfd_of_glue_owner = abfd;
6637
6638 return TRUE;
6639 }
6640
6641 static void
6642 check_use_blx (struct elf32_arm_link_hash_table *globals)
6643 {
6644 int cpu_arch;
6645
6646 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
6647 Tag_CPU_arch);
6648
6649 if (globals->fix_arm1176)
6650 {
6651 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
6652 globals->use_blx = 1;
6653 }
6654 else
6655 {
6656 if (cpu_arch > TAG_CPU_ARCH_V4T)
6657 globals->use_blx = 1;
6658 }
6659 }
6660
/* Scan the relocations of every section of ABFD, before section sizes are
   fixed, and record any ARM->Thumb glue stubs or ARMv4 BX veneers that
   those relocations show will be needed.  Returns FALSE on error (failed
   reloc or section-contents read).  */

bfd_boolean
bfd_elf32_arm_process_before_allocation (bfd *abfd,
					 struct bfd_link_info *link_info)
{
  Elf_Internal_Shdr *symtab_hdr;
  Elf_Internal_Rela *internal_relocs = NULL;
  Elf_Internal_Rela *irel, *irelend;
  bfd_byte *contents = NULL;

  asection *sec;
  struct elf32_arm_link_hash_table *globals;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return TRUE;

  /* Here we have a bfd that is to be included on the link.  We have a
     hook to do reloc rummaging, before section sizes are nailed down.  */
  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);

  check_use_blx (globals);

  if (globals->byteswap_code && !bfd_big_endian (abfd))
    {
      _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
			  abfd);
      return FALSE;
    }

  /* PR 5398: If we have not decided to include any loadable sections in
     the output then we will not have a glue owner bfd.  This is OK, it
     just means that there is nothing else for us to do here.  */
  if (globals->bfd_of_glue_owner == NULL)
    return TRUE;

  /* Rummage around all the relocs and map the glue vectors.  */
  sec = abfd->sections;

  if (sec == NULL)
    return TRUE;

  for (; sec != NULL; sec = sec->next)
    {
      if (sec->reloc_count == 0)
	continue;

      if ((sec->flags & SEC_EXCLUDE) != 0)
	continue;

      symtab_hdr = & elf_symtab_hdr (abfd);

      /* Load the relocs.  */
      internal_relocs
	= _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);

      if (internal_relocs == NULL)
	goto error_return;

      irelend = internal_relocs + sec->reloc_count;
      for (irel = internal_relocs; irel < irelend; irel++)
	{
	  long r_type;
	  unsigned long r_index;

	  struct elf_link_hash_entry *h;

	  r_type = ELF32_R_TYPE (irel->r_info);
	  r_index = ELF32_R_SYM (irel->r_info);

	  /* These are the only relocation types we care about.
	     R_ARM_V4BX is only considered when fix_v4bx >= 2 (veneered
	     fixing).  */
	  if ( r_type != R_ARM_PC24
	      && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
	    continue;

	  /* Get the section contents if we haven't done so already.  */
	  if (contents == NULL)
	    {
	      /* Get cached copy if it exists.  */
	      if (elf_section_data (sec)->this_hdr.contents != NULL)
		contents = elf_section_data (sec)->this_hdr.contents;
	      else
		{
		  /* Go get them off disk.  */
		  if (! bfd_malloc_and_get_section (abfd, sec, &contents))
		    goto error_return;
		}
	    }

	  if (r_type == R_ARM_V4BX)
	    {
	      int reg;

	      /* The BX register operand lives in the low nibble of the
		 instruction; reserve a veneer for that register.  */
	      reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
	      record_arm_bx_glue (link_info, reg);
	      continue;
	    }

	  /* If the relocation is not against a symbol it cannot concern us.  */
	  h = NULL;

	  /* We don't care about local symbols.  */
	  if (r_index < symtab_hdr->sh_info)
	    continue;

	  /* This is an external symbol.  */
	  r_index -= symtab_hdr->sh_info;
	  h = (struct elf_link_hash_entry *)
	    elf_sym_hashes (abfd)[r_index];

	  /* If the relocation is against a static symbol it must be within
	     the current section and so cannot be a cross ARM/Thumb relocation.  */
	  if (h == NULL)
	    continue;

	  /* If the call will go through a PLT entry then we do not need
	     glue.  */
	  if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
	    continue;

	  switch (r_type)
	    {
	    case R_ARM_PC24:
	      /* This one is a call from arm code.  We need to look up
		 the target of the call.  If it is a thumb target, we
		 insert glue.  */
	      if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
		  == ST_BRANCH_TO_THUMB)
		record_arm_to_thumb_glue (link_info, h);
	      break;

	    default:
	      abort ();
	    }
	}

      /* Release this section's buffers unless they are the cached
	 copies owned by the section data.  */
      if (contents != NULL
	  && elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;

      if (internal_relocs != NULL
	  && elf_section_data (sec)->relocs != internal_relocs)
	free (internal_relocs);
      internal_relocs = NULL;
    }

  return TRUE;

error_return:
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);
  if (internal_relocs != NULL
      && elf_section_data (sec)->relocs != internal_relocs)
    free (internal_relocs);

  return FALSE;
}
6821 #endif
6822
6823
6824 /* Initialise maps of ARM/Thumb/data for input BFDs. */
6825
void
bfd_elf32_arm_init_maps (bfd *abfd)
{
  Elf_Internal_Sym *isymbuf;
  Elf_Internal_Shdr *hdr;
  unsigned int i, localsyms;

  /* PR 7093: Make sure that we are dealing with an arm elf binary.  */
  if (! is_arm_elf (abfd))
    return;

  /* Skip dynamic objects.  */
  if ((abfd->flags & DYNAMIC) != 0)
    return;

  hdr = & elf_symtab_hdr (abfd);
  localsyms = hdr->sh_info;

  /* Obtain a buffer full of symbols for this BFD.  The hdr->sh_info field
     should contain the number of local symbols, which should come before any
     global symbols.  Mapping symbols are always local.  */
  isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
				  NULL);

  /* No internal symbols read?  Skip this BFD.  */
  if (isymbuf == NULL)
    return;

  /* NOTE(review): isymbuf appears to be allocated by bfd_elf_get_elf_syms
     (we pass no buffer) and is not freed here — confirm whether ownership
     rests with a symtab cache or this is a leak.  */

  for (i = 0; i < localsyms; i++)
    {
      Elf_Internal_Sym *isym = &isymbuf[i];
      asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
      const char *name;

      if (sec != NULL
	  && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
	{
	  name = bfd_elf_string_from_elf_section (abfd,
						  hdr->sh_link, isym->st_name);

	  /* Record mapping symbols ($a/$t/$d style) in the per-section
	     code/data map; name[1] is the type character passed on.  */
	  if (bfd_is_arm_special_symbol_name (name,
					      BFD_ARM_SPECIAL_SYM_TYPE_MAP))
	    elf32_arm_section_map_add (sec, name[1], isym->st_value);
	}
    }
}
6871
6872
6873 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
6874 say what they wanted. */
6875
6876 void
6877 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
6878 {
6879 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6880 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6881
6882 if (globals == NULL)
6883 return;
6884
6885 if (globals->fix_cortex_a8 == -1)
6886 {
6887 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
6888 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
6889 && (out_attr[Tag_CPU_arch_profile].i == 'A'
6890 || out_attr[Tag_CPU_arch_profile].i == 0))
6891 globals->fix_cortex_a8 = 1;
6892 else
6893 globals->fix_cortex_a8 = 0;
6894 }
6895 }
6896
6897
/* Auto-select the VFP11 denormal erratum fix: leave it disabled where the
   architecture does not need it, and warn when the user forces it on for
   such a target.  */

void
bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
{
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);

  if (globals == NULL)
    return;
  /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix.  */
  if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
    {
      switch (globals->vfp11_fix)
	{
	case BFD_ARM_VFP11_FIX_DEFAULT:
	case BFD_ARM_VFP11_FIX_NONE:
	  globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
	  break;

	default:
	  /* Give a warning, but do as the user requests anyway.  */
	  (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
	      "workaround is not necessary for target architecture"), obfd);
	}
    }
  else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
    /* For earlier architectures, we might need the workaround, but do not
       enable it by default.  If a user is running with broken hardware,
       they must enable the erratum fix explicitly.  */
    globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
}
6928
6929 void
6930 bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
6931 {
6932 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6933 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6934
6935 if (globals == NULL)
6936 return;
6937
6938 /* We assume only Cortex-M4 may require the fix. */
6939 if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
6940 || out_attr[Tag_CPU_arch_profile].i != 'M')
6941 {
6942 if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
6943 /* Give a warning, but do as the user requests anyway. */
6944 (*_bfd_error_handler)
6945 (_("%B: warning: selected STM32L4XX erratum "
6946 "workaround is not necessary for target architecture"), obfd);
6947 }
6948 }
6949
/* Classification of VFP11 pipelines, as assigned by
   bfd_arm_vfp11_insn_decode.  */
enum bfd_arm_vfp11_pipe
{
  VFP11_FMAC,	/* FMAC pipeline: fmac/fnmac/fmsc/fnmsc, fmul/fnmul,
		   fadd/fsub.  */
  VFP11_LS,	/* Load/store pipeline — presumed from the name; the
		   assigning code is not fully visible here.  */
  VFP11_DS,	/* DS pipeline: fdiv and similar.  */
  VFP11_BAD	/* Not a recognised VFP data-processing instruction
		   (the decoder's initial value).  */
};
6957
6958 /* Return a VFP register number. This is encoded as RX:X for single-precision
6959 registers, or X:RX for double-precision registers, where RX is the group of
6960 four bits in the instruction encoding and X is the single extension bit.
6961 RX and X fields are specified using their lowest (starting) bit. The return
6962 value is:
6963
6964 0...31: single-precision registers s0...s31
6965 32...63: double-precision registers d0...d31.
6966
6967 Although X should be zero for VFP11 (encoding d0...d15 only), we might
6968 encounter VFP3 instructions, so we allow the full range for DP registers. */
6969
6970 static unsigned int
6971 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
6972 unsigned int x)
6973 {
6974 if (is_double)
6975 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
6976 else
6977 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
6978 }
6979
6980 /* Set bits in *WMASK according to a register number REG as encoded by
6981 bfd_arm_vfp11_regno(). Ignore d16-d31. */
6982
/* Set bits in *WMASK according to a register number REG as encoded by
   bfd_arm_vfp11_regno().  Ignore d16-d31 (REG >= 48).  A DP register
   sets both of its SP halves.  Uses unsigned shift constants: `1 << 31'
   on a signed int is undefined behaviour in C.  */

static void
bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
{
  if (reg < 32)
    *wmask |= 1u << reg;
  else if (reg < 48)
    *wmask |= 3u << ((reg - 32) * 2);
}
6991
6992 /* Return TRUE if WMASK overwrites anything in REGS. */
6993
6994 static bfd_boolean
6995 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
6996 {
6997 int i;
6998
6999 for (i = 0; i < numregs; i++)
7000 {
7001 unsigned int reg = regs[i];
7002
7003 if (reg < 32 && (wmask & (1 << reg)) != 0)
7004 return TRUE;
7005
7006 reg -= 32;
7007
7008 if (reg >= 16)
7009 continue;
7010
7011 if ((wmask & (3 << (reg * 2))) != 0)
7012 return TRUE;
7013 }
7014
7015 return FALSE;
7016 }
7017
/* In this function, we're interested in two things: finding input registers
   for VFP data-processing instructions, and finding the set of registers which
   arbitrary VFP instructions may write to.  We use a 32-bit unsigned int to
   hold the written set, so FLDM etc. are easy to deal with (we're only
   interested in 32 SP registers or 16 dp registers, due to the VFP version
   implemented by the chip in question).  DP registers are marked by setting
   both SP registers in the write mask).

   On return, *DESTMASK is the set of registers written by INSN (as built
   by bfd_arm_vfp11_write_mask), REGS[0..*NUMREGS-1] holds the input
   operands of data-processing instructions, and the return value names
   the pipeline used — VFP11_BAD if INSN was not recognised.  */

static enum bfd_arm_vfp11_pipe
bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
			   int *numregs)
{
  enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
  /* Bits 8-11 select the operand size: 0xb indicates double precision.  */
  bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;

  if ((insn & 0x0f000e10) == 0x0e000a00)  /* A data-processing insn.  */
    {
      unsigned int pqrs;
      unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Gather the p (bit 23), q/r (bits 20-21) and s (bit 6) opcode
	 bits into a single operation selector.  */
      pqrs = ((insn & 0x00800000) >> 20)
	   | ((insn & 0x00300000) >> 19)
	   | ((insn & 0x00000040) >> 6);

      switch (pqrs)
	{
	case 0: /* fmac[sd].  */
	case 1: /* fnmac[sd].  */
	case 2: /* fmsc[sd].  */
	case 3: /* fnmsc[sd].  */
	  /* Multiply-accumulate: FD is both an input and the output.  */
	  vpipe = VFP11_FMAC;
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = fd;
	  regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[2] = fm;
	  *numregs = 3;
	  break;

	case 4: /* fmul[sd].  */
	case 5: /* fnmul[sd].  */
	case 6: /* fadd[sd].  */
	case 7: /* fsub[sd].  */
	  vpipe = VFP11_FMAC;
	  goto vfp_binop;

	case 8: /* fdiv[sd].  */
	  vpipe = VFP11_DS;
	  vfp_binop:
	  /* Two-input operation: FD is output only.  */
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[1] = fm;
	  *numregs = 2;
	  break;

	case 15: /* extended opcode.  */
	  {
	    /* The extension opcode lives in bits 16-19 plus bit 7.  */
	    unsigned int extn = ((insn >> 15) & 0x1e)
			      | ((insn >> 7) & 1);

	    switch (extn)
	      {
	      case 0: /* fcpy[sd].  */
	      case 1: /* fabs[sd].  */
	      case 2: /* fneg[sd].  */
	      case 8: /* fcmp[sd].  */
	      case 9: /* fcmpe[sd].  */
	      case 10: /* fcmpz[sd].  */
	      case 11: /* fcmpez[sd].  */
	      case 16: /* fuito[sd].  */
	      case 17: /* fsito[sd].  */
	      case 24: /* ftoui[sd].  */
	      case 25: /* ftouiz[sd].  */
	      case 26: /* ftosi[sd].  */
	      case 27: /* ftosiz[sd].  */
		/* These instructions will not bounce due to underflow.  */
		*numregs = 0;
		vpipe = VFP11_FMAC;
		break;

	      case 3: /* fsqrt[sd].  */
		/* fsqrt cannot underflow, but it can (perhaps) overwrite
		   registers to cause the erratum in previous instructions.  */
		bfd_arm_vfp11_write_mask (destmask, fd);
		vpipe = VFP11_DS;
		break;

	      case 15: /* fcvt{ds,sd}.  */
		{
		  int rnum = 0;

		  bfd_arm_vfp11_write_mask (destmask, fd);

		  /* Only FCVTSD can underflow.  */
		  if ((insn & 0x100) != 0)
		    regs[rnum++] = fm;

		  *numregs = rnum;

		  vpipe = VFP11_FMAC;
		}
		break;

	      default:
		return VFP11_BAD;
	      }
	  }
	  break;

	default:
	  return VFP11_BAD;
	}
    }
  /* Two-register transfer.  */
  else if ((insn & 0x0fe00ed0) == 0x0c400a10)
    {
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Bit 20 clear: the transfer writes the VFP register file, so FM
	 (and, for the single-precision pair form, FM+1) is clobbered.  */
      if ((insn & 0x100000) == 0)
	{
	  if (is_double)
	    bfd_arm_vfp11_write_mask (destmask, fm);
	  else
	    {
	      bfd_arm_vfp11_write_mask (destmask, fm);
	      bfd_arm_vfp11_write_mask (destmask, fm + 1);
	    }
	}

      vpipe = VFP11_LS;
    }
  else if ((insn & 0x0e100e00) == 0x0c100a00)  /* A load insn.  */
    {
      int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      /* P (bit 24), U (bit 23) and W (bit 21) distinguish the addressing
	 modes below.  */
      unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);

      switch (puw)
	{
	case 0: /* Two-reg transfer.  We should catch these above.  */
	  abort ();

	case 2: /* fldm[sdx].  */
	case 3:
	case 5:
	  {
	    unsigned int i, offset = insn & 0xff;

	    /* The offset field counts words: halve it to get the number
	       of double-precision registers loaded.  */
	    if (is_double)
	      offset >>= 1;

	    /* Mark every register in the transferred range.  */
	    for (i = fd; i < fd + offset; i++)
	      bfd_arm_vfp11_write_mask (destmask, i);
	  }
	  break;

	case 4: /* fld[sd].  */
	case 6:
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  break;

	default:
	  return VFP11_BAD;
	}

      vpipe = VFP11_LS;
    }
  /* Single-register transfer. Note L==0.  */
  else if ((insn & 0x0f100e10) == 0x0e000a10)
    {
      unsigned int opcode = (insn >> 21) & 7;
      unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);

      switch (opcode)
	{
	case 0: /* fmsr/fmdlr.  */
	case 1: /* fmdhr.  */
	  /* Mark fmdhr and fmdlr as writing to the whole of the DP
	     destination register.  I don't know if this is exactly right,
	     but it is the conservative choice.  */
	  bfd_arm_vfp11_write_mask (destmask, fn);
	  break;

	case 7: /* fmxr.  */
	  /* Writes a system register, not the register file.  */
	  break;
	}

      vpipe = VFP11_LS;
    }

  return vpipe;
}
7209
7210
7211 static int elf32_arm_compare_mapping (const void * a, const void * b);
7212
7213
7214 /* Look for potentially-troublesome code sequences which might trigger the
7215 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
7216 (available from ARM) for details of the erratum. A short version is
7217 described in ld.texinfo. */
7218
7219 bfd_boolean
7220 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
7221 {
7222 asection *sec;
7223 bfd_byte *contents = NULL;
7224 int state = 0;
7225 int regs[3], numregs = 0;
7226 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7227 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
7228
7229 if (globals == NULL)
7230 return FALSE;
7231
7232 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
7233 The states transition as follows:
7234
7235 0 -> 1 (vector) or 0 -> 2 (scalar)
7236 A VFP FMAC-pipeline instruction has been seen. Fill
7237 regs[0]..regs[numregs-1] with its input operands. Remember this
7238 instruction in 'first_fmac'.
7239
7240 1 -> 2
7241 Any instruction, except for a VFP instruction which overwrites
7242 regs[*].
7243
7244 1 -> 3 [ -> 0 ] or
7245 2 -> 3 [ -> 0 ]
7246 A VFP instruction has been seen which overwrites any of regs[*].
7247 We must make a veneer! Reset state to 0 before examining next
7248 instruction.
7249
7250 2 -> 0
7251 If we fail to match anything in state 2, reset to state 0 and reset
7252 the instruction pointer to the instruction after 'first_fmac'.
7253
7254 If the VFP11 vector mode is in use, there must be at least two unrelated
7255 instructions between anti-dependent VFP11 instructions to properly avoid
7256 triggering the erratum, hence the use of the extra state 1. */
7257
7258 /* If we are only performing a partial link do not bother
7259 to construct any glue. */
7260 if (bfd_link_relocatable (link_info))
7261 return TRUE;
7262
7263 /* Skip if this bfd does not correspond to an ELF image. */
7264 if (! is_arm_elf (abfd))
7265 return TRUE;
7266
7267 /* We should have chosen a fix type by the time we get here. */
7268 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
7269
7270 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
7271 return TRUE;
7272
7273 /* Skip this BFD if it corresponds to an executable or dynamic object. */
7274 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
7275 return TRUE;
7276
7277 for (sec = abfd->sections; sec != NULL; sec = sec->next)
7278 {
7279 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
7280 struct _arm_elf_section_data *sec_data;
7281
7282 /* If we don't have executable progbits, we're not interested in this
7283 section. Also skip if section is to be excluded. */
7284 if (elf_section_type (sec) != SHT_PROGBITS
7285 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
7286 || (sec->flags & SEC_EXCLUDE) != 0
7287 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
7288 || sec->output_section == bfd_abs_section_ptr
7289 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
7290 continue;
7291
7292 sec_data = elf32_arm_section_data (sec);
7293
7294 if (sec_data->mapcount == 0)
7295 continue;
7296
7297 if (elf_section_data (sec)->this_hdr.contents != NULL)
7298 contents = elf_section_data (sec)->this_hdr.contents;
7299 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
7300 goto error_return;
7301
7302 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
7303 elf32_arm_compare_mapping);
7304
7305 for (span = 0; span < sec_data->mapcount; span++)
7306 {
7307 unsigned int span_start = sec_data->map[span].vma;
7308 unsigned int span_end = (span == sec_data->mapcount - 1)
7309 ? sec->size : sec_data->map[span + 1].vma;
7310 char span_type = sec_data->map[span].type;
7311
7312 /* FIXME: Only ARM mode is supported at present. We may need to
7313 support Thumb-2 mode also at some point. */
7314 if (span_type != 'a')
7315 continue;
7316
7317 for (i = span_start; i < span_end;)
7318 {
7319 unsigned int next_i = i + 4;
7320 unsigned int insn = bfd_big_endian (abfd)
7321 ? (contents[i] << 24)
7322 | (contents[i + 1] << 16)
7323 | (contents[i + 2] << 8)
7324 | contents[i + 3]
7325 : (contents[i + 3] << 24)
7326 | (contents[i + 2] << 16)
7327 | (contents[i + 1] << 8)
7328 | contents[i];
7329 unsigned int writemask = 0;
7330 enum bfd_arm_vfp11_pipe vpipe;
7331
7332 switch (state)
7333 {
7334 case 0:
7335 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
7336 &numregs);
7337 /* I'm assuming the VFP11 erratum can trigger with denorm
7338 operands on either the FMAC or the DS pipeline. This might
7339 lead to slightly overenthusiastic veneer insertion. */
7340 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
7341 {
7342 state = use_vector ? 1 : 2;
7343 first_fmac = i;
7344 veneer_of_insn = insn;
7345 }
7346 break;
7347
7348 case 1:
7349 {
7350 int other_regs[3], other_numregs;
7351 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
7352 other_regs,
7353 &other_numregs);
7354 if (vpipe != VFP11_BAD
7355 && bfd_arm_vfp11_antidependency (writemask, regs,
7356 numregs))
7357 state = 3;
7358 else
7359 state = 2;
7360 }
7361 break;
7362
7363 case 2:
7364 {
7365 int other_regs[3], other_numregs;
7366 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
7367 other_regs,
7368 &other_numregs);
7369 if (vpipe != VFP11_BAD
7370 && bfd_arm_vfp11_antidependency (writemask, regs,
7371 numregs))
7372 state = 3;
7373 else
7374 {
7375 state = 0;
7376 next_i = first_fmac + 4;
7377 }
7378 }
7379 break;
7380
7381 case 3:
7382 abort (); /* Should be unreachable. */
7383 }
7384
7385 if (state == 3)
7386 {
7387 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
7388 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
7389
7390 elf32_arm_section_data (sec)->erratumcount += 1;
7391
7392 newerr->u.b.vfp_insn = veneer_of_insn;
7393
7394 switch (span_type)
7395 {
7396 case 'a':
7397 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
7398 break;
7399
7400 default:
7401 abort ();
7402 }
7403
7404 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
7405 first_fmac);
7406
7407 newerr->vma = -1;
7408
7409 newerr->next = sec_data->erratumlist;
7410 sec_data->erratumlist = newerr;
7411
7412 state = 0;
7413 }
7414
7415 i = next_i;
7416 }
7417 }
7418
7419 if (contents != NULL
7420 && elf_section_data (sec)->this_hdr.contents != contents)
7421 free (contents);
7422 contents = NULL;
7423 }
7424
7425 return TRUE;
7426
7427 error_return:
7428 if (contents != NULL
7429 && elf_section_data (sec)->this_hdr.contents != contents)
7430 free (contents);
7431
7432 return FALSE;
7433 }
7434
7435 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
7436 after sections have been laid out, using specially-named symbols. */
7437
7438 void
7439 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
7440 struct bfd_link_info *link_info)
7441 {
7442 asection *sec;
7443 struct elf32_arm_link_hash_table *globals;
7444 char *tmp_name;
7445
7446 if (bfd_link_relocatable (link_info))
7447 return;
7448
7449 /* Skip if this bfd does not correspond to an ELF image. */
7450 if (! is_arm_elf (abfd))
7451 return;
7452
7453 globals = elf32_arm_hash_table (link_info);
7454 if (globals == NULL)
7455 return;
7456
7457 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7458 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
7459
7460 for (sec = abfd->sections; sec != NULL; sec = sec->next)
7461 {
7462 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7463 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
7464
7465 for (; errnode != NULL; errnode = errnode->next)
7466 {
7467 struct elf_link_hash_entry *myh;
7468 bfd_vma vma;
7469
7470 switch (errnode->type)
7471 {
7472 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
7473 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
7474 /* Find veneer symbol. */
7475 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
7476 errnode->u.b.veneer->u.v.id);
7477
7478 myh = elf_link_hash_lookup
7479 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7480
7481 if (myh == NULL)
7482 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
7483 "`%s'"), abfd, tmp_name);
7484
7485 vma = myh->root.u.def.section->output_section->vma
7486 + myh->root.u.def.section->output_offset
7487 + myh->root.u.def.value;
7488
7489 errnode->u.b.veneer->vma = vma;
7490 break;
7491
7492 case VFP11_ERRATUM_ARM_VENEER:
7493 case VFP11_ERRATUM_THUMB_VENEER:
7494 /* Find return location. */
7495 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
7496 errnode->u.v.id);
7497
7498 myh = elf_link_hash_lookup
7499 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7500
7501 if (myh == NULL)
7502 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
7503 "`%s'"), abfd, tmp_name);
7504
7505 vma = myh->root.u.def.section->output_section->vma
7506 + myh->root.u.def.section->output_offset
7507 + myh->root.u.def.value;
7508
7509 errnode->u.v.branch->vma = vma;
7510 break;
7511
7512 default:
7513 abort ();
7514 }
7515 }
7516 }
7517
7518 free (tmp_name);
7519 }
7520
7521 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
7522 return locations after sections have been laid out, using
7523 specially-named symbols. */
7524
7525 void
7526 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
7527 struct bfd_link_info *link_info)
7528 {
7529 asection *sec;
7530 struct elf32_arm_link_hash_table *globals;
7531 char *tmp_name;
7532
7533 if (bfd_link_relocatable (link_info))
7534 return;
7535
7536 /* Skip if this bfd does not correspond to an ELF image. */
7537 if (! is_arm_elf (abfd))
7538 return;
7539
7540 globals = elf32_arm_hash_table (link_info);
7541 if (globals == NULL)
7542 return;
7543
7544 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7545 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
7546
7547 for (sec = abfd->sections; sec != NULL; sec = sec->next)
7548 {
7549 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7550 elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
7551
7552 for (; errnode != NULL; errnode = errnode->next)
7553 {
7554 struct elf_link_hash_entry *myh;
7555 bfd_vma vma;
7556
7557 switch (errnode->type)
7558 {
7559 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
7560 /* Find veneer symbol. */
7561 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
7562 errnode->u.b.veneer->u.v.id);
7563
7564 myh = elf_link_hash_lookup
7565 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7566
7567 if (myh == NULL)
7568 (*_bfd_error_handler) (_("%B: unable to find STM32L4XX veneer "
7569 "`%s'"), abfd, tmp_name);
7570
7571 vma = myh->root.u.def.section->output_section->vma
7572 + myh->root.u.def.section->output_offset
7573 + myh->root.u.def.value;
7574
7575 errnode->u.b.veneer->vma = vma;
7576 break;
7577
7578 case STM32L4XX_ERRATUM_VENEER:
7579 /* Find return location. */
7580 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
7581 errnode->u.v.id);
7582
7583 myh = elf_link_hash_lookup
7584 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7585
7586 if (myh == NULL)
7587 (*_bfd_error_handler) (_("%B: unable to find STM32L4XX veneer "
7588 "`%s'"), abfd, tmp_name);
7589
7590 vma = myh->root.u.def.section->output_section->vma
7591 + myh->root.u.def.section->output_offset
7592 + myh->root.u.def.value;
7593
7594 errnode->u.v.branch->vma = vma;
7595 break;
7596
7597 default:
7598 abort ();
7599 }
7600 }
7601 }
7602
7603 free (tmp_name);
7604 }
7605
7606 static inline bfd_boolean
7607 is_thumb2_ldmia (const insn32 insn)
7608 {
7609 /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
7610 1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll. */
7611 return (insn & 0xffd02000) == 0xe8900000;
7612 }
7613
7614 static inline bfd_boolean
7615 is_thumb2_ldmdb (const insn32 insn)
7616 {
7617 /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
7618 1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll. */
7619 return (insn & 0xffd02000) == 0xe9100000;
7620 }
7621
static inline bfd_boolean
is_thumb2_vldm (const insn32 insn)
{
  /* A6.5 Extension register load or store instruction
     A7.7.229
     We look for SP 32-bit and DP 64-bit registers.
     Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
     <list> is consecutive 64-bit registers
     1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
     Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
     <list> is consecutive 32-bit registers
     1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
     if P==0 && U==1 && W==1 && Rn=1101 VPOP
     if PUW=010 || PUW=011 || PUW=101 VLDM.

     The (insn << 7) >> 28 below isolates instruction bits 24-21
     (P, U, D, W); the & 0xd mask then drops D so only the PUW
     combination is compared against the patterns above.
     NOTE(review): the shift arithmetic assumes the usual 32-bit-style
     extraction; the 0xd mask keeps the comparison correct even if
     insn32 is wider than 32 bits.  */
  return
    (((insn & 0xfe100f00) == 0xec100b00) ||
     ((insn & 0xfe100f00) == 0xec100a00))
    && /* (IA without !).  */
    (((((insn << 7) >> 28) & 0xd) == 0x4)
     /* (IA with !), includes VPOP (when reg number is SP).  */
     || ((((insn << 7) >> 28) & 0xd) == 0x5)
     /* (DB with !).  */
     || ((((insn << 7) >> 28) & 0xd) == 0x9));
}
7646
7647 /* STM STM32L4XX erratum : This function assumes that it receives an LDM or
7648 VLDM opcode and:
7649 - computes the number and the mode of memory accesses
7650 - decides if the replacement should be done:
7651 . replaces only if > 8-word accesses
7652 . or (testing purposes only) replaces all accesses. */
7653
7654 static bfd_boolean
7655 stm32l4xx_need_create_replacing_stub (const insn32 insn,
7656 bfd_arm_stm32l4xx_fix stm32l4xx_fix)
7657 {
7658 int nb_words = 0;
7659
7660 /* The field encoding the register list is the same for both LDMIA
7661 and LDMDB encodings. */
7662 if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
7663 nb_words = popcount (insn & 0x0000ffff);
7664 else if (is_thumb2_vldm (insn))
7665 nb_words = (insn & 0xff);
7666
7667 /* DEFAULT mode accounts for the real bug condition situation,
7668 ALL mode inserts stubs for each LDM/VLDM instruction (testing). */
7669 return
7670 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT) ? nb_words > 8 :
7671 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL) ? TRUE : FALSE;
7672 }
7673
7674 /* Look for potentially-troublesome code sequences which might trigger
7675 the STM STM32L4XX erratum. */
7676
7677 bfd_boolean
7678 bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
7679 struct bfd_link_info *link_info)
7680 {
7681 asection *sec;
7682 bfd_byte *contents = NULL;
7683 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7684
7685 if (globals == NULL)
7686 return FALSE;
7687
7688 /* If we are only performing a partial link do not bother
7689 to construct any glue. */
7690 if (bfd_link_relocatable (link_info))
7691 return TRUE;
7692
7693 /* Skip if this bfd does not correspond to an ELF image. */
7694 if (! is_arm_elf (abfd))
7695 return TRUE;
7696
7697 if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
7698 return TRUE;
7699
7700 /* Skip this BFD if it corresponds to an executable or dynamic object. */
7701 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
7702 return TRUE;
7703
7704 for (sec = abfd->sections; sec != NULL; sec = sec->next)
7705 {
7706 unsigned int i, span;
7707 struct _arm_elf_section_data *sec_data;
7708
7709 /* If we don't have executable progbits, we're not interested in this
7710 section. Also skip if section is to be excluded. */
7711 if (elf_section_type (sec) != SHT_PROGBITS
7712 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
7713 || (sec->flags & SEC_EXCLUDE) != 0
7714 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
7715 || sec->output_section == bfd_abs_section_ptr
7716 || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
7717 continue;
7718
7719 sec_data = elf32_arm_section_data (sec);
7720
7721 if (sec_data->mapcount == 0)
7722 continue;
7723
7724 if (elf_section_data (sec)->this_hdr.contents != NULL)
7725 contents = elf_section_data (sec)->this_hdr.contents;
7726 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
7727 goto error_return;
7728
7729 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
7730 elf32_arm_compare_mapping);
7731
7732 for (span = 0; span < sec_data->mapcount; span++)
7733 {
7734 unsigned int span_start = sec_data->map[span].vma;
7735 unsigned int span_end = (span == sec_data->mapcount - 1)
7736 ? sec->size : sec_data->map[span + 1].vma;
7737 char span_type = sec_data->map[span].type;
7738 int itblock_current_pos = 0;
7739
7740 /* Only Thumb2 mode need be supported with this CM4 specific
7741 code, we should not encounter any arm mode eg span_type
7742 != 'a'. */
7743 if (span_type != 't')
7744 continue;
7745
7746 for (i = span_start; i < span_end;)
7747 {
7748 unsigned int insn = bfd_get_16 (abfd, &contents[i]);
7749 bfd_boolean insn_32bit = FALSE;
7750 bfd_boolean is_ldm = FALSE;
7751 bfd_boolean is_vldm = FALSE;
7752 bfd_boolean is_not_last_in_it_block = FALSE;
7753
7754 /* The first 16-bits of all 32-bit thumb2 instructions start
7755 with opcode[15..13]=0b111 and the encoded op1 can be anything
7756 except opcode[12..11]!=0b00.
7757 See 32-bit Thumb instruction encoding. */
7758 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
7759 insn_32bit = TRUE;
7760
7761 /* Compute the predicate that tells if the instruction
7762 is concerned by the IT block
7763 - Creates an error if there is a ldm that is not
7764 last in the IT block thus cannot be replaced
7765 - Otherwise we can create a branch at the end of the
7766 IT block, it will be controlled naturally by IT
7767 with the proper pseudo-predicate
7768 - So the only interesting predicate is the one that
7769 tells that we are not on the last item of an IT
7770 block. */
7771 if (itblock_current_pos != 0)
7772 is_not_last_in_it_block = !!--itblock_current_pos;
7773
7774 if (insn_32bit)
7775 {
7776 /* Load the rest of the insn (in manual-friendly order). */
7777 insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
7778 is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
7779 is_vldm = is_thumb2_vldm (insn);
7780
7781 /* Veneers are created for (v)ldm depending on
7782 option flags and memory accesses conditions; but
7783 if the instruction is not the last instruction of
7784 an IT block, we cannot create a jump there, so we
7785 bail out. */
7786 if ((is_ldm || is_vldm) &&
7787 stm32l4xx_need_create_replacing_stub
7788 (insn, globals->stm32l4xx_fix))
7789 {
7790 if (is_not_last_in_it_block)
7791 {
7792 (*_bfd_error_handler)
7793 /* Note - overlong line used here to allow for translation. */
7794 (_("\
7795 %B(%A+0x%lx): error: multiple load detected in non-last IT block instruction : STM32L4XX veneer cannot be generated.\n"
7796 "Use gcc option -mrestrict-it to generate only one instruction per IT block.\n"),
7797 abfd, sec, (long)i);
7798 }
7799 else
7800 {
7801 elf32_stm32l4xx_erratum_list *newerr =
7802 (elf32_stm32l4xx_erratum_list *)
7803 bfd_zmalloc
7804 (sizeof (elf32_stm32l4xx_erratum_list));
7805
7806 elf32_arm_section_data (sec)
7807 ->stm32l4xx_erratumcount += 1;
7808 newerr->u.b.insn = insn;
7809 /* We create only thumb branches. */
7810 newerr->type =
7811 STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
7812 record_stm32l4xx_erratum_veneer
7813 (link_info, newerr, abfd, sec,
7814 i,
7815 is_ldm ?
7816 STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
7817 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
7818 newerr->vma = -1;
7819 newerr->next = sec_data->stm32l4xx_erratumlist;
7820 sec_data->stm32l4xx_erratumlist = newerr;
7821 }
7822 }
7823 }
7824 else
7825 {
7826 /* A7.7.37 IT p208
7827 IT blocks are only encoded in T1
7828 Encoding T1: IT{x{y{z}}} <firstcond>
7829 1 0 1 1 - 1 1 1 1 - firstcond - mask
7830 if mask = '0000' then see 'related encodings'
7831 We don't deal with UNPREDICTABLE, just ignore these.
7832 There can be no nested IT blocks so an IT block
7833 is naturally a new one for which it is worth
7834 computing its size. */
7835 bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00) &&
7836 ((insn & 0x000f) != 0x0000);
7837 /* If we have a new IT block we compute its size. */
7838 if (is_newitblock)
7839 {
7840 /* Compute the number of instructions controlled
7841 by the IT block, it will be used to decide
7842 whether we are inside an IT block or not. */
7843 unsigned int mask = insn & 0x000f;
7844 itblock_current_pos = 4 - ctz (mask);
7845 }
7846 }
7847
7848 i += insn_32bit ? 4 : 2;
7849 }
7850 }
7851
7852 if (contents != NULL
7853 && elf_section_data (sec)->this_hdr.contents != contents)
7854 free (contents);
7855 contents = NULL;
7856 }
7857
7858 return TRUE;
7859
7860 error_return:
7861 if (contents != NULL
7862 && elf_section_data (sec)->this_hdr.contents != contents)
7863 free (contents);
7864
7865 return FALSE;
7866 }
7867
7868 /* Set target relocation values needed during linking. */
7869
7870 void
7871 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
7872 struct bfd_link_info *link_info,
7873 int target1_is_rel,
7874 char * target2_type,
7875 int fix_v4bx,
7876 int use_blx,
7877 bfd_arm_vfp11_fix vfp11_fix,
7878 bfd_arm_stm32l4xx_fix stm32l4xx_fix,
7879 int no_enum_warn, int no_wchar_warn,
7880 int pic_veneer, int fix_cortex_a8,
7881 int fix_arm1176)
7882 {
7883 struct elf32_arm_link_hash_table *globals;
7884
7885 globals = elf32_arm_hash_table (link_info);
7886 if (globals == NULL)
7887 return;
7888
7889 globals->target1_is_rel = target1_is_rel;
7890 if (strcmp (target2_type, "rel") == 0)
7891 globals->target2_reloc = R_ARM_REL32;
7892 else if (strcmp (target2_type, "abs") == 0)
7893 globals->target2_reloc = R_ARM_ABS32;
7894 else if (strcmp (target2_type, "got-rel") == 0)
7895 globals->target2_reloc = R_ARM_GOT_PREL;
7896 else
7897 {
7898 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
7899 target2_type);
7900 }
7901 globals->fix_v4bx = fix_v4bx;
7902 globals->use_blx |= use_blx;
7903 globals->vfp11_fix = vfp11_fix;
7904 globals->stm32l4xx_fix = stm32l4xx_fix;
7905 globals->pic_veneer = pic_veneer;
7906 globals->fix_cortex_a8 = fix_cortex_a8;
7907 globals->fix_arm1176 = fix_arm1176;
7908
7909 BFD_ASSERT (is_arm_elf (output_bfd));
7910 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
7911 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
7912 }
7913
/* Replace the target offset of a Thumb bl or b.w instruction.  INSN
   points at the first (upper) halfword of the existing instruction in
   ABFD's byte order; only the offset fields are rewritten, the opcode
   bits are preserved.  */

static void
insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
{
  bfd_vma upper;
  bfd_vma lower;
  int reloc_sign;

  /* Branch targets are halfword-aligned, so OFFSET bit 0 must be
     clear.  */
  BFD_ASSERT ((offset & 1) == 0);

  upper = bfd_get_16 (abfd, insn);
  lower = bfd_get_16 (abfd, insn + 2);
  reloc_sign = (offset < 0) ? 1 : 0;
  /* Upper halfword: keep the opcode bits, insert imm10 (offset bits
     12-21) and the sign bit S at bit 10.  */
  upper = (upper & ~(bfd_vma) 0x7ff)
	  | ((offset >> 12) & 0x3ff)
	  | (reloc_sign << 10);
  /* Lower halfword: keep the opcode bits outside the 0x2fff mask
     (including bit 12), insert J1 (bit 13), J2 (bit 11) and imm11
     (offset bits 1-11).  J1/J2 are built as NOT(offset bit 23/22) XOR S,
     matching the Thumb-2 wide-branch offset encoding.  */
  lower = (lower & ~(bfd_vma) 0x2fff)
	  | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
	  | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
	  | ((offset >> 1) & 0x7ff);
  bfd_put_16 (abfd, upper, insn);
  bfd_put_16 (abfd, lower, insn + 2);
}
7938
/* Thumb code calling an ARM function.  On first use, populate the
   Thumb-to-ARM glue stub for symbol NAME and then redirect the Thumb BL
   at OFFSET in INPUT_SECTION (whose bytes are at HIT_DATA) to branch to
   the stub.  VAL is the destination address, ADDEND the relocation
   addend.  Returns TRUE on success, FALSE if the glue symbol is missing
   or interworking was not enabled for the defining bfd.  */

static int
elf32_thumb_to_arm_stub (struct bfd_link_info * info,
			 const char * name,
			 bfd * input_bfd,
			 bfd * output_bfd,
			 asection * input_section,
			 bfd_byte * hit_data,
			 asection * sym_sec,
			 bfd_vma offset,
			 bfd_signed_vma addend,
			 bfd_vma val,
			 char **error_message)
{
  asection * s = 0;
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  myh = find_thumb_glue (info, name, error_message);
  if (myh == NULL)
    return FALSE;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      THUMB2ARM_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* The low bit of the glue symbol's value appears to mark a stub that
     has not yet been written; it is cleared below once the stub is
     emitted so the instructions are only generated once.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  (*_bfd_error_handler)
	    (_("%B(%s): warning: interworking not enabled.\n"
	       "  first occurrence: %B: Thumb call to ARM"),
	     sym_sec->owner, input_bfd, name);

	  return FALSE;
	}

      --my_offset;
      myh->root.u.def.value = my_offset;

      /* Emit the stub: a "bx pc" to switch to ARM state...  */
      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
		      s->contents + my_offset);

      /* ...a padding nop (bx pc skips the following halfword)...  */
      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
		      s->contents + my_offset + 2);

      ret_offset =
	/* Address of destination of the stub.  */
	((bfd_signed_vma) val)
	- ((bfd_signed_vma)
	   /* Offset from the start of the current section
	      to the start of the stubs.  */
	   (s->output_offset
	    /* Offset of the start of this stub from the start of the stubs.  */
	    + my_offset
	    /* Address of the start of the current section.  */
	    + s->output_section->vma)
	   /* The branch instruction is 4 bytes into the stub.  */
	   + 4
	   /* ARM branches work from the pc of the instruction + 8.  */
	   + 8);

      /* ...and an ARM branch to the real destination.  */
      put_arm_insn (globals, output_bfd,
		    (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
		    s->contents + my_offset + 4);
    }

  BFD_ASSERT (my_offset <= globals->thumb_glue_size);

  /* Now go back and fix up the original BL insn to point to here.  */
  ret_offset =
    /* Address of where the stub is located.  */
    (s->output_section->vma + s->output_offset + my_offset)
    /* Address of where the BL is located.  */
    - (input_section->output_section->vma + input_section->output_offset
       + offset)
    /* Addend in the relocation.  */
    - addend
    /* Biassing for PC-relative addressing.  */
    - 8;

  insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);

  return TRUE;
}
8039
8040 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
8041
static struct elf_link_hash_entry *
elf32_arm_create_thumb_stub (struct bfd_link_info * info,
			     const char * name,
			     bfd * input_bfd,
			     bfd * output_bfd,
			     asection * sym_sec,
			     bfd_vma val,
			     asection * s,
			     char ** error_message)
{
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  /* Look up the glue symbol previously created for NAME; failure has
     already filled in *ERROR_MESSAGE.  */
  myh = find_arm_glue (info, name, error_message);
  if (myh == NULL)
    return NULL;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  /* The low bit of the glue symbol's value set means the stub contents
     have not been written yet; clear it once we populate the stub.  */
  if ((my_offset & 0x01) == 0x01)
    {
      /* Warn when the call target lives in a BFD that was not compiled
	 for interworking.  */
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  (*_bfd_error_handler)
	    (_("%B(%s): warning: interworking not enabled.\n"
	       "  first occurrence: %B: arm call to thumb"),
	     sym_sec->owner, input_bfd, name);
	}

      --my_offset;
      myh->root.u.def.value = my_offset;

      if (bfd_link_pic (info)
	  || globals->root.is_relocatable_executable
	  || globals->pic_veneer)
	{
	  /* For relocatable objects we can't use absolute addresses,
	     so construct the address from a relative offset.  */
	  /* TODO: If the offset is small it's probably worth
	     constructing the address with adds.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
			s->contents + my_offset);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
			s->contents + my_offset + 4);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
			s->contents + my_offset + 8);
	  /* Adjust the offset by 4 for the position of the add,
	     and 8 for the pipeline offset.  */
	  ret_offset = (val - (s->output_offset
			       + s->output_section->vma
			       + my_offset + 12))
		       | 1;
	  /* The literal word at offset 12 holds the PC-relative offset
	     of the Thumb target, with the low bit set for Thumb mode.  */
	  bfd_put_32 (output_bfd, ret_offset,
		      s->contents + my_offset + 12);
	}
      else if (globals->use_blx)
	{
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
			s->contents + my_offset);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
		      s->contents + my_offset + 4);
	}
      else
	{
	  /* Plain ARMv4t sequence: load the target address into ip,
	     then bx ip; the address word follows the two insns.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
			s->contents + my_offset);

	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
			s->contents + my_offset + 4);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
		      s->contents + my_offset + 8);

	  my_offset += 12;
	}
    }

  BFD_ASSERT (my_offset <= globals->arm_glue_size);

  return myh;
}
8134
8135 /* Arm code calling a Thumb function. */
8136
static int
elf32_arm_to_thumb_stub (struct bfd_link_info * info,
			 const char * name,
			 bfd * input_bfd,
			 bfd * output_bfd,
			 asection * input_section,
			 bfd_byte * hit_data,
			 asection * sym_sec,
			 bfd_vma offset,
			 bfd_signed_vma addend,
			 bfd_vma val,
			 char **error_message)
{
  unsigned long int tmp;
  bfd_vma my_offset;
  asection * s;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  /* All ARM->Thumb glue lives in a single linker-created section.  */
  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* Make sure the stub itself is populated, and get its symbol.  */
  myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
				     sym_sec, val, s, error_message);
  if (!myh)
    return FALSE;

  my_offset = myh->root.u.def.value;
  /* Keep the condition/opcode byte of the original branch, and rewrite
     its 24-bit offset field to point at the stub.  */
  tmp = bfd_get_32 (input_bfd, hit_data);
  tmp = tmp & 0xFF000000;

  /* Somehow these are both 4 too far, so subtract 8.  */
  ret_offset = (s->output_offset
		+ my_offset
		+ s->output_section->vma
		- (input_section->output_offset
		   + input_section->output_section->vma
		   + offset + addend)
		- 8);

  tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);

  /* NOTE(review): HIT_DATA appears to be biased by the section VMA here,
     hence the subtraction to get back to the contents buffer — confirm
     against the caller.  */
  bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);

  return TRUE;
}
8191
8192 /* Populate Arm stub for an exported Thumb function. */
8193
static bfd_boolean
elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
{
  struct bfd_link_info * info = (struct bfd_link_info *) inf;
  asection * s;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_entry *eh;
  struct elf32_arm_link_hash_table * globals;
  asection *sec;
  bfd_vma val;
  char *error_message;

  eh = elf32_arm_hash_entry (h);
  /* Allocate stubs for exported Thumb functions on v4t.  Symbols with
     no export glue recorded need no stub.  */
  if (eh->export_glue == NULL)
    return TRUE;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  sec = eh->export_glue->root.u.def.section;

  BFD_ASSERT (sec->output_section != NULL);

  /* Final (output) address of the Thumb function the stub targets.  */
  val = eh->export_glue->root.u.def.value + sec->output_offset
	+ sec->output_section->vma;

  myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
				     h->root.u.def.section->owner,
				     globals->obfd, sec, val, s,
				     &error_message);
  BFD_ASSERT (myh);
  return TRUE;
}
8235
/* Populate ARMv4 BX veneers.  Returns the absolute address of the veneer.  */
8237
static bfd_vma
elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
{
  bfd_byte *p;
  bfd_vma glue_addr;
  asection *s;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM_BX_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* Bit 1 of bx_glue_offset[reg] flags that a veneer for REG was
     requested earlier; bit 0 flags that it has been written.  */
  BFD_ASSERT (globals->bx_glue_offset[reg] & 2);

  glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;

  /* Write the three-instruction veneer the first time through:
     tst REG, #1 / moveq pc, REG / bx REG.  */
  if ((globals->bx_glue_offset[reg] & 1) == 0)
    {
      p = s->contents + glue_addr;
      bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
      bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
      bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
      globals->bx_glue_offset[reg] |= 1;
    }

  /* Return the veneer's absolute output address.  */
  return glue_addr + s->output_section->vma + s->output_offset;
}
8271
8272 /* Generate Arm stubs for exported Thumb symbols. */
8273 static void
8274 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
8275 struct bfd_link_info *link_info)
8276 {
8277 struct elf32_arm_link_hash_table * globals;
8278
8279 if (link_info == NULL)
8280 /* Ignore this if we are not called by the ELF backend linker. */
8281 return;
8282
8283 globals = elf32_arm_hash_table (link_info);
8284 if (globals == NULL)
8285 return;
8286
8287 /* If blx is available then exported Thumb symbols are OK and there is
8288 nothing to do. */
8289 if (globals->use_blx)
8290 return;
8291
8292 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
8293 link_info);
8294 }
8295
/* Reserve space for COUNT dynamic relocations in relocation section
   SRELOC.  */
8298
8299 static void
8300 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
8301 bfd_size_type count)
8302 {
8303 struct elf32_arm_link_hash_table *htab;
8304
8305 htab = elf32_arm_hash_table (info);
8306 BFD_ASSERT (htab->root.dynamic_sections_created);
8307 if (sreloc == NULL)
8308 abort ();
8309 sreloc->size += RELOC_SIZE (htab) * count;
8310 }
8311
8312 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
8313 dynamic, the relocations should go in SRELOC, otherwise they should
8314 go in the special .rel.iplt section. */
8315
8316 static void
8317 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
8318 bfd_size_type count)
8319 {
8320 struct elf32_arm_link_hash_table *htab;
8321
8322 htab = elf32_arm_hash_table (info);
8323 if (!htab->root.dynamic_sections_created)
8324 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
8325 else
8326 {
8327 BFD_ASSERT (sreloc != NULL);
8328 sreloc->size += RELOC_SIZE (htab) * count;
8329 }
8330 }
8331
8332 /* Add relocation REL to the end of relocation section SRELOC. */
8333
8334 static void
8335 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
8336 asection *sreloc, Elf_Internal_Rela *rel)
8337 {
8338 bfd_byte *loc;
8339 struct elf32_arm_link_hash_table *htab;
8340
8341 htab = elf32_arm_hash_table (info);
8342 if (!htab->root.dynamic_sections_created
8343 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
8344 sreloc = htab->root.irelplt;
8345 if (sreloc == NULL)
8346 abort ();
8347 loc = sreloc->contents;
8348 loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
8349 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
8350 abort ();
8351 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
8352 }
8353
8354 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
8355 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
8356 to .plt. */
8357
static void
elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
			      bfd_boolean is_iplt_entry,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  asection *splt;
  asection *sgotplt;

  htab = elf32_arm_hash_table (info);

  if (is_iplt_entry)
    {
      splt = htab->root.iplt;
      sgotplt = htab->root.igotplt;

      /* NaCl uses a special first entry in .iplt too.  */
      if (htab->nacl_p && splt->size == 0)
	splt->size += htab->plt_header_size;

      /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt.  */
      elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
    }
  else
    {
      splt = htab->root.splt;
      sgotplt = htab->root.sgotplt;

      /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt.  */
      elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);

      /* If this is the first .plt entry, make room for the special
	 first entry.  */
      if (splt->size == 0)
	splt->size += htab->plt_header_size;

      htab->next_tls_desc_index++;
    }

  /* Allocate the PLT entry itself, including any leading Thumb stub.
     ROOT_PLT->offset records where the entry (after the stub) will go.  */
  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
    splt->size += PLT_THUMB_STUB_SIZE;
  root_plt->offset = splt->size;
  splt->size += htab->plt_entry_size;

  if (!htab->symbian_p)
    {
      /* We also need to make an entry in the .got.plt section, which
	 will be placed in the .got section by the linker script.  */
      if (is_iplt_entry)
	arm_plt->got_offset = sgotplt->size;
      else
	/* Skip over the TLS descriptor slots that share .got.plt.  */
	arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
      sgotplt->size += 4;
    }
}
8415
8416 static bfd_vma
8417 arm_movw_immediate (bfd_vma value)
8418 {
8419 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
8420 }
8421
8422 static bfd_vma
8423 arm_movt_immediate (bfd_vma value)
8424 {
8425 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
8426 }
8427
8428 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
8429 the entry lives in .iplt and resolves to (*SYM_VALUE)().
8430 Otherwise, DYNINDX is the index of the symbol in the dynamic
8431 symbol table and SYM_VALUE is undefined.
8432
8433 ROOT_PLT points to the offset of the PLT entry from the start of its
8434 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
8435 bookkeeping information.
8436
8437 Returns FALSE if there was a problem. */
8438
static bfd_boolean
elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt,
			      int dynindx, bfd_vma sym_value)
{
  struct elf32_arm_link_hash_table *htab;
  asection *sgot;
  asection *splt;
  asection *srel;
  bfd_byte *loc;
  bfd_vma plt_index;
  Elf_Internal_Rela rel;
  bfd_vma plt_header_size;
  bfd_vma got_header_size;

  htab = elf32_arm_hash_table (info);

  /* Pick the appropriate sections and sizes.  DYNINDX == -1 selects the
     .iplt/.igot.plt/.rel.iplt variants used for ifunc entries.  */
  if (dynindx == -1)
    {
      splt = htab->root.iplt;
      sgot = htab->root.igotplt;
      srel = htab->root.irelplt;

      /* There are no reserved entries in .igot.plt, and no special
	 first entry in .iplt.  */
      got_header_size = 0;
      plt_header_size = 0;
    }
  else
    {
      splt = htab->root.splt;
      sgot = htab->root.sgotplt;
      srel = htab->root.srelplt;

      got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
      plt_header_size = htab->plt_header_size;
    }
  BFD_ASSERT (splt != NULL && srel != NULL);

  /* Fill in the entry in the procedure linkage table.  */
  if (htab->symbian_p)
    {
      /* Symbian uses a two-word PLT entry with an R_ARM_GLOB_DAT
	 relocation on its data word.  */
      BFD_ASSERT (dynindx >= 0);
      put_arm_insn (htab, output_bfd,
		    elf32_arm_symbian_plt_entry[0],
		    splt->contents + root_plt->offset);
      bfd_put_32 (output_bfd,
		  elf32_arm_symbian_plt_entry[1],
		  splt->contents + root_plt->offset + 4);

      /* Fill in the entry in the .rel.plt section.  */
      rel.r_offset = (splt->output_section->vma
		      + splt->output_offset
		      + root_plt->offset + 4);
      rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.  The
	 first entry in the procedure linkage table is reserved.  */
      plt_index = ((root_plt->offset - plt_header_size)
		   / htab->plt_entry_size);
    }
  else
    {
      bfd_vma got_offset, got_address, plt_address;
      bfd_vma got_displacement, initial_got_entry;
      bfd_byte * ptr;

      BFD_ASSERT (sgot != NULL);

      /* Get the offset into the .(i)got.plt table of the entry that
	 corresponds to this function.  The low bit of got_offset is a
	 flag, hence the mask.  */
      got_offset = (arm_plt->got_offset & -2);

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.
	 After the reserved .got.plt entries, all symbols appear in
	 the same order as in .plt.  */
      plt_index = (got_offset - got_header_size) / 4;

      /* Calculate the address of the GOT entry.  */
      got_address = (sgot->output_section->vma
		     + sgot->output_offset
		     + got_offset);

      /* ...and the address of the PLT entry.  */
      plt_address = (splt->output_section->vma
		     + splt->output_offset
		     + root_plt->offset);

      ptr = splt->contents + root_plt->offset;
      if (htab->vxworks_p && bfd_link_pic (info))
	{
	  /* VxWorks shared PLT entry: words 2 and 5 are data (GOT
	     offset and .rel.plt offset), the rest are instructions.  */
	  unsigned int i;
	  bfd_vma val;

	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	    {
	      val = elf32_arm_vxworks_shared_plt_entry[i];
	      if (i == 2)
		val |= got_address - sgot->output_section->vma;
	      if (i == 5)
		val |= plt_index * RELOC_SIZE (htab);
	      if (i == 2 || i == 5)
		bfd_put_32 (output_bfd, val, ptr);
	      else
		put_arm_insn (htab, output_bfd, val, ptr);
	    }
	}
      else if (htab->vxworks_p)
	{
	  /* VxWorks executable PLT entry; word 4 holds a backwards
	     branch encoded as a 24-bit offset.  */
	  unsigned int i;
	  bfd_vma val;

	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	    {
	      val = elf32_arm_vxworks_exec_plt_entry[i];
	      if (i == 2)
		val |= got_address;
	      if (i == 4)
		val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
	      if (i == 5)
		val |= plt_index * RELOC_SIZE (htab);
	      if (i == 2 || i == 5)
		bfd_put_32 (output_bfd, val, ptr);
	      else
		put_arm_insn (htab, output_bfd, val, ptr);
	    }

	  /* Two .rela.plt.unloaded relocations per PLT entry; skip past
	     the header's pair via the +1.  */
	  loc = (htab->srelplt2->contents
		 + (plt_index * 2 + 1) * RELOC_SIZE (htab));

	  /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
	     referencing the GOT for this PLT entry.  */
	  rel.r_offset = plt_address + 8;
	  rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	  rel.r_addend = got_offset;
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	  loc += RELOC_SIZE (htab);

	  /* Create the R_ARM_ABS32 relocation referencing the
	     beginning of the PLT for this GOT entry.  */
	  rel.r_offset = got_address;
	  rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	  rel.r_addend = 0;
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	}
      else if (htab->nacl_p)
	{
	  /* Calculate the displacement between the PLT slot and the
	     common tail that's part of the special initial PLT slot.  */
	  int32_t tail_displacement
	    = ((splt->output_section->vma + splt->output_offset
		+ ARM_NACL_PLT_TAIL_OFFSET)
	       - (plt_address + htab->plt_entry_size + 4));
	  BFD_ASSERT ((tail_displacement & 3) == 0);
	  tail_displacement >>= 2;

	  /* The branch to the tail must fit in a 24-bit offset.  */
	  BFD_ASSERT ((tail_displacement & 0xff000000) == 0
		      || (-tail_displacement & 0xff000000) == 0);

	  /* Calculate the displacement between the PLT slot and the entry
	     in the GOT.  The offset accounts for the value produced by
	     adding to pc in the penultimate instruction of the PLT stub.  */
	  got_displacement = (got_address
			      - (plt_address + htab->plt_entry_size));

	  /* NaCl does not support interworking at all.  */
	  BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));

	  /* movw/movt build the GOT displacement, then the tail branch.  */
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[0]
			| arm_movw_immediate (got_displacement),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[1]
			| arm_movt_immediate (got_displacement),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[3]
			| (tail_displacement & 0x00ffffff),
			ptr + 12);
	}
      else if (using_thumb_only (htab))
	{
	  /* PR ld/16017: Generate thumb only PLT entries.  */
	  if (!using_thumb2 (htab))
	    {
	      /* FIXME: We ought to be able to generate thumb-1 PLT
		 instructions...  */
	      _bfd_error_handler (_("%B: Warning: thumb-1 mode PLT generation not currently supported"),
				  output_bfd);
	      return FALSE;
	    }

	  /* Calculate the displacement between the PLT slot and the entry in
	     the GOT.  The 12-byte offset accounts for the value produced by
	     adding to pc in the 3rd instruction of the PLT stub.  */
	  got_displacement = got_address - (plt_address + 12);

	  /* As we are using 32 bit instructions we have to use 'put_arm_insn'
	     instead of 'put_thumb_insn'.  The bit scattering below matches
	     the Thumb-2 movw/movt immediate encoding.  */
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[0]
			| ((got_displacement & 0x000000ff) << 16)
			| ((got_displacement & 0x00000700) << 20)
			| ((got_displacement & 0x00000800) >> 1)
			| ((got_displacement & 0x0000f000) >> 12),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[1]
			| ((got_displacement & 0x00ff0000) )
			| ((got_displacement & 0x07000000) << 4)
			| ((got_displacement & 0x08000000) >> 17)
			| ((got_displacement & 0xf0000000) >> 28),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[3],
			ptr + 12);
	}
      else
	{
	  /* Calculate the displacement between the PLT slot and the
	     entry in the GOT.  The eight-byte offset accounts for the
	     value produced by adding to pc in the first instruction
	     of the PLT stub.  */
	  got_displacement = got_address - (plt_address + 8);

	  /* The Thumb stub, when needed, sits immediately before the
	     ARM entry (ptr - 4).  */
	  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
	    {
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[0], ptr - 4);
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[1], ptr - 2);
	    }

	  if (!elf32_arm_use_long_plt_entry)
	    {
	      /* Short form: the displacement must fit in 28 bits,
	         spread over three add/ldr immediates.  */
	      BFD_ASSERT ((got_displacement & 0xf0000000) == 0);

	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[0]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[1]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr+ 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[2]
			    | (got_displacement & 0x00000fff),
			    ptr + 8);
#ifdef FOUR_WORD_PLT
	      bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
#endif
	    }
	  else
	    {
	      /* Long form: four instructions cover a full 32-bit
	         displacement.  */
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[0]
			    | ((got_displacement & 0xf0000000) >> 28),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[1]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[2]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr+ 8);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[3]
			    | (got_displacement & 0x00000fff),
			    ptr + 12);
	    }
	}

      /* Fill in the entry in the .rel(a).(i)plt section.  */
      rel.r_offset = got_address;
      rel.r_addend = 0;
      if (dynindx == -1)
	{
	  /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
	     The dynamic linker or static executable then calls SYM_VALUE
	     to determine the correct run-time value of the .igot.plt entry.  */
	  rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
	  initial_got_entry = sym_value;
	}
      else
	{
	  rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
	  /* Lazy binding: the GOT slot initially points back at the
	     start of the PLT.  */
	  initial_got_entry = (splt->output_section->vma
			       + splt->output_offset);
	}

      /* Fill in the entry in the global offset table.  */
      bfd_put_32 (output_bfd, initial_got_entry,
		  sgot->contents + got_offset);
    }

  if (dynindx == -1)
    elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
  else
    {
      loc = srel->contents + plt_index * RELOC_SIZE (htab);
      SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
    }

  return TRUE;
}
8759
8760 /* Some relocations map to different relocations depending on the
8761 target. Return the real relocation. */
8762
8763 static int
8764 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
8765 int r_type)
8766 {
8767 switch (r_type)
8768 {
8769 case R_ARM_TARGET1:
8770 if (globals->target1_is_rel)
8771 return R_ARM_REL32;
8772 else
8773 return R_ARM_ABS32;
8774
8775 case R_ARM_TARGET2:
8776 return globals->target2_reloc;
8777
8778 default:
8779 return r_type;
8780 }
8781 }
8782
8783 /* Return the base VMA address which should be subtracted from real addresses
8784 when resolving @dtpoff relocation.
8785 This is PT_TLS segment p_vaddr. */
8786
8787 static bfd_vma
8788 dtpoff_base (struct bfd_link_info *info)
8789 {
8790 /* If tls_sec is NULL, we should have signalled an error already. */
8791 if (elf_hash_table (info)->tls_sec == NULL)
8792 return 0;
8793 return elf_hash_table (info)->tls_sec->vma;
8794 }
8795
8796 /* Return the relocation value for @tpoff relocation
8797 if STT_TLS virtual address is ADDRESS. */
8798
8799 static bfd_vma
8800 tpoff (struct bfd_link_info *info, bfd_vma address)
8801 {
8802 struct elf_link_hash_table *htab = elf_hash_table (info);
8803 bfd_vma base;
8804
8805 /* If tls_sec is NULL, we should have signalled an error already. */
8806 if (htab->tls_sec == NULL)
8807 return 0;
8808 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
8809 return address - htab->tls_sec->vma + base;
8810 }
8811
8812 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
8813 VALUE is the relocation value. */
8814
8815 static bfd_reloc_status_type
8816 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
8817 {
8818 if (value > 0xfff)
8819 return bfd_reloc_overflow;
8820
8821 value |= bfd_get_32 (abfd, data) & 0xfffff000;
8822 bfd_put_32 (abfd, value, data);
8823 return bfd_reloc_ok;
8824 }
8825
8826 /* Handle TLS relaxations. Relaxing is possible for symbols that use
8827 R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
8828 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
8829
8830 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
8831 is to then call final_link_relocate. Return other values in the
8832 case of error.
8833
   FIXME: When --emit-relocs is in effect, we'll emit relocs describing
   the pre-relaxed code.  It would be nice if the relocs were updated
   to match the optimization.  */
8837
static bfd_reloc_status_type
elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
		     bfd *input_bfd, asection *input_sec, bfd_byte *contents,
		     Elf_Internal_Rela *rel, unsigned long is_local)
{
  unsigned long insn;

  switch (ELF32_R_TYPE (rel->r_info))
    {
    default:
      return bfd_reloc_notsupported;

    case R_ARM_TLS_GOTDESC:
      /* Rewrite the addend on the GOT-pointing word; the low bit of
	 the original word distinguishes a Thumb (-5) from an ARM (-8)
	 PC bias.  */
      if (is_local)
	insn = 0;
      else
	{
	  insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
	  if (insn & 1)
	    insn -= 5; /* THUMB */
	  else
	    insn -= 8; /* ARM */
	}
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      /* The caller still applies the final relocation.  */
      return bfd_reloc_continue;

    case R_ARM_THM_TLS_DESCSEQ:
      /* Thumb insn.  Recognize each instruction of the descriptor
	 sequence and rewrite it for the relaxed (IE or LE) form.  */
      insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xff78) == 0x4478)	  /* add rx, pc */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	}
      else if ((insn & 0xffc0) == 0x6840)  /* ldr rx,[ry,#4] */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
	}
      else if ((insn & 0xff87) == 0x4780)  /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
			contents + rel->r_offset);
	}
      else
	{
	  if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
	    /* It's a 32 bit instruction, fetch the rest of it for
	       error generation.  */
	    insn = (insn << 16)
	      | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
	  (*_bfd_error_handler)
	    (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' in TLS trampoline"),
	     input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_DESCSEQ:
      /* arm insn.  Same idea as the Thumb case, with ARM encodings.  */
      insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
	{
	  if (is_local)
	    /* mov rx, ry */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
			contents + rel->r_offset);
	}
      else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_32 (input_bfd, insn & 0xfffff000,
			contents + rel->r_offset);
	}
      else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
			contents + rel->r_offset);
	}
      else
	{
	  (*_bfd_error_handler)
	    (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' in TLS trampoline"),
	     input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_CALL:
      /* GD->IE relaxation, turn the instruction into 'nop' or
	 'ldr r0, [pc,r0]' */
      insn = is_local ? 0xe1a00000 : 0xe79f0000;
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      break;

    case R_ARM_THM_TLS_CALL:
      /* GD->IE relaxation.  */
      if (!is_local)
	/* add r0,pc; ldr r0, [r0] */
	insn = 0x44786800;
      else if (arch_has_thumb2_nop (globals))
	/* nop.w */
	insn = 0xf3af8000;
      else
	/* nop; nop */
	insn = 0xbf00bf00;

      /* Write the replacement as two halfwords, high halfword first.  */
      bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
      bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
      break;
    }
  return bfd_reloc_ok;
}
8970
8971 /* For a given value of n, calculate the value of G_n as required to
8972 deal with group relocations. We return it in the form of an
8973 encoded constant-and-rotation, together with the final residual. If n is
8974 specified as less than zero, then final_residual is filled with the
8975 input value and no further action is performed. */
8976
static bfd_vma
calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
{
  int current_n;
  bfd_vma g_n;
  bfd_vma encoded_g_n = 0;
  bfd_vma residual = value; /* Also known as Y_n.  */

  /* Peel off one group per iteration; after the loop RESIDUAL holds
     Y_n and ENCODED_G_N the last group's encoded form.  A negative N
     skips the loop entirely, leaving *FINAL_RESIDUAL == VALUE.  */
  for (current_n = 0; current_n <= n; current_n++)
    {
      int shift;

      /* Calculate which part of the value to mask.  */
      if (residual == 0)
	shift = 0;
      else
	{
	  int msb;

	  /* Determine the most significant bit in the residual and
	     align the resulting value to a 2-bit boundary.  */
	  for (msb = 30; msb >= 0; msb -= 2)
	    if (residual & (3 << msb))
	      break;

	  /* The desired shift is now (msb - 6), or zero, whichever
	     is the greater.  */
	  shift = msb - 6;
	  if (shift < 0)
	    shift = 0;
	}

      /* Calculate g_n in 32-bit as well as encoded constant+rotation form.
	 The encoding matches an ARM modified-immediate: low byte is the
	 constant, bits 8+ hold the rotation count / 2.  */
      g_n = residual & (0xff << shift);
      encoded_g_n = (g_n >> shift)
		    | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);

      /* Calculate the residual for the next time around.  */
      residual &= ~g_n;
    }

  *final_residual = residual;

  return encoded_g_n;
}
9022
9023 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
9024 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
9025
9026 static int
9027 identify_add_or_sub (bfd_vma insn)
9028 {
9029 int opcode = insn & 0x1e00000;
9030
9031 if (opcode == 1 << 23) /* ADD */
9032 return 1;
9033
9034 if (opcode == 1 << 22) /* SUB */
9035 return -1;
9036
9037 return 0;
9038 }
9039
9040 /* Perform a relocation as part of a final link. */
9041
9042 static bfd_reloc_status_type
9043 elf32_arm_final_link_relocate (reloc_howto_type * howto,
9044 bfd * input_bfd,
9045 bfd * output_bfd,
9046 asection * input_section,
9047 bfd_byte * contents,
9048 Elf_Internal_Rela * rel,
9049 bfd_vma value,
9050 struct bfd_link_info * info,
9051 asection * sym_sec,
9052 const char * sym_name,
9053 unsigned char st_type,
9054 enum arm_st_branch_type branch_type,
9055 struct elf_link_hash_entry * h,
9056 bfd_boolean * unresolved_reloc_p,
9057 char ** error_message)
9058 {
9059 unsigned long r_type = howto->type;
9060 unsigned long r_symndx;
9061 bfd_byte * hit_data = contents + rel->r_offset;
9062 bfd_vma * local_got_offsets;
9063 bfd_vma * local_tlsdesc_gotents;
9064 asection * sgot;
9065 asection * splt;
9066 asection * sreloc = NULL;
9067 asection * srelgot;
9068 bfd_vma addend;
9069 bfd_signed_vma signed_addend;
9070 unsigned char dynreloc_st_type;
9071 bfd_vma dynreloc_value;
9072 struct elf32_arm_link_hash_table * globals;
9073 struct elf32_arm_link_hash_entry *eh;
9074 union gotplt_union *root_plt;
9075 struct arm_plt_info *arm_plt;
9076 bfd_vma plt_offset;
9077 bfd_vma gotplt_offset;
9078 bfd_boolean has_iplt_entry;
9079
9080 globals = elf32_arm_hash_table (info);
9081 if (globals == NULL)
9082 return bfd_reloc_notsupported;
9083
9084 BFD_ASSERT (is_arm_elf (input_bfd));
9085
9086 /* Some relocation types map to different relocations depending on the
9087 target. We pick the right one here. */
9088 r_type = arm_real_reloc_type (globals, r_type);
9089
9090 /* It is possible to have linker relaxations on some TLS access
9091 models. Update our information here. */
9092 r_type = elf32_arm_tls_transition (info, r_type, h);
9093
9094 if (r_type != howto->type)
9095 howto = elf32_arm_howto_from_type (r_type);
9096
9097 eh = (struct elf32_arm_link_hash_entry *) h;
9098 sgot = globals->root.sgot;
9099 local_got_offsets = elf_local_got_offsets (input_bfd);
9100 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
9101
9102 if (globals->root.dynamic_sections_created)
9103 srelgot = globals->root.srelgot;
9104 else
9105 srelgot = NULL;
9106
9107 r_symndx = ELF32_R_SYM (rel->r_info);
9108
9109 if (globals->use_rel)
9110 {
9111 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
9112
9113 if (addend & ((howto->src_mask + 1) >> 1))
9114 {
9115 signed_addend = -1;
9116 signed_addend &= ~ howto->src_mask;
9117 signed_addend |= addend;
9118 }
9119 else
9120 signed_addend = addend;
9121 }
9122 else
9123 addend = signed_addend = rel->r_addend;
9124
9125 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
9126 are resolving a function call relocation. */
9127 if (using_thumb_only (globals)
9128 && (r_type == R_ARM_THM_CALL
9129 || r_type == R_ARM_THM_JUMP24)
9130 && branch_type == ST_BRANCH_TO_ARM)
9131 branch_type = ST_BRANCH_TO_THUMB;
9132
9133 /* Record the symbol information that should be used in dynamic
9134 relocations. */
9135 dynreloc_st_type = st_type;
9136 dynreloc_value = value;
9137 if (branch_type == ST_BRANCH_TO_THUMB)
9138 dynreloc_value |= 1;
9139
9140 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
9141 VALUE appropriately for relocations that we resolve at link time. */
9142 has_iplt_entry = FALSE;
9143 if (elf32_arm_get_plt_info (input_bfd, eh, r_symndx, &root_plt, &arm_plt)
9144 && root_plt->offset != (bfd_vma) -1)
9145 {
9146 plt_offset = root_plt->offset;
9147 gotplt_offset = arm_plt->got_offset;
9148
9149 if (h == NULL || eh->is_iplt)
9150 {
9151 has_iplt_entry = TRUE;
9152 splt = globals->root.iplt;
9153
9154 /* Populate .iplt entries here, because not all of them will
9155 be seen by finish_dynamic_symbol. The lower bit is set if
9156 we have already populated the entry. */
9157 if (plt_offset & 1)
9158 plt_offset--;
9159 else
9160 {
9161 if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
9162 -1, dynreloc_value))
9163 root_plt->offset |= 1;
9164 else
9165 return bfd_reloc_notsupported;
9166 }
9167
9168 /* Static relocations always resolve to the .iplt entry. */
9169 st_type = STT_FUNC;
9170 value = (splt->output_section->vma
9171 + splt->output_offset
9172 + plt_offset);
9173 branch_type = ST_BRANCH_TO_ARM;
9174
9175 /* If there are non-call relocations that resolve to the .iplt
9176 entry, then all dynamic ones must too. */
9177 if (arm_plt->noncall_refcount != 0)
9178 {
9179 dynreloc_st_type = st_type;
9180 dynreloc_value = value;
9181 }
9182 }
9183 else
9184 /* We populate the .plt entry in finish_dynamic_symbol. */
9185 splt = globals->root.splt;
9186 }
9187 else
9188 {
9189 splt = NULL;
9190 plt_offset = (bfd_vma) -1;
9191 gotplt_offset = (bfd_vma) -1;
9192 }
9193
9194 switch (r_type)
9195 {
9196 case R_ARM_NONE:
9197 /* We don't need to find a value for this symbol. It's just a
9198 marker. */
9199 *unresolved_reloc_p = FALSE;
9200 return bfd_reloc_ok;
9201
9202 case R_ARM_ABS12:
9203 if (!globals->vxworks_p)
9204 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
9205
9206 case R_ARM_PC24:
9207 case R_ARM_ABS32:
9208 case R_ARM_ABS32_NOI:
9209 case R_ARM_REL32:
9210 case R_ARM_REL32_NOI:
9211 case R_ARM_CALL:
9212 case R_ARM_JUMP24:
9213 case R_ARM_XPC25:
9214 case R_ARM_PREL31:
9215 case R_ARM_PLT32:
9216 /* Handle relocations which should use the PLT entry. ABS32/REL32
9217 will use the symbol's value, which may point to a PLT entry, but we
9218 don't need to handle that here. If we created a PLT entry, all
9219 branches in this object should go to it, except if the PLT is too
9220 far away, in which case a long branch stub should be inserted. */
9221 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
9222 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
9223 && r_type != R_ARM_CALL
9224 && r_type != R_ARM_JUMP24
9225 && r_type != R_ARM_PLT32)
9226 && plt_offset != (bfd_vma) -1)
9227 {
9228 /* If we've created a .plt section, and assigned a PLT entry
9229 to this function, it must either be a STT_GNU_IFUNC reference
9230 or not be known to bind locally. In other cases, we should
9231 have cleared the PLT entry by now. */
9232 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
9233
9234 value = (splt->output_section->vma
9235 + splt->output_offset
9236 + plt_offset);
9237 *unresolved_reloc_p = FALSE;
9238 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9239 contents, rel->r_offset, value,
9240 rel->r_addend);
9241 }
9242
9243 /* When generating a shared object or relocatable executable, these
9244 relocations are copied into the output file to be resolved at
9245 run time. */
9246 if ((bfd_link_pic (info)
9247 || globals->root.is_relocatable_executable)
9248 && (input_section->flags & SEC_ALLOC)
9249 && !(globals->vxworks_p
9250 && strcmp (input_section->output_section->name,
9251 ".tls_vars") == 0)
9252 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
9253 || !SYMBOL_CALLS_LOCAL (info, h))
9254 && !(input_bfd == globals->stub_bfd
9255 && strstr (input_section->name, STUB_SUFFIX))
9256 && (h == NULL
9257 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
9258 || h->root.type != bfd_link_hash_undefweak)
9259 && r_type != R_ARM_PC24
9260 && r_type != R_ARM_CALL
9261 && r_type != R_ARM_JUMP24
9262 && r_type != R_ARM_PREL31
9263 && r_type != R_ARM_PLT32)
9264 {
9265 Elf_Internal_Rela outrel;
9266 bfd_boolean skip, relocate;
9267
9268 if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
9269 && !h->def_regular)
9270 {
9271 char *v = _("shared object");
9272
9273 if (bfd_link_executable (info))
9274 v = _("PIE executable");
9275
9276 (*_bfd_error_handler)
9277 (_("%B: relocation %s against external or undefined symbol `%s'"
9278 " can not be used when making a %s; recompile with -fPIC"), input_bfd,
9279 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
9280 return bfd_reloc_notsupported;
9281 }
9282
9283 *unresolved_reloc_p = FALSE;
9284
9285 if (sreloc == NULL && globals->root.dynamic_sections_created)
9286 {
9287 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
9288 ! globals->use_rel);
9289
9290 if (sreloc == NULL)
9291 return bfd_reloc_notsupported;
9292 }
9293
9294 skip = FALSE;
9295 relocate = FALSE;
9296
9297 outrel.r_addend = addend;
9298 outrel.r_offset =
9299 _bfd_elf_section_offset (output_bfd, info, input_section,
9300 rel->r_offset);
9301 if (outrel.r_offset == (bfd_vma) -1)
9302 skip = TRUE;
9303 else if (outrel.r_offset == (bfd_vma) -2)
9304 skip = TRUE, relocate = TRUE;
9305 outrel.r_offset += (input_section->output_section->vma
9306 + input_section->output_offset);
9307
9308 if (skip)
9309 memset (&outrel, 0, sizeof outrel);
9310 else if (h != NULL
9311 && h->dynindx != -1
9312 && (!bfd_link_pic (info)
9313 || !SYMBOLIC_BIND (info, h)
9314 || !h->def_regular))
9315 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
9316 else
9317 {
9318 int symbol;
9319
9320 /* This symbol is local, or marked to become local. */
9321 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI);
9322 if (globals->symbian_p)
9323 {
9324 asection *osec;
9325
 9326 /* On Symbian OS, the data segment and text segment
9327 can be relocated independently. Therefore, we
9328 must indicate the segment to which this
9329 relocation is relative. The BPABI allows us to
9330 use any symbol in the right segment; we just use
9331 the section symbol as it is convenient. (We
9332 cannot use the symbol given by "h" directly as it
9333 will not appear in the dynamic symbol table.)
9334
9335 Note that the dynamic linker ignores the section
9336 symbol value, so we don't subtract osec->vma
9337 from the emitted reloc addend. */
9338 if (sym_sec)
9339 osec = sym_sec->output_section;
9340 else
9341 osec = input_section->output_section;
9342 symbol = elf_section_data (osec)->dynindx;
9343 if (symbol == 0)
9344 {
9345 struct elf_link_hash_table *htab = elf_hash_table (info);
9346
9347 if ((osec->flags & SEC_READONLY) == 0
9348 && htab->data_index_section != NULL)
9349 osec = htab->data_index_section;
9350 else
9351 osec = htab->text_index_section;
9352 symbol = elf_section_data (osec)->dynindx;
9353 }
9354 BFD_ASSERT (symbol != 0);
9355 }
9356 else
9357 /* On SVR4-ish systems, the dynamic loader cannot
9358 relocate the text and data segments independently,
9359 so the symbol does not matter. */
9360 symbol = 0;
9361 if (dynreloc_st_type == STT_GNU_IFUNC)
9362 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
9363 to the .iplt entry. Instead, every non-call reference
9364 must use an R_ARM_IRELATIVE relocation to obtain the
9365 correct run-time address. */
9366 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
9367 else
9368 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
9369 if (globals->use_rel)
9370 relocate = TRUE;
9371 else
9372 outrel.r_addend += dynreloc_value;
9373 }
9374
9375 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
9376
9377 /* If this reloc is against an external symbol, we do not want to
9378 fiddle with the addend. Otherwise, we need to include the symbol
9379 value so that it becomes an addend for the dynamic reloc. */
9380 if (! relocate)
9381 return bfd_reloc_ok;
9382
9383 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9384 contents, rel->r_offset,
9385 dynreloc_value, (bfd_vma) 0);
9386 }
9387 else switch (r_type)
9388 {
9389 case R_ARM_ABS12:
9390 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
9391
9392 case R_ARM_XPC25: /* Arm BLX instruction. */
9393 case R_ARM_CALL:
9394 case R_ARM_JUMP24:
9395 case R_ARM_PC24: /* Arm B/BL instruction. */
9396 case R_ARM_PLT32:
9397 {
9398 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
9399
9400 if (r_type == R_ARM_XPC25)
9401 {
9402 /* Check for Arm calling Arm function. */
9403 /* FIXME: Should we translate the instruction into a BL
9404 instruction instead ? */
9405 if (branch_type != ST_BRANCH_TO_THUMB)
9406 (*_bfd_error_handler)
9407 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
9408 input_bfd,
9409 h ? h->root.root.string : "(local)");
9410 }
9411 else if (r_type == R_ARM_PC24)
9412 {
9413 /* Check for Arm calling Thumb function. */
9414 if (branch_type == ST_BRANCH_TO_THUMB)
9415 {
9416 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
9417 output_bfd, input_section,
9418 hit_data, sym_sec, rel->r_offset,
9419 signed_addend, value,
9420 error_message))
9421 return bfd_reloc_ok;
9422 else
9423 return bfd_reloc_dangerous;
9424 }
9425 }
9426
9427 /* Check if a stub has to be inserted because the
9428 destination is too far or we are changing mode. */
9429 if ( r_type == R_ARM_CALL
9430 || r_type == R_ARM_JUMP24
9431 || r_type == R_ARM_PLT32)
9432 {
9433 enum elf32_arm_stub_type stub_type = arm_stub_none;
9434 struct elf32_arm_link_hash_entry *hash;
9435
9436 hash = (struct elf32_arm_link_hash_entry *) h;
9437 stub_type = arm_type_of_stub (info, input_section, rel,
9438 st_type, &branch_type,
9439 hash, value, sym_sec,
9440 input_bfd, sym_name);
9441
9442 if (stub_type != arm_stub_none)
9443 {
9444 /* The target is out of reach, so redirect the
9445 branch to the local stub for this function. */
9446 stub_entry = elf32_arm_get_stub_entry (input_section,
9447 sym_sec, h,
9448 rel, globals,
9449 stub_type);
9450 {
9451 if (stub_entry != NULL)
9452 value = (stub_entry->stub_offset
9453 + stub_entry->stub_sec->output_offset
9454 + stub_entry->stub_sec->output_section->vma);
9455
9456 if (plt_offset != (bfd_vma) -1)
9457 *unresolved_reloc_p = FALSE;
9458 }
9459 }
9460 else
9461 {
9462 /* If the call goes through a PLT entry, make sure to
9463 check distance to the right destination address. */
9464 if (plt_offset != (bfd_vma) -1)
9465 {
9466 value = (splt->output_section->vma
9467 + splt->output_offset
9468 + plt_offset);
9469 *unresolved_reloc_p = FALSE;
9470 /* The PLT entry is in ARM mode, regardless of the
9471 target function. */
9472 branch_type = ST_BRANCH_TO_ARM;
9473 }
9474 }
9475 }
9476
9477 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
9478 where:
9479 S is the address of the symbol in the relocation.
9480 P is address of the instruction being relocated.
9481 A is the addend (extracted from the instruction) in bytes.
9482
9483 S is held in 'value'.
9484 P is the base address of the section containing the
9485 instruction plus the offset of the reloc into that
9486 section, ie:
9487 (input_section->output_section->vma +
9488 input_section->output_offset +
9489 rel->r_offset).
9490 A is the addend, converted into bytes, ie:
9491 (signed_addend * 4)
9492
9493 Note: None of these operations have knowledge of the pipeline
9494 size of the processor, thus it is up to the assembler to
9495 encode this information into the addend. */
9496 value -= (input_section->output_section->vma
9497 + input_section->output_offset);
9498 value -= rel->r_offset;
9499 if (globals->use_rel)
9500 value += (signed_addend << howto->size);
9501 else
9502 /* RELA addends do not have to be adjusted by howto->size. */
9503 value += signed_addend;
9504
9505 signed_addend = value;
9506 signed_addend >>= howto->rightshift;
9507
9508 /* A branch to an undefined weak symbol is turned into a jump to
9509 the next instruction unless a PLT entry will be created.
9510 Do the same for local undefined symbols (but not for STN_UNDEF).
9511 The jump to the next instruction is optimized as a NOP depending
9512 on the architecture. */
9513 if (h ? (h->root.type == bfd_link_hash_undefweak
9514 && plt_offset == (bfd_vma) -1)
9515 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
9516 {
9517 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
9518
9519 if (arch_has_arm_nop (globals))
9520 value |= 0x0320f000;
9521 else
9522 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
9523 }
9524 else
9525 {
9526 /* Perform a signed range check. */
9527 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
9528 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
9529 return bfd_reloc_overflow;
9530
9531 addend = (value & 2);
9532
9533 value = (signed_addend & howto->dst_mask)
9534 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
9535
9536 if (r_type == R_ARM_CALL)
9537 {
9538 /* Set the H bit in the BLX instruction. */
9539 if (branch_type == ST_BRANCH_TO_THUMB)
9540 {
9541 if (addend)
9542 value |= (1 << 24);
9543 else
9544 value &= ~(bfd_vma)(1 << 24);
9545 }
9546
9547 /* Select the correct instruction (BL or BLX). */
9548 /* Only if we are not handling a BL to a stub. In this
9549 case, mode switching is performed by the stub. */
9550 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
9551 value |= (1 << 28);
9552 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
9553 {
9554 value &= ~(bfd_vma)(1 << 28);
9555 value |= (1 << 24);
9556 }
9557 }
9558 }
9559 }
9560 break;
9561
9562 case R_ARM_ABS32:
9563 value += addend;
9564 if (branch_type == ST_BRANCH_TO_THUMB)
9565 value |= 1;
9566 break;
9567
9568 case R_ARM_ABS32_NOI:
9569 value += addend;
9570 break;
9571
9572 case R_ARM_REL32:
9573 value += addend;
9574 if (branch_type == ST_BRANCH_TO_THUMB)
9575 value |= 1;
9576 value -= (input_section->output_section->vma
9577 + input_section->output_offset + rel->r_offset);
9578 break;
9579
9580 case R_ARM_REL32_NOI:
9581 value += addend;
9582 value -= (input_section->output_section->vma
9583 + input_section->output_offset + rel->r_offset);
9584 break;
9585
9586 case R_ARM_PREL31:
9587 value -= (input_section->output_section->vma
9588 + input_section->output_offset + rel->r_offset);
9589 value += signed_addend;
9590 if (! h || h->root.type != bfd_link_hash_undefweak)
9591 {
9592 /* Check for overflow. */
9593 if ((value ^ (value >> 1)) & (1 << 30))
9594 return bfd_reloc_overflow;
9595 }
9596 value &= 0x7fffffff;
9597 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
9598 if (branch_type == ST_BRANCH_TO_THUMB)
9599 value |= 1;
9600 break;
9601 }
9602
9603 bfd_put_32 (input_bfd, value, hit_data);
9604 return bfd_reloc_ok;
9605
9606 case R_ARM_ABS8:
 9607 /* PR 16202: Refetch the addend using the correct size. */
9608 if (globals->use_rel)
9609 addend = bfd_get_8 (input_bfd, hit_data);
9610 value += addend;
9611
9612 /* There is no way to tell whether the user intended to use a signed or
9613 unsigned addend. When checking for overflow we accept either,
9614 as specified by the AAELF. */
9615 if ((long) value > 0xff || (long) value < -0x80)
9616 return bfd_reloc_overflow;
9617
9618 bfd_put_8 (input_bfd, value, hit_data);
9619 return bfd_reloc_ok;
9620
9621 case R_ARM_ABS16:
 9622 /* PR 16202: Refetch the addend using the correct size. */
9623 if (globals->use_rel)
9624 addend = bfd_get_16 (input_bfd, hit_data);
9625 value += addend;
9626
9627 /* See comment for R_ARM_ABS8. */
9628 if ((long) value > 0xffff || (long) value < -0x8000)
9629 return bfd_reloc_overflow;
9630
9631 bfd_put_16 (input_bfd, value, hit_data);
9632 return bfd_reloc_ok;
9633
9634 case R_ARM_THM_ABS5:
9635 /* Support ldr and str instructions for the thumb. */
9636 if (globals->use_rel)
9637 {
9638 /* Need to refetch addend. */
9639 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
9640 /* ??? Need to determine shift amount from operand size. */
9641 addend >>= howto->rightshift;
9642 }
9643 value += addend;
9644
9645 /* ??? Isn't value unsigned? */
9646 if ((long) value > 0x1f || (long) value < -0x10)
9647 return bfd_reloc_overflow;
9648
9649 /* ??? Value needs to be properly shifted into place first. */
9650 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
9651 bfd_put_16 (input_bfd, value, hit_data);
9652 return bfd_reloc_ok;
9653
9654 case R_ARM_THM_ALU_PREL_11_0:
9655 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
9656 {
9657 bfd_vma insn;
9658 bfd_signed_vma relocation;
9659
9660 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
9661 | bfd_get_16 (input_bfd, hit_data + 2);
9662
9663 if (globals->use_rel)
9664 {
9665 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
9666 | ((insn & (1 << 26)) >> 15);
9667 if (insn & 0xf00000)
9668 signed_addend = -signed_addend;
9669 }
9670
9671 relocation = value + signed_addend;
9672 relocation -= Pa (input_section->output_section->vma
9673 + input_section->output_offset
9674 + rel->r_offset);
9675
9676 value = relocation;
9677
9678 if (value >= 0x1000)
9679 return bfd_reloc_overflow;
9680
9681 insn = (insn & 0xfb0f8f00) | (value & 0xff)
9682 | ((value & 0x700) << 4)
9683 | ((value & 0x800) << 15);
9684 if (relocation < 0)
9685 insn |= 0xa00000;
9686
9687 bfd_put_16 (input_bfd, insn >> 16, hit_data);
9688 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
9689
9690 return bfd_reloc_ok;
9691 }
9692
9693 case R_ARM_THM_PC8:
9694 /* PR 10073: This reloc is not generated by the GNU toolchain,
9695 but it is supported for compatibility with third party libraries
9696 generated by other compilers, specifically the ARM/IAR. */
9697 {
9698 bfd_vma insn;
9699 bfd_signed_vma relocation;
9700
9701 insn = bfd_get_16 (input_bfd, hit_data);
9702
9703 if (globals->use_rel)
9704 addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;
9705
9706 relocation = value + addend;
9707 relocation -= Pa (input_section->output_section->vma
9708 + input_section->output_offset
9709 + rel->r_offset);
9710
9711 value = relocation;
9712
9713 /* We do not check for overflow of this reloc. Although strictly
9714 speaking this is incorrect, it appears to be necessary in order
9715 to work with IAR generated relocs. Since GCC and GAS do not
9716 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
9717 a problem for them. */
9718 value &= 0x3fc;
9719
9720 insn = (insn & 0xff00) | (value >> 2);
9721
9722 bfd_put_16 (input_bfd, insn, hit_data);
9723
9724 return bfd_reloc_ok;
9725 }
9726
9727 case R_ARM_THM_PC12:
9728 /* Corresponds to: ldr.w reg, [pc, #offset]. */
9729 {
9730 bfd_vma insn;
9731 bfd_signed_vma relocation;
9732
9733 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
9734 | bfd_get_16 (input_bfd, hit_data + 2);
9735
9736 if (globals->use_rel)
9737 {
9738 signed_addend = insn & 0xfff;
9739 if (!(insn & (1 << 23)))
9740 signed_addend = -signed_addend;
9741 }
9742
9743 relocation = value + signed_addend;
9744 relocation -= Pa (input_section->output_section->vma
9745 + input_section->output_offset
9746 + rel->r_offset);
9747
9748 value = relocation;
9749
9750 if (value >= 0x1000)
9751 return bfd_reloc_overflow;
9752
9753 insn = (insn & 0xff7ff000) | value;
9754 if (relocation >= 0)
9755 insn |= (1 << 23);
9756
9757 bfd_put_16 (input_bfd, insn >> 16, hit_data);
9758 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
9759
9760 return bfd_reloc_ok;
9761 }
9762
9763 case R_ARM_THM_XPC22:
9764 case R_ARM_THM_CALL:
9765 case R_ARM_THM_JUMP24:
9766 /* Thumb BL (branch long instruction). */
9767 {
9768 bfd_vma relocation;
9769 bfd_vma reloc_sign;
9770 bfd_boolean overflow = FALSE;
9771 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
9772 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
9773 bfd_signed_vma reloc_signed_max;
9774 bfd_signed_vma reloc_signed_min;
9775 bfd_vma check;
9776 bfd_signed_vma signed_check;
9777 int bitsize;
9778 const int thumb2 = using_thumb2 (globals);
9779
9780 /* A branch to an undefined weak symbol is turned into a jump to
9781 the next instruction unless a PLT entry will be created.
9782 The jump to the next instruction is optimized as a NOP.W for
9783 Thumb-2 enabled architectures. */
9784 if (h && h->root.type == bfd_link_hash_undefweak
9785 && plt_offset == (bfd_vma) -1)
9786 {
9787 if (arch_has_thumb2_nop (globals))
9788 {
9789 bfd_put_16 (input_bfd, 0xf3af, hit_data);
9790 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
9791 }
9792 else
9793 {
9794 bfd_put_16 (input_bfd, 0xe000, hit_data);
9795 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
9796 }
9797 return bfd_reloc_ok;
9798 }
9799
9800 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
9801 with Thumb-1) involving the J1 and J2 bits. */
9802 if (globals->use_rel)
9803 {
9804 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
9805 bfd_vma upper = upper_insn & 0x3ff;
9806 bfd_vma lower = lower_insn & 0x7ff;
9807 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
9808 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
9809 bfd_vma i1 = j1 ^ s ? 0 : 1;
9810 bfd_vma i2 = j2 ^ s ? 0 : 1;
9811
9812 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
9813 /* Sign extend. */
9814 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
9815
9816 signed_addend = addend;
9817 }
9818
9819 if (r_type == R_ARM_THM_XPC22)
9820 {
9821 /* Check for Thumb to Thumb call. */
9822 /* FIXME: Should we translate the instruction into a BL
9823 instruction instead ? */
9824 if (branch_type == ST_BRANCH_TO_THUMB)
9825 (*_bfd_error_handler)
9826 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
9827 input_bfd,
9828 h ? h->root.root.string : "(local)");
9829 }
9830 else
9831 {
9832 /* If it is not a call to Thumb, assume call to Arm.
9833 If it is a call relative to a section name, then it is not a
9834 function call at all, but rather a long jump. Calls through
9835 the PLT do not require stubs. */
9836 if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
9837 {
9838 if (globals->use_blx && r_type == R_ARM_THM_CALL)
9839 {
9840 /* Convert BL to BLX. */
9841 lower_insn = (lower_insn & ~0x1000) | 0x0800;
9842 }
9843 else if (( r_type != R_ARM_THM_CALL)
9844 && (r_type != R_ARM_THM_JUMP24))
9845 {
9846 if (elf32_thumb_to_arm_stub
9847 (info, sym_name, input_bfd, output_bfd, input_section,
9848 hit_data, sym_sec, rel->r_offset, signed_addend, value,
9849 error_message))
9850 return bfd_reloc_ok;
9851 else
9852 return bfd_reloc_dangerous;
9853 }
9854 }
9855 else if (branch_type == ST_BRANCH_TO_THUMB
9856 && globals->use_blx
9857 && r_type == R_ARM_THM_CALL)
9858 {
9859 /* Make sure this is a BL. */
9860 lower_insn |= 0x1800;
9861 }
9862 }
9863
9864 enum elf32_arm_stub_type stub_type = arm_stub_none;
9865 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
9866 {
9867 /* Check if a stub has to be inserted because the destination
9868 is too far. */
9869 struct elf32_arm_stub_hash_entry *stub_entry;
9870 struct elf32_arm_link_hash_entry *hash;
9871
9872 hash = (struct elf32_arm_link_hash_entry *) h;
9873
9874 stub_type = arm_type_of_stub (info, input_section, rel,
9875 st_type, &branch_type,
9876 hash, value, sym_sec,
9877 input_bfd, sym_name);
9878
9879 if (stub_type != arm_stub_none)
9880 {
9881 /* The target is out of reach or we are changing modes, so
9882 redirect the branch to the local stub for this
9883 function. */
9884 stub_entry = elf32_arm_get_stub_entry (input_section,
9885 sym_sec, h,
9886 rel, globals,
9887 stub_type);
9888 if (stub_entry != NULL)
9889 {
9890 value = (stub_entry->stub_offset
9891 + stub_entry->stub_sec->output_offset
9892 + stub_entry->stub_sec->output_section->vma);
9893
9894 if (plt_offset != (bfd_vma) -1)
9895 *unresolved_reloc_p = FALSE;
9896 }
9897
9898 /* If this call becomes a call to Arm, force BLX. */
9899 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
9900 {
9901 if ((stub_entry
9902 && !arm_stub_is_thumb (stub_entry->stub_type))
9903 || branch_type != ST_BRANCH_TO_THUMB)
9904 lower_insn = (lower_insn & ~0x1000) | 0x0800;
9905 }
9906 }
9907 }
9908
9909 /* Handle calls via the PLT. */
9910 if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
9911 {
9912 value = (splt->output_section->vma
9913 + splt->output_offset
9914 + plt_offset);
9915
9916 if (globals->use_blx
9917 && r_type == R_ARM_THM_CALL
9918 && ! using_thumb_only (globals))
9919 {
9920 /* If the Thumb BLX instruction is available, convert
9921 the BL to a BLX instruction to call the ARM-mode
9922 PLT entry. */
9923 lower_insn = (lower_insn & ~0x1000) | 0x0800;
9924 branch_type = ST_BRANCH_TO_ARM;
9925 }
9926 else
9927 {
9928 if (! using_thumb_only (globals))
9929 /* Target the Thumb stub before the ARM PLT entry. */
9930 value -= PLT_THUMB_STUB_SIZE;
9931 branch_type = ST_BRANCH_TO_THUMB;
9932 }
9933 *unresolved_reloc_p = FALSE;
9934 }
9935
9936 relocation = value + signed_addend;
9937
9938 relocation -= (input_section->output_section->vma
9939 + input_section->output_offset
9940 + rel->r_offset);
9941
9942 check = relocation >> howto->rightshift;
9943
9944 /* If this is a signed value, the rightshift just dropped
9945 leading 1 bits (assuming twos complement). */
9946 if ((bfd_signed_vma) relocation >= 0)
9947 signed_check = check;
9948 else
9949 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
9950
 9951 /* Calculate the permissible maximum and minimum values for
9952 this relocation according to whether we're relocating for
9953 Thumb-2 or not. */
9954 bitsize = howto->bitsize;
9955 if (!thumb2)
9956 bitsize -= 2;
9957 reloc_signed_max = (1 << (bitsize - 1)) - 1;
9958 reloc_signed_min = ~reloc_signed_max;
9959
9960 /* Assumes two's complement. */
9961 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
9962 overflow = TRUE;
9963
9964 if ((lower_insn & 0x5000) == 0x4000)
9965 /* For a BLX instruction, make sure that the relocation is rounded up
9966 to a word boundary. This follows the semantics of the instruction
9967 which specifies that bit 1 of the target address will come from bit
9968 1 of the base address. */
9969 relocation = (relocation + 2) & ~ 3;
9970
9971 /* Put RELOCATION back into the insn. Assumes two's complement.
9972 We use the Thumb-2 encoding, which is safe even if dealing with
9973 a Thumb-1 instruction by virtue of our overflow check above. */
9974 reloc_sign = (signed_check < 0) ? 1 : 0;
9975 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
9976 | ((relocation >> 12) & 0x3ff)
9977 | (reloc_sign << 10);
9978 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
9979 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
9980 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
9981 | ((relocation >> 1) & 0x7ff);
9982
9983 /* Put the relocated value back in the object file: */
9984 bfd_put_16 (input_bfd, upper_insn, hit_data);
9985 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
9986
9987 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
9988 }
9989 break;
9990
9991 case R_ARM_THM_JUMP19:
9992 /* Thumb32 conditional branch instruction. */
9993 {
9994 bfd_vma relocation;
9995 bfd_boolean overflow = FALSE;
9996 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
9997 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
9998 bfd_signed_vma reloc_signed_max = 0xffffe;
9999 bfd_signed_vma reloc_signed_min = -0x100000;
10000 bfd_signed_vma signed_check;
10001 enum elf32_arm_stub_type stub_type = arm_stub_none;
10002 struct elf32_arm_stub_hash_entry *stub_entry;
10003 struct elf32_arm_link_hash_entry *hash;
10004
10005 /* Need to refetch the addend, reconstruct the top three bits,
10006 and squish the two 11 bit pieces together. */
10007 if (globals->use_rel)
10008 {
10009 bfd_vma S = (upper_insn & 0x0400) >> 10;
10010 bfd_vma upper = (upper_insn & 0x003f);
10011 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
10012 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
10013 bfd_vma lower = (lower_insn & 0x07ff);
10014
10015 upper |= J1 << 6;
10016 upper |= J2 << 7;
10017 upper |= (!S) << 8;
10018 upper -= 0x0100; /* Sign extend. */
10019
10020 addend = (upper << 12) | (lower << 1);
10021 signed_addend = addend;
10022 }
10023
10024 /* Handle calls via the PLT. */
10025 if (plt_offset != (bfd_vma) -1)
10026 {
10027 value = (splt->output_section->vma
10028 + splt->output_offset
10029 + plt_offset);
10030 /* Target the Thumb stub before the ARM PLT entry. */
10031 value -= PLT_THUMB_STUB_SIZE;
10032 *unresolved_reloc_p = FALSE;
10033 }
10034
10035 hash = (struct elf32_arm_link_hash_entry *)h;
10036
10037 stub_type = arm_type_of_stub (info, input_section, rel,
10038 st_type, &branch_type,
10039 hash, value, sym_sec,
10040 input_bfd, sym_name);
10041 if (stub_type != arm_stub_none)
10042 {
10043 stub_entry = elf32_arm_get_stub_entry (input_section,
10044 sym_sec, h,
10045 rel, globals,
10046 stub_type);
10047 if (stub_entry != NULL)
10048 {
10049 value = (stub_entry->stub_offset
10050 + stub_entry->stub_sec->output_offset
10051 + stub_entry->stub_sec->output_section->vma);
10052 }
10053 }
10054
10055 relocation = value + signed_addend;
10056 relocation -= (input_section->output_section->vma
10057 + input_section->output_offset
10058 + rel->r_offset);
10059 signed_check = (bfd_signed_vma) relocation;
10060
10061 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
10062 overflow = TRUE;
10063
10064 /* Put RELOCATION back into the insn. */
10065 {
10066 bfd_vma S = (relocation & 0x00100000) >> 20;
10067 bfd_vma J2 = (relocation & 0x00080000) >> 19;
10068 bfd_vma J1 = (relocation & 0x00040000) >> 18;
10069 bfd_vma hi = (relocation & 0x0003f000) >> 12;
10070 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
10071
10072 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
10073 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
10074 }
10075
10076 /* Put the relocated value back in the object file: */
10077 bfd_put_16 (input_bfd, upper_insn, hit_data);
10078 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
10079
10080 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
10081 }
10082
    case R_ARM_THM_JUMP11:
    case R_ARM_THM_JUMP8:
    case R_ARM_THM_JUMP6:
      /* Thumb B (branch) instruction).  The branch displacement fits in
	 a single 16-bit insn; HOWTO describes the field width and the
	 implicit right shift of the stored offset.  */
      {
	bfd_signed_vma relocation;
	bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
	bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
	bfd_signed_vma signed_check;

	/* CZB cannot jump backward.  */
	if (r_type == R_ARM_THM_JUMP6)
	  reloc_signed_min = 0;

	if (globals->use_rel)
	  {
	    /* Need to refetch addend.  */
	    addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
	    /* Sign extend: if the top bit of the field is set, fill the
	       high bits with ones.  */
	    if (addend & ((howto->src_mask + 1) >> 1))
	      {
		signed_addend = -1;
		signed_addend &= ~ howto->src_mask;
		signed_addend |= addend;
	      }
	    else
	      signed_addend = addend;
	    /* The value in the insn has been right shifted.  We need to
	       undo this, so that we can perform the address calculation
	       in terms of bytes.  */
	    signed_addend <<= howto->rightshift;
	  }
	relocation = value + signed_addend;

	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);

	relocation >>= howto->rightshift;
	signed_check = relocation;

	if (r_type == R_ARM_THM_JUMP6)
	  /* CB{N}Z splits the offset: bit 5 goes to insn bit 9 ("i"),
	     the low 5 bits to insn bits 3-7 (imm5).  */
	  relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
	else
	  relocation &= howto->dst_mask;
	/* Merge with the bits of the insn outside the offset field.  */
	relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));

	bfd_put_16 (input_bfd, relocation, hit_data);

	/* Assumes two's complement.  */
	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
	  return bfd_reloc_overflow;

	return bfd_reloc_ok;
      }
10137
10138 case R_ARM_ALU_PCREL7_0:
10139 case R_ARM_ALU_PCREL15_8:
10140 case R_ARM_ALU_PCREL23_15:
10141 {
10142 bfd_vma insn;
10143 bfd_vma relocation;
10144
10145 insn = bfd_get_32 (input_bfd, hit_data);
10146 if (globals->use_rel)
10147 {
10148 /* Extract the addend. */
10149 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
10150 signed_addend = addend;
10151 }
10152 relocation = value + signed_addend;
10153
10154 relocation -= (input_section->output_section->vma
10155 + input_section->output_offset
10156 + rel->r_offset);
10157 insn = (insn & ~0xfff)
10158 | ((howto->bitpos << 7) & 0xf00)
10159 | ((relocation >> howto->bitpos) & 0xff);
10160 bfd_put_32 (input_bfd, value, hit_data);
10161 }
10162 return bfd_reloc_ok;
10163
    case R_ARM_GNU_VTINHERIT:
    case R_ARM_GNU_VTENTRY:
      /* C++ vtable garbage-collection markers; they carry no data to
	 patch into the section contents.  */
      return bfd_reloc_ok;
10167
    case R_ARM_GOTOFF32:
      /* Relocation is relative to the start of the
	 global offset table.  */

      BFD_ASSERT (sgot != NULL);
      if (sgot == NULL)
	return bfd_reloc_notsupported;

      /* If we are addressing a Thumb function, we need to adjust the
	 address by one, so that attempts to call the function pointer will
	 correctly interpret it as Thumb code.  */
      if (branch_type == ST_BRANCH_TO_THUMB)
	value += 1;

      /* Note that sgot->output_offset is not involved in this
	 calculation.  We always want the start of .got.  If we
	 define _GLOBAL_OFFSET_TABLE in a different way, as is
	 permitted by the ABI, we might have to change this
	 calculation.  */
      value -= sgot->output_section->vma;
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
10191
    case R_ARM_GOTPC:
      /* Use global offset table as symbol value.  The reloc resolves to
	 the address of the .got output section, not to any particular
	 entry in it.  */
      BFD_ASSERT (sgot != NULL);

      if (sgot == NULL)
	return bfd_reloc_notsupported;

      /* The GOT address is known at final link time, so this reloc is
	 always resolved here.  */
      *unresolved_reloc_p = FALSE;
      value = sgot->output_section->vma;
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
10204
    case R_ARM_GOT32:
    case R_ARM_GOT_PREL:
      /* Relocation is to the entry for this symbol in the
	 global offset table.  The GOT entry is created (and its
	 dynamic relocation emitted) lazily here, the first time a GOT
	 reloc against the symbol is processed; the low bit of the
	 recorded offset tracks whether that has already happened.  */
      if (sgot == NULL)
	return bfd_reloc_notsupported;

      if (dynreloc_st_type == STT_GNU_IFUNC
	  && plt_offset != (bfd_vma) -1
	  && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
	{
	  /* We have a relocation against a locally-binding STT_GNU_IFUNC
	     symbol, and the relocation resolves directly to the runtime
	     target rather than to the .iplt entry.  This means that any
	     .got entry would be the same value as the .igot.plt entry,
	     so there's no point creating both.  */
	  sgot = globals->root.igotplt;
	  value = sgot->output_offset + gotplt_offset;
	}
      else if (h != NULL)
	{
	  /* Global symbol: GOT offset lives in the hash entry.  */
	  bfd_vma off;

	  off = h->got.offset;
	  BFD_ASSERT (off != (bfd_vma) -1);
	  if ((off & 1) != 0)
	    {
	      /* We have already processed one GOT relocation against
		 this symbol.  */
	      off &= ~1;
	      if (globals->root.dynamic_sections_created
		  && !SYMBOL_REFERENCES_LOCAL (info, h))
		*unresolved_reloc_p = FALSE;
	    }
	  else
	    {
	      Elf_Internal_Rela outrel;

	      if (h->dynindx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
		{
		  /* If the symbol doesn't resolve locally in a static
		     object, we have an undefined reference.  If the
		     symbol doesn't resolve locally in a dynamic object,
		     it should be resolved by the dynamic linker.  */
		  if (globals->root.dynamic_sections_created)
		    {
		      outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
		      *unresolved_reloc_p = FALSE;
		    }
		  else
		    outrel.r_info = 0;
		  outrel.r_addend = 0;
		}
	      else
		{
		  /* Locally-binding symbol: no symbol-based dynamic
		     reloc is needed, but an IRELATIVE or RELATIVE one
		     may be.  */
		  if (dynreloc_st_type == STT_GNU_IFUNC)
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
		  else if (bfd_link_pic (info) &&
			   (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
			    || h->root.type != bfd_link_hash_undefweak))
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
		  else
		    outrel.r_info = 0;
		  outrel.r_addend = dynreloc_value;
		}

	      /* The GOT entry is initialized to zero by default.
		 See if we should install a different value.  */
	      if (outrel.r_addend != 0
		  && (outrel.r_info == 0 || globals->use_rel))
		{
		  bfd_put_32 (output_bfd, outrel.r_addend,
			      sgot->contents + off);
		  outrel.r_addend = 0;
		}

	      if (outrel.r_info != 0)
		{
		  outrel.r_offset = (sgot->output_section->vma
				     + sgot->output_offset
				     + off);
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		}
	      /* Mark the entry as initialized.  */
	      h->got.offset |= 1;
	    }
	  value = sgot->output_offset + off;
	}
      else
	{
	  /* Local symbol: GOT offset comes from the per-bfd table.  */
	  bfd_vma off;

	  BFD_ASSERT (local_got_offsets != NULL &&
		      local_got_offsets[r_symndx] != (bfd_vma) -1);

	  off = local_got_offsets[r_symndx];

	  /* The offset must always be a multiple of 4.  We use the
	     least significant bit to record whether we have already
	     generated the necessary reloc.  */
	  if ((off & 1) != 0)
	    off &= ~1;
	  else
	    {
	      if (globals->use_rel)
		bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);

	      if (bfd_link_pic (info) || dynreloc_st_type == STT_GNU_IFUNC)
		{
		  Elf_Internal_Rela outrel;

		  outrel.r_addend = addend + dynreloc_value;
		  outrel.r_offset = (sgot->output_section->vma
				     + sgot->output_offset
				     + off);
		  if (dynreloc_st_type == STT_GNU_IFUNC)
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
		  else
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		}

	      local_got_offsets[r_symndx] |= 1;
	    }

	  value = sgot->output_offset + off;
	}
      /* GOT32 is GOT-relative; GOT_PREL (and the IFUNC short-cut above)
	 need the absolute address of the entry.  */
      if (r_type != R_ARM_GOT32)
	value += sgot->output_section->vma;

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
10337
    case R_ARM_TLS_LDO32:
      /* Local-dynamic TLS: offset of the symbol within the module's
	 TLS block (DTPOFF).  */
      value = value - dtpoff_base (info);

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
10344
    case R_ARM_TLS_LDM32:
      /* Local-dynamic TLS: GOT-relative address of the single shared
	 module-ID GOT entry.  The entry is created once; the low bit of
	 the recorded offset marks it initialized.  */
      {
	bfd_vma off;

	if (sgot == NULL)
	  abort ();

	off = globals->tls_ldm_got.offset;

	if ((off & 1) != 0)
	  off &= ~1;
	else
	  {
	    /* If we don't know the module number, create a relocation
	       for it.  */
	    if (bfd_link_pic (info))
	      {
		Elf_Internal_Rela outrel;

		if (srelgot == NULL)
		  abort ();

		outrel.r_addend = 0;
		outrel.r_offset = (sgot->output_section->vma
				   + sgot->output_offset + off);
		outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);

		/* For REL links the addend lives in the section
		   contents.  */
		if (globals->use_rel)
		  bfd_put_32 (output_bfd, outrel.r_addend,
			      sgot->contents + off);

		elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
	      }
	    else
	      /* Executable: the module is always 1.  */
	      bfd_put_32 (output_bfd, 1, sgot->contents + off);

	    globals->tls_ldm_got.offset |= 1;
	  }

	/* PC-relative offset to the GOT entry.  */
	value = sgot->output_section->vma + sgot->output_offset + off
	  - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);

	return _bfd_final_link_relocate (howto, input_bfd, input_section,
					 contents, rel->r_offset, value,
					 rel->r_addend);
      }
10391
    case R_ARM_TLS_CALL:
    case R_ARM_THM_TLS_CALL:
    case R_ARM_TLS_GD32:
    case R_ARM_TLS_IE32:
    case R_ARM_TLS_GOTDESC:
    case R_ARM_TLS_DESCSEQ:
    case R_ARM_THM_TLS_DESCSEQ:
      /* General-dynamic, initial-exec and TLS-descriptor accesses.
	 This creates the required GOT (and .got.plt, for descriptors)
	 entries on first use, emits the matching dynamic relocations,
	 and then resolves the reloc — possibly rewriting the
	 TLS_CALL/THM_TLS_CALL branch to point at the TLS trampoline or
	 a stub.  */
      {
	bfd_vma off, offplt;
	int indx = 0;
	char tls_type;

	BFD_ASSERT (sgot != NULL);

	if (h != NULL)
	  {
	    /* Global symbol: decide whether the dynamic linker must
	       resolve it (INDX != 0) or it binds locally.  */
	    bfd_boolean dyn;
	    dyn = globals->root.dynamic_sections_created;
	    if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
						 bfd_link_pic (info),
						 h)
		&& (!bfd_link_pic (info)
		    || !SYMBOL_REFERENCES_LOCAL (info, h)))
	      {
		*unresolved_reloc_p = FALSE;
		indx = h->dynindx;
	      }
	    off = h->got.offset;
	    offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
	    tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
	  }
	else
	  {
	    BFD_ASSERT (local_got_offsets != NULL);
	    off = local_got_offsets[r_symndx];
	    offplt = local_tlsdesc_gotents[r_symndx];
	    tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
	  }

	/* Linker relaxations happens from one of the
	   R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE.  */
	if (ELF32_R_TYPE(rel->r_info) != r_type)
	  tls_type = GOT_TLS_IE;

	BFD_ASSERT (tls_type != GOT_UNKNOWN);

	/* Low bit of OFF set means the GOT entries are already
	   initialized.  */
	if ((off & 1) != 0)
	  off &= ~1;
	else
	  {
	    bfd_boolean need_relocs = FALSE;
	    Elf_Internal_Rela outrel;
	    int cur_off = off;

	    /* The GOT entries have not been initialized yet.  Do it
	       now, and emit any relocations.  If both an IE GOT and a
	       GD GOT are necessary, we emit the GD first.  */

	    if ((bfd_link_pic (info) || indx != 0)
		&& (h == NULL
		    || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
		    || h->root.type != bfd_link_hash_undefweak))
	      {
		need_relocs = TRUE;
		BFD_ASSERT (srelgot != NULL);
	      }

	    if (tls_type & GOT_TLS_GDESC)
	      {
		bfd_byte *loc;

		/* We should have relaxed, unless this is an undefined
		   weak symbol.  */
		BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
			    || bfd_link_pic (info));
		BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
			    <= globals->root.sgotplt->size);

		outrel.r_addend = 0;
		outrel.r_offset = (globals->root.sgotplt->output_section->vma
				   + globals->root.sgotplt->output_offset
				   + offplt
				   + globals->sgotplt_jump_table_size);

		outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
		sreloc = globals->root.srelplt;
		loc = sreloc->contents;
		loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
		BFD_ASSERT (loc + RELOC_SIZE (globals)
			    <= sreloc->contents + sreloc->size);

		SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);

		/* For globals, the first word in the relocation gets
		   the relocation index and the top bit set, or zero,
		   if we're binding now.  For locals, it gets the
		   symbol's offset in the tls section.  */
		bfd_put_32 (output_bfd,
			    !h ? value - elf_hash_table (info)->tls_sec->vma
			    : info->flags & DF_BIND_NOW ? 0
			    : 0x80000000 | ELF32_R_SYM (outrel.r_info),
			    globals->root.sgotplt->contents + offplt
			    + globals->sgotplt_jump_table_size);

		/* Second word in the relocation is always zero.  */
		bfd_put_32 (output_bfd, 0,
			    globals->root.sgotplt->contents + offplt
			    + globals->sgotplt_jump_table_size + 4);
	      }
	    if (tls_type & GOT_TLS_GD)
	      {
		/* GD needs a pair of GOT words: module ID then
		   DTP-relative offset.  */
		if (need_relocs)
		  {
		    outrel.r_addend = 0;
		    outrel.r_offset = (sgot->output_section->vma
				       + sgot->output_offset
				       + cur_off);
		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);

		    if (globals->use_rel)
		      bfd_put_32 (output_bfd, outrel.r_addend,
				  sgot->contents + cur_off);

		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);

		    if (indx == 0)
		      bfd_put_32 (output_bfd, value - dtpoff_base (info),
				  sgot->contents + cur_off + 4);
		    else
		      {
			outrel.r_addend = 0;
			outrel.r_info = ELF32_R_INFO (indx,
						      R_ARM_TLS_DTPOFF32);
			outrel.r_offset += 4;

			if (globals->use_rel)
			  bfd_put_32 (output_bfd, outrel.r_addend,
				      sgot->contents + cur_off + 4);

			elf32_arm_add_dynreloc (output_bfd, info,
						srelgot, &outrel);
		      }
		  }
		else
		  {
		    /* If we are not emitting relocations for a
		       general dynamic reference, then we must be in a
		       static link or an executable link with the
		       symbol binding locally.  Mark it as belonging
		       to module 1, the executable.  */
		    bfd_put_32 (output_bfd, 1,
				sgot->contents + cur_off);
		    bfd_put_32 (output_bfd, value - dtpoff_base (info),
				sgot->contents + cur_off + 4);
		  }

		cur_off += 8;
	      }

	    if (tls_type & GOT_TLS_IE)
	      {
		/* IE needs a single GOT word holding the TP-relative
		   offset.  */
		if (need_relocs)
		  {
		    if (indx == 0)
		      outrel.r_addend = value - dtpoff_base (info);
		    else
		      outrel.r_addend = 0;
		    outrel.r_offset = (sgot->output_section->vma
				       + sgot->output_offset
				       + cur_off);
		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);

		    if (globals->use_rel)
		      bfd_put_32 (output_bfd, outrel.r_addend,
				  sgot->contents + cur_off);

		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		  }
		else
		  bfd_put_32 (output_bfd, tpoff (info, value),
			      sgot->contents + cur_off);
		cur_off += 4;
	      }

	    if (h != NULL)
	      h->got.offset |= 1;
	    else
	      local_got_offsets[r_symndx] |= 1;
	  }

	/* Skip the GD pair when the reloc wants the IE entry; for
	   descriptors, point at the .got.plt slot instead.  */
	if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
	  off += 8;
	else if (tls_type & GOT_TLS_GDESC)
	  off = offplt;

	if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
	    || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
	  {
	    bfd_signed_vma offset;
	    /* TLS stubs are arm mode.  The original symbol is a
	       data object, so branch_type is bogus.  */
	    branch_type = ST_BRANCH_TO_ARM;
	    enum elf32_arm_stub_type stub_type
	      = arm_type_of_stub (info, input_section, rel,
				  st_type, &branch_type,
				  (struct elf32_arm_link_hash_entry *)h,
				  globals->tls_trampoline, globals->root.splt,
				  input_bfd, sym_name);

	    if (stub_type != arm_stub_none)
	      {
		struct elf32_arm_stub_hash_entry *stub_entry
		  = elf32_arm_get_stub_entry
		  (input_section, globals->root.splt, 0, rel,
		   globals, stub_type);
		offset = (stub_entry->stub_offset
			  + stub_entry->stub_sec->output_offset
			  + stub_entry->stub_sec->output_section->vma);
	      }
	    else
	      offset = (globals->root.splt->output_section->vma
			+ globals->root.splt->output_offset
			+ globals->tls_trampoline);

	    if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
	      {
		/* Patch an ARM BL/BLX to the trampoline or stub.  */
		unsigned long inst;

		offset -= (input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 8);

		inst = offset >> 2;
		inst &= 0x00ffffff;
		value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
	      }
	    else
	      {
		/* Thumb blx encodes the offset in a complicated
		   fashion.  */
		unsigned upper_insn, lower_insn;
		unsigned neg;

		offset -= (input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 4);

		if (stub_type != arm_stub_none
		    && arm_stub_is_thumb (stub_type))
		  {
		    lower_insn = 0xd000;
		  }
		else
		  {
		    lower_insn = 0xc000;
		    /* Round up the offset to a word boundary.  */
		    offset = (offset + 2) & ~2;
		  }

		neg = offset < 0;
		upper_insn = (0xf000
			      | ((offset >> 12) & 0x3ff)
			      | (neg << 10));
		lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
			      | (((!((offset >> 22) & 1)) ^ neg) << 11)
			      | ((offset >> 1) & 0x7ff);
		bfd_put_16 (input_bfd, upper_insn, hit_data);
		bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
		return bfd_reloc_ok;
	      }
	  }
	/* These relocations needs special care, as besides the fact
	   they point somewhere in .gotplt, the addend must be
	   adjusted accordingly depending on the type of instruction
	   we refer to.  */
	else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
	  {
	    unsigned long data, insn;
	    unsigned thumb;

	    /* The in-place word encodes the distance back to the
	       consuming instruction; its low bit flags Thumb mode.  */
	    data = bfd_get_32 (input_bfd, hit_data);
	    thumb = data & 1;
	    data &= ~1u;

	    if (thumb)
	      {
		insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
		if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
		  insn = (insn << 16)
		    | bfd_get_16 (input_bfd,
				  contents + rel->r_offset - data + 2);
		if ((insn & 0xf800c000) == 0xf000c000)
		  /* bl/blx */
		  value = -6;
		else if ((insn & 0xffffff00) == 0x4400)
		  /* add */
		  value = -5;
		else
		  {
		    (*_bfd_error_handler)
		      (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' referenced by TLS_GOTDESC"),
		       input_bfd, input_section,
		       (unsigned long)rel->r_offset, insn);
		    return bfd_reloc_notsupported;
		  }
	      }
	    else
	      {
		insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);

		switch (insn >> 24)
		  {
		  case 0xeb:  /* bl */
		  case 0xfa:  /* blx */
		    value = -4;
		    break;

		  case 0xe0:	/* add */
		    value = -8;
		    break;

		  default:
		    (*_bfd_error_handler)
		      (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' referenced by TLS_GOTDESC"),
		       input_bfd, input_section,
		       (unsigned long)rel->r_offset, insn);
		    return bfd_reloc_notsupported;
		  }
	      }

	    value += ((globals->root.sgotplt->output_section->vma
		       + globals->root.sgotplt->output_offset + off)
		      - (input_section->output_section->vma
			 + input_section->output_offset
			 + rel->r_offset)
		      + globals->sgotplt_jump_table_size);
	  }
	else
	  /* Plain GD32/IE32: PC-relative offset to the GOT entry.  */
	  value = ((globals->root.sgot->output_section->vma
		    + globals->root.sgot->output_offset + off)
		   - (input_section->output_section->vma
		      + input_section->output_offset + rel->r_offset));

	return _bfd_final_link_relocate (howto, input_bfd, input_section,
					 contents, rel->r_offset, value,
					 rel->r_addend);
      }
10739
10740 case R_ARM_TLS_LE32:
10741 if (bfd_link_dll (info))
10742 {
10743 (*_bfd_error_handler)
10744 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
10745 input_bfd, input_section,
10746 (long) rel->r_offset, howto->name);
10747 return bfd_reloc_notsupported;
10748 }
10749 else
10750 value = tpoff (info, value);
10751
10752 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10753 contents, rel->r_offset, value,
10754 rel->r_addend);
10755
    case R_ARM_V4BX:
      /* Rewrite a BX instruction for ARMv4 compatibility.  With
	 fix_v4bx == 2 (and Rm != PC) the BX is replaced by a branch to
	 a per-register glue veneer; otherwise it becomes MOV PC,Rm.  */
      if (globals->fix_v4bx)
	{
	  bfd_vma insn = bfd_get_32 (input_bfd, hit_data);

	  /* Ensure that we have a BX instruction.  */
	  BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);

	  if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
	    {
	      /* Branch to veneer.  */
	      bfd_vma glue_addr;
	      glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
	      /* PC-relative branch offset; +8 accounts for the ARM
		 pipeline's PC bias.  */
	      glue_addr -= input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 8;
	      insn = (insn & 0xf0000000) | 0x0a000000
		     | ((glue_addr >> 2) & 0x00ffffff);
	    }
	  else
	    {
	      /* Preserve Rm (lowest four bits) and the condition code
		 (highest four bits).  Other bits encode MOV PC,Rm.  */
	      insn = (insn & 0xf000000f) | 0x01a0f000;
	    }

	  bfd_put_32 (input_bfd, insn, hit_data);
	}
      return bfd_reloc_ok;
10785
    case R_ARM_MOVW_ABS_NC:
    case R_ARM_MOVT_ABS:
    case R_ARM_MOVW_PREL_NC:
    case R_ARM_MOVT_PREL:
    /* Until we properly support segment-base-relative addressing then
       we assume the segment base to be zero, as for the group relocations.
       Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
       and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS.  */
    case R_ARM_MOVW_BREL_NC:
    case R_ARM_MOVW_BREL:
    case R_ARM_MOVT_BREL:
      /* ARM MOVW/MOVT: insert a 16-bit immediate split into imm4
	 (bits 16-19) and imm12 (bits 0-11).  */
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);

	if (globals->use_rel)
	  {
	    /* Reassemble the 16-bit addend from imm4:imm12 and
	       sign-extend it.  */
	    addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
	    signed_addend = (addend ^ 0x8000) - 0x8000;
	  }

	value += signed_addend;

	if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
	  value -= (input_section->output_section->vma
		    + input_section->output_offset + rel->r_offset);

	if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
	  return bfd_reloc_overflow;

	/* Set the Thumb bit so function pointers work.  */
	if (branch_type == ST_BRANCH_TO_THUMB)
	  value |= 1;

	/* MOVT variants take the high half of the value.  */
	if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
	    || r_type == R_ARM_MOVT_BREL)
	  value >>= 16;

	insn &= 0xfff0f000;
	insn |= value & 0xfff;
	insn |= (value & 0xf000) << 4;
	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
10828
    case R_ARM_THM_MOVW_ABS_NC:
    case R_ARM_THM_MOVT_ABS:
    case R_ARM_THM_MOVW_PREL_NC:
    case R_ARM_THM_MOVT_PREL:
    /* Until we properly support segment-base-relative addressing then
       we assume the segment base to be zero, as for the above relocations.
       Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
       R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
       as R_ARM_THM_MOVT_ABS.  */
    case R_ARM_THM_MOVW_BREL_NC:
    case R_ARM_THM_MOVW_BREL:
    case R_ARM_THM_MOVT_BREL:
      /* Thumb-2 MOVW/MOVT: the 16-bit immediate is scattered over the
	 two halfwords as imm4:i:imm3:imm8.  */
      {
	bfd_vma insn;

	insn = bfd_get_16 (input_bfd, hit_data) << 16;
	insn |= bfd_get_16 (input_bfd, hit_data + 2);

	if (globals->use_rel)
	  {
	    /* Reassemble the addend from the scattered fields and
	       sign-extend it.  */
	    addend = ((insn >> 4)  & 0xf000)
		     | ((insn >> 15) & 0x0800)
		     | ((insn >> 4)  & 0x0700)
		     | (insn         & 0x00ff);
	    signed_addend = (addend ^ 0x8000) - 0x8000;
	  }

	value += signed_addend;

	if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
	  value -= (input_section->output_section->vma
		    + input_section->output_offset + rel->r_offset);

	if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
	  return bfd_reloc_overflow;

	/* Set the Thumb bit so function pointers work.  */
	if (branch_type == ST_BRANCH_TO_THUMB)
	  value |= 1;

	/* MOVT variants take the high half of the value.  */
	if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
	    || r_type == R_ARM_THM_MOVT_BREL)
	  value >>= 16;

	insn &= 0xfbf08f00;
	insn |= (value & 0xf000) << 4;
	insn |= (value & 0x0800) << 15;
	insn |= (value & 0x0700) << 4;
	insn |= (value & 0x00ff);

	bfd_put_16 (input_bfd, insn >> 16, hit_data);
	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
      }
      return bfd_reloc_ok;
10882
    case R_ARM_ALU_PC_G0_NC:
    case R_ARM_ALU_PC_G1_NC:
    case R_ARM_ALU_PC_G0:
    case R_ARM_ALU_PC_G1:
    case R_ARM_ALU_PC_G2:
    case R_ARM_ALU_SB_G0_NC:
    case R_ARM_ALU_SB_G1_NC:
    case R_ARM_ALU_SB_G0:
    case R_ARM_ALU_SB_G1:
    case R_ARM_ALU_SB_G2:
      /* ALU group relocations: encode group N of the PC- or
	 SB-relative offset as the rotated 8-bit immediate of an
	 ADD/SUB instruction.  The _NC variants skip the overflow
	 check.  */
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
	bfd_vma pc = input_section->output_section->vma
		     + input_section->output_offset + rel->r_offset;
	/* sb is the origin of the *segment* containing the symbol.  */
	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
	bfd_vma residual;
	bfd_vma g_n;
	bfd_signed_vma signed_value;
	int group = 0;

	/* Determine which group of bits to select.  */
	switch (r_type)
	  {
	  case R_ARM_ALU_PC_G0_NC:
	  case R_ARM_ALU_PC_G0:
	  case R_ARM_ALU_SB_G0_NC:
	  case R_ARM_ALU_SB_G0:
	    group = 0;
	    break;

	  case R_ARM_ALU_PC_G1_NC:
	  case R_ARM_ALU_PC_G1:
	  case R_ARM_ALU_SB_G1_NC:
	  case R_ARM_ALU_SB_G1:
	    group = 1;
	    break;

	  case R_ARM_ALU_PC_G2:
	  case R_ARM_ALU_SB_G2:
	    group = 2;
	    break;

	  default:
	    abort ();
	  }

	/* If REL, extract the addend from the insn.  If RELA, it will
	   have already been fetched for us.  */
	if (globals->use_rel)
	  {
	    int negative;
	    bfd_vma constant = insn & 0xff;
	    bfd_vma rotation = (insn & 0xf00) >> 8;

	    if (rotation == 0)
	      signed_addend = constant;
	    else
	      {
		/* Compensate for the fact that in the instruction, the
		   rotation is stored in multiples of 2 bits.  */
		rotation *= 2;

		/* Rotate "constant" right by "rotation" bits.  */
		signed_addend = (constant >> rotation) |
				(constant << (8 * sizeof (bfd_vma) - rotation));
	      }

	    /* Determine if the instruction is an ADD or a SUB.
	       (For REL, this determines the sign of the addend.)  */
	    negative = identify_add_or_sub (insn);
	    if (negative == 0)
	      {
		(*_bfd_error_handler)
		  (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
		   input_bfd, input_section,
		   (long) rel->r_offset, howto->name);
		return bfd_reloc_overflow;
	      }

	    signed_addend *= negative;
	  }

	/* Compute the value (X) to go in the place.  */
	if (r_type == R_ARM_ALU_PC_G0_NC
	    || r_type == R_ARM_ALU_PC_G1_NC
	    || r_type == R_ARM_ALU_PC_G0
	    || r_type == R_ARM_ALU_PC_G1
	    || r_type == R_ARM_ALU_PC_G2)
	  /* PC relative.  */
	  signed_value = value - pc + signed_addend;
	else
	  /* Section base relative.  */
	  signed_value = value - sb + signed_addend;

	/* If the target symbol is a Thumb function, then set the
	   Thumb bit in the address.  */
	if (branch_type == ST_BRANCH_TO_THUMB)
	  signed_value |= 1;

	/* Calculate the value of the relevant G_n, in encoded
	   constant-with-rotation format.  */
	g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
					  group, &residual);

	/* Check for overflow if required.  */
	if ((r_type == R_ARM_ALU_PC_G0
	     || r_type == R_ARM_ALU_PC_G1
	     || r_type == R_ARM_ALU_PC_G2
	     || r_type == R_ARM_ALU_SB_G0
	     || r_type == R_ARM_ALU_SB_G1
	     || r_type == R_ARM_ALU_SB_G2) && residual != 0)
	  {
	    (*_bfd_error_handler)
	      (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
	       input_bfd, input_section,
	       (long) rel->r_offset, signed_value < 0 ? - signed_value : signed_value,
	       howto->name);
	    return bfd_reloc_overflow;
	  }

	/* Mask out the value and the ADD/SUB part of the opcode; take care
	   not to destroy the S bit.  */
	insn &= 0xff1ff000;

	/* Set the opcode according to whether the value to go in the
	   place is negative.  */
	if (signed_value < 0)
	  insn |= 1 << 22;
	else
	  insn |= 1 << 23;

	/* Encode the offset.  */
	insn |= g_n;

	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
11021
    case R_ARM_LDR_PC_G0:
    case R_ARM_LDR_PC_G1:
    case R_ARM_LDR_PC_G2:
    case R_ARM_LDR_SB_G0:
    case R_ARM_LDR_SB_G1:
    case R_ARM_LDR_SB_G2:
      /* LDR/STR group relocations: the residual after removing groups
	 0..N-1 of the offset goes into the instruction's 12-bit
	 unsigned immediate; the U bit records the sign.  */
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
	bfd_vma pc = input_section->output_section->vma
		     + input_section->output_offset + rel->r_offset;
	/* sb is the origin of the *segment* containing the symbol.  */
	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
	bfd_vma residual;
	bfd_signed_vma signed_value;
	int group = 0;

	/* Determine which groups of bits to calculate.  */
	switch (r_type)
	  {
	  case R_ARM_LDR_PC_G0:
	  case R_ARM_LDR_SB_G0:
	    group = 0;
	    break;

	  case R_ARM_LDR_PC_G1:
	  case R_ARM_LDR_SB_G1:
	    group = 1;
	    break;

	  case R_ARM_LDR_PC_G2:
	  case R_ARM_LDR_SB_G2:
	    group = 2;
	    break;

	  default:
	    abort ();
	  }

	/* If REL, extract the addend from the insn.  If RELA, it will
	   have already been fetched for us.  */
	if (globals->use_rel)
	  {
	    /* The U bit (bit 23) gives the sign of the 12-bit offset.  */
	    int negative = (insn & (1 << 23)) ? 1 : -1;
	    signed_addend = negative * (insn & 0xfff);
	  }

	/* Compute the value (X) to go in the place.  */
	if (r_type == R_ARM_LDR_PC_G0
	    || r_type == R_ARM_LDR_PC_G1
	    || r_type == R_ARM_LDR_PC_G2)
	  /* PC relative.  */
	  signed_value = value - pc + signed_addend;
	else
	  /* Section base relative.  */
	  signed_value = value - sb + signed_addend;

	/* Calculate the value of the relevant G_{n-1} to obtain
	   the residual at that stage.  */
	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
				    group - 1, &residual);

	/* Check for overflow.  */
	if (residual >= 0x1000)
	  {
	    (*_bfd_error_handler)
	      (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
	       input_bfd, input_section,
	       (long) rel->r_offset, labs (signed_value), howto->name);
	    return bfd_reloc_overflow;
	  }

	/* Mask out the value and U bit.  */
	insn &= 0xff7ff000;

	/* Set the U bit if the value to go in the place is non-negative.  */
	if (signed_value >= 0)
	  insn |= 1 << 23;

	/* Encode the offset.  */
	insn |= residual;

	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
11106
    case R_ARM_LDRS_PC_G0:
    case R_ARM_LDRS_PC_G1:
    case R_ARM_LDRS_PC_G2:
    case R_ARM_LDRS_SB_G0:
    case R_ARM_LDRS_SB_G1:
    case R_ARM_LDRS_SB_G2:
      /* LDRD/LDRH-class group relocations: like the LDR group ones,
	 but the immediate is 8 bits split into two nibbles
	 (bits 8-11 and 0-3), so the residual must fit in 8 bits.  */
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
	bfd_vma pc = input_section->output_section->vma
		     + input_section->output_offset + rel->r_offset;
	/* sb is the origin of the *segment* containing the symbol.  */
	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
	bfd_vma residual;
	bfd_signed_vma signed_value;
	int group = 0;

	/* Determine which groups of bits to calculate.  */
	switch (r_type)
	  {
	  case R_ARM_LDRS_PC_G0:
	  case R_ARM_LDRS_SB_G0:
	    group = 0;
	    break;

	  case R_ARM_LDRS_PC_G1:
	  case R_ARM_LDRS_SB_G1:
	    group = 1;
	    break;

	  case R_ARM_LDRS_PC_G2:
	  case R_ARM_LDRS_SB_G2:
	    group = 2;
	    break;

	  default:
	    abort ();
	  }

	/* If REL, extract the addend from the insn.  If RELA, it will
	   have already been fetched for us.  */
	if (globals->use_rel)
	  {
	    /* The U bit (bit 23) gives the sign; reassemble the split
	       8-bit immediate.  */
	    int negative = (insn & (1 << 23)) ? 1 : -1;
	    signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
	  }

	/* Compute the value (X) to go in the place.  */
	if (r_type == R_ARM_LDRS_PC_G0
	    || r_type == R_ARM_LDRS_PC_G1
	    || r_type == R_ARM_LDRS_PC_G2)
	  /* PC relative.  */
	  signed_value = value - pc + signed_addend;
	else
	  /* Section base relative.  */
	  signed_value = value - sb + signed_addend;

	/* Calculate the value of the relevant G_{n-1} to obtain
	   the residual at that stage.  */
	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
				    group - 1, &residual);

	/* Check for overflow.  */
	if (residual >= 0x100)
	  {
	    (*_bfd_error_handler)
	      (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
	       input_bfd, input_section,
	       (long) rel->r_offset, labs (signed_value), howto->name);
	    return bfd_reloc_overflow;
	  }

	/* Mask out the value and U bit.  */
	insn &= 0xff7ff0f0;

	/* Set the U bit if the value to go in the place is non-negative.  */
	if (signed_value >= 0)
	  insn |= 1 << 23;

	/* Encode the offset.  */
	insn |= ((residual & 0xf0) << 4) | (residual & 0xf);

	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
11191
11192 case R_ARM_LDC_PC_G0:
11193 case R_ARM_LDC_PC_G1:
11194 case R_ARM_LDC_PC_G2:
11195 case R_ARM_LDC_SB_G0:
11196 case R_ARM_LDC_SB_G1:
11197 case R_ARM_LDC_SB_G2:
11198 {
11199 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
11200 bfd_vma pc = input_section->output_section->vma
11201 + input_section->output_offset + rel->r_offset;
11202 /* sb is the origin of the *segment* containing the symbol. */
11203 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
11204 bfd_vma residual;
11205 bfd_signed_vma signed_value;
11206 int group = 0;
11207
11208 /* Determine which groups of bits to calculate. */
11209 switch (r_type)
11210 {
11211 case R_ARM_LDC_PC_G0:
11212 case R_ARM_LDC_SB_G0:
11213 group = 0;
11214 break;
11215
11216 case R_ARM_LDC_PC_G1:
11217 case R_ARM_LDC_SB_G1:
11218 group = 1;
11219 break;
11220
11221 case R_ARM_LDC_PC_G2:
11222 case R_ARM_LDC_SB_G2:
11223 group = 2;
11224 break;
11225
11226 default:
11227 abort ();
11228 }
11229
11230 /* If REL, extract the addend from the insn. If RELA, it will
11231 have already been fetched for us. */
11232 if (globals->use_rel)
11233 {
11234 int negative = (insn & (1 << 23)) ? 1 : -1;
11235 signed_addend = negative * ((insn & 0xff) << 2);
11236 }
11237
11238 /* Compute the value (X) to go in the place. */
11239 if (r_type == R_ARM_LDC_PC_G0
11240 || r_type == R_ARM_LDC_PC_G1
11241 || r_type == R_ARM_LDC_PC_G2)
11242 /* PC relative. */
11243 signed_value = value - pc + signed_addend;
11244 else
11245 /* Section base relative. */
11246 signed_value = value - sb + signed_addend;
11247
11248 /* Calculate the value of the relevant G_{n-1} to obtain
11249 the residual at that stage. */
11250 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
11251 group - 1, &residual);
11252
11253 /* Check for overflow. (The absolute value to go in the place must be
11254 divisible by four and, after having been divided by four, must
11255 fit in eight bits.) */
11256 if ((residual & 0x3) != 0 || residual >= 0x400)
11257 {
11258 (*_bfd_error_handler)
11259 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11260 input_bfd, input_section,
11261 (long) rel->r_offset, labs (signed_value), howto->name);
11262 return bfd_reloc_overflow;
11263 }
11264
11265 /* Mask out the value and U bit. */
11266 insn &= 0xff7fff00;
11267
11268 /* Set the U bit if the value to go in the place is non-negative. */
11269 if (signed_value >= 0)
11270 insn |= 1 << 23;
11271
11272 /* Encode the offset. */
11273 insn |= residual >> 2;
11274
11275 bfd_put_32 (input_bfd, insn, hit_data);
11276 }
11277 return bfd_reloc_ok;
11278
11279 case R_ARM_THM_ALU_ABS_G0_NC:
11280 case R_ARM_THM_ALU_ABS_G1_NC:
11281 case R_ARM_THM_ALU_ABS_G2_NC:
11282 case R_ARM_THM_ALU_ABS_G3_NC:
11283 {
11284 const int shift_array[4] = {0, 8, 16, 24};
11285 bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
11286 bfd_vma addr = value;
11287 int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];
11288
11289 /* Compute address. */
11290 if (globals->use_rel)
11291 signed_addend = insn & 0xff;
11292 addr += signed_addend;
11293 if (branch_type == ST_BRANCH_TO_THUMB)
11294 addr |= 1;
11295 /* Clean imm8 insn. */
11296 insn &= 0xff00;
11297 /* And update with correct part of address. */
11298 insn |= (addr >> shift) & 0xff;
11299 /* Update insn. */
11300 bfd_put_16 (input_bfd, insn, hit_data);
11301 }
11302
11303 *unresolved_reloc_p = FALSE;
11304 return bfd_reloc_ok;
11305
11306 default:
11307 return bfd_reloc_notsupported;
11308 }
11309 }
11310
/* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS.  Used during
   relocatable links with REL-style relocs, where the addend is stored in
   the section contents rather than in the relocation entry.  */
static void
arm_add_to_rel (bfd *              abfd,
		bfd_byte *         address,
		reloc_howto_type * howto,
		bfd_signed_vma     increment)
{
  bfd_signed_vma addend;

  if (howto->type == R_ARM_THM_CALL
      || howto->type == R_ARM_THM_JUMP24)
    {
      /* Thumb branches keep the offset split across two 16-bit
	 instruction halfwords, 11 bits in each.  */
      int upper_insn, lower_insn;
      int upper, lower;

      upper_insn = bfd_get_16 (abfd, address);
      lower_insn = bfd_get_16 (abfd, address + 2);
      upper = upper_insn & 0x7ff;
      lower = lower_insn & 0x7ff;

      /* Reassemble the halfword-aligned offset (hence the shift by one),
	 adjust it, then drop the alignment bit again.  */
      addend = (upper << 12) | (lower << 1);
      addend += increment;
      addend >>= 1;

      /* Re-split the adjusted offset back into the two halfwords.  */
      upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
      lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);

      bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
      bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
    }
  else
    {
      bfd_vma contents;

      contents = bfd_get_32 (abfd, address);

      /* Get the (signed) value from the instruction.  */
      addend = contents & howto->src_mask;
      if (addend & ((howto->src_mask + 1) >> 1))
	{
	  /* The field's sign bit is set: sign-extend by OR-ing in all
	     bits above the source mask.  */
	  bfd_signed_vma mask;

	  mask = -1;
	  mask &= ~ howto->src_mask;
	  addend |= mask;
	}

      /* Add in the increment, (which is a byte value).  */
      switch (howto->type)
	{
	default:
	  addend += increment;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	  /* These store a scaled offset: widen to bytes before adding the
	     byte increment, then shift the unwanted low bits back out.
	     NOTE(review): howto->size is used as the scale shift here --
	     presumably 2 for these 32-bit relocs; confirm against the
	     howto table.  */
	  addend <<= howto->size;
	  addend += increment;

	  /* Should we check for overflow here ?  */

	  /* Drop any undesired bits.  */
	  addend >>= howto->rightshift;
	  break;
	}

      contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);

      bfd_put_32 (abfd, contents, address);
    }
}
11384
/* Nonzero if R_TYPE is any ARM TLS relocation, including the GNU
   descriptor-dialect ones matched by IS_ARM_TLS_GNU_RELOC (defined just
   below; that is fine because macros are expanded at the point of use).  */
#define IS_ARM_TLS_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GD32		\
   || (R_TYPE) == R_ARM_TLS_LDO32	\
   || (R_TYPE) == R_ARM_TLS_LDM32	\
   || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
   || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
   || (R_TYPE) == R_ARM_TLS_TPOFF32	\
   || (R_TYPE) == R_ARM_TLS_LE32	\
   || (R_TYPE) == R_ARM_TLS_IE32	\
   || IS_ARM_TLS_GNU_RELOC (R_TYPE))

/* Specific set of relocations for the gnu tls dialect.  */
#define IS_ARM_TLS_GNU_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GOTDESC	\
   || (R_TYPE) == R_ARM_TLS_CALL	\
   || (R_TYPE) == R_ARM_THM_TLS_CALL	\
   || (R_TYPE) == R_ARM_TLS_DESCSEQ	\
   || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
11403
/* Relocate an ARM ELF section.  Apply all relocations in RELOCS to the
   section contents in CONTENTS, resolving local symbols via LOCAL_SYMS /
   LOCAL_SECTIONS and globals via the hash table.  Returns FALSE on a hard
   error (undefined symbol callback failure, unresolvable reloc, ...).  */

static bfd_boolean
elf32_arm_relocate_section (bfd *                  output_bfd,
			    struct bfd_link_info * info,
			    bfd *                  input_bfd,
			    asection *             input_section,
			    bfd_byte *             contents,
			    Elf_Internal_Rela *    relocs,
			    Elf_Internal_Sym *     local_syms,
			    asection **            local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel;
  Elf_Internal_Rela *relend;
  const char *name;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  symtab_hdr = & elf_symtab_hdr (input_bfd);
  sym_hashes = elf_sym_hashes (input_bfd);

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type * howto;
      unsigned long r_symndx;
      Elf_Internal_Sym * sym;
      asection * sec;
      struct elf_link_hash_entry * h;
      bfd_vma relocation;
      bfd_reloc_status_type r;
      arelent bfd_reloc;
      char sym_type;
      bfd_boolean unresolved_reloc = FALSE;
      char *error_message = NULL;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      r_type = arm_real_reloc_type (globals, r_type);

      /* VTENTRY/VTINHERIT are garbage-collection markers only; they
	 produce no bits in the output.  */
      if ( r_type == R_ARM_GNU_VTENTRY
	  || r_type == R_ARM_GNU_VTINHERIT)
	continue;

      bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
      howto = bfd_reloc.howto;

      h = NULL;
      sym = NULL;
      sec = NULL;

      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* Relocation against a local symbol.  */
	  sym = local_syms + r_symndx;
	  sym_type = ELF32_ST_TYPE (sym->st_info);
	  sec = local_sections[r_symndx];

	  /* An object file might have a reference to a local
	     undefined symbol.  This is a daft object file, but we
	     should at least do something about it.  V4BX & NONE
	     relocations do not use the symbol and are explicitly
	     allowed to use the undefined symbol, so allow those.
	     Likewise for relocations against STN_UNDEF.  */
	  if (r_type != R_ARM_V4BX
	      && r_type != R_ARM_NONE
	      && r_symndx != STN_UNDEF
	      && bfd_is_und_section (sec)
	      && ELF_ST_BIND (sym->st_info) != STB_WEAK)
	    {
	      if (!info->callbacks->undefined_symbol
		  (info, bfd_elf_string_from_elf_section
		   (input_bfd, symtab_hdr->sh_link, sym->st_name),
		   input_bfd, input_section,
		   rel->r_offset, TRUE))
		return FALSE;
	    }

	  if (globals->use_rel)
	    {
	      relocation = (sec->output_section->vma
			    + sec->output_offset
			    + sym->st_value);
	      /* For REL relocs against SEC_MERGE section symbols the
		 addend is embedded in the insn and must be extracted,
		 adjusted for the post-merge symbol location, and
		 re-inserted.  */
	      if (!bfd_link_relocatable (info)
		  && (sec->flags & SEC_MERGE)
		  && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
		{
		  asection *msec;
		  bfd_vma addend, value;

		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      /* Extract the 16-bit immediate split across the
			 imm4/imm12 fields, then sign-extend it.  */
		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
		      addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      /* Thumb-2 encoding: the immediate is scattered over
			 imm4/i/imm3/imm8 fields of the two halfwords.  */
		      value = bfd_get_16 (input_bfd, contents + rel->r_offset)
			      << 16;
		      value |= bfd_get_16 (input_bfd,
					   contents + rel->r_offset + 2);
		      addend = ((value & 0xf7000) >> 4) | (value & 0xff)
			       | ((value & 0x04000000) >> 15);
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    default:
		      /* A shifted or non-contiguous source field cannot be
			 round-tripped through this generic path.  */
		      if (howto->rightshift
			  || (howto->src_mask & (howto->src_mask + 1)))
			{
			  (*_bfd_error_handler)
			    (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
			     input_bfd, input_section,
			     (long) rel->r_offset, howto->name);
			  return FALSE;
			}

		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);

		      /* Get the (signed) value from the instruction.  */
		      addend = value & howto->src_mask;
		      if (addend & ((howto->src_mask + 1) >> 1))
			{
			  /* Sign-extend: fill all bits above the field.  */
			  bfd_signed_vma mask;

			  mask = -1;
			  mask &= ~ howto->src_mask;
			  addend |= mask;
			}
		      break;
		    }

		  msec = sec;
		  addend =
		    _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
		    - relocation;
		  addend += msec->output_section->vma + msec->output_offset;

		  /* Cases here must match those in the preceding
		     switch statement.  */
		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
			      | (addend & 0xfff);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
			      | (addend & 0xff) | ((addend & 0x0800) << 15);
		      bfd_put_16 (input_bfd, value >> 16,
				  contents + rel->r_offset);
		      bfd_put_16 (input_bfd, value,
				  contents + rel->r_offset + 2);
		      break;

		    default:
		      value = (value & ~ howto->dst_mask)
			      | (addend & howto->dst_mask);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;
		    }
		}
	    }
	  else
	    relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  /* Relocation against a global symbol: resolve through the
	     generic ELF machinery.  */
	  bfd_boolean warned, ignored;

	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned, ignored);

	  sym_type = h->type;
	}

      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (bfd_link_relocatable (info))
	{
	  /* This is a relocatable link.  We don't have to change
	     anything, unless the reloc is against a section symbol,
	     in which case we have to adjust according to where the
	     section symbol winds up in the output section.  */
	  if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
	    {
	      if (globals->use_rel)
		arm_add_to_rel (input_bfd, contents + rel->r_offset,
				howto, (bfd_signed_vma) sec->output_offset);
	      else
		rel->r_addend += sec->output_offset;
	    }
	  continue;
	}

      /* Pick a symbol name for diagnostics.  */
      if (h != NULL)
	name = h->root.root.string;
      else
	{
	  name = (bfd_elf_string_from_elf_section
		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
	  if (name == NULL || *name == '\0')
	    name = bfd_section_name (input_bfd, sec);
	}

      /* Diagnose a TLS reloc applied to a non-TLS symbol, or vice versa.  */
      if (r_symndx != STN_UNDEF
	  && r_type != R_ARM_NONE
	  && (h == NULL
	      || h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	  && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
	{
	  (*_bfd_error_handler)
	    ((sym_type == STT_TLS
	      ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
	      : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
	     input_bfd,
	     input_section,
	     (long) rel->r_offset,
	     howto->name,
	     name);
	}

      /* We call elf32_arm_final_link_relocate unless we're completely
	 done, i.e., the relaxation produced the final output we want,
	 and we won't let anybody mess with it.  Also, we have to do
	 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
	 both in relaxed and non-relaxed cases.  */
      if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
	  || (IS_ARM_TLS_GNU_RELOC (r_type)
	      && !((h ? elf32_arm_hash_entry (h)->tls_type :
		    elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
		   & GOT_TLS_GDESC)))
	{
	  r = elf32_arm_tls_relax (globals, input_bfd, input_section,
				   contents, rel, h == NULL);
	  /* This may have been marked unresolved because it came from
	     a shared library.  But we've just dealt with that.  */
	  unresolved_reloc = 0;
	}
      else
	r = bfd_reloc_continue;

      if (r == bfd_reloc_continue)
	{
	  unsigned char branch_type =
	    h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
	      : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);

	  r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
					     input_section, contents, rel,
					     relocation, info, sec, name,
					     sym_type, branch_type, h,
					     &unresolved_reloc,
					     &error_message);
	}

      /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
	 because such sections are not SEC_ALLOC and thus ld.so will
	 not process them.  */
      if (unresolved_reloc
	  && !((input_section->flags & SEC_DEBUGGING) != 0
	       && h->def_dynamic)
	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      rel->r_offset) != (bfd_vma) -1)
	{
	  (*_bfd_error_handler)
	    (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     input_section,
	     (long) rel->r_offset,
	     howto->name,
	     h->root.root.string);
	  return FALSE;
	}

      if (r != bfd_reloc_ok)
	{
	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      /* If the overflowing reloc was to an undefined symbol,
		 we have already printed one error message and there
		 is no point complaining again.  */
	      if ((! h ||
		   h->root.type != bfd_link_hash_undefined)
		  && (!((*info->callbacks->reloc_overflow)
			(info, (h ? &h->root : NULL), name, howto->name,
			 (bfd_vma) 0, input_bfd, input_section,
			 rel->r_offset))))
		  return FALSE;
	      break;

	    case bfd_reloc_undefined:
	      if (!((*info->callbacks->undefined_symbol)
		    (info, name, input_bfd, input_section,
		     rel->r_offset, TRUE)))
		return FALSE;
	      break;

	    case bfd_reloc_outofrange:
	      error_message = _("out of range");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      error_message = _("unsupported relocation");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      /* error_message should already be set.  */
	      goto common_error;

	    default:
	      error_message = _("unknown error");
	      /* Fall through.  */

	    common_error:
	      BFD_ASSERT (error_message != NULL);
	      if (!((*info->callbacks->reloc_dangerous)
		    (info, error_message, input_bfd, input_section,
		     rel->r_offset)))
		return FALSE;
	      break;
	    }
	}
    }

  return TRUE;
}
11751
11752 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
11753 adds the edit to the start of the list. (The list must be built in order of
11754 ascending TINDEX: the function's callers are primarily responsible for
11755 maintaining that condition). */
11756
11757 static void
11758 add_unwind_table_edit (arm_unwind_table_edit **head,
11759 arm_unwind_table_edit **tail,
11760 arm_unwind_edit_type type,
11761 asection *linked_section,
11762 unsigned int tindex)
11763 {
11764 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
11765 xmalloc (sizeof (arm_unwind_table_edit));
11766
11767 new_edit->type = type;
11768 new_edit->linked_section = linked_section;
11769 new_edit->index = tindex;
11770
11771 if (tindex > 0)
11772 {
11773 new_edit->next = NULL;
11774
11775 if (*tail)
11776 (*tail)->next = new_edit;
11777
11778 (*tail) = new_edit;
11779
11780 if (!*head)
11781 (*head) = new_edit;
11782 }
11783 else
11784 {
11785 new_edit->next = *head;
11786
11787 if (!*tail)
11788 *tail = new_edit;
11789
11790 *head = new_edit;
11791 }
11792 }
11793
11794 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
11795
11796 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST mau be negative. */
11797 static void
11798 adjust_exidx_size(asection *exidx_sec, int adjust)
11799 {
11800 asection *out_sec;
11801
11802 if (!exidx_sec->rawsize)
11803 exidx_sec->rawsize = exidx_sec->size;
11804
11805 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
11806 out_sec = exidx_sec->output_section;
11807 /* Adjust size of output section. */
11808 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size +adjust);
11809 }
11810
11811 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
11812 static void
11813 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
11814 {
11815 struct _arm_elf_section_data *exidx_arm_data;
11816
11817 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
11818 add_unwind_table_edit (
11819 &exidx_arm_data->u.exidx.unwind_edit_list,
11820 &exidx_arm_data->u.exidx.unwind_edit_tail,
11821 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
11822
11823 exidx_arm_data->additional_reloc_count++;
11824
11825 adjust_exidx_size(exidx_sec, 8);
11826 }
11827
/* Scan .ARM.exidx tables, and create a list describing edits which should be
   made to those tables, such that:

   1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
   2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
      codes which have been inlined into the index).

   If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.

   The edits are applied when the tables are written
   (in elf32_arm_write_section).  */

bfd_boolean
elf32_arm_fix_exidx_coverage (asection **text_section_order,
			      unsigned int num_text_sections,
			      struct bfd_link_info *info,
			      bfd_boolean merge_exidx_entries)
{
  bfd *inp;
  unsigned int last_second_word = 0, i;
  asection *last_exidx_sec = NULL;
  asection *last_text_sec = NULL;
  /* Kind of the most recent entry seen: -1 = none yet, 0 = CANTUNWIND,
     1 = inlined unwind data, 2 = normal table entry.  */
  int last_unwind_type = -1;

  /* Walk over all EXIDX sections, and create backlinks from the corresponding
     text sections.  */
  for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
    {
      asection *sec;

      for (sec = inp->sections; sec != NULL; sec = sec->next)
	{
	  struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
	  Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;

	  if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
	    continue;

	  if (elf_sec->linked_to)
	    {
	      Elf_Internal_Shdr *linked_hdr
		= &elf_section_data (elf_sec->linked_to)->this_hdr;
	      struct _arm_elf_section_data *linked_sec_arm_data
		= get_arm_elf_section_data (linked_hdr->bfd_section);

	      if (linked_sec_arm_data == NULL)
		continue;

	      /* Link this .ARM.exidx section back from the text section it
		 describes.  */
	      linked_sec_arm_data->u.text.arm_exidx_sec = sec;
	    }
	}
    }

  /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
     index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
     and add EXIDX_CANTUNWIND entries for sections with no unwind table data.  */

  for (i = 0; i < num_text_sections; i++)
    {
      asection *sec = text_section_order[i];
      asection *exidx_sec;
      struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
      struct _arm_elf_section_data *exidx_arm_data;
      bfd_byte *contents = NULL;
      int deleted_exidx_bytes = 0;
      bfd_vma j;
      arm_unwind_table_edit *unwind_edit_head = NULL;
      arm_unwind_table_edit *unwind_edit_tail = NULL;
      Elf_Internal_Shdr *hdr;
      bfd *ibfd;

      if (arm_data == NULL)
	continue;

      exidx_sec = arm_data->u.text.arm_exidx_sec;
      if (exidx_sec == NULL)
	{
	  /* Section has no unwind data.  */
	  if (last_unwind_type == 0 || !last_exidx_sec)
	    continue;

	  /* Ignore zero sized sections.  */
	  if (sec->size == 0)
	    continue;

	  insert_cantunwind_after(last_text_sec, last_exidx_sec);
	  last_unwind_type = 0;
	  continue;
	}

      /* Skip /DISCARD/ sections.  */
      if (bfd_is_abs_section (exidx_sec->output_section))
	continue;

      hdr = &elf_section_data (exidx_sec)->this_hdr;
      if (hdr->sh_type != SHT_ARM_EXIDX)
	continue;

      exidx_arm_data = get_arm_elf_section_data (exidx_sec);
      if (exidx_arm_data == NULL)
	continue;

      ibfd = exidx_sec->owner;

      if (hdr->contents != NULL)
	contents = hdr->contents;
      else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
	/* An error?  */
	continue;

      if (last_unwind_type > 0)
	{
	  unsigned int first_word = bfd_get_32 (ibfd, contents);
	  /* Add cantunwind if first unwind item does not match section
	     start.  */
	  if (first_word != sec->vma)
	    {
	      insert_cantunwind_after (last_text_sec, last_exidx_sec);
	      last_unwind_type = 0;
	    }
	}

      /* Scan this table's entries (8 bytes each): the second word of an
	 entry distinguishes CANTUNWIND (1), inlined data (high bit set)
	 and normal table references.  */
      for (j = 0; j < hdr->sh_size; j += 8)
	{
	  unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
	  int unwind_type;
	  int elide = 0;

	  /* An EXIDX_CANTUNWIND entry.  */
	  if (second_word == 1)
	    {
	      if (last_unwind_type == 0)
		elide = 1;
	      unwind_type = 0;
	    }
	  /* Inlined unwinding data.  Merge if equal to previous.  */
	  else if ((second_word & 0x80000000) != 0)
	    {
	      if (merge_exidx_entries
		  && last_second_word == second_word && last_unwind_type == 1)
		elide = 1;
	      unwind_type = 1;
	      last_second_word = second_word;
	    }
	  /* Normal table entry.  In theory we could merge these too,
	     but duplicate entries are likely to be much less common.  */
	  else
	    unwind_type = 2;

	  if (elide && !bfd_link_relocatable (info))
	    {
	      add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
				     DELETE_EXIDX_ENTRY, NULL, j / 8);

	      deleted_exidx_bytes += 8;
	    }

	  last_unwind_type = unwind_type;
	}

      /* Free contents if we allocated it ourselves.  */
      if (contents != hdr->contents)
	free (contents);

      /* Record edits to be applied later (in elf32_arm_write_section).  */
      exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
      exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;

      if (deleted_exidx_bytes > 0)
	adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);

      last_exidx_sec = exidx_sec;
      last_text_sec = sec;
    }

  /* Add terminating CANTUNWIND entry.  */
  if (!bfd_link_relocatable (info) && last_exidx_sec
      && last_unwind_type != 0)
    insert_cantunwind_after(last_text_sec, last_exidx_sec);

  return TRUE;
}
12012
12013 static bfd_boolean
12014 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
12015 bfd *ibfd, const char *name)
12016 {
12017 asection *sec, *osec;
12018
12019 sec = bfd_get_linker_section (ibfd, name);
12020 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
12021 return TRUE;
12022
12023 osec = sec->output_section;
12024 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
12025 return TRUE;
12026
12027 if (! bfd_set_section_contents (obfd, osec, sec->contents,
12028 sec->output_offset, sec->size))
12029 return FALSE;
12030
12031 return TRUE;
12032 }
12033
12034 static bfd_boolean
12035 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
12036 {
12037 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
12038 asection *sec, *osec;
12039
12040 if (globals == NULL)
12041 return FALSE;
12042
12043 /* Invoke the regular ELF backend linker to do all the work. */
12044 if (!bfd_elf_final_link (abfd, info))
12045 return FALSE;
12046
12047 /* Process stub sections (eg BE8 encoding, ...). */
12048 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
12049 unsigned int i;
12050 for (i=0; i<htab->top_id; i++)
12051 {
12052 sec = htab->stub_group[i].stub_sec;
12053 /* Only process it once, in its link_sec slot. */
12054 if (sec && i == htab->stub_group[i].link_sec->id)
12055 {
12056 osec = sec->output_section;
12057 elf32_arm_write_section (abfd, info, sec, sec->contents);
12058 if (! bfd_set_section_contents (abfd, osec, sec->contents,
12059 sec->output_offset, sec->size))
12060 return FALSE;
12061 }
12062 }
12063
12064 /* Write out any glue sections now that we have created all the
12065 stubs. */
12066 if (globals->bfd_of_glue_owner != NULL)
12067 {
12068 if (! elf32_arm_output_glue_section (info, abfd,
12069 globals->bfd_of_glue_owner,
12070 ARM2THUMB_GLUE_SECTION_NAME))
12071 return FALSE;
12072
12073 if (! elf32_arm_output_glue_section (info, abfd,
12074 globals->bfd_of_glue_owner,
12075 THUMB2ARM_GLUE_SECTION_NAME))
12076 return FALSE;
12077
12078 if (! elf32_arm_output_glue_section (info, abfd,
12079 globals->bfd_of_glue_owner,
12080 VFP11_ERRATUM_VENEER_SECTION_NAME))
12081 return FALSE;
12082
12083 if (! elf32_arm_output_glue_section (info, abfd,
12084 globals->bfd_of_glue_owner,
12085 STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
12086 return FALSE;
12087
12088 if (! elf32_arm_output_glue_section (info, abfd,
12089 globals->bfd_of_glue_owner,
12090 ARM_BX_GLUE_SECTION_NAME))
12091 return FALSE;
12092 }
12093
12094 return TRUE;
12095 }
12096
12097 /* Return a best guess for the machine number based on the attributes. */
12098
12099 static unsigned int
12100 bfd_arm_get_mach_from_attributes (bfd * abfd)
12101 {
12102 int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
12103
12104 switch (arch)
12105 {
12106 case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
12107 case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
12108 case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
12109
12110 case TAG_CPU_ARCH_V5TE:
12111 {
12112 char * name;
12113
12114 BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
12115 name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
12116
12117 if (name)
12118 {
12119 if (strcmp (name, "IWMMXT2") == 0)
12120 return bfd_mach_arm_iWMMXt2;
12121
12122 if (strcmp (name, "IWMMXT") == 0)
12123 return bfd_mach_arm_iWMMXt;
12124
12125 if (strcmp (name, "XSCALE") == 0)
12126 {
12127 int wmmx;
12128
12129 BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
12130 wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
12131 switch (wmmx)
12132 {
12133 case 1: return bfd_mach_arm_iWMMXt;
12134 case 2: return bfd_mach_arm_iWMMXt2;
12135 default: return bfd_mach_arm_XScale;
12136 }
12137 }
12138 }
12139
12140 return bfd_mach_arm_5TE;
12141 }
12142
12143 default:
12144 return bfd_mach_arm_unknown;
12145 }
12146 }
12147
12148 /* Set the right machine number. */
12149
12150 static bfd_boolean
12151 elf32_arm_object_p (bfd *abfd)
12152 {
12153 unsigned int mach;
12154
12155 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
12156
12157 if (mach == bfd_mach_arm_unknown)
12158 {
12159 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
12160 mach = bfd_mach_arm_ep9312;
12161 else
12162 mach = bfd_arm_get_mach_from_attributes (abfd);
12163 }
12164
12165 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
12166 return TRUE;
12167 }
12168
12169 /* Function to keep ARM specific flags in the ELF header. */
12170
12171 static bfd_boolean
12172 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
12173 {
12174 if (elf_flags_init (abfd)
12175 && elf_elfheader (abfd)->e_flags != flags)
12176 {
12177 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
12178 {
12179 if (flags & EF_ARM_INTERWORK)
12180 (*_bfd_error_handler)
12181 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
12182 abfd);
12183 else
12184 _bfd_error_handler
12185 (_("Warning: Clearing the interworking flag of %B due to outside request"),
12186 abfd);
12187 }
12188 }
12189 else
12190 {
12191 elf_elfheader (abfd)->e_flags = flags;
12192 elf_flags_init (abfd) = TRUE;
12193 }
12194
12195 return TRUE;
12196 }
12197
/* Copy backend specific data from one object module to another.

   Merges the ARM-specific EF_ARM_* flags of IBFD into OBFD: an
   APCS26/APCS32 or float-APCS/non-float-APCS mismatch is a hard error
   (returns FALSE), an interworking mismatch clears the interworking bit
   with a warning, and a PIC mismatch silently clears EF_ARM_PIC.  */

static bfd_boolean
elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
{
  flagword in_flags;
  flagword out_flags;

  /* Nothing ARM-specific to do unless both sides are ARM ELF.  */
  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return TRUE;

  in_flags = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  /* The GNU flag bits below are only meaningful for legacy objects with
     no EABI version; only then do we need a compatibility check, and only
     when the output flags were already initialized to something else.
     Note: the checks adjust IN_FLAGS, which is what gets written to the
     output header at the end.  */
  if (elf_flags_init (obfd)
      && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
      && in_flags != out_flags)
    {
      /* Cannot mix APCS26 and APCS32 code.  */
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	return FALSE;

      /* Cannot mix float APCS and non-float APCS code.  */
      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
	return FALSE;

      /* If the src and dest have different interworking flags
	 then turn off the interworking bit.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
	{
	  if (out_flags & EF_ARM_INTERWORK)
	    _bfd_error_handler
	      (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
	       obfd, ibfd);

	  in_flags &= ~EF_ARM_INTERWORK;
	}

      /* Likewise for PIC, though don't warn for this case.  */
      if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
	in_flags &= ~EF_ARM_PIC;
    }

  elf_elfheader (obfd)->e_flags = in_flags;
  elf_flags_init (obfd) = TRUE;

  /* Let the generic ELF code copy everything else.  */
  return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
}
12246
/* Values for Tag_ABI_PCS_R9_use (per the ARM EABI addenda).  */
enum
{
  AEABI_R9_V6,		/* R9 is an ordinary callee-saved register.  */
  AEABI_R9_SB,		/* R9 is the static base (SB) register.  */
  AEABI_R9_TLS,		/* R9 holds the thread-local storage pointer.  */
  AEABI_R9_unused	/* R9 is not used at all.  */
};

/* Values for Tag_ABI_PCS_RW_data (how writable static data is addressed).  */
enum
{
  AEABI_PCS_RW_data_absolute,	/* Addressed with absolute addresses.  */
  AEABI_PCS_RW_data_PCrel,	/* Addressed PC-relative.  */
  AEABI_PCS_RW_data_SBrel,	/* Addressed SB-relative (needs R9 as SB).  */
  AEABI_PCS_RW_data_unused	/* No writable static data used.  */
};

/* Values for Tag_ABI_enum_size (size of enumerated types).  */
enum
{
  AEABI_enum_unused,		/* Enums not used / compatible with anything.  */
  AEABI_enum_short,		/* Smallest containing type (variable size).  */
  AEABI_enum_wide,		/* Always 32 bits.  */
  AEABI_enum_forced_wide	/* 32 bits, forced for all translation units.  */
};
12273
12274 /* Determine whether an object attribute tag takes an integer, a
12275 string or both. */
12276
12277 static int
12278 elf32_arm_obj_attrs_arg_type (int tag)
12279 {
12280 if (tag == Tag_compatibility)
12281 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
12282 else if (tag == Tag_nodefaults)
12283 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
12284 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
12285 return ATTR_TYPE_FLAG_STR_VAL;
12286 else if (tag < 32)
12287 return ATTR_TYPE_FLAG_INT_VAL;
12288 else
12289 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
12290 }
12291
12292 /* The ABI defines that Tag_conformance should be emitted first, and that
12293 Tag_nodefaults should be second (if either is defined). This sets those
12294 two positions, and bumps up the position of all the remaining tags to
12295 compensate. */
12296 static int
12297 elf32_arm_obj_attrs_order (int num)
12298 {
12299 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
12300 return Tag_conformance;
12301 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
12302 return Tag_nodefaults;
12303 if ((num - 2) < Tag_nodefaults)
12304 return num - 2;
12305 if ((num - 1) < Tag_conformance)
12306 return num - 1;
12307 return num;
12308 }
12309
12310 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
12311 static bfd_boolean
12312 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
12313 {
12314 if ((tag & 127) < 64)
12315 {
12316 _bfd_error_handler
12317 (_("%B: Unknown mandatory EABI object attribute %d"),
12318 abfd, tag);
12319 bfd_set_error (bfd_error_bad_value);
12320 return FALSE;
12321 }
12322 else
12323 {
12324 _bfd_error_handler
12325 (_("Warning: %B: Unknown EABI object attribute %d"),
12326 abfd, tag);
12327 return TRUE;
12328 }
12329 }
12330
12331 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
12332 Returns -1 if no architecture could be read. */
12333
12334 static int
12335 get_secondary_compatible_arch (bfd *abfd)
12336 {
12337 obj_attribute *attr =
12338 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
12339
12340 /* Note: the tag and its argument below are uleb128 values, though
12341 currently-defined values fit in one byte for each. */
12342 if (attr->s
12343 && attr->s[0] == Tag_CPU_arch
12344 && (attr->s[1] & 128) != 128
12345 && attr->s[2] == 0)
12346 return attr->s[1];
12347
12348 /* This tag is "safely ignorable", so don't complain if it looks funny. */
12349 return -1;
12350 }
12351
12352 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
12353 The tag is removed if ARCH is -1. */
12354
12355 static void
12356 set_secondary_compatible_arch (bfd *abfd, int arch)
12357 {
12358 obj_attribute *attr =
12359 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
12360
12361 if (arch == -1)
12362 {
12363 attr->s = NULL;
12364 return;
12365 }
12366
12367 /* Note: the tag and its argument below are uleb128 values, though
12368 currently-defined values fit in one byte for each. */
12369 if (!attr->s)
12370 attr->s = (char *) bfd_alloc (abfd, 3);
12371 attr->s[0] = Tag_CPU_arch;
12372 attr->s[1] = arch;
12373 attr->s[2] = '\0';
12374 }
12375
12376 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
12377 into account. */
12378
12379 static int
12380 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
12381 int newtag, int secondary_compat)
12382 {
12383 #define T(X) TAG_CPU_ARCH_##X
12384 int tagl, tagh, result;
12385 const int v6t2[] =
12386 {
12387 T(V6T2), /* PRE_V4. */
12388 T(V6T2), /* V4. */
12389 T(V6T2), /* V4T. */
12390 T(V6T2), /* V5T. */
12391 T(V6T2), /* V5TE. */
12392 T(V6T2), /* V5TEJ. */
12393 T(V6T2), /* V6. */
12394 T(V7), /* V6KZ. */
12395 T(V6T2) /* V6T2. */
12396 };
12397 const int v6k[] =
12398 {
12399 T(V6K), /* PRE_V4. */
12400 T(V6K), /* V4. */
12401 T(V6K), /* V4T. */
12402 T(V6K), /* V5T. */
12403 T(V6K), /* V5TE. */
12404 T(V6K), /* V5TEJ. */
12405 T(V6K), /* V6. */
12406 T(V6KZ), /* V6KZ. */
12407 T(V7), /* V6T2. */
12408 T(V6K) /* V6K. */
12409 };
12410 const int v7[] =
12411 {
12412 T(V7), /* PRE_V4. */
12413 T(V7), /* V4. */
12414 T(V7), /* V4T. */
12415 T(V7), /* V5T. */
12416 T(V7), /* V5TE. */
12417 T(V7), /* V5TEJ. */
12418 T(V7), /* V6. */
12419 T(V7), /* V6KZ. */
12420 T(V7), /* V6T2. */
12421 T(V7), /* V6K. */
12422 T(V7) /* V7. */
12423 };
12424 const int v6_m[] =
12425 {
12426 -1, /* PRE_V4. */
12427 -1, /* V4. */
12428 T(V6K), /* V4T. */
12429 T(V6K), /* V5T. */
12430 T(V6K), /* V5TE. */
12431 T(V6K), /* V5TEJ. */
12432 T(V6K), /* V6. */
12433 T(V6KZ), /* V6KZ. */
12434 T(V7), /* V6T2. */
12435 T(V6K), /* V6K. */
12436 T(V7), /* V7. */
12437 T(V6_M) /* V6_M. */
12438 };
12439 const int v6s_m[] =
12440 {
12441 -1, /* PRE_V4. */
12442 -1, /* V4. */
12443 T(V6K), /* V4T. */
12444 T(V6K), /* V5T. */
12445 T(V6K), /* V5TE. */
12446 T(V6K), /* V5TEJ. */
12447 T(V6K), /* V6. */
12448 T(V6KZ), /* V6KZ. */
12449 T(V7), /* V6T2. */
12450 T(V6K), /* V6K. */
12451 T(V7), /* V7. */
12452 T(V6S_M), /* V6_M. */
12453 T(V6S_M) /* V6S_M. */
12454 };
12455 const int v7e_m[] =
12456 {
12457 -1, /* PRE_V4. */
12458 -1, /* V4. */
12459 T(V7E_M), /* V4T. */
12460 T(V7E_M), /* V5T. */
12461 T(V7E_M), /* V5TE. */
12462 T(V7E_M), /* V5TEJ. */
12463 T(V7E_M), /* V6. */
12464 T(V7E_M), /* V6KZ. */
12465 T(V7E_M), /* V6T2. */
12466 T(V7E_M), /* V6K. */
12467 T(V7E_M), /* V7. */
12468 T(V7E_M), /* V6_M. */
12469 T(V7E_M), /* V6S_M. */
12470 T(V7E_M) /* V7E_M. */
12471 };
12472 const int v8[] =
12473 {
12474 T(V8), /* PRE_V4. */
12475 T(V8), /* V4. */
12476 T(V8), /* V4T. */
12477 T(V8), /* V5T. */
12478 T(V8), /* V5TE. */
12479 T(V8), /* V5TEJ. */
12480 T(V8), /* V6. */
12481 T(V8), /* V6KZ. */
12482 T(V8), /* V6T2. */
12483 T(V8), /* V6K. */
12484 T(V8), /* V7. */
12485 T(V8), /* V6_M. */
12486 T(V8), /* V6S_M. */
12487 T(V8), /* V7E_M. */
12488 T(V8) /* V8. */
12489 };
12490 const int v8m_baseline[] =
12491 {
12492 -1, /* PRE_V4. */
12493 -1, /* V4. */
12494 -1, /* V4T. */
12495 -1, /* V5T. */
12496 -1, /* V5TE. */
12497 -1, /* V5TEJ. */
12498 -1, /* V6. */
12499 -1, /* V6KZ. */
12500 -1, /* V6T2. */
12501 -1, /* V6K. */
12502 -1, /* V7. */
12503 T(V8M_BASE), /* V6_M. */
12504 T(V8M_BASE), /* V6S_M. */
12505 -1, /* V7E_M. */
12506 -1, /* V8. */
12507 -1,
12508 T(V8M_BASE) /* V8-M BASELINE. */
12509 };
12510 const int v8m_mainline[] =
12511 {
12512 -1, /* PRE_V4. */
12513 -1, /* V4. */
12514 -1, /* V4T. */
12515 -1, /* V5T. */
12516 -1, /* V5TE. */
12517 -1, /* V5TEJ. */
12518 -1, /* V6. */
12519 -1, /* V6KZ. */
12520 -1, /* V6T2. */
12521 -1, /* V6K. */
12522 T(V8M_MAIN), /* V7. */
12523 T(V8M_MAIN), /* V6_M. */
12524 T(V8M_MAIN), /* V6S_M. */
12525 T(V8M_MAIN), /* V7E_M. */
12526 -1, /* V8. */
12527 -1,
12528 T(V8M_MAIN), /* V8-M BASELINE. */
12529 T(V8M_MAIN) /* V8-M MAINLINE. */
12530 };
12531 const int v4t_plus_v6_m[] =
12532 {
12533 -1, /* PRE_V4. */
12534 -1, /* V4. */
12535 T(V4T), /* V4T. */
12536 T(V5T), /* V5T. */
12537 T(V5TE), /* V5TE. */
12538 T(V5TEJ), /* V5TEJ. */
12539 T(V6), /* V6. */
12540 T(V6KZ), /* V6KZ. */
12541 T(V6T2), /* V6T2. */
12542 T(V6K), /* V6K. */
12543 T(V7), /* V7. */
12544 T(V6_M), /* V6_M. */
12545 T(V6S_M), /* V6S_M. */
12546 T(V7E_M), /* V7E_M. */
12547 T(V8), /* V8. */
12548 -1, /* Unused. */
12549 T(V8M_BASE), /* V8-M BASELINE. */
12550 T(V8M_MAIN), /* V8-M MAINLINE. */
12551 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
12552 };
12553 const int *comb[] =
12554 {
12555 v6t2,
12556 v6k,
12557 v7,
12558 v6_m,
12559 v6s_m,
12560 v7e_m,
12561 v8,
12562 NULL,
12563 v8m_baseline,
12564 v8m_mainline,
12565 /* Pseudo-architecture. */
12566 v4t_plus_v6_m
12567 };
12568
12569 /* Check we've not got a higher architecture than we know about. */
12570
12571 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
12572 {
12573 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
12574 return -1;
12575 }
12576
12577 /* Override old tag if we have a Tag_also_compatible_with on the output. */
12578
12579 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
12580 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
12581 oldtag = T(V4T_PLUS_V6_M);
12582
12583 /* And override the new tag if we have a Tag_also_compatible_with on the
12584 input. */
12585
12586 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
12587 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
12588 newtag = T(V4T_PLUS_V6_M);
12589
12590 tagl = (oldtag < newtag) ? oldtag : newtag;
12591 result = tagh = (oldtag > newtag) ? oldtag : newtag;
12592
12593 /* Architectures before V6KZ add features monotonically. */
12594 if (tagh <= TAG_CPU_ARCH_V6KZ)
12595 return result;
12596
12597 result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;
12598
12599 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
12600 as the canonical version. */
12601 if (result == T(V4T_PLUS_V6_M))
12602 {
12603 result = T(V4T);
12604 *secondary_compat_out = T(V6_M);
12605 }
12606 else
12607 *secondary_compat_out = -1;
12608
12609 if (result == -1)
12610 {
12611 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
12612 ibfd, oldtag, newtag);
12613 return -1;
12614 }
12615
12616 return result;
12617 #undef T
12618 }
12619
12620 /* Query attributes object to see if integer divide instructions may be
12621 present in an object. */
static bfd_boolean
elf32_arm_attributes_accept_div (const obj_attribute *attr)
{
  int arch = attr[Tag_CPU_arch].i;
  int profile = attr[Tag_CPU_arch_profile].i;

  switch (attr[Tag_DIV_use].i)
    {
    case 0:
      /* Integer divide allowed if instruction contained in
	 architecture: v7-R and v7-M have SDIV/UDIV, as does
	 everything from v7E-M on.  */
      if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
	return TRUE;
      else if (arch >= TAG_CPU_ARCH_V7E_M)
	return TRUE;
      else
	return FALSE;

    case 1:
      /* Integer divide explicitly prohibited.  */
      return FALSE;

    default:
      /* Unrecognised case - treat as allowing divide everywhere.
	 Note the deliberate fallthrough into case 2.  */
    case 2:
      /* Integer divide allowed in ARM state.  */
      return TRUE;
    }
}
12650
12651 /* Query attributes object to see if integer divide instructions are
12652 forbidden to be in the object. This is not the inverse of
12653 elf32_arm_attributes_accept_div. */
static bfd_boolean
elf32_arm_attributes_forbid_div (const obj_attribute *attr)
{
  /* Only an explicit Tag_DIV_use of 1 forbids the divide instructions;
     0 defers to the base architecture and 2 explicitly permits them.  */
  return attr[Tag_DIV_use].i == 1;
}
12659
12660 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
12661 are conflicting attributes. */
12662
12663 static bfd_boolean
12664 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
12665 {
12666 obj_attribute *in_attr;
12667 obj_attribute *out_attr;
12668 /* Some tags have 0 = don't care, 1 = strong requirement,
12669 2 = weak requirement. */
12670 static const int order_021[3] = {0, 2, 1};
12671 int i;
12672 bfd_boolean result = TRUE;
12673 const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;
12674
12675 /* Skip the linker stubs file. This preserves previous behavior
12676 of accepting unknown attributes in the first input file - but
12677 is that a bug? */
12678 if (ibfd->flags & BFD_LINKER_CREATED)
12679 return TRUE;
12680
12681 /* Skip any input that hasn't attribute section.
12682 This enables to link object files without attribute section with
12683 any others. */
12684 if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
12685 return TRUE;
12686
12687 if (!elf_known_obj_attributes_proc (obfd)[0].i)
12688 {
12689 /* This is the first object. Copy the attributes. */
12690 _bfd_elf_copy_obj_attributes (ibfd, obfd);
12691
12692 out_attr = elf_known_obj_attributes_proc (obfd);
12693
12694 /* Use the Tag_null value to indicate the attributes have been
12695 initialized. */
12696 out_attr[0].i = 1;
12697
12698 /* We do not output objects with Tag_MPextension_use_legacy - we move
12699 the attribute's value to Tag_MPextension_use. */
12700 if (out_attr[Tag_MPextension_use_legacy].i != 0)
12701 {
12702 if (out_attr[Tag_MPextension_use].i != 0
12703 && out_attr[Tag_MPextension_use_legacy].i
12704 != out_attr[Tag_MPextension_use].i)
12705 {
12706 _bfd_error_handler
12707 (_("Error: %B has both the current and legacy "
12708 "Tag_MPextension_use attributes"), ibfd);
12709 result = FALSE;
12710 }
12711
12712 out_attr[Tag_MPextension_use] =
12713 out_attr[Tag_MPextension_use_legacy];
12714 out_attr[Tag_MPextension_use_legacy].type = 0;
12715 out_attr[Tag_MPextension_use_legacy].i = 0;
12716 }
12717
12718 return result;
12719 }
12720
12721 in_attr = elf_known_obj_attributes_proc (ibfd);
12722 out_attr = elf_known_obj_attributes_proc (obfd);
12723 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
12724 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
12725 {
12726 /* Ignore mismatches if the object doesn't use floating point or is
12727 floating point ABI independent. */
12728 if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
12729 || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
12730 && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
12731 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
12732 else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
12733 && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
12734 {
12735 _bfd_error_handler
12736 (_("error: %B uses VFP register arguments, %B does not"),
12737 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
12738 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
12739 result = FALSE;
12740 }
12741 }
12742
12743 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
12744 {
12745 /* Merge this attribute with existing attributes. */
12746 switch (i)
12747 {
12748 case Tag_CPU_raw_name:
12749 case Tag_CPU_name:
12750 /* These are merged after Tag_CPU_arch. */
12751 break;
12752
12753 case Tag_ABI_optimization_goals:
12754 case Tag_ABI_FP_optimization_goals:
12755 /* Use the first value seen. */
12756 break;
12757
12758 case Tag_CPU_arch:
12759 {
12760 int secondary_compat = -1, secondary_compat_out = -1;
12761 unsigned int saved_out_attr = out_attr[i].i;
12762 int arch_attr;
12763 static const char *name_table[] =
12764 {
12765 /* These aren't real CPU names, but we can't guess
12766 that from the architecture version alone. */
12767 "Pre v4",
12768 "ARM v4",
12769 "ARM v4T",
12770 "ARM v5T",
12771 "ARM v5TE",
12772 "ARM v5TEJ",
12773 "ARM v6",
12774 "ARM v6KZ",
12775 "ARM v6T2",
12776 "ARM v6K",
12777 "ARM v7",
12778 "ARM v6-M",
12779 "ARM v6S-M",
12780 "ARM v8",
12781 "",
12782 "ARM v8-M.baseline",
12783 "ARM v8-M.mainline",
12784 };
12785
12786 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
12787 secondary_compat = get_secondary_compatible_arch (ibfd);
12788 secondary_compat_out = get_secondary_compatible_arch (obfd);
12789 arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
12790 &secondary_compat_out,
12791 in_attr[i].i,
12792 secondary_compat);
12793
12794 /* Return with error if failed to merge. */
12795 if (arch_attr == -1)
12796 return FALSE;
12797
12798 out_attr[i].i = arch_attr;
12799
12800 set_secondary_compatible_arch (obfd, secondary_compat_out);
12801
12802 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
12803 if (out_attr[i].i == saved_out_attr)
12804 ; /* Leave the names alone. */
12805 else if (out_attr[i].i == in_attr[i].i)
12806 {
12807 /* The output architecture has been changed to match the
12808 input architecture. Use the input names. */
12809 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
12810 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
12811 : NULL;
12812 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
12813 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
12814 : NULL;
12815 }
12816 else
12817 {
12818 out_attr[Tag_CPU_name].s = NULL;
12819 out_attr[Tag_CPU_raw_name].s = NULL;
12820 }
12821
12822 /* If we still don't have a value for Tag_CPU_name,
12823 make one up now. Tag_CPU_raw_name remains blank. */
12824 if (out_attr[Tag_CPU_name].s == NULL
12825 && out_attr[i].i < ARRAY_SIZE (name_table))
12826 out_attr[Tag_CPU_name].s =
12827 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
12828 }
12829 break;
12830
12831 case Tag_ARM_ISA_use:
12832 case Tag_THUMB_ISA_use:
12833 case Tag_WMMX_arch:
12834 case Tag_Advanced_SIMD_arch:
12835 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
12836 case Tag_ABI_FP_rounding:
12837 case Tag_ABI_FP_exceptions:
12838 case Tag_ABI_FP_user_exceptions:
12839 case Tag_ABI_FP_number_model:
12840 case Tag_FP_HP_extension:
12841 case Tag_CPU_unaligned_access:
12842 case Tag_T2EE_use:
12843 case Tag_MPextension_use:
12844 /* Use the largest value specified. */
12845 if (in_attr[i].i > out_attr[i].i)
12846 out_attr[i].i = in_attr[i].i;
12847 break;
12848
12849 case Tag_ABI_align_preserved:
12850 case Tag_ABI_PCS_RO_data:
12851 /* Use the smallest value specified. */
12852 if (in_attr[i].i < out_attr[i].i)
12853 out_attr[i].i = in_attr[i].i;
12854 break;
12855
12856 case Tag_ABI_align_needed:
12857 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
12858 && (in_attr[Tag_ABI_align_preserved].i == 0
12859 || out_attr[Tag_ABI_align_preserved].i == 0))
12860 {
12861 /* This error message should be enabled once all non-conformant
12862 binaries in the toolchain have had the attributes set
12863 properly.
12864 _bfd_error_handler
12865 (_("error: %B: 8-byte data alignment conflicts with %B"),
12866 obfd, ibfd);
12867 result = FALSE; */
12868 }
12869 /* Fall through. */
12870 case Tag_ABI_FP_denormal:
12871 case Tag_ABI_PCS_GOT_use:
12872 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
12873 value if greater than 2 (for future-proofing). */
12874 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
12875 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
12876 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
12877 out_attr[i].i = in_attr[i].i;
12878 break;
12879
12880 case Tag_Virtualization_use:
12881 /* The virtualization tag effectively stores two bits of
12882 information: the intended use of TrustZone (in bit 0), and the
12883 intended use of Virtualization (in bit 1). */
12884 if (out_attr[i].i == 0)
12885 out_attr[i].i = in_attr[i].i;
12886 else if (in_attr[i].i != 0
12887 && in_attr[i].i != out_attr[i].i)
12888 {
12889 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
12890 out_attr[i].i = 3;
12891 else
12892 {
12893 _bfd_error_handler
12894 (_("error: %B: unable to merge virtualization attributes "
12895 "with %B"),
12896 obfd, ibfd);
12897 result = FALSE;
12898 }
12899 }
12900 break;
12901
12902 case Tag_CPU_arch_profile:
12903 if (out_attr[i].i != in_attr[i].i)
12904 {
12905 /* 0 will merge with anything.
12906 'A' and 'S' merge to 'A'.
12907 'R' and 'S' merge to 'R'.
12908 'M' and 'A|R|S' is an error. */
12909 if (out_attr[i].i == 0
12910 || (out_attr[i].i == 'S'
12911 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
12912 out_attr[i].i = in_attr[i].i;
12913 else if (in_attr[i].i == 0
12914 || (in_attr[i].i == 'S'
12915 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
12916 ; /* Do nothing. */
12917 else
12918 {
12919 _bfd_error_handler
12920 (_("error: %B: Conflicting architecture profiles %c/%c"),
12921 ibfd,
12922 in_attr[i].i ? in_attr[i].i : '0',
12923 out_attr[i].i ? out_attr[i].i : '0');
12924 result = FALSE;
12925 }
12926 }
12927 break;
12928
12929 case Tag_DSP_extension:
12930 /* No need to change output value if any of:
12931 - pre (<=) ARMv5T input architecture (do not have DSP)
12932 - M input profile not ARMv7E-M and do not have DSP. */
12933 if (in_attr[Tag_CPU_arch].i <= 3
12934 || (in_attr[Tag_CPU_arch_profile].i == 'M'
12935 && in_attr[Tag_CPU_arch].i != 13
12936 && in_attr[i].i == 0))
12937 ; /* Do nothing. */
12938 /* Output value should be 0 if DSP part of architecture, ie.
12939 - post (>=) ARMv5te architecture output
12940 - A, R or S profile output or ARMv7E-M output architecture. */
12941 else if (out_attr[Tag_CPU_arch].i >= 4
12942 && (out_attr[Tag_CPU_arch_profile].i == 'A'
12943 || out_attr[Tag_CPU_arch_profile].i == 'R'
12944 || out_attr[Tag_CPU_arch_profile].i == 'S'
12945 || out_attr[Tag_CPU_arch].i == 13))
12946 out_attr[i].i = 0;
12947 /* Otherwise, DSP instructions are added and not part of output
12948 architecture. */
12949 else
12950 out_attr[i].i = 1;
12951 break;
12952
12953 case Tag_FP_arch:
12954 {
12955 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
12956 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
12957 when it's 0. It might mean absence of FP hardware if
12958 Tag_FP_arch is zero. */
12959
12960 #define VFP_VERSION_COUNT 9
12961 static const struct
12962 {
12963 int ver;
12964 int regs;
12965 } vfp_versions[VFP_VERSION_COUNT] =
12966 {
12967 {0, 0},
12968 {1, 16},
12969 {2, 16},
12970 {3, 32},
12971 {3, 16},
12972 {4, 32},
12973 {4, 16},
12974 {8, 32},
12975 {8, 16}
12976 };
12977 int ver;
12978 int regs;
12979 int newval;
12980
12981 /* If the output has no requirement about FP hardware,
12982 follow the requirement of the input. */
12983 if (out_attr[i].i == 0)
12984 {
12985 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
12986 out_attr[i].i = in_attr[i].i;
12987 out_attr[Tag_ABI_HardFP_use].i
12988 = in_attr[Tag_ABI_HardFP_use].i;
12989 break;
12990 }
12991 /* If the input has no requirement about FP hardware, do
12992 nothing. */
12993 else if (in_attr[i].i == 0)
12994 {
12995 BFD_ASSERT (in_attr[Tag_ABI_HardFP_use].i == 0);
12996 break;
12997 }
12998
12999 /* Both the input and the output have nonzero Tag_FP_arch.
13000 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */
13001
13002 /* If both the input and the output have zero Tag_ABI_HardFP_use,
13003 do nothing. */
13004 if (in_attr[Tag_ABI_HardFP_use].i == 0
13005 && out_attr[Tag_ABI_HardFP_use].i == 0)
13006 ;
13007 /* If the input and the output have different Tag_ABI_HardFP_use,
13008 the combination of them is 0 (implied by Tag_FP_arch). */
13009 else if (in_attr[Tag_ABI_HardFP_use].i
13010 != out_attr[Tag_ABI_HardFP_use].i)
13011 out_attr[Tag_ABI_HardFP_use].i = 0;
13012
13013 /* Now we can handle Tag_FP_arch. */
13014
13015 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
13016 pick the biggest. */
13017 if (in_attr[i].i >= VFP_VERSION_COUNT
13018 && in_attr[i].i > out_attr[i].i)
13019 {
13020 out_attr[i] = in_attr[i];
13021 break;
13022 }
13023 /* The output uses the superset of input features
13024 (ISA version) and registers. */
13025 ver = vfp_versions[in_attr[i].i].ver;
13026 if (ver < vfp_versions[out_attr[i].i].ver)
13027 ver = vfp_versions[out_attr[i].i].ver;
13028 regs = vfp_versions[in_attr[i].i].regs;
13029 if (regs < vfp_versions[out_attr[i].i].regs)
13030 regs = vfp_versions[out_attr[i].i].regs;
13031 /* This assumes all possible supersets are also a valid
13032 options. */
13033 for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
13034 {
13035 if (regs == vfp_versions[newval].regs
13036 && ver == vfp_versions[newval].ver)
13037 break;
13038 }
13039 out_attr[i].i = newval;
13040 }
13041 break;
13042 case Tag_PCS_config:
13043 if (out_attr[i].i == 0)
13044 out_attr[i].i = in_attr[i].i;
13045 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
13046 {
13047 /* It's sometimes ok to mix different configs, so this is only
13048 a warning. */
13049 _bfd_error_handler
13050 (_("Warning: %B: Conflicting platform configuration"), ibfd);
13051 }
13052 break;
13053 case Tag_ABI_PCS_R9_use:
13054 if (in_attr[i].i != out_attr[i].i
13055 && out_attr[i].i != AEABI_R9_unused
13056 && in_attr[i].i != AEABI_R9_unused)
13057 {
13058 _bfd_error_handler
13059 (_("error: %B: Conflicting use of R9"), ibfd);
13060 result = FALSE;
13061 }
13062 if (out_attr[i].i == AEABI_R9_unused)
13063 out_attr[i].i = in_attr[i].i;
13064 break;
13065 case Tag_ABI_PCS_RW_data:
13066 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
13067 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
13068 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
13069 {
13070 _bfd_error_handler
13071 (_("error: %B: SB relative addressing conflicts with use of R9"),
13072 ibfd);
13073 result = FALSE;
13074 }
13075 /* Use the smallest value specified. */
13076 if (in_attr[i].i < out_attr[i].i)
13077 out_attr[i].i = in_attr[i].i;
13078 break;
13079 case Tag_ABI_PCS_wchar_t:
13080 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
13081 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
13082 {
13083 _bfd_error_handler
13084 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
13085 ibfd, in_attr[i].i, out_attr[i].i);
13086 }
13087 else if (in_attr[i].i && !out_attr[i].i)
13088 out_attr[i].i = in_attr[i].i;
13089 break;
13090 case Tag_ABI_enum_size:
13091 if (in_attr[i].i != AEABI_enum_unused)
13092 {
13093 if (out_attr[i].i == AEABI_enum_unused
13094 || out_attr[i].i == AEABI_enum_forced_wide)
13095 {
13096 /* The existing object is compatible with anything.
13097 Use whatever requirements the new object has. */
13098 out_attr[i].i = in_attr[i].i;
13099 }
13100 else if (in_attr[i].i != AEABI_enum_forced_wide
13101 && out_attr[i].i != in_attr[i].i
13102 && !elf_arm_tdata (obfd)->no_enum_size_warning)
13103 {
13104 static const char *aeabi_enum_names[] =
13105 { "", "variable-size", "32-bit", "" };
13106 const char *in_name =
13107 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
13108 ? aeabi_enum_names[in_attr[i].i]
13109 : "<unknown>";
13110 const char *out_name =
13111 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
13112 ? aeabi_enum_names[out_attr[i].i]
13113 : "<unknown>";
13114 _bfd_error_handler
13115 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
13116 ibfd, in_name, out_name);
13117 }
13118 }
13119 break;
13120 case Tag_ABI_VFP_args:
13121 /* Aready done. */
13122 break;
13123 case Tag_ABI_WMMX_args:
13124 if (in_attr[i].i != out_attr[i].i)
13125 {
13126 _bfd_error_handler
13127 (_("error: %B uses iWMMXt register arguments, %B does not"),
13128 ibfd, obfd);
13129 result = FALSE;
13130 }
13131 break;
13132 case Tag_compatibility:
13133 /* Merged in target-independent code. */
13134 break;
13135 case Tag_ABI_HardFP_use:
13136 /* This is handled along with Tag_FP_arch. */
13137 break;
13138 case Tag_ABI_FP_16bit_format:
13139 if (in_attr[i].i != 0 && out_attr[i].i != 0)
13140 {
13141 if (in_attr[i].i != out_attr[i].i)
13142 {
13143 _bfd_error_handler
13144 (_("error: fp16 format mismatch between %B and %B"),
13145 ibfd, obfd);
13146 result = FALSE;
13147 }
13148 }
13149 if (in_attr[i].i != 0)
13150 out_attr[i].i = in_attr[i].i;
13151 break;
13152
13153 case Tag_DIV_use:
13154 /* A value of zero on input means that the divide instruction may
13155 be used if available in the base architecture as specified via
13156 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
13157 the user did not want divide instructions. A value of 2
13158 explicitly means that divide instructions were allowed in ARM
13159 and Thumb state. */
13160 if (in_attr[i].i == out_attr[i].i)
13161 /* Do nothing. */ ;
13162 else if (elf32_arm_attributes_forbid_div (in_attr)
13163 && !elf32_arm_attributes_accept_div (out_attr))
13164 out_attr[i].i = 1;
13165 else if (elf32_arm_attributes_forbid_div (out_attr)
13166 && elf32_arm_attributes_accept_div (in_attr))
13167 out_attr[i].i = in_attr[i].i;
13168 else if (in_attr[i].i == 2)
13169 out_attr[i].i = in_attr[i].i;
13170 break;
13171
13172 case Tag_MPextension_use_legacy:
13173 /* We don't output objects with Tag_MPextension_use_legacy - we
13174 move the value to Tag_MPextension_use. */
13175 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
13176 {
13177 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
13178 {
13179 _bfd_error_handler
13180 (_("%B has has both the current and legacy "
13181 "Tag_MPextension_use attributes"),
13182 ibfd);
13183 result = FALSE;
13184 }
13185 }
13186
13187 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
13188 out_attr[Tag_MPextension_use] = in_attr[i];
13189
13190 break;
13191
13192 case Tag_nodefaults:
13193 /* This tag is set if it exists, but the value is unused (and is
13194 typically zero). We don't actually need to do anything here -
13195 the merge happens automatically when the type flags are merged
13196 below. */
13197 break;
13198 case Tag_also_compatible_with:
13199 /* Already done in Tag_CPU_arch. */
13200 break;
13201 case Tag_conformance:
13202 /* Keep the attribute if it matches. Throw it away otherwise.
13203 No attribute means no claim to conform. */
13204 if (!in_attr[i].s || !out_attr[i].s
13205 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
13206 out_attr[i].s = NULL;
13207 break;
13208
13209 default:
13210 result
13211 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
13212 }
13213
13214 /* If out_attr was copied from in_attr then it won't have a type yet. */
13215 if (in_attr[i].type && !out_attr[i].type)
13216 out_attr[i].type = in_attr[i].type;
13217 }
13218
13219 /* Merge Tag_compatibility attributes and any common GNU ones. */
13220 if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
13221 return FALSE;
13222
13223 /* Check for any attributes not known on ARM. */
13224 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
13225
13226 return result;
13227 }
13228
13229
/* Return TRUE if the two EABI versions are compatible.  */

static bfd_boolean
elf32_arm_versions_compatible (unsigned iver, unsigned over)
{
  /* v4 and v5 are the same spec before and after it was released,
     so allow mixing them.  */
  if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
      || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
    return TRUE;

  return (iver == over);
}
13243
13244 /* Merge backend specific data from an object file to the output
13245 object file when linking. */
13246
13247 static bfd_boolean
13248 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
13249
13250 /* Display the flags field. */
13251
static bfd_boolean
elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
{
  FILE * file = (FILE *) ptr;
  unsigned long flags;

  BFD_ASSERT (abfd != NULL && ptr != NULL);

  /* Print normal ELF private data.  */
  _bfd_elf_print_private_bfd_data (abfd, ptr);

  flags = elf_elfheader (abfd)->e_flags;
  /* Ignore init flag - it may not be set, despite the flags field
     containing valid data.  */

  /* xgettext:c-format */
  fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);

  /* Decode the e_flags field according to the EABI version it claims;
     each arm clears the bits it has decoded so that any leftover
     (unrecognised) bits can be reported at the end.  */
  switch (EF_ARM_EABI_VERSION (flags))
    {
    case EF_ARM_EABI_UNKNOWN:
      /* The following flag bits are GNU extensions and not part of the
	 official ARM ELF extended ABI.  Hence they are only decoded if
	 the EABI version is not set.  */
      if (flags & EF_ARM_INTERWORK)
	fprintf (file, _(" [interworking enabled]"));

      if (flags & EF_ARM_APCS_26)
	fprintf (file, " [APCS-26]");
      else
	fprintf (file, " [APCS-32]");

      if (flags & EF_ARM_VFP_FLOAT)
	fprintf (file, _(" [VFP float format]"));
      else if (flags & EF_ARM_MAVERICK_FLOAT)
	fprintf (file, _(" [Maverick float format]"));
      else
	fprintf (file, _(" [FPA float format]"));

      if (flags & EF_ARM_APCS_FLOAT)
	fprintf (file, _(" [floats passed in float registers]"));

      if (flags & EF_ARM_PIC)
	fprintf (file, _(" [position independent]"));

      if (flags & EF_ARM_NEW_ABI)
	fprintf (file, _(" [new ABI]"));

      if (flags & EF_ARM_OLD_ABI)
	fprintf (file, _(" [old ABI]"));

      if (flags & EF_ARM_SOFT_FLOAT)
	fprintf (file, _(" [software FP]"));

      flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
		 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
		 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
		 | EF_ARM_MAVERICK_FLOAT);
      break;

    case EF_ARM_EABI_VER1:
      fprintf (file, _(" [Version1 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      flags &= ~ EF_ARM_SYMSARESORTED;
      break;

    case EF_ARM_EABI_VER2:
      fprintf (file, _(" [Version2 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      if (flags & EF_ARM_DYNSYMSUSESEGIDX)
	fprintf (file, _(" [dynamic symbols use segment index]"));

      if (flags & EF_ARM_MAPSYMSFIRST)
	fprintf (file, _(" [mapping symbols precede others]"));

      flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
		 | EF_ARM_MAPSYMSFIRST);
      break;

    case EF_ARM_EABI_VER3:
      fprintf (file, _(" [Version3 EABI]"));
      break;

    case EF_ARM_EABI_VER4:
      fprintf (file, _(" [Version4 EABI]"));
      /* Share the BE8/LE8 decoding with version 5; the float-ABI bits
	 are version-5 only and so are skipped here.  */
      goto eabi;

    case EF_ARM_EABI_VER5:
      fprintf (file, _(" [Version5 EABI]"));

      if (flags & EF_ARM_ABI_FLOAT_SOFT)
	fprintf (file, _(" [soft-float ABI]"));

      if (flags & EF_ARM_ABI_FLOAT_HARD)
	fprintf (file, _(" [hard-float ABI]"));

      flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);

    eabi:
      if (flags & EF_ARM_BE8)
	fprintf (file, _(" [BE8]"));

      if (flags & EF_ARM_LE8)
	fprintf (file, _(" [LE8]"));

      flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
      break;

    default:
      fprintf (file, _(" <EABI version unrecognised>"));
      break;
    }

  flags &= ~ EF_ARM_EABIMASK;

  if (flags & EF_ARM_RELEXEC)
    fprintf (file, _(" [relocatable executable]"));

  flags &= ~EF_ARM_RELEXEC;

  /* Any bits still set were not decoded above.  */
  if (flags)
    fprintf (file, _("<Unrecognised flag bits set>"));

  fputc ('\n', file);

  return TRUE;
}
13389
13390 static int
13391 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
13392 {
13393 switch (ELF_ST_TYPE (elf_sym->st_info))
13394 {
13395 case STT_ARM_TFUNC:
13396 return ELF_ST_TYPE (elf_sym->st_info);
13397
13398 case STT_ARM_16BIT:
13399 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
13400 This allows us to distinguish between data used by Thumb instructions
13401 and non-data (which is probably code) inside Thumb regions of an
13402 executable. */
13403 if (type != STT_OBJECT && type != STT_TLS)
13404 return ELF_ST_TYPE (elf_sym->st_info);
13405 break;
13406
13407 default:
13408 break;
13409 }
13410
13411 return type;
13412 }
13413
13414 static asection *
13415 elf32_arm_gc_mark_hook (asection *sec,
13416 struct bfd_link_info *info,
13417 Elf_Internal_Rela *rel,
13418 struct elf_link_hash_entry *h,
13419 Elf_Internal_Sym *sym)
13420 {
13421 if (h != NULL)
13422 switch (ELF32_R_TYPE (rel->r_info))
13423 {
13424 case R_ARM_GNU_VTINHERIT:
13425 case R_ARM_GNU_VTENTRY:
13426 return NULL;
13427 }
13428
13429 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
13430 }
13431
13432 /* Update the got entry reference counts for the section being removed. */
13433
static bfd_boolean
elf32_arm_gc_sweep_hook (bfd * abfd,
			 struct bfd_link_info * info,
			 asection * sec,
			 const Elf_Internal_Rela * relocs)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  bfd_signed_vma *local_got_refcounts;
  const Elf_Internal_Rela *rel, *relend;
  struct elf32_arm_link_hash_table * globals;

  if (bfd_link_relocatable (info))
    return TRUE;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  elf_section_data (sec)->local_dynrel = NULL;

  symtab_hdr = & elf_symtab_hdr (abfd);
  sym_hashes = elf_sym_hashes (abfd);
  local_got_refcounts = elf_local_got_refcounts (abfd);

  check_use_blx (globals);

  /* Walk every relocation in SEC and undo the reference counting that
     elf32_arm_check_relocs performed for it.  */
  relend = relocs + sec->reloc_count;
  for (rel = relocs; rel < relend; rel++)
    {
      unsigned long r_symndx;
      struct elf_link_hash_entry *h = NULL;
      struct elf32_arm_link_hash_entry *eh;
      int r_type;
      bfd_boolean call_reloc_p;
      bfd_boolean may_become_dynamic_p;
      bfd_boolean may_need_local_target_p;
      union gotplt_union *root_plt;
      struct arm_plt_info *arm_plt;

      r_symndx = ELF32_R_SYM (rel->r_info);
      if (r_symndx >= symtab_hdr->sh_info)
	{
	  /* A global symbol: chase indirect and warning links down to
	     the real symbol.  */
	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];
	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;
	}
      eh = (struct elf32_arm_link_hash_entry *) h;

      call_reloc_p = FALSE;
      may_become_dynamic_p = FALSE;
      may_need_local_target_p = FALSE;

      r_type = ELF32_R_TYPE (rel->r_info);
      r_type = arm_real_reloc_type (globals, r_type);
      switch (r_type)
	{
	case R_ARM_GOT32:
	case R_ARM_GOT_PREL:
	case R_ARM_TLS_GD32:
	case R_ARM_TLS_IE32:
	  /* Drop one GOT reference for the symbol, global or local.  */
	  if (h != NULL)
	    {
	      if (h->got.refcount > 0)
		h->got.refcount -= 1;
	    }
	  else if (local_got_refcounts != NULL)
	    {
	      if (local_got_refcounts[r_symndx] > 0)
		local_got_refcounts[r_symndx] -= 1;
	    }
	  break;

	case R_ARM_TLS_LDM32:
	  globals->tls_ldm_got.refcount -= 1;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	case R_ARM_PREL31:
	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
	case R_ARM_THM_JUMP19:
	  call_reloc_p = TRUE;
	  may_need_local_target_p = TRUE;
	  break;

	case R_ARM_ABS12:
	  if (!globals->vxworks_p)
	    {
	      may_need_local_target_p = TRUE;
	      break;
	    }
	  /* Fall through.  */
	case R_ARM_ABS32:
	case R_ARM_ABS32_NOI:
	case R_ARM_REL32:
	case R_ARM_REL32_NOI:
	case R_ARM_MOVW_ABS_NC:
	case R_ARM_MOVT_ABS:
	case R_ARM_MOVW_PREL_NC:
	case R_ARM_MOVT_PREL:
	case R_ARM_THM_MOVW_ABS_NC:
	case R_ARM_THM_MOVT_ABS:
	case R_ARM_THM_MOVW_PREL_NC:
	case R_ARM_THM_MOVT_PREL:
	  /* Should the interworking branches be here also?  */
	  if ((bfd_link_pic (info) || globals->root.is_relocatable_executable)
	      && (sec->flags & SEC_ALLOC) != 0)
	    {
	      if (h == NULL
		  && elf32_arm_howto_from_type (r_type)->pc_relative)
		{
		  call_reloc_p = TRUE;
		  may_need_local_target_p = TRUE;
		}
	      else
		may_become_dynamic_p = TRUE;
	    }
	  else
	    may_need_local_target_p = TRUE;
	  break;

	default:
	  break;
	}

      /* Undo the PLT accounting done by check_relocs, mirroring the
	 increments it made for this relocation type.  */
      if (may_need_local_target_p
	  && elf32_arm_get_plt_info (abfd, eh, r_symndx, &root_plt, &arm_plt))
	{
	  /* If PLT refcount book-keeping is wrong and too low, we'll
	     see a zero value (going to -1) for the root PLT reference
	     count.  */
	  if (root_plt->refcount >= 0)
	    {
	      BFD_ASSERT (root_plt->refcount != 0);
	      root_plt->refcount -= 1;
	    }
	  else
	    /* A value of -1 means the symbol has become local, forced
	       or seeing a hidden definition.  Any other negative value
	       is an error.  */
	    BFD_ASSERT (root_plt->refcount == -1);

	  if (!call_reloc_p)
	    arm_plt->noncall_refcount--;

	  if (r_type == R_ARM_THM_CALL)
	    arm_plt->maybe_thumb_refcount--;

	  if (r_type == R_ARM_THM_JUMP24
	      || r_type == R_ARM_THM_JUMP19)
	    arm_plt->thumb_refcount--;
	}

      if (may_become_dynamic_p)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  if (h != NULL)
	    pp = &(eh->dyn_relocs);
	  else
	    {
	      Elf_Internal_Sym *isym;

	      isym = bfd_sym_from_r_symndx (&globals->sym_cache,
					    abfd, r_symndx);
	      if (isym == NULL)
		return FALSE;
	      pp = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
	      if (pp == NULL)
		return FALSE;
	    }
	  /* Unlink the dynamic-reloc record for SEC from the list.  */
	  for (; (p = *pp) != NULL; pp = &p->next)
	    if (p->sec == sec)
	      {
		/* Everything must go for SEC.  */
		*pp = p->next;
		break;
	      }
	}
    }

  return TRUE;
}
13623
13624 /* Look through the relocs for a section during the first phase. */
13625
static bfd_boolean
elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
			asection *sec, const Elf_Internal_Rela *relocs)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  const Elf_Internal_Rela *rel;
  const Elf_Internal_Rela *rel_end;
  bfd *dynobj;
  asection *sreloc;
  struct elf32_arm_link_hash_table *htab;
  bfd_boolean call_reloc_p;
  bfd_boolean may_become_dynamic_p;
  bfd_boolean may_need_local_target_p;
  unsigned long nsyms;

  if (bfd_link_relocatable (info))
    return TRUE;

  BFD_ASSERT (is_arm_elf (abfd));

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  sreloc = NULL;

  /* Create dynamic sections for relocatable executables so that we can
     copy relocations.  */
  if (htab->root.is_relocatable_executable
      && ! htab->root.dynamic_sections_created)
    {
      if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
	return FALSE;
    }

  if (htab->root.dynobj == NULL)
    htab->root.dynobj = abfd;
  if (!create_ifunc_sections (info))
    return FALSE;

  dynobj = htab->root.dynobj;

  symtab_hdr = & elf_symtab_hdr (abfd);
  sym_hashes = elf_sym_hashes (abfd);
  nsyms = NUM_SHDR_ENTRIES (symtab_hdr);

  /* Examine every relocation in SEC, recording GOT/PLT/dynamic-reloc
     requirements for the symbols it references.  */
  rel_end = relocs + sec->reloc_count;
  for (rel = relocs; rel < rel_end; rel++)
    {
      Elf_Internal_Sym *isym;
      struct elf_link_hash_entry *h;
      struct elf32_arm_link_hash_entry *eh;
      unsigned long r_symndx;
      int r_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      r_type = arm_real_reloc_type (htab, r_type);

      if (r_symndx >= nsyms
	  /* PR 9934: It is possible to have relocations that do not
	     refer to symbols, thus it is also possible to have an
	     object file containing relocations but no symbol table.  */
	  && (r_symndx > STN_UNDEF || nsyms > 0))
	{
	  (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
				 r_symndx);
	  return FALSE;
	}

      h = NULL;
      isym = NULL;
      if (nsyms > 0)
	{
	  if (r_symndx < symtab_hdr->sh_info)
	    {
	      /* A local symbol.  */
	      isym = bfd_sym_from_r_symndx (&htab->sym_cache,
					    abfd, r_symndx);
	      if (isym == NULL)
		return FALSE;
	    }
	  else
	    {
	      /* A global symbol: chase indirect and warning links down
		 to the real symbol.  */
	      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
	      while (h->root.type == bfd_link_hash_indirect
		     || h->root.type == bfd_link_hash_warning)
		h = (struct elf_link_hash_entry *) h->root.u.i.link;

	      /* PR15323, ref flags aren't set for references in the
		 same object.  */
	      h->root.non_ir_ref = 1;
	    }
	}

      eh = (struct elf32_arm_link_hash_entry *) h;

      call_reloc_p = FALSE;
      may_become_dynamic_p = FALSE;
      may_need_local_target_p = FALSE;

      /* Could be done earlier, if h were already available.  */
      r_type = elf32_arm_tls_transition (info, r_type, h);
      switch (r_type)
	{
	case R_ARM_GOT32:
	case R_ARM_GOT_PREL:
	case R_ARM_TLS_GD32:
	case R_ARM_TLS_IE32:
	case R_ARM_TLS_GOTDESC:
	case R_ARM_TLS_DESCSEQ:
	case R_ARM_THM_TLS_DESCSEQ:
	case R_ARM_TLS_CALL:
	case R_ARM_THM_TLS_CALL:
	  /* This symbol requires a global offset table entry.  */
	  {
	    int tls_type, old_tls_type;

	    switch (r_type)
	      {
	      case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;

	      case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;

	      case R_ARM_TLS_GOTDESC:
	      case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
	      case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
		tls_type = GOT_TLS_GDESC; break;

	      default: tls_type = GOT_NORMAL; break;
	      }

	    if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
	      info->flags |= DF_STATIC_TLS;

	    if (h != NULL)
	      {
		h->got.refcount++;
		old_tls_type = elf32_arm_hash_entry (h)->tls_type;
	      }
	    else
	      {
		/* This is a global offset table entry for a local symbol.  */
		if (!elf32_arm_allocate_local_sym_info (abfd))
		  return FALSE;
		elf_local_got_refcounts (abfd)[r_symndx] += 1;
		old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
	      }

	    /* If a variable is accessed with both tls methods, two
	       slots may be created.  */
	    if (GOT_TLS_GD_ANY_P (old_tls_type)
		&& GOT_TLS_GD_ANY_P (tls_type))
	      tls_type |= old_tls_type;

	    /* We will already have issued an error message if there
	       is a TLS/non-TLS mismatch, based on the symbol
	       type.  So just combine any TLS types needed.  */
	    if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
		&& tls_type != GOT_NORMAL)
	      tls_type |= old_tls_type;

	    /* If the symbol is accessed in both IE and GDESC
	       method, we're able to relax.  Turn off the GDESC flag,
	       without messing up with any other kind of tls types
	       that may be involved.  */
	    if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
	      tls_type &= ~GOT_TLS_GDESC;

	    if (old_tls_type != tls_type)
	      {
		if (h != NULL)
		  elf32_arm_hash_entry (h)->tls_type = tls_type;
		else
		  elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
	      }
	  }
	  /* Fall through.  */

	case R_ARM_TLS_LDM32:
	  if (r_type == R_ARM_TLS_LDM32)
	    htab->tls_ldm_got.refcount++;
	  /* Fall through.  */

	case R_ARM_GOTOFF32:
	case R_ARM_GOTPC:
	  /* All GOT-relative relocations need the .got section itself.  */
	  if (htab->root.sgot == NULL
	      && !create_got_section (htab->root.dynobj, info))
	    return FALSE;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	case R_ARM_PREL31:
	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
	case R_ARM_THM_JUMP19:
	  call_reloc_p = TRUE;
	  may_need_local_target_p = TRUE;
	  break;

	case R_ARM_ABS12:
	  /* VxWorks uses dynamic R_ARM_ABS12 relocations for
	     ldr __GOTT_INDEX__ offsets.  */
	  if (!htab->vxworks_p)
	    {
	      may_need_local_target_p = TRUE;
	      break;
	    }
	  else goto jump_over;

	  /* Fall through.  */

	case R_ARM_MOVW_ABS_NC:
	case R_ARM_MOVT_ABS:
	case R_ARM_THM_MOVW_ABS_NC:
	case R_ARM_THM_MOVT_ABS:
	  /* Absolute MOVW/MOVT pairs cannot be relocated at load time,
	     so they are forbidden in position-independent output.  */
	  if (bfd_link_pic (info))
	    {
	      (*_bfd_error_handler)
		(_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
		 abfd, elf32_arm_howto_table_1[r_type].name,
		 (h) ? h->root.root.string : "a local symbol");
	      bfd_set_error (bfd_error_bad_value);
	      return FALSE;
	    }

	  /* Fall through.  */
	case R_ARM_ABS32:
	case R_ARM_ABS32_NOI:
	jump_over:
	  if (h != NULL && bfd_link_executable (info))
	    {
	      h->pointer_equality_needed = 1;
	    }
	  /* Fall through.  */
	case R_ARM_REL32:
	case R_ARM_REL32_NOI:
	case R_ARM_MOVW_PREL_NC:
	case R_ARM_MOVT_PREL:
	case R_ARM_THM_MOVW_PREL_NC:
	case R_ARM_THM_MOVT_PREL:

	  /* Should the interworking branches be listed here?  */
	  if ((bfd_link_pic (info) || htab->root.is_relocatable_executable)
	      && (sec->flags & SEC_ALLOC) != 0)
	    {
	      if (h == NULL
		  && elf32_arm_howto_from_type (r_type)->pc_relative)
		{
		  /* In shared libraries and relocatable executables,
		     we treat local relative references as calls;
		     see the related SYMBOL_CALLS_LOCAL code in
		     allocate_dynrelocs.  */
		  call_reloc_p = TRUE;
		  may_need_local_target_p = TRUE;
		}
	      else
		/* We are creating a shared library or relocatable
		   executable, and this is a reloc against a global symbol,
		   or a non-PC-relative reloc against a local symbol.
		   We may need to copy the reloc into the output.  */
		may_become_dynamic_p = TRUE;
	    }
	  else
	    may_need_local_target_p = TRUE;
	  break;

	/* This relocation describes the C++ object vtable hierarchy.
	   Reconstruct it for later use during GC.  */
	case R_ARM_GNU_VTINHERIT:
	  if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
	    return FALSE;
	  break;

	/* This relocation describes which C++ vtable entries are actually
	   used.  Record for later use during GC.  */
	case R_ARM_GNU_VTENTRY:
	  BFD_ASSERT (h != NULL);
	  if (h != NULL
	      && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
	    return FALSE;
	  break;
	}

      if (h != NULL)
	{
	  if (call_reloc_p)
	    /* We may need a .plt entry if the function this reloc
	       refers to is in a different object, regardless of the
	       symbol's type.  We can't tell for sure yet, because
	       something later might force the symbol local.  */
	    h->needs_plt = 1;
	  else if (may_need_local_target_p)
	    /* If this reloc is in a read-only section, we might
	       need a copy reloc.  We can't check reliably at this
	       stage whether the section is read-only, as input
	       sections have not yet been mapped to output sections.
	       Tentatively set the flag for now, and correct in
	       adjust_dynamic_symbol.  */
	    h->non_got_ref = 1;
	}

      /* Record PLT-related reference counts; these are undone by
	 elf32_arm_gc_sweep_hook if the section is discarded.  */
      if (may_need_local_target_p
	  && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
	{
	  union gotplt_union *root_plt;
	  struct arm_plt_info *arm_plt;
	  struct arm_local_iplt_info *local_iplt;

	  if (h != NULL)
	    {
	      root_plt = &h->plt;
	      arm_plt = &eh->plt;
	    }
	  else
	    {
	      local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
	      if (local_iplt == NULL)
		return FALSE;
	      root_plt = &local_iplt->root;
	      arm_plt = &local_iplt->arm;
	    }

	  /* If the symbol is a function that doesn't bind locally,
	     this relocation will need a PLT entry.  */
	  if (root_plt->refcount != -1)
	    root_plt->refcount += 1;

	  if (!call_reloc_p)
	    arm_plt->noncall_refcount++;

	  /* It's too early to use htab->use_blx here, so we have to
	     record possible blx references separately from
	     relocs that definitely need a thumb stub.  */

	  if (r_type == R_ARM_THM_CALL)
	    arm_plt->maybe_thumb_refcount += 1;

	  if (r_type == R_ARM_THM_JUMP24
	      || r_type == R_ARM_THM_JUMP19)
	    arm_plt->thumb_refcount += 1;
	}

      if (may_become_dynamic_p)
	{
	  struct elf_dyn_relocs *p, **head;

	  /* Create a reloc section in dynobj.  */
	  if (sreloc == NULL)
	    {
	      sreloc = _bfd_elf_make_dynamic_reloc_section
		(sec, dynobj, 2, abfd, ! htab->use_rel);

	      if (sreloc == NULL)
		return FALSE;

	      /* BPABI objects never have dynamic relocations mapped.  */
	      if (htab->symbian_p)
		{
		  flagword flags;

		  flags = bfd_get_section_flags (dynobj, sreloc);
		  flags &= ~(SEC_LOAD | SEC_ALLOC);
		  bfd_set_section_flags (dynobj, sreloc, flags);
		}
	    }

	  /* If this is a global symbol, count the number of
	     relocations we need for this symbol.  */
	  if (h != NULL)
	    head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
	  else
	    {
	      head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
	      if (head == NULL)
		return FALSE;
	    }

	  /* The list is kept with the record for the current section at
	     its head, so only allocate a new record when SEC changes.  */
	  p = *head;
	  if (p == NULL || p->sec != sec)
	    {
	      bfd_size_type amt = sizeof *p;

	      p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
	      if (p == NULL)
		return FALSE;
	      p->next = *head;
	      *head = p;
	      p->sec = sec;
	      p->count = 0;
	      p->pc_count = 0;
	    }

	  if (elf32_arm_howto_from_type (r_type)->pc_relative)
	    p->pc_count += 1;
	  p->count += 1;
	}
    }

  return TRUE;
}
14031
14032 /* Unwinding tables are not referenced directly. This pass marks them as
14033 required if the corresponding code section is marked. */
14034
14035 static bfd_boolean
14036 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
14037 elf_gc_mark_hook_fn gc_mark_hook)
14038 {
14039 bfd *sub;
14040 Elf_Internal_Shdr **elf_shdrp;
14041 bfd_boolean again;
14042
14043 _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
14044
14045 /* Marking EH data may cause additional code sections to be marked,
14046 requiring multiple passes. */
14047 again = TRUE;
14048 while (again)
14049 {
14050 again = FALSE;
14051 for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
14052 {
14053 asection *o;
14054
14055 if (! is_arm_elf (sub))
14056 continue;
14057
14058 elf_shdrp = elf_elfsections (sub);
14059 for (o = sub->sections; o != NULL; o = o->next)
14060 {
14061 Elf_Internal_Shdr *hdr;
14062
14063 hdr = &elf_section_data (o)->this_hdr;
14064 if (hdr->sh_type == SHT_ARM_EXIDX
14065 && hdr->sh_link
14066 && hdr->sh_link < elf_numsections (sub)
14067 && !o->gc_mark
14068 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
14069 {
14070 again = TRUE;
14071 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
14072 return FALSE;
14073 }
14074 }
14075 }
14076 }
14077
14078 return TRUE;
14079 }
14080
14081 /* Treat mapping symbols as special target symbols. */
14082
14083 static bfd_boolean
14084 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
14085 {
14086 return bfd_is_arm_special_symbol_name (sym->name,
14087 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
14088 }
14089
14090 /* This is a copy of elf_find_function() from elf.c except that
14091 ARM mapping symbols are ignored when looking for function names
14092 and STT_ARM_TFUNC is considered to a function type. */
14093
14094 static bfd_boolean
14095 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
14096 asymbol ** symbols,
14097 asection * section,
14098 bfd_vma offset,
14099 const char ** filename_ptr,
14100 const char ** functionname_ptr)
14101 {
14102 const char * filename = NULL;
14103 asymbol * func = NULL;
14104 bfd_vma low_func = 0;
14105 asymbol ** p;
14106
14107 for (p = symbols; *p != NULL; p++)
14108 {
14109 elf_symbol_type *q;
14110
14111 q = (elf_symbol_type *) *p;
14112
14113 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
14114 {
14115 default:
14116 break;
14117 case STT_FILE:
14118 filename = bfd_asymbol_name (&q->symbol);
14119 break;
14120 case STT_FUNC:
14121 case STT_ARM_TFUNC:
14122 case STT_NOTYPE:
14123 /* Skip mapping symbols. */
14124 if ((q->symbol.flags & BSF_LOCAL)
14125 && bfd_is_arm_special_symbol_name (q->symbol.name,
14126 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
14127 continue;
14128 /* Fall through. */
14129 if (bfd_get_section (&q->symbol) == section
14130 && q->symbol.value >= low_func
14131 && q->symbol.value <= offset)
14132 {
14133 func = (asymbol *) q;
14134 low_func = q->symbol.value;
14135 }
14136 break;
14137 }
14138 }
14139
14140 if (func == NULL)
14141 return FALSE;
14142
14143 if (filename_ptr)
14144 *filename_ptr = filename;
14145 if (functionname_ptr)
14146 *functionname_ptr = bfd_asymbol_name (func);
14147
14148 return TRUE;
14149 }
14150
14151
14152 /* Find the nearest line to a particular section and offset, for error
14153 reporting. This code is a duplicate of the code in elf.c, except
14154 that it uses arm_elf_find_function. */
14155
static bfd_boolean
elf32_arm_find_nearest_line (bfd * abfd,
			     asymbol ** symbols,
			     asection * section,
			     bfd_vma offset,
			     const char ** filename_ptr,
			     const char ** functionname_ptr,
			     unsigned int * line_ptr,
			     unsigned int * discriminator_ptr)
{
  bfd_boolean found = FALSE;

  /* Try DWARF2 first; on success fill in any missing function name from
     the symbol table using the ARM-aware lookup.  */
  if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
				     filename_ptr, functionname_ptr,
				     line_ptr, discriminator_ptr,
				     dwarf_debug_sections, 0,
				     & elf_tdata (abfd)->dwarf2_find_line_info))
    {
      if (!*functionname_ptr)
	arm_elf_find_function (abfd, symbols, section, offset,
			       *filename_ptr ? NULL : filename_ptr,
			       functionname_ptr);

      return TRUE;
    }

  /* Skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain
     uses DWARF1.  */

  /* Next try stabs debugging information.  */
  if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
					     & found, filename_ptr,
					     functionname_ptr, line_ptr,
					     & elf_tdata (abfd)->line_info))
    return FALSE;

  if (found && (*functionname_ptr || *line_ptr))
    return TRUE;

  if (symbols == NULL)
    return FALSE;

  /* Last resort: scan the symbol table.  No line number is available
     from this source, so report line 0.  */
  if (! arm_elf_find_function (abfd, symbols, section, offset,
			       filename_ptr, functionname_ptr))
    return FALSE;

  *line_ptr = 0;
  return TRUE;
}
14204
14205 static bfd_boolean
14206 elf32_arm_find_inliner_info (bfd * abfd,
14207 const char ** filename_ptr,
14208 const char ** functionname_ptr,
14209 unsigned int * line_ptr)
14210 {
14211 bfd_boolean found;
14212 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
14213 functionname_ptr, line_ptr,
14214 & elf_tdata (abfd)->dwarf2_find_line_info);
14215 return found;
14216 }
14217
14218 /* Adjust a symbol defined by a dynamic object and referenced by a
14219 regular object. The current definition is in some section of the
14220 dynamic object, but we're not including those sections. We have to
14221 change the definition to something the rest of the link can
14222 understand. */
14223
static bfd_boolean
elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
				 struct elf_link_hash_entry * h)
{
  bfd * dynobj;
  asection * s;
  struct elf32_arm_link_hash_entry * eh;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  /* Make sure we know what is going on here.  */
  BFD_ASSERT (dynobj != NULL
	      && (h->needs_plt
		  || h->type == STT_GNU_IFUNC
		  || h->u.weakdef != NULL
		  || (h->def_dynamic
		      && h->ref_regular
		      && !h->def_regular)));

  eh = (struct elf32_arm_link_hash_entry *) h;

  /* If this is a function, put it in the procedure linkage table.  We
     will fill in the contents of the procedure linkage table later,
     when we know the address of the .got section.  */
  if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
    {
      /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
	 symbol binds locally.  */
      if (h->plt.refcount <= 0
	  || (h->type != STT_GNU_IFUNC
	      && (SYMBOL_CALLS_LOCAL (info, h)
		  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
		      && h->root.type == bfd_link_hash_undefweak))))
	{
	  /* This case can occur if we saw a PLT32 reloc in an input
	     file, but the symbol was never referred to by a dynamic
	     object, or if all references were garbage collected.  In
	     such a case, we don't actually need to build a procedure
	     linkage table, and we can just do a PC24 reloc instead.  */
	  h->plt.offset = (bfd_vma) -1;
	  eh->plt.thumb_refcount = 0;
	  eh->plt.maybe_thumb_refcount = 0;
	  eh->plt.noncall_refcount = 0;
	  h->needs_plt = 0;
	}

      return TRUE;
    }
  else
    {
      /* It's possible that we incorrectly decided a .plt reloc was
	 needed for an R_ARM_PC24 or similar reloc to a non-function sym
	 in check_relocs.  We can't decide accurately between function
	 and non-function syms in check-relocs;  Objects loaded later in
	 the link may change h->type.  So fix it now.  */
      h->plt.offset = (bfd_vma) -1;
      eh->plt.thumb_refcount = 0;
      eh->plt.maybe_thumb_refcount = 0;
      eh->plt.noncall_refcount = 0;
    }

  /* If this is a weak symbol, and there is a real definition, the
     processor independent code will have arranged for us to see the
     real definition first, and we can just use the same value.  */
  if (h->u.weakdef != NULL)
    {
      BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
		  || h->u.weakdef->root.type == bfd_link_hash_defweak);
      h->root.u.def.section = h->u.weakdef->root.u.def.section;
      h->root.u.def.value = h->u.weakdef->root.u.def.value;
      return TRUE;
    }

  /* If there are no non-GOT references, we do not need a copy
     relocation.  */
  if (!h->non_got_ref)
    return TRUE;

  /* This is a reference to a symbol defined by a dynamic object which
     is not a function.  */

  /* If we are creating a shared library, we must presume that the
     only references to the symbol are via the global offset table.
     For such cases we need not do anything here; the relocations will
     be handled correctly by relocate_section.  Relocatable executables
     can reference data in shared objects directly, so we don't need to
     do anything here.  */
  if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
    return TRUE;

  /* We must allocate the symbol in our .dynbss section, which will
     become part of the .bss section of the executable.  There will be
     an entry for this symbol in the .dynsym section.  The dynamic
     object will contain position independent code, so all references
     from the dynamic object to this symbol will go through the global
     offset table.  The dynamic linker will use the .dynsym entry to
     determine the address it must put in the global offset table, so
     both the dynamic object and the regular object will refer to the
     same memory location for the variable.  */
  s = bfd_get_linker_section (dynobj, ".dynbss");
  BFD_ASSERT (s != NULL);

  /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
     linker to copy the initial value out of the dynamic object and into
     the runtime process image.  We need to remember the offset into the
     .rel(a).bss section we are going to use.  */
  if (info->nocopyreloc == 0
      && (h->root.u.def.section->flags & SEC_ALLOC) != 0
      && h->size != 0)
    {
      asection *srel;

      srel = bfd_get_linker_section (dynobj, RELOC_SECTION (globals, ".bss"));
      elf32_arm_allocate_dynrelocs (info, srel, 1);
      h->needs_copy = 1;
    }

  /* Reserve the .dynbss space itself (and align the section).  */
  return _bfd_elf_adjust_dynamic_copy (info, h, s);
}
14348
/* Allocate space in .plt, .got and associated reloc sections for
   dynamic relocs.  Called via elf_link_hash_traverse for each global
   symbol H; INF is really the struct bfd_link_info for this link.
   Returns FALSE only on a hard error (which also cuts the traversal
   short).  */

static bfd_boolean
allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
{
  struct bfd_link_info *info;
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;
  struct elf_dyn_relocs *p;

  /* Indirect symbols are handled via the symbol they point to.  */
  if (h->root.type == bfd_link_hash_indirect)
    return TRUE;

  eh = (struct elf32_arm_link_hash_entry *) h;

  info = (struct bfd_link_info *) inf;
  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  /* STT_GNU_IFUNC symbols may need PLT entries even when no dynamic
     sections exist, hence the extra STT_GNU_IFUNC test here.  */
  if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
      && h->plt.refcount > 0)
    {
      /* Make sure this symbol is output as a dynamic symbol.
	 Undefined weak syms won't yet be marked as dynamic.  */
      if (h->dynindx == -1
	  && !h->forced_local)
	{
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

      /* If the call in the PLT entry binds locally, the associated
	 GOT entry should use an R_ARM_IRELATIVE relocation instead of
	 the usual R_ARM_JUMP_SLOT.  Put it in the .iplt section rather
	 than the .plt section.  */
      if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
	{
	  eh->is_iplt = 1;
	  if (eh->plt.noncall_refcount == 0
	      && SYMBOL_REFERENCES_LOCAL (info, h))
	    /* All non-call references can be resolved directly.
	       This means that they can (and in some cases, must)
	       resolve directly to the run-time target, rather than
	       to the PLT.  That in turns means that any .got entry
	       would be equal to the .igot.plt entry, so there's
	       no point having both.  */
	    h->got.refcount = 0;
	}

      if (bfd_link_pic (info)
	  || eh->is_iplt
	  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
	{
	  elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);

	  /* If this symbol is not defined in a regular file, and we are
	     not generating a shared library, then set the symbol to this
	     location in the .plt.  This is required to make function
	     pointers compare as equal between the normal executable and
	     the shared library.  */
	  if (! bfd_link_pic (info)
	      && !h->def_regular)
	    {
	      h->root.u.def.section = htab->root.splt;
	      h->root.u.def.value = h->plt.offset;

	      /* Make sure the function is not marked as Thumb, in case
		 it is the target of an ABS32 relocation, which will
		 point to the PLT entry.  */
	      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
	    }

	  /* VxWorks executables have a second set of relocations for
	     each PLT entry.  They go in a separate relocation section,
	     which is processed by the kernel loader.  */
	  if (htab->vxworks_p && !bfd_link_pic (info))
	    {
	      /* There is a relocation for the initial PLT entry:
		 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_.  */
	      if (h->plt.offset == htab->plt_header_size)
		elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);

	      /* There are two extra relocations for each subsequent
		 PLT entry: an R_ARM_32 relocation for the GOT entry,
		 and an R_ARM_32 relocation for the PLT entry.  */
	      elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
	    }
	}
      else
	{
	  /* A PLT entry is not required or allowed; forget the PLT.  */
	  h->plt.offset = (bfd_vma) -1;
	  h->needs_plt = 0;
	}
    }
  else
    {
      /* Symbol has no PLT references; mark the PLT slot unused.  */
      h->plt.offset = (bfd_vma) -1;
      h->needs_plt = 0;
    }

  eh = (struct elf32_arm_link_hash_entry *) h;
  eh->tlsdesc_got = (bfd_vma) -1;

  if (h->got.refcount > 0)
    {
      asection *s;
      bfd_boolean dyn;
      int tls_type = elf32_arm_hash_entry (h)->tls_type;
      int indx;

      /* Make sure this symbol is output as a dynamic symbol.
	 Undefined weak syms won't yet be marked as dynamic.  */
      if (h->dynindx == -1
	  && !h->forced_local)
	{
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

      /* SymbianOS uses its own GOT handling; skip the generic sizing.  */
      if (!htab->symbian_p)
	{
	  s = htab->root.sgot;
	  h->got.offset = s->size;

	  if (tls_type == GOT_UNKNOWN)
	    abort ();

	  if (tls_type == GOT_NORMAL)
	    /* Non-TLS symbols need one GOT slot.  */
	    s->size += 4;
	  else
	    {
	      if (tls_type & GOT_TLS_GDESC)
		{
		  /* R_ARM_TLS_DESC needs 2 GOT slots.  */
		  eh->tlsdesc_got
		    = (htab->root.sgotplt->size
		       - elf32_arm_compute_jump_table_size (htab));
		  htab->root.sgotplt->size += 8;
		  h->got.offset = (bfd_vma) -2;
		  /* plt.got_offset needs to know there's a TLS_DESC
		     reloc in the middle of .got.plt.  */
		  htab->num_tls_desc++;
		}

	      if (tls_type & GOT_TLS_GD)
		{
		  /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots.  If
		     the symbol is both GD and GDESC, got.offset may
		     have been overwritten.  */
		  h->got.offset = s->size;
		  s->size += 8;
		}

	      if (tls_type & GOT_TLS_IE)
		/* R_ARM_TLS_IE32 needs one GOT slot.  */
		s->size += 4;
	    }

	  dyn = htab->root.dynamic_sections_created;

	  /* INDX is the dynamic symbol index the GOT relocations will
	     use, or 0 when the symbol resolves locally.  */
	  indx = 0;
	  if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
					       bfd_link_pic (info),
					       h)
	      && (!bfd_link_pic (info)
		  || !SYMBOL_REFERENCES_LOCAL (info, h)))
	    indx = h->dynindx;

	  if (tls_type != GOT_NORMAL
	      && (bfd_link_pic (info) || indx != 0)
	      && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
		  || h->root.type != bfd_link_hash_undefweak))
	    {
	      if (tls_type & GOT_TLS_IE)
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);

	      if (tls_type & GOT_TLS_GD)
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);

	      if (tls_type & GOT_TLS_GDESC)
		{
		  elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
		  /* GDESC needs a trampoline to jump to.  */
		  htab->tls_trampoline = -1;
		}

	      /* Only GD needs it.  GDESC just emits one relocation per
		 2 entries.  */
	      if ((tls_type & GOT_TLS_GD) && indx != 0)
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	    }
	  else if (indx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
	    {
	      if (htab->root.dynamic_sections_created)
		/* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation.  */
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	    }
	  else if (h->type == STT_GNU_IFUNC
		   && eh->plt.noncall_refcount == 0)
	    /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
	       they all resolve dynamically instead.  Reserve room for the
	       GOT entry's R_ARM_IRELATIVE relocation.  */
	    elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
	  else if (bfd_link_pic (info)
		   && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
		       || h->root.type != bfd_link_hash_undefweak))
	    /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation.  */
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	}
    }
  else
    h->got.offset = (bfd_vma) -1;

  /* Allocate stubs for exported Thumb functions on v4t.  */
  if (!htab->use_blx && h->dynindx != -1
      && h->def_regular
      && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
      && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
    {
      struct elf_link_hash_entry * th;
      struct bfd_link_hash_entry * bh;
      struct elf_link_hash_entry * myh;
      char name[1024];
      asection *s;
      bh = NULL;
      /* Create a new symbol to register the real location of the function.  */
      s = h->root.u.def.section;
      sprintf (name, "__real_%s", h->root.root.string);
      _bfd_generic_link_add_one_symbol (info, s->owner,
					name, BSF_GLOBAL, s,
					h->root.u.def.value,
					NULL, TRUE, FALSE, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
      myh->forced_local = 1;
      ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
      eh->export_glue = myh;
      th = record_arm_to_thumb_glue (info, h);
      /* Point the symbol at the stub.  */
      h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
      h->root.u.def.section = th->root.u.def.section;
      /* Clear the Thumb bit in the stub's address.  */
      h->root.u.def.value = th->root.u.def.value & ~1;
    }

  if (eh->dyn_relocs == NULL)
    return TRUE;

  /* In the shared -Bsymbolic case, discard space allocated for
     dynamic pc-relative relocs against symbols which turn out to be
     defined in regular objects.  For the normal shared case, discard
     space for pc-relative relocs that have become local due to symbol
     visibility changes.  */

  if (bfd_link_pic (info) || htab->root.is_relocatable_executable)
    {
      /* Relocs that use pc_count are PC-relative forms, which will appear
	 on something like ".long foo - ." or "movw REG, foo - .".  We want
	 calls to protected symbols to resolve directly to the function
	 rather than going via the plt.  If people want function pointer
	 comparisons to work as expected then they should avoid writing
	 assembly like ".long foo - .".  */
      if (SYMBOL_CALLS_LOCAL (info, h))
	{
	  struct elf_dyn_relocs **pp;

	  /* Unlink list entries whose count drops to zero.  */
	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
	    {
	      p->count -= p->pc_count;
	      p->pc_count = 0;
	      if (p->count == 0)
		*pp = p->next;
	      else
		pp = &p->next;
	    }
	}

      if (htab->vxworks_p)
	{
	  struct elf_dyn_relocs **pp;

	  /* Drop relocs destined for .tls_vars; the VxWorks loader
	     handles that section specially.  */
	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
	    {
	      if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
		*pp = p->next;
	      else
		pp = &p->next;
	    }
	}

      /* Also discard relocs on undefined weak syms with non-default
	 visibility.  */
      if (eh->dyn_relocs != NULL
	  && h->root.type == bfd_link_hash_undefweak)
	{
	  if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
	    eh->dyn_relocs = NULL;

	  /* Make sure undefined weak symbols are output as a dynamic
	     symbol in PIEs.  */
	  else if (h->dynindx == -1
		   && !h->forced_local)
	    {
	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
		return FALSE;
	    }
	}

      else if (htab->root.is_relocatable_executable && h->dynindx == -1
	       && h->root.type == bfd_link_hash_new)
	{
	  /* Output absolute symbols so that we can create relocations
	     against them.  For normal symbols we output a relocation
	     against the section that contains them.  */
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

    }
  else
    {
      /* For the non-shared case, discard space for relocs against
	 symbols which turn out to need copy relocs or are not
	 dynamic.  */

      if (!h->non_got_ref
	  && ((h->def_dynamic
	       && !h->def_regular)
	      || (htab->root.dynamic_sections_created
		  && (h->root.type == bfd_link_hash_undefweak
		      || h->root.type == bfd_link_hash_undefined))))
	{
	  /* Make sure this symbol is output as a dynamic symbol.
	     Undefined weak syms won't yet be marked as dynamic.  */
	  if (h->dynindx == -1
	      && !h->forced_local)
	    {
	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
		return FALSE;
	    }

	  /* If that succeeded, we know we'll be keeping all the
	     relocs.  */
	  if (h->dynindx != -1)
	    goto keep;
	}

      eh->dyn_relocs = NULL;

    keep: ;
    }

  /* Finally, allocate space.  */
  for (p = eh->dyn_relocs; p != NULL; p = p->next)
    {
      asection *sreloc = elf_section_data (p->sec)->sreloc;
      if (h->type == STT_GNU_IFUNC
	  && eh->plt.noncall_refcount == 0
	  && SYMBOL_REFERENCES_LOCAL (info, h))
	elf32_arm_allocate_irelocs (info, sreloc, p->count);
      else
	elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
    }

  return TRUE;
}
14719
14720 /* Find any dynamic relocs that apply to read-only sections. */
14721
14722 static bfd_boolean
14723 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
14724 {
14725 struct elf32_arm_link_hash_entry * eh;
14726 struct elf_dyn_relocs * p;
14727
14728 eh = (struct elf32_arm_link_hash_entry *) h;
14729 for (p = eh->dyn_relocs; p != NULL; p = p->next)
14730 {
14731 asection *s = p->sec;
14732
14733 if (s != NULL && (s->flags & SEC_READONLY) != 0)
14734 {
14735 struct bfd_link_info *info = (struct bfd_link_info *) inf;
14736
14737 info->flags |= DF_TEXTREL;
14738
14739 /* Not an error, just cut short the traversal. */
14740 return FALSE;
14741 }
14742 }
14743 return TRUE;
14744 }
14745
14746 void
14747 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
14748 int byteswap_code)
14749 {
14750 struct elf32_arm_link_hash_table *globals;
14751
14752 globals = elf32_arm_hash_table (info);
14753 if (globals == NULL)
14754 return;
14755
14756 globals->byteswap_code = byteswap_code;
14757 }
14758
/* Set the sizes of the dynamic sections.  This walks every input bfd
   to size local GOT/PLT entries and local dynamic relocs, traverses
   the global symbol table via allocate_dynrelocs_for_symbol, sizes the
   interworking glue sections, allocates memory for the linker-created
   dynamic sections, and adds the required .dynamic tags.  */

static bfd_boolean
elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
				 struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * s;
  bfd_boolean plt;
  bfd_boolean relocs;
  bfd *ibfd;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;
  BFD_ASSERT (dynobj != NULL);
  check_use_blx (htab);

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Set the contents of the .interp section to the interpreter.  */
      if (bfd_link_executable (info) && !info->nointerp)
	{
	  s = bfd_get_linker_section (dynobj, ".interp");
	  BFD_ASSERT (s != NULL);
	  s->size = sizeof ELF_DYNAMIC_INTERPRETER;
	  s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
	}
    }

  /* Set up .got offsets for local syms, and space for local dynamic
     relocs.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      bfd_signed_vma *local_got;
      bfd_signed_vma *end_local_got;
      struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
      char *local_tls_type;
      bfd_vma *local_tlsdesc_gotent;
      bfd_size_type locsymcount;
      Elf_Internal_Shdr *symtab_hdr;
      asection *srel;
      bfd_boolean is_vxworks = htab->vxworks_p;
      unsigned int symndx;

      if (! is_arm_elf (ibfd))
	continue;

      /* First, reserve space for the dynamic relocs recorded against
	 local symbols in each input section.  */
      for (s = ibfd->sections; s != NULL; s = s->next)
	{
	  struct elf_dyn_relocs *p;

	  for (p = (struct elf_dyn_relocs *)
		   elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
	    {
	      if (!bfd_is_abs_section (p->sec)
		  && bfd_is_abs_section (p->sec->output_section))
		{
		  /* Input section has been discarded, either because
		     it is a copy of a linkonce section or due to
		     linker script /DISCARD/, so we'll be discarding
		     the relocs too.  */
		}
	      else if (is_vxworks
		       && strcmp (p->sec->output_section->name,
				  ".tls_vars") == 0)
		{
		  /* Relocations in vxworks .tls_vars sections are
		     handled specially by the loader.  */
		}
	      else if (p->count != 0)
		{
		  srel = elf_section_data (p->sec)->sreloc;
		  elf32_arm_allocate_dynrelocs (info, srel, p->count);
		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
		    info->flags |= DF_TEXTREL;
		}
	    }
	}

      local_got = elf_local_got_refcounts (ibfd);
      if (!local_got)
	continue;

      /* Walk the per-bfd parallel arrays of local-symbol GOT/iplt/TLS
	 information in lock-step.  */
      symtab_hdr = & elf_symtab_hdr (ibfd);
      locsymcount = symtab_hdr->sh_info;
      end_local_got = local_got + locsymcount;
      local_iplt_ptr = elf32_arm_local_iplt (ibfd);
      local_tls_type = elf32_arm_local_got_tls_type (ibfd);
      local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
      symndx = 0;
      s = htab->root.sgot;
      srel = htab->root.srelgot;
      for (; local_got < end_local_got;
	   ++local_got, ++local_iplt_ptr, ++local_tls_type,
	   ++local_tlsdesc_gotent, ++symndx)
	{
	  *local_tlsdesc_gotent = (bfd_vma) -1;
	  local_iplt = *local_iplt_ptr;
	  if (local_iplt != NULL)
	    {
	      struct elf_dyn_relocs *p;

	      if (local_iplt->root.refcount > 0)
		{
		  elf32_arm_allocate_plt_entry (info, TRUE,
						&local_iplt->root,
						&local_iplt->arm);
		  if (local_iplt->arm.noncall_refcount == 0)
		    /* All references to the PLT are calls, so all
		       non-call references can resolve directly to the
		       run-time target.  This means that the .got entry
		       would be the same as the .igot.plt entry, so there's
		       no point creating both.  */
		    *local_got = 0;
		}
	      else
		{
		  BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
		  local_iplt->root.offset = (bfd_vma) -1;
		}

	      for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
		{
		  asection *psrel;

		  psrel = elf_section_data (p->sec)->sreloc;
		  if (local_iplt->arm.noncall_refcount == 0)
		    elf32_arm_allocate_irelocs (info, psrel, p->count);
		  else
		    elf32_arm_allocate_dynrelocs (info, psrel, p->count);
		}
	    }
	  if (*local_got > 0)
	    {
	      Elf_Internal_Sym *isym;

	      *local_got = s->size;
	      if (*local_tls_type & GOT_TLS_GD)
		/* TLS_GD relocs need an 8-byte structure in the GOT.  */
		s->size += 8;
	      if (*local_tls_type & GOT_TLS_GDESC)
		{
		  *local_tlsdesc_gotent = htab->root.sgotplt->size
		    - elf32_arm_compute_jump_table_size (htab);
		  htab->root.sgotplt->size += 8;
		  *local_got = (bfd_vma) -2;
		  /* plt.got_offset needs to know there's a TLS_DESC
		     reloc in the middle of .got.plt.  */
		  htab->num_tls_desc++;
		}
	      if (*local_tls_type & GOT_TLS_IE)
		s->size += 4;

	      if (*local_tls_type & GOT_NORMAL)
		{
		  /* If the symbol is both GD and GDESC, *local_got
		     may have been overwritten.  */
		  *local_got = s->size;
		  s->size += 4;
		}

	      isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
	      if (isym == NULL)
		return FALSE;

	      /* If all references to an STT_GNU_IFUNC PLT are calls,
		 then all non-call references, including this GOT entry,
		 resolve directly to the run-time target.  */
	      if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
		  && (local_iplt == NULL
		      || local_iplt->arm.noncall_refcount == 0))
		elf32_arm_allocate_irelocs (info, srel, 1);
	      else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC)
		{
		  if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC))
		      || *local_tls_type & GOT_TLS_GD)
		    elf32_arm_allocate_dynrelocs (info, srel, 1);

		  if (bfd_link_pic (info) && *local_tls_type & GOT_TLS_GDESC)
		    {
		      elf32_arm_allocate_dynrelocs (info,
						    htab->root.srelplt, 1);
		      htab->tls_trampoline = -1;
		    }
		}
	    }
	  else
	    *local_got = (bfd_vma) -1;
	}
    }

  if (htab->tls_ldm_got.refcount > 0)
    {
      /* Allocate two GOT entries and one dynamic relocation (if necessary)
	 for R_ARM_TLS_LDM32 relocations.  */
      htab->tls_ldm_got.offset = htab->root.sgot->size;
      htab->root.sgot->size += 8;
      if (bfd_link_pic (info))
	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
    }
  else
    htab->tls_ldm_got.offset = -1;

  /* Allocate global sym .plt and .got entries, and space for global
     sym dynamic relocs.  */
  elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);

  /* Here we rummage through the found bfds to collect glue information.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      if (! is_arm_elf (ibfd))
	continue;

      /* Initialise mapping tables for code/data.  */
      bfd_elf32_arm_init_maps (ibfd);

      if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
	  || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
	  || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
	/* xgettext:c-format */
	_bfd_error_handler (_("Errors encountered processing file %s"),
			    ibfd->filename);
    }

  /* Allocate space for the glue sections now that we've sized them.  */
  bfd_elf32_arm_allocate_interworking_sections (info);

  /* For every jump slot reserved in the sgotplt, reloc_count is
     incremented.  However, when we reserve space for TLS descriptors,
     it's not incremented, so in order to compute the space reserved
     for them, it suffices to multiply the reloc count by the jump
     slot size.  */
  if (htab->root.srelplt)
    htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size(htab);

  /* tls_trampoline is non-zero (set to -1 above) when GDESC
     relocations require a TLS trampoline in the PLT.  */
  if (htab->tls_trampoline)
    {
      if (htab->root.splt->size == 0)
	htab->root.splt->size += htab->plt_header_size;

      htab->tls_trampoline = htab->root.splt->size;
      htab->root.splt->size += htab->plt_entry_size;

      /* If we're not using lazy TLS relocations, don't generate the
	 PLT and GOT entries they require.  */
      if (!(info->flags & DF_BIND_NOW))
	{
	  htab->dt_tlsdesc_got = htab->root.sgot->size;
	  htab->root.sgot->size += 4;

	  htab->dt_tlsdesc_plt = htab->root.splt->size;
	  htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
	}
    }

  /* The check_relocs and adjust_dynamic_symbol entry points have
     determined the sizes of the various dynamic sections.  Allocate
     memory for them.  */
  plt = FALSE;
  relocs = FALSE;
  for (s = dynobj->sections; s != NULL; s = s->next)
    {
      const char * name;

      if ((s->flags & SEC_LINKER_CREATED) == 0)
	continue;

      /* It's OK to base decisions on the section name, because none
	 of the dynobj section names depend upon the input files.  */
      name = bfd_get_section_name (dynobj, s);

      if (s == htab->root.splt)
	{
	  /* Remember whether there is a PLT.  */
	  plt = s->size != 0;
	}
      else if (CONST_STRNEQ (name, ".rel"))
	{
	  if (s->size != 0)
	    {
	      /* Remember whether there are any reloc sections other
		 than .rel(a).plt and .rela.plt.unloaded.  */
	      if (s != htab->root.srelplt && s != htab->srelplt2)
		relocs = TRUE;

	      /* We use the reloc_count field as a counter if we need
		 to copy relocs into the output file.  */
	      s->reloc_count = 0;
	    }
	}
      else if (s != htab->root.sgot
	       && s != htab->root.sgotplt
	       && s != htab->root.iplt
	       && s != htab->root.igotplt
	       && s != htab->sdynbss)
	{
	  /* It's not one of our sections, so don't allocate space.  */
	  continue;
	}

      if (s->size == 0)
	{
	  /* If we don't need this section, strip it from the
	     output file.  This is mostly to handle .rel(a).bss and
	     .rel(a).plt.  We must create both sections in
	     create_dynamic_sections, because they must be created
	     before the linker maps input sections to output
	     sections.  The linker does that before
	     adjust_dynamic_symbol is called, and it is that
	     function which decides whether anything needs to go
	     into these sections.  */
	  s->flags |= SEC_EXCLUDE;
	  continue;
	}

      if ((s->flags & SEC_HAS_CONTENTS) == 0)
	continue;

      /* Allocate memory for the section contents.  */
      s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
      if (s->contents == NULL)
	return FALSE;
    }

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Add some entries to the .dynamic section.  We fill in the
	 values later, in elf32_arm_finish_dynamic_sections, but we
	 must add the entries now so that we get the correct size for
	 the .dynamic section.  The DT_DEBUG entry is filled in by the
	 dynamic linker and used by the debugger.  */
#define add_dynamic_entry(TAG, VAL) \
  _bfd_elf_add_dynamic_entry (info, TAG, VAL)

      if (bfd_link_executable (info))
	{
	  if (!add_dynamic_entry (DT_DEBUG, 0))
	    return FALSE;
	}

      if (plt)
	{
	  if (   !add_dynamic_entry (DT_PLTGOT, 0)
	      || !add_dynamic_entry (DT_PLTRELSZ, 0)
	      || !add_dynamic_entry (DT_PLTREL,
				     htab->use_rel ? DT_REL : DT_RELA)
	      || !add_dynamic_entry (DT_JMPREL, 0))
	    return FALSE;

	  if (htab->dt_tlsdesc_plt &&
		(!add_dynamic_entry (DT_TLSDESC_PLT,0)
		 || !add_dynamic_entry (DT_TLSDESC_GOT,0)))
	    return FALSE;
	}

      if (relocs)
	{
	  if (htab->use_rel)
	    {
	      if (!add_dynamic_entry (DT_REL, 0)
		  || !add_dynamic_entry (DT_RELSZ, 0)
		  || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
		return FALSE;
	    }
	  else
	    {
	      if (!add_dynamic_entry (DT_RELA, 0)
		  || !add_dynamic_entry (DT_RELASZ, 0)
		  || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
		return FALSE;
	    }
	}

      /* If any dynamic relocs apply to a read-only section,
	 then we need a DT_TEXTREL entry.  */
      if ((info->flags & DF_TEXTREL) == 0)
	elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
				info);

      if ((info->flags & DF_TEXTREL) != 0)
	{
	  if (!add_dynamic_entry (DT_TEXTREL, 0))
	    return FALSE;
	}
      if (htab->vxworks_p
	  && !elf_vxworks_add_dynamic_entries (output_bfd, info))
	return FALSE;
    }
#undef add_dynamic_entry

  return TRUE;
}
15155
/* Size sections even though they're not dynamic.  We use it to setup
   _TLS_MODULE_BASE_, if needed.  The symbol is created as a hidden,
   regular-defined STT_TLS symbol at offset 0 of the TLS segment.  */

static bfd_boolean
elf32_arm_always_size_sections (bfd *output_bfd,
				struct bfd_link_info *info)
{
  asection *tls_sec;

  /* Relocatable links keep their relocs; nothing to synthesise.  */
  if (bfd_link_relocatable (info))
    return TRUE;

  tls_sec = elf_hash_table (info)->tls_sec;

  if (tls_sec)
    {
      struct elf_link_hash_entry *tlsbase;

      /* Look up (creating if necessary) the _TLS_MODULE_BASE_ hash
	 entry.  */
      tlsbase = elf_link_hash_lookup
	(elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);

      if (tlsbase)
	{
	  struct bfd_link_hash_entry *bh = NULL;
	  const struct elf_backend_data *bed
	    = get_elf_backend_data (output_bfd);

	  /* Define the symbol at the start of the TLS section.  */
	  if (!(_bfd_generic_link_add_one_symbol
		(info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
		 tls_sec, 0, NULL, FALSE,
		 bed->collect, &bh)))
	    return FALSE;

	  /* NOTE(review): TYPE is set via the looked-up entry before
	     TLSBASE is re-pointed at BH; the two are expected to name
	     the same hash entry — confirm before reordering.  */
	  tlsbase->type = STT_TLS;
	  tlsbase = (struct elf_link_hash_entry *)bh;
	  tlsbase->def_regular = 1;
	  tlsbase->other = STV_HIDDEN;
	  (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
	}
    }
  return TRUE;
}
15198
/* Finish up dynamic symbol handling.  We set the contents of various
   dynamic sections here.  Fills in the PLT entry for H (if any),
   emits its R_ARM_COPY reloc (if needed), and fixes up the output
   symbol SYM accordingly.  */

static bfd_boolean
elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
				 struct bfd_link_info * info,
				 struct elf_link_hash_entry * h,
				 Elf_Internal_Sym * sym)
{
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  eh = (struct elf32_arm_link_hash_entry *) h;

  /* plt.offset == -1 means no PLT entry was allocated for H.  */
  if (h->plt.offset != (bfd_vma) -1)
    {
      if (!eh->is_iplt)
	{
	  BFD_ASSERT (h->dynindx != -1);
	  if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
					      h->dynindx, 0))
	    return FALSE;
	}

      if (!h->def_regular)
	{
	  /* Mark the symbol as undefined, rather than as defined in
	     the .plt section.  */
	  sym->st_shndx = SHN_UNDEF;
	  /* If the symbol is weak we need to clear the value.
	     Otherwise, the PLT entry would provide a definition for
	     the symbol even if the symbol wasn't defined anywhere,
	     and so the symbol would never be NULL.  Leave the value if
	     there were any relocations where pointer equality matters
	     (this is a clue for the dynamic linker, to make function
	     pointer comparisons work between an application and shared
	     library).  */
	  if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
	    sym->st_value = 0;
	}
      else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
	{
	  /* At least one non-call relocation references this .iplt entry,
	     so the .iplt entry is the function's canonical address.  */
	  sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
	  ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
	  sym->st_shndx = (_bfd_elf_section_from_bfd_section
			   (output_bfd, htab->root.iplt->output_section));
	  sym->st_value = (h->plt.offset
			   + htab->root.iplt->output_section->vma
			   + htab->root.iplt->output_offset);
	}
    }

  if (h->needs_copy)
    {
      asection * s;
      Elf_Internal_Rela rel;

      /* This symbol needs a copy reloc.  Set it up.  */
      BFD_ASSERT (h->dynindx != -1
		  && (h->root.type == bfd_link_hash_defined
		      || h->root.type == bfd_link_hash_defweak));

      s = htab->srelbss;
      BFD_ASSERT (s != NULL);

      /* The copy reloc points at the symbol's location in .dynbss.  */
      rel.r_addend = 0;
      rel.r_offset = (h->root.u.def.value
		      + h->root.u.def.section->output_section->vma
		      + h->root.u.def.section->output_offset);
      rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
      elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
    }

  /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  On VxWorks,
     the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
     to the ".got" section.  */
  if (h == htab->root.hdynamic
      || (!htab->vxworks_p && h == htab->root.hgot))
    sym->st_shndx = SHN_ABS;

  return TRUE;
}
15287
15288 static void
15289 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
15290 void *contents,
15291 const unsigned long *template, unsigned count)
15292 {
15293 unsigned ix;
15294
15295 for (ix = 0; ix != count; ix++)
15296 {
15297 unsigned long insn = template[ix];
15298
15299 /* Emit mov pc,rx if bx is not permitted. */
15300 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
15301 insn = (insn & 0xf000000f) | 0x01a0f000;
15302 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
15303 }
15304 }
15305
15306 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
15307 other variants, NaCl needs this entry in a static executable's
15308 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
15309 zero. For .iplt really only the last bundle is useful, and .iplt
15310 could have a shorter first entry, with each individual PLT entry's
15311 relative branch calculated differently so it targets the last
15312 bundle instead of the instruction before it (labelled .Lplt_tail
15313 above). But it's simpler to keep the size and layout of PLT0
15314 consistent with the dynamic case, at the cost of some dead code at
15315 the start of .iplt and the one dead store to the stack at the start
15316 of .Lplt_tail. */
15317 static void
15318 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
15319 asection *plt, bfd_vma got_displacement)
15320 {
15321 unsigned int i;
15322
15323 put_arm_insn (htab, output_bfd,
15324 elf32_arm_nacl_plt0_entry[0]
15325 | arm_movw_immediate (got_displacement),
15326 plt->contents + 0);
15327 put_arm_insn (htab, output_bfd,
15328 elf32_arm_nacl_plt0_entry[1]
15329 | arm_movt_immediate (got_displacement),
15330 plt->contents + 4);
15331
15332 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
15333 put_arm_insn (htab, output_bfd,
15334 elf32_arm_nacl_plt0_entry[i],
15335 plt->contents + (i * 4));
15336 }
15337
/* Finish up the dynamic sections: patch the .dynamic tags with final
   addresses/sizes, install the PLT0 header and TLS trampolines, and
   fill in the reserved first GOT entries.  Returns FALSE on error
   (e.g. discarded dynamic sections or a missing section named by a
   dynamic tag).  */

static bfd_boolean
elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * sgot;
  asection * sdyn;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  sgot = htab->root.sgotplt;
  /* A broken linker script might have discarded the dynamic sections.
     Catch this here so that we do not seg-fault later on.  */
  if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
    return FALSE;
  sdyn = bfd_get_linker_section (dynobj, ".dynamic");

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      asection *splt;
      Elf32_External_Dyn *dyncon, *dynconend;

      splt = htab->root.splt;
      BFD_ASSERT (splt != NULL && sdyn != NULL);
      BFD_ASSERT (htab->symbian_p || sgot != NULL);

      /* Walk every entry of .dynamic, swapping it in, fixing it up
	 for this target, and swapping it back out when changed.  */
      dyncon = (Elf32_External_Dyn *) sdyn->contents;
      dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);

      for (; dyncon < dynconend; dyncon++)
	{
	  Elf_Internal_Dyn dyn;
	  const char * name;
	  asection * s;

	  bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);

	  switch (dyn.d_tag)
	    {
	      unsigned int type;

	    default:
	      if (htab->vxworks_p
		  && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
		bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    /* These tags are rewritten only for Symbian (BPABI), where
	       they must hold file offsets rather than VMAs.  */
	    case DT_HASH:
	      name = ".hash";
	      goto get_vma_if_bpabi;
	    case DT_STRTAB:
	      name = ".dynstr";
	      goto get_vma_if_bpabi;
	    case DT_SYMTAB:
	      name = ".dynsym";
	      goto get_vma_if_bpabi;
	    case DT_VERSYM:
	      name = ".gnu.version";
	      goto get_vma_if_bpabi;
	    case DT_VERDEF:
	      name = ".gnu.version_d";
	      goto get_vma_if_bpabi;
	    case DT_VERNEED:
	      name = ".gnu.version_r";
	      goto get_vma_if_bpabi;

	    case DT_PLTGOT:
	      name = htab->symbian_p ? ".got" : ".got.plt";
	      goto get_vma;
	    case DT_JMPREL:
	      name = RELOC_SECTION (htab, ".plt");
	    get_vma:
	      s = bfd_get_linker_section (dynobj, name);
	      if (s == NULL)
		{
		  (*_bfd_error_handler)
		    (_("could not find section %s"), name);
		  bfd_set_error (bfd_error_invalid_operation);
		  return FALSE;
		}
	      if (!htab->symbian_p)
		dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
	      else
		/* In the BPABI, tags in the PT_DYNAMIC section point
		   at the file offset, not the memory address, for the
		   convenience of the post linker.  */
		dyn.d_un.d_ptr = s->output_section->filepos + s->output_offset;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    get_vma_if_bpabi:
	      if (htab->symbian_p)
		goto get_vma;
	      break;

	    case DT_PLTRELSZ:
	      s = htab->root.srelplt;
	      BFD_ASSERT (s != NULL);
	      dyn.d_un.d_val = s->size;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_RELSZ:
	    case DT_RELASZ:
	      if (!htab->symbian_p)
		{
		  /* My reading of the SVR4 ABI indicates that the
		     procedure linkage table relocs (DT_JMPREL) should be
		     included in the overall relocs (DT_REL).  This is
		     what Solaris does.  However, UnixWare can not handle
		     that case.  Therefore, we override the DT_RELSZ entry
		     here to make it not include the JMPREL relocs.  Since
		     the linker script arranges for .rel(a).plt to follow all
		     other relocation sections, we don't have to worry
		     about changing the DT_REL entry.  */
		  s = htab->root.srelplt;
		  if (s != NULL)
		    dyn.d_un.d_val -= s->size;
		  bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		  break;
		}
	      /* Fall through.  */

	    case DT_REL:
	    case DT_RELA:
	      /* In the BPABI, the DT_REL tag must point at the file
		 offset, not the VMA, of the first relocation
		 section.  So, we use code similar to that in
		 elflink.c, but do not check for SHF_ALLOC on the
		 relcoation section, since relocations sections are
		 never allocated under the BPABI.  The comments above
		 about Unixware notwithstanding, we include all of the
		 relocations here.  */
	      if (htab->symbian_p)
		{
		  unsigned int i;
		  type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
			  ? SHT_REL : SHT_RELA);
		  dyn.d_un.d_val = 0;
		  /* Sum the sizes (for *SZ tags) or find the lowest file
		     offset (for DT_REL/DT_RELA) over all matching
		     relocation section headers.  */
		  for (i = 1; i < elf_numsections (output_bfd); i++)
		    {
		      Elf_Internal_Shdr *hdr
			= elf_elfsections (output_bfd)[i];
		      if (hdr->sh_type == type)
			{
			  if (dyn.d_tag == DT_RELSZ
			      || dyn.d_tag == DT_RELASZ)
			    dyn.d_un.d_val += hdr->sh_size;
			  else if ((ufile_ptr) hdr->sh_offset
				   <= dyn.d_un.d_val - 1)
			    dyn.d_un.d_val = hdr->sh_offset;
			}
		    }
		  bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		}
	      break;

	    case DT_TLSDESC_PLT:
	      s = htab->root.splt;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->dt_tlsdesc_plt);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_TLSDESC_GOT:
	      s = htab->root.sgot;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->dt_tlsdesc_got);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    /* Set the bottom bit of DT_INIT/FINI if the
	       corresponding function is Thumb.  */
	    case DT_INIT:
	      name = info->init_function;
	      goto get_sym;
	    case DT_FINI:
	      name = info->fini_function;
	    get_sym:
	      /* If it wasn't set by elf_bfd_final_link
		 then there is nothing to adjust.  */
	      if (dyn.d_un.d_val != 0)
		{
		  struct elf_link_hash_entry * eh;

		  eh = elf_link_hash_lookup (elf_hash_table (info), name,
					     FALSE, FALSE, TRUE);
		  if (eh != NULL
		      && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal)
			 == ST_BRANCH_TO_THUMB)
		    {
		      dyn.d_un.d_val |= 1;
		      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		    }
		}
	      break;
	    }
	}

      /* Fill in the first entry in the procedure linkage table.  */
      if (splt->size > 0 && htab->plt_header_size)
	{
	  const bfd_vma *plt0_entry;
	  bfd_vma got_address, plt_address, got_displacement;

	  /* Calculate the addresses of the GOT and PLT.  */
	  got_address = sgot->output_section->vma + sgot->output_offset;
	  plt_address = splt->output_section->vma + splt->output_offset;

	  if (htab->vxworks_p)
	    {
	      /* The VxWorks GOT is relocated by the dynamic linker.
		 Therefore, we must emit relocations rather than simply
		 computing the values now.  */
	      Elf_Internal_Rela rel;

	      plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      bfd_put_32 (output_bfd, got_address, splt->contents + 12);

	      /* Generate a relocation for _GLOBAL_OFFSET_TABLE_.  */
	      rel.r_offset = plt_address + 12;
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      rel.r_addend = 0;
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel,
				     htab->srelplt2->contents);
	    }
	  else if (htab->nacl_p)
	    arm_nacl_put_plt0 (htab, output_bfd, splt,
			       got_address + 8 - (plt_address + 16));
	  else if (using_thumb_only (htab))
	    {
	      got_displacement = got_address - (plt_address + 12);

	      plt0_entry = elf32_thumb2_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);

	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
	    }
	  else
	    {
	      got_displacement = got_address - (plt_address + 16);

	      plt0_entry = elf32_arm_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      put_arm_insn (htab, output_bfd, plt0_entry[3],
			    splt->contents + 12);

#ifdef FOUR_WORD_PLT
	      /* The displacement value goes in the otherwise-unused
		 last word of the second entry.  */
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
#else
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
#endif
	    }
	}

      /* UnixWare sets the entsize of .plt to 4, although that doesn't
	 really seem like the right value.  */
      if (splt->output_section->owner == output_bfd)
	elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;

      if (htab->dt_tlsdesc_plt)
	{
	  bfd_vma got_address
	    = sgot->output_section->vma + sgot->output_offset;
	  bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
				    + htab->root.sgot->output_offset);
	  bfd_vma plt_address
	    = splt->output_section->vma + splt->output_offset;

	  /* Emit the 6-word lazy TLS descriptor trampoline; the two
	     words at +24/+28 are PC-relative displacements computed
	     against the trampoline's own position (the subtracted
	     template words 6 and 7 look like the PC-offset bias of the
	     referencing instructions — NOTE(review): confirm against
	     dl_tlsdesc_lazy_trampoline's definition).  */
	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->dt_tlsdesc_plt,
			      dl_tlsdesc_lazy_trampoline, 6);

	  bfd_put_32 (output_bfd,
		      gotplt_address + htab->dt_tlsdesc_got
		      - (plt_address + htab->dt_tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[6],
		      splt->contents + htab->dt_tlsdesc_plt + 24);
	  bfd_put_32 (output_bfd,
		      got_address - (plt_address + htab->dt_tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[7],
		      splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
	}

      if (htab->tls_trampoline)
	{
	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->tls_trampoline,
			      tls_trampoline, 3);
#ifdef FOUR_WORD_PLT
	  bfd_put_32 (output_bfd, 0x00000000,
		      splt->contents + htab->tls_trampoline + 12);
#endif
	}

      if (htab->vxworks_p
	  && !bfd_link_pic (info)
	  && htab->root.splt->size > 0)
	{
	  /* Correct the .rel(a).plt.unloaded relocations.  They will have
	     incorrect symbol indexes.  */
	  int num_plts;
	  unsigned char *p;

	  num_plts = ((htab->root.splt->size - htab->plt_header_size)
		      / htab->plt_entry_size);
	  p = htab->srelplt2->contents + RELOC_SIZE (htab);

	  /* Two relocations per PLT entry: first against _GLOBAL_OFFSET_TABLE_,
	     second against _PROCEDURE_LINKAGE_TABLE_.  */
	  for (; num_plts; num_plts--)
	    {
	      Elf_Internal_Rela rel;

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);
	    }
	}
    }

  if (htab->nacl_p && htab->root.iplt != NULL && htab->root.iplt->size > 0)
    /* NaCl uses a special first entry in .iplt too.  */
    arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);

  /* Fill in the first three entries in the global offset table.  */
  if (sgot)
    {
      if (sgot->size > 0)
	{
	  /* GOT[0] holds the address of .dynamic (0 if there is none);
	     GOT[1] and GOT[2] are reserved and cleared here.  */
	  if (sdyn == NULL)
	    bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
	  else
	    bfd_put_32 (output_bfd,
			sdyn->output_section->vma + sdyn->output_offset,
			sgot->contents);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
	}

      elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
    }

  return TRUE;
}
15711
15712 static void
15713 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
15714 {
15715 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
15716 struct elf32_arm_link_hash_table *globals;
15717 struct elf_segment_map *m;
15718
15719 i_ehdrp = elf_elfheader (abfd);
15720
15721 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
15722 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
15723 else
15724 _bfd_elf_post_process_headers (abfd, link_info);
15725 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
15726
15727 if (link_info)
15728 {
15729 globals = elf32_arm_hash_table (link_info);
15730 if (globals != NULL && globals->byteswap_code)
15731 i_ehdrp->e_flags |= EF_ARM_BE8;
15732 }
15733
15734 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
15735 && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
15736 {
15737 int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
15738 if (abi == AEABI_VFP_args_vfp)
15739 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
15740 else
15741 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
15742 }
15743
15744 /* Scan segment to set p_flags attribute if it contains only sections with
15745 SHF_ARM_NOREAD flag. */
15746 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
15747 {
15748 unsigned int j;
15749
15750 if (m->count == 0)
15751 continue;
15752 for (j = 0; j < m->count; j++)
15753 {
15754 if (!(elf_section_flags (m->sections[j]) & SHF_ARM_NOREAD))
15755 break;
15756 }
15757 if (j == m->count)
15758 {
15759 m->p_flags = PF_X;
15760 m->p_flags_valid = 1;
15761 }
15762 }
15763 }
15764
15765 static enum elf_reloc_type_class
15766 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
15767 const asection *rel_sec ATTRIBUTE_UNUSED,
15768 const Elf_Internal_Rela *rela)
15769 {
15770 switch ((int) ELF32_R_TYPE (rela->r_info))
15771 {
15772 case R_ARM_RELATIVE:
15773 return reloc_class_relative;
15774 case R_ARM_JUMP_SLOT:
15775 return reloc_class_plt;
15776 case R_ARM_COPY:
15777 return reloc_class_copy;
15778 case R_ARM_IRELATIVE:
15779 return reloc_class_ifunc;
15780 default:
15781 return reloc_class_normal;
15782 }
15783 }
15784
/* Final write hook: refresh the contents of any ARM note section
   (ARM_NOTE_SECTION) in ABFD before the file is written out.  */

static void
elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
{
  bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
}
15790
15791 /* Return TRUE if this is an unwinding table entry. */
15792
15793 static bfd_boolean
15794 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
15795 {
15796 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
15797 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
15798 }
15799
15800
15801 /* Set the type and flags for an ARM section. We do this by
15802 the section name, which is a hack, but ought to work. */
15803
15804 static bfd_boolean
15805 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
15806 {
15807 const char * name;
15808
15809 name = bfd_get_section_name (abfd, sec);
15810
15811 if (is_arm_elf_unwind_section_name (abfd, name))
15812 {
15813 hdr->sh_type = SHT_ARM_EXIDX;
15814 hdr->sh_flags |= SHF_LINK_ORDER;
15815 }
15816
15817 if (sec->flags & SEC_ELF_NOREAD)
15818 hdr->sh_flags |= SHF_ARM_NOREAD;
15819
15820 return TRUE;
15821 }
15822
15823 /* Handle an ARM specific section when reading an object file. This is
15824 called when bfd_section_from_shdr finds a section with an unknown
15825 type. */
15826
15827 static bfd_boolean
15828 elf32_arm_section_from_shdr (bfd *abfd,
15829 Elf_Internal_Shdr * hdr,
15830 const char *name,
15831 int shindex)
15832 {
15833 /* There ought to be a place to keep ELF backend specific flags, but
15834 at the moment there isn't one. We just keep track of the
15835 sections by their name, instead. Fortunately, the ABI gives
15836 names for all the ARM specific sections, so we will probably get
15837 away with this. */
15838 switch (hdr->sh_type)
15839 {
15840 case SHT_ARM_EXIDX:
15841 case SHT_ARM_PREEMPTMAP:
15842 case SHT_ARM_ATTRIBUTES:
15843 break;
15844
15845 default:
15846 return FALSE;
15847 }
15848
15849 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
15850 return FALSE;
15851
15852 return TRUE;
15853 }
15854
15855 static _arm_elf_section_data *
15856 get_arm_elf_section_data (asection * sec)
15857 {
15858 if (sec && sec->owner && is_arm_elf (sec->owner))
15859 return elf32_arm_section_data (sec);
15860 else
15861 return NULL;
15862 }
15863
/* Context threaded through the routines that emit linker-generated
   local symbols (mapping symbols and stub symbols).  */
typedef struct
{
  /* Opaque state forwarded unchanged to FUNC.  */
  void *flaginfo;
  /* The link being performed.  */
  struct bfd_link_info *info;
  /* Section currently being processed.  */
  asection *sec;
  /* Output section index corresponding to SEC.  */
  int sec_shndx;
  /* Callback that actually emits each symbol.  */
  int (*func) (void *, const char *, Elf_Internal_Sym *,
	       asection *, struct elf_link_hash_entry *);
} output_arch_syminfo;
15873
/* The three kinds of ARM mapping symbol: $a (ARM code), $t (Thumb
   code) and $d (data).  The order matches the name table in
   elf32_arm_output_map_sym.  */
enum map_symbol_type
{
  ARM_MAP_ARM,
  ARM_MAP_THUMB,
  ARM_MAP_DATA
};
15880
15881
15882 /* Output a single mapping symbol. */
15883
15884 static bfd_boolean
15885 elf32_arm_output_map_sym (output_arch_syminfo *osi,
15886 enum map_symbol_type type,
15887 bfd_vma offset)
15888 {
15889 static const char *names[3] = {"$a", "$t", "$d"};
15890 Elf_Internal_Sym sym;
15891
15892 sym.st_value = osi->sec->output_section->vma
15893 + osi->sec->output_offset
15894 + offset;
15895 sym.st_size = 0;
15896 sym.st_other = 0;
15897 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
15898 sym.st_shndx = osi->sec_shndx;
15899 sym.st_target_internal = 0;
15900 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
15901 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
15902 }
15903
/* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
   IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt.  */

static bfd_boolean
elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
			    bfd_boolean is_iplt_entry_p,
			    union gotplt_union *root_plt,
			    struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  bfd_vma addr, plt_header_size;

  /* An offset of -1 means no PLT entry was allocated for this symbol.  */
  if (root_plt->offset == (bfd_vma) -1)
    return TRUE;

  htab = elf32_arm_hash_table (osi->info);
  if (htab == NULL)
    return FALSE;

  if (is_iplt_entry_p)
    {
      /* .iplt has no header, so entries start at offset zero.  */
      osi->sec = htab->root.iplt;
      plt_header_size = 0;
    }
  else
    {
      osi->sec = htab->root.splt;
      plt_header_size = htab->plt_header_size;
    }
  osi->sec_shndx = (_bfd_elf_section_from_bfd_section
		    (osi->info->output_bfd, osi->sec->output_section));

  /* Drop bit 0 of the recorded offset (presumably a flag bit — the
     entry itself lies at the even address; TODO confirm against the
     code that sets root_plt->offset).  */
  addr = root_plt->offset & -2;
  if (htab->symbian_p)
    {
      /* Symbian entries: code at ADDR, literal word at ADDR + 4.  */
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
	return FALSE;
    }
  else if (htab->vxworks_p)
    {
      /* VxWorks entries alternate code and literal data at the
	 offsets marked below.  */
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
	return FALSE;
    }
  else if (htab->nacl_p)
    {
      /* NaCl entries are all ARM code.  */
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
    }
  else if (using_thumb_only (htab))
    {
      /* Thumb-only (M-profile) entries are all Thumb code.  */
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
	return FALSE;
    }
  else
    {
      bfd_boolean thumb_stub_p;

      thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
      if (thumb_stub_p)
	{
	  /* The Thumb entry thunk occupies the 4 bytes before ADDR.  */
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	    return FALSE;
	}
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
	return FALSE;
#else
      /* A three-word PLT with no Thumb thunk contains only Arm code,
	 so only need to output a mapping symbol for the first PLT entry and
	 entries with thumb thunks.  */
      if (thumb_stub_p || addr == plt_header_size)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	    return FALSE;
	}
#endif
    }

  return TRUE;
}
15994
15995 /* Output mapping symbols for PLT entries associated with H. */
15996
15997 static bfd_boolean
15998 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
15999 {
16000 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
16001 struct elf32_arm_link_hash_entry *eh;
16002
16003 if (h->root.type == bfd_link_hash_indirect)
16004 return TRUE;
16005
16006 if (h->root.type == bfd_link_hash_warning)
16007 /* When warning symbols are created, they **replace** the "real"
16008 entry in the hash table, thus we never get to see the real
16009 symbol in a hash traversal. So look at it now. */
16010 h = (struct elf_link_hash_entry *) h->root.u.i.link;
16011
16012 eh = (struct elf32_arm_link_hash_entry *) h;
16013 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
16014 &h->plt, &eh->plt);
16015 }
16016
16017 /* Bind a veneered symbol to its veneer identified by its hash entry
16018 STUB_ENTRY. The veneered location thus loose its symbol. */
16019
16020 static void
16021 arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
16022 {
16023 struct elf32_arm_link_hash_entry *hash = stub_entry->h;
16024
16025 BFD_ASSERT (hash);
16026 hash->root.root.u.def.section = stub_entry->stub_sec;
16027 hash->root.root.u.def.value = stub_entry->stub_offset;
16028 hash->root.size = stub_entry->stub_size;
16029 }
16030
16031 /* Output a single local symbol for a generated stub. */
16032
16033 static bfd_boolean
16034 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
16035 bfd_vma offset, bfd_vma size)
16036 {
16037 Elf_Internal_Sym sym;
16038
16039 sym.st_value = osi->sec->output_section->vma
16040 + osi->sec->output_offset
16041 + offset;
16042 sym.st_size = size;
16043 sym.st_other = 0;
16044 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
16045 sym.st_shndx = osi->sec_shndx;
16046 sym.st_target_internal = 0;
16047 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
16048 }
16049
/* Stub-hash traversal callback: for one long-branch stub, emit its
   local symbol (or bind a claimed veneer symbol) and a mapping symbol
   at every ARM/Thumb/data transition in its instruction template.  */

static bfd_boolean
arm_map_one_stub (struct bfd_hash_entry * gen_entry,
		  void * in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  asection *stub_sec;
  bfd_vma addr;
  char *stub_name;
  output_arch_syminfo *osi;
  const insn_sequence *template_sequence;
  enum stub_insn_type prev_type;
  int size;
  int i;
  enum map_symbol_type sym_type;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  osi = (output_arch_syminfo *) in_arg;

  stub_sec = stub_entry->stub_sec;

  /* Ensure this stub is attached to the current section being
     processed.  */
  if (stub_sec != osi->sec)
    return TRUE;

  addr = (bfd_vma) stub_entry->stub_offset;
  template_sequence = stub_entry->stub_template;

  if (arm_stub_sym_claimed (stub_entry->stub_type))
    arm_stub_claim_sym (stub_entry);
  else
    {
      /* Emit a named local symbol for the stub; Thumb stubs get the
	 low bit set in their value.  */
      stub_name = stub_entry->output_name;
      switch (template_sequence[0].type)
	{
	case ARM_TYPE:
	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
					  stub_entry->stub_size))
	    return FALSE;
	  break;
	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
					  stub_entry->stub_size))
	    return FALSE;
	  break;
	default:
	  BFD_FAIL ();
	  return 0;
	}
    }

  /* Walk the template, emitting a mapping symbol whenever the
     instruction class changes, and accumulating the byte size.  */
  prev_type = DATA_TYPE;
  size = 0;
  for (i = 0; i < stub_entry->stub_template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	  sym_type = ARM_MAP_ARM;
	  break;

	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  sym_type = ARM_MAP_THUMB;
	  break;

	case DATA_TYPE:
	  sym_type = ARM_MAP_DATA;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}

      if (template_sequence[i].type != prev_type)
	{
	  prev_type = template_sequence[i].type;
	  if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
	    return FALSE;
	}

      /* Advance by the encoded size of this template element.  */
      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	case THUMB32_TYPE:
	  size += 4;
	  break;

	case THUMB16_TYPE:
	  size += 2;
	  break;

	case DATA_TYPE:
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}
    }

  return TRUE;
}
16157
/* Output mapping symbols for linker generated sections,
   and for those data-only sections that do not have a
   $d.  Covers: plain data sections missing a map, ARM<->Thumb glue,
   BX veneers, long-branch stubs, the PLT/iPLT, and the TLS
   trampolines.  FUNC is the callback used to emit each symbol.  */

static bfd_boolean
elf32_arm_output_arch_local_syms (bfd *output_bfd,
				  struct bfd_link_info *info,
				  void *flaginfo,
				  int (*func) (void *, const char *,
					       Elf_Internal_Sym *,
					       asection *,
					       struct elf_link_hash_entry *))
{
  output_arch_syminfo osi;
  struct elf32_arm_link_hash_table *htab;
  bfd_vma offset;
  bfd_size_type size;
  bfd *input_bfd;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  check_use_blx (htab);

  osi.flaginfo = flaginfo;
  osi.info = info;
  osi.func = func;

  /* Add a $d mapping symbol to data-only sections that
     don't have any mapping symbol.  This may result in (harmless) redundant
     mapping symbols.  */
  for (input_bfd = info->input_bfds;
       input_bfd != NULL;
       input_bfd = input_bfd->link.next)
    {
      /* Only consider inputs with symbols that were not created by
	 the linker itself.  */
      if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
	for (osi.sec = input_bfd->sections;
	     osi.sec != NULL;
	     osi.sec = osi.sec->next)
	  {
	    if (osi.sec->output_section != NULL
		&& ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
		    != 0)
		&& (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
		   == SEC_HAS_CONTENTS
		&& get_arm_elf_section_data (osi.sec) != NULL
		&& get_arm_elf_section_data (osi.sec)->mapcount == 0
		&& osi.sec->size > 0
		&& (osi.sec->flags & SEC_EXCLUDE) == 0)
	      {
		osi.sec_shndx = _bfd_elf_section_from_bfd_section
		  (output_bfd, osi.sec->output_section);
		if (osi.sec_shndx != (int)SHN_BAD)
		  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
	      }
	  }
    }

  /* ARM->Thumb glue.  */
  if (htab->arm_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM2THUMB_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      /* Pick the glue-entry size matching the variant that was
	 generated (PIC, BLX, or plain static).  */
      if (bfd_link_pic (info) || htab->root.is_relocatable_executable
	  || htab->pic_veneer)
	size = ARM2THUMB_PIC_GLUE_SIZE;
      else if (htab->use_blx)
	size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
      else
	size = ARM2THUMB_STATIC_GLUE_SIZE;

      for (offset = 0; offset < htab->arm_glue_size; offset += size)
	{
	  /* Each glue entry is ARM code followed by a literal word.  */
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
	}
    }

  /* Thumb->ARM glue.  */
  if (htab->thumb_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					THUMB2ARM_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      size = THUMB2ARM_GLUE_SIZE;

      for (offset = 0; offset < htab->thumb_glue_size; offset += size)
	{
	  /* Thumb prologue at OFFSET, ARM continuation at +4.  */
	  elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
	}
    }

  /* ARMv4 BX veneers.  */
  if (htab->bx_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM_BX_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);

      elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
    }

  /* Long calls stubs.  */
  if (htab->stub_bfd && htab->stub_bfd->sections)
    {
      asection* stub_sec;

      for (stub_sec = htab->stub_bfd->sections;
	   stub_sec != NULL;
	   stub_sec = stub_sec->next)
	{
	  /* Ignore non-stub sections.  */
	  if (!strstr (stub_sec->name, STUB_SUFFIX))
	    continue;

	  osi.sec = stub_sec;

	  osi.sec_shndx = _bfd_elf_section_from_bfd_section
	    (output_bfd, osi.sec->output_section);

	  bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
	}
    }

  /* Finally, output mapping symbols for the PLT.  */
  if (htab->root.splt && htab->root.splt->size > 0)
    {
      osi.sec = htab->root.splt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));

      /* Output mapping symbols for the plt header.  SymbianOS does not have a
	 plt header.  */
      if (htab->vxworks_p)
	{
	  /* VxWorks shared libraries have no PLT header.  */
	  if (!bfd_link_pic (info))
	    {
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
		return FALSE;
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
		return FALSE;
	    }
	}
      else if (htab->nacl_p)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
	}
      else if (using_thumb_only (htab))
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
	    return FALSE;
	}
      else if (!htab->symbian_p)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
#ifndef FOUR_WORD_PLT
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
	    return FALSE;
#endif
	}
    }
  if (htab->nacl_p && htab->root.iplt && htab->root.iplt->size > 0)
    {
      /* NaCl uses a special first entry in .iplt too.  */
      osi.sec = htab->root.iplt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	return FALSE;
    }
  if ((htab->root.splt && htab->root.splt->size > 0)
      || (htab->root.iplt && htab->root.iplt->size > 0))
    {
      /* Per-entry PLT mapping symbols: global symbols via the hash
	 traversal, then local ifunc entries from each input bfd.  */
      elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
      for (input_bfd = info->input_bfds;
	   input_bfd != NULL;
	   input_bfd = input_bfd->link.next)
	{
	  struct arm_local_iplt_info **local_iplt;
	  unsigned int i, num_syms;

	  local_iplt = elf32_arm_local_iplt (input_bfd);
	  if (local_iplt != NULL)
	    {
	      num_syms = elf_symtab_hdr (input_bfd).sh_info;
	      for (i = 0; i < num_syms; i++)
		if (local_iplt[i] != NULL
		    && !elf32_arm_output_plt_map_1 (&osi, TRUE,
						    &local_iplt[i]->root,
						    &local_iplt[i]->arm))
		  return FALSE;
	    }
	}
    }
  if (htab->dt_tlsdesc_plt != 0)
    {
      /* Mapping symbols for the lazy tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
	return FALSE;

      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->dt_tlsdesc_plt + 24))
	return FALSE;
    }
  if (htab->tls_trampoline != 0)
    {
      /* Mapping symbols for the tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
	return FALSE;
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->tls_trampoline + 12))
	return FALSE;
#endif
    }

  return TRUE;
}
16392
16393 /* Allocate target specific section data. */
16394
16395 static bfd_boolean
16396 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
16397 {
16398 if (!sec->used_by_bfd)
16399 {
16400 _arm_elf_section_data *sdata;
16401 bfd_size_type amt = sizeof (*sdata);
16402
16403 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
16404 if (sdata == NULL)
16405 return FALSE;
16406 sec->used_by_bfd = sdata;
16407 }
16408
16409 return _bfd_elf_new_section_hook (abfd, sec);
16410 }
16411
16412
16413 /* Used to order a list of mapping symbols by address. */
16414
16415 static int
16416 elf32_arm_compare_mapping (const void * a, const void * b)
16417 {
16418 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
16419 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
16420
16421 if (amap->vma > bmap->vma)
16422 return 1;
16423 else if (amap->vma < bmap->vma)
16424 return -1;
16425 else if (amap->type > bmap->type)
16426 /* Ensure results do not depend on the host qsort for objects with
16427 multiple mapping symbols at the same address by sorting on type
16428 after vma. */
16429 return 1;
16430 else if (amap->type < bmap->type)
16431 return -1;
16432 else
16433 return 0;
16434 }
16435
16436 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
16437
16438 static unsigned long
16439 offset_prel31 (unsigned long addr, bfd_vma offset)
16440 {
16441 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
16442 }
16443
/* Copy an .ARM.exidx table entry (two words) from FROM to TO, adding
   OFFSET to any (applied) PREL31 relocations so the entry stays
   correct at its new location.  */

static void
copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
{
  unsigned long first_word = bfd_get_32 (output_bfd, from);
  unsigned long second_word = bfd_get_32 (output_bfd, from + 4);

  /* High bit of first word is supposed to be zero (a PREL31 offset to
     the function); adjust it by OFFSET.  */
  if ((first_word & 0x80000000ul) == 0)
    first_word = offset_prel31 (first_word, offset);

  /* If the high bit of the second word is clear, and the bit pattern is not
     0x1 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry.  */
  if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
    second_word = offset_prel31 (second_word, offset);

  bfd_put_32 (output_bfd, first_word, to);
  bfd_put_32 (output_bfd, second_word, to + 4);
}
16465
/* Data for make_branch_to_a8_stub().  */

struct a8_branch_to_stub_data
{
  asection *writing_section;	/* Section whose contents are being patched.  */
  bfd_byte *contents;		/* In-memory copy of that section's contents.  */
};
16473
16474
/* Helper to insert branches to Cortex-A8 erratum stubs in the right
   places for a particular section.  Called via bfd_hash_traverse;
   GEN_ENTRY is the stub hash entry, IN_ARG points to a
   struct a8_branch_to_stub_data.  Returns FALSE on hard error.  */

static bfd_boolean
make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
			void *in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct a8_branch_to_stub_data *data;
  bfd_byte *contents;
  unsigned long branch_insn;
  bfd_vma veneered_insn_loc, veneer_entry_loc;
  bfd_signed_vma branch_offset;
  bfd *abfd;
  unsigned int loc;

  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  data = (struct a8_branch_to_stub_data *) in_arg;

  /* Skip stubs that are not Cortex-A8 veneers, or that belong to a
     different section than the one being written.  */
  if (stub_entry->target_section != data->writing_section
      || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
    return TRUE;

  contents = data->contents;

  /* We use target_section as Cortex-A8 erratum workaround stubs are only
     generated when both source and target are in the same section.  */
  veneered_insn_loc = stub_entry->target_section->output_section->vma
		      + stub_entry->target_section->output_offset
		      + stub_entry->source_value;

  veneer_entry_loc = stub_entry->stub_sec->output_section->vma
		     + stub_entry->stub_sec->output_offset
		     + stub_entry->stub_offset;

  /* For a BLX veneer the branch target is word-aligned.  */
  if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
    veneered_insn_loc &= ~3u;

  /* Thumb branch offsets are relative to PC = insn address + 4.  */
  branch_offset = veneer_entry_loc - veneered_insn_loc - 4;

  abfd = stub_entry->target_section->owner;
  loc = stub_entry->source_value;

  /* We attempt to avoid this condition by setting stubs_always_after_branch
     in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
     This check is just to be on the safe side...  */
  if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
    {
      (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
			       "allocated in unsafe location"), abfd);
      return FALSE;
    }

  /* Select the 32-bit Thumb-2 branch template for this veneer kind,
     then fill in the 24-bit offset fields (shared "jump24" tail).  */
  switch (stub_entry->stub_type)
    {
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_b_cond:
      branch_insn = 0xf0009000;	/* B.W template.  */
      goto jump24;

    case arm_stub_a8_veneer_blx:
      branch_insn = 0xf000e800;	/* BLX template.  */
      goto jump24;

    case arm_stub_a8_veneer_bl:
      {
	unsigned int i1, j1, i2, j2, s;

	branch_insn = 0xf000d000;	/* BL template.  */

      jump24:
	if (branch_offset < -16777216 || branch_offset > 16777214)
	  {
	    /* There's not much we can do apart from complain if this
	       happens.  */
	    (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
				     "of range (input file too large)"), abfd);
	    return FALSE;
	  }

	/* i1 = not(j1 eor s), so:
	   not i1 = j1 eor s
	   j1 = (not i1) eor s.  */

	branch_insn |= (branch_offset >> 1) & 0x7ff;		/* imm11.  */
	branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;	/* imm10.  */
	i2 = (branch_offset >> 22) & 1;
	i1 = (branch_offset >> 23) & 1;
	s = (branch_offset >> 24) & 1;
	j1 = (!i1) ^ s;
	j2 = (!i2) ^ s;
	branch_insn |= j2 << 11;
	branch_insn |= j1 << 13;
	branch_insn |= s << 26;
      }
      break;

    default:
      BFD_FAIL ();
      return FALSE;
    }

  /* Overwrite the original instruction with the branch, one halfword
     at a time (high halfword first).  */
  bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
  bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);

  return TRUE;
}
16582
16583 /* Beginning of stm32l4xx work-around. */
16584
16585 /* Functions encoding instructions necessary for the emission of the
16586 fix-stm32l4xx-629360.
16587 Encoding is extracted from the
16588 ARM (C) Architecture Reference Manual
16589 ARMv7-A and ARMv7-R edition
16590 ARM DDI 0406C.b (ID072512). */
16591
16592 static inline bfd_vma
16593 create_instruction_branch_absolute (int branch_offset)
16594 {
16595 /* A8.8.18 B (A8-334)
16596 B target_address (Encoding T4). */
16597 /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */
16598 /* jump offset is: S:I1:I2:imm10:imm11:0. */
16599 /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S). */
16600
16601 int s = ((branch_offset & 0x1000000) >> 24);
16602 int j1 = s ^ !((branch_offset & 0x800000) >> 23);
16603 int j2 = s ^ !((branch_offset & 0x400000) >> 22);
16604
16605 if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
16606 BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch.");
16607
16608 bfd_vma patched_inst = 0xf0009000
16609 | s << 26 /* S. */
16610 | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10. */
16611 | j1 << 13 /* J1. */
16612 | j2 << 11 /* J2. */
16613 | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11. */
16614
16615 return patched_inst;
16616 }
16617
16618 static inline bfd_vma
16619 create_instruction_ldmia (int base_reg, int wback, int reg_mask)
16620 {
16621 /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
16622 LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */
16623 bfd_vma patched_inst = 0xe8900000
16624 | (/*W=*/wback << 21)
16625 | (base_reg << 16)
16626 | (reg_mask & 0x0000ffff);
16627
16628 return patched_inst;
16629 }
16630
16631 static inline bfd_vma
16632 create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
16633 {
16634 /* A8.8.60 LDMDB/LDMEA (A8-402)
16635 LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */
16636 bfd_vma patched_inst = 0xe9100000
16637 | (/*W=*/wback << 21)
16638 | (base_reg << 16)
16639 | (reg_mask & 0x0000ffff);
16640
16641 return patched_inst;
16642 }
16643
16644 static inline bfd_vma
16645 create_instruction_mov (int target_reg, int source_reg)
16646 {
16647 /* A8.8.103 MOV (register) (A8-486)
16648 MOV Rd, Rm (Encoding T1). */
16649 bfd_vma patched_inst = 0x4600
16650 | (target_reg & 0x7)
16651 | ((target_reg & 0x8) >> 3) << 7
16652 | (source_reg << 3);
16653
16654 return patched_inst;
16655 }
16656
16657 static inline bfd_vma
16658 create_instruction_sub (int target_reg, int source_reg, int value)
16659 {
16660 /* A8.8.221 SUB (immediate) (A8-708)
16661 SUB Rd, Rn, #value (Encoding T3). */
16662 bfd_vma patched_inst = 0xf1a00000
16663 | (target_reg << 8)
16664 | (source_reg << 16)
16665 | (/*S=*/0 << 20)
16666 | ((value & 0x800) >> 11) << 26
16667 | ((value & 0x700) >> 8) << 12
16668 | (value & 0x0ff);
16669
16670 return patched_inst;
16671 }
16672
16673 static inline bfd_vma
16674 create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
16675 int first_reg)
16676 {
16677 /* A8.8.332 VLDM (A8-922)
16678 VLMD{MODE} Rn{!}, {list} (Encoding T1 or T2). */
16679 bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
16680 | (/*W=*/wback << 21)
16681 | (base_reg << 16)
16682 | (num_words & 0x000000ff)
16683 | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
16684 | (first_reg & 0x00000001) << 22;
16685
16686 return patched_inst;
16687 }
16688
16689 static inline bfd_vma
16690 create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
16691 int first_reg)
16692 {
16693 /* A8.8.332 VLDM (A8-922)
16694 VLMD{MODE} Rn!, {} (Encoding T1 or T2). */
16695 bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
16696 | (base_reg << 16)
16697 | (num_words & 0x000000ff)
16698 | (((unsigned)first_reg >>1 ) & 0x0000000f) << 12
16699 | (first_reg & 0x00000001) << 22;
16700
16701 return patched_inst;
16702 }
16703
16704 static inline bfd_vma
16705 create_instruction_udf_w (int value)
16706 {
16707 /* A8.8.247 UDF (A8-758)
16708 Undefined (Encoding T2). */
16709 bfd_vma patched_inst = 0xf7f0a000
16710 | (value & 0x00000fff)
16711 | (value & 0x000f0000) << 16;
16712
16713 return patched_inst;
16714 }
16715
16716 static inline bfd_vma
16717 create_instruction_udf (int value)
16718 {
16719 /* A8.8.247 UDF (A8-758)
16720 Undefined (Encoding T1). */
16721 bfd_vma patched_inst = 0xde00
16722 | (value & 0xff);
16723
16724 return patched_inst;
16725 }
16726
16727 /* Functions writing an instruction in memory, returning the next
16728 memory position to write to. */
16729
16730 static inline bfd_byte *
16731 push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
16732 bfd * output_bfd, bfd_byte *pt, insn32 insn)
16733 {
16734 put_thumb2_insn (htab, output_bfd, insn, pt);
16735 return pt + 4;
16736 }
16737
16738 static inline bfd_byte *
16739 push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
16740 bfd * output_bfd, bfd_byte *pt, insn32 insn)
16741 {
16742 put_thumb_insn (htab, output_bfd, insn, pt);
16743 return pt + 2;
16744 }
16745
16746 /* Function filling up a region in memory with T1 and T2 UDFs taking
16747 care of alignment. */
16748
16749 static bfd_byte *
16750 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
16751 bfd * output_bfd,
16752 const bfd_byte * const base_stub_contents,
16753 bfd_byte * const from_stub_contents,
16754 const bfd_byte * const end_stub_contents)
16755 {
16756 bfd_byte *current_stub_contents = from_stub_contents;
16757
16758 /* Fill the remaining of the stub with deterministic contents : UDF
16759 instructions.
16760 Check if realignment is needed on modulo 4 frontier using T1, to
16761 further use T2. */
16762 if ((current_stub_contents < end_stub_contents)
16763 && !((current_stub_contents - base_stub_contents) % 2)
16764 && ((current_stub_contents - base_stub_contents) % 4))
16765 current_stub_contents =
16766 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
16767 create_instruction_udf (0));
16768
16769 for (; current_stub_contents < end_stub_contents;)
16770 current_stub_contents =
16771 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
16772 create_instruction_udf_w (0));
16773
16774 return current_stub_contents;
16775 }
16776
16777 /* Functions writing the stream of instructions equivalent to the
16778 derived sequence for ldmia, ldmdb, vldm respectively. */
16779
/* Build, at BASE_STUB_CONTENTS, the stm32l4xx work-around veneer for
   the wide Thumb-2 LDMIA instruction INITIAL_INSN located at
   INITIAL_INSN_ADDR: loads of more than 8 registers are split into
   two LDMs of at most 7 registers each, followed (when PC is not in
   the list) by a branch back to the instruction after the original.
   Any unused tail of the veneer is filled with UDFs.  */

static void
stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;	/* W (write-back) bit.  */
  int ri, rn = (initial_insn & 0x000F0000) >> 16;	/* Base register Rn.  */
  int insn_all_registers = initial_insn & 0x0000ffff;	/* Register list.  */
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int nb_registers = popcount (insn_all_registers);
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;	/* PC in list.  */
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;	/* Rn in list.  */
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmia (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      if (!restore_pc)
	current_stub_contents =
	  push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			      create_instruction_branch_absolute
			      (initial_insn_addr - current_stub_contents));


      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* Sanity-check the encoding constraints the splitting below relies on:  */
  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13))==0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
    - One with the 7 lowest registers (register mask 0x007F)
      This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
      This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with the
     last LDM operation.
     The usable register may be any general purpose register (that
     excludes PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (wback)
    {
      /* With write-back, Rn is not in the list, so it can be used
	 directly for both halves.  */
      /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_low_registers));

      /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_high_registers));
      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }
  else /* if (!wback).  */
    {
      ri = rn;

      /* If Rn is not part of the high-register-list, move it there.  */
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* The second LDM below (no write-back) loads Ri itself from the
	 high list, restoring the program-visible value.  */
      /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));

      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}
16924
/* Build, at BASE_STUB_CONTENTS, the stm32l4xx work-around veneer for
   the wide Thumb-2 LDMDB instruction INITIAL_INSN located at
   INITIAL_INSN_ADDR.  Like the LDMIA case, loads of more than 8
   registers are split into two list operations of at most 7 registers
   each; the combination of write-back (wback), PC-in-list
   (restore_pc) and Rn-in-list (restore_rn) selects one of six
   emission strategies below.  Unused veneer space is UDF-filled.  */

static void
stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;	/* W (write-back) bit.  */
  int ri, rn = (initial_insn & 0x000f0000) >> 16;	/* Base register Rn.  */
  int insn_all_registers = initial_insn & 0x0000ffff;	/* Register list.  */
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;	/* PC in list.  */
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;	/* Rn in list.  */
  int nb_registers = popcount (insn_all_registers);
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmdb (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* Sanity-check the encoding constraints the splitting below relies on:  */
  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insn:
     - One with the 7 lowest registers (register mask 0x007F)
       This LDM will finally contain between 2 and 7 registers
     - One with the 7 highest registers (register mask 0xDF80)
       This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with
     the last LDM operation.
     The usable register may be any general purpose register (that excludes
     PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (!wback && !restore_pc && !restore_rn)
    {
      /* Choose a Ri in the low-register-list that will be restored.  */
      ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* DB order: high registers sit at the higher addresses, so the
	 high half is loaded first, descending.  */
      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (wback && !restore_pc && !restore_rn)
    {
      /* Write-back form: Rn is not loaded, use it directly.  */
      /* LDMDB Rn!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_high_registers));

      /* LDMDB Rn!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && !restore_rn)
    {
      /* PC is loaded, so no branch back is emitted; rewrite the DB
	 access as ascending loads from the bottom of the block.  */
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* Apply the DB write-back to Rn up front, then load ascending
	 through a scratch copy in Ri.  */
      /* SUB Rn, Rn, #(4*nb_registers) */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (rn, rn, (4 * nb_registers)));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (!wback && !restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_low_registers & (1 << rn)))
	{
	  /* Choose a Ri in the low-register-list that will be restored.  */
	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
	}

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_rn)
    {
      /* The assembler should not have accepted to encode this.  */
      BFD_ASSERT (0 && "Cannot patch an instruction that has an "
		  "undefined behavior.\n");
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

}
17170
/* Build, at BASE_STUB_CONTENTS, the stm32l4xx work-around veneer for
   the Thumb-2 VLDM instruction INITIAL_INSN located at
   INITIAL_INSN_ADDR: transfers of more than 8 words are split into a
   sequence of VLDMs of at most 8 words each, with a compensating SUB
   when the original had no write-back.  Ends with a branch back past
   the original instruction; unused veneer space is UDF-filled.  */

static void
stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
				      bfd * output_bfd,
				      const insn32 initial_insn,
				      const bfd_byte *const initial_insn_addr,
				      bfd_byte *const base_stub_contents)
{
  /* imm8 word count, from the low 8 bits of the instruction.  */
  int num_words = ((unsigned int) initial_insn << 24) >> 24;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_vldm (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 words load sequences that do not cause the
     hardware issue.  */
  if (num_words <= 8)
    {
      /* Untouched instruction.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else
    {
      bfd_boolean is_dp = /* DP encoding.  */
	(initial_insn & 0xfe100f00) == 0xec100b00;
      /* (insn << 7) >> 28 extracts bits 24..21 (P:U:D:W); masking with
	 0xd drops D, leaving the P:U:W addressing-mode pattern.  */
      bfd_boolean is_ia_nobang = /* (IA without !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
      bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP.  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
      bfd_boolean is_db_bang = /* (DB with !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
      int base_reg = ((unsigned int) initial_insn << 12) >> 28;
      /* d = UInt (Vd:D);.  */
      int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
	| (((unsigned int)initial_insn << 9) >> 31);

      /* Compute the number of 8-words chunks needed to split.  */
      int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
      int chunk;

      /* The test coverage has been done assuming the following
	 hypothesis that exactly one of the previous is_ predicates is
	 true.  */
      BFD_ASSERT ( (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
		  && !(is_ia_nobang & is_ia_bang & is_db_bang));

      /* We treat the cutting of the words in one pass for all
	 cases, then we emit the adjustments:

	 vldm rx, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 -> sub rx, rx, #size (list)

	 vldm rx!, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 This also handles vpop instruction (when rx is sp)

	 vldmd rx!, {...}
	 -> vldmb rx!, {8_words_or_less} for each needed 8_word.  */
      for (chunk = 0; chunk < chunks; ++chunk)
	{
	  bfd_vma new_insn = 0;

	  if (is_ia_nobang || is_ia_bang)
	    {
	      /* Every chunk but the last transfers 8 words; the last
		 takes the remainder.  */
	      new_insn = create_instruction_vldmia
		(base_reg,
		 is_dp,
		 /*wback= .  */1,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }
	  else if (is_db_bang)
	    {
	      new_insn = create_instruction_vldmdb
		(base_reg,
		 is_dp,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }

	  if (new_insn)
	    current_stub_contents =
	      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				  new_insn);
	}

      /* Only this case requires the base register compensation
	 subtract.  */
      if (is_ia_nobang)
	{
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_sub
				(base_reg, base_reg, 4*num_words));
	}

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
}
17291
17292 static void
17293 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
17294 bfd * output_bfd,
17295 const insn32 wrong_insn,
17296 const bfd_byte *const wrong_insn_addr,
17297 bfd_byte *const stub_contents)
17298 {
17299 if (is_thumb2_ldmia (wrong_insn))
17300 stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
17301 wrong_insn, wrong_insn_addr,
17302 stub_contents);
17303 else if (is_thumb2_ldmdb (wrong_insn))
17304 stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
17305 wrong_insn, wrong_insn_addr,
17306 stub_contents);
17307 else if (is_thumb2_vldm (wrong_insn))
17308 stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
17309 wrong_insn, wrong_insn_addr,
17310 stub_contents);
17311 }
17312
17313 /* End of stm32l4xx work-around. */
17314
17315
17316 static void
17317 elf32_arm_add_relocation (bfd *output_bfd, struct bfd_link_info *info,
17318 asection *output_sec, Elf_Internal_Rela *rel)
17319 {
17320 BFD_ASSERT (output_sec && rel);
17321 struct bfd_elf_section_reloc_data *output_reldata;
17322 struct elf32_arm_link_hash_table *htab;
17323 struct bfd_elf_section_data *oesd = elf_section_data (output_sec);
17324 Elf_Internal_Shdr *rel_hdr;
17325
17326
17327 if (oesd->rel.hdr)
17328 {
17329 rel_hdr = oesd->rel.hdr;
17330 output_reldata = &(oesd->rel);
17331 }
17332 else if (oesd->rela.hdr)
17333 {
17334 rel_hdr = oesd->rela.hdr;
17335 output_reldata = &(oesd->rela);
17336 }
17337 else
17338 {
17339 abort ();
17340 }
17341
17342 bfd_byte *erel = rel_hdr->contents;
17343 erel += output_reldata->count * rel_hdr->sh_entsize;
17344 htab = elf32_arm_hash_table (info);
17345 SWAP_RELOC_OUT (htab) (output_bfd, rel, erel);
17346 output_reldata->count++;
17347 }
17348
/* Do code byteswapping and apply final ARM-specific edits to SEC as it
   is written out: patch VFP11 erratum sites and their veneers, patch
   STM32L4XX erratum branches and build their replacement stubs, write
   edited .ARM.exidx tables, redirect branches to Cortex-A8 erratum
   stubs, and byte-swap code regions if requested.  Return FALSE
   afterwards so that the section is written out as normal (TRUE is
   returned only for the EXIDX case, which writes its own contents).  */

static bfd_boolean
elf32_arm_write_section (bfd *output_bfd,
			 struct bfd_link_info *link_info,
			 asection *sec,
			 bfd_byte *contents)
{
  unsigned int mapcount, errcount;
  _arm_elf_section_data *arm_data;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  elf32_arm_section_map *map;
  elf32_vfp11_erratum_list *errnode;
  elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
  bfd_vma ptr;
  bfd_vma end;
  bfd_vma offset = sec->output_section->vma + sec->output_offset;
  bfd_byte tmp;
  unsigned int i;

  if (globals == NULL)
    return FALSE;

  /* If this section has not been allocated an _arm_elf_section_data
     structure then we cannot record anything.  */
  arm_data = get_arm_elf_section_data (sec);
  if (arm_data == NULL)
    return FALSE;

  mapcount = arm_data->mapcount;
  map = arm_data->map;
  errcount = arm_data->erratumcount;

  /* VFP11 denorm erratum: patch each recorded site with a conditional
     branch to its veneer, and patch each veneer with the displaced VFP
     insn followed by a branch back.  */
  if (errcount != 0)
    {
      /* XOR mask to address bytes within a word: instructions are stored
	 little-endian even in big-endian output (byte addresses flip
	 within the 4-byte word).  */
      unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;

      for (errnode = arm_data->erratumlist; errnode != 0;
	   errnode = errnode->next)
	{
	  bfd_vma target = errnode->vma - offset;

	  switch (errnode->type)
	    {
	    case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
	      {
		bfd_vma branch_to_veneer;
		/* Original condition code of instruction, plus bit mask for
		   ARM B instruction.  */
		unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
				    | 0x0a000000;

		/* The instruction is before the label.  */
		target -= 4;

		/* Above offset included in -4 below.  */
		branch_to_veneer = errnode->u.b.veneer->vma
				   - errnode->vma - 4;

		/* ARM B reach is +/- 32MB (26-bit signed, word-aligned).  */
		if ((signed) branch_to_veneer < -(1 << 25)
		    || (signed) branch_to_veneer >= (1 << 25))
		  (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
					   "range"), output_bfd);

		insn |= (branch_to_veneer >> 2) & 0xffffff;
		contents[endianflip ^ target] = insn & 0xff;
		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
	      }
	      break;

	    case VFP11_ERRATUM_ARM_VENEER:
	      {
		bfd_vma branch_from_veneer;
		unsigned int insn;

		/* Take size of veneer into account.  */
		branch_from_veneer = errnode->u.v.branch->vma
				     - errnode->vma - 12;

		if ((signed) branch_from_veneer < -(1 << 25)
		    || (signed) branch_from_veneer >= (1 << 25))
		  (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
					   "range"), output_bfd);

		/* Original instruction.  */
		insn = errnode->u.v.branch->u.b.vfp_insn;
		contents[endianflip ^ target] = insn & 0xff;
		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;

		/* Branch back to insn after original insn.  */
		insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
		contents[endianflip ^ (target + 4)] = insn & 0xff;
		contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
	      }
	      break;

	    default:
	      abort ();
	    }
	}
    }

  /* STM32L4XX erratum: overwrite each offending multi-load with a branch
     to its veneer, and emit the replacement stub at the veneer.  */
  if (arm_data->stm32l4xx_erratumcount != 0)
    {
      for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
	   stm32l4xx_errnode != 0;
	   stm32l4xx_errnode = stm32l4xx_errnode->next)
	{
	  bfd_vma target = stm32l4xx_errnode->vma - offset;

	  switch (stm32l4xx_errnode->type)
	    {
	    case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
	      {
		unsigned int insn;
		bfd_vma branch_to_veneer =
		  stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;

		/* Thumb2 B.W reach is +/- 16MB.  */
		if ((signed) branch_to_veneer < -(1 << 24)
		    || (signed) branch_to_veneer >= (1 << 24))
		  {
		    bfd_vma out_of_range =
		      ((signed) branch_to_veneer < -(1 << 24)) ?
		      - branch_to_veneer - (1 << 24) :
		      ((signed) branch_to_veneer >= (1 << 24)) ?
		      branch_to_veneer - (1 << 24) : 0;

		    /* NOTE(review): out_of_range is a bfd_vma passed for
		       %ld — confirm argument width matches on all
		       hosts.  */
		    (*_bfd_error_handler)
		      (_("%B(%#x): error: Cannot create STM32L4XX veneer. "
			 "Jump out of range by %ld bytes. "
			 "Cannot encode branch instruction. "),
		       output_bfd,
		       (long) (stm32l4xx_errnode->vma - 4),
		       out_of_range);
		    continue;
		  }

		insn = create_instruction_branch_absolute
		  (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);

		/* The instruction is before the label.  */
		target -= 4;

		put_thumb2_insn (globals, output_bfd,
				 (bfd_vma) insn, contents + target);
	      }
	      break;

	    case STM32L4XX_ERRATUM_VENEER:
	      {
		bfd_byte * veneer;
		bfd_byte * veneer_r;
		unsigned int insn;

		veneer = contents + target;
		veneer_r = veneer
		  + stm32l4xx_errnode->u.b.veneer->vma
		  - stm32l4xx_errnode->vma - 4;

		/* NOTE(review): as parenthesised, the first operand is
		   "veneer_r - veneer - (VLDM > LDM ? VLDM : LDM size)" —
		   confirm this range check expresses the intended veneer
		   distance limits.  */
		if ((signed) (veneer_r - veneer -
			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
		    || (signed) (veneer_r - veneer) >= (1 << 24))
		  {
		    (*_bfd_error_handler) (_("%B: error: Cannot create STM32L4XX "
					     "veneer."), output_bfd);
		    continue;
		  }

		/* Original instruction.  */
		insn = stm32l4xx_errnode->u.v.branch->u.b.insn;

		stm32l4xx_create_replacing_stub
		  (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
	      }
	      break;

	    default:
	      abort ();
	    }
	}
    }

  if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
    {
      arm_unwind_table_edit *edit_node
	= arm_data->u.exidx.unwind_edit_list;
      /* Now, sec->size is the size of the section we will write.  The original
	 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
	 markers) was sec->rawsize.  (This isn't the case if we perform no
	 edits, then rawsize will be zero and we should use size).  */
      /* NOTE(review): the bfd_malloc result is used without a NULL check —
	 confirm allocation failure cannot happen here or add handling.  */
      bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
      unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
      unsigned int in_index, out_index;
      bfd_vma add_to_offsets = 0;

      /* Walk the 8-byte EXIDX entries applying the edit list: entries
	 before the next edit point are copied with their prel31 offsets
	 adjusted by ADD_TO_OFFSETS; deleted entries are skipped; and
	 synthetic EXIDX_CANTUNWIND markers are emitted for end-of-text
	 edits.  */
      for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
	{
	  if (edit_node)
	    {
	      unsigned int edit_index = edit_node->index;

	      if (in_index < edit_index && in_index * 8 < input_size)
		{
		  copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
				    contents + in_index * 8, add_to_offsets);
		  out_index++;
		  in_index++;
		}
	      else if (in_index == edit_index
		       || (in_index * 8 >= input_size
			   && edit_index == UINT_MAX))
		{
		  switch (edit_node->type)
		    {
		    case DELETE_EXIDX_ENTRY:
		      in_index++;
		      add_to_offsets += 8;
		      break;

		    case INSERT_EXIDX_CANTUNWIND_AT_END:
		      {
			asection *text_sec = edit_node->linked_section;
			bfd_vma text_offset = text_sec->output_section->vma
					      + text_sec->output_offset
					      + text_sec->size;
			bfd_vma exidx_offset = offset + out_index * 8;
			unsigned long prel31_offset;

			/* Note: this is meant to be equivalent to an
			   R_ARM_PREL31 relocation.  These synthetic
			   EXIDX_CANTUNWIND markers are not relocated by the
			   usual BFD method.  */
			prel31_offset = (text_offset - exidx_offset)
					& 0x7ffffffful;
			if (bfd_link_relocatable (link_info))
			  {
			    /* Here relocation for new EXIDX_CANTUNWIND is
			       created, so there is no need to
			       adjust offset by hand.  */
			    prel31_offset = text_sec->output_offset
					    + text_sec->size;

			    /* New relocation entity.  */
			    asection *text_out = text_sec->output_section;
			    Elf_Internal_Rela rel;
			    rel.r_addend = 0;
			    rel.r_offset = exidx_offset;
			    rel.r_info = ELF32_R_INFO (text_out->target_index,
						       R_ARM_PREL31);

			    elf32_arm_add_relocation (output_bfd, link_info,
						      sec->output_section,
						      &rel);
			  }

			/* First address we can't unwind.  */
			bfd_put_32 (output_bfd, prel31_offset,
				    &edited_contents[out_index * 8]);

			/* Code for EXIDX_CANTUNWIND.  */
			bfd_put_32 (output_bfd, 0x1,
				    &edited_contents[out_index * 8 + 4]);

			out_index++;
			add_to_offsets -= 8;
		      }
		      break;
		    }

		  edit_node = edit_node->next;
		}
	    }
	  else
	    {
	      /* No more edits, copy remaining entries verbatim.  */
	      copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
				contents + in_index * 8, add_to_offsets);
	      out_index++;
	      in_index++;
	    }
	}

      if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
	bfd_set_section_contents (output_bfd, sec->output_section,
				  edited_contents,
				  (file_ptr) sec->output_offset, sec->size);

      /* TRUE: we have already written the edited contents ourselves.  */
      return TRUE;
    }

  /* Fix code to point to Cortex-A8 erratum stubs.  */
  if (globals->fix_cortex_a8)
    {
      struct a8_branch_to_stub_data data;

      data.writing_section = sec;
      data.contents = contents;

      bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
			 & data);
    }

  if (mapcount == 0)
    return FALSE;

  if (globals->byteswap_code)
    {
      qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);

      ptr = map[0].vma;
      for (i = 0; i < mapcount; i++)
	{
	  if (i == mapcount - 1)
	    end = sec->size;
	  else
	    end = map[i + 1].vma;

	  /* Mapping symbol type: 'a' = ARM code, 't' = Thumb code,
	     'd' = data.  */
	  switch (map[i].type)
	    {
	    case 'a':
	      /* Byte swap code words.  */
	      while (ptr + 3 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 3];
		  contents[ptr + 3] = tmp;
		  tmp = contents[ptr + 1];
		  contents[ptr + 1] = contents[ptr + 2];
		  contents[ptr + 2] = tmp;
		  ptr += 4;
		}
	      break;

	    case 't':
	      /* Byte swap code halfwords.  */
	      while (ptr + 1 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 1];
		  contents[ptr + 1] = tmp;
		  ptr += 2;
		}
	      break;

	    case 'd':
	      /* Leave data alone.  */
	      break;
	    }
	  ptr = end;
	}
    }

  /* The mapping data is no longer needed; release it and mark the map
     consumed (mapcount is unsigned, so -1 wraps to a sentinel value).  */
  free (map);
  arm_data->mapcount = -1;
  arm_data->mapsize = 0;
  arm_data->map = NULL;

  return FALSE;
}
17718
17719 /* Mangle thumb function symbols as we read them in. */
17720
17721 static bfd_boolean
17722 elf32_arm_swap_symbol_in (bfd * abfd,
17723 const void *psrc,
17724 const void *pshn,
17725 Elf_Internal_Sym *dst)
17726 {
17727 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
17728 return FALSE;
17729 dst->st_target_internal = 0;
17730
17731 /* New EABI objects mark thumb function symbols by setting the low bit of
17732 the address. */
17733 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
17734 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
17735 {
17736 if (dst->st_value & 1)
17737 {
17738 dst->st_value &= ~(bfd_vma) 1;
17739 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
17740 ST_BRANCH_TO_THUMB);
17741 }
17742 else
17743 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
17744 }
17745 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
17746 {
17747 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
17748 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
17749 }
17750 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
17751 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
17752 else
17753 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
17754
17755 return TRUE;
17756 }
17757
17758
17759 /* Mangle thumb function symbols as we write them out. */
17760
17761 static void
17762 elf32_arm_swap_symbol_out (bfd *abfd,
17763 const Elf_Internal_Sym *src,
17764 void *cdst,
17765 void *shndx)
17766 {
17767 Elf_Internal_Sym newsym;
17768
17769 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
17770 of the address set, as per the new EABI. We do this unconditionally
17771 because objcopy does not set the elf header flags until after
17772 it writes out the symbol table. */
17773 if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
17774 {
17775 newsym = *src;
17776 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
17777 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
17778 if (newsym.st_shndx != SHN_UNDEF)
17779 {
17780 /* Do this only for defined symbols. At link type, the static
17781 linker will simulate the work of dynamic linker of resolving
17782 symbols and will carry over the thumbness of found symbols to
17783 the output symbol table. It's not clear how it happens, but
17784 the thumbness of undefined symbols can well be different at
17785 runtime, and writing '1' for them will be confusing for users
17786 and possibly for dynamic linker itself.
17787 */
17788 newsym.st_value |= 1;
17789 }
17790
17791 src = &newsym;
17792 }
17793 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
17794 }
17795
17796 /* Add the PT_ARM_EXIDX program header. */
17797
17798 static bfd_boolean
17799 elf32_arm_modify_segment_map (bfd *abfd,
17800 struct bfd_link_info *info ATTRIBUTE_UNUSED)
17801 {
17802 struct elf_segment_map *m;
17803 asection *sec;
17804
17805 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
17806 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
17807 {
17808 /* If there is already a PT_ARM_EXIDX header, then we do not
17809 want to add another one. This situation arises when running
17810 "strip"; the input binary already has the header. */
17811 m = elf_seg_map (abfd);
17812 while (m && m->p_type != PT_ARM_EXIDX)
17813 m = m->next;
17814 if (!m)
17815 {
17816 m = (struct elf_segment_map *)
17817 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
17818 if (m == NULL)
17819 return FALSE;
17820 m->p_type = PT_ARM_EXIDX;
17821 m->count = 1;
17822 m->sections[0] = sec;
17823
17824 m->next = elf_seg_map (abfd);
17825 elf_seg_map (abfd) = m;
17826 }
17827 }
17828
17829 return TRUE;
17830 }
17831
17832 /* We may add a PT_ARM_EXIDX program header. */
17833
17834 static int
17835 elf32_arm_additional_program_headers (bfd *abfd,
17836 struct bfd_link_info *info ATTRIBUTE_UNUSED)
17837 {
17838 asection *sec;
17839
17840 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
17841 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
17842 return 1;
17843 else
17844 return 0;
17845 }
17846
17847 /* Hook called by the linker routine which adds symbols from an object
17848 file. */
17849
17850 static bfd_boolean
17851 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
17852 Elf_Internal_Sym *sym, const char **namep,
17853 flagword *flagsp, asection **secp, bfd_vma *valp)
17854 {
17855 if (ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
17856 && (abfd->flags & DYNAMIC) == 0
17857 && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
17858 elf_tdata (info->output_bfd)->has_gnu_symbols |= elf_gnu_symbol_ifunc;
17859
17860 if (elf32_arm_hash_table (info) == NULL)
17861 return FALSE;
17862
17863 if (elf32_arm_hash_table (info)->vxworks_p
17864 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
17865 flagsp, secp, valp))
17866 return FALSE;
17867
17868 return TRUE;
17869 }
17870
/* We use this to override swap_symbol_in and swap_symbol_out.  */
const struct elf_size_info elf32_arm_size_info =
{
  sizeof (Elf32_External_Ehdr),
  sizeof (Elf32_External_Phdr),
  sizeof (Elf32_External_Shdr),
  sizeof (Elf32_External_Rel),
  sizeof (Elf32_External_Rela),
  sizeof (Elf32_External_Sym),
  sizeof (Elf32_External_Dyn),
  sizeof (Elf_External_Note),
  4,		/* Hash-table entry size.  */
  1,		/* Internal relocations per external relocation
		   (int_rels_per_ext_rel).  */
  32, 2,	/* Arch size, log2 of file alignment — per the field
		   order of struct elf_size_info in elf-bfd.h.  */
  ELFCLASS32, EV_CURRENT,
  bfd_elf32_write_out_phdrs,
  bfd_elf32_write_shdrs_and_ehdr,
  bfd_elf32_checksum_contents,
  bfd_elf32_write_relocs,
  elf32_arm_swap_symbol_in,	/* Overridden: decodes Thumb markers.  */
  elf32_arm_swap_symbol_out,	/* Overridden: re-encodes Thumb markers.  */
  bfd_elf32_slurp_reloc_table,
  bfd_elf32_slurp_symbol_table,
  bfd_elf32_swap_dyn_in,
  bfd_elf32_swap_dyn_out,
  bfd_elf32_swap_reloc_in,
  bfd_elf32_swap_reloc_out,
  bfd_elf32_swap_reloca_in,
  bfd_elf32_swap_reloca_out
};
17901
17902 static bfd_vma
17903 read_code32 (const bfd *abfd, const bfd_byte *addr)
17904 {
17905 /* V7 BE8 code is always little endian. */
17906 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
17907 return bfd_getl32 (addr);
17908
17909 return bfd_get_32 (abfd, addr);
17910 }
17911
17912 static bfd_vma
17913 read_code16 (const bfd *abfd, const bfd_byte *addr)
17914 {
17915 /* V7 BE8 code is always little endian. */
17916 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
17917 return bfd_getl16 (addr);
17918
17919 return bfd_get_16 (abfd, addr);
17920 }
17921
17922 /* Return size of plt0 entry starting at ADDR
17923 or (bfd_vma) -1 if size can not be determined. */
17924
17925 static bfd_vma
17926 elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
17927 {
17928 bfd_vma first_word;
17929 bfd_vma plt0_size;
17930
17931 first_word = read_code32 (abfd, addr);
17932
17933 if (first_word == elf32_arm_plt0_entry[0])
17934 plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
17935 else if (first_word == elf32_thumb2_plt0_entry[0])
17936 plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
17937 else
17938 /* We don't yet handle this PLT format. */
17939 return (bfd_vma) -1;
17940
17941 return plt0_size;
17942 }
17943
/* Return size of plt entry starting at offset OFFSET
   of plt section located at address START
   or (bfd_vma) -1 if size can not be determined.  */

static bfd_vma
elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
{
  bfd_vma first_insn;
  bfd_vma plt_size = 0;
  const bfd_byte *addr = start + offset;

  /* PLT entry size is fixed on Thumb-only platforms.  */
  if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
      return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);

  /* Respect Thumb stub if necessary.  */
  if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
    {
      plt_size += 2 * ARRAY_SIZE(elf32_arm_plt_thumb_stub);
    }

  /* Strip immediate from first add.  */
  first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;

/* Note: the final "else" below attaches to the last "if"/"else if" of
   whichever preprocessor branch was compiled in.  */
#ifdef FOUR_WORD_PLT
  if (first_insn == elf32_arm_plt_entry[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
#else
  if (first_insn == elf32_arm_plt_entry_long[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
  else if (first_insn == elf32_arm_plt_entry_short[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
#endif
  else
    /* We don't yet handle this PLT format.  */
    return (bfd_vma) -1;

  return plt_size;
}
17983
/* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab.
   Synthesise one "<name>@plt" symbol per .rel.plt relocation, placing it
   at the start of the corresponding PLT entry.  Returns the number of
   symbols stored via *RET, 0 when there is nothing to do, or -1 on
   error.  */

static long
elf32_arm_get_synthetic_symtab (bfd *abfd,
			       long symcount ATTRIBUTE_UNUSED,
			       asymbol **syms ATTRIBUTE_UNUSED,
			       long dynsymcount,
			       asymbol **dynsyms,
			       asymbol **ret)
{
  asection *relplt;
  asymbol *s;
  arelent *p;
  long count, i, n;
  size_t size;
  Elf_Internal_Shdr *hdr;
  char *names;
  asection *plt;
  bfd_vma offset;
  bfd_byte *data;

  *ret = NULL;

  if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
    return 0;

  if (dynsymcount <= 0)
    return 0;

  relplt = bfd_get_section_by_name (abfd, ".rel.plt");
  if (relplt == NULL)
    return 0;

  /* The reloc section must reference the dynamic symbol table.  */
  hdr = &elf_section_data (relplt)->this_hdr;
  if (hdr->sh_link != elf_dynsymtab (abfd)
      || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
    return 0;

  plt = bfd_get_section_by_name (abfd, ".plt");
  if (plt == NULL)
    return 0;

  if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, TRUE))
    return -1;

  /* Get (and cache) the PLT section contents so the entry sizes can be
     decoded below.  */
  data = plt->contents;
  if (data == NULL)
    {
      if (!bfd_get_full_section_contents(abfd, (asection *) plt, &data) || data == NULL)
	return -1;
      bfd_cache_section_contents((asection *) plt, data);
    }

  /* NOTE(review): assumes hdr->sh_entsize is non-zero — confirm malformed
     inputs cannot reach here with a zero entry size.  */
  count = relplt->size / hdr->sh_entsize;
  /* One asymbol per reloc plus room for all the "<name>[+0xADDEND]@plt"
     strings, which are stored after the symbol array.  */
  size = count * sizeof (asymbol);
  p = relplt->relocation;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
      if (p->addend != 0)
	size += sizeof ("+0x") - 1 + 8;
    }

  /* Ownership of the buffer passes to the caller through *RET.  */
  s = *ret = (asymbol *) bfd_malloc (size);
  if (s == NULL)
    return -1;

  /* Skip the PLT header; entries follow it.  */
  offset = elf32_arm_plt0_size (abfd, data);
  if (offset == (bfd_vma) -1)
    /* NOTE(review): *ret still holds the allocated buffer here —
       confirm the caller frees it on a -1 return.  */
    return -1;

  names = (char *) (s + count);
  p = relplt->relocation;
  n = 0;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size_t len;

      /* Stop at the first PLT entry whose format we cannot decode.  */
      bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
      if (plt_size == (bfd_vma) -1)
	break;

      *s = **p->sym_ptr_ptr;
      /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set.  Since
	 we are defining a symbol, ensure one of them is set.  */
      if ((s->flags & BSF_LOCAL) == 0)
	s->flags |= BSF_GLOBAL;
      s->flags |= BSF_SYNTHETIC;
      s->section = plt;
      s->value = offset;
      s->name = names;
      s->udata.p = NULL;
      len = strlen ((*p->sym_ptr_ptr)->name);
      memcpy (names, (*p->sym_ptr_ptr)->name, len);
      names += len;
      if (p->addend != 0)
	{
	  char buf[30], *a;

	  memcpy (names, "+0x", sizeof ("+0x") - 1);
	  names += sizeof ("+0x") - 1;
	  bfd_sprintf_vma (abfd, buf, p->addend);
	  /* Skip leading zeroes of the printed addend.  */
	  for (a = buf; *a == '0'; ++a)
	    ;
	  len = strlen (a);
	  memcpy (names, a, len);
	  names += len;
	}
      memcpy (names, "@plt", sizeof ("@plt"));
      names += sizeof ("@plt");
      ++s, ++n;
      offset += plt_size;
    }

  return n;
}
18100
18101 static bfd_boolean
18102 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr * hdr)
18103 {
18104 if (hdr->sh_flags & SHF_ARM_NOREAD)
18105 *flags |= SEC_ELF_NOREAD;
18106 return TRUE;
18107 }
18108
18109 static flagword
18110 elf32_arm_lookup_section_flags (char *flag_name)
18111 {
18112 if (!strcmp (flag_name, "SHF_ARM_NOREAD"))
18113 return SHF_ARM_NOREAD;
18114
18115 return SEC_NO_FLAGS;
18116 }
18117
18118 static unsigned int
18119 elf32_arm_count_additional_relocs (asection *sec)
18120 {
18121 struct _arm_elf_section_data *arm_data;
18122 arm_data = get_arm_elf_section_data (sec);
18123 return arm_data->additional_reloc_count;
18124 }
18125
/* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
   has a type >= SHT_LOOS.  Returns TRUE if these fields were initialised
   FALSE otherwise.  ISECTION is the best guess matching section from the
   input bfd IBFD, but it might be NULL.  */

static bfd_boolean
elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
				       bfd *obfd ATTRIBUTE_UNUSED,
				       const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
				       Elf_Internal_Shdr *osection)
{
  switch (osection->sh_type)
    {
    case SHT_ARM_EXIDX:
      {
	Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
	Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
	/* I is the output section-header index of the associated text
	   section; 0 doubles as the "not found" sentinel throughout.  */
	unsigned i = 0;

	osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
	osection->sh_info = 0;

	/* The sh_link field must be set to the text section associated with
	   this index section.  Unfortunately the ARM EHABI does not specify
	   exactly how to determine this association.  Our caller does try
	   to match up OSECTION with its corresponding input section however
	   so that is a good first guess.  */
	if (isection != NULL
	    && osection->bfd_section != NULL
	    && isection->bfd_section != NULL
	    && isection->bfd_section->output_section != NULL
	    && isection->bfd_section->output_section == osection->bfd_section
	    && iheaders != NULL
	    && isection->sh_link > 0
	    && isection->sh_link < elf_numsections (ibfd)
	    && iheaders[isection->sh_link]->bfd_section != NULL
	    && iheaders[isection->sh_link]->bfd_section->output_section != NULL
	    )
	  {
	    /* Search output headers for the one whose section is the
	       output section of the input's sh_link target.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i]->bfd_section
		  == iheaders[isection->sh_link]->bfd_section->output_section)
		break;
	  }

	if (i == 0)
	  {
	    /* Failing that we have to find a matching section ourselves.  If
	       we had the output section name available we could compare that
	       with input section names.  Unfortunately we don't.  So instead
	       we use a simple heuristic and look for the nearest executable
	       section before this one.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i] == osection)
		break;
	    /* OSECTION not found in its own header table — give up.  */
	    if (i == 0)
	      break;

	    /* Scan backwards for the nearest allocated executable
	       PROGBITS section.  */
	    while (i-- > 0)
	      if (oheaders[i]->sh_type == SHT_PROGBITS
		  && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
		  == (SHF_ALLOC | SHF_EXECINSTR))
		break;
	  }

	if (i)
	  {
	    osection->sh_link = i;
	    /* If the text section was part of a group
	       then the index section should be too.  */
	    if (oheaders[i]->sh_flags & SHF_GROUP)
	      osection->sh_flags |= SHF_GROUP;
	    return TRUE;
	  }
      }
      break;

    case SHT_ARM_PREEMPTMAP:
      osection->sh_flags = SHF_ALLOC;
      break;

    case SHT_ARM_ATTRIBUTES:
    case SHT_ARM_DEBUGOVERLAY:
    case SHT_ARM_OVERLAYSECTION:
    default:
      break;
    }

  return FALSE;
}
18216
18217 #undef elf_backend_copy_special_section_fields
18218 #define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
18219
18220 #define ELF_ARCH bfd_arch_arm
18221 #define ELF_TARGET_ID ARM_ELF_DATA
18222 #define ELF_MACHINE_CODE EM_ARM
18223 #ifdef __QNXTARGET__
18224 #define ELF_MAXPAGESIZE 0x1000
18225 #else
18226 #define ELF_MAXPAGESIZE 0x10000
18227 #endif
18228 #define ELF_MINPAGESIZE 0x1000
18229 #define ELF_COMMONPAGESIZE 0x1000
18230
18231 #define bfd_elf32_mkobject elf32_arm_mkobject
18232
18233 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
18234 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
18235 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
18236 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
18237 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
18238 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
18239 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
18240 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
18241 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
18242 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
18243 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
18244 #define bfd_elf32_bfd_final_link elf32_arm_final_link
18245 #define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab
18246
18247 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
18248 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
18249 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
18250 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
18251 #define elf_backend_check_relocs elf32_arm_check_relocs
18252 #define elf_backend_relocate_section elf32_arm_relocate_section
18253 #define elf_backend_write_section elf32_arm_write_section
18254 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
18255 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
18256 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
18257 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
18258 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
18259 #define elf_backend_always_size_sections elf32_arm_always_size_sections
18260 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
18261 #define elf_backend_post_process_headers elf32_arm_post_process_headers
18262 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
18263 #define elf_backend_object_p elf32_arm_object_p
18264 #define elf_backend_fake_sections elf32_arm_fake_sections
18265 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
18266 #define elf_backend_final_write_processing elf32_arm_final_write_processing
18267 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
18268 #define elf_backend_size_info elf32_arm_size_info
18269 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
18270 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
18271 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
18272 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
18273 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
18274 #define elf_backend_count_additional_relocs elf32_arm_count_additional_relocs
18275
18276 #define elf_backend_can_refcount 1
18277 #define elf_backend_can_gc_sections 1
18278 #define elf_backend_plt_readonly 1
18279 #define elf_backend_want_got_plt 1
18280 #define elf_backend_want_plt_sym 0
18281 #define elf_backend_may_use_rel_p 1
18282 #define elf_backend_may_use_rela_p 0
18283 #define elf_backend_default_use_rela_p 0
18284
18285 #define elf_backend_got_header_size 12
18286 #define elf_backend_extern_protected_data 1
18287
18288 #undef elf_backend_obj_attrs_vendor
18289 #define elf_backend_obj_attrs_vendor "aeabi"
18290 #undef elf_backend_obj_attrs_section
18291 #define elf_backend_obj_attrs_section ".ARM.attributes"
18292 #undef elf_backend_obj_attrs_arg_type
18293 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
18294 #undef elf_backend_obj_attrs_section_type
18295 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
18296 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
18297 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
18298
18299 #undef elf_backend_section_flags
18300 #define elf_backend_section_flags elf32_arm_section_flags
18301 #undef elf_backend_lookup_section_flags_hook
18302 #define elf_backend_lookup_section_flags_hook elf32_arm_lookup_section_flags
18303
18304 #include "elf32-target.h"
18305
18306 /* Native Client targets. */
18307
18308 #undef TARGET_LITTLE_SYM
18309 #define TARGET_LITTLE_SYM arm_elf32_nacl_le_vec
18310 #undef TARGET_LITTLE_NAME
18311 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
18312 #undef TARGET_BIG_SYM
18313 #define TARGET_BIG_SYM arm_elf32_nacl_be_vec
18314 #undef TARGET_BIG_NAME
18315 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
18316
18317 /* Like elf32_arm_link_hash_table_create -- but overrides
18318 appropriately for NaCl. */
18319
18320 static struct bfd_link_hash_table *
18321 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
18322 {
18323 struct bfd_link_hash_table *ret;
18324
18325 ret = elf32_arm_link_hash_table_create (abfd);
18326 if (ret)
18327 {
18328 struct elf32_arm_link_hash_table *htab
18329 = (struct elf32_arm_link_hash_table *) ret;
18330
18331 htab->nacl_p = 1;
18332
18333 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
18334 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
18335 }
18336 return ret;
18337 }
18338
18339 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
18340 really need to use elf32_arm_modify_segment_map. But we do it
18341 anyway just to reduce gratuitous differences with the stock ARM backend. */
18342
18343 static bfd_boolean
18344 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
18345 {
18346 return (elf32_arm_modify_segment_map (abfd, info)
18347 && nacl_modify_segment_map (abfd, info));
18348 }
18349
/* Perform the ARM final-write fixups, then the NaCl-specific ones.  */

static void
elf32_arm_nacl_final_write_processing (bfd *abfd, bfd_boolean linker)
{
  elf32_arm_final_write_processing (abfd, linker);
  nacl_final_write_processing (abfd, linker);
}
18356
18357 static bfd_vma
18358 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
18359 const arelent *rel ATTRIBUTE_UNUSED)
18360 {
18361 return plt->vma
18362 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
18363 i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
18364 }
18365
18366 #undef elf32_bed
18367 #define elf32_bed elf32_arm_nacl_bed
18368 #undef bfd_elf32_bfd_link_hash_table_create
18369 #define bfd_elf32_bfd_link_hash_table_create \
18370 elf32_arm_nacl_link_hash_table_create
18371 #undef elf_backend_plt_alignment
18372 #define elf_backend_plt_alignment 4
18373 #undef elf_backend_modify_segment_map
18374 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
18375 #undef elf_backend_modify_program_headers
18376 #define elf_backend_modify_program_headers nacl_modify_program_headers
18377 #undef elf_backend_final_write_processing
18378 #define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing
18379 #undef bfd_elf32_get_synthetic_symtab
18380 #undef elf_backend_plt_sym_val
18381 #define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val
18382 #undef elf_backend_copy_special_section_fields
18383
18384 #undef ELF_MINPAGESIZE
18385 #undef ELF_COMMONPAGESIZE
18386
18387
18388 #include "elf32-target.h"
18389
18390 /* Reset to defaults. */
18391 #undef elf_backend_plt_alignment
18392 #undef elf_backend_modify_segment_map
18393 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
18394 #undef elf_backend_modify_program_headers
18395 #undef elf_backend_final_write_processing
18396 #define elf_backend_final_write_processing elf32_arm_final_write_processing
18397 #undef ELF_MINPAGESIZE
18398 #define ELF_MINPAGESIZE 0x1000
18399 #undef ELF_COMMONPAGESIZE
18400 #define ELF_COMMONPAGESIZE 0x1000
18401
18402
18403 /* VxWorks Targets. */
18404
18405 #undef TARGET_LITTLE_SYM
18406 #define TARGET_LITTLE_SYM arm_elf32_vxworks_le_vec
18407 #undef TARGET_LITTLE_NAME
18408 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
18409 #undef TARGET_BIG_SYM
18410 #define TARGET_BIG_SYM arm_elf32_vxworks_be_vec
18411 #undef TARGET_BIG_NAME
18412 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
18413
18414 /* Like elf32_arm_link_hash_table_create -- but overrides
18415 appropriately for VxWorks. */
18416
18417 static struct bfd_link_hash_table *
18418 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
18419 {
18420 struct bfd_link_hash_table *ret;
18421
18422 ret = elf32_arm_link_hash_table_create (abfd);
18423 if (ret)
18424 {
18425 struct elf32_arm_link_hash_table *htab
18426 = (struct elf32_arm_link_hash_table *) ret;
18427 htab->use_rel = 0;
18428 htab->vxworks_p = 1;
18429 }
18430 return ret;
18431 }
18432
/* Perform the ARM final-write fixups, then the VxWorks-specific ones.  */

static void
elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
{
  elf32_arm_final_write_processing (abfd, linker);
  elf_vxworks_final_write_processing (abfd, linker);
}
18439
18440 #undef elf32_bed
18441 #define elf32_bed elf32_arm_vxworks_bed
18442
18443 #undef bfd_elf32_bfd_link_hash_table_create
18444 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
18445 #undef elf_backend_final_write_processing
18446 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
18447 #undef elf_backend_emit_relocs
18448 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
18449
18450 #undef elf_backend_may_use_rel_p
18451 #define elf_backend_may_use_rel_p 0
18452 #undef elf_backend_may_use_rela_p
18453 #define elf_backend_may_use_rela_p 1
18454 #undef elf_backend_default_use_rela_p
18455 #define elf_backend_default_use_rela_p 1
18456 #undef elf_backend_want_plt_sym
18457 #define elf_backend_want_plt_sym 1
18458 #undef ELF_MAXPAGESIZE
18459 #define ELF_MAXPAGESIZE 0x1000
18460
18461 #include "elf32-target.h"
18462
18463
18464 /* Merge backend specific data from an object file to the output
18465 object file when linking. */
18466
18467 static bfd_boolean
18468 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
18469 {
18470 flagword out_flags;
18471 flagword in_flags;
18472 bfd_boolean flags_compatible = TRUE;
18473 asection *sec;
18474
18475 /* Check if we have the same endianness. */
18476 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
18477 return FALSE;
18478
18479 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
18480 return TRUE;
18481
18482 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
18483 return FALSE;
18484
18485 /* The input BFD must have had its flags initialised. */
18486 /* The following seems bogus to me -- The flags are initialized in
18487 the assembler but I don't think an elf_flags_init field is
18488 written into the object. */
18489 /* BFD_ASSERT (elf_flags_init (ibfd)); */
18490
18491 in_flags = elf_elfheader (ibfd)->e_flags;
18492 out_flags = elf_elfheader (obfd)->e_flags;
18493
18494 /* In theory there is no reason why we couldn't handle this. However
18495 in practice it isn't even close to working and there is no real
18496 reason to want it. */
18497 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
18498 && !(ibfd->flags & DYNAMIC)
18499 && (in_flags & EF_ARM_BE8))
18500 {
18501 _bfd_error_handler (_("error: %B is already in final BE8 format"),
18502 ibfd);
18503 return FALSE;
18504 }
18505
18506 if (!elf_flags_init (obfd))
18507 {
18508 /* If the input is the default architecture and had the default
18509 flags then do not bother setting the flags for the output
18510 architecture, instead allow future merges to do this. If no
18511 future merges ever set these flags then they will retain their
18512 uninitialised values, which surprise surprise, correspond
18513 to the default values. */
18514 if (bfd_get_arch_info (ibfd)->the_default
18515 && elf_elfheader (ibfd)->e_flags == 0)
18516 return TRUE;
18517
18518 elf_flags_init (obfd) = TRUE;
18519 elf_elfheader (obfd)->e_flags = in_flags;
18520
18521 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
18522 && bfd_get_arch_info (obfd)->the_default)
18523 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
18524
18525 return TRUE;
18526 }
18527
18528 /* Determine what should happen if the input ARM architecture
18529 does not match the output ARM architecture. */
18530 if (! bfd_arm_merge_machines (ibfd, obfd))
18531 return FALSE;
18532
18533 /* Identical flags must be compatible. */
18534 if (in_flags == out_flags)
18535 return TRUE;
18536
18537 /* Check to see if the input BFD actually contains any sections. If
18538 not, its flags may not have been initialised either, but it
18539 cannot actually cause any incompatiblity. Do not short-circuit
18540 dynamic objects; their section list may be emptied by
18541 elf_link_add_object_symbols.
18542
18543 Also check to see if there are no code sections in the input.
18544 In this case there is no need to check for code specific flags.
18545 XXX - do we need to worry about floating-point format compatability
18546 in data sections ? */
18547 if (!(ibfd->flags & DYNAMIC))
18548 {
18549 bfd_boolean null_input_bfd = TRUE;
18550 bfd_boolean only_data_sections = TRUE;
18551
18552 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
18553 {
18554 /* Ignore synthetic glue sections. */
18555 if (strcmp (sec->name, ".glue_7")
18556 && strcmp (sec->name, ".glue_7t"))
18557 {
18558 if ((bfd_get_section_flags (ibfd, sec)
18559 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
18560 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
18561 only_data_sections = FALSE;
18562
18563 null_input_bfd = FALSE;
18564 break;
18565 }
18566 }
18567
18568 if (null_input_bfd || only_data_sections)
18569 return TRUE;
18570 }
18571
18572 /* Complain about various flag mismatches. */
18573 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
18574 EF_ARM_EABI_VERSION (out_flags)))
18575 {
18576 _bfd_error_handler
18577 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
18578 ibfd, obfd,
18579 (in_flags & EF_ARM_EABIMASK) >> 24,
18580 (out_flags & EF_ARM_EABIMASK) >> 24);
18581 return FALSE;
18582 }
18583
18584 /* Not sure what needs to be checked for EABI versions >= 1. */
18585 /* VxWorks libraries do not use these flags. */
18586 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
18587 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
18588 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
18589 {
18590 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
18591 {
18592 _bfd_error_handler
18593 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
18594 ibfd, obfd,
18595 in_flags & EF_ARM_APCS_26 ? 26 : 32,
18596 out_flags & EF_ARM_APCS_26 ? 26 : 32);
18597 flags_compatible = FALSE;
18598 }
18599
18600 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
18601 {
18602 if (in_flags & EF_ARM_APCS_FLOAT)
18603 _bfd_error_handler
18604 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
18605 ibfd, obfd);
18606 else
18607 _bfd_error_handler
18608 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
18609 ibfd, obfd);
18610
18611 flags_compatible = FALSE;
18612 }
18613
18614 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
18615 {
18616 if (in_flags & EF_ARM_VFP_FLOAT)
18617 _bfd_error_handler
18618 (_("error: %B uses VFP instructions, whereas %B does not"),
18619 ibfd, obfd);
18620 else
18621 _bfd_error_handler
18622 (_("error: %B uses FPA instructions, whereas %B does not"),
18623 ibfd, obfd);
18624
18625 flags_compatible = FALSE;
18626 }
18627
18628 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
18629 {
18630 if (in_flags & EF_ARM_MAVERICK_FLOAT)
18631 _bfd_error_handler
18632 (_("error: %B uses Maverick instructions, whereas %B does not"),
18633 ibfd, obfd);
18634 else
18635 _bfd_error_handler
18636 (_("error: %B does not use Maverick instructions, whereas %B does"),
18637 ibfd, obfd);
18638
18639 flags_compatible = FALSE;
18640 }
18641
18642 #ifdef EF_ARM_SOFT_FLOAT
18643 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
18644 {
18645 /* We can allow interworking between code that is VFP format
18646 layout, and uses either soft float or integer regs for
18647 passing floating point arguments and results. We already
18648 know that the APCS_FLOAT flags match; similarly for VFP
18649 flags. */
18650 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
18651 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
18652 {
18653 if (in_flags & EF_ARM_SOFT_FLOAT)
18654 _bfd_error_handler
18655 (_("error: %B uses software FP, whereas %B uses hardware FP"),
18656 ibfd, obfd);
18657 else
18658 _bfd_error_handler
18659 (_("error: %B uses hardware FP, whereas %B uses software FP"),
18660 ibfd, obfd);
18661
18662 flags_compatible = FALSE;
18663 }
18664 }
18665 #endif
18666
18667 /* Interworking mismatch is only a warning. */
18668 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
18669 {
18670 if (in_flags & EF_ARM_INTERWORK)
18671 {
18672 _bfd_error_handler
18673 (_("Warning: %B supports interworking, whereas %B does not"),
18674 ibfd, obfd);
18675 }
18676 else
18677 {
18678 _bfd_error_handler
18679 (_("Warning: %B does not support interworking, whereas %B does"),
18680 ibfd, obfd);
18681 }
18682 }
18683 }
18684
18685 return flags_compatible;
18686 }
18687
18688
18689 /* Symbian OS Targets. */
18690
18691 #undef TARGET_LITTLE_SYM
18692 #define TARGET_LITTLE_SYM arm_elf32_symbian_le_vec
18693 #undef TARGET_LITTLE_NAME
18694 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
18695 #undef TARGET_BIG_SYM
18696 #define TARGET_BIG_SYM arm_elf32_symbian_be_vec
18697 #undef TARGET_BIG_NAME
18698 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
18699
18700 /* Like elf32_arm_link_hash_table_create -- but overrides
18701 appropriately for Symbian OS. */
18702
18703 static struct bfd_link_hash_table *
18704 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
18705 {
18706 struct bfd_link_hash_table *ret;
18707
18708 ret = elf32_arm_link_hash_table_create (abfd);
18709 if (ret)
18710 {
18711 struct elf32_arm_link_hash_table *htab
18712 = (struct elf32_arm_link_hash_table *)ret;
18713 /* There is no PLT header for Symbian OS. */
18714 htab->plt_header_size = 0;
18715 /* The PLT entries are each one instruction and one word. */
18716 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
18717 htab->symbian_p = 1;
18718 /* Symbian uses armv5t or above, so use_blx is always true. */
18719 htab->use_blx = 1;
18720 htab->root.is_relocatable_executable = 1;
18721 }
18722 return ret;
18723 }
18724
/* Special-section table for Symbian OS (BPABI) objects; entries are
   (name, name-length, suffix-length, ELF section type, attributes).  */

static const struct bfd_elf_special_section
elf32_arm_symbian_special_sections[] =
{
  /* In a BPABI executable, the dynamic linking sections do not go in
     the loadable read-only segment.  The post-linker may wish to
     refer to these sections, but they are not part of the final
     program image.  */
  { STRING_COMMA_LEN (".dynamic"),       0, SHT_DYNAMIC,  0 },
  { STRING_COMMA_LEN (".dynstr"),        0, SHT_STRTAB,   0 },
  { STRING_COMMA_LEN (".dynsym"),        0, SHT_DYNSYM,   0 },
  { STRING_COMMA_LEN (".got"),           0, SHT_PROGBITS, 0 },
  { STRING_COMMA_LEN (".hash"),          0, SHT_HASH,     0 },
  /* These sections do not need to be writable as the SymbianOS
     postlinker will arrange things so that no dynamic relocation is
     required.  */
  { STRING_COMMA_LEN (".init_array"),    0, SHT_INIT_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".fini_array"),    0, SHT_FINI_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
  { NULL,                             0, 0, 0,            0 }  /* Sentinel.  */
};
18745
/* Clear D_PAGED before writing a BPABI object, then delegate to the
   generic ARM begin-write processing.  */

static void
elf32_arm_symbian_begin_write_processing (bfd *abfd,
					  struct bfd_link_info *link_info)
{
  /* BPABI objects are never loaded directly by an OS kernel; they are
     processed by a postlinker first, into an OS-specific format.  If
     the D_PAGED bit is set on the file, BFD will align segments on
     page boundaries, so that an OS can directly map the file.  With
     BPABI objects, that just results in wasted space.  In addition,
     because we clear the D_PAGED bit, map_sections_to_segments will
     recognize that the program headers should not be mapped into any
     loadable segment.  */
  abfd->flags &= ~D_PAGED;
  elf32_arm_begin_write_processing (abfd, link_info);
}
18761
18762 static bfd_boolean
18763 elf32_arm_symbian_modify_segment_map (bfd *abfd,
18764 struct bfd_link_info *info)
18765 {
18766 struct elf_segment_map *m;
18767 asection *dynsec;
18768
18769 /* BPABI shared libraries and executables should have a PT_DYNAMIC
18770 segment. However, because the .dynamic section is not marked
18771 with SEC_LOAD, the generic ELF code will not create such a
18772 segment. */
18773 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
18774 if (dynsec)
18775 {
18776 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
18777 if (m->p_type == PT_DYNAMIC)
18778 break;
18779
18780 if (m == NULL)
18781 {
18782 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
18783 m->next = elf_seg_map (abfd);
18784 elf_seg_map (abfd) = m;
18785 }
18786 }
18787
18788 /* Also call the generic arm routine. */
18789 return elf32_arm_modify_segment_map (abfd, info);
18790 }
18791
18792 /* Return address for Ith PLT stub in section PLT, for relocation REL
18793 or (bfd_vma) -1 if it should not be included. */
18794
18795 static bfd_vma
18796 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
18797 const arelent *rel ATTRIBUTE_UNUSED)
18798 {
18799 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
18800 }
18801
18802 #undef elf32_bed
18803 #define elf32_bed elf32_arm_symbian_bed
18804
18805 /* The dynamic sections are not allocated on SymbianOS; the postlinker
18806 will process them and then discard them. */
18807 #undef ELF_DYNAMIC_SEC_FLAGS
18808 #define ELF_DYNAMIC_SEC_FLAGS \
18809 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
18810
18811 #undef elf_backend_emit_relocs
18812
18813 #undef bfd_elf32_bfd_link_hash_table_create
18814 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
18815 #undef elf_backend_special_sections
18816 #define elf_backend_special_sections elf32_arm_symbian_special_sections
18817 #undef elf_backend_begin_write_processing
18818 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
18819 #undef elf_backend_final_write_processing
18820 #define elf_backend_final_write_processing elf32_arm_final_write_processing
18821
18822 #undef elf_backend_modify_segment_map
18823 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
18824
18825 /* There is no .got section for BPABI objects, and hence no header. */
18826 #undef elf_backend_got_header_size
18827 #define elf_backend_got_header_size 0
18828
18829 /* Similarly, there is no .got.plt section. */
18830 #undef elf_backend_want_got_plt
18831 #define elf_backend_want_got_plt 0
18832
18833 #undef elf_backend_plt_sym_val
18834 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
18835
18836 #undef elf_backend_may_use_rel_p
18837 #define elf_backend_may_use_rel_p 1
18838 #undef elf_backend_may_use_rela_p
18839 #define elf_backend_may_use_rela_p 0
18840 #undef elf_backend_default_use_rela_p
18841 #define elf_backend_default_use_rela_p 0
18842 #undef elf_backend_want_plt_sym
18843 #define elf_backend_want_plt_sym 0
18844 #undef ELF_MAXPAGESIZE
18845 #define ELF_MAXPAGESIZE 0x8000
18846
18847 #include "elf32-target.h"
This page took 0.865612 seconds and 4 git commands to generate.