1 /* 32-bit ELF support for ARM
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008, 2009, 2010 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include <limits.h>
24
25 #include "bfd.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-vxworks.h"
30 #include "elf/arm.h"
31
32 /* Return the relocation section associated with NAME.  HTAB is the
33 bfd's elf32_arm_link_hash_table.  */
34 #define RELOC_SECTION(HTAB, NAME) \
35 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
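/* For example, RELOC_SECTION (htab, ".text") is ".rel.text" when
   htab->use_rel is set, and ".rela.text" otherwise.  */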
36
37 /* Return the size of a relocation entry.  HTAB is the bfd's
38 elf32_arm_link_hash_table.  */
39 #define RELOC_SIZE(HTAB) \
40 ((HTAB)->use_rel \
41 ? sizeof (Elf32_External_Rel) \
42 : sizeof (Elf32_External_Rela))
43
44 /* Return the function used to swap relocations in.  HTAB is the bfd's
45 elf32_arm_link_hash_table.  */
46 #define SWAP_RELOC_IN(HTAB) \
47 ((HTAB)->use_rel \
48 ? bfd_elf32_swap_reloc_in \
49 : bfd_elf32_swap_reloca_in)
50
51 /* Return the function used to swap relocations out.  HTAB is the bfd's
52 elf32_arm_link_hash_table.  */
53 #define SWAP_RELOC_OUT(HTAB) \
54 ((HTAB)->use_rel \
55 ? bfd_elf32_swap_reloc_out \
56 : bfd_elf32_swap_reloca_out)
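/* Illustrative sketch only: a typical use of these helpers when emitting a
   dynamic relocation into a reloc section SRELOC looks like

     bfd_byte *loc = sreloc->contents
                     + sreloc->reloc_count++ * RELOC_SIZE (htab);
     SWAP_RELOC_OUT (htab) (output_bfd, &outrel, loc);

   where OUTREL is an Elf_Internal_Rela that has already been filled in.  */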
57
58 #define elf_info_to_howto 0
59 #define elf_info_to_howto_rel elf32_arm_info_to_howto
60
61 #define ARM_ELF_ABI_VERSION 0
62 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
63
64 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
65 struct bfd_link_info *link_info,
66 asection *sec,
67 bfd_byte *contents);
68
69 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
70 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
71 in that slot. */
72
73 static reloc_howto_type elf32_arm_howto_table_1[] =
74 {
75 /* No relocation. */
76 HOWTO (R_ARM_NONE, /* type */
77 0, /* rightshift */
78 0, /* size (0 = byte, 1 = short, 2 = long) */
79 0, /* bitsize */
80 FALSE, /* pc_relative */
81 0, /* bitpos */
82 complain_overflow_dont,/* complain_on_overflow */
83 bfd_elf_generic_reloc, /* special_function */
84 "R_ARM_NONE", /* name */
85 FALSE, /* partial_inplace */
86 0, /* src_mask */
87 0, /* dst_mask */
88 FALSE), /* pcrel_offset */
89
90 HOWTO (R_ARM_PC24, /* type */
91 2, /* rightshift */
92 2, /* size (0 = byte, 1 = short, 2 = long) */
93 24, /* bitsize */
94 TRUE, /* pc_relative */
95 0, /* bitpos */
96 complain_overflow_signed,/* complain_on_overflow */
97 bfd_elf_generic_reloc, /* special_function */
98 "R_ARM_PC24", /* name */
99 FALSE, /* partial_inplace */
100 0x00ffffff, /* src_mask */
101 0x00ffffff, /* dst_mask */
102 TRUE), /* pcrel_offset */
103
104 /* 32 bit absolute */
105 HOWTO (R_ARM_ABS32, /* type */
106 0, /* rightshift */
107 2, /* size (0 = byte, 1 = short, 2 = long) */
108 32, /* bitsize */
109 FALSE, /* pc_relative */
110 0, /* bitpos */
111 complain_overflow_bitfield,/* complain_on_overflow */
112 bfd_elf_generic_reloc, /* special_function */
113 "R_ARM_ABS32", /* name */
114 FALSE, /* partial_inplace */
115 0xffffffff, /* src_mask */
116 0xffffffff, /* dst_mask */
117 FALSE), /* pcrel_offset */
118
119 /* standard 32bit pc-relative reloc */
120 HOWTO (R_ARM_REL32, /* type */
121 0, /* rightshift */
122 2, /* size (0 = byte, 1 = short, 2 = long) */
123 32, /* bitsize */
124 TRUE, /* pc_relative */
125 0, /* bitpos */
126 complain_overflow_bitfield,/* complain_on_overflow */
127 bfd_elf_generic_reloc, /* special_function */
128 "R_ARM_REL32", /* name */
129 FALSE, /* partial_inplace */
130 0xffffffff, /* src_mask */
131 0xffffffff, /* dst_mask */
132 TRUE), /* pcrel_offset */
133
134 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
135 HOWTO (R_ARM_LDR_PC_G0, /* type */
136 0, /* rightshift */
137 0, /* size (0 = byte, 1 = short, 2 = long) */
138 32, /* bitsize */
139 TRUE, /* pc_relative */
140 0, /* bitpos */
141 complain_overflow_dont,/* complain_on_overflow */
142 bfd_elf_generic_reloc, /* special_function */
143 "R_ARM_LDR_PC_G0", /* name */
144 FALSE, /* partial_inplace */
145 0xffffffff, /* src_mask */
146 0xffffffff, /* dst_mask */
147 TRUE), /* pcrel_offset */
148
149 /* 16 bit absolute */
150 HOWTO (R_ARM_ABS16, /* type */
151 0, /* rightshift */
152 1, /* size (0 = byte, 1 = short, 2 = long) */
153 16, /* bitsize */
154 FALSE, /* pc_relative */
155 0, /* bitpos */
156 complain_overflow_bitfield,/* complain_on_overflow */
157 bfd_elf_generic_reloc, /* special_function */
158 "R_ARM_ABS16", /* name */
159 FALSE, /* partial_inplace */
160 0x0000ffff, /* src_mask */
161 0x0000ffff, /* dst_mask */
162 FALSE), /* pcrel_offset */
163
164 /* 12 bit absolute */
165 HOWTO (R_ARM_ABS12, /* type */
166 0, /* rightshift */
167 2, /* size (0 = byte, 1 = short, 2 = long) */
168 12, /* bitsize */
169 FALSE, /* pc_relative */
170 0, /* bitpos */
171 complain_overflow_bitfield,/* complain_on_overflow */
172 bfd_elf_generic_reloc, /* special_function */
173 "R_ARM_ABS12", /* name */
174 FALSE, /* partial_inplace */
175 0x00000fff, /* src_mask */
176 0x00000fff, /* dst_mask */
177 FALSE), /* pcrel_offset */
178
179 HOWTO (R_ARM_THM_ABS5, /* type */
180 6, /* rightshift */
181 1, /* size (0 = byte, 1 = short, 2 = long) */
182 5, /* bitsize */
183 FALSE, /* pc_relative */
184 0, /* bitpos */
185 complain_overflow_bitfield,/* complain_on_overflow */
186 bfd_elf_generic_reloc, /* special_function */
187 "R_ARM_THM_ABS5", /* name */
188 FALSE, /* partial_inplace */
189 0x000007e0, /* src_mask */
190 0x000007e0, /* dst_mask */
191 FALSE), /* pcrel_offset */
192
193 /* 8 bit absolute */
194 HOWTO (R_ARM_ABS8, /* type */
195 0, /* rightshift */
196 0, /* size (0 = byte, 1 = short, 2 = long) */
197 8, /* bitsize */
198 FALSE, /* pc_relative */
199 0, /* bitpos */
200 complain_overflow_bitfield,/* complain_on_overflow */
201 bfd_elf_generic_reloc, /* special_function */
202 "R_ARM_ABS8", /* name */
203 FALSE, /* partial_inplace */
204 0x000000ff, /* src_mask */
205 0x000000ff, /* dst_mask */
206 FALSE), /* pcrel_offset */
207
208 HOWTO (R_ARM_SBREL32, /* type */
209 0, /* rightshift */
210 2, /* size (0 = byte, 1 = short, 2 = long) */
211 32, /* bitsize */
212 FALSE, /* pc_relative */
213 0, /* bitpos */
214 complain_overflow_dont,/* complain_on_overflow */
215 bfd_elf_generic_reloc, /* special_function */
216 "R_ARM_SBREL32", /* name */
217 FALSE, /* partial_inplace */
218 0xffffffff, /* src_mask */
219 0xffffffff, /* dst_mask */
220 FALSE), /* pcrel_offset */
221
222 HOWTO (R_ARM_THM_CALL, /* type */
223 1, /* rightshift */
224 2, /* size (0 = byte, 1 = short, 2 = long) */
225 24, /* bitsize */
226 TRUE, /* pc_relative */
227 0, /* bitpos */
228 complain_overflow_signed,/* complain_on_overflow */
229 bfd_elf_generic_reloc, /* special_function */
230 "R_ARM_THM_CALL", /* name */
231 FALSE, /* partial_inplace */
232 0x07ff07ff, /* src_mask */
233 0x07ff07ff, /* dst_mask */
234 TRUE), /* pcrel_offset */
235
236 HOWTO (R_ARM_THM_PC8, /* type */
237 1, /* rightshift */
238 1, /* size (0 = byte, 1 = short, 2 = long) */
239 8, /* bitsize */
240 TRUE, /* pc_relative */
241 0, /* bitpos */
242 complain_overflow_signed,/* complain_on_overflow */
243 bfd_elf_generic_reloc, /* special_function */
244 "R_ARM_THM_PC8", /* name */
245 FALSE, /* partial_inplace */
246 0x000000ff, /* src_mask */
247 0x000000ff, /* dst_mask */
248 TRUE), /* pcrel_offset */
249
250 HOWTO (R_ARM_BREL_ADJ, /* type */
251 1, /* rightshift */
252 1, /* size (0 = byte, 1 = short, 2 = long) */
253 32, /* bitsize */
254 FALSE, /* pc_relative */
255 0, /* bitpos */
256 complain_overflow_signed,/* complain_on_overflow */
257 bfd_elf_generic_reloc, /* special_function */
258 "R_ARM_BREL_ADJ", /* name */
259 FALSE, /* partial_inplace */
260 0xffffffff, /* src_mask */
261 0xffffffff, /* dst_mask */
262 FALSE), /* pcrel_offset */
263
264 HOWTO (R_ARM_SWI24, /* type */
265 0, /* rightshift */
266 0, /* size (0 = byte, 1 = short, 2 = long) */
267 0, /* bitsize */
268 FALSE, /* pc_relative */
269 0, /* bitpos */
270 complain_overflow_signed,/* complain_on_overflow */
271 bfd_elf_generic_reloc, /* special_function */
272 "R_ARM_SWI24", /* name */
273 FALSE, /* partial_inplace */
274 0x00000000, /* src_mask */
275 0x00000000, /* dst_mask */
276 FALSE), /* pcrel_offset */
277
278 HOWTO (R_ARM_THM_SWI8, /* type */
279 0, /* rightshift */
280 0, /* size (0 = byte, 1 = short, 2 = long) */
281 0, /* bitsize */
282 FALSE, /* pc_relative */
283 0, /* bitpos */
284 complain_overflow_signed,/* complain_on_overflow */
285 bfd_elf_generic_reloc, /* special_function */
286 "R_ARM_SWI8", /* name */
287 FALSE, /* partial_inplace */
288 0x00000000, /* src_mask */
289 0x00000000, /* dst_mask */
290 FALSE), /* pcrel_offset */
291
292 /* BLX instruction for the ARM. */
293 HOWTO (R_ARM_XPC25, /* type */
294 2, /* rightshift */
295 2, /* size (0 = byte, 1 = short, 2 = long) */
296 25, /* bitsize */
297 TRUE, /* pc_relative */
298 0, /* bitpos */
299 complain_overflow_signed,/* complain_on_overflow */
300 bfd_elf_generic_reloc, /* special_function */
301 "R_ARM_XPC25", /* name */
302 FALSE, /* partial_inplace */
303 0x00ffffff, /* src_mask */
304 0x00ffffff, /* dst_mask */
305 TRUE), /* pcrel_offset */
306
307 /* BLX instruction for the Thumb. */
308 HOWTO (R_ARM_THM_XPC22, /* type */
309 2, /* rightshift */
310 2, /* size (0 = byte, 1 = short, 2 = long) */
311 22, /* bitsize */
312 TRUE, /* pc_relative */
313 0, /* bitpos */
314 complain_overflow_signed,/* complain_on_overflow */
315 bfd_elf_generic_reloc, /* special_function */
316 "R_ARM_THM_XPC22", /* name */
317 FALSE, /* partial_inplace */
318 0x07ff07ff, /* src_mask */
319 0x07ff07ff, /* dst_mask */
320 TRUE), /* pcrel_offset */
321
322 /* Dynamic TLS relocations. */
323
324 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
325 0, /* rightshift */
326 2, /* size (0 = byte, 1 = short, 2 = long) */
327 32, /* bitsize */
328 FALSE, /* pc_relative */
329 0, /* bitpos */
330 complain_overflow_bitfield,/* complain_on_overflow */
331 bfd_elf_generic_reloc, /* special_function */
332 "R_ARM_TLS_DTPMOD32", /* name */
333 TRUE, /* partial_inplace */
334 0xffffffff, /* src_mask */
335 0xffffffff, /* dst_mask */
336 FALSE), /* pcrel_offset */
337
338 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
339 0, /* rightshift */
340 2, /* size (0 = byte, 1 = short, 2 = long) */
341 32, /* bitsize */
342 FALSE, /* pc_relative */
343 0, /* bitpos */
344 complain_overflow_bitfield,/* complain_on_overflow */
345 bfd_elf_generic_reloc, /* special_function */
346 "R_ARM_TLS_DTPOFF32", /* name */
347 TRUE, /* partial_inplace */
348 0xffffffff, /* src_mask */
349 0xffffffff, /* dst_mask */
350 FALSE), /* pcrel_offset */
351
352 HOWTO (R_ARM_TLS_TPOFF32, /* type */
353 0, /* rightshift */
354 2, /* size (0 = byte, 1 = short, 2 = long) */
355 32, /* bitsize */
356 FALSE, /* pc_relative */
357 0, /* bitpos */
358 complain_overflow_bitfield,/* complain_on_overflow */
359 bfd_elf_generic_reloc, /* special_function */
360 "R_ARM_TLS_TPOFF32", /* name */
361 TRUE, /* partial_inplace */
362 0xffffffff, /* src_mask */
363 0xffffffff, /* dst_mask */
364 FALSE), /* pcrel_offset */
365
366 /* Relocs used in ARM Linux */
367
368 HOWTO (R_ARM_COPY, /* type */
369 0, /* rightshift */
370 2, /* size (0 = byte, 1 = short, 2 = long) */
371 32, /* bitsize */
372 FALSE, /* pc_relative */
373 0, /* bitpos */
374 complain_overflow_bitfield,/* complain_on_overflow */
375 bfd_elf_generic_reloc, /* special_function */
376 "R_ARM_COPY", /* name */
377 TRUE, /* partial_inplace */
378 0xffffffff, /* src_mask */
379 0xffffffff, /* dst_mask */
380 FALSE), /* pcrel_offset */
381
382 HOWTO (R_ARM_GLOB_DAT, /* type */
383 0, /* rightshift */
384 2, /* size (0 = byte, 1 = short, 2 = long) */
385 32, /* bitsize */
386 FALSE, /* pc_relative */
387 0, /* bitpos */
388 complain_overflow_bitfield,/* complain_on_overflow */
389 bfd_elf_generic_reloc, /* special_function */
390 "R_ARM_GLOB_DAT", /* name */
391 TRUE, /* partial_inplace */
392 0xffffffff, /* src_mask */
393 0xffffffff, /* dst_mask */
394 FALSE), /* pcrel_offset */
395
396 HOWTO (R_ARM_JUMP_SLOT, /* type */
397 0, /* rightshift */
398 2, /* size (0 = byte, 1 = short, 2 = long) */
399 32, /* bitsize */
400 FALSE, /* pc_relative */
401 0, /* bitpos */
402 complain_overflow_bitfield,/* complain_on_overflow */
403 bfd_elf_generic_reloc, /* special_function */
404 "R_ARM_JUMP_SLOT", /* name */
405 TRUE, /* partial_inplace */
406 0xffffffff, /* src_mask */
407 0xffffffff, /* dst_mask */
408 FALSE), /* pcrel_offset */
409
410 HOWTO (R_ARM_RELATIVE, /* type */
411 0, /* rightshift */
412 2, /* size (0 = byte, 1 = short, 2 = long) */
413 32, /* bitsize */
414 FALSE, /* pc_relative */
415 0, /* bitpos */
416 complain_overflow_bitfield,/* complain_on_overflow */
417 bfd_elf_generic_reloc, /* special_function */
418 "R_ARM_RELATIVE", /* name */
419 TRUE, /* partial_inplace */
420 0xffffffff, /* src_mask */
421 0xffffffff, /* dst_mask */
422 FALSE), /* pcrel_offset */
423
424 HOWTO (R_ARM_GOTOFF32, /* type */
425 0, /* rightshift */
426 2, /* size (0 = byte, 1 = short, 2 = long) */
427 32, /* bitsize */
428 FALSE, /* pc_relative */
429 0, /* bitpos */
430 complain_overflow_bitfield,/* complain_on_overflow */
431 bfd_elf_generic_reloc, /* special_function */
432 "R_ARM_GOTOFF32", /* name */
433 TRUE, /* partial_inplace */
434 0xffffffff, /* src_mask */
435 0xffffffff, /* dst_mask */
436 FALSE), /* pcrel_offset */
437
438 HOWTO (R_ARM_GOTPC, /* type */
439 0, /* rightshift */
440 2, /* size (0 = byte, 1 = short, 2 = long) */
441 32, /* bitsize */
442 TRUE, /* pc_relative */
443 0, /* bitpos */
444 complain_overflow_bitfield,/* complain_on_overflow */
445 bfd_elf_generic_reloc, /* special_function */
446 "R_ARM_GOTPC", /* name */
447 TRUE, /* partial_inplace */
448 0xffffffff, /* src_mask */
449 0xffffffff, /* dst_mask */
450 TRUE), /* pcrel_offset */
451
452 HOWTO (R_ARM_GOT32, /* type */
453 0, /* rightshift */
454 2, /* size (0 = byte, 1 = short, 2 = long) */
455 32, /* bitsize */
456 FALSE, /* pc_relative */
457 0, /* bitpos */
458 complain_overflow_bitfield,/* complain_on_overflow */
459 bfd_elf_generic_reloc, /* special_function */
460 "R_ARM_GOT32", /* name */
461 TRUE, /* partial_inplace */
462 0xffffffff, /* src_mask */
463 0xffffffff, /* dst_mask */
464 FALSE), /* pcrel_offset */
465
466 HOWTO (R_ARM_PLT32, /* type */
467 2, /* rightshift */
468 2, /* size (0 = byte, 1 = short, 2 = long) */
469 24, /* bitsize */
470 TRUE, /* pc_relative */
471 0, /* bitpos */
472 complain_overflow_bitfield,/* complain_on_overflow */
473 bfd_elf_generic_reloc, /* special_function */
474 "R_ARM_PLT32", /* name */
475 FALSE, /* partial_inplace */
476 0x00ffffff, /* src_mask */
477 0x00ffffff, /* dst_mask */
478 TRUE), /* pcrel_offset */
479
480 HOWTO (R_ARM_CALL, /* type */
481 2, /* rightshift */
482 2, /* size (0 = byte, 1 = short, 2 = long) */
483 24, /* bitsize */
484 TRUE, /* pc_relative */
485 0, /* bitpos */
486 complain_overflow_signed,/* complain_on_overflow */
487 bfd_elf_generic_reloc, /* special_function */
488 "R_ARM_CALL", /* name */
489 FALSE, /* partial_inplace */
490 0x00ffffff, /* src_mask */
491 0x00ffffff, /* dst_mask */
492 TRUE), /* pcrel_offset */
493
494 HOWTO (R_ARM_JUMP24, /* type */
495 2, /* rightshift */
496 2, /* size (0 = byte, 1 = short, 2 = long) */
497 24, /* bitsize */
498 TRUE, /* pc_relative */
499 0, /* bitpos */
500 complain_overflow_signed,/* complain_on_overflow */
501 bfd_elf_generic_reloc, /* special_function */
502 "R_ARM_JUMP24", /* name */
503 FALSE, /* partial_inplace */
504 0x00ffffff, /* src_mask */
505 0x00ffffff, /* dst_mask */
506 TRUE), /* pcrel_offset */
507
508 HOWTO (R_ARM_THM_JUMP24, /* type */
509 1, /* rightshift */
510 2, /* size (0 = byte, 1 = short, 2 = long) */
511 24, /* bitsize */
512 TRUE, /* pc_relative */
513 0, /* bitpos */
514 complain_overflow_signed,/* complain_on_overflow */
515 bfd_elf_generic_reloc, /* special_function */
516 "R_ARM_THM_JUMP24", /* name */
517 FALSE, /* partial_inplace */
518 0x07ff2fff, /* src_mask */
519 0x07ff2fff, /* dst_mask */
520 TRUE), /* pcrel_offset */
521
522 HOWTO (R_ARM_BASE_ABS, /* type */
523 0, /* rightshift */
524 2, /* size (0 = byte, 1 = short, 2 = long) */
525 32, /* bitsize */
526 FALSE, /* pc_relative */
527 0, /* bitpos */
528 complain_overflow_dont,/* complain_on_overflow */
529 bfd_elf_generic_reloc, /* special_function */
530 "R_ARM_BASE_ABS", /* name */
531 FALSE, /* partial_inplace */
532 0xffffffff, /* src_mask */
533 0xffffffff, /* dst_mask */
534 FALSE), /* pcrel_offset */
535
536 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
537 0, /* rightshift */
538 2, /* size (0 = byte, 1 = short, 2 = long) */
539 12, /* bitsize */
540 TRUE, /* pc_relative */
541 0, /* bitpos */
542 complain_overflow_dont,/* complain_on_overflow */
543 bfd_elf_generic_reloc, /* special_function */
544 "R_ARM_ALU_PCREL_7_0", /* name */
545 FALSE, /* partial_inplace */
546 0x00000fff, /* src_mask */
547 0x00000fff, /* dst_mask */
548 TRUE), /* pcrel_offset */
549
550 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
551 0, /* rightshift */
552 2, /* size (0 = byte, 1 = short, 2 = long) */
553 12, /* bitsize */
554 TRUE, /* pc_relative */
555 8, /* bitpos */
556 complain_overflow_dont,/* complain_on_overflow */
557 bfd_elf_generic_reloc, /* special_function */
558 "R_ARM_ALU_PCREL_15_8",/* name */
559 FALSE, /* partial_inplace */
560 0x00000fff, /* src_mask */
561 0x00000fff, /* dst_mask */
562 TRUE), /* pcrel_offset */
563
564 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
565 0, /* rightshift */
566 2, /* size (0 = byte, 1 = short, 2 = long) */
567 12, /* bitsize */
568 TRUE, /* pc_relative */
569 16, /* bitpos */
570 complain_overflow_dont,/* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 "R_ARM_ALU_PCREL_23_15",/* name */
573 FALSE, /* partial_inplace */
574 0x00000fff, /* src_mask */
575 0x00000fff, /* dst_mask */
576 TRUE), /* pcrel_offset */
577
578 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
579 0, /* rightshift */
580 2, /* size (0 = byte, 1 = short, 2 = long) */
581 12, /* bitsize */
582 FALSE, /* pc_relative */
583 0, /* bitpos */
584 complain_overflow_dont,/* complain_on_overflow */
585 bfd_elf_generic_reloc, /* special_function */
586 "R_ARM_LDR_SBREL_11_0",/* name */
587 FALSE, /* partial_inplace */
588 0x00000fff, /* src_mask */
589 0x00000fff, /* dst_mask */
590 FALSE), /* pcrel_offset */
591
592 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
593 0, /* rightshift */
594 2, /* size (0 = byte, 1 = short, 2 = long) */
595 8, /* bitsize */
596 FALSE, /* pc_relative */
597 12, /* bitpos */
598 complain_overflow_dont,/* complain_on_overflow */
599 bfd_elf_generic_reloc, /* special_function */
600 "R_ARM_ALU_SBREL_19_12",/* name */
601 FALSE, /* partial_inplace */
602 0x000ff000, /* src_mask */
603 0x000ff000, /* dst_mask */
604 FALSE), /* pcrel_offset */
605
606 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
607 0, /* rightshift */
608 2, /* size (0 = byte, 1 = short, 2 = long) */
609 8, /* bitsize */
610 FALSE, /* pc_relative */
611 20, /* bitpos */
612 complain_overflow_dont,/* complain_on_overflow */
613 bfd_elf_generic_reloc, /* special_function */
614 "R_ARM_ALU_SBREL_27_20",/* name */
615 FALSE, /* partial_inplace */
616 0x0ff00000, /* src_mask */
617 0x0ff00000, /* dst_mask */
618 FALSE), /* pcrel_offset */
619
620 HOWTO (R_ARM_TARGET1, /* type */
621 0, /* rightshift */
622 2, /* size (0 = byte, 1 = short, 2 = long) */
623 32, /* bitsize */
624 FALSE, /* pc_relative */
625 0, /* bitpos */
626 complain_overflow_dont,/* complain_on_overflow */
627 bfd_elf_generic_reloc, /* special_function */
628 "R_ARM_TARGET1", /* name */
629 FALSE, /* partial_inplace */
630 0xffffffff, /* src_mask */
631 0xffffffff, /* dst_mask */
632 FALSE), /* pcrel_offset */
633
634 HOWTO (R_ARM_ROSEGREL32, /* type */
635 0, /* rightshift */
636 2, /* size (0 = byte, 1 = short, 2 = long) */
637 32, /* bitsize */
638 FALSE, /* pc_relative */
639 0, /* bitpos */
640 complain_overflow_dont,/* complain_on_overflow */
641 bfd_elf_generic_reloc, /* special_function */
642 "R_ARM_ROSEGREL32", /* name */
643 FALSE, /* partial_inplace */
644 0xffffffff, /* src_mask */
645 0xffffffff, /* dst_mask */
646 FALSE), /* pcrel_offset */
647
648 HOWTO (R_ARM_V4BX, /* type */
649 0, /* rightshift */
650 2, /* size (0 = byte, 1 = short, 2 = long) */
651 32, /* bitsize */
652 FALSE, /* pc_relative */
653 0, /* bitpos */
654 complain_overflow_dont,/* complain_on_overflow */
655 bfd_elf_generic_reloc, /* special_function */
656 "R_ARM_V4BX", /* name */
657 FALSE, /* partial_inplace */
658 0xffffffff, /* src_mask */
659 0xffffffff, /* dst_mask */
660 FALSE), /* pcrel_offset */
661
662 HOWTO (R_ARM_TARGET2, /* type */
663 0, /* rightshift */
664 2, /* size (0 = byte, 1 = short, 2 = long) */
665 32, /* bitsize */
666 FALSE, /* pc_relative */
667 0, /* bitpos */
668 complain_overflow_signed,/* complain_on_overflow */
669 bfd_elf_generic_reloc, /* special_function */
670 "R_ARM_TARGET2", /* name */
671 FALSE, /* partial_inplace */
672 0xffffffff, /* src_mask */
673 0xffffffff, /* dst_mask */
674 TRUE), /* pcrel_offset */
675
676 HOWTO (R_ARM_PREL31, /* type */
677 0, /* rightshift */
678 2, /* size (0 = byte, 1 = short, 2 = long) */
679 31, /* bitsize */
680 TRUE, /* pc_relative */
681 0, /* bitpos */
682 complain_overflow_signed,/* complain_on_overflow */
683 bfd_elf_generic_reloc, /* special_function */
684 "R_ARM_PREL31", /* name */
685 FALSE, /* partial_inplace */
686 0x7fffffff, /* src_mask */
687 0x7fffffff, /* dst_mask */
688 TRUE), /* pcrel_offset */
689
690 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
691 0, /* rightshift */
692 2, /* size (0 = byte, 1 = short, 2 = long) */
693 16, /* bitsize */
694 FALSE, /* pc_relative */
695 0, /* bitpos */
696 complain_overflow_dont,/* complain_on_overflow */
697 bfd_elf_generic_reloc, /* special_function */
698 "R_ARM_MOVW_ABS_NC", /* name */
699 FALSE, /* partial_inplace */
700 0x000f0fff, /* src_mask */
701 0x000f0fff, /* dst_mask */
702 FALSE), /* pcrel_offset */
703
704 HOWTO (R_ARM_MOVT_ABS, /* type */
705 0, /* rightshift */
706 2, /* size (0 = byte, 1 = short, 2 = long) */
707 16, /* bitsize */
708 FALSE, /* pc_relative */
709 0, /* bitpos */
710 complain_overflow_bitfield,/* complain_on_overflow */
711 bfd_elf_generic_reloc, /* special_function */
712 "R_ARM_MOVT_ABS", /* name */
713 FALSE, /* partial_inplace */
714 0x000f0fff, /* src_mask */
715 0x000f0fff, /* dst_mask */
716 FALSE), /* pcrel_offset */
717
718 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
719 0, /* rightshift */
720 2, /* size (0 = byte, 1 = short, 2 = long) */
721 16, /* bitsize */
722 TRUE, /* pc_relative */
723 0, /* bitpos */
724 complain_overflow_dont,/* complain_on_overflow */
725 bfd_elf_generic_reloc, /* special_function */
726 "R_ARM_MOVW_PREL_NC", /* name */
727 FALSE, /* partial_inplace */
728 0x000f0fff, /* src_mask */
729 0x000f0fff, /* dst_mask */
730 TRUE), /* pcrel_offset */
731
732 HOWTO (R_ARM_MOVT_PREL, /* type */
733 0, /* rightshift */
734 2, /* size (0 = byte, 1 = short, 2 = long) */
735 16, /* bitsize */
736 TRUE, /* pc_relative */
737 0, /* bitpos */
738 complain_overflow_bitfield,/* complain_on_overflow */
739 bfd_elf_generic_reloc, /* special_function */
740 "R_ARM_MOVT_PREL", /* name */
741 FALSE, /* partial_inplace */
742 0x000f0fff, /* src_mask */
743 0x000f0fff, /* dst_mask */
744 TRUE), /* pcrel_offset */
745
746 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
747 0, /* rightshift */
748 2, /* size (0 = byte, 1 = short, 2 = long) */
749 16, /* bitsize */
750 FALSE, /* pc_relative */
751 0, /* bitpos */
752 complain_overflow_dont,/* complain_on_overflow */
753 bfd_elf_generic_reloc, /* special_function */
754 "R_ARM_THM_MOVW_ABS_NC",/* name */
755 FALSE, /* partial_inplace */
756 0x040f70ff, /* src_mask */
757 0x040f70ff, /* dst_mask */
758 FALSE), /* pcrel_offset */
759
760 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
761 0, /* rightshift */
762 2, /* size (0 = byte, 1 = short, 2 = long) */
763 16, /* bitsize */
764 FALSE, /* pc_relative */
765 0, /* bitpos */
766 complain_overflow_bitfield,/* complain_on_overflow */
767 bfd_elf_generic_reloc, /* special_function */
768 "R_ARM_THM_MOVT_ABS", /* name */
769 FALSE, /* partial_inplace */
770 0x040f70ff, /* src_mask */
771 0x040f70ff, /* dst_mask */
772 FALSE), /* pcrel_offset */
773
774 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
775 0, /* rightshift */
776 2, /* size (0 = byte, 1 = short, 2 = long) */
777 16, /* bitsize */
778 TRUE, /* pc_relative */
779 0, /* bitpos */
780 complain_overflow_dont,/* complain_on_overflow */
781 bfd_elf_generic_reloc, /* special_function */
782 "R_ARM_THM_MOVW_PREL_NC",/* name */
783 FALSE, /* partial_inplace */
784 0x040f70ff, /* src_mask */
785 0x040f70ff, /* dst_mask */
786 TRUE), /* pcrel_offset */
787
788 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
789 0, /* rightshift */
790 2, /* size (0 = byte, 1 = short, 2 = long) */
791 16, /* bitsize */
792 TRUE, /* pc_relative */
793 0, /* bitpos */
794 complain_overflow_bitfield,/* complain_on_overflow */
795 bfd_elf_generic_reloc, /* special_function */
796 "R_ARM_THM_MOVT_PREL", /* name */
797 FALSE, /* partial_inplace */
798 0x040f70ff, /* src_mask */
799 0x040f70ff, /* dst_mask */
800 TRUE), /* pcrel_offset */
801
802 HOWTO (R_ARM_THM_JUMP19, /* type */
803 1, /* rightshift */
804 2, /* size (0 = byte, 1 = short, 2 = long) */
805 19, /* bitsize */
806 TRUE, /* pc_relative */
807 0, /* bitpos */
808 complain_overflow_signed,/* complain_on_overflow */
809 bfd_elf_generic_reloc, /* special_function */
810 "R_ARM_THM_JUMP19", /* name */
811 FALSE, /* partial_inplace */
812 0x043f2fff, /* src_mask */
813 0x043f2fff, /* dst_mask */
814 TRUE), /* pcrel_offset */
815
816 HOWTO (R_ARM_THM_JUMP6, /* type */
817 1, /* rightshift */
818 1, /* size (0 = byte, 1 = short, 2 = long) */
819 6, /* bitsize */
820 TRUE, /* pc_relative */
821 0, /* bitpos */
822 complain_overflow_unsigned,/* complain_on_overflow */
823 bfd_elf_generic_reloc, /* special_function */
824 "R_ARM_THM_JUMP6", /* name */
825 FALSE, /* partial_inplace */
826 0x02f8, /* src_mask */
827 0x02f8, /* dst_mask */
828 TRUE), /* pcrel_offset */
829
830 /* These are declared as 13-bit signed relocations because we can
831 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
832 versa. */
833 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
834 0, /* rightshift */
835 2, /* size (0 = byte, 1 = short, 2 = long) */
836 13, /* bitsize */
837 TRUE, /* pc_relative */
838 0, /* bitpos */
839 complain_overflow_dont,/* complain_on_overflow */
840 bfd_elf_generic_reloc, /* special_function */
841 "R_ARM_THM_ALU_PREL_11_0",/* name */
842 FALSE, /* partial_inplace */
843 0xffffffff, /* src_mask */
844 0xffffffff, /* dst_mask */
845 TRUE), /* pcrel_offset */
846
847 HOWTO (R_ARM_THM_PC12, /* type */
848 0, /* rightshift */
849 2, /* size (0 = byte, 1 = short, 2 = long) */
850 13, /* bitsize */
851 TRUE, /* pc_relative */
852 0, /* bitpos */
853 complain_overflow_dont,/* complain_on_overflow */
854 bfd_elf_generic_reloc, /* special_function */
855 "R_ARM_THM_PC12", /* name */
856 FALSE, /* partial_inplace */
857 0xffffffff, /* src_mask */
858 0xffffffff, /* dst_mask */
859 TRUE), /* pcrel_offset */
860
861 HOWTO (R_ARM_ABS32_NOI, /* type */
862 0, /* rightshift */
863 2, /* size (0 = byte, 1 = short, 2 = long) */
864 32, /* bitsize */
865 FALSE, /* pc_relative */
866 0, /* bitpos */
867 complain_overflow_dont,/* complain_on_overflow */
868 bfd_elf_generic_reloc, /* special_function */
869 "R_ARM_ABS32_NOI", /* name */
870 FALSE, /* partial_inplace */
871 0xffffffff, /* src_mask */
872 0xffffffff, /* dst_mask */
873 FALSE), /* pcrel_offset */
874
875 HOWTO (R_ARM_REL32_NOI, /* type */
876 0, /* rightshift */
877 2, /* size (0 = byte, 1 = short, 2 = long) */
878 32, /* bitsize */
879 TRUE, /* pc_relative */
880 0, /* bitpos */
881 complain_overflow_dont,/* complain_on_overflow */
882 bfd_elf_generic_reloc, /* special_function */
883 "R_ARM_REL32_NOI", /* name */
884 FALSE, /* partial_inplace */
885 0xffffffff, /* src_mask */
886 0xffffffff, /* dst_mask */
887 FALSE), /* pcrel_offset */
888
889 /* Group relocations. */
890
891 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
892 0, /* rightshift */
893 2, /* size (0 = byte, 1 = short, 2 = long) */
894 32, /* bitsize */
895 TRUE, /* pc_relative */
896 0, /* bitpos */
897 complain_overflow_dont,/* complain_on_overflow */
898 bfd_elf_generic_reloc, /* special_function */
899 "R_ARM_ALU_PC_G0_NC", /* name */
900 FALSE, /* partial_inplace */
901 0xffffffff, /* src_mask */
902 0xffffffff, /* dst_mask */
903 TRUE), /* pcrel_offset */
904
905 HOWTO (R_ARM_ALU_PC_G0, /* type */
906 0, /* rightshift */
907 2, /* size (0 = byte, 1 = short, 2 = long) */
908 32, /* bitsize */
909 TRUE, /* pc_relative */
910 0, /* bitpos */
911 complain_overflow_dont,/* complain_on_overflow */
912 bfd_elf_generic_reloc, /* special_function */
913 "R_ARM_ALU_PC_G0", /* name */
914 FALSE, /* partial_inplace */
915 0xffffffff, /* src_mask */
916 0xffffffff, /* dst_mask */
917 TRUE), /* pcrel_offset */
918
919 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
920 0, /* rightshift */
921 2, /* size (0 = byte, 1 = short, 2 = long) */
922 32, /* bitsize */
923 TRUE, /* pc_relative */
924 0, /* bitpos */
925 complain_overflow_dont,/* complain_on_overflow */
926 bfd_elf_generic_reloc, /* special_function */
927 "R_ARM_ALU_PC_G1_NC", /* name */
928 FALSE, /* partial_inplace */
929 0xffffffff, /* src_mask */
930 0xffffffff, /* dst_mask */
931 TRUE), /* pcrel_offset */
932
933 HOWTO (R_ARM_ALU_PC_G1, /* type */
934 0, /* rightshift */
935 2, /* size (0 = byte, 1 = short, 2 = long) */
936 32, /* bitsize */
937 TRUE, /* pc_relative */
938 0, /* bitpos */
939 complain_overflow_dont,/* complain_on_overflow */
940 bfd_elf_generic_reloc, /* special_function */
941 "R_ARM_ALU_PC_G1", /* name */
942 FALSE, /* partial_inplace */
943 0xffffffff, /* src_mask */
944 0xffffffff, /* dst_mask */
945 TRUE), /* pcrel_offset */
946
947 HOWTO (R_ARM_ALU_PC_G2, /* type */
948 0, /* rightshift */
949 2, /* size (0 = byte, 1 = short, 2 = long) */
950 32, /* bitsize */
951 TRUE, /* pc_relative */
952 0, /* bitpos */
953 complain_overflow_dont,/* complain_on_overflow */
954 bfd_elf_generic_reloc, /* special_function */
955 "R_ARM_ALU_PC_G2", /* name */
956 FALSE, /* partial_inplace */
957 0xffffffff, /* src_mask */
958 0xffffffff, /* dst_mask */
959 TRUE), /* pcrel_offset */
960
961 HOWTO (R_ARM_LDR_PC_G1, /* type */
962 0, /* rightshift */
963 2, /* size (0 = byte, 1 = short, 2 = long) */
964 32, /* bitsize */
965 TRUE, /* pc_relative */
966 0, /* bitpos */
967 complain_overflow_dont,/* complain_on_overflow */
968 bfd_elf_generic_reloc, /* special_function */
969 "R_ARM_LDR_PC_G1", /* name */
970 FALSE, /* partial_inplace */
971 0xffffffff, /* src_mask */
972 0xffffffff, /* dst_mask */
973 TRUE), /* pcrel_offset */
974
975 HOWTO (R_ARM_LDR_PC_G2, /* type */
976 0, /* rightshift */
977 2, /* size (0 = byte, 1 = short, 2 = long) */
978 32, /* bitsize */
979 TRUE, /* pc_relative */
980 0, /* bitpos */
981 complain_overflow_dont,/* complain_on_overflow */
982 bfd_elf_generic_reloc, /* special_function */
983 "R_ARM_LDR_PC_G2", /* name */
984 FALSE, /* partial_inplace */
985 0xffffffff, /* src_mask */
986 0xffffffff, /* dst_mask */
987 TRUE), /* pcrel_offset */
988
989 HOWTO (R_ARM_LDRS_PC_G0, /* type */
990 0, /* rightshift */
991 2, /* size (0 = byte, 1 = short, 2 = long) */
992 32, /* bitsize */
993 TRUE, /* pc_relative */
994 0, /* bitpos */
995 complain_overflow_dont,/* complain_on_overflow */
996 bfd_elf_generic_reloc, /* special_function */
997 "R_ARM_LDRS_PC_G0", /* name */
998 FALSE, /* partial_inplace */
999 0xffffffff, /* src_mask */
1000 0xffffffff, /* dst_mask */
1001 TRUE), /* pcrel_offset */
1002
1003 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1004 0, /* rightshift */
1005 2, /* size (0 = byte, 1 = short, 2 = long) */
1006 32, /* bitsize */
1007 TRUE, /* pc_relative */
1008 0, /* bitpos */
1009 complain_overflow_dont,/* complain_on_overflow */
1010 bfd_elf_generic_reloc, /* special_function */
1011 "R_ARM_LDRS_PC_G1", /* name */
1012 FALSE, /* partial_inplace */
1013 0xffffffff, /* src_mask */
1014 0xffffffff, /* dst_mask */
1015 TRUE), /* pcrel_offset */
1016
1017 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1018 0, /* rightshift */
1019 2, /* size (0 = byte, 1 = short, 2 = long) */
1020 32, /* bitsize */
1021 TRUE, /* pc_relative */
1022 0, /* bitpos */
1023 complain_overflow_dont,/* complain_on_overflow */
1024 bfd_elf_generic_reloc, /* special_function */
1025 "R_ARM_LDRS_PC_G2", /* name */
1026 FALSE, /* partial_inplace */
1027 0xffffffff, /* src_mask */
1028 0xffffffff, /* dst_mask */
1029 TRUE), /* pcrel_offset */
1030
1031 HOWTO (R_ARM_LDC_PC_G0, /* type */
1032 0, /* rightshift */
1033 2, /* size (0 = byte, 1 = short, 2 = long) */
1034 32, /* bitsize */
1035 TRUE, /* pc_relative */
1036 0, /* bitpos */
1037 complain_overflow_dont,/* complain_on_overflow */
1038 bfd_elf_generic_reloc, /* special_function */
1039 "R_ARM_LDC_PC_G0", /* name */
1040 FALSE, /* partial_inplace */
1041 0xffffffff, /* src_mask */
1042 0xffffffff, /* dst_mask */
1043 TRUE), /* pcrel_offset */
1044
1045 HOWTO (R_ARM_LDC_PC_G1, /* type */
1046 0, /* rightshift */
1047 2, /* size (0 = byte, 1 = short, 2 = long) */
1048 32, /* bitsize */
1049 TRUE, /* pc_relative */
1050 0, /* bitpos */
1051 complain_overflow_dont,/* complain_on_overflow */
1052 bfd_elf_generic_reloc, /* special_function */
1053 "R_ARM_LDC_PC_G1", /* name */
1054 FALSE, /* partial_inplace */
1055 0xffffffff, /* src_mask */
1056 0xffffffff, /* dst_mask */
1057 TRUE), /* pcrel_offset */
1058
1059 HOWTO (R_ARM_LDC_PC_G2, /* type */
1060 0, /* rightshift */
1061 2, /* size (0 = byte, 1 = short, 2 = long) */
1062 32, /* bitsize */
1063 TRUE, /* pc_relative */
1064 0, /* bitpos */
1065 complain_overflow_dont,/* complain_on_overflow */
1066 bfd_elf_generic_reloc, /* special_function */
1067 "R_ARM_LDC_PC_G2", /* name */
1068 FALSE, /* partial_inplace */
1069 0xffffffff, /* src_mask */
1070 0xffffffff, /* dst_mask */
1071 TRUE), /* pcrel_offset */
1072
1073 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1074 0, /* rightshift */
1075 2, /* size (0 = byte, 1 = short, 2 = long) */
1076 32, /* bitsize */
1077 TRUE, /* pc_relative */
1078 0, /* bitpos */
1079 complain_overflow_dont,/* complain_on_overflow */
1080 bfd_elf_generic_reloc, /* special_function */
1081 "R_ARM_ALU_SB_G0_NC", /* name */
1082 FALSE, /* partial_inplace */
1083 0xffffffff, /* src_mask */
1084 0xffffffff, /* dst_mask */
1085 TRUE), /* pcrel_offset */
1086
1087 HOWTO (R_ARM_ALU_SB_G0, /* type */
1088 0, /* rightshift */
1089 2, /* size (0 = byte, 1 = short, 2 = long) */
1090 32, /* bitsize */
1091 TRUE, /* pc_relative */
1092 0, /* bitpos */
1093 complain_overflow_dont,/* complain_on_overflow */
1094 bfd_elf_generic_reloc, /* special_function */
1095 "R_ARM_ALU_SB_G0", /* name */
1096 FALSE, /* partial_inplace */
1097 0xffffffff, /* src_mask */
1098 0xffffffff, /* dst_mask */
1099 TRUE), /* pcrel_offset */
1100
1101 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1102 0, /* rightshift */
1103 2, /* size (0 = byte, 1 = short, 2 = long) */
1104 32, /* bitsize */
1105 TRUE, /* pc_relative */
1106 0, /* bitpos */
1107 complain_overflow_dont,/* complain_on_overflow */
1108 bfd_elf_generic_reloc, /* special_function */
1109 "R_ARM_ALU_SB_G1_NC", /* name */
1110 FALSE, /* partial_inplace */
1111 0xffffffff, /* src_mask */
1112 0xffffffff, /* dst_mask */
1113 TRUE), /* pcrel_offset */
1114
1115 HOWTO (R_ARM_ALU_SB_G1, /* type */
1116 0, /* rightshift */
1117 2, /* size (0 = byte, 1 = short, 2 = long) */
1118 32, /* bitsize */
1119 TRUE, /* pc_relative */
1120 0, /* bitpos */
1121 complain_overflow_dont,/* complain_on_overflow */
1122 bfd_elf_generic_reloc, /* special_function */
1123 "R_ARM_ALU_SB_G1", /* name */
1124 FALSE, /* partial_inplace */
1125 0xffffffff, /* src_mask */
1126 0xffffffff, /* dst_mask */
1127 TRUE), /* pcrel_offset */
1128
1129 HOWTO (R_ARM_ALU_SB_G2, /* type */
1130 0, /* rightshift */
1131 2, /* size (0 = byte, 1 = short, 2 = long) */
1132 32, /* bitsize */
1133 TRUE, /* pc_relative */
1134 0, /* bitpos */
1135 complain_overflow_dont,/* complain_on_overflow */
1136 bfd_elf_generic_reloc, /* special_function */
1137 "R_ARM_ALU_SB_G2", /* name */
1138 FALSE, /* partial_inplace */
1139 0xffffffff, /* src_mask */
1140 0xffffffff, /* dst_mask */
1141 TRUE), /* pcrel_offset */
1142
1143 HOWTO (R_ARM_LDR_SB_G0, /* type */
1144 0, /* rightshift */
1145 2, /* size (0 = byte, 1 = short, 2 = long) */
1146 32, /* bitsize */
1147 TRUE, /* pc_relative */
1148 0, /* bitpos */
1149 complain_overflow_dont,/* complain_on_overflow */
1150 bfd_elf_generic_reloc, /* special_function */
1151 "R_ARM_LDR_SB_G0", /* name */
1152 FALSE, /* partial_inplace */
1153 0xffffffff, /* src_mask */
1154 0xffffffff, /* dst_mask */
1155 TRUE), /* pcrel_offset */
1156
1157 HOWTO (R_ARM_LDR_SB_G1, /* type */
1158 0, /* rightshift */
1159 2, /* size (0 = byte, 1 = short, 2 = long) */
1160 32, /* bitsize */
1161 TRUE, /* pc_relative */
1162 0, /* bitpos */
1163 complain_overflow_dont,/* complain_on_overflow */
1164 bfd_elf_generic_reloc, /* special_function */
1165 "R_ARM_LDR_SB_G1", /* name */
1166 FALSE, /* partial_inplace */
1167 0xffffffff, /* src_mask */
1168 0xffffffff, /* dst_mask */
1169 TRUE), /* pcrel_offset */
1170
1171 HOWTO (R_ARM_LDR_SB_G2, /* type */
1172 0, /* rightshift */
1173 2, /* size (0 = byte, 1 = short, 2 = long) */
1174 32, /* bitsize */
1175 TRUE, /* pc_relative */
1176 0, /* bitpos */
1177 complain_overflow_dont,/* complain_on_overflow */
1178 bfd_elf_generic_reloc, /* special_function */
1179 "R_ARM_LDR_SB_G2", /* name */
1180 FALSE, /* partial_inplace */
1181 0xffffffff, /* src_mask */
1182 0xffffffff, /* dst_mask */
1183 TRUE), /* pcrel_offset */
1184
1185 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1186 0, /* rightshift */
1187 2, /* size (0 = byte, 1 = short, 2 = long) */
1188 32, /* bitsize */
1189 TRUE, /* pc_relative */
1190 0, /* bitpos */
1191 complain_overflow_dont,/* complain_on_overflow */
1192 bfd_elf_generic_reloc, /* special_function */
1193 "R_ARM_LDRS_SB_G0", /* name */
1194 FALSE, /* partial_inplace */
1195 0xffffffff, /* src_mask */
1196 0xffffffff, /* dst_mask */
1197 TRUE), /* pcrel_offset */
1198
1199 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1200 0, /* rightshift */
1201 2, /* size (0 = byte, 1 = short, 2 = long) */
1202 32, /* bitsize */
1203 TRUE, /* pc_relative */
1204 0, /* bitpos */
1205 complain_overflow_dont,/* complain_on_overflow */
1206 bfd_elf_generic_reloc, /* special_function */
1207 "R_ARM_LDRS_SB_G1", /* name */
1208 FALSE, /* partial_inplace */
1209 0xffffffff, /* src_mask */
1210 0xffffffff, /* dst_mask */
1211 TRUE), /* pcrel_offset */
1212
1213 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1214 0, /* rightshift */
1215 2, /* size (0 = byte, 1 = short, 2 = long) */
1216 32, /* bitsize */
1217 TRUE, /* pc_relative */
1218 0, /* bitpos */
1219 complain_overflow_dont,/* complain_on_overflow */
1220 bfd_elf_generic_reloc, /* special_function */
1221 "R_ARM_LDRS_SB_G2", /* name */
1222 FALSE, /* partial_inplace */
1223 0xffffffff, /* src_mask */
1224 0xffffffff, /* dst_mask */
1225 TRUE), /* pcrel_offset */
1226
1227 HOWTO (R_ARM_LDC_SB_G0, /* type */
1228 0, /* rightshift */
1229 2, /* size (0 = byte, 1 = short, 2 = long) */
1230 32, /* bitsize */
1231 TRUE, /* pc_relative */
1232 0, /* bitpos */
1233 complain_overflow_dont,/* complain_on_overflow */
1234 bfd_elf_generic_reloc, /* special_function */
1235 "R_ARM_LDC_SB_G0", /* name */
1236 FALSE, /* partial_inplace */
1237 0xffffffff, /* src_mask */
1238 0xffffffff, /* dst_mask */
1239 TRUE), /* pcrel_offset */
1240
1241 HOWTO (R_ARM_LDC_SB_G1, /* type */
1242 0, /* rightshift */
1243 2, /* size (0 = byte, 1 = short, 2 = long) */
1244 32, /* bitsize */
1245 TRUE, /* pc_relative */
1246 0, /* bitpos */
1247 complain_overflow_dont,/* complain_on_overflow */
1248 bfd_elf_generic_reloc, /* special_function */
1249 "R_ARM_LDC_SB_G1", /* name */
1250 FALSE, /* partial_inplace */
1251 0xffffffff, /* src_mask */
1252 0xffffffff, /* dst_mask */
1253 TRUE), /* pcrel_offset */
1254
1255 HOWTO (R_ARM_LDC_SB_G2, /* type */
1256 0, /* rightshift */
1257 2, /* size (0 = byte, 1 = short, 2 = long) */
1258 32, /* bitsize */
1259 TRUE, /* pc_relative */
1260 0, /* bitpos */
1261 complain_overflow_dont,/* complain_on_overflow */
1262 bfd_elf_generic_reloc, /* special_function */
1263 "R_ARM_LDC_SB_G2", /* name */
1264 FALSE, /* partial_inplace */
1265 0xffffffff, /* src_mask */
1266 0xffffffff, /* dst_mask */
1267 TRUE), /* pcrel_offset */
1268
1269 /* End of group relocations. */
1270
1271 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1272 0, /* rightshift */
1273 2, /* size (0 = byte, 1 = short, 2 = long) */
1274 16, /* bitsize */
1275 FALSE, /* pc_relative */
1276 0, /* bitpos */
1277 complain_overflow_dont,/* complain_on_overflow */
1278 bfd_elf_generic_reloc, /* special_function */
1279 "R_ARM_MOVW_BREL_NC", /* name */
1280 FALSE, /* partial_inplace */
1281 0x0000ffff, /* src_mask */
1282 0x0000ffff, /* dst_mask */
1283 FALSE), /* pcrel_offset */
1284
1285 HOWTO (R_ARM_MOVT_BREL, /* type */
1286 0, /* rightshift */
1287 2, /* size (0 = byte, 1 = short, 2 = long) */
1288 16, /* bitsize */
1289 FALSE, /* pc_relative */
1290 0, /* bitpos */
1291 complain_overflow_bitfield,/* complain_on_overflow */
1292 bfd_elf_generic_reloc, /* special_function */
1293 "R_ARM_MOVT_BREL", /* name */
1294 FALSE, /* partial_inplace */
1295 0x0000ffff, /* src_mask */
1296 0x0000ffff, /* dst_mask */
1297 FALSE), /* pcrel_offset */
1298
1299 HOWTO (R_ARM_MOVW_BREL, /* type */
1300 0, /* rightshift */
1301 2, /* size (0 = byte, 1 = short, 2 = long) */
1302 16, /* bitsize */
1303 FALSE, /* pc_relative */
1304 0, /* bitpos */
1305 complain_overflow_dont,/* complain_on_overflow */
1306 bfd_elf_generic_reloc, /* special_function */
1307 "R_ARM_MOVW_BREL", /* name */
1308 FALSE, /* partial_inplace */
1309 0x0000ffff, /* src_mask */
1310 0x0000ffff, /* dst_mask */
1311 FALSE), /* pcrel_offset */
1312
1313 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1314 0, /* rightshift */
1315 2, /* size (0 = byte, 1 = short, 2 = long) */
1316 16, /* bitsize */
1317 FALSE, /* pc_relative */
1318 0, /* bitpos */
1319 complain_overflow_dont,/* complain_on_overflow */
1320 bfd_elf_generic_reloc, /* special_function */
1321 "R_ARM_THM_MOVW_BREL_NC",/* name */
1322 FALSE, /* partial_inplace */
1323 0x040f70ff, /* src_mask */
1324 0x040f70ff, /* dst_mask */
1325 FALSE), /* pcrel_offset */
1326
1327 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1328 0, /* rightshift */
1329 2, /* size (0 = byte, 1 = short, 2 = long) */
1330 16, /* bitsize */
1331 FALSE, /* pc_relative */
1332 0, /* bitpos */
1333 complain_overflow_bitfield,/* complain_on_overflow */
1334 bfd_elf_generic_reloc, /* special_function */
1335 "R_ARM_THM_MOVT_BREL", /* name */
1336 FALSE, /* partial_inplace */
1337 0x040f70ff, /* src_mask */
1338 0x040f70ff, /* dst_mask */
1339 FALSE), /* pcrel_offset */
1340
1341 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1342 0, /* rightshift */
1343 2, /* size (0 = byte, 1 = short, 2 = long) */
1344 16, /* bitsize */
1345 FALSE, /* pc_relative */
1346 0, /* bitpos */
1347 complain_overflow_dont,/* complain_on_overflow */
1348 bfd_elf_generic_reloc, /* special_function */
1349 "R_ARM_THM_MOVW_BREL", /* name */
1350 FALSE, /* partial_inplace */
1351 0x040f70ff, /* src_mask */
1352 0x040f70ff, /* dst_mask */
1353 FALSE), /* pcrel_offset */
1354
1355 EMPTY_HOWTO (90), /* Unallocated. */
1356 EMPTY_HOWTO (91),
1357 EMPTY_HOWTO (92),
1358 EMPTY_HOWTO (93),
1359
1360 HOWTO (R_ARM_PLT32_ABS, /* type */
1361 0, /* rightshift */
1362 2, /* size (0 = byte, 1 = short, 2 = long) */
1363 32, /* bitsize */
1364 FALSE, /* pc_relative */
1365 0, /* bitpos */
1366 complain_overflow_dont,/* complain_on_overflow */
1367 bfd_elf_generic_reloc, /* special_function */
1368 "R_ARM_PLT32_ABS", /* name */
1369 FALSE, /* partial_inplace */
1370 0xffffffff, /* src_mask */
1371 0xffffffff, /* dst_mask */
1372 FALSE), /* pcrel_offset */
1373
1374 HOWTO (R_ARM_GOT_ABS, /* type */
1375 0, /* rightshift */
1376 2, /* size (0 = byte, 1 = short, 2 = long) */
1377 32, /* bitsize */
1378 FALSE, /* pc_relative */
1379 0, /* bitpos */
1380 complain_overflow_dont,/* complain_on_overflow */
1381 bfd_elf_generic_reloc, /* special_function */
1382 "R_ARM_GOT_ABS", /* name */
1383 FALSE, /* partial_inplace */
1384 0xffffffff, /* src_mask */
1385 0xffffffff, /* dst_mask */
1386 FALSE), /* pcrel_offset */
1387
1388 HOWTO (R_ARM_GOT_PREL, /* type */
1389 0, /* rightshift */
1390 2, /* size (0 = byte, 1 = short, 2 = long) */
1391 32, /* bitsize */
1392 TRUE, /* pc_relative */
1393 0, /* bitpos */
1394 complain_overflow_dont, /* complain_on_overflow */
1395 bfd_elf_generic_reloc, /* special_function */
1396 "R_ARM_GOT_PREL", /* name */
1397 FALSE, /* partial_inplace */
1398 0xffffffff, /* src_mask */
1399 0xffffffff, /* dst_mask */
1400 TRUE), /* pcrel_offset */
1401
1402 HOWTO (R_ARM_GOT_BREL12, /* type */
1403 0, /* rightshift */
1404 2, /* size (0 = byte, 1 = short, 2 = long) */
1405 12, /* bitsize */
1406 FALSE, /* pc_relative */
1407 0, /* bitpos */
1408 complain_overflow_bitfield,/* complain_on_overflow */
1409 bfd_elf_generic_reloc, /* special_function */
1410 "R_ARM_GOT_BREL12", /* name */
1411 FALSE, /* partial_inplace */
1412 0x00000fff, /* src_mask */
1413 0x00000fff, /* dst_mask */
1414 FALSE), /* pcrel_offset */
1415
1416 HOWTO (R_ARM_GOTOFF12, /* type */
1417 0, /* rightshift */
1418 2, /* size (0 = byte, 1 = short, 2 = long) */
1419 12, /* bitsize */
1420 FALSE, /* pc_relative */
1421 0, /* bitpos */
1422 complain_overflow_bitfield,/* complain_on_overflow */
1423 bfd_elf_generic_reloc, /* special_function */
1424 "R_ARM_GOTOFF12", /* name */
1425 FALSE, /* partial_inplace */
1426 0x00000fff, /* src_mask */
1427 0x00000fff, /* dst_mask */
1428 FALSE), /* pcrel_offset */
1429
1430 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1431
1432 /* GNU extension to record C++ vtable member usage */
1433 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1434 0, /* rightshift */
1435 2, /* size (0 = byte, 1 = short, 2 = long) */
1436 0, /* bitsize */
1437 FALSE, /* pc_relative */
1438 0, /* bitpos */
1439 complain_overflow_dont, /* complain_on_overflow */
1440 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1441 "R_ARM_GNU_VTENTRY", /* name */
1442 FALSE, /* partial_inplace */
1443 0, /* src_mask */
1444 0, /* dst_mask */
1445 FALSE), /* pcrel_offset */
1446
1447 /* GNU extension to record C++ vtable hierarchy */
1448 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1449 0, /* rightshift */
1450 2, /* size (0 = byte, 1 = short, 2 = long) */
1451 0, /* bitsize */
1452 FALSE, /* pc_relative */
1453 0, /* bitpos */
1454 complain_overflow_dont, /* complain_on_overflow */
1455 NULL, /* special_function */
1456 "R_ARM_GNU_VTINHERIT", /* name */
1457 FALSE, /* partial_inplace */
1458 0, /* src_mask */
1459 0, /* dst_mask */
1460 FALSE), /* pcrel_offset */
1461
1462 HOWTO (R_ARM_THM_JUMP11, /* type */
1463 1, /* rightshift */
1464 1, /* size (0 = byte, 1 = short, 2 = long) */
1465 11, /* bitsize */
1466 TRUE, /* pc_relative */
1467 0, /* bitpos */
1468 complain_overflow_signed, /* complain_on_overflow */
1469 bfd_elf_generic_reloc, /* special_function */
1470 "R_ARM_THM_JUMP11", /* name */
1471 FALSE, /* partial_inplace */
1472 0x000007ff, /* src_mask */
1473 0x000007ff, /* dst_mask */
1474 TRUE), /* pcrel_offset */
1475
1476 HOWTO (R_ARM_THM_JUMP8, /* type */
1477 1, /* rightshift */
1478 1, /* size (0 = byte, 1 = short, 2 = long) */
1479 8, /* bitsize */
1480 TRUE, /* pc_relative */
1481 0, /* bitpos */
1482 complain_overflow_signed, /* complain_on_overflow */
1483 bfd_elf_generic_reloc, /* special_function */
1484 "R_ARM_THM_JUMP8", /* name */
1485 FALSE, /* partial_inplace */
1486 0x000000ff, /* src_mask */
1487 0x000000ff, /* dst_mask */
1488 TRUE), /* pcrel_offset */
1489
1490 /* TLS relocations */
1491 HOWTO (R_ARM_TLS_GD32, /* type */
1492 0, /* rightshift */
1493 2, /* size (0 = byte, 1 = short, 2 = long) */
1494 32, /* bitsize */
1495 FALSE, /* pc_relative */
1496 0, /* bitpos */
1497 complain_overflow_bitfield,/* complain_on_overflow */
1498 NULL, /* special_function */
1499 "R_ARM_TLS_GD32", /* name */
1500 TRUE, /* partial_inplace */
1501 0xffffffff, /* src_mask */
1502 0xffffffff, /* dst_mask */
1503 FALSE), /* pcrel_offset */
1504
1505 HOWTO (R_ARM_TLS_LDM32, /* type */
1506 0, /* rightshift */
1507 2, /* size (0 = byte, 1 = short, 2 = long) */
1508 32, /* bitsize */
1509 FALSE, /* pc_relative */
1510 0, /* bitpos */
1511 complain_overflow_bitfield,/* complain_on_overflow */
1512 bfd_elf_generic_reloc, /* special_function */
1513 "R_ARM_TLS_LDM32", /* name */
1514 TRUE, /* partial_inplace */
1515 0xffffffff, /* src_mask */
1516 0xffffffff, /* dst_mask */
1517 FALSE), /* pcrel_offset */
1518
1519 HOWTO (R_ARM_TLS_LDO32, /* type */
1520 0, /* rightshift */
1521 2, /* size (0 = byte, 1 = short, 2 = long) */
1522 32, /* bitsize */
1523 FALSE, /* pc_relative */
1524 0, /* bitpos */
1525 complain_overflow_bitfield,/* complain_on_overflow */
1526 bfd_elf_generic_reloc, /* special_function */
1527 "R_ARM_TLS_LDO32", /* name */
1528 TRUE, /* partial_inplace */
1529 0xffffffff, /* src_mask */
1530 0xffffffff, /* dst_mask */
1531 FALSE), /* pcrel_offset */
1532
1533 HOWTO (R_ARM_TLS_IE32, /* type */
1534 0, /* rightshift */
1535 2, /* size (0 = byte, 1 = short, 2 = long) */
1536 32, /* bitsize */
1537 FALSE, /* pc_relative */
1538 0, /* bitpos */
1539 complain_overflow_bitfield,/* complain_on_overflow */
1540 NULL, /* special_function */
1541 "R_ARM_TLS_IE32", /* name */
1542 TRUE, /* partial_inplace */
1543 0xffffffff, /* src_mask */
1544 0xffffffff, /* dst_mask */
1545 FALSE), /* pcrel_offset */
1546
1547 HOWTO (R_ARM_TLS_LE32, /* type */
1548 0, /* rightshift */
1549 2, /* size (0 = byte, 1 = short, 2 = long) */
1550 32, /* bitsize */
1551 FALSE, /* pc_relative */
1552 0, /* bitpos */
1553 complain_overflow_bitfield,/* complain_on_overflow */
1554 bfd_elf_generic_reloc, /* special_function */
1555 "R_ARM_TLS_LE32", /* name */
1556 TRUE, /* partial_inplace */
1557 0xffffffff, /* src_mask */
1558 0xffffffff, /* dst_mask */
1559 FALSE), /* pcrel_offset */
1560
1561 HOWTO (R_ARM_TLS_LDO12, /* type */
1562 0, /* rightshift */
1563 2, /* size (0 = byte, 1 = short, 2 = long) */
1564 12, /* bitsize */
1565 FALSE, /* pc_relative */
1566 0, /* bitpos */
1567 complain_overflow_bitfield,/* complain_on_overflow */
1568 bfd_elf_generic_reloc, /* special_function */
1569 "R_ARM_TLS_LDO12", /* name */
1570 FALSE, /* partial_inplace */
1571 0x00000fff, /* src_mask */
1572 0x00000fff, /* dst_mask */
1573 FALSE), /* pcrel_offset */
1574
1575 HOWTO (R_ARM_TLS_LE12, /* type */
1576 0, /* rightshift */
1577 2, /* size (0 = byte, 1 = short, 2 = long) */
1578 12, /* bitsize */
1579 FALSE, /* pc_relative */
1580 0, /* bitpos */
1581 complain_overflow_bitfield,/* complain_on_overflow */
1582 bfd_elf_generic_reloc, /* special_function */
1583 "R_ARM_TLS_LE12", /* name */
1584 FALSE, /* partial_inplace */
1585 0x00000fff, /* src_mask */
1586 0x00000fff, /* dst_mask */
1587 FALSE), /* pcrel_offset */
1588
1589 HOWTO (R_ARM_TLS_IE12GP, /* type */
1590 0, /* rightshift */
1591 2, /* size (0 = byte, 1 = short, 2 = long) */
1592 12, /* bitsize */
1593 FALSE, /* pc_relative */
1594 0, /* bitpos */
1595 complain_overflow_bitfield,/* complain_on_overflow */
1596 bfd_elf_generic_reloc, /* special_function */
1597 "R_ARM_TLS_IE12GP", /* name */
1598 FALSE, /* partial_inplace */
1599 0x00000fff, /* src_mask */
1600 0x00000fff, /* dst_mask */
1601 FALSE), /* pcrel_offset */
1602 };
1603
1604 /* 112-127 private relocations
1605 128 R_ARM_ME_TOO, obsolete
1606 129-255 unallocated in AAELF.
1607
1608 249-255 extended, currently unused, relocations: */
1609
1610 static reloc_howto_type elf32_arm_howto_table_2[4] =
1611 {
1612 HOWTO (R_ARM_RREL32, /* type */
1613 0, /* rightshift */
1614 0, /* size (0 = byte, 1 = short, 2 = long) */
1615 0, /* bitsize */
1616 FALSE, /* pc_relative */
1617 0, /* bitpos */
1618 complain_overflow_dont,/* complain_on_overflow */
1619 bfd_elf_generic_reloc, /* special_function */
1620 "R_ARM_RREL32", /* name */
1621 FALSE, /* partial_inplace */
1622 0, /* src_mask */
1623 0, /* dst_mask */
1624 FALSE), /* pcrel_offset */
1625
1626 HOWTO (R_ARM_RABS32, /* type */
1627 0, /* rightshift */
1628 0, /* size (0 = byte, 1 = short, 2 = long) */
1629 0, /* bitsize */
1630 FALSE, /* pc_relative */
1631 0, /* bitpos */
1632 complain_overflow_dont,/* complain_on_overflow */
1633 bfd_elf_generic_reloc, /* special_function */
1634 "R_ARM_RABS32", /* name */
1635 FALSE, /* partial_inplace */
1636 0, /* src_mask */
1637 0, /* dst_mask */
1638 FALSE), /* pcrel_offset */
1639
1640 HOWTO (R_ARM_RPC24, /* type */
1641 0, /* rightshift */
1642 0, /* size (0 = byte, 1 = short, 2 = long) */
1643 0, /* bitsize */
1644 FALSE, /* pc_relative */
1645 0, /* bitpos */
1646 complain_overflow_dont,/* complain_on_overflow */
1647 bfd_elf_generic_reloc, /* special_function */
1648 "R_ARM_RPC24", /* name */
1649 FALSE, /* partial_inplace */
1650 0, /* src_mask */
1651 0, /* dst_mask */
1652 FALSE), /* pcrel_offset */
1653
1654 HOWTO (R_ARM_RBASE, /* type */
1655 0, /* rightshift */
1656 0, /* size (0 = byte, 1 = short, 2 = long) */
1657 0, /* bitsize */
1658 FALSE, /* pc_relative */
1659 0, /* bitpos */
1660 complain_overflow_dont,/* complain_on_overflow */
1661 bfd_elf_generic_reloc, /* special_function */
1662 "R_ARM_RBASE", /* name */
1663 FALSE, /* partial_inplace */
1664 0, /* src_mask */
1665 0, /* dst_mask */
1666 FALSE) /* pcrel_offset */
1667 };
1668
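/* Return the howto describing relocation type R_TYPE, looking it up in
   either howto table, or NULL if the type is not recognised.  */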
1669 static reloc_howto_type *
1670 elf32_arm_howto_from_type (unsigned int r_type)
1671 {
1672 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1673 return &elf32_arm_howto_table_1[r_type];
1674
1675 if (r_type >= R_ARM_RREL32
1676 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_2))
1677 return &elf32_arm_howto_table_2[r_type - R_ARM_RREL32];
1678
1679 return NULL;
1680 }
1681
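/* Set the howto pointer of BFD_RELOC according to the relocation type
   encoded in ELF_RELOC.  */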
1682 static void
1683 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1684 Elf_Internal_Rela * elf_reloc)
1685 {
1686 unsigned int r_type;
1687
1688 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1689 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1690 }
1691
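/* An entry mapping a BFD reloc code onto the corresponding ELF
   relocation type.  */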
1692 struct elf32_arm_reloc_map
1693 {
1694 bfd_reloc_code_real_type bfd_reloc_val;
1695 unsigned char elf_reloc_val;
1696 };
1697
1698 /* All entries in this list must also be present in elf32_arm_howto_table. */
1699 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1700 {
1701 {BFD_RELOC_NONE, R_ARM_NONE},
1702 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1703 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1704 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1705 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1706 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1707 {BFD_RELOC_32, R_ARM_ABS32},
1708 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1709 {BFD_RELOC_8, R_ARM_ABS8},
1710 {BFD_RELOC_16, R_ARM_ABS16},
1711 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1712 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1713 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1714 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1715 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1716 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1717 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1718 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1719 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1720 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1721 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1722 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1723 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1724 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1725 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1726 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1727 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1728 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1729 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1730 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1731 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1732 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1733 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1734 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1735 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1736 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1737 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1738 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1739 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1740 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1741 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1742 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1743 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1744 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1745 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1746 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1747 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1748 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1749 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1750 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1751 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1752 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1753 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1754 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1755 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1756 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1757 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1758 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1759 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1760 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1761 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1762 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1763 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1764 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1765 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1766 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1767 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1768 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1769 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1770 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1771 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1772 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1773 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1774 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1775 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1776 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1777 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1778 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1779 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1780 };
1781
1782 static reloc_howto_type *
1783 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1784 bfd_reloc_code_real_type code)
1785 {
1786 unsigned int i;
1787
1788 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1789 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1790 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1791
1792 return NULL;
1793 }
1794
1795 static reloc_howto_type *
1796 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1797 const char *r_name)
1798 {
1799 unsigned int i;
1800
1801 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1802 if (elf32_arm_howto_table_1[i].name != NULL
1803 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1804 return &elf32_arm_howto_table_1[i];
1805
1806 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1807 if (elf32_arm_howto_table_2[i].name != NULL
1808 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1809 return &elf32_arm_howto_table_2[i];
1810
1811 return NULL;
1812 }
1813
1814 /* Support for core dump NOTE sections. */
1815
1816 static bfd_boolean
1817 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1818 {
1819 int offset;
1820 size_t size;
1821
1822 switch (note->descsz)
1823 {
1824 default:
1825 return FALSE;
1826
1827 case 148: /* Linux/ARM 32-bit. */
1828 /* pr_cursig */
1829 elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
1830
1831 /* pr_pid */
1832 elf_tdata (abfd)->core_lwpid = bfd_get_32 (abfd, note->descdata + 24);
1833
1834 /* pr_reg */
1835 offset = 72;
1836 size = 72;
1837
1838 break;
1839 }
1840
1841 /* Make a ".reg/999" section. */
1842 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1843 size, note->descpos + offset);
1844 }
1845
1846 static bfd_boolean
1847 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1848 {
1849 switch (note->descsz)
1850 {
1851 default:
1852 return FALSE;
1853
1854 case 124: /* Linux/ARM elf_prpsinfo. */
1855 elf_tdata (abfd)->core_program
1856 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1857 elf_tdata (abfd)->core_command
1858 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1859 }
1860
1861 /* Note that, for some reason, some implementations (at least one,
1862 anyway) tack a spurious space onto the end of the args, so strip
1863 it off if it exists. */
1864 {
1865 char *command = elf_tdata (abfd)->core_command;
1866 int n = strlen (command);
1867
1868 if (0 < n && command[n - 1] == ' ')
1869 command[n - 1] = '\0';
1870 }
1871
1872 return TRUE;
1873 }
1874
1875 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec
1876 #define TARGET_LITTLE_NAME "elf32-littlearm"
1877 #define TARGET_BIG_SYM bfd_elf32_bigarm_vec
1878 #define TARGET_BIG_NAME "elf32-bigarm"
1879
1880 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
1881 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
1882
1883 typedef unsigned long int insn32;
1884 typedef unsigned short int insn16;
1885
1886 /* In lieu of proper flags, assume all EABIv4 or later objects are
1887 interworkable. */
1888 #define INTERWORK_FLAG(abfd) \
1889 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
1890 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
1891 || ((abfd)->flags & BFD_LINKER_CREATED))
1892
1893 /* The linker script knows the section names for placement.
1894 The entry_names are used to do simple name mangling on the stubs.
1895 Given a function name and its type, the stub can be found. The
1896 name can be changed; the only requirement is that the %s be present. */
1897 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
1898 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
1899
1900 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
1901 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
1902
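/* Illustrative sketch only (not used by the build): for a hypothetical
   function "foo" that needs interworking glue, the entry name patterns
   above would expand roughly as follows; "foo" and the fixed-size buffer
   are assumptions made purely for this example:

     char name[64];
     sprintf (name, THUMB2ARM_GLUE_ENTRY_NAME, "foo");   yields "__foo_from_thumb"
     sprintf (name, ARM2THUMB_GLUE_ENTRY_NAME, "foo");   yields "__foo_from_arm"   */
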
1903 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
1904 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
1905
1906 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
1907 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
1908
1909 #define STUB_ENTRY_NAME "__%s_veneer"
1910
1911 /* The name of the dynamic interpreter. This is put in the .interp
1912 section. */
1913 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
1914
1915 #ifdef FOUR_WORD_PLT
1916
1917 /* The first entry in a procedure linkage table looks like
1918 this. It is set up so that any shared library function that is
1919 called before the relocation has been set up calls the dynamic
1920 linker first. */
1921 static const bfd_vma elf32_arm_plt0_entry [] =
1922 {
1923 0xe52de004, /* str lr, [sp, #-4]! */
1924 0xe59fe010, /* ldr lr, [pc, #16] */
1925 0xe08fe00e, /* add lr, pc, lr */
1926 0xe5bef008, /* ldr pc, [lr, #8]! */
1927 };
1928
1929 /* Subsequent entries in a procedure linkage table look like
1930 this. */
1931 static const bfd_vma elf32_arm_plt_entry [] =
1932 {
1933 0xe28fc600, /* add ip, pc, #NN */
1934 0xe28cca00, /* add ip, ip, #NN */
1935 0xe5bcf000, /* ldr pc, [ip, #NN]! */
1936 0x00000000, /* unused */
1937 };
1938
1939 #else
1940
1941 /* The first entry in a procedure linkage table looks like
1942 this. It is set up so that any shared library function that is
1943 called before the relocation has been set up calls the dynamic
1944 linker first. */
1945 static const bfd_vma elf32_arm_plt0_entry [] =
1946 {
1947 0xe52de004, /* str lr, [sp, #-4]! */
1948 0xe59fe004, /* ldr lr, [pc, #4] */
1949 0xe08fe00e, /* add lr, pc, lr */
1950 0xe5bef008, /* ldr pc, [lr, #8]! */
1951 0x00000000, /* &GOT[0] - . */
1952 };
1953
1954 /* Subsequent entries in a procedure linkage table look like
1955 this. */
1956 static const bfd_vma elf32_arm_plt_entry [] =
1957 {
1958 0xe28fc600, /* add ip, pc, #0xNN00000 */
1959 0xe28cca00, /* add ip, ip, #0xNN000 */
1960 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
1961 };
1962
1963 #endif
1964
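/* A worked example of the immediate split sketched by the #0xNN00000,
   #0xNN000 and #0xNNN placeholders above (illustrative numbers only):
   if the .got.plt entry lies 0x1234568 bytes beyond the pc value seen
   by the first add (its own address + 8), then

     0x1234568 = 0x1200000 + 0x34000 + 0x568

   so the two adds contribute 0x1200000 and 0x34000, and the ldr's
   12-bit immediate supplies the remaining 0x568.  The code that
   actually patches these fields lives later in this file.  */
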
1965 /* The format of the first entry in the procedure linkage table
1966 for a VxWorks executable. */
1967 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
1968 {
1969 0xe52dc008, /* str ip,[sp,#-8]! */
1970 0xe59fc000, /* ldr ip,[pc] */
1971 0xe59cf008, /* ldr pc,[ip,#8] */
1972 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
1973 };
1974
1975 /* The format of subsequent entries in a VxWorks executable. */
1976 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
1977 {
1978 0xe59fc000, /* ldr ip,[pc] */
1979 0xe59cf000, /* ldr pc,[ip] */
1980 0x00000000, /* .long @got */
1981 0xe59fc000, /* ldr ip,[pc] */
1982 0xea000000, /* b _PLT */
1983 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1984 };
1985
1986 /* The format of entries in a VxWorks shared library. */
1987 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
1988 {
1989 0xe59fc000, /* ldr ip,[pc] */
1990 0xe79cf009, /* ldr pc,[ip,r9] */
1991 0x00000000, /* .long @got */
1992 0xe59fc000, /* ldr ip,[pc] */
1993 0xe599f008, /* ldr pc,[r9,#8] */
1994 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1995 };
1996
1997 /* An initial stub used if the PLT entry is referenced from Thumb code. */
1998 #define PLT_THUMB_STUB_SIZE 4
1999 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2000 {
2001 0x4778, /* bx pc */
2002 0x46c0 /* nop */
2003 };
2004
2005 /* The entries in a PLT when using a DLL-based target with multiple
2006 address spaces. */
2007 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2008 {
2009 0xe51ff004, /* ldr pc, [pc, #-4] */
2010 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2011 };
2012
2013 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2014 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2015 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) - 2 + 4)
2016 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2017 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2018 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
2019
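/* How the limits above are derived (informal sketch): an ARM B/BL
   encodes a signed 24-bit word offset, giving roughly +/-2^25 bytes
   around pc (= instruction address + 8), hence ((1 << 23) - 1) << 2
   plus 8 forwards.  A Thumb-1 BL pair encodes roughly +/-2^22 bytes
   around pc (= instruction address + 4), and a Thumb-2 BL/B.W extends
   that to roughly +/-2^24 bytes, which is where the THM_ and THM2_
   limits come from.  */
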
2020 enum stub_insn_type
2021 {
2022 THUMB16_TYPE = 1,
2023 THUMB32_TYPE,
2024 ARM_TYPE,
2025 DATA_TYPE
2026 };
2027
2028 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2029 /* Something of a hack: a Thumb conditional branch template, into which
2030 the proper condition is inserted by arm_build_one_stub(). */
2031 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2032 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2033 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2034 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2035 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2036 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2037
2038 typedef struct
2039 {
2040 bfd_vma data;
2041 enum stub_insn_type type;
2042 unsigned int r_type;
2043 int reloc_addend;
2044 } insn_sequence;
2045
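/* For reference, a few of the macros above expand to insn_sequence
   entries as follows (purely illustrative):

     ARM_INSN (0xe51ff004)          -> {0xe51ff004, ARM_TYPE,  R_ARM_NONE,   0}
     ARM_REL_INSN (0xea000000, -8)  -> {0xea000000, ARM_TYPE,  R_ARM_JUMP24, -8}
     DATA_WORD (0, R_ARM_ABS32, 0)  -> {0x0,        DATA_TYPE, R_ARM_ABS32,  0}

   i.e. each template element records the raw instruction or data word,
   how it should be written out, and any relocation that must be applied
   to it when the stub is built.  */
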
2046 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2047 to reach the stub if necessary. */
2048 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2049 {
2050 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2051 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2052 };
2053
2054 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2055 available. */
2056 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2057 {
2058 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2059 ARM_INSN(0xe12fff1c), /* bx ip */
2060 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2061 };
2062
2063 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2064 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2065 {
2066 THUMB16_INSN(0xb401), /* push {r0} */
2067 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2068 THUMB16_INSN(0x4684), /* mov ip, r0 */
2069 THUMB16_INSN(0xbc01), /* pop {r0} */
2070 THUMB16_INSN(0x4760), /* bx ip */
2071 THUMB16_INSN(0xbf00), /* nop */
2072 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2073 };
2074
2075 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2076 allowed. */
2077 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2078 {
2079 THUMB16_INSN(0x4778), /* bx pc */
2080 THUMB16_INSN(0x46c0), /* nop */
2081 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2082 ARM_INSN(0xe12fff1c), /* bx ip */
2083 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2084 };
2085
2086 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2087 available. */
2088 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2089 {
2090 THUMB16_INSN(0x4778), /* bx pc */
2091 THUMB16_INSN(0x46c0), /* nop */
2092 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2093 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2094 };
2095
2096 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2097 one, when the destination is close enough. */
2098 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2099 {
2100 THUMB16_INSN(0x4778), /* bx pc */
2101 THUMB16_INSN(0x46c0), /* nop */
2102 ARM_REL_INSN(0xea000000, -8), /* b (X-8) */
2103 };
2104
2105 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2106 blx to reach the stub if necessary. */
2107 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2108 {
2109 ARM_INSN(0xe59fc000), /* ldr ip, [pc] */
2110 ARM_INSN(0xe08ff00c), /* add pc, pc, ip */
2111 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2112 };
2113
2114 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2115 blx to reach the stub if necessary. We cannot add directly into pc,
2116 because that is not guaranteed to switch mode (the behaviour differs
2117 between ARMv6 and ARMv7). */
2118 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2119 {
2120 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2121 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2122 ARM_INSN(0xe12fff1c), /* bx ip */
2123 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2124 };
2125
2126 /* V4T ARM -> Thumb long branch stub, PIC. */
2127 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2128 {
2129 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2130 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2131 ARM_INSN(0xe12fff1c), /* bx ip */
2132 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2133 };
2134
2135 /* V4T Thumb -> ARM long branch stub, PIC. */
2136 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2137 {
2138 THUMB16_INSN(0x4778), /* bx pc */
2139 THUMB16_INSN(0x46c0), /* nop */
2140 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2141 ARM_INSN(0xe08cf00f), /* add pc, ip, pc */
2142 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2143 };
2144
2145 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2146 architectures. */
2147 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2148 {
2149 THUMB16_INSN(0xb401), /* push {r0} */
2150 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2151 THUMB16_INSN(0x46fc), /* mov ip, pc */
2152 THUMB16_INSN(0x4484), /* add ip, r0 */
2153 THUMB16_INSN(0xbc01), /* pop {r0} */
2154 THUMB16_INSN(0x4760), /* bx ip */
2155 DATA_WORD(0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2156 };
2157
2158 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2159 allowed. */
2160 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2161 {
2162 THUMB16_INSN(0x4778), /* bx pc */
2163 THUMB16_INSN(0x46c0), /* nop */
2164 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2165 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2166 ARM_INSN(0xe12fff1c), /* bx ip */
2167 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2168 };
2169
2170 /* Cortex-A8 erratum-workaround stubs. */
2171
2172 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2173 can't use a conditional branch to reach this stub). */
2174
2175 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2176 {
2177 THUMB16_BCOND_INSN(0xd001), /* b<cond>.n true. */
2178 THUMB32_B_INSN(0xf000b800, -4), /* b.w insn_after_original_branch. */
2179 THUMB32_B_INSN(0xf000b800, -4) /* true: b.w original_branch_dest. */
2180 };
2181
2182 /* Stub used for b.w and bl.w instructions. */
2183
2184 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2185 {
2186 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2187 };
2188
2189 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2190 {
2191 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2192 };
2193
2194 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2195 instruction (which switches to ARM mode) to point to this stub. Jump to the
2196 real destination using an ARM-mode branch. */
2197
2198 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2199 {
2200 ARM_REL_INSN(0xea000000, -8) /* b original_branch_dest. */
2201 };
2202
2203 /* Section name for stubs is the associated section name plus this
2204 string. */
2205 #define STUB_SUFFIX ".stub"
2206
2207 /* One entry per long/short branch stub defined above. */
2208 #define DEF_STUBS \
2209 DEF_STUB(long_branch_any_any) \
2210 DEF_STUB(long_branch_v4t_arm_thumb) \
2211 DEF_STUB(long_branch_thumb_only) \
2212 DEF_STUB(long_branch_v4t_thumb_thumb) \
2213 DEF_STUB(long_branch_v4t_thumb_arm) \
2214 DEF_STUB(short_branch_v4t_thumb_arm) \
2215 DEF_STUB(long_branch_any_arm_pic) \
2216 DEF_STUB(long_branch_any_thumb_pic) \
2217 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2218 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2219 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2220 DEF_STUB(long_branch_thumb_only_pic) \
2221 DEF_STUB(a8_veneer_b_cond) \
2222 DEF_STUB(a8_veneer_b) \
2223 DEF_STUB(a8_veneer_bl) \
2224 DEF_STUB(a8_veneer_blx)
2225
2226 #define DEF_STUB(x) arm_stub_##x,
2227 enum elf32_arm_stub_type {
2228 arm_stub_none,
2229 DEF_STUBS
2230 /* Note the first a8_veneer type */
2231 arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
2232 };
2233 #undef DEF_STUB
2234
2235 typedef struct
2236 {
2237 const insn_sequence* template_sequence;
2238 int template_size;
2239 } stub_def;
2240
2241 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2242 static const stub_def stub_definitions[] = {
2243 {NULL, 0},
2244 DEF_STUBS
2245 };
2246
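/* For reference: with the DEF_STUB/DEF_STUBS pattern above, the enum
   expands to arm_stub_none, arm_stub_long_branch_any_any,
   arm_stub_long_branch_v4t_arm_thumb, ... while stub_definitions gets
   a matching {template, template-size} entry for each stub (the leading
   {NULL, 0} corresponding to arm_stub_none), so that, for example
   (illustrative only),

     stub_definitions[arm_stub_long_branch_any_any].template_sequence
       == elf32_arm_stub_long_branch_any_any

   holds, and a stub type can be used directly as an index.  */
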
2247 struct elf32_arm_stub_hash_entry
2248 {
2249 /* Base hash table entry structure. */
2250 struct bfd_hash_entry root;
2251
2252 /* The stub section. */
2253 asection *stub_sec;
2254
2255 /* Offset within stub_sec of the beginning of this stub. */
2256 bfd_vma stub_offset;
2257
2258 /* Given the symbol's value and its section we can determine its final
2259 value when building the stubs (so the stub knows where to jump). */
2260 bfd_vma target_value;
2261 asection *target_section;
2262
2263 /* Offset to apply to relocation referencing target_value. */
2264 bfd_vma target_addend;
2265
2266 /* The instruction which caused this stub to be generated (only valid for
2267 Cortex-A8 erratum workaround stubs at present). */
2268 unsigned long orig_insn;
2269
2270 /* The stub type. */
2271 enum elf32_arm_stub_type stub_type;
2272 /* Its encoding size in bytes. */
2273 int stub_size;
2274 /* Its template. */
2275 const insn_sequence *stub_template;
2276 /* The size of the template (number of entries). */
2277 int stub_template_size;
2278
2279 /* The symbol table entry, if any, that this was derived from. */
2280 struct elf32_arm_link_hash_entry *h;
2281
2282 /* Destination symbol type (STT_ARM_TFUNC, ...) */
2283 unsigned char st_type;
2284
2285 /* Where this stub is being called from, or, in the case of combined
2286 stub sections, the first input section in the group. */
2287 asection *id_sec;
2288
2289 /* The name for the local symbol at the start of this stub. The
2290 stub name in the hash table has to be unique; this one does not, so
2291 it can be friendlier. */
2292 char *output_name;
2293 };
2294
2295 /* Used to build a map of a section. This is required for mixed-endian
2296 code/data. */
2297
2298 typedef struct elf32_elf_section_map
2299 {
2300 bfd_vma vma;
2301 char type;
2302 }
2303 elf32_arm_section_map;
2304
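/* A sketch of how such a map is typically used (assumptions: the map
   has been sorted by vma, and 'type' holds the mapping-symbol class
   'a', 't' or 'd' for ARM code, Thumb code and data respectively, per
   the ARM ELF mapping symbols $a/$t/$d):

     char kind = 'a';
     for (i = 0; i < arm_data->mapcount; i++)
       if (arm_data->map[i].vma <= vma)
         kind = arm_data->map[i].type;

   i.e. the entry with the highest vma not greater than the address of
   interest tells us what the bytes there are.  */
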
2305 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2306
2307 typedef enum
2308 {
2309 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2310 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2311 VFP11_ERRATUM_ARM_VENEER,
2312 VFP11_ERRATUM_THUMB_VENEER
2313 }
2314 elf32_vfp11_erratum_type;
2315
2316 typedef struct elf32_vfp11_erratum_list
2317 {
2318 struct elf32_vfp11_erratum_list *next;
2319 bfd_vma vma;
2320 union
2321 {
2322 struct
2323 {
2324 struct elf32_vfp11_erratum_list *veneer;
2325 unsigned int vfp_insn;
2326 } b;
2327 struct
2328 {
2329 struct elf32_vfp11_erratum_list *branch;
2330 unsigned int id;
2331 } v;
2332 } u;
2333 elf32_vfp11_erratum_type type;
2334 }
2335 elf32_vfp11_erratum_list;
2336
2337 typedef enum
2338 {
2339 DELETE_EXIDX_ENTRY,
2340 INSERT_EXIDX_CANTUNWIND_AT_END
2341 }
2342 arm_unwind_edit_type;
2343
2344 /* A (sorted) list of edits to apply to an unwind table. */
2345 typedef struct arm_unwind_table_edit
2346 {
2347 arm_unwind_edit_type type;
2348 /* Note: we sometimes want to insert an unwind entry corresponding to a
2349 section different from the one we're currently writing out, so record the
2350 (text) section this edit relates to here. */
2351 asection *linked_section;
2352 unsigned int index;
2353 struct arm_unwind_table_edit *next;
2354 }
2355 arm_unwind_table_edit;
2356
2357 typedef struct _arm_elf_section_data
2358 {
2359 /* Information about mapping symbols. */
2360 struct bfd_elf_section_data elf;
2361 unsigned int mapcount;
2362 unsigned int mapsize;
2363 elf32_arm_section_map *map;
2364 /* Information about CPU errata. */
2365 unsigned int erratumcount;
2366 elf32_vfp11_erratum_list *erratumlist;
2367 /* Information about unwind tables. */
2368 union
2369 {
2370 /* Unwind info attached to a text section. */
2371 struct
2372 {
2373 asection *arm_exidx_sec;
2374 } text;
2375
2376 /* Unwind info attached to an .ARM.exidx section. */
2377 struct
2378 {
2379 arm_unwind_table_edit *unwind_edit_list;
2380 arm_unwind_table_edit *unwind_edit_tail;
2381 } exidx;
2382 } u;
2383 }
2384 _arm_elf_section_data;
2385
2386 #define elf32_arm_section_data(sec) \
2387 ((_arm_elf_section_data *) elf_section_data (sec))
2388
2389 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2390 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2391 so may be created multiple times: we use an array of these entries whilst
2392 relaxing, which we can refresh easily, and then create stubs for each
2393 potentially erratum-triggering instruction once we've settled on a solution. */
2394
2395 struct a8_erratum_fix {
2396 bfd *input_bfd;
2397 asection *section;
2398 bfd_vma offset;
2399 bfd_vma addend;
2400 unsigned long orig_insn;
2401 char *stub_name;
2402 enum elf32_arm_stub_type stub_type;
2403 int st_type;
2404 };
2405
2406 /* A table of relocs applied to branches which might trigger Cortex-A8
2407 erratum. */
2408
2409 struct a8_erratum_reloc {
2410 bfd_vma from;
2411 bfd_vma destination;
2412 struct elf32_arm_link_hash_entry *hash;
2413 const char *sym_name;
2414 unsigned int r_type;
2415 unsigned char st_type;
2416 bfd_boolean non_a8_stub;
2417 };
2418
2419 /* The size of the thread control block. */
2420 #define TCB_SIZE 8
2421
2422 struct elf_arm_obj_tdata
2423 {
2424 struct elf_obj_tdata root;
2425
2426 /* tls_type for each local got entry. */
2427 char *local_got_tls_type;
2428
2429 /* Zero to warn when linking objects with incompatible enum sizes. */
2430 int no_enum_size_warning;
2431
2432 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2433 int no_wchar_size_warning;
2434 };
2435
2436 #define elf_arm_tdata(bfd) \
2437 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2438
2439 #define elf32_arm_local_got_tls_type(bfd) \
2440 (elf_arm_tdata (bfd)->local_got_tls_type)
2441
2442 #define is_arm_elf(bfd) \
2443 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2444 && elf_tdata (bfd) != NULL \
2445 && elf_object_id (bfd) == ARM_ELF_DATA)
2446
2447 static bfd_boolean
2448 elf32_arm_mkobject (bfd *abfd)
2449 {
2450 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2451 ARM_ELF_DATA);
2452 }
2453
2454 /* The ARM linker needs to keep track of the number of relocs that it
2455 decides to copy in check_relocs for each symbol. This is so that
2456 it can discard PC relative relocs if it doesn't need them when
2457 linking with -Bsymbolic. We store the information in a field
2458 extending the regular ELF linker hash table. */
2459
2460 /* This structure keeps track of the number of relocs we have copied
2461 for a given symbol. */
2462 struct elf32_arm_relocs_copied
2463 {
2464 /* Next section. */
2465 struct elf32_arm_relocs_copied * next;
2466 /* A section in dynobj. */
2467 asection * section;
2468 /* Number of relocs copied in this section. */
2469 bfd_size_type count;
2470 /* Number of PC-relative relocs copied in this section. */
2471 bfd_size_type pc_count;
2472 };
2473
2474 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2475
2476 /* Arm ELF linker hash entry. */
2477 struct elf32_arm_link_hash_entry
2478 {
2479 struct elf_link_hash_entry root;
2480
2481 /* Number of PC relative relocs copied for this symbol. */
2482 struct elf32_arm_relocs_copied * relocs_copied;
2483
2484 /* We reference count Thumb references to a PLT entry separately,
2485 so that we can emit the Thumb trampoline only if needed. */
2486 bfd_signed_vma plt_thumb_refcount;
2487
2488 /* Some references from Thumb code may be eliminated by BL->BLX
2489 conversion, so record them separately. */
2490 bfd_signed_vma plt_maybe_thumb_refcount;
2491
2492 /* Since PLT entries have variable size if the Thumb prologue is
2493 used, we need to record the index into .got.plt instead of
2494 recomputing it from the PLT offset. */
2495 bfd_signed_vma plt_got_offset;
2496
2497 #define GOT_UNKNOWN 0
2498 #define GOT_NORMAL 1
2499 #define GOT_TLS_GD 2
2500 #define GOT_TLS_IE 4
2501 unsigned char tls_type;
2502
2503 /* The symbol marking the real symbol location for exported thumb
2504 symbols with Arm stubs. */
2505 struct elf_link_hash_entry *export_glue;
2506
2507 /* A pointer to the most recently used stub hash entry against this
2508 symbol. */
2509 struct elf32_arm_stub_hash_entry *stub_cache;
2510 };
2511
2512 /* Traverse an arm ELF linker hash table. */
2513 #define elf32_arm_link_hash_traverse(table, func, info) \
2514 (elf_link_hash_traverse \
2515 (&(table)->root, \
2516 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2517 (info)))
2518
2519 /* Get the ARM elf linker hash table from a link_info structure. */
2520 #define elf32_arm_hash_table(info) \
2521 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
2522 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
2523
2524 #define arm_stub_hash_lookup(table, string, create, copy) \
2525 ((struct elf32_arm_stub_hash_entry *) \
2526 bfd_hash_lookup ((table), (string), (create), (copy)))
2527
2528 /* Array to keep track of which stub sections have been created, and
2529 information on stub grouping. */
2530 struct map_stub
2531 {
2532 /* This is the section to which stubs in the group will be
2533 attached. */
2534 asection *link_sec;
2535 /* The stub section. */
2536 asection *stub_sec;
2537 };
2538
2539 /* ARM ELF linker hash table. */
2540 struct elf32_arm_link_hash_table
2541 {
2542 /* The main hash table. */
2543 struct elf_link_hash_table root;
2544
2545 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
2546 bfd_size_type thumb_glue_size;
2547
2548 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
2549 bfd_size_type arm_glue_size;
2550
2551 /* The size in bytes of the section containing the ARMv4 BX veneers. */
2552 bfd_size_type bx_glue_size;
2553
2554 /* Offsets of ARMv4 BX veneers. Bit 1 is set if the veneer is present,
2555 and bit 0 is set once the veneer has been populated. */
2556 bfd_vma bx_glue_offset[15];
2557
2558 /* The size in bytes of the section containing glue for VFP11 erratum
2559 veneers. */
2560 bfd_size_type vfp11_erratum_glue_size;
2561
2562 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
2563 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
2564 elf32_arm_write_section(). */
2565 struct a8_erratum_fix *a8_erratum_fixes;
2566 unsigned int num_a8_erratum_fixes;
2567
2568 /* An arbitrary input BFD chosen to hold the glue sections. */
2569 bfd * bfd_of_glue_owner;
2570
2571 /* Nonzero to output a BE8 image. */
2572 int byteswap_code;
2573
2574 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
2575 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
2576 int target1_is_rel;
2577
2578 /* The relocation to use for R_ARM_TARGET2 relocations. */
2579 int target2_reloc;
2580
2581 /* 0 = Ignore R_ARM_V4BX.
2582 1 = Convert BX to MOV PC.
2583 2 = Generate v4 interworking stubs. */
2584 int fix_v4bx;
2585
2586 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2587 int fix_cortex_a8;
2588
2589 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2590 int use_blx;
2591
2592 /* What sort of code sequences we should look for which may trigger the
2593 VFP11 denorm erratum. */
2594 bfd_arm_vfp11_fix vfp11_fix;
2595
2596 /* Global counter for the number of fixes we have emitted. */
2597 int num_vfp11_fixes;
2598
2599 /* Nonzero to force PIC branch veneers. */
2600 int pic_veneer;
2601
2602 /* The number of bytes in the initial entry in the PLT. */
2603 bfd_size_type plt_header_size;
2604
2605 /* The number of bytes in the subsequent PLT entries. */
2606 bfd_size_type plt_entry_size;
2607
2608 /* True if the target system is VxWorks. */
2609 int vxworks_p;
2610
2611 /* True if the target system is Symbian OS. */
2612 int symbian_p;
2613
2614 /* True if the target uses REL relocations. */
2615 int use_rel;
2616
2617 /* Short-cuts to get to dynamic linker sections. */
2618 asection *sgot;
2619 asection *sgotplt;
2620 asection *srelgot;
2621 asection *splt;
2622 asection *srelplt;
2623 asection *sdynbss;
2624 asection *srelbss;
2625
2626 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
2627 asection *srelplt2;
2628
2629 /* Data for R_ARM_TLS_LDM32 relocations. */
2630 union
2631 {
2632 bfd_signed_vma refcount;
2633 bfd_vma offset;
2634 } tls_ldm_got;
2635
2636 /* Small local sym cache. */
2637 struct sym_cache sym_cache;
2638
2639 /* For convenience in allocate_dynrelocs. */
2640 bfd * obfd;
2641
2642 /* The stub hash table. */
2643 struct bfd_hash_table stub_hash_table;
2644
2645 /* Linker stub bfd. */
2646 bfd *stub_bfd;
2647
2648 /* Linker call-backs. */
2649 asection * (*add_stub_section) (const char *, asection *);
2650 void (*layout_sections_again) (void);
2651
2652 /* Array to keep track of which stub sections have been created, and
2653 information on stub grouping. */
2654 struct map_stub *stub_group;
2655
2656 /* Number of elements in stub_group. */
2657 int top_id;
2658
2659 /* Assorted information used by elf32_arm_size_stubs. */
2660 unsigned int bfd_count;
2661 int top_index;
2662 asection **input_list;
2663 };
2664
2665 /* Create an entry in an ARM ELF linker hash table. */
2666
2667 static struct bfd_hash_entry *
2668 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
2669 struct bfd_hash_table * table,
2670 const char * string)
2671 {
2672 struct elf32_arm_link_hash_entry * ret =
2673 (struct elf32_arm_link_hash_entry *) entry;
2674
2675 /* Allocate the structure if it has not already been allocated by a
2676 subclass. */
2677 if (ret == NULL)
2678 ret = (struct elf32_arm_link_hash_entry *)
2679 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
2680 if (ret == NULL)
2681 return (struct bfd_hash_entry *) ret;
2682
2683 /* Call the allocation method of the superclass. */
2684 ret = ((struct elf32_arm_link_hash_entry *)
2685 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2686 table, string));
2687 if (ret != NULL)
2688 {
2689 ret->relocs_copied = NULL;
2690 ret->tls_type = GOT_UNKNOWN;
2691 ret->plt_thumb_refcount = 0;
2692 ret->plt_maybe_thumb_refcount = 0;
2693 ret->plt_got_offset = -1;
2694 ret->export_glue = NULL;
2695
2696 ret->stub_cache = NULL;
2697 }
2698
2699 return (struct bfd_hash_entry *) ret;
2700 }
2701
2702 /* Initialize an entry in the stub hash table. */
2703
2704 static struct bfd_hash_entry *
2705 stub_hash_newfunc (struct bfd_hash_entry *entry,
2706 struct bfd_hash_table *table,
2707 const char *string)
2708 {
2709 /* Allocate the structure if it has not already been allocated by a
2710 subclass. */
2711 if (entry == NULL)
2712 {
2713 entry = (struct bfd_hash_entry *)
2714 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
2715 if (entry == NULL)
2716 return entry;
2717 }
2718
2719 /* Call the allocation method of the superclass. */
2720 entry = bfd_hash_newfunc (entry, table, string);
2721 if (entry != NULL)
2722 {
2723 struct elf32_arm_stub_hash_entry *eh;
2724
2725 /* Initialize the local fields. */
2726 eh = (struct elf32_arm_stub_hash_entry *) entry;
2727 eh->stub_sec = NULL;
2728 eh->stub_offset = 0;
2729 eh->target_value = 0;
2730 eh->target_section = NULL;
2731 eh->target_addend = 0;
2732 eh->orig_insn = 0;
2733 eh->stub_type = arm_stub_none;
2734 eh->stub_size = 0;
2735 eh->stub_template = NULL;
2736 eh->stub_template_size = 0;
2737 eh->h = NULL;
2738 eh->id_sec = NULL;
2739 eh->output_name = NULL;
2740 }
2741
2742 return entry;
2743 }
2744
2745 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
2746 shortcuts to them in our hash table. */
2747
2748 static bfd_boolean
2749 create_got_section (bfd *dynobj, struct bfd_link_info *info)
2750 {
2751 struct elf32_arm_link_hash_table *htab;
2752
2753 htab = elf32_arm_hash_table (info);
2754 if (htab == NULL)
2755 return FALSE;
2756
2757 /* BPABI objects never have a GOT, or associated sections. */
2758 if (htab->symbian_p)
2759 return TRUE;
2760
2761 if (! _bfd_elf_create_got_section (dynobj, info))
2762 return FALSE;
2763
2764 htab->sgot = bfd_get_section_by_name (dynobj, ".got");
2765 htab->sgotplt = bfd_get_section_by_name (dynobj, ".got.plt");
2766 if (!htab->sgot || !htab->sgotplt)
2767 abort ();
2768
2769 htab->srelgot = bfd_get_section_by_name (dynobj,
2770 RELOC_SECTION (htab, ".got"));
2771 if (htab->srelgot == NULL)
2772 return FALSE;
2773 return TRUE;
2774 }
2775
2776 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
2777 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
2778 hash table. */
2779
2780 static bfd_boolean
2781 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
2782 {
2783 struct elf32_arm_link_hash_table *htab;
2784
2785 htab = elf32_arm_hash_table (info);
2786 if (htab == NULL)
2787 return FALSE;
2788
2789 if (!htab->sgot && !create_got_section (dynobj, info))
2790 return FALSE;
2791
2792 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
2793 return FALSE;
2794
2795 htab->splt = bfd_get_section_by_name (dynobj, ".plt");
2796 htab->srelplt = bfd_get_section_by_name (dynobj,
2797 RELOC_SECTION (htab, ".plt"));
2798 htab->sdynbss = bfd_get_section_by_name (dynobj, ".dynbss");
2799 if (!info->shared)
2800 htab->srelbss = bfd_get_section_by_name (dynobj,
2801 RELOC_SECTION (htab, ".bss"));
2802
2803 if (htab->vxworks_p)
2804 {
2805 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
2806 return FALSE;
2807
2808 if (info->shared)
2809 {
2810 htab->plt_header_size = 0;
2811 htab->plt_entry_size
2812 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
2813 }
2814 else
2815 {
2816 htab->plt_header_size
2817 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
2818 htab->plt_entry_size
2819 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
2820 }
2821 }
2822
2823 if (!htab->splt
2824 || !htab->srelplt
2825 || !htab->sdynbss
2826 || (!info->shared && !htab->srelbss))
2827 abort ();
2828
2829 return TRUE;
2830 }
2831
2832 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2833
2834 static void
2835 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
2836 struct elf_link_hash_entry *dir,
2837 struct elf_link_hash_entry *ind)
2838 {
2839 struct elf32_arm_link_hash_entry *edir, *eind;
2840
2841 edir = (struct elf32_arm_link_hash_entry *) dir;
2842 eind = (struct elf32_arm_link_hash_entry *) ind;
2843
2844 if (eind->relocs_copied != NULL)
2845 {
2846 if (edir->relocs_copied != NULL)
2847 {
2848 struct elf32_arm_relocs_copied **pp;
2849 struct elf32_arm_relocs_copied *p;
2850
2851 /* Add reloc counts against the indirect sym to the direct sym
2852 list. Merge any entries against the same section. */
2853 for (pp = &eind->relocs_copied; (p = *pp) != NULL; )
2854 {
2855 struct elf32_arm_relocs_copied *q;
2856
2857 for (q = edir->relocs_copied; q != NULL; q = q->next)
2858 if (q->section == p->section)
2859 {
2860 q->pc_count += p->pc_count;
2861 q->count += p->count;
2862 *pp = p->next;
2863 break;
2864 }
2865 if (q == NULL)
2866 pp = &p->next;
2867 }
2868 *pp = edir->relocs_copied;
2869 }
2870
2871 edir->relocs_copied = eind->relocs_copied;
2872 eind->relocs_copied = NULL;
2873 }
2874
2875 if (ind->root.type == bfd_link_hash_indirect)
2876 {
2877 /* Copy over PLT info. */
2878 edir->plt_thumb_refcount += eind->plt_thumb_refcount;
2879 eind->plt_thumb_refcount = 0;
2880 edir->plt_maybe_thumb_refcount += eind->plt_maybe_thumb_refcount;
2881 eind->plt_maybe_thumb_refcount = 0;
2882
2883 if (dir->got.refcount <= 0)
2884 {
2885 edir->tls_type = eind->tls_type;
2886 eind->tls_type = GOT_UNKNOWN;
2887 }
2888 }
2889
2890 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2891 }
2892
2893 /* Create an ARM elf linker hash table. */
2894
2895 static struct bfd_link_hash_table *
2896 elf32_arm_link_hash_table_create (bfd *abfd)
2897 {
2898 struct elf32_arm_link_hash_table *ret;
2899 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
2900
2901 ret = (struct elf32_arm_link_hash_table *) bfd_malloc (amt);
2902 if (ret == NULL)
2903 return NULL;
2904
2905 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
2906 elf32_arm_link_hash_newfunc,
2907 sizeof (struct elf32_arm_link_hash_entry),
2908 ARM_ELF_DATA))
2909 {
2910 free (ret);
2911 return NULL;
2912 }
2913
2914 ret->sgot = NULL;
2915 ret->sgotplt = NULL;
2916 ret->srelgot = NULL;
2917 ret->splt = NULL;
2918 ret->srelplt = NULL;
2919 ret->sdynbss = NULL;
2920 ret->srelbss = NULL;
2921 ret->srelplt2 = NULL;
2922 ret->thumb_glue_size = 0;
2923 ret->arm_glue_size = 0;
2924 ret->bx_glue_size = 0;
2925 memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset));
2926 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
2927 ret->vfp11_erratum_glue_size = 0;
2928 ret->num_vfp11_fixes = 0;
2929 ret->fix_cortex_a8 = 0;
2930 ret->bfd_of_glue_owner = NULL;
2931 ret->byteswap_code = 0;
2932 ret->target1_is_rel = 0;
2933 ret->target2_reloc = R_ARM_NONE;
2934 #ifdef FOUR_WORD_PLT
2935 ret->plt_header_size = 16;
2936 ret->plt_entry_size = 16;
2937 #else
2938 ret->plt_header_size = 20;
2939 ret->plt_entry_size = 12;
2940 #endif
2941 ret->fix_v4bx = 0;
2942 ret->use_blx = 0;
2943 ret->vxworks_p = 0;
2944 ret->symbian_p = 0;
2945 ret->use_rel = 1;
2946 ret->sym_cache.abfd = NULL;
2947 ret->obfd = abfd;
2948 ret->tls_ldm_got.refcount = 0;
2949 ret->stub_bfd = NULL;
2950 ret->add_stub_section = NULL;
2951 ret->layout_sections_again = NULL;
2952 ret->stub_group = NULL;
2953 ret->top_id = 0;
2954 ret->bfd_count = 0;
2955 ret->top_index = 0;
2956 ret->input_list = NULL;
2957
2958 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2959 sizeof (struct elf32_arm_stub_hash_entry)))
2960 {
2961 free (ret);
2962 return NULL;
2963 }
2964
2965 return &ret->root.root;
2966 }
2967
2968 /* Free the derived linker hash table. */
2969
2970 static void
2971 elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
2972 {
2973 struct elf32_arm_link_hash_table *ret
2974 = (struct elf32_arm_link_hash_table *) hash;
2975
2976 bfd_hash_table_free (&ret->stub_hash_table);
2977 _bfd_generic_link_hash_table_free (hash);
2978 }
2979
2980 /* Determine if we're dealing with a Thumb only architecture. */
2981
2982 static bfd_boolean
2983 using_thumb_only (struct elf32_arm_link_hash_table *globals)
2984 {
2985 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2986 Tag_CPU_arch);
2987 int profile;
2988
2989 if (arch == TAG_CPU_ARCH_V6_M || arch == TAG_CPU_ARCH_V6S_M)
2990 return TRUE;
2991
2992 if (arch != TAG_CPU_ARCH_V7 && arch != TAG_CPU_ARCH_V7E_M)
2993 return FALSE;
2994
2995 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2996 Tag_CPU_arch_profile);
2997
2998 return profile == 'M';
2999 }
3000
3001 /* Determine if we're dealing with a Thumb-2 object. */
3002
3003 static bfd_boolean
3004 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3005 {
3006 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3007 Tag_CPU_arch);
3008 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
3009 }
3010
3011 /* Determine what kind of NOPs are available. */
3012
3013 static bfd_boolean
3014 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
3015 {
3016 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3017 Tag_CPU_arch);
3018 return arch == TAG_CPU_ARCH_V6T2
3019 || arch == TAG_CPU_ARCH_V6K
3020 || arch == TAG_CPU_ARCH_V7
3021 || arch == TAG_CPU_ARCH_V7E_M;
3022 }
3023
3024 static bfd_boolean
3025 arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
3026 {
3027 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3028 Tag_CPU_arch);
3029 return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7
3030 || arch == TAG_CPU_ARCH_V7E_M);
3031 }
3032
3033 static bfd_boolean
3034 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3035 {
3036 switch (stub_type)
3037 {
3038 case arm_stub_long_branch_thumb_only:
3039 case arm_stub_long_branch_v4t_thumb_arm:
3040 case arm_stub_short_branch_v4t_thumb_arm:
3041 case arm_stub_long_branch_v4t_thumb_arm_pic:
3042 case arm_stub_long_branch_thumb_only_pic:
3043 return TRUE;
3044 case arm_stub_none:
3045 BFD_FAIL ();
3046 return FALSE;
3047 break;
3048 default:
3049 return FALSE;
3050 }
3051 }
3052
3053 /* Determine the type of stub needed, if any, for a call. */
3054
3055 static enum elf32_arm_stub_type
3056 arm_type_of_stub (struct bfd_link_info *info,
3057 asection *input_sec,
3058 const Elf_Internal_Rela *rel,
3059 int *actual_st_type,
3060 struct elf32_arm_link_hash_entry *hash,
3061 bfd_vma destination,
3062 asection *sym_sec,
3063 bfd *input_bfd,
3064 const char *name)
3065 {
3066 bfd_vma location;
3067 bfd_signed_vma branch_offset;
3068 unsigned int r_type;
3069 struct elf32_arm_link_hash_table * globals;
3070 int thumb2;
3071 int thumb_only;
3072 enum elf32_arm_stub_type stub_type = arm_stub_none;
3073 int use_plt = 0;
3074 int st_type = *actual_st_type;
3075
3076 /* We don't know the actual type of the destination if it is of
3077 type STT_SECTION: give up. */
3078 if (st_type == STT_SECTION)
3079 return stub_type;
3080
3081 globals = elf32_arm_hash_table (info);
3082 if (globals == NULL)
3083 return stub_type;
3084
3085 thumb_only = using_thumb_only (globals);
3086
3087 thumb2 = using_thumb2 (globals);
3088
3089 /* Determine where the call point is. */
3090 location = (input_sec->output_offset
3091 + input_sec->output_section->vma
3092 + rel->r_offset);
3093
3094 r_type = ELF32_R_TYPE (rel->r_info);
3095
3096 /* Keep a simpler condition, for the sake of clarity. */
3097 if (globals->splt != NULL
3098 && hash != NULL
3099 && hash->root.plt.offset != (bfd_vma) -1)
3100 {
3101 use_plt = 1;
3102
3103 /* Note when dealing with PLT entries: the main PLT stub is in
3104 ARM mode, so if the branch is in Thumb mode, another
3105 Thumb->ARM stub will be inserted later just before the ARM
3106 PLT stub. We don't take this extra distance into account
3107 here, because if a long branch stub is needed, we'll add a
3108 Thumb->ARM one and branch directly to the ARM PLT entry,
3109 which avoids spreading offset corrections across several
3110 places. */
3111
3112 destination = (globals->splt->output_section->vma
3113 + globals->splt->output_offset
3114 + hash->root.plt.offset);
3115 st_type = STT_FUNC;
3116 }
3117
3118 branch_offset = (bfd_signed_vma)(destination - location);
3119
3120 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
3121 {
3122 /* Handle cases where:
3123 - this call goes too far (different Thumb/Thumb2 max
3124 distance)
3125 - it's a Thumb->Arm call and blx is not available, or it's a
3126 Thumb->Arm branch (not bl). A stub is needed in this case,
3127 but only if this call is not through a PLT entry. Indeed,
3128 PLT stubs handle mode switching already.
3129 */
3130 if ((!thumb2
3131 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3132 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3133 || (thumb2
3134 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3135 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3136 || ((st_type != STT_ARM_TFUNC)
3137 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
3138 || (r_type == R_ARM_THM_JUMP24))
3139 && !use_plt))
3140 {
3141 if (st_type == STT_ARM_TFUNC)
3142 {
3143 /* Thumb to thumb. */
3144 if (!thumb_only)
3145 {
3146 stub_type = (info->shared | globals->pic_veneer)
3147 /* PIC stubs. */
3148 ? ((globals->use_blx
3149 && (r_type == R_ARM_THM_CALL))
3150 /* V5T and above. Stub starts with ARM code, so
3151 we must be able to switch mode before
3152 reaching it, which is only possible for 'bl'
3153 (ie R_ARM_THM_CALL relocation). */
3154 ? arm_stub_long_branch_any_thumb_pic
3155 /* On V4T, use Thumb code only. */
3156 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3157
3158 /* non-PIC stubs. */
3159 : ((globals->use_blx
3160 && (r_type == R_ARM_THM_CALL))
3161 /* V5T and above. */
3162 ? arm_stub_long_branch_any_any
3163 /* V4T. */
3164 : arm_stub_long_branch_v4t_thumb_thumb);
3165 }
3166 else
3167 {
3168 stub_type = (info->shared | globals->pic_veneer)
3169 /* PIC stub. */
3170 ? arm_stub_long_branch_thumb_only_pic
3171 /* non-PIC stub. */
3172 : arm_stub_long_branch_thumb_only;
3173 }
3174 }
3175 else
3176 {
3177 /* Thumb to arm. */
3178 if (sym_sec != NULL
3179 && sym_sec->owner != NULL
3180 && !INTERWORK_FLAG (sym_sec->owner))
3181 {
3182 (*_bfd_error_handler)
3183 (_("%B(%s): warning: interworking not enabled.\n"
3184 " first occurrence: %B: Thumb call to ARM"),
3185 sym_sec->owner, input_bfd, name);
3186 }
3187
3188 stub_type = (info->shared | globals->pic_veneer)
3189 /* PIC stubs. */
3190 ? ((globals->use_blx
3191 && (r_type == R_ARM_THM_CALL))
3192 /* V5T and above. */
3193 ? arm_stub_long_branch_any_arm_pic
3194 /* V4T PIC stub. */
3195 : arm_stub_long_branch_v4t_thumb_arm_pic)
3196
3197 /* non-PIC stubs. */
3198 : ((globals->use_blx
3199 && (r_type == R_ARM_THM_CALL))
3200 /* V5T and above. */
3201 ? arm_stub_long_branch_any_any
3202 /* V4T. */
3203 : arm_stub_long_branch_v4t_thumb_arm);
3204
3205 /* Handle v4t short branches. */
3206 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3207 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3208 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3209 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3210 }
3211 }
3212 }
3213 else if (r_type == R_ARM_CALL
3214 || r_type == R_ARM_JUMP24
3215 || r_type == R_ARM_PLT32)
3216 {
3217 if (st_type == STT_ARM_TFUNC)
3218 {
3219 /* Arm to thumb. */
3220
3221 if (sym_sec != NULL
3222 && sym_sec->owner != NULL
3223 && !INTERWORK_FLAG (sym_sec->owner))
3224 {
3225 (*_bfd_error_handler)
3226 (_("%B(%s): warning: interworking not enabled.\n"
3227 " first occurrence: %B: ARM call to Thumb"),
3228 sym_sec->owner, input_bfd, name);
3229 }
3230
3231 /* We have an extra 2 bytes of reach because of
3232 the mode change (bit 24 (H) of the BLX encoding). */
3233 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3234 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3235 || ((r_type == R_ARM_CALL) && !globals->use_blx)
3236 || (r_type == R_ARM_JUMP24)
3237 || (r_type == R_ARM_PLT32))
3238 {
3239 stub_type = (info->shared | globals->pic_veneer)
3240 /* PIC stubs. */
3241 ? ((globals->use_blx)
3242 /* V5T and above. */
3243 ? arm_stub_long_branch_any_thumb_pic
3244 /* V4T stub. */
3245 : arm_stub_long_branch_v4t_arm_thumb_pic)
3246
3247 /* non-PIC stubs. */
3248 : ((globals->use_blx)
3249 /* V5T and above. */
3250 ? arm_stub_long_branch_any_any
3251 /* V4T. */
3252 : arm_stub_long_branch_v4t_arm_thumb);
3253 }
3254 }
3255 else
3256 {
3257 /* Arm to arm. */
3258 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3259 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3260 {
3261 stub_type = (info->shared | globals->pic_veneer)
3262 /* PIC stubs. */
3263 ? arm_stub_long_branch_any_arm_pic
3264 /* non-PIC stubs. */
3265 : arm_stub_long_branch_any_any;
3266 }
3267 }
3268 }
3269
3270 /* If a stub is needed, record the actual destination type. */
3271 if (stub_type != arm_stub_none)
3272 *actual_st_type = st_type;
3273
3274 return stub_type;
3275 }
3276
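/* An informal example of the selection above (invented numbers): a
   Thumb-2 'bl' (R_ARM_THM_CALL) on a v7/use_blx, non-PIC link whose
   destination is an ARM function about 20MB away exceeds
   THM2_MAX_FWD_BRANCH_OFFSET (roughly 16MB), so
   arm_stub_long_branch_any_any is chosen; the same call to a
   destination within range needs no stub and arm_stub_none is
   returned.  */
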
3277 /* Build a name for an entry in the stub hash table. */
3278
3279 static char *
3280 elf32_arm_stub_name (const asection *input_section,
3281 const asection *sym_sec,
3282 const struct elf32_arm_link_hash_entry *hash,
3283 const Elf_Internal_Rela *rel,
3284 enum elf32_arm_stub_type stub_type)
3285 {
3286 char *stub_name;
3287 bfd_size_type len;
3288
3289 if (hash)
3290 {
3291 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
3292 stub_name = (char *) bfd_malloc (len);
3293 if (stub_name != NULL)
3294 sprintf (stub_name, "%08x_%s+%x_%d",
3295 input_section->id & 0xffffffff,
3296 hash->root.root.root.string,
3297 (int) rel->r_addend & 0xffffffff,
3298 (int) stub_type);
3299 }
3300 else
3301 {
3302 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
3303 stub_name = (char *) bfd_malloc (len);
3304 if (stub_name != NULL)
3305 sprintf (stub_name, "%08x_%x:%x+%x_%d",
3306 input_section->id & 0xffffffff,
3307 sym_sec->id & 0xffffffff,
3308 (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3309 (int) rel->r_addend & 0xffffffff,
3310 (int) stub_type);
3311 }
3312
3313 return stub_name;
3314 }
3315
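/* Example of the names built above (ids and values invented purely
   for illustration): a call to a global symbol "printf" from input
   section id 0x2a with addend 0 and stub type 1 would be named
   "0000002a_printf+0_1", while a call to a local symbol uses the
   section-id:symbol-index form, e.g. "0000002a_1f:7+0_1".  */
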
3316 /* Look up an entry in the stub hash. Stub entries are cached because
3317 creating the stub name takes a bit of time. */
3318
3319 static struct elf32_arm_stub_hash_entry *
3320 elf32_arm_get_stub_entry (const asection *input_section,
3321 const asection *sym_sec,
3322 struct elf_link_hash_entry *hash,
3323 const Elf_Internal_Rela *rel,
3324 struct elf32_arm_link_hash_table *htab,
3325 enum elf32_arm_stub_type stub_type)
3326 {
3327 struct elf32_arm_stub_hash_entry *stub_entry;
3328 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3329 const asection *id_sec;
3330
3331 if ((input_section->flags & SEC_CODE) == 0)
3332 return NULL;
3333
3334 /* If this input section is part of a group of sections sharing one
3335 stub section, then use the id of the first section in the group.
3336 Stub names need to include a section id, as there may well be
3337 more than one stub used to reach say, printf, and we need to
3338 distinguish between them. */
3339 id_sec = htab->stub_group[input_section->id].link_sec;
3340
3341 if (h != NULL && h->stub_cache != NULL
3342 && h->stub_cache->h == h
3343 && h->stub_cache->id_sec == id_sec
3344 && h->stub_cache->stub_type == stub_type)
3345 {
3346 stub_entry = h->stub_cache;
3347 }
3348 else
3349 {
3350 char *stub_name;
3351
3352 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
3353 if (stub_name == NULL)
3354 return NULL;
3355
3356 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3357 stub_name, FALSE, FALSE);
3358 if (h != NULL)
3359 h->stub_cache = stub_entry;
3360
3361 free (stub_name);
3362 }
3363
3364 return stub_entry;
3365 }
3366
3367 /* Find or create a stub section. Returns a pointer to the stub section, and
3368 the section to which the stub section will be attached (in *LINK_SEC_P).
3369 LINK_SEC_P may be NULL. */
3370
3371 static asection *
3372 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3373 struct elf32_arm_link_hash_table *htab)
3374 {
3375 asection *link_sec;
3376 asection *stub_sec;
3377
3378 link_sec = htab->stub_group[section->id].link_sec;
3379 stub_sec = htab->stub_group[section->id].stub_sec;
3380 if (stub_sec == NULL)
3381 {
3382 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3383 if (stub_sec == NULL)
3384 {
3385 size_t namelen;
3386 bfd_size_type len;
3387 char *s_name;
3388
3389 namelen = strlen (link_sec->name);
3390 len = namelen + sizeof (STUB_SUFFIX);
3391 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
3392 if (s_name == NULL)
3393 return NULL;
3394
3395 memcpy (s_name, link_sec->name, namelen);
3396 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3397 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3398 if (stub_sec == NULL)
3399 return NULL;
3400 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3401 }
3402 htab->stub_group[section->id].stub_sec = stub_sec;
3403 }
3404
3405 if (link_sec_p)
3406 *link_sec_p = link_sec;
3407
3408 return stub_sec;
3409 }
3410
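/* For example (illustrative): if the sections of a group are linked
   to ".text", the corresponding stub section is named ".text" followed
   by STUB_SUFFIX, i.e. ".text.stub", and is created through the
   add_stub_section callback supplied by the linker.  */
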
3411 /* Add a new stub entry to the stub hash. Not all fields of the new
3412 stub entry are initialised. */
3413
3414 static struct elf32_arm_stub_hash_entry *
3415 elf32_arm_add_stub (const char *stub_name,
3416 asection *section,
3417 struct elf32_arm_link_hash_table *htab)
3418 {
3419 asection *link_sec;
3420 asection *stub_sec;
3421 struct elf32_arm_stub_hash_entry *stub_entry;
3422
3423 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
3424 if (stub_sec == NULL)
3425 return NULL;
3426
3427 /* Enter this entry into the linker stub hash table. */
3428 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3429 TRUE, FALSE);
3430 if (stub_entry == NULL)
3431 {
3432 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
3433 section->owner,
3434 stub_name);
3435 return NULL;
3436 }
3437
3438 stub_entry->stub_sec = stub_sec;
3439 stub_entry->stub_offset = 0;
3440 stub_entry->id_sec = link_sec;
3441
3442 return stub_entry;
3443 }
3444
3445 /* Store an Arm insn into an output section not processed by
3446 elf32_arm_write_section. */
3447
3448 static void
3449 put_arm_insn (struct elf32_arm_link_hash_table * htab,
3450 bfd * output_bfd, bfd_vma val, void * ptr)
3451 {
3452 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3453 bfd_putl32 (val, ptr);
3454 else
3455 bfd_putb32 (val, ptr);
3456 }
3457
3458 /* Store a 16-bit Thumb insn into an output section not processed by
3459 elf32_arm_write_section. */
3460
3461 static void
3462 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
3463 bfd * output_bfd, bfd_vma val, void * ptr)
3464 {
3465 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3466 bfd_putl16 (val, ptr);
3467 else
3468 bfd_putb16 (val, ptr);
3469 }
3470
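/* The effect of the byteswap_code test above, spelled out (informal):

     byteswap_code   output endianness   instruction written as
          0              little          little-endian (putl)
          0              big             big-endian    (putb)
        non-0            big             little-endian (putl)  -- BE8 image
        non-0            little          big-endian    (putb)

   i.e. as the table shows, instructions are byte-swapped relative to
   the data endianness exactly when a BE8-style image is requested.  */
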
3471 static bfd_reloc_status_type elf32_arm_final_link_relocate
3472 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
3473 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
3474 const char *, int, struct elf_link_hash_entry *, bfd_boolean *, char **);
3475
3476 static unsigned int
3477 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
3478 {
3479 switch (stub_type)
3480 {
3481 case arm_stub_a8_veneer_b_cond:
3482 case arm_stub_a8_veneer_b:
3483 case arm_stub_a8_veneer_bl:
3484 return 2;
3485
3486 case arm_stub_long_branch_any_any:
3487 case arm_stub_long_branch_v4t_arm_thumb:
3488 case arm_stub_long_branch_thumb_only:
3489 case arm_stub_long_branch_v4t_thumb_thumb:
3490 case arm_stub_long_branch_v4t_thumb_arm:
3491 case arm_stub_short_branch_v4t_thumb_arm:
3492 case arm_stub_long_branch_any_arm_pic:
3493 case arm_stub_long_branch_any_thumb_pic:
3494 case arm_stub_long_branch_v4t_thumb_thumb_pic:
3495 case arm_stub_long_branch_v4t_arm_thumb_pic:
3496 case arm_stub_long_branch_v4t_thumb_arm_pic:
3497 case arm_stub_long_branch_thumb_only_pic:
3498 case arm_stub_a8_veneer_blx:
3499 return 4;
3500
3501 default:
3502 abort (); /* Should be unreachable. */
3503 }
3504 }
3505
3506 static bfd_boolean
3507 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
3508 void * in_arg)
3509 {
3510 #define MAXRELOCS 2
3511 struct elf32_arm_stub_hash_entry *stub_entry;
3512 struct elf32_arm_link_hash_table *globals;
3513 struct bfd_link_info *info;
3514 asection *stub_sec;
3515 bfd *stub_bfd;
3516 bfd_byte *loc;
3517 bfd_vma sym_value;
3518 int template_size;
3519 int size;
3520 const insn_sequence *template_sequence;
3521 int i;
3522 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
3523 int stub_reloc_offset[MAXRELOCS] = {0, 0};
3524 int nrelocs = 0;
3525
3526 /* Massage our args to the form they really have. */
3527 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3528 info = (struct bfd_link_info *) in_arg;
3529
3530 globals = elf32_arm_hash_table (info);
3531 if (globals == NULL)
3532 return FALSE;
3533
3534 stub_sec = stub_entry->stub_sec;
3535
3536 if ((globals->fix_cortex_a8 < 0)
3537 != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
3538 /* We have to do less-strictly-aligned fixes last. */
3539 return TRUE;
3540
3541 /* Make a note of the offset within the stubs for this entry. */
3542 stub_entry->stub_offset = stub_sec->size;
3543 loc = stub_sec->contents + stub_entry->stub_offset;
3544
3545 stub_bfd = stub_sec->owner;
3546
3547 /* This is the address of the stub destination. */
3548 sym_value = (stub_entry->target_value
3549 + stub_entry->target_section->output_offset
3550 + stub_entry->target_section->output_section->vma);
3551
3552 template_sequence = stub_entry->stub_template;
3553 template_size = stub_entry->stub_template_size;
3554
3555 size = 0;
3556 for (i = 0; i < template_size; i++)
3557 {
3558 switch (template_sequence[i].type)
3559 {
3560 case THUMB16_TYPE:
3561 {
3562 bfd_vma data = (bfd_vma) template_sequence[i].data;
3563 if (template_sequence[i].reloc_addend != 0)
3564 {
3565 /* We've borrowed the reloc_addend field to mean we should
3566 insert a condition code into this (Thumb-1 branch)
3567 instruction. See THUMB16_BCOND_INSN. */
3568 BFD_ASSERT ((data & 0xff00) == 0xd000);
3569 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
3570 }
3571 bfd_put_16 (stub_bfd, data, loc + size);
3572 size += 2;
3573 }
3574 break;
3575
3576 case THUMB32_TYPE:
3577 bfd_put_16 (stub_bfd,
3578 (template_sequence[i].data >> 16) & 0xffff,
3579 loc + size);
3580 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
3581 loc + size + 2);
3582 if (template_sequence[i].r_type != R_ARM_NONE)
3583 {
3584 stub_reloc_idx[nrelocs] = i;
3585 stub_reloc_offset[nrelocs++] = size;
3586 }
3587 size += 4;
3588 break;
3589
3590 case ARM_TYPE:
3591 bfd_put_32 (stub_bfd, template_sequence[i].data,
3592 loc + size);
3593 /* Handle cases where the target is encoded within the
3594 instruction. */
3595 if (template_sequence[i].r_type == R_ARM_JUMP24)
3596 {
3597 stub_reloc_idx[nrelocs] = i;
3598 stub_reloc_offset[nrelocs++] = size;
3599 }
3600 size += 4;
3601 break;
3602
3603 case DATA_TYPE:
3604 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
3605 stub_reloc_idx[nrelocs] = i;
3606 stub_reloc_offset[nrelocs++] = size;
3607 size += 4;
3608 break;
3609
3610 default:
3611 BFD_FAIL ();
3612 return FALSE;
3613 }
3614 }
3615
3616 stub_sec->size += size;
3617
3618 /* Stub size has already been computed in arm_size_one_stub. Check
3619 consistency. */
3620 BFD_ASSERT (size == stub_entry->stub_size);
3621
3622 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
3623 if (stub_entry->st_type == STT_ARM_TFUNC)
3624 sym_value |= 1;
3625
3626   /* Assume there is at least one entry and at most MAXRELOCS entries to
3627      relocate in each stub.  */
3628 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
3629
3630 for (i = 0; i < nrelocs; i++)
3631 if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
3632 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
3633 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
3634 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
3635 {
3636 Elf_Internal_Rela rel;
3637 bfd_boolean unresolved_reloc;
3638 char *error_message;
3639 int sym_flags
3640 = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22)
3641 ? STT_ARM_TFUNC : 0;
3642 bfd_vma points_to = sym_value + stub_entry->target_addend;
3643
3644 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3645 rel.r_info = ELF32_R_INFO (0,
3646 template_sequence[stub_reloc_idx[i]].r_type);
3647 rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend;
3648
3649 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
3650 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
3651 template should refer back to the instruction after the original
3652 branch. */
3653 points_to = sym_value;
3654
3655 /* There may be unintended consequences if this is not true. */
3656 BFD_ASSERT (stub_entry->h == NULL);
3657
3658 /* Note: _bfd_final_link_relocate doesn't handle these relocations
3659 properly. We should probably use this function unconditionally,
3660 rather than only for certain relocations listed in the enclosing
3661 conditional, for the sake of consistency. */
3662 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3663 (template_sequence[stub_reloc_idx[i]].r_type),
3664 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3665 points_to, info, stub_entry->target_section, "", sym_flags,
3666 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
3667 &error_message);
3668 }
3669 else
3670 {
3671 Elf_Internal_Rela rel;
3672 bfd_boolean unresolved_reloc;
3673 char *error_message;
3674 bfd_vma points_to = sym_value + stub_entry->target_addend
3675 + template_sequence[stub_reloc_idx[i]].reloc_addend;
3676
3677 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3678 rel.r_info = ELF32_R_INFO (0,
3679 template_sequence[stub_reloc_idx[i]].r_type);
3680 rel.r_addend = 0;
3681
3682 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3683 (template_sequence[stub_reloc_idx[i]].r_type),
3684 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3685 points_to, info, stub_entry->target_section, "", stub_entry->st_type,
3686 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
3687 &error_message);
3688 }
3689
3690 return TRUE;
3691 #undef MAXRELOCS
3692 }
3693
3694 /* Calculate the template, template size and instruction size for a stub.
3695    The return value is the total size of the stub in bytes.  */
3696
3697 static unsigned int
3698 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
3699 const insn_sequence **stub_template,
3700 int *stub_template_size)
3701 {
3702 const insn_sequence *template_sequence = NULL;
3703 int template_size = 0, i;
3704 unsigned int size;
3705
3706 template_sequence = stub_definitions[stub_type].template_sequence;
3707 if (stub_template)
3708 *stub_template = template_sequence;
3709
3710 template_size = stub_definitions[stub_type].template_size;
3711 if (stub_template_size)
3712 *stub_template_size = template_size;
3713
3714 size = 0;
3715 for (i = 0; i < template_size; i++)
3716 {
3717 switch (template_sequence[i].type)
3718 {
3719 case THUMB16_TYPE:
3720 size += 2;
3721 break;
3722
3723 case ARM_TYPE:
3724 case THUMB32_TYPE:
3725 case DATA_TYPE:
3726 size += 4;
3727 break;
3728
3729 default:
3730 BFD_FAIL ();
3731 return 0;
3732 }
3733 }
3734
3735 return size;
3736 }
3737
3738 /* As above, but don't actually build the stub. Just bump offset so
3739 we know stub section sizes. */
3740
3741 static bfd_boolean
3742 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
3743 void *in_arg ATTRIBUTE_UNUSED)
3744 {
3745 struct elf32_arm_stub_hash_entry *stub_entry;
3746 const insn_sequence *template_sequence;
3747 int template_size, size;
3748
3749 /* Massage our args to the form they really have. */
3750 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3751
3752 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
3753 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
3754
3755 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
3756 &template_size);
3757
3758 stub_entry->stub_size = size;
3759 stub_entry->stub_template = template_sequence;
3760 stub_entry->stub_template_size = template_size;
3761
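  /* Reserve space rounded up to a multiple of 8 bytes.  The build pass
     (arm_build_one_stub) packs stubs at their exact sizes, so this can
     only over-estimate the final section size.  */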
3762 size = (size + 7) & ~7;
3763 stub_entry->stub_sec->size += size;
3764
3765 return TRUE;
3766 }
3767
3768 /* External entry points for sizing and building linker stubs. */
3769
3770 /* Set up various things so that we can make a list of input sections
3771 for each output section included in the link. Returns -1 on error,
3772 0 when no stubs will be needed, and 1 on success. */
3773
3774 int
3775 elf32_arm_setup_section_lists (bfd *output_bfd,
3776 struct bfd_link_info *info)
3777 {
3778 bfd *input_bfd;
3779 unsigned int bfd_count;
3780 int top_id, top_index;
3781 asection *section;
3782 asection **input_list, **list;
3783 bfd_size_type amt;
3784 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3785
3786 if (htab == NULL)
3787 return 0;
3788 if (! is_elf_hash_table (htab))
3789 return 0;
3790
3791 /* Count the number of input BFDs and find the top input section id. */
3792 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
3793 input_bfd != NULL;
3794 input_bfd = input_bfd->link_next)
3795 {
3796 bfd_count += 1;
3797 for (section = input_bfd->sections;
3798 section != NULL;
3799 section = section->next)
3800 {
3801 if (top_id < section->id)
3802 top_id = section->id;
3803 }
3804 }
3805 htab->bfd_count = bfd_count;
3806
3807 amt = sizeof (struct map_stub) * (top_id + 1);
3808 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
3809 if (htab->stub_group == NULL)
3810 return -1;
3811 htab->top_id = top_id;
3812
3813 /* We can't use output_bfd->section_count here to find the top output
3814 section index as some sections may have been removed, and
3815 _bfd_strip_section_from_output doesn't renumber the indices. */
3816 for (section = output_bfd->sections, top_index = 0;
3817 section != NULL;
3818 section = section->next)
3819 {
3820 if (top_index < section->index)
3821 top_index = section->index;
3822 }
3823
3824 htab->top_index = top_index;
3825 amt = sizeof (asection *) * (top_index + 1);
3826 input_list = (asection **) bfd_malloc (amt);
3827 htab->input_list = input_list;
3828 if (input_list == NULL)
3829 return -1;
3830
3831 /* For sections we aren't interested in, mark their entries with a
3832 value we can check later. */
3833 list = input_list + top_index;
3834 do
3835 *list = bfd_abs_section_ptr;
3836 while (list-- != input_list);
3837
3838 for (section = output_bfd->sections;
3839 section != NULL;
3840 section = section->next)
3841 {
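      /* A NULL entry marks a code output section; elf32_arm_next_input_section
	 will chain the input sections of such an output section onto this
	 list head.  */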
3842 if ((section->flags & SEC_CODE) != 0)
3843 input_list[section->index] = NULL;
3844 }
3845
3846 return 1;
3847 }
3848
3849 /* The linker repeatedly calls this function for each input section,
3850 in the order that input sections are linked into output sections.
3851 Build lists of input sections to determine groupings between which
3852 we may insert linker stubs. */
3853
3854 void
3855 elf32_arm_next_input_section (struct bfd_link_info *info,
3856 asection *isec)
3857 {
3858 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3859
3860 if (htab == NULL)
3861 return;
3862
3863 if (isec->output_section->index <= htab->top_index)
3864 {
3865 asection **list = htab->input_list + isec->output_section->index;
3866
3867 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
3868 {
3869 /* Steal the link_sec pointer for our list. */
3870 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
3871 /* This happens to make the list in reverse order,
3872 which we reverse later. */
3873 PREV_SEC (isec) = *list;
3874 *list = isec;
3875 }
3876 }
3877 }
3878
3879 /* See whether we can group stub sections together. Grouping stub
3880 sections may result in fewer stubs. More importantly, we need to
3881 put all .init* and .fini* stubs at the end of the .init or
3882 .fini output sections respectively, because glibc splits the
3883 _init and _fini functions into multiple parts. Putting a stub in
3884 the middle of a function is not a good idea. */
3885
3886 static void
3887 group_sections (struct elf32_arm_link_hash_table *htab,
3888 bfd_size_type stub_group_size,
3889 bfd_boolean stubs_always_after_branch)
3890 {
3891 asection **list = htab->input_list;
3892
3893 do
3894 {
3895 asection *tail = *list;
3896 asection *head;
3897
3898 if (tail == bfd_abs_section_ptr)
3899 continue;
3900
3901 /* Reverse the list: we must avoid placing stubs at the
3902 beginning of the section because the beginning of the text
3903 section may be required for an interrupt vector in bare metal
3904 code. */
3905 #define NEXT_SEC PREV_SEC
3906 head = NULL;
3907 while (tail != NULL)
3908 {
3909 /* Pop from tail. */
3910 asection *item = tail;
3911 tail = PREV_SEC (item);
3912
3913 /* Push on head. */
3914 NEXT_SEC (item) = head;
3915 head = item;
3916 }
3917
3918 while (head != NULL)
3919 {
3920 asection *curr;
3921 asection *next;
3922 bfd_vma stub_group_start = head->output_offset;
3923 bfd_vma end_of_next;
3924
3925 curr = head;
3926 while (NEXT_SEC (curr) != NULL)
3927 {
3928 next = NEXT_SEC (curr);
3929 end_of_next = next->output_offset + next->size;
3930 if (end_of_next - stub_group_start >= stub_group_size)
3931 /* End of NEXT is too far from start, so stop. */
3932 break;
3933 /* Add NEXT to the group. */
3934 curr = next;
3935 }
3936
3937 /* OK, the size from the start to the start of CURR is less
3938 than stub_group_size and thus can be handled by one stub
3939 section. (Or the head section is itself larger than
3940 stub_group_size, in which case we may be toast.)
3941 We should really be keeping track of the total size of
3942 stubs added here, as stubs contribute to the final output
3943 section size. */
3944 do
3945 {
3946 next = NEXT_SEC (head);
3947 /* Set up this stub group. */
3948 htab->stub_group[head->id].link_sec = curr;
3949 }
3950 while (head != curr && (head = next) != NULL);
3951
3952 /* But wait, there's more! Input sections up to stub_group_size
3953 bytes after the stub section can be handled by it too. */
3954 if (!stubs_always_after_branch)
3955 {
3956 stub_group_start = curr->output_offset + curr->size;
3957
3958 while (next != NULL)
3959 {
3960 end_of_next = next->output_offset + next->size;
3961 if (end_of_next - stub_group_start >= stub_group_size)
3962 /* End of NEXT is too far from stubs, so stop. */
3963 break;
3964 /* Add NEXT to the stub group. */
3965 head = next;
3966 next = NEXT_SEC (head);
3967 htab->stub_group[head->id].link_sec = curr;
3968 }
3969 }
3970 head = next;
3971 }
3972 }
3973 while (list++ != htab->input_list + htab->top_index);
3974
3975 free (htab->input_list);
3976 #undef PREV_SEC
3977 #undef NEXT_SEC
3978 }
3979
3980 /* Comparison function for sorting/searching relocations relating to Cortex-A8
3981 erratum fix. */
3982
3983 static int
3984 a8_reloc_compare (const void *a, const void *b)
3985 {
3986 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
3987 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
3988
3989 if (ra->from < rb->from)
3990 return -1;
3991 else if (ra->from > rb->from)
3992 return 1;
3993 else
3994 return 0;
3995 }
3996
3997 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
3998 const char *, char **);
3999
4000 /* Helper function to scan code for sequences which might trigger the Cortex-A8
4001 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
4002 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
4003 otherwise. */
4004
4005 static bfd_boolean
4006 cortex_a8_erratum_scan (bfd *input_bfd,
4007 struct bfd_link_info *info,
4008 struct a8_erratum_fix **a8_fixes_p,
4009 unsigned int *num_a8_fixes_p,
4010 unsigned int *a8_fix_table_size_p,
4011 struct a8_erratum_reloc *a8_relocs,
4012 unsigned int num_a8_relocs,
4013 unsigned prev_num_a8_fixes,
4014 bfd_boolean *stub_changed_p)
4015 {
4016 asection *section;
4017 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4018 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
4019 unsigned int num_a8_fixes = *num_a8_fixes_p;
4020 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
4021
4022 if (htab == NULL)
4023 return FALSE;
4024
4025 for (section = input_bfd->sections;
4026 section != NULL;
4027 section = section->next)
4028 {
4029 bfd_byte *contents = NULL;
4030 struct _arm_elf_section_data *sec_data;
4031 unsigned int span;
4032 bfd_vma base_vma;
4033
4034 if (elf_section_type (section) != SHT_PROGBITS
4035 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
4036 || (section->flags & SEC_EXCLUDE) != 0
4037 || (section->sec_info_type == ELF_INFO_TYPE_JUST_SYMS)
4038 || (section->output_section == bfd_abs_section_ptr))
4039 continue;
4040
4041 base_vma = section->output_section->vma + section->output_offset;
4042
4043 if (elf_section_data (section)->this_hdr.contents != NULL)
4044 contents = elf_section_data (section)->this_hdr.contents;
4045 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4046 return TRUE;
4047
4048 sec_data = elf32_arm_section_data (section);
4049
4050 for (span = 0; span < sec_data->mapcount; span++)
4051 {
4052 unsigned int span_start = sec_data->map[span].vma;
4053 unsigned int span_end = (span == sec_data->mapcount - 1)
4054 ? section->size : sec_data->map[span + 1].vma;
4055 unsigned int i;
4056 char span_type = sec_data->map[span].type;
4057 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
4058
4059 if (span_type != 't')
4060 continue;
4061
4062 /* Span is entirely within a single 4KB region: skip scanning. */
4063 if (((base_vma + span_start) & ~0xfff)
4064 == ((base_vma + span_end) & ~0xfff))
4065 continue;
4066
4067 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
4068
4069 * The opcode is BLX.W, BL.W, B.W, Bcc.W
4070 * The branch target is in the same 4KB region as the
4071 first half of the branch.
4072 * The instruction before the branch is a 32-bit
4073 length non-branch instruction. */
4074 for (i = span_start; i < span_end;)
4075 {
4076 unsigned int insn = bfd_getl16 (&contents[i]);
4077 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
4078 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
4079
4080 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
4081 insn_32bit = TRUE;
4082
4083 if (insn_32bit)
4084 {
4085 /* Load the rest of the insn (in manual-friendly order). */
4086 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
4087
4088 /* Encoding T4: B<c>.W. */
4089 is_b = (insn & 0xf800d000) == 0xf0009000;
4090 /* Encoding T1: BL<c>.W. */
4091 is_bl = (insn & 0xf800d000) == 0xf000d000;
4092 /* Encoding T2: BLX<c>.W. */
4093 is_blx = (insn & 0xf800d000) == 0xf000c000;
4094 /* Encoding T3: B<c>.W (not permitted in IT block). */
4095 is_bcc = (insn & 0xf800d000) == 0xf0008000
4096 && (insn & 0x07f00000) != 0x03800000;
4097 }
4098
4099 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
4100
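	      /* The first halfword of the branch occupies the last two bytes
		 of a 4KB page, so the 32-bit branch straddles a page boundary;
		 combined with the preceding 32-bit non-branch instruction this
		 matches the erratum conditions described above.  */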
4101 if (((base_vma + i) & 0xfff) == 0xffe
4102 && insn_32bit
4103 && is_32bit_branch
4104 && last_was_32bit
4105 && ! last_was_branch)
4106 {
4107 bfd_signed_vma offset = 0;
4108 bfd_boolean force_target_arm = FALSE;
4109 bfd_boolean force_target_thumb = FALSE;
4110 bfd_vma target;
4111 enum elf32_arm_stub_type stub_type = arm_stub_none;
4112 struct a8_erratum_reloc key, *found;
4113
4114 key.from = base_vma + i;
4115 found = (struct a8_erratum_reloc *)
4116 bsearch (&key, a8_relocs, num_a8_relocs,
4117 sizeof (struct a8_erratum_reloc),
4118 &a8_reloc_compare);
4119
4120 if (found)
4121 {
4122 char *error_message = NULL;
4123 struct elf_link_hash_entry *entry;
4124 bfd_boolean use_plt = FALSE;
4125
4126 /* We don't care about the error returned from this
4127 		     function, only whether there is glue or not.  */
4128 entry = find_thumb_glue (info, found->sym_name,
4129 &error_message);
4130
4131 if (entry)
4132 found->non_a8_stub = TRUE;
4133
4134 /* Keep a simpler condition, for the sake of clarity. */
4135 if (htab->splt != NULL && found->hash != NULL
4136 && found->hash->root.plt.offset != (bfd_vma) -1)
4137 use_plt = TRUE;
4138
4139 if (found->r_type == R_ARM_THM_CALL)
4140 {
4141 if (found->st_type != STT_ARM_TFUNC || use_plt)
4142 force_target_arm = TRUE;
4143 else
4144 force_target_thumb = TRUE;
4145 }
4146 }
4147
4148 /* Check if we have an offending branch instruction. */
4149
4150 if (found && found->non_a8_stub)
4151 /* We've already made a stub for this instruction, e.g.
4152 it's a long branch or a Thumb->ARM stub. Assume that
4153 stub will suffice to work around the A8 erratum (see
4154 setting of always_after_branch above). */
4155 ;
4156 else if (is_bcc)
4157 {
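		      /* Reconstruct the signed branch offset from the fields of
			 the Thumb-2 conditional branch encoding (T3): imm11,
			 imm6, J1, J2 and S form a 21-bit value which is then
			 sign-extended.  */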
4158 offset = (insn & 0x7ff) << 1;
4159 offset |= (insn & 0x3f0000) >> 4;
4160 offset |= (insn & 0x2000) ? 0x40000 : 0;
4161 offset |= (insn & 0x800) ? 0x80000 : 0;
4162 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4163 if (offset & 0x100000)
4164 offset |= ~ ((bfd_signed_vma) 0xfffff);
4165 stub_type = arm_stub_a8_veneer_b_cond;
4166 }
4167 else if (is_b || is_bl || is_blx)
4168 {
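		      /* Reconstruct the signed offset of an unconditional B.W,
			 BL or BLX (encodings T4, T1 and T2): a 25-bit value
			 formed from S, I1, I2, imm10 and imm11, where
			 I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S).  */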
4169 int s = (insn & 0x4000000) != 0;
4170 int j1 = (insn & 0x2000) != 0;
4171 int j2 = (insn & 0x800) != 0;
4172 int i1 = !(j1 ^ s);
4173 int i2 = !(j2 ^ s);
4174
4175 offset = (insn & 0x7ff) << 1;
4176 offset |= (insn & 0x3ff0000) >> 4;
4177 offset |= i2 << 22;
4178 offset |= i1 << 23;
4179 offset |= s << 24;
4180 if (offset & 0x1000000)
4181 offset |= ~ ((bfd_signed_vma) 0xffffff);
4182
4183 if (is_blx)
4184 offset &= ~ ((bfd_signed_vma) 3);
4185
4186 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4187 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4188 }
4189
4190 if (stub_type != arm_stub_none)
4191 {
4192 bfd_vma pc_for_insn = base_vma + i + 4;
4193
4194 /* The original instruction is a BL, but the target is
4195 an ARM instruction. If we were not making a stub,
4196 the BL would have been converted to a BLX. Use the
4197 BLX stub instead in that case. */
4198 if (htab->use_blx && force_target_arm
4199 && stub_type == arm_stub_a8_veneer_bl)
4200 {
4201 stub_type = arm_stub_a8_veneer_blx;
4202 is_blx = TRUE;
4203 is_bl = FALSE;
4204 }
4205 /* Conversely, if the original instruction was
4206 BLX but the target is Thumb mode, use the BL
4207 stub. */
4208 else if (force_target_thumb
4209 && stub_type == arm_stub_a8_veneer_blx)
4210 {
4211 stub_type = arm_stub_a8_veneer_bl;
4212 is_blx = FALSE;
4213 is_bl = TRUE;
4214 }
4215
4216 if (is_blx)
4217 pc_for_insn &= ~ ((bfd_vma) 3);
4218
4219 /* If we found a relocation, use the proper destination,
4220 not the offset in the (unrelocated) instruction.
4221 Note this is always done if we switched the stub type
4222 above. */
4223 if (found)
4224 offset =
4225 (bfd_signed_vma) (found->destination - pc_for_insn);
4226
4227 target = pc_for_insn + offset;
4228
4229 /* The BLX stub is ARM-mode code. Adjust the offset to
4230 take the different PC value (+8 instead of +4) into
4231 account. */
4232 if (stub_type == arm_stub_a8_veneer_blx)
4233 offset += 4;
4234
4235 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4236 {
4237 char *stub_name = NULL;
4238
4239 if (num_a8_fixes == a8_fix_table_size)
4240 {
4241 a8_fix_table_size *= 2;
4242 a8_fixes = (struct a8_erratum_fix *)
4243 bfd_realloc (a8_fixes,
4244 sizeof (struct a8_erratum_fix)
4245 * a8_fix_table_size);
4246 }
4247
4248 if (num_a8_fixes < prev_num_a8_fixes)
4249 {
4250 /* If we're doing a subsequent scan,
4251 check if we've found the same fix as
4252 before, and try and reuse the stub
4253 name. */
4254 stub_name = a8_fixes[num_a8_fixes].stub_name;
4255 if ((a8_fixes[num_a8_fixes].section != section)
4256 || (a8_fixes[num_a8_fixes].offset != i))
4257 {
4258 free (stub_name);
4259 stub_name = NULL;
4260 *stub_changed_p = TRUE;
4261 }
4262 }
4263
4264 if (!stub_name)
4265 {
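			      /* Enough room for two 8-digit hex numbers, a
				 separating colon and a terminating NUL.  */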
4266 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
4267 if (stub_name != NULL)
4268 sprintf (stub_name, "%x:%x", section->id, i);
4269 }
4270
4271 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4272 a8_fixes[num_a8_fixes].section = section;
4273 a8_fixes[num_a8_fixes].offset = i;
4274 a8_fixes[num_a8_fixes].addend = offset;
4275 a8_fixes[num_a8_fixes].orig_insn = insn;
4276 a8_fixes[num_a8_fixes].stub_name = stub_name;
4277 a8_fixes[num_a8_fixes].stub_type = stub_type;
4278 a8_fixes[num_a8_fixes].st_type =
4279 is_blx ? STT_FUNC : STT_ARM_TFUNC;
4280
4281 num_a8_fixes++;
4282 }
4283 }
4284 }
4285
4286 i += insn_32bit ? 4 : 2;
4287 last_was_32bit = insn_32bit;
4288 last_was_branch = is_32bit_branch;
4289 }
4290 }
4291
4292 if (elf_section_data (section)->this_hdr.contents == NULL)
4293 free (contents);
4294 }
4295
4296 *a8_fixes_p = a8_fixes;
4297 *num_a8_fixes_p = num_a8_fixes;
4298 *a8_fix_table_size_p = a8_fix_table_size;
4299
4300 return FALSE;
4301 }
4302
4303 /* Determine and set the size of the stub section for a final link.
4304
4305 The basic idea here is to examine all the relocations looking for
4306 PC-relative calls to a target that is unreachable with a "bl"
4307 instruction. */
4308
4309 bfd_boolean
4310 elf32_arm_size_stubs (bfd *output_bfd,
4311 bfd *stub_bfd,
4312 struct bfd_link_info *info,
4313 bfd_signed_vma group_size,
4314 asection * (*add_stub_section) (const char *, asection *),
4315 void (*layout_sections_again) (void))
4316 {
4317 bfd_size_type stub_group_size;
4318 bfd_boolean stubs_always_after_branch;
4319 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4320 struct a8_erratum_fix *a8_fixes = NULL;
4321 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
4322 struct a8_erratum_reloc *a8_relocs = NULL;
4323 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
4324
4325 if (htab == NULL)
4326 return FALSE;
4327
4328 if (htab->fix_cortex_a8)
4329 {
4330 a8_fixes = (struct a8_erratum_fix *)
4331 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
4332 a8_relocs = (struct a8_erratum_reloc *)
4333 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
4334 }
4335
4336 /* Propagate mach to stub bfd, because it may not have been
4337 finalized when we created stub_bfd. */
4338 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4339 bfd_get_mach (output_bfd));
4340
4341 /* Stash our params away. */
4342 htab->stub_bfd = stub_bfd;
4343 htab->add_stub_section = add_stub_section;
4344 htab->layout_sections_again = layout_sections_again;
4345 stubs_always_after_branch = group_size < 0;
4346
4347 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
4348 as the first half of a 32-bit branch straddling two 4K pages. This is a
4349 crude way of enforcing that. */
4350 if (htab->fix_cortex_a8)
4351 stubs_always_after_branch = 1;
4352
4353 if (group_size < 0)
4354 stub_group_size = -group_size;
4355 else
4356 stub_group_size = group_size;
4357
4358 if (stub_group_size == 1)
4359 {
4360 /* Default values. */
4361 	 /* The Thumb branch range of +-4MB has to be used as the default
4362 	    maximum group size, since a given section can contain both ARM and
4363 	    Thumb code, so the worst case has to be taken into account.
4364
4365 This value is 24K less than that, which allows for 2025
4366 12-byte stubs. If we exceed that, then we will fail to link.
4367 The user will have to relink with an explicit group size
4368 option. */
4369 stub_group_size = 4170000;
4370 }
4371
4372 group_sections (htab, stub_group_size, stubs_always_after_branch);
4373
4374 /* If we're applying the cortex A8 fix, we need to determine the
4375 program header size now, because we cannot change it later --
4376 that could alter section placements. Notice the A8 erratum fix
4377 ends up requiring the section addresses to remain unchanged
4378 modulo the page size. That's something we cannot represent
4379 inside BFD, and we don't want to force the section alignment to
4380 be the page size. */
4381 if (htab->fix_cortex_a8)
4382 (*htab->layout_sections_again) ();
4383
4384 while (1)
4385 {
4386 bfd *input_bfd;
4387 unsigned int bfd_indx;
4388 asection *stub_sec;
4389 bfd_boolean stub_changed = FALSE;
4390 unsigned prev_num_a8_fixes = num_a8_fixes;
4391
4392 num_a8_fixes = 0;
4393 for (input_bfd = info->input_bfds, bfd_indx = 0;
4394 input_bfd != NULL;
4395 input_bfd = input_bfd->link_next, bfd_indx++)
4396 {
4397 Elf_Internal_Shdr *symtab_hdr;
4398 asection *section;
4399 Elf_Internal_Sym *local_syms = NULL;
4400
4401 num_a8_relocs = 0;
4402
4403 /* We'll need the symbol table in a second. */
4404 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4405 if (symtab_hdr->sh_info == 0)
4406 continue;
4407
4408 /* Walk over each section attached to the input bfd. */
4409 for (section = input_bfd->sections;
4410 section != NULL;
4411 section = section->next)
4412 {
4413 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
4414
4415 /* If there aren't any relocs, then there's nothing more
4416 to do. */
4417 if ((section->flags & SEC_RELOC) == 0
4418 || section->reloc_count == 0
4419 || (section->flags & SEC_CODE) == 0)
4420 continue;
4421
4422 /* If this section is a link-once section that will be
4423 discarded, then don't create any stubs. */
4424 if (section->output_section == NULL
4425 || section->output_section->owner != output_bfd)
4426 continue;
4427
4428 /* Get the relocs. */
4429 internal_relocs
4430 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
4431 NULL, info->keep_memory);
4432 if (internal_relocs == NULL)
4433 goto error_ret_free_local;
4434
4435 /* Now examine each relocation. */
4436 irela = internal_relocs;
4437 irelaend = irela + section->reloc_count;
4438 for (; irela < irelaend; irela++)
4439 {
4440 unsigned int r_type, r_indx;
4441 enum elf32_arm_stub_type stub_type;
4442 struct elf32_arm_stub_hash_entry *stub_entry;
4443 asection *sym_sec;
4444 bfd_vma sym_value;
4445 bfd_vma destination;
4446 struct elf32_arm_link_hash_entry *hash;
4447 const char *sym_name;
4448 char *stub_name;
4449 const asection *id_sec;
4450 int st_type;
4451 bfd_boolean created_stub = FALSE;
4452
4453 r_type = ELF32_R_TYPE (irela->r_info);
4454 r_indx = ELF32_R_SYM (irela->r_info);
4455
4456 if (r_type >= (unsigned int) R_ARM_max)
4457 {
4458 bfd_set_error (bfd_error_bad_value);
4459 error_ret_free_internal:
4460 if (elf_section_data (section)->relocs == NULL)
4461 free (internal_relocs);
4462 goto error_ret_free_local;
4463 }
4464
4465 /* Only look for stubs on branch instructions. */
4466 if ((r_type != (unsigned int) R_ARM_CALL)
4467 && (r_type != (unsigned int) R_ARM_THM_CALL)
4468 && (r_type != (unsigned int) R_ARM_JUMP24)
4469 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
4470 && (r_type != (unsigned int) R_ARM_THM_XPC22)
4471 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
4472 && (r_type != (unsigned int) R_ARM_PLT32))
4473 continue;
4474
4475 /* Now determine the call target, its name, value,
4476 section. */
4477 sym_sec = NULL;
4478 sym_value = 0;
4479 destination = 0;
4480 hash = NULL;
4481 sym_name = NULL;
4482 if (r_indx < symtab_hdr->sh_info)
4483 {
4484 /* It's a local symbol. */
4485 Elf_Internal_Sym *sym;
4486
4487 if (local_syms == NULL)
4488 {
4489 local_syms
4490 = (Elf_Internal_Sym *) symtab_hdr->contents;
4491 if (local_syms == NULL)
4492 local_syms
4493 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
4494 symtab_hdr->sh_info, 0,
4495 NULL, NULL, NULL);
4496 if (local_syms == NULL)
4497 goto error_ret_free_internal;
4498 }
4499
4500 sym = local_syms + r_indx;
4501 if (sym->st_shndx == SHN_UNDEF)
4502 sym_sec = bfd_und_section_ptr;
4503 else if (sym->st_shndx == SHN_ABS)
4504 sym_sec = bfd_abs_section_ptr;
4505 else if (sym->st_shndx == SHN_COMMON)
4506 sym_sec = bfd_com_section_ptr;
4507 else
4508 sym_sec =
4509 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
4510
4511 if (!sym_sec)
4512 /* This is an undefined symbol. It can never
4513 be resolved. */
4514 continue;
4515
4516 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
4517 sym_value = sym->st_value;
4518 destination = (sym_value + irela->r_addend
4519 + sym_sec->output_offset
4520 + sym_sec->output_section->vma);
4521 st_type = ELF_ST_TYPE (sym->st_info);
4522 sym_name
4523 = bfd_elf_string_from_elf_section (input_bfd,
4524 symtab_hdr->sh_link,
4525 sym->st_name);
4526 }
4527 else
4528 {
4529 /* It's an external symbol. */
4530 int e_indx;
4531
4532 e_indx = r_indx - symtab_hdr->sh_info;
4533 hash = ((struct elf32_arm_link_hash_entry *)
4534 elf_sym_hashes (input_bfd)[e_indx]);
4535
4536 while (hash->root.root.type == bfd_link_hash_indirect
4537 || hash->root.root.type == bfd_link_hash_warning)
4538 hash = ((struct elf32_arm_link_hash_entry *)
4539 hash->root.root.u.i.link);
4540
4541 if (hash->root.root.type == bfd_link_hash_defined
4542 || hash->root.root.type == bfd_link_hash_defweak)
4543 {
4544 sym_sec = hash->root.root.u.def.section;
4545 sym_value = hash->root.root.u.def.value;
4546
4547 struct elf32_arm_link_hash_table *globals =
4548 elf32_arm_hash_table (info);
4549
4550 /* For a destination in a shared library,
4551 use the PLT stub as target address to
4552 decide whether a branch stub is
4553 needed. */
4554 if (globals != NULL
4555 && globals->splt != NULL
4556 && hash != NULL
4557 && hash->root.plt.offset != (bfd_vma) -1)
4558 {
4559 sym_sec = globals->splt;
4560 sym_value = hash->root.plt.offset;
4561 if (sym_sec->output_section != NULL)
4562 destination = (sym_value
4563 + sym_sec->output_offset
4564 + sym_sec->output_section->vma);
4565 }
4566 else if (sym_sec->output_section != NULL)
4567 destination = (sym_value + irela->r_addend
4568 + sym_sec->output_offset
4569 + sym_sec->output_section->vma);
4570 }
4571 else if ((hash->root.root.type == bfd_link_hash_undefined)
4572 || (hash->root.root.type == bfd_link_hash_undefweak))
4573 {
4574 /* For a shared library, use the PLT stub as
4575 target address to decide whether a long
4576 branch stub is needed.
4577 		     For absolute code, such undefined symbols cannot be handled.  */
4578 struct elf32_arm_link_hash_table *globals =
4579 elf32_arm_hash_table (info);
4580
4581 if (globals != NULL
4582 && globals->splt != NULL
4583 && hash != NULL
4584 && hash->root.plt.offset != (bfd_vma) -1)
4585 {
4586 sym_sec = globals->splt;
4587 sym_value = hash->root.plt.offset;
4588 if (sym_sec->output_section != NULL)
4589 destination = (sym_value
4590 + sym_sec->output_offset
4591 + sym_sec->output_section->vma);
4592 }
4593 else
4594 continue;
4595 }
4596 else
4597 {
4598 bfd_set_error (bfd_error_bad_value);
4599 goto error_ret_free_internal;
4600 }
4601 st_type = ELF_ST_TYPE (hash->root.type);
4602 sym_name = hash->root.root.root.string;
4603 }
4604
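		  /* A do ... while (0) block, so that "break" can be used to
		     abandon stub creation for this relocation.  */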
4605 do
4606 {
4607 /* Determine what (if any) linker stub is needed. */
4608 stub_type = arm_type_of_stub (info, section, irela,
4609 &st_type, hash,
4610 destination, sym_sec,
4611 input_bfd, sym_name);
4612 if (stub_type == arm_stub_none)
4613 break;
4614
4615 /* Support for grouping stub sections. */
4616 id_sec = htab->stub_group[section->id].link_sec;
4617
4618 /* Get the name of this stub. */
4619 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
4620 irela, stub_type);
4621 if (!stub_name)
4622 goto error_ret_free_internal;
4623
4624 /* We've either created a stub for this reloc already,
4625 or we are about to. */
4626 created_stub = TRUE;
4627
4628 stub_entry = arm_stub_hash_lookup
4629 (&htab->stub_hash_table, stub_name,
4630 FALSE, FALSE);
4631 if (stub_entry != NULL)
4632 {
4633 /* The proper stub has already been created. */
4634 free (stub_name);
4635 stub_entry->target_value = sym_value;
4636 break;
4637 }
4638
4639 stub_entry = elf32_arm_add_stub (stub_name, section,
4640 htab);
4641 if (stub_entry == NULL)
4642 {
4643 free (stub_name);
4644 goto error_ret_free_internal;
4645 }
4646
4647 stub_entry->target_value = sym_value;
4648 stub_entry->target_section = sym_sec;
4649 stub_entry->stub_type = stub_type;
4650 stub_entry->h = hash;
4651 stub_entry->st_type = st_type;
4652
4653 if (sym_name == NULL)
4654 sym_name = "unnamed";
4655 stub_entry->output_name = (char *)
4656 bfd_alloc (htab->stub_bfd,
4657 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
4658 + strlen (sym_name));
4659 if (stub_entry->output_name == NULL)
4660 {
4661 free (stub_name);
4662 goto error_ret_free_internal;
4663 }
4664
4665 /* For historical reasons, use the existing names for
4666 ARM-to-Thumb and Thumb-to-ARM stubs. */
4667 if ( ((r_type == (unsigned int) R_ARM_THM_CALL)
4668 || (r_type == (unsigned int) R_ARM_THM_JUMP24))
4669 && st_type != STT_ARM_TFUNC)
4670 sprintf (stub_entry->output_name,
4671 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
4672 else if ( ((r_type == (unsigned int) R_ARM_CALL)
4673 || (r_type == (unsigned int) R_ARM_JUMP24))
4674 && st_type == STT_ARM_TFUNC)
4675 sprintf (stub_entry->output_name,
4676 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
4677 else
4678 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
4679 sym_name);
4680
4681 stub_changed = TRUE;
4682 }
4683 while (0);
4684
4685 /* Look for relocations which might trigger Cortex-A8
4686 erratum. */
4687 if (htab->fix_cortex_a8
4688 && (r_type == (unsigned int) R_ARM_THM_JUMP24
4689 || r_type == (unsigned int) R_ARM_THM_JUMP19
4690 || r_type == (unsigned int) R_ARM_THM_CALL
4691 || r_type == (unsigned int) R_ARM_THM_XPC22))
4692 {
4693 bfd_vma from = section->output_section->vma
4694 + section->output_offset
4695 + irela->r_offset;
4696
4697 if ((from & 0xfff) == 0xffe)
4698 {
4699 /* Found a candidate. Note we haven't checked the
4700 destination is within 4K here: if we do so (and
4701 don't create an entry in a8_relocs) we can't tell
4702 that a branch should have been relocated when
4703 scanning later. */
4704 if (num_a8_relocs == a8_reloc_table_size)
4705 {
4706 a8_reloc_table_size *= 2;
4707 a8_relocs = (struct a8_erratum_reloc *)
4708 bfd_realloc (a8_relocs,
4709 sizeof (struct a8_erratum_reloc)
4710 * a8_reloc_table_size);
4711 }
4712
4713 a8_relocs[num_a8_relocs].from = from;
4714 a8_relocs[num_a8_relocs].destination = destination;
4715 a8_relocs[num_a8_relocs].r_type = r_type;
4716 a8_relocs[num_a8_relocs].st_type = st_type;
4717 a8_relocs[num_a8_relocs].sym_name = sym_name;
4718 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
4719 a8_relocs[num_a8_relocs].hash = hash;
4720
4721 num_a8_relocs++;
4722 }
4723 }
4724 }
4725
4726 /* We're done with the internal relocs, free them. */
4727 if (elf_section_data (section)->relocs == NULL)
4728 free (internal_relocs);
4729 }
4730
4731 if (htab->fix_cortex_a8)
4732 {
4733 /* Sort relocs which might apply to Cortex-A8 erratum. */
4734 qsort (a8_relocs, num_a8_relocs,
4735 sizeof (struct a8_erratum_reloc),
4736 &a8_reloc_compare);
4737
4738 /* Scan for branches which might trigger Cortex-A8 erratum. */
4739 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
4740 &num_a8_fixes, &a8_fix_table_size,
4741 a8_relocs, num_a8_relocs,
4742 prev_num_a8_fixes, &stub_changed)
4743 != 0)
4744 goto error_ret_free_local;
4745 }
4746 }
4747
4748 if (prev_num_a8_fixes != num_a8_fixes)
4749 stub_changed = TRUE;
4750
4751 if (!stub_changed)
4752 break;
4753
4754 /* OK, we've added some stubs. Find out the new size of the
4755 stub sections. */
4756 for (stub_sec = htab->stub_bfd->sections;
4757 stub_sec != NULL;
4758 stub_sec = stub_sec->next)
4759 {
4760 /* Ignore non-stub sections. */
4761 if (!strstr (stub_sec->name, STUB_SUFFIX))
4762 continue;
4763
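	  /* Reset the size so that arm_size_one_stub can re-accumulate it
	     from scratch on this iteration.  */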
4764 stub_sec->size = 0;
4765 }
4766
4767 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
4768
4769 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
4770 if (htab->fix_cortex_a8)
4771 for (i = 0; i < num_a8_fixes; i++)
4772 {
4773 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
4774 a8_fixes[i].section, htab);
4775
4776 if (stub_sec == NULL)
4777 goto error_ret_free_local;
4778
4779 stub_sec->size
4780 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
4781 NULL);
4782 }
4783
4784
4785 /* Ask the linker to do its stuff. */
4786 (*htab->layout_sections_again) ();
4787 }
4788
4789 /* Add stubs for Cortex-A8 erratum fixes now. */
4790 if (htab->fix_cortex_a8)
4791 {
4792 for (i = 0; i < num_a8_fixes; i++)
4793 {
4794 struct elf32_arm_stub_hash_entry *stub_entry;
4795 char *stub_name = a8_fixes[i].stub_name;
4796 asection *section = a8_fixes[i].section;
4797 unsigned int section_id = a8_fixes[i].section->id;
4798 asection *link_sec = htab->stub_group[section_id].link_sec;
4799 asection *stub_sec = htab->stub_group[section_id].stub_sec;
4800 const insn_sequence *template_sequence;
4801 int template_size, size = 0;
4802
4803 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4804 TRUE, FALSE);
4805 if (stub_entry == NULL)
4806 {
4807 	      (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
4808 section->owner,
4809 stub_name);
4810 return FALSE;
4811 }
4812
4813 stub_entry->stub_sec = stub_sec;
4814 stub_entry->stub_offset = 0;
4815 stub_entry->id_sec = link_sec;
4816 stub_entry->stub_type = a8_fixes[i].stub_type;
4817 stub_entry->target_section = a8_fixes[i].section;
4818 stub_entry->target_value = a8_fixes[i].offset;
4819 stub_entry->target_addend = a8_fixes[i].addend;
4820 stub_entry->orig_insn = a8_fixes[i].orig_insn;
4821 stub_entry->st_type = a8_fixes[i].st_type;
4822
4823 size = find_stub_size_and_template (a8_fixes[i].stub_type,
4824 &template_sequence,
4825 &template_size);
4826
4827 stub_entry->stub_size = size;
4828 stub_entry->stub_template = template_sequence;
4829 stub_entry->stub_template_size = template_size;
4830 }
4831
4832 /* Stash the Cortex-A8 erratum fix array for use later in
4833 elf32_arm_write_section(). */
4834 htab->a8_erratum_fixes = a8_fixes;
4835 htab->num_a8_erratum_fixes = num_a8_fixes;
4836 }
4837 else
4838 {
4839 htab->a8_erratum_fixes = NULL;
4840 htab->num_a8_erratum_fixes = 0;
4841 }
4842 return TRUE;
4843
4844 error_ret_free_local:
4845 return FALSE;
4846 }
4847
4848 /* Build all the stubs associated with the current output file. The
4849 stubs are kept in a hash table attached to the main linker hash
4850 table. We also set up the .plt entries for statically linked PIC
4851 functions here. This function is called via arm_elf_finish in the
4852 linker. */
4853
4854 bfd_boolean
4855 elf32_arm_build_stubs (struct bfd_link_info *info)
4856 {
4857 asection *stub_sec;
4858 struct bfd_hash_table *table;
4859 struct elf32_arm_link_hash_table *htab;
4860
4861 htab = elf32_arm_hash_table (info);
4862 if (htab == NULL)
4863 return FALSE;
4864
4865 for (stub_sec = htab->stub_bfd->sections;
4866 stub_sec != NULL;
4867 stub_sec = stub_sec->next)
4868 {
4869 bfd_size_type size;
4870
4871 /* Ignore non-stub sections. */
4872 if (!strstr (stub_sec->name, STUB_SUFFIX))
4873 continue;
4874
4875 /* Allocate memory to hold the linker stubs. */
4876 size = stub_sec->size;
4877 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
4878 if (stub_sec->contents == NULL && size != 0)
4879 return FALSE;
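      /* Reset the size; arm_build_one_stub uses it as the running offset at
	 which to place each stub and rebuilds it as the stubs are emitted.  */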
4880 stub_sec->size = 0;
4881 }
4882
4883 /* Build the stubs as directed by the stub hash table. */
4884 table = &htab->stub_hash_table;
4885 bfd_hash_traverse (table, arm_build_one_stub, info);
4886 if (htab->fix_cortex_a8)
4887 {
4888 /* Place the cortex a8 stubs last. */
4889 htab->fix_cortex_a8 = -1;
4890 bfd_hash_traverse (table, arm_build_one_stub, info);
4891 }
4892
4893 return TRUE;
4894 }
4895
4896 /* Locate the Thumb encoded calling stub for NAME. */
4897
4898 static struct elf_link_hash_entry *
4899 find_thumb_glue (struct bfd_link_info *link_info,
4900 const char *name,
4901 char **error_message)
4902 {
4903 char *tmp_name;
4904 struct elf_link_hash_entry *hash;
4905 struct elf32_arm_link_hash_table *hash_table;
4906
4907 /* We need a pointer to the armelf specific hash table. */
4908 hash_table = elf32_arm_hash_table (link_info);
4909 if (hash_table == NULL)
4910 return NULL;
4911
4912 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
4913 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
4914
4915 BFD_ASSERT (tmp_name);
4916
4917 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
4918
4919 hash = elf_link_hash_lookup
4920 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4921
4922 if (hash == NULL
4923 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
4924 tmp_name, name) == -1)
4925 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4926
4927 free (tmp_name);
4928
4929 return hash;
4930 }
4931
4932 /* Locate the ARM encoded calling stub for NAME. */
4933
4934 static struct elf_link_hash_entry *
4935 find_arm_glue (struct bfd_link_info *link_info,
4936 const char *name,
4937 char **error_message)
4938 {
4939 char *tmp_name;
4940 struct elf_link_hash_entry *myh;
4941 struct elf32_arm_link_hash_table *hash_table;
4942
4943 /* We need a pointer to the elfarm specific hash table. */
4944 hash_table = elf32_arm_hash_table (link_info);
4945 if (hash_table == NULL)
4946 return NULL;
4947
4948 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
4949 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4950
4951 BFD_ASSERT (tmp_name);
4952
4953 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4954
4955 myh = elf_link_hash_lookup
4956 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4957
4958 if (myh == NULL
4959 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
4960 tmp_name, name) == -1)
4961 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4962
4963 free (tmp_name);
4964
4965 return myh;
4966 }
4967
4968 /* ARM->Thumb glue (static images):
4969
4970 .arm
4971 __func_from_arm:
4972 ldr r12, __func_addr
4973 bx r12
4974 __func_addr:
4975 .word func @ behave as if you saw a ARM_32 reloc.
4976
4977 (v5t static images)
4978 .arm
4979 __func_from_arm:
4980 ldr pc, __func_addr
4981 __func_addr:
4982 .word func @ behave as if you saw a ARM_32 reloc.
4983
4984 (relocatable images)
4985 .arm
4986 __func_from_arm:
4987 ldr r12, __func_offset
4988 add r12, r12, pc
4989 bx r12
4990 __func_offset:
4991 .word func - . */
4992
4993 #define ARM2THUMB_STATIC_GLUE_SIZE 12
4994 static const insn32 a2t1_ldr_insn = 0xe59fc000;
4995 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
4996 static const insn32 a2t3_func_addr_insn = 0x00000001;
4997
4998 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
4999 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
5000 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
5001
5002 #define ARM2THUMB_PIC_GLUE_SIZE 16
5003 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
5004 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
5005 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
5006
5007 /* Thumb->ARM:                                Thumb->(non-interworking aware) ARM
5008 
5009      .thumb                                   .thumb
5010      .align 2                                 .align 2
5011  __func_from_thumb:                       __func_from_thumb:
5012      bx pc                                    push  {r6, lr}
5013      nop                                      ldr   r6, __func_addr
5014      .arm                                     mov   lr, pc
5015      b func                                   bx    r6
5016                                           .arm
5017                                           ;; back_to_thumb
5018                                               ldmia r13! {r6, lr}
5019                                               bx    lr
5020                                           __func_addr:
5021                                               .word func  */
5022
5023 #define THUMB2ARM_GLUE_SIZE 8
5024 static const insn16 t2a1_bx_pc_insn = 0x4778;
5025 static const insn16 t2a2_noop_insn = 0x46c0;
5026 static const insn32 t2a3_b_insn = 0xea000000;
5027
5028 #define VFP11_ERRATUM_VENEER_SIZE 8
5029
5030 #define ARM_BX_VENEER_SIZE 12
5031 static const insn32 armbx1_tst_insn = 0xe3100001;
5032 static const insn32 armbx2_moveq_insn = 0x01a0f000;
5033 static const insn32 armbx3_bx_insn = 0xe12fff10;
5034
5035 #ifndef ELFARM_NABI_C_INCLUDED
5036 static void
5037 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
5038 {
5039 asection * s;
5040 bfd_byte * contents;
5041
5042 if (size == 0)
5043 {
5044 /* Do not include empty glue sections in the output. */
5045 if (abfd != NULL)
5046 {
5047 s = bfd_get_section_by_name (abfd, name);
5048 if (s != NULL)
5049 s->flags |= SEC_EXCLUDE;
5050 }
5051 return;
5052 }
5053
5054 BFD_ASSERT (abfd != NULL);
5055
5056 s = bfd_get_section_by_name (abfd, name);
5057 BFD_ASSERT (s != NULL);
5058
5059 contents = (bfd_byte *) bfd_alloc (abfd, size);
5060
5061 BFD_ASSERT (s->size == size);
5062 s->contents = contents;
5063 }
5064
5065 bfd_boolean
5066 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
5067 {
5068 struct elf32_arm_link_hash_table * globals;
5069
5070 globals = elf32_arm_hash_table (info);
5071 BFD_ASSERT (globals != NULL);
5072
5073 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5074 globals->arm_glue_size,
5075 ARM2THUMB_GLUE_SECTION_NAME);
5076
5077 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5078 globals->thumb_glue_size,
5079 THUMB2ARM_GLUE_SECTION_NAME);
5080
5081 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5082 globals->vfp11_erratum_glue_size,
5083 VFP11_ERRATUM_VENEER_SECTION_NAME);
5084
5085 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5086 globals->bx_glue_size,
5087 ARM_BX_GLUE_SECTION_NAME);
5088
5089 return TRUE;
5090 }
5091
5092 /* Allocate space and symbols for calling a Thumb function from ARM mode.
5093    Returns the symbol identifying the stub.  */
5094
5095 static struct elf_link_hash_entry *
5096 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
5097 struct elf_link_hash_entry * h)
5098 {
5099 const char * name = h->root.root.string;
5100 asection * s;
5101 char * tmp_name;
5102 struct elf_link_hash_entry * myh;
5103 struct bfd_link_hash_entry * bh;
5104 struct elf32_arm_link_hash_table * globals;
5105 bfd_vma val;
5106 bfd_size_type size;
5107
5108 globals = elf32_arm_hash_table (link_info);
5109 BFD_ASSERT (globals != NULL);
5110 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5111
5112 s = bfd_get_section_by_name
5113 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
5114
5115 BFD_ASSERT (s != NULL);
5116
5117 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5118 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5119
5120 BFD_ASSERT (tmp_name);
5121
5122 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5123
5124 myh = elf_link_hash_lookup
5125 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
5126
5127 if (myh != NULL)
5128 {
5129 /* We've already seen this guy. */
5130 free (tmp_name);
5131 return myh;
5132 }
5133
5134   /* The only trick here is using globals->arm_glue_size as the value.
5135 Even though the section isn't allocated yet, this is where we will be
5136 putting it. The +1 on the value marks that the stub has not been
5137 output yet - not that it is a Thumb function. */
5138 bh = NULL;
5139 val = globals->arm_glue_size + 1;
5140 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5141 tmp_name, BSF_GLOBAL, s, val,
5142 NULL, TRUE, FALSE, &bh);
5143
5144 myh = (struct elf_link_hash_entry *) bh;
5145 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5146 myh->forced_local = 1;
5147
5148 free (tmp_name);
5149
5150 if (link_info->shared || globals->root.is_relocatable_executable
5151 || globals->pic_veneer)
5152 size = ARM2THUMB_PIC_GLUE_SIZE;
5153 else if (globals->use_blx)
5154 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
5155 else
5156 size = ARM2THUMB_STATIC_GLUE_SIZE;
5157
5158 s->size += size;
5159 globals->arm_glue_size += size;
5160
5161 return myh;
5162 }
5163
5164 /* Allocate space for ARMv4 BX veneers. */
5165
5166 static void
5167 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
5168 {
5169 asection * s;
5170 struct elf32_arm_link_hash_table *globals;
5171 char *tmp_name;
5172 struct elf_link_hash_entry *myh;
5173 struct bfd_link_hash_entry *bh;
5174 bfd_vma val;
5175
5176 /* BX PC does not need a veneer. */
5177 if (reg == 15)
5178 return;
5179
5180 globals = elf32_arm_hash_table (link_info);
5181 BFD_ASSERT (globals != NULL);
5182 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5183
5184 /* Check if this veneer has already been allocated. */
5185 if (globals->bx_glue_offset[reg])
5186 return;
5187
5188 s = bfd_get_section_by_name
5189 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
5190
5191 BFD_ASSERT (s != NULL);
5192
5193 /* Add symbol for veneer. */
5194 tmp_name = (char *)
5195 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
5196
5197 BFD_ASSERT (tmp_name);
5198
5199 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
5200
5201 myh = elf_link_hash_lookup
5202 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
5203
5204 BFD_ASSERT (myh == NULL);
5205
5206 bh = NULL;
5207 val = globals->bx_glue_size;
5208 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5209 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5210 NULL, TRUE, FALSE, &bh);
5211
5212 myh = (struct elf_link_hash_entry *) bh;
5213 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5214 myh->forced_local = 1;
5215
5216 s->size += ARM_BX_VENEER_SIZE;
5217 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
5218 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
5219 }
5220
5221
5222 /* Add an entry to the code/data map for section SEC. */
5223
5224 static void
5225 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5226 {
5227 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5228 unsigned int newidx;
5229
5230 if (sec_data->map == NULL)
5231 {
5232 sec_data->map = (elf32_arm_section_map *)
5233 bfd_malloc (sizeof (elf32_arm_section_map));
5234 sec_data->mapcount = 0;
5235 sec_data->mapsize = 1;
5236 }
5237
5238 newidx = sec_data->mapcount++;
5239
5240 if (sec_data->mapcount > sec_data->mapsize)
5241 {
5242 sec_data->mapsize *= 2;
5243 sec_data->map = (elf32_arm_section_map *)
5244 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5245 * sizeof (elf32_arm_section_map));
5246 }
5247
5248 if (sec_data->map)
5249 {
5250 sec_data->map[newidx].vma = vma;
5251 sec_data->map[newidx].type = type;
5252 }
5253 }
5254
5255
5256 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5257 veneers are handled for now. */
5258
5259 static bfd_vma
5260 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5261 elf32_vfp11_erratum_list *branch,
5262 bfd *branch_bfd,
5263 asection *branch_sec,
5264 unsigned int offset)
5265 {
5266 asection *s;
5267 struct elf32_arm_link_hash_table *hash_table;
5268 char *tmp_name;
5269 struct elf_link_hash_entry *myh;
5270 struct bfd_link_hash_entry *bh;
5271 bfd_vma val;
5272 struct _arm_elf_section_data *sec_data;
5273 elf32_vfp11_erratum_list *newerr;
5274
5275 hash_table = elf32_arm_hash_table (link_info);
5276 BFD_ASSERT (hash_table != NULL);
5277 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5278
5279 s = bfd_get_section_by_name
5280 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5281
5282   BFD_ASSERT (s != NULL);
5283 
5284   sec_data = elf32_arm_section_data (s);
5285
5286 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
5287 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5288
5289 BFD_ASSERT (tmp_name);
5290
5291 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5292 hash_table->num_vfp11_fixes);
5293
5294 myh = elf_link_hash_lookup
5295 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5296
5297 BFD_ASSERT (myh == NULL);
5298
5299 bh = NULL;
5300 val = hash_table->vfp11_erratum_glue_size;
5301 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5302 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5303 NULL, TRUE, FALSE, &bh);
5304
5305 myh = (struct elf_link_hash_entry *) bh;
5306 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5307 myh->forced_local = 1;
5308
5309 /* Link veneer back to calling location. */
5310 sec_data->erratumcount += 1;
5311 newerr = (elf32_vfp11_erratum_list *)
5312 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5313
5314 newerr->type = VFP11_ERRATUM_ARM_VENEER;
5315 newerr->vma = -1;
5316 newerr->u.v.branch = branch;
5317 newerr->u.v.id = hash_table->num_vfp11_fixes;
5318 branch->u.b.veneer = newerr;
5319
5320 newerr->next = sec_data->erratumlist;
5321 sec_data->erratumlist = newerr;
5322
5323 /* A symbol for the return from the veneer. */
5324 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
5325 hash_table->num_vfp11_fixes);
5326
5327 myh = elf_link_hash_lookup
5328 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5329
5330 if (myh != NULL)
5331 abort ();
5332
5333 bh = NULL;
5334 val = offset + 4;
5335 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
5336 branch_sec, val, NULL, TRUE, FALSE, &bh);
5337
5338 myh = (struct elf_link_hash_entry *) bh;
5339 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5340 myh->forced_local = 1;
5341
5342 free (tmp_name);
5343
5344 /* Generate a mapping symbol for the veneer section, and explicitly add an
5345 entry for that symbol to the code/data map for the section. */
5346 if (hash_table->vfp11_erratum_glue_size == 0)
5347 {
5348 bh = NULL;
5349 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
5350 ever requires this erratum fix. */
5351 _bfd_generic_link_add_one_symbol (link_info,
5352 hash_table->bfd_of_glue_owner, "$a",
5353 BSF_LOCAL, s, 0, NULL,
5354 TRUE, FALSE, &bh);
5355
5356 myh = (struct elf_link_hash_entry *) bh;
5357 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5358 myh->forced_local = 1;
5359
5360 /* The elf32_arm_init_maps function only cares about symbols from input
5361 BFDs. We must make a note of this generated mapping symbol
5362 ourselves so that code byteswapping works properly in
5363 elf32_arm_write_section. */
5364 elf32_arm_section_map_add (s, 'a', 0);
5365 }
5366
5367 s->size += VFP11_ERRATUM_VENEER_SIZE;
5368 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
5369 hash_table->num_vfp11_fixes++;
5370
5371 /* The offset of the veneer. */
5372 return val;
5373 }
5374
5375 #define ARM_GLUE_SECTION_FLAGS \
5376 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
5377 | SEC_READONLY | SEC_LINKER_CREATED)
5378
5379 /* Create a fake section for use by the ARM backend of the linker. */
5380
5381 static bfd_boolean
5382 arm_make_glue_section (bfd * abfd, const char * name)
5383 {
5384 asection * sec;
5385
5386 sec = bfd_get_section_by_name (abfd, name);
5387 if (sec != NULL)
5388 /* Already made. */
5389 return TRUE;
5390
5391 sec = bfd_make_section_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
5392
5393 if (sec == NULL
5394 || !bfd_set_section_alignment (abfd, sec, 2))
5395 return FALSE;
5396
5397 /* Set the gc mark to prevent the section from being removed by garbage
5398 collection, despite the fact that no relocs refer to this section. */
5399 sec->gc_mark = 1;
5400
5401 return TRUE;
5402 }
5403
5404 /* Add the glue sections to ABFD. This function is called from the
5405 linker scripts in ld/emultempl/{armelf}.em. */
5406
5407 bfd_boolean
5408 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
5409 struct bfd_link_info *info)
5410 {
5411 /* If we are only performing a partial
5412 link do not bother adding the glue. */
5413 if (info->relocatable)
5414 return TRUE;
5415
5416 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
5417 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
5418 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
5419 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
5420 }
5421
5422 /* Select a BFD to be used to hold the sections used by the glue code.
5423 This function is called from the linker scripts in ld/emultempl/
5424 {armelf/pe}.em. */
5425
5426 bfd_boolean
5427 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
5428 {
5429 struct elf32_arm_link_hash_table *globals;
5430
5431 /* If we are only performing a partial link
5432 do not bother getting a bfd to hold the glue. */
5433 if (info->relocatable)
5434 return TRUE;
5435
5436 /* Make sure we don't attach the glue sections to a dynamic object. */
5437 BFD_ASSERT (!(abfd->flags & DYNAMIC));
5438
5439 globals = elf32_arm_hash_table (info);
5440 BFD_ASSERT (globals != NULL);
5441
5442 if (globals->bfd_of_glue_owner != NULL)
5443 return TRUE;
5444
5445 /* Save the bfd for later use. */
5446 globals->bfd_of_glue_owner = abfd;
5447
5448 return TRUE;
5449 }
5450
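/* Decide whether BLX is available, based on the architecture recorded in the
output's build attributes: Tag_CPU_arch values above 2 (i.e. newer than
ARMv4T) imply ARMv5T or later, which has BLX.  */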
5451 static void
5452 check_use_blx (struct elf32_arm_link_hash_table *globals)
5453 {
5454 if (bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
5455 Tag_CPU_arch) > 2)
5456 globals->use_blx = 1;
5457 }
5458
5459 bfd_boolean
5460 bfd_elf32_arm_process_before_allocation (bfd *abfd,
5461 struct bfd_link_info *link_info)
5462 {
5463 Elf_Internal_Shdr *symtab_hdr;
5464 Elf_Internal_Rela *internal_relocs = NULL;
5465 Elf_Internal_Rela *irel, *irelend;
5466 bfd_byte *contents = NULL;
5467
5468 asection *sec;
5469 struct elf32_arm_link_hash_table *globals;
5470
5471 /* If we are only performing a partial link do not bother
5472 to construct any glue. */
5473 if (link_info->relocatable)
5474 return TRUE;
5475
5476 /* Here we have a bfd that is to be included in the link. We have a
5477 hook to do reloc rummaging, before section sizes are nailed down. */
5478 globals = elf32_arm_hash_table (link_info);
5479 BFD_ASSERT (globals != NULL);
5480
5481 check_use_blx (globals);
5482
5483 if (globals->byteswap_code && !bfd_big_endian (abfd))
5484 {
5485 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
5486 abfd);
5487 return FALSE;
5488 }
5489
5490 /* PR 5398: If we have not decided to include any loadable sections in
5491 the output then we will not have a glue owner bfd. This is OK, it
5492 just means that there is nothing else for us to do here. */
5493 if (globals->bfd_of_glue_owner == NULL)
5494 return TRUE;
5495
5496 /* Rummage around all the relocs and map the glue vectors. */
5497 sec = abfd->sections;
5498
5499 if (sec == NULL)
5500 return TRUE;
5501
5502 for (; sec != NULL; sec = sec->next)
5503 {
5504 if (sec->reloc_count == 0)
5505 continue;
5506
5507 if ((sec->flags & SEC_EXCLUDE) != 0)
5508 continue;
5509
5510 symtab_hdr = & elf_symtab_hdr (abfd);
5511
5512 /* Load the relocs. */
5513 internal_relocs
5514 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
5515
5516 if (internal_relocs == NULL)
5517 goto error_return;
5518
5519 irelend = internal_relocs + sec->reloc_count;
5520 for (irel = internal_relocs; irel < irelend; irel++)
5521 {
5522 long r_type;
5523 unsigned long r_index;
5524
5525 struct elf_link_hash_entry *h;
5526
5527 r_type = ELF32_R_TYPE (irel->r_info);
5528 r_index = ELF32_R_SYM (irel->r_info);
5529
5530 /* These are the only relocation types we care about. */
5531 if ( r_type != R_ARM_PC24
5532 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
5533 continue;
5534
5535 /* Get the section contents if we haven't done so already. */
5536 if (contents == NULL)
5537 {
5538 /* Get cached copy if it exists. */
5539 if (elf_section_data (sec)->this_hdr.contents != NULL)
5540 contents = elf_section_data (sec)->this_hdr.contents;
5541 else
5542 {
5543 /* Go get them off disk. */
5544 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5545 goto error_return;
5546 }
5547 }
5548
5549 if (r_type == R_ARM_V4BX)
5550 {
5551 int reg;
5552
5553 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
5554 record_arm_bx_glue (link_info, reg);
5555 continue;
5556 }
5557
5558 /* If the relocation is not against a symbol it cannot concern us. */
5559 h = NULL;
5560
5561 /* We don't care about local symbols. */
5562 if (r_index < symtab_hdr->sh_info)
5563 continue;
5564
5565 /* This is an external symbol. */
5566 r_index -= symtab_hdr->sh_info;
5567 h = (struct elf_link_hash_entry *)
5568 elf_sym_hashes (abfd)[r_index];
5569
5570 /* If the relocation is against a static symbol it must be within
5571 the current section and so cannot be a cross ARM/Thumb relocation. */
5572 if (h == NULL)
5573 continue;
5574
5575 /* If the call will go through a PLT entry then we do not need
5576 glue. */
5577 if (globals->splt != NULL && h->plt.offset != (bfd_vma) -1)
5578 continue;
5579
5580 switch (r_type)
5581 {
5582 case R_ARM_PC24:
5583 /* This one is a call from arm code. We need to look up
5584 the target of the call. If it is a thumb target, we
5585 insert glue. */
5586 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
5587 record_arm_to_thumb_glue (link_info, h);
5588 break;
5589
5590 default:
5591 abort ();
5592 }
5593 }
5594
5595 if (contents != NULL
5596 && elf_section_data (sec)->this_hdr.contents != contents)
5597 free (contents);
5598 contents = NULL;
5599
5600 if (internal_relocs != NULL
5601 && elf_section_data (sec)->relocs != internal_relocs)
5602 free (internal_relocs);
5603 internal_relocs = NULL;
5604 }
5605
5606 return TRUE;
5607
5608 error_return:
5609 if (contents != NULL
5610 && elf_section_data (sec)->this_hdr.contents != contents)
5611 free (contents);
5612 if (internal_relocs != NULL
5613 && elf_section_data (sec)->relocs != internal_relocs)
5614 free (internal_relocs);
5615
5616 return FALSE;
5617 }
5618 #endif
5619
5620
5621 /* Initialise maps of ARM/Thumb/data for input BFDs. */
5622
5623 void
5624 bfd_elf32_arm_init_maps (bfd *abfd)
5625 {
5626 Elf_Internal_Sym *isymbuf;
5627 Elf_Internal_Shdr *hdr;
5628 unsigned int i, localsyms;
5629
5630 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
5631 if (! is_arm_elf (abfd))
5632 return;
5633
5634 if ((abfd->flags & DYNAMIC) != 0)
5635 return;
5636
5637 hdr = & elf_symtab_hdr (abfd);
5638 localsyms = hdr->sh_info;
5639
5640 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
5641 should contain the number of local symbols, which should come before any
5642 global symbols. Mapping symbols are always local. */
5643 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
5644 NULL);
5645
5646 /* No internal symbols read? Skip this BFD. */
5647 if (isymbuf == NULL)
5648 return;
5649
5650 for (i = 0; i < localsyms; i++)
5651 {
5652 Elf_Internal_Sym *isym = &isymbuf[i];
5653 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
5654 const char *name;
5655
5656 if (sec != NULL
5657 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
5658 {
5659 name = bfd_elf_string_from_elf_section (abfd,
5660 hdr->sh_link, isym->st_name);
5661
5662 if (bfd_is_arm_special_symbol_name (name,
5663 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
5664 elf32_arm_section_map_add (sec, name[1], isym->st_value);
5665 }
5666 }
5667 }
5668
5669
5670 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
5671 say what they wanted. */
5672
5673 void
5674 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
5675 {
5676 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5677 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5678
5679 if (globals == NULL)
5680 return;
5681
5682 if (globals->fix_cortex_a8 == -1)
5683 {
5684 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
5685 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
5686 && (out_attr[Tag_CPU_arch_profile].i == 'A'
5687 || out_attr[Tag_CPU_arch_profile].i == 0))
5688 globals->fix_cortex_a8 = 1;
5689 else
5690 globals->fix_cortex_a8 = 0;
5691 }
5692 }
5693
5694
5695 void
5696 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
5697 {
5698 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5699 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5700
5701 if (globals == NULL)
5702 return;
5703 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
5704 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
5705 {
5706 switch (globals->vfp11_fix)
5707 {
5708 case BFD_ARM_VFP11_FIX_DEFAULT:
5709 case BFD_ARM_VFP11_FIX_NONE:
5710 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5711 break;
5712
5713 default:
5714 /* Give a warning, but do as the user requests anyway. */
5715 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
5716 "workaround is not necessary for target architecture"), obfd);
5717 }
5718 }
5719 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
5720 /* For earlier architectures, we might need the workaround, but do not
5721 enable it by default. If users are running with broken hardware, they
5722 must enable the erratum fix explicitly. */
5723 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5724 }
5725
5726
5727 enum bfd_arm_vfp11_pipe
5728 {
5729 VFP11_FMAC,
5730 VFP11_LS,
5731 VFP11_DS,
5732 VFP11_BAD
5733 };
5734
5735 /* Return a VFP register number. This is encoded as RX:X for single-precision
5736 registers, or X:RX for double-precision registers, where RX is the group of
5737 four bits in the instruction encoding and X is the single extension bit.
5738 RX and X fields are specified using their lowest (starting) bit. The return
5739 value is:
5740
5741 0...31: single-precision registers s0...s31
5742 32...63: double-precision registers d0...d31.
5743
5744 Although X should be zero for VFP11 (encoding d0...d15 only), we might
5745 encounter VFP3 instructions, so we allow the full range for DP registers. */
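/* For illustration (values chosen arbitrarily): if the four-bit field at RX
holds 0b0101 and the extension bit at X is 1, a single-precision operand
decodes to (5 << 1) | 1 = 11, i.e. s11, while a double-precision operand
decodes to (5 | (1 << 4)) + 32 = 53, i.e. d21.  */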
5746
5747 static unsigned int
5748 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
5749 unsigned int x)
5750 {
5751 if (is_double)
5752 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
5753 else
5754 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
5755 }
5756
5757 /* Set bits in *WMASK according to a register number REG as encoded by
5758 bfd_arm_vfp11_regno(). Ignore d16-d31. */
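/* For example: s5 (REG == 5) sets bit 5 only, whereas d3 (REG == 35) sets
bits 6 and 7, i.e. the two single-precision halves s6 and s7 that overlay
d3.  Registers d16-d31 (REG >= 48) lie outside the VFP11 register file and
are ignored.  */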
5759
5760 static void
5761 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
5762 {
5763 if (reg < 32)
5764 *wmask |= 1 << reg;
5765 else if (reg < 48)
5766 *wmask |= 3 << ((reg - 32) * 2);
5767 }
5768
5769 /* Return TRUE if WMASK overwrites anything in REGS. */
5770
5771 static bfd_boolean
5772 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
5773 {
5774 int i;
5775
5776 for (i = 0; i < numregs; i++)
5777 {
5778 unsigned int reg = regs[i];
5779
5780 if (reg < 32 && (wmask & (1 << reg)) != 0)
5781 return TRUE;
5782
5783 reg -= 32;
5784
5785 if (reg >= 16)
5786 continue;
5787
5788 if ((wmask & (3 << (reg * 2))) != 0)
5789 return TRUE;
5790 }
5791
5792 return FALSE;
5793 }
5794
5795 /* In this function, we're interested in two things: finding input registers
5796 for VFP data-processing instructions, and finding the set of registers which
5797 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
5798 hold the written set, so FLDM etc. are easy to deal with (we're only
5799 interested in 32 SP registers or 16 DP registers, due to the VFP version
5800 implemented by the chip in question). DP registers are marked by setting
5801 both SP registers in the write mask. */
5802
5803 static enum bfd_arm_vfp11_pipe
5804 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
5805 int *numregs)
5806 {
5807 enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
5808 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
5809
5810 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
5811 {
5812 unsigned int pqrs;
5813 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5814 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5815
5816 pqrs = ((insn & 0x00800000) >> 20)
5817 | ((insn & 0x00300000) >> 19)
5818 | ((insn & 0x00000040) >> 6);
5819
5820 switch (pqrs)
5821 {
5822 case 0: /* fmac[sd]. */
5823 case 1: /* fnmac[sd]. */
5824 case 2: /* fmsc[sd]. */
5825 case 3: /* fnmsc[sd]. */
5826 vpipe = VFP11_FMAC;
5827 bfd_arm_vfp11_write_mask (destmask, fd);
5828 regs[0] = fd;
5829 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5830 regs[2] = fm;
5831 *numregs = 3;
5832 break;
5833
5834 case 4: /* fmul[sd]. */
5835 case 5: /* fnmul[sd]. */
5836 case 6: /* fadd[sd]. */
5837 case 7: /* fsub[sd]. */
5838 vpipe = VFP11_FMAC;
5839 goto vfp_binop;
5840
5841 case 8: /* fdiv[sd]. */
5842 vpipe = VFP11_DS;
5843 vfp_binop:
5844 bfd_arm_vfp11_write_mask (destmask, fd);
5845 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5846 regs[1] = fm;
5847 *numregs = 2;
5848 break;
5849
5850 case 15: /* extended opcode. */
5851 {
5852 unsigned int extn = ((insn >> 15) & 0x1e)
5853 | ((insn >> 7) & 1);
5854
5855 switch (extn)
5856 {
5857 case 0: /* fcpy[sd]. */
5858 case 1: /* fabs[sd]. */
5859 case 2: /* fneg[sd]. */
5860 case 8: /* fcmp[sd]. */
5861 case 9: /* fcmpe[sd]. */
5862 case 10: /* fcmpz[sd]. */
5863 case 11: /* fcmpez[sd]. */
5864 case 16: /* fuito[sd]. */
5865 case 17: /* fsito[sd]. */
5866 case 24: /* ftoui[sd]. */
5867 case 25: /* ftouiz[sd]. */
5868 case 26: /* ftosi[sd]. */
5869 case 27: /* ftosiz[sd]. */
5870 /* These instructions will not bounce due to underflow. */
5871 *numregs = 0;
5872 vpipe = VFP11_FMAC;
5873 break;
5874
5875 case 3: /* fsqrt[sd]. */
5876 /* fsqrt cannot underflow, but it can (perhaps) overwrite
5877 registers to cause the erratum in previous instructions. */
5878 bfd_arm_vfp11_write_mask (destmask, fd);
5879 vpipe = VFP11_DS;
5880 break;
5881
5882 case 15: /* fcvt{ds,sd}. */
5883 {
5884 int rnum = 0;
5885
5886 bfd_arm_vfp11_write_mask (destmask, fd);
5887
5888 /* Only FCVTSD can underflow. */
5889 if ((insn & 0x100) != 0)
5890 regs[rnum++] = fm;
5891
5892 *numregs = rnum;
5893
5894 vpipe = VFP11_FMAC;
5895 }
5896 break;
5897
5898 default:
5899 return VFP11_BAD;
5900 }
5901 }
5902 break;
5903
5904 default:
5905 return VFP11_BAD;
5906 }
5907 }
5908 /* Two-register transfer. */
5909 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
5910 {
5911 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5912
5913 if ((insn & 0x100000) == 0)
5914 {
5915 if (is_double)
5916 bfd_arm_vfp11_write_mask (destmask, fm);
5917 else
5918 {
5919 bfd_arm_vfp11_write_mask (destmask, fm);
5920 bfd_arm_vfp11_write_mask (destmask, fm + 1);
5921 }
5922 }
5923
5924 vpipe = VFP11_LS;
5925 }
5926 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
5927 {
5928 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5929 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
5930
5931 switch (puw)
5932 {
5933 case 0: /* Two-reg transfer. We should catch these above. */
5934 abort ();
5935
5936 case 2: /* fldm[sdx]. */
5937 case 3:
5938 case 5:
5939 {
5940 unsigned int i, offset = insn & 0xff;
5941
5942 if (is_double)
5943 offset >>= 1;
5944
5945 for (i = fd; i < fd + offset; i++)
5946 bfd_arm_vfp11_write_mask (destmask, i);
5947 }
5948 break;
5949
5950 case 4: /* fld[sd]. */
5951 case 6:
5952 bfd_arm_vfp11_write_mask (destmask, fd);
5953 break;
5954
5955 default:
5956 return VFP11_BAD;
5957 }
5958
5959 vpipe = VFP11_LS;
5960 }
5961 /* Single-register transfer. Note L==0. */
5962 else if ((insn & 0x0f100e10) == 0x0e000a10)
5963 {
5964 unsigned int opcode = (insn >> 21) & 7;
5965 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
5966
5967 switch (opcode)
5968 {
5969 case 0: /* fmsr/fmdlr. */
5970 case 1: /* fmdhr. */
5971 /* Mark fmdhr and fmdlr as writing to the whole of the DP
5972 destination register. I don't know if this is exactly right,
5973 but it is the conservative choice. */
5974 bfd_arm_vfp11_write_mask (destmask, fn);
5975 break;
5976
5977 case 7: /* fmxr. */
5978 break;
5979 }
5980
5981 vpipe = VFP11_LS;
5982 }
5983
5984 return vpipe;
5985 }
5986
5987
5988 static int elf32_arm_compare_mapping (const void * a, const void * b);
5989
5990
5991 /* Look for potentially-troublesome code sequences which might trigger the
5992 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
5993 (available from ARM) for details of the erratum. A short version is
5994 described in ld.texinfo. */
5995
5996 bfd_boolean
5997 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
5998 {
5999 asection *sec;
6000 bfd_byte *contents = NULL;
6001 int state = 0;
6002 int regs[3], numregs = 0;
6003 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6004 int use_vector = (globals != NULL && globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
6005
6006 if (globals == NULL)
6007 return FALSE;
6008
6009 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
6010 The states transition as follows:
6011
6012 0 -> 1 (vector) or 0 -> 2 (scalar)
6013 A VFP FMAC-pipeline instruction has been seen. Fill
6014 regs[0]..regs[numregs-1] with its input operands. Remember this
6015 instruction in 'first_fmac'.
6016
6017 1 -> 2
6018 Any instruction, except for a VFP instruction which overwrites
6019 regs[*].
6020
6021 1 -> 3 [ -> 0 ] or
6022 2 -> 3 [ -> 0 ]
6023 A VFP instruction has been seen which overwrites any of regs[*].
6024 We must make a veneer! Reset state to 0 before examining next
6025 instruction.
6026
6027 2 -> 0
6028 If we fail to match anything in state 2, reset to state 0 and reset
6029 the instruction pointer to the instruction after 'first_fmac'.
6030
6031 If the VFP11 vector mode is in use, there must be at least two unrelated
6032 instructions between anti-dependent VFP11 instructions to properly avoid
6033 triggering the erratum, hence the use of the extra state 1. */
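/* For example (an illustrative sequence, not taken from any particular
input): "fmacs s4, s1, s2" moves the scan to state 2 (scalar mode) with s4,
s1 and s2 recorded (the accumulator is read as well); if the next
instruction is, say, "flds s1, [r0]", it overwrites a recorded register,
state 3 is reached and a veneer is recorded for the FMAC.  */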
6034
6035 /* If we are only performing a partial link do not bother
6036 to construct any glue. */
6037 if (link_info->relocatable)
6038 return TRUE;
6039
6040 /* Skip if this bfd does not correspond to an ELF image. */
6041 if (! is_arm_elf (abfd))
6042 return TRUE;
6043
6044 /* We should have chosen a fix type by the time we get here. */
6045 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
6046
6047 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
6048 return TRUE;
6049
6050 /* Skip this BFD if it corresponds to an executable or dynamic object. */
6051 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
6052 return TRUE;
6053
6054 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6055 {
6056 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
6057 struct _arm_elf_section_data *sec_data;
6058
6059 /* If we don't have executable progbits, we're not interested in this
6060 section. Also skip if section is to be excluded. */
6061 if (elf_section_type (sec) != SHT_PROGBITS
6062 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
6063 || (sec->flags & SEC_EXCLUDE) != 0
6064 || sec->sec_info_type == ELF_INFO_TYPE_JUST_SYMS
6065 || sec->output_section == bfd_abs_section_ptr
6066 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
6067 continue;
6068
6069 sec_data = elf32_arm_section_data (sec);
6070
6071 if (sec_data->mapcount == 0)
6072 continue;
6073
6074 if (elf_section_data (sec)->this_hdr.contents != NULL)
6075 contents = elf_section_data (sec)->this_hdr.contents;
6076 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
6077 goto error_return;
6078
6079 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
6080 elf32_arm_compare_mapping);
6081
6082 for (span = 0; span < sec_data->mapcount; span++)
6083 {
6084 unsigned int span_start = sec_data->map[span].vma;
6085 unsigned int span_end = (span == sec_data->mapcount - 1)
6086 ? sec->size : sec_data->map[span + 1].vma;
6087 char span_type = sec_data->map[span].type;
6088
6089 /* FIXME: Only ARM mode is supported at present. We may need to
6090 support Thumb-2 mode also at some point. */
6091 if (span_type != 'a')
6092 continue;
6093
6094 for (i = span_start; i < span_end;)
6095 {
6096 unsigned int next_i = i + 4;
6097 unsigned int insn = bfd_big_endian (abfd)
6098 ? (contents[i] << 24)
6099 | (contents[i + 1] << 16)
6100 | (contents[i + 2] << 8)
6101 | contents[i + 3]
6102 : (contents[i + 3] << 24)
6103 | (contents[i + 2] << 16)
6104 | (contents[i + 1] << 8)
6105 | contents[i];
6106 unsigned int writemask = 0;
6107 enum bfd_arm_vfp11_pipe vpipe;
6108
6109 switch (state)
6110 {
6111 case 0:
6112 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
6113 &numregs);
6114 /* I'm assuming the VFP11 erratum can trigger with denorm
6115 operands on either the FMAC or the DS pipeline. This might
6116 lead to slightly overenthusiastic veneer insertion. */
6117 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
6118 {
6119 state = use_vector ? 1 : 2;
6120 first_fmac = i;
6121 veneer_of_insn = insn;
6122 }
6123 break;
6124
6125 case 1:
6126 {
6127 int other_regs[3], other_numregs;
6128 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6129 other_regs,
6130 &other_numregs);
6131 if (vpipe != VFP11_BAD
6132 && bfd_arm_vfp11_antidependency (writemask, regs,
6133 numregs))
6134 state = 3;
6135 else
6136 state = 2;
6137 }
6138 break;
6139
6140 case 2:
6141 {
6142 int other_regs[3], other_numregs;
6143 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6144 other_regs,
6145 &other_numregs);
6146 if (vpipe != VFP11_BAD
6147 && bfd_arm_vfp11_antidependency (writemask, regs,
6148 numregs))
6149 state = 3;
6150 else
6151 {
6152 state = 0;
6153 next_i = first_fmac + 4;
6154 }
6155 }
6156 break;
6157
6158 case 3:
6159 abort (); /* Should be unreachable. */
6160 }
6161
6162 if (state == 3)
6163 {
6164 elf32_vfp11_erratum_list *newerr = (elf32_vfp11_erratum_list *)
6165 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6166
6167 elf32_arm_section_data (sec)->erratumcount += 1;
6168
6169 newerr->u.b.vfp_insn = veneer_of_insn;
6170
6171 switch (span_type)
6172 {
6173 case 'a':
6174 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
6175 break;
6176
6177 default:
6178 abort ();
6179 }
6180
6181 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
6182 first_fmac);
6183
6184 newerr->vma = -1;
6185
6186 newerr->next = sec_data->erratumlist;
6187 sec_data->erratumlist = newerr;
6188
6189 state = 0;
6190 }
6191
6192 i = next_i;
6193 }
6194 }
6195
6196 if (contents != NULL
6197 && elf_section_data (sec)->this_hdr.contents != contents)
6198 free (contents);
6199 contents = NULL;
6200 }
6201
6202 return TRUE;
6203
6204 error_return:
6205 if (contents != NULL
6206 && elf_section_data (sec)->this_hdr.contents != contents)
6207 free (contents);
6208
6209 return FALSE;
6210 }
6211
6212 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
6213 after sections have been laid out, using specially-named symbols. */
6214
6215 void
6216 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
6217 struct bfd_link_info *link_info)
6218 {
6219 asection *sec;
6220 struct elf32_arm_link_hash_table *globals;
6221 char *tmp_name;
6222
6223 if (link_info->relocatable)
6224 return;
6225
6226 /* Skip if this bfd does not correspond to an ELF image. */
6227 if (! is_arm_elf (abfd))
6228 return;
6229
6230 globals = elf32_arm_hash_table (link_info);
6231 if (globals == NULL)
6232 return;
6233
6234 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6235 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6236
6237 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6238 {
6239 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6240 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6241
6242 for (; errnode != NULL; errnode = errnode->next)
6243 {
6244 struct elf_link_hash_entry *myh;
6245 bfd_vma vma;
6246
6247 switch (errnode->type)
6248 {
6249 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6250 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6251 /* Find veneer symbol. */
6252 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6253 errnode->u.b.veneer->u.v.id);
6254
6255 myh = elf_link_hash_lookup
6256 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6257
6258 if (myh == NULL)
 {
6259 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6260 "`%s'"), abfd, tmp_name);
 break;
 }
6261
6262 vma = myh->root.u.def.section->output_section->vma
6263 + myh->root.u.def.section->output_offset
6264 + myh->root.u.def.value;
6265
6266 errnode->u.b.veneer->vma = vma;
6267 break;
6268
6269 case VFP11_ERRATUM_ARM_VENEER:
6270 case VFP11_ERRATUM_THUMB_VENEER:
6271 /* Find return location. */
6272 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6273 errnode->u.v.id);
6274
6275 myh = elf_link_hash_lookup
6276 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6277
6278 if (myh == NULL)
 {
6279 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6280 "`%s'"), abfd, tmp_name);
 break;
 }
6281
6282 vma = myh->root.u.def.section->output_section->vma
6283 + myh->root.u.def.section->output_offset
6284 + myh->root.u.def.value;
6285
6286 errnode->u.v.branch->vma = vma;
6287 break;
6288
6289 default:
6290 abort ();
6291 }
6292 }
6293 }
6294
6295 free (tmp_name);
6296 }
6297
6298
6299 /* Set target relocation values needed during linking. */
6300
6301 void
6302 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
6303 struct bfd_link_info *link_info,
6304 int target1_is_rel,
6305 char * target2_type,
6306 int fix_v4bx,
6307 int use_blx,
6308 bfd_arm_vfp11_fix vfp11_fix,
6309 int no_enum_warn, int no_wchar_warn,
6310 int pic_veneer, int fix_cortex_a8)
6311 {
6312 struct elf32_arm_link_hash_table *globals;
6313
6314 globals = elf32_arm_hash_table (link_info);
6315 if (globals == NULL)
6316 return;
6317
6318 globals->target1_is_rel = target1_is_rel;
6319 if (strcmp (target2_type, "rel") == 0)
6320 globals->target2_reloc = R_ARM_REL32;
6321 else if (strcmp (target2_type, "abs") == 0)
6322 globals->target2_reloc = R_ARM_ABS32;
6323 else if (strcmp (target2_type, "got-rel") == 0)
6324 globals->target2_reloc = R_ARM_GOT_PREL;
6325 else
6326 {
6327 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
6328 target2_type);
6329 }
6330 globals->fix_v4bx = fix_v4bx;
6331 globals->use_blx |= use_blx;
6332 globals->vfp11_fix = vfp11_fix;
6333 globals->pic_veneer = pic_veneer;
6334 globals->fix_cortex_a8 = fix_cortex_a8;
6335
6336 BFD_ASSERT (is_arm_elf (output_bfd));
6337 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
6338 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
6339 }
6340
6341 /* Replace the target offset of a Thumb bl or b.w instruction. */
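/* The 25-bit offset is scattered over the two halfwords as S:imm10 in the
first and J1:J2:imm11 in the second, each J bit being the complement of the
corresponding offset bit XORed with the sign, as in the Thumb-2 BL/B.W
encoding.  */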
6342
6343 static void
6344 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
6345 {
6346 bfd_vma upper;
6347 bfd_vma lower;
6348 int reloc_sign;
6349
6350 BFD_ASSERT ((offset & 1) == 0);
6351
6352 upper = bfd_get_16 (abfd, insn);
6353 lower = bfd_get_16 (abfd, insn + 2);
6354 reloc_sign = (offset < 0) ? 1 : 0;
6355 upper = (upper & ~(bfd_vma) 0x7ff)
6356 | ((offset >> 12) & 0x3ff)
6357 | (reloc_sign << 10);
6358 lower = (lower & ~(bfd_vma) 0x2fff)
6359 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
6360 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
6361 | ((offset >> 1) & 0x7ff);
6362 bfd_put_16 (abfd, upper, insn);
6363 bfd_put_16 (abfd, lower, insn + 2);
6364 }
6365
6366 /* Thumb code calling an ARM function. */
6367
6368 static int
6369 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
6370 const char * name,
6371 bfd * input_bfd,
6372 bfd * output_bfd,
6373 asection * input_section,
6374 bfd_byte * hit_data,
6375 asection * sym_sec,
6376 bfd_vma offset,
6377 bfd_signed_vma addend,
6378 bfd_vma val,
6379 char **error_message)
6380 {
6381 asection * s = 0;
6382 bfd_vma my_offset;
6383 long int ret_offset;
6384 struct elf_link_hash_entry * myh;
6385 struct elf32_arm_link_hash_table * globals;
6386
6387 myh = find_thumb_glue (info, name, error_message);
6388 if (myh == NULL)
6389 return FALSE;
6390
6391 globals = elf32_arm_hash_table (info);
6392 BFD_ASSERT (globals != NULL);
6393 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6394
6395 my_offset = myh->root.u.def.value;
6396
6397 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6398 THUMB2ARM_GLUE_SECTION_NAME);
6399
6400 BFD_ASSERT (s != NULL);
6401 BFD_ASSERT (s->contents != NULL);
6402 BFD_ASSERT (s->output_section != NULL);
6403
6404 if ((my_offset & 0x01) == 0x01)
6405 {
6406 if (sym_sec != NULL
6407 && sym_sec->owner != NULL
6408 && !INTERWORK_FLAG (sym_sec->owner))
6409 {
6410 (*_bfd_error_handler)
6411 (_("%B(%s): warning: interworking not enabled.\n"
6412 " first occurrence: %B: thumb call to arm"),
6413 sym_sec->owner, input_bfd, name);
6414
6415 return FALSE;
6416 }
6417
6418 --my_offset;
6419 myh->root.u.def.value = my_offset;
6420
6421 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
6422 s->contents + my_offset);
6423
6424 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
6425 s->contents + my_offset + 2);
6426
6427 ret_offset =
6428 /* Address of destination of the stub. */
6429 ((bfd_signed_vma) val)
6430 - ((bfd_signed_vma)
6431 /* Offset from the start of the current section
6432 to the start of the stubs. */
6433 (s->output_offset
6434 /* Offset of the start of this stub from the start of the stubs. */
6435 + my_offset
6436 /* Address of the start of the current section. */
6437 + s->output_section->vma)
6438 /* The branch instruction is 4 bytes into the stub. */
6439 + 4
6440 /* ARM branches work from the pc of the instruction + 8. */
6441 + 8);
6442
6443 put_arm_insn (globals, output_bfd,
6444 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
6445 s->contents + my_offset + 4);
6446 }
6447
6448 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
6449
6450 /* Now go back and fix up the original BL insn to point to here. */
6451 ret_offset =
6452 /* Address of where the stub is located. */
6453 (s->output_section->vma + s->output_offset + my_offset)
6454 /* Address of where the BL is located. */
6455 - (input_section->output_section->vma + input_section->output_offset
6456 + offset)
6457 /* Addend in the relocation. */
6458 - addend
6459 /* Biasing for PC-relative addressing. */
6460 - 8;
6461
6462 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
6463
6464 return TRUE;
6465 }
6466
6467 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
6468
6469 static struct elf_link_hash_entry *
6470 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
6471 const char * name,
6472 bfd * input_bfd,
6473 bfd * output_bfd,
6474 asection * sym_sec,
6475 bfd_vma val,
6476 asection * s,
6477 char ** error_message)
6478 {
6479 bfd_vma my_offset;
6480 long int ret_offset;
6481 struct elf_link_hash_entry * myh;
6482 struct elf32_arm_link_hash_table * globals;
6483
6484 myh = find_arm_glue (info, name, error_message);
6485 if (myh == NULL)
6486 return NULL;
6487
6488 globals = elf32_arm_hash_table (info);
6489 BFD_ASSERT (globals != NULL);
6490 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6491
6492 my_offset = myh->root.u.def.value;
6493
6494 if ((my_offset & 0x01) == 0x01)
6495 {
6496 if (sym_sec != NULL
6497 && sym_sec->owner != NULL
6498 && !INTERWORK_FLAG (sym_sec->owner))
6499 {
6500 (*_bfd_error_handler)
6501 (_("%B(%s): warning: interworking not enabled.\n"
6502 " first occurrence: %B: arm call to thumb"),
6503 sym_sec->owner, input_bfd, name);
6504 }
6505
6506 --my_offset;
6507 myh->root.u.def.value = my_offset;
6508
6509 if (info->shared || globals->root.is_relocatable_executable
6510 || globals->pic_veneer)
6511 {
6512 /* For relocatable objects we can't use absolute addresses,
6513 so construct the address from a relative offset. */
6514 /* TODO: If the offset is small it's probably worth
6515 constructing the address with adds. */
6516 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
6517 s->contents + my_offset);
6518 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
6519 s->contents + my_offset + 4);
6520 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
6521 s->contents + my_offset + 8);
6522 /* Adjust the offset by 4 for the position of the add,
6523 and 8 for the pipeline offset. */
6524 ret_offset = (val - (s->output_offset
6525 + s->output_section->vma
6526 + my_offset + 12))
6527 | 1;
6528 bfd_put_32 (output_bfd, ret_offset,
6529 s->contents + my_offset + 12);
6530 }
6531 else if (globals->use_blx)
6532 {
6533 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
6534 s->contents + my_offset);
6535
6536 /* It's a thumb address. Add the low order bit. */
6537 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
6538 s->contents + my_offset + 4);
6539 }
6540 else
6541 {
6542 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
6543 s->contents + my_offset);
6544
6545 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
6546 s->contents + my_offset + 4);
6547
6548 /* It's a thumb address. Add the low order bit. */
6549 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
6550 s->contents + my_offset + 8);
6551
6552 my_offset += 12;
6553 }
6554 }
6555
6556 BFD_ASSERT (my_offset <= globals->arm_glue_size);
6557
6558 return myh;
6559 }
6560
6561 /* Arm code calling a Thumb function. */
6562
6563 static int
6564 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
6565 const char * name,
6566 bfd * input_bfd,
6567 bfd * output_bfd,
6568 asection * input_section,
6569 bfd_byte * hit_data,
6570 asection * sym_sec,
6571 bfd_vma offset,
6572 bfd_signed_vma addend,
6573 bfd_vma val,
6574 char **error_message)
6575 {
6576 unsigned long int tmp;
6577 bfd_vma my_offset;
6578 asection * s;
6579 long int ret_offset;
6580 struct elf_link_hash_entry * myh;
6581 struct elf32_arm_link_hash_table * globals;
6582
6583 globals = elf32_arm_hash_table (info);
6584 BFD_ASSERT (globals != NULL);
6585 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6586
6587 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6588 ARM2THUMB_GLUE_SECTION_NAME);
6589 BFD_ASSERT (s != NULL);
6590 BFD_ASSERT (s->contents != NULL);
6591 BFD_ASSERT (s->output_section != NULL);
6592
6593 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
6594 sym_sec, val, s, error_message);
6595 if (!myh)
6596 return FALSE;
6597
6598 my_offset = myh->root.u.def.value;
6599 tmp = bfd_get_32 (input_bfd, hit_data);
6600 tmp = tmp & 0xFF000000;
6601
6602 /* Somehow these are both 4 too far, so subtract 8. */
6603 ret_offset = (s->output_offset
6604 + my_offset
6605 + s->output_section->vma
6606 - (input_section->output_offset
6607 + input_section->output_section->vma
6608 + offset + addend)
6609 - 8);
6610
6611 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
6612
6613 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
6614
6615 return TRUE;
6616 }
6617
6618 /* Populate Arm stub for an exported Thumb function. */
6619
6620 static bfd_boolean
6621 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
6622 {
6623 struct bfd_link_info * info = (struct bfd_link_info *) inf;
6624 asection * s;
6625 struct elf_link_hash_entry * myh;
6626 struct elf32_arm_link_hash_entry *eh;
6627 struct elf32_arm_link_hash_table * globals;
6628 asection *sec;
6629 bfd_vma val;
6630 char *error_message;
6631
6632 eh = elf32_arm_hash_entry (h);
6633 /* Allocate stubs for exported Thumb functions on v4t. */
6634 if (eh->export_glue == NULL)
6635 return TRUE;
6636
6637 globals = elf32_arm_hash_table (info);
6638 BFD_ASSERT (globals != NULL);
6639 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6640
6641 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6642 ARM2THUMB_GLUE_SECTION_NAME);
6643 BFD_ASSERT (s != NULL);
6644 BFD_ASSERT (s->contents != NULL);
6645 BFD_ASSERT (s->output_section != NULL);
6646
6647 sec = eh->export_glue->root.u.def.section;
6648
6649 BFD_ASSERT (sec->output_section != NULL);
6650
6651 val = eh->export_glue->root.u.def.value + sec->output_offset
6652 + sec->output_section->vma;
6653
6654 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
6655 h->root.u.def.section->owner,
6656 globals->obfd, sec, val, s,
6657 &error_message);
6658 BFD_ASSERT (myh);
6659 return TRUE;
6660 }
6661
6662 /* Populate ARMv4 BX veneers. Returns the absolute address of the veneer. */
6663
6664 static bfd_vma
6665 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
6666 {
6667 bfd_byte *p;
6668 bfd_vma glue_addr;
6669 asection *s;
6670 struct elf32_arm_link_hash_table *globals;
6671
6672 globals = elf32_arm_hash_table (info);
6673 BFD_ASSERT (globals != NULL);
6674 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6675
6676 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6677 ARM_BX_GLUE_SECTION_NAME);
6678 BFD_ASSERT (s != NULL);
6679 BFD_ASSERT (s->contents != NULL);
6680 BFD_ASSERT (s->output_section != NULL);
6681
6682 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
6683
6684 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
6685
6686 if ((globals->bx_glue_offset[reg] & 1) == 0)
6687 {
6688 p = s->contents + glue_addr;
6689 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
6690 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
6691 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
6692 globals->bx_glue_offset[reg] |= 1;
6693 }
6694
6695 return glue_addr + s->output_section->vma + s->output_offset;
6696 }
6697
6698 /* Generate Arm stubs for exported Thumb symbols. */
6699 static void
6700 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
6701 struct bfd_link_info *link_info)
6702 {
6703 struct elf32_arm_link_hash_table * globals;
6704
6705 if (link_info == NULL)
6706 /* Ignore this if we are not called by the ELF backend linker. */
6707 return;
6708
6709 globals = elf32_arm_hash_table (link_info);
6710 if (globals == NULL)
6711 return;
6712
6713 /* If blx is available then exported Thumb symbols are OK and there is
6714 nothing to do. */
6715 if (globals->use_blx)
6716 return;
6717
6718 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
6719 link_info);
6720 }
6721
6722 /* Some relocations map to different relocations depending on the
6723 target. Return the real relocation. */
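/* For instance, R_ARM_TARGET1 resolves to R_ARM_REL32 or R_ARM_ABS32
depending on the linker's --target1-rel/--target1-abs setting, while
R_ARM_TARGET2 follows the --target2= option recorded in target2_reloc.  */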
6724
6725 static int
6726 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
6727 int r_type)
6728 {
6729 switch (r_type)
6730 {
6731 case R_ARM_TARGET1:
6732 if (globals->target1_is_rel)
6733 return R_ARM_REL32;
6734 else
6735 return R_ARM_ABS32;
6736
6737 case R_ARM_TARGET2:
6738 return globals->target2_reloc;
6739
6740 default:
6741 return r_type;
6742 }
6743 }
6744
6745 /* Return the base VMA address which should be subtracted from real addresses
6746 when resolving @dtpoff relocation.
6747 This is PT_TLS segment p_vaddr. */
6748
6749 static bfd_vma
6750 dtpoff_base (struct bfd_link_info *info)
6751 {
6752 /* If tls_sec is NULL, we should have signalled an error already. */
6753 if (elf_hash_table (info)->tls_sec == NULL)
6754 return 0;
6755 return elf_hash_table (info)->tls_sec->vma;
6756 }
6757
6758 /* Return the relocation value for @tpoff relocation
6759 if STT_TLS virtual address is ADDRESS. */
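/* For example (assuming the usual 8-byte TCB_SIZE and a TLS segment aligned
to 8 bytes): the base is 8, so a thread-local variable at tls_sec->vma + 0x10
yields a tpoff of 0x18.  */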
6760
6761 static bfd_vma
6762 tpoff (struct bfd_link_info *info, bfd_vma address)
6763 {
6764 struct elf_link_hash_table *htab = elf_hash_table (info);
6765 bfd_vma base;
6766
6767 /* If tls_sec is NULL, we should have signalled an error already. */
6768 if (htab->tls_sec == NULL)
6769 return 0;
6770 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
6771 return address - htab->tls_sec->vma + base;
6772 }
6773
6774 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
6775 VALUE is the relocation value. */
6776
6777 static bfd_reloc_status_type
6778 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
6779 {
6780 if (value > 0xfff)
6781 return bfd_reloc_overflow;
6782
6783 value |= bfd_get_32 (abfd, data) & 0xfffff000;
6784 bfd_put_32 (abfd, value, data);
6785 return bfd_reloc_ok;
6786 }
6787
6788 /* For a given value of n, calculate the value of G_n as required to
6789 deal with group relocations. We return it in the form of an
6790 encoded constant-and-rotation, together with the final residual. If n is
6791 specified as less than zero, then final_residual is filled with the
6792 input value and no further action is performed. */
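/* A worked example (numbers chosen for illustration): for VALUE == 0x1234
and N == 0, the topmost non-zero 2-bit chunk of the residual starts at
bit 12, so the shift is 6; G_0 is 0x1234 & (0xff << 6) == 0x1200, which is
returned encoded as 0xd48 (constant 0x48, rotation 26), and the final
residual is 0x34.  */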
6793
6794 static bfd_vma
6795 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
6796 {
6797 int current_n;
6798 bfd_vma g_n;
6799 bfd_vma encoded_g_n = 0;
6800 bfd_vma residual = value; /* Also known as Y_n. */
6801
6802 for (current_n = 0; current_n <= n; current_n++)
6803 {
6804 int shift;
6805
6806 /* Calculate which part of the value to mask. */
6807 if (residual == 0)
6808 shift = 0;
6809 else
6810 {
6811 int msb;
6812
6813 /* Determine the most significant bit in the residual and
6814 align the resulting value to a 2-bit boundary. */
6815 for (msb = 30; msb >= 0; msb -= 2)
6816 if (residual & (3 << msb))
6817 break;
6818
6819 /* The desired shift is now (msb - 6), or zero, whichever
6820 is the greater. */
6821 shift = msb - 6;
6822 if (shift < 0)
6823 shift = 0;
6824 }
6825
6826 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
6827 g_n = residual & (0xff << shift);
6828 encoded_g_n = (g_n >> shift)
6829 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
6830
6831 /* Calculate the residual for the next time around. */
6832 residual &= ~g_n;
6833 }
6834
6835 *final_residual = residual;
6836
6837 return encoded_g_n;
6838 }
6839
6840 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
6841 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
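/* Explanatory note: the data-processing opcode occupies bits 24:21, which is
what the 0x1e00000 mask extracts; ADD is opcode 0b0100 (1 << 23 within that
field) and SUB is opcode 0b0010 (1 << 22).  */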
6842
6843 static int
6844 identify_add_or_sub (bfd_vma insn)
6845 {
6846 int opcode = insn & 0x1e00000;
6847
6848 if (opcode == 1 << 23) /* ADD */
6849 return 1;
6850
6851 if (opcode == 1 << 22) /* SUB */
6852 return -1;
6853
6854 return 0;
6855 }
6856
6857 /* Perform a relocation as part of a final link. */
6858
6859 static bfd_reloc_status_type
6860 elf32_arm_final_link_relocate (reloc_howto_type * howto,
6861 bfd * input_bfd,
6862 bfd * output_bfd,
6863 asection * input_section,
6864 bfd_byte * contents,
6865 Elf_Internal_Rela * rel,
6866 bfd_vma value,
6867 struct bfd_link_info * info,
6868 asection * sym_sec,
6869 const char * sym_name,
6870 int sym_flags,
6871 struct elf_link_hash_entry * h,
6872 bfd_boolean * unresolved_reloc_p,
6873 char ** error_message)
6874 {
6875 unsigned long r_type = howto->type;
6876 unsigned long r_symndx;
6877 bfd_byte * hit_data = contents + rel->r_offset;
6878 bfd * dynobj = NULL;
6879 bfd_vma * local_got_offsets;
6880 asection * sgot = NULL;
6881 asection * splt = NULL;
6882 asection * sreloc = NULL;
6883 bfd_vma addend;
6884 bfd_signed_vma signed_addend;
6885 struct elf32_arm_link_hash_table * globals;
6886
6887 globals = elf32_arm_hash_table (info);
6888 if (globals == NULL)
6889 return bfd_reloc_notsupported;
6890
6891 BFD_ASSERT (is_arm_elf (input_bfd));
6892
6893 /* Some relocation types map to different relocations depending on the
6894 target. We pick the right one here. */
6895 r_type = arm_real_reloc_type (globals, r_type);
6896 if (r_type != howto->type)
6897 howto = elf32_arm_howto_from_type (r_type);
6898
6899 /* If the start address has been set, then set the EF_ARM_HASENTRY
6900 flag. Setting this more than once is redundant, but the cost is
6901 not too high, and it keeps the code simple.
6902
6903 The test is done here, rather than somewhere else, because the
6904 start address is only set just before the final link commences.
6905
6906 Note - if the user deliberately sets a start address of 0, the
6907 flag will not be set. */
6908 if (bfd_get_start_address (output_bfd) != 0)
6909 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
6910
6911 dynobj = elf_hash_table (info)->dynobj;
6912 if (dynobj)
6913 {
6914 sgot = bfd_get_section_by_name (dynobj, ".got");
6915 splt = bfd_get_section_by_name (dynobj, ".plt");
6916 }
6917 local_got_offsets = elf_local_got_offsets (input_bfd);
6918 r_symndx = ELF32_R_SYM (rel->r_info);
6919
6920 if (globals->use_rel)
6921 {
6922 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
6923
6924 if (addend & ((howto->src_mask + 1) >> 1))
6925 {
6926 signed_addend = -1;
6927 signed_addend &= ~ howto->src_mask;
6928 signed_addend |= addend;
6929 }
6930 else
6931 signed_addend = addend;
6932 }
6933 else
6934 addend = signed_addend = rel->r_addend;
6935
6936 switch (r_type)
6937 {
6938 case R_ARM_NONE:
6939 /* We don't need to find a value for this symbol. It's just a
6940 marker. */
6941 *unresolved_reloc_p = FALSE;
6942 return bfd_reloc_ok;
6943
6944 case R_ARM_ABS12:
6945 if (!globals->vxworks_p)
6946 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
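/* Fall through.  */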
6947
6948 case R_ARM_PC24:
6949 case R_ARM_ABS32:
6950 case R_ARM_ABS32_NOI:
6951 case R_ARM_REL32:
6952 case R_ARM_REL32_NOI:
6953 case R_ARM_CALL:
6954 case R_ARM_JUMP24:
6955 case R_ARM_XPC25:
6956 case R_ARM_PREL31:
6957 case R_ARM_PLT32:
6958 /* Handle relocations which should use the PLT entry. ABS32/REL32
6959 will use the symbol's value, which may point to a PLT entry, but we
6960 don't need to handle that here. If we created a PLT entry, all
6961 branches in this object should go to it, except if the PLT is too
6962 far away, in which case a long branch stub should be inserted. */
6963 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
6964 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
6965 && r_type != R_ARM_CALL
6966 && r_type != R_ARM_JUMP24
6967 && r_type != R_ARM_PLT32)
6968 && h != NULL
6969 && splt != NULL
6970 && h->plt.offset != (bfd_vma) -1)
6971 {
6972 /* If we've created a .plt section, and assigned a PLT entry to
6973 this function, it should not be known to bind locally. If
6974 it were, we would have cleared the PLT entry. */
6975 BFD_ASSERT (!SYMBOL_CALLS_LOCAL (info, h));
6976
6977 value = (splt->output_section->vma
6978 + splt->output_offset
6979 + h->plt.offset);
6980 *unresolved_reloc_p = FALSE;
6981 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6982 contents, rel->r_offset, value,
6983 rel->r_addend);
6984 }
6985
6986 /* When generating a shared object or relocatable executable, these
6987 relocations are copied into the output file to be resolved at
6988 run time. */
6989 if ((info->shared || globals->root.is_relocatable_executable)
6990 && (input_section->flags & SEC_ALLOC)
6991 && !(globals->vxworks_p
6992 && strcmp (input_section->output_section->name,
6993 ".tls_vars") == 0)
6994 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
6995 || !SYMBOL_CALLS_LOCAL (info, h))
6996 && (!strstr (input_section->name, STUB_SUFFIX))
6997 && (h == NULL
6998 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6999 || h->root.type != bfd_link_hash_undefweak)
7000 && r_type != R_ARM_PC24
7001 && r_type != R_ARM_CALL
7002 && r_type != R_ARM_JUMP24
7003 && r_type != R_ARM_PREL31
7004 && r_type != R_ARM_PLT32)
7005 {
7006 Elf_Internal_Rela outrel;
7007 bfd_byte *loc;
7008 bfd_boolean skip, relocate;
7009
7010 *unresolved_reloc_p = FALSE;
7011
7012 if (sreloc == NULL)
7013 {
7014 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
7015 ! globals->use_rel);
7016
7017 if (sreloc == NULL)
7018 return bfd_reloc_notsupported;
7019 }
7020
7021 skip = FALSE;
7022 relocate = FALSE;
7023
7024 outrel.r_addend = addend;
7025 outrel.r_offset =
7026 _bfd_elf_section_offset (output_bfd, info, input_section,
7027 rel->r_offset);
7028 if (outrel.r_offset == (bfd_vma) -1)
7029 skip = TRUE;
7030 else if (outrel.r_offset == (bfd_vma) -2)
7031 skip = TRUE, relocate = TRUE;
7032 outrel.r_offset += (input_section->output_section->vma
7033 + input_section->output_offset);
7034
7035 if (skip)
7036 memset (&outrel, 0, sizeof outrel);
7037 else if (h != NULL
7038 && h->dynindx != -1
7039 && (!info->shared
7040 || !info->symbolic
7041 || !h->def_regular))
7042 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
7043 else
7044 {
7045 int symbol;
7046
7047 /* This symbol is local, or marked to become local. */
7048 if (sym_flags == STT_ARM_TFUNC)
7049 value |= 1;
7050 if (globals->symbian_p)
7051 {
7052 asection *osec;
7053
7054 /* On Symbian OS, the data segment and text segment
7055 can be relocated independently. Therefore, we
7056 must indicate the segment to which this
7057 relocation is relative. The BPABI allows us to
7058 use any symbol in the right segment; we just use
7059 the section symbol as it is convenient. (We
7060 cannot use the symbol given by "h" directly as it
7061 will not appear in the dynamic symbol table.)
7062
7063 Note that the dynamic linker ignores the section
7064 symbol value, so we don't subtract osec->vma
7065 from the emitted reloc addend. */
7066 if (sym_sec)
7067 osec = sym_sec->output_section;
7068 else
7069 osec = input_section->output_section;
7070 symbol = elf_section_data (osec)->dynindx;
7071 if (symbol == 0)
7072 {
7073 struct elf_link_hash_table *htab = elf_hash_table (info);
7074
7075 if ((osec->flags & SEC_READONLY) == 0
7076 && htab->data_index_section != NULL)
7077 osec = htab->data_index_section;
7078 else
7079 osec = htab->text_index_section;
7080 symbol = elf_section_data (osec)->dynindx;
7081 }
7082 BFD_ASSERT (symbol != 0);
7083 }
7084 else
7085 /* On SVR4-ish systems, the dynamic loader cannot
7086 relocate the text and data segments independently,
7087 so the symbol does not matter. */
7088 symbol = 0;
7089 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
7090 if (globals->use_rel)
7091 relocate = TRUE;
7092 else
7093 outrel.r_addend += value;
7094 }
7095
7096 loc = sreloc->contents;
7097 loc += sreloc->reloc_count++ * RELOC_SIZE (globals);
7098 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7099
7100 /* If this reloc is against an external symbol, we do not want to
7101 fiddle with the addend. Otherwise, we need to include the symbol
7102 value so that it becomes an addend for the dynamic reloc. */
7103 if (! relocate)
7104 return bfd_reloc_ok;
7105
7106 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7107 contents, rel->r_offset, value,
7108 (bfd_vma) 0);
7109 }
7110 else switch (r_type)
7111 {
7112 case R_ARM_ABS12:
7113 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
7114
7115 case R_ARM_XPC25: /* Arm BLX instruction. */
7116 case R_ARM_CALL:
7117 case R_ARM_JUMP24:
7118 case R_ARM_PC24: /* Arm B/BL instruction. */
7119 case R_ARM_PLT32:
7120 {
7121 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
7122
7123 if (r_type == R_ARM_XPC25)
7124 {
7125 /* Check for Arm calling Arm function. */
7126 /* FIXME: Should we translate the instruction into a BL
7127 instruction instead ? */
7128 if (sym_flags != STT_ARM_TFUNC)
7129 (*_bfd_error_handler)
7130 (_("%B: Warning: Arm BLX instruction targets Arm function '%s'."),
7131 input_bfd,
7132 h ? h->root.root.string : "(local)");
7133 }
7134 else if (r_type == R_ARM_PC24)
7135 {
7136 /* Check for Arm calling Thumb function. */
7137 if (sym_flags == STT_ARM_TFUNC)
7138 {
7139 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
7140 output_bfd, input_section,
7141 hit_data, sym_sec, rel->r_offset,
7142 signed_addend, value,
7143 error_message))
7144 return bfd_reloc_ok;
7145 else
7146 return bfd_reloc_dangerous;
7147 }
7148 }
7149
7150 /* Check if a stub has to be inserted because the
7151 destination is too far or we are changing mode. */
7152 if ( r_type == R_ARM_CALL
7153 || r_type == R_ARM_JUMP24
7154 || r_type == R_ARM_PLT32)
7155 {
7156 enum elf32_arm_stub_type stub_type = arm_stub_none;
7157 struct elf32_arm_link_hash_entry *hash;
7158
7159 hash = (struct elf32_arm_link_hash_entry *) h;
7160 stub_type = arm_type_of_stub (info, input_section, rel,
7161 &sym_flags, hash,
7162 value, sym_sec,
7163 input_bfd, sym_name);
7164
7165 if (stub_type != arm_stub_none)
7166 {
7167 /* The target is out of reach, so redirect the
7168 branch to the local stub for this function. */
7169
7170 stub_entry = elf32_arm_get_stub_entry (input_section,
7171 sym_sec, h,
7172 rel, globals,
7173 stub_type);
7174 if (stub_entry != NULL)
7175 value = (stub_entry->stub_offset
7176 + stub_entry->stub_sec->output_offset
7177 + stub_entry->stub_sec->output_section->vma);
7178 }
7179 else
7180 {
7181 /* If the call goes through a PLT entry, make sure to
7182 check distance to the right destination address. */
7183 if (h != NULL
7184 && splt != NULL
7185 && h->plt.offset != (bfd_vma) -1)
7186 {
7187 value = (splt->output_section->vma
7188 + splt->output_offset
7189 + h->plt.offset);
7190 *unresolved_reloc_p = FALSE;
7191 /* The PLT entry is in ARM mode, regardless of the
7192 target function. */
7193 sym_flags = STT_FUNC;
7194 }
7195 }
7196 }
7197
7198 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
7199 where:
7200 S is the address of the symbol in the relocation.
7201 P is address of the instruction being relocated.
7202 A is the addend (extracted from the instruction) in bytes.
7203
7204 S is held in 'value'.
7205 P is the base address of the section containing the
7206 instruction plus the offset of the reloc into that
7207 section, ie:
7208 (input_section->output_section->vma +
7209 input_section->output_offset +
7210 rel->r_offset).
7211 A is the addend, converted into bytes, ie:
7212 (signed_addend * 4)
7213
7214 Note: None of these operations have knowledge of the pipeline
7215 size of the processor, thus it is up to the assembler to
7216 encode this information into the addend. */
7217 value -= (input_section->output_section->vma
7218 + input_section->output_offset);
7219 value -= rel->r_offset;
7220 if (globals->use_rel)
7221 value += (signed_addend << howto->size);
7222 else
7223 /* RELA addends do not have to be adjusted by howto->size. */
7224 value += signed_addend;
7225
7226 signed_addend = value;
7227 signed_addend >>= howto->rightshift;
7228
7229 /* A branch to an undefined weak symbol is turned into a jump to
7230 the next instruction unless a PLT entry will be created.
7231 Do the same for local undefined symbols (but not for STN_UNDEF).
7232 The jump to the next instruction is optimized as a NOP depending
7233 on the architecture. */
7234 if (h ? (h->root.type == bfd_link_hash_undefweak
7235 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7236 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
7237 {
7238 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
7239
7240 if (arch_has_arm_nop (globals))
7241 value |= 0x0320f000;
7242 else
7243 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
7244 }
7245 else
7246 {
7247 /* Perform a signed range check. */
7248 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
7249 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
7250 return bfd_reloc_overflow;
7251
7252 addend = (value & 2);
7253
7254 value = (signed_addend & howto->dst_mask)
7255 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
7256
7257 if (r_type == R_ARM_CALL)
7258 {
7259 /* Set the H bit in the BLX instruction. */
7260 if (sym_flags == STT_ARM_TFUNC)
7261 {
7262 if (addend)
7263 value |= (1 << 24);
7264 else
7265 value &= ~(bfd_vma)(1 << 24);
7266 }
7267
7268 /* Select the correct instruction (BL or BLX). */
7269 /* Only if we are not handling a BL to a stub. In this
7270 case, mode switching is performed by the stub. */
7271 if (sym_flags == STT_ARM_TFUNC && !stub_entry)
7272 value |= (1 << 28);
7273 else
7274 {
7275 value &= ~(bfd_vma)(1 << 28);
7276 value |= (1 << 24);
7277 }
7278 }
7279 }
7280 }
7281 break;
7282
7283 case R_ARM_ABS32:
7284 value += addend;
7285 if (sym_flags == STT_ARM_TFUNC)
7286 value |= 1;
7287 break;
7288
7289 case R_ARM_ABS32_NOI:
7290 value += addend;
7291 break;
7292
7293 case R_ARM_REL32:
7294 value += addend;
7295 if (sym_flags == STT_ARM_TFUNC)
7296 value |= 1;
7297 value -= (input_section->output_section->vma
7298 + input_section->output_offset + rel->r_offset);
7299 break;
7300
7301 case R_ARM_REL32_NOI:
7302 value += addend;
7303 value -= (input_section->output_section->vma
7304 + input_section->output_offset + rel->r_offset);
7305 break;
7306
7307 case R_ARM_PREL31:
7308 value -= (input_section->output_section->vma
7309 + input_section->output_offset + rel->r_offset);
7310 value += signed_addend;
7311 if (! h || h->root.type != bfd_link_hash_undefweak)
7312 {
7313 /* Check for overflow. */
7314 if ((value ^ (value >> 1)) & (1 << 30))
7315 return bfd_reloc_overflow;
7316 }
7317 value &= 0x7fffffff;
7318 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
7319 if (sym_flags == STT_ARM_TFUNC)
7320 value |= 1;
7321 break;
7322 }
7323
7324 bfd_put_32 (input_bfd, value, hit_data);
7325 return bfd_reloc_ok;
7326
7327 case R_ARM_ABS8:
7328 value += addend;
7329
7330 /* There is no way to tell whether the user intended to use a signed or
7331 unsigned addend. When checking for overflow we accept either,
7332 as specified by the AAELF. */
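	 /* For instance (illustrative only): results of 0xff (valid as an
	    unsigned byte) and -0x80 (valid as a signed byte) are both
	    accepted by the check below; 0x100 or -0x81 would overflow.  */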
7333 if ((long) value > 0xff || (long) value < -0x80)
7334 return bfd_reloc_overflow;
7335
7336 bfd_put_8 (input_bfd, value, hit_data);
7337 return bfd_reloc_ok;
7338
7339 case R_ARM_ABS16:
7340 value += addend;
7341
7342 /* See comment for R_ARM_ABS8. */
7343 if ((long) value > 0xffff || (long) value < -0x8000)
7344 return bfd_reloc_overflow;
7345
7346 bfd_put_16 (input_bfd, value, hit_data);
7347 return bfd_reloc_ok;
7348
7349 case R_ARM_THM_ABS5:
7350 /* Support ldr and str instructions for the thumb. */
7351 if (globals->use_rel)
7352 {
7353 /* Need to refetch addend. */
7354 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7355 /* ??? Need to determine shift amount from operand size. */
7356 addend >>= howto->rightshift;
7357 }
7358 value += addend;
7359
7360 /* ??? Isn't value unsigned? */
7361 if ((long) value > 0x1f || (long) value < -0x10)
7362 return bfd_reloc_overflow;
7363
7364 /* ??? Value needs to be properly shifted into place first. */
7365 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
7366 bfd_put_16 (input_bfd, value, hit_data);
7367 return bfd_reloc_ok;
7368
7369 case R_ARM_THM_ALU_PREL_11_0:
7370 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
7371 {
7372 bfd_vma insn;
7373 bfd_signed_vma relocation;
7374
7375 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7376 | bfd_get_16 (input_bfd, hit_data + 2);
7377
7378 if (globals->use_rel)
7379 {
7380 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
7381 | ((insn & (1 << 26)) >> 15);
7382 if (insn & 0xf00000)
7383 signed_addend = -signed_addend;
7384 }
7385
7386 relocation = value + signed_addend;
7387 relocation -= (input_section->output_section->vma
7388 + input_section->output_offset
7389 + rel->r_offset);
7390
7391 value = abs (relocation);
7392
7393 if (value >= 0x1000)
7394 return bfd_reloc_overflow;
7395
7396 insn = (insn & 0xfb0f8f00) | (value & 0xff)
7397 | ((value & 0x700) << 4)
7398 | ((value & 0x800) << 15);
7399 if (relocation < 0)
7400 insn |= 0xa00000;
7401
7402 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7403 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7404
7405 return bfd_reloc_ok;
7406 }
7407
7408 case R_ARM_THM_PC8:
7409 /* PR 10073: This reloc is not generated by the GNU toolchain,
7410 but it is supported for compatibility with third party libraries
7411 generated by other compilers, specifically the ARM/IAR. */
7412 {
7413 bfd_vma insn;
7414 bfd_signed_vma relocation;
7415
7416 insn = bfd_get_16 (input_bfd, hit_data);
7417
7418 if (globals->use_rel)
7419 addend = (insn & 0x00ff) << 2;
7420
7421 relocation = value + addend;
7422 relocation -= (input_section->output_section->vma
7423 + input_section->output_offset
7424 + rel->r_offset);
7425
7426 value = abs (relocation);
7427
7428 /* We do not check for overflow of this reloc. Although strictly
7429 speaking this is incorrect, it appears to be necessary in order
7430 to work with IAR generated relocs. Since GCC and GAS do not
7431 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
7432 a problem for them. */
7433 value &= 0x3fc;
7434
7435 insn = (insn & 0xff00) | (value >> 2);
7436
7437 bfd_put_16 (input_bfd, insn, hit_data);
7438
7439 return bfd_reloc_ok;
7440 }
7441
7442 case R_ARM_THM_PC12:
7443 /* Corresponds to: ldr.w reg, [pc, #offset]. */
7444 {
7445 bfd_vma insn;
7446 bfd_signed_vma relocation;
7447
7448 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7449 | bfd_get_16 (input_bfd, hit_data + 2);
7450
7451 if (globals->use_rel)
7452 {
7453 signed_addend = insn & 0xfff;
7454 if (!(insn & (1 << 23)))
7455 signed_addend = -signed_addend;
7456 }
7457
7458 relocation = value + signed_addend;
7459 relocation -= (input_section->output_section->vma
7460 + input_section->output_offset
7461 + rel->r_offset);
7462
7463 value = abs (relocation);
7464
7465 if (value >= 0x1000)
7466 return bfd_reloc_overflow;
7467
7468 insn = (insn & 0xff7ff000) | value;
7469 if (relocation >= 0)
7470 insn |= (1 << 23);
7471
7472 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7473 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7474
7475 return bfd_reloc_ok;
7476 }
7477
7478 case R_ARM_THM_XPC22:
7479 case R_ARM_THM_CALL:
7480 case R_ARM_THM_JUMP24:
7481 /* Thumb BL (branch long instruction). */
7482 {
7483 bfd_vma relocation;
7484 bfd_vma reloc_sign;
7485 bfd_boolean overflow = FALSE;
7486 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7487 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7488 bfd_signed_vma reloc_signed_max;
7489 bfd_signed_vma reloc_signed_min;
7490 bfd_vma check;
7491 bfd_signed_vma signed_check;
7492 int bitsize;
7493 const int thumb2 = using_thumb2 (globals);
7494
7495 /* A branch to an undefined weak symbol is turned into a jump to
7496 the next instruction unless a PLT entry will be created.
7497 The jump to the next instruction is optimized as a NOP.W for
7498 Thumb-2 enabled architectures. */
7499 if (h && h->root.type == bfd_link_hash_undefweak
7500 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7501 {
7502 if (arch_has_thumb2_nop (globals))
7503 {
7504 bfd_put_16 (input_bfd, 0xf3af, hit_data);
7505 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
7506 }
7507 else
7508 {
7509 bfd_put_16 (input_bfd, 0xe000, hit_data);
7510 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
7511 }
7512 return bfd_reloc_ok;
7513 }
7514
7515 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
7516 with Thumb-1) involving the J1 and J2 bits. */
7517 if (globals->use_rel)
7518 {
7519 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
7520 bfd_vma upper = upper_insn & 0x3ff;
7521 bfd_vma lower = lower_insn & 0x7ff;
7522 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
7523 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
7524 bfd_vma i1 = j1 ^ s ? 0 : 1;
7525 bfd_vma i2 = j2 ^ s ? 0 : 1;
7526
7527 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
7528 /* Sign extend. */
7529 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
7530
7531 signed_addend = addend;
7532 }
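	/* Illustrative decode (made-up instruction, not part of the original
	   source): a REL "bl" encoded as 0xf7ff/0xfffe has s = 1,
	   upper = 0x3ff, j1 = j2 = 1 and lower = 0x7fe, so i1 = i2 = 1 and
	   the code above reconstructs 0xfffffc, which sign-extends to -4,
	   the usual PC+4 bias stored in place for a Thumb BL.  */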
7533
7534 if (r_type == R_ARM_THM_XPC22)
7535 {
7536 /* Check for Thumb to Thumb call. */
7537 /* FIXME: Should we translate the instruction into a BL
7538 instruction instead ? */
7539 if (sym_flags == STT_ARM_TFUNC)
7540 (*_bfd_error_handler)
7541 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
7542 input_bfd,
7543 h ? h->root.root.string : "(local)");
7544 }
7545 else
7546 {
7547 /* If it is not a call to Thumb, assume call to Arm.
7548 If it is a call relative to a section name, then it is not a
7549 function call at all, but rather a long jump. Calls through
7550 the PLT do not require stubs. */
7551 if (sym_flags != STT_ARM_TFUNC && sym_flags != STT_SECTION
7552 && (h == NULL || splt == NULL
7553 || h->plt.offset == (bfd_vma) -1))
7554 {
7555 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7556 {
7557 /* Convert BL to BLX. */
7558 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7559 }
7560 else if (( r_type != R_ARM_THM_CALL)
7561 && (r_type != R_ARM_THM_JUMP24))
7562 {
7563 if (elf32_thumb_to_arm_stub
7564 (info, sym_name, input_bfd, output_bfd, input_section,
7565 hit_data, sym_sec, rel->r_offset, signed_addend, value,
7566 error_message))
7567 return bfd_reloc_ok;
7568 else
7569 return bfd_reloc_dangerous;
7570 }
7571 }
7572 else if (sym_flags == STT_ARM_TFUNC && globals->use_blx
7573 && r_type == R_ARM_THM_CALL)
7574 {
7575 /* Make sure this is a BL. */
7576 lower_insn |= 0x1800;
7577 }
7578 }
7579
7580 enum elf32_arm_stub_type stub_type = arm_stub_none;
7581 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
7582 {
7583 /* Check if a stub has to be inserted because the destination
7584 is too far. */
7585 struct elf32_arm_stub_hash_entry *stub_entry;
7586 struct elf32_arm_link_hash_entry *hash;
7587
7588 hash = (struct elf32_arm_link_hash_entry *) h;
7589
7590 stub_type = arm_type_of_stub (info, input_section, rel,
7591 &sym_flags, hash, value, sym_sec,
7592 input_bfd, sym_name);
7593
7594 if (stub_type != arm_stub_none)
7595 {
7596 /* The target is out of reach or we are changing modes, so
7597 redirect the branch to the local stub for this
7598 function. */
7599 stub_entry = elf32_arm_get_stub_entry (input_section,
7600 sym_sec, h,
7601 rel, globals,
7602 stub_type);
7603 if (stub_entry != NULL)
7604 value = (stub_entry->stub_offset
7605 + stub_entry->stub_sec->output_offset
7606 + stub_entry->stub_sec->output_section->vma);
7607
7608 /* If this call becomes a call to Arm, force BLX. */
7609 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
7610 {
7611 if ((stub_entry
7612 && !arm_stub_is_thumb (stub_entry->stub_type))
7613 || (sym_flags != STT_ARM_TFUNC))
7614 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7615 }
7616 }
7617 }
7618
7619 /* Handle calls via the PLT. */
7620 if (stub_type == arm_stub_none
7621 && h != NULL
7622 && splt != NULL
7623 && h->plt.offset != (bfd_vma) -1)
7624 {
7625 value = (splt->output_section->vma
7626 + splt->output_offset
7627 + h->plt.offset);
7628
7629 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7630 {
7631 /* If the Thumb BLX instruction is available, convert
7632 the BL to a BLX instruction to call the ARM-mode
7633 PLT entry. */
7634 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7635 sym_flags = STT_FUNC;
7636 }
7637 else
7638 {
7639 /* Target the Thumb stub before the ARM PLT entry. */
7640 value -= PLT_THUMB_STUB_SIZE;
7641 sym_flags = STT_ARM_TFUNC;
7642 }
7643 *unresolved_reloc_p = FALSE;
7644 }
7645
7646 relocation = value + signed_addend;
7647
7648 relocation -= (input_section->output_section->vma
7649 + input_section->output_offset
7650 + rel->r_offset);
7651
7652 check = relocation >> howto->rightshift;
7653
7654 /* If this is a signed value, the rightshift just dropped
7655 leading 1 bits (assuming twos complement). */
7656 if ((bfd_signed_vma) relocation >= 0)
7657 signed_check = check;
7658 else
7659 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
7660
7661 	 /* Calculate the permissible maximum and minimum values for
7662 this relocation according to whether we're relocating for
7663 Thumb-2 or not. */
7664 bitsize = howto->bitsize;
7665 if (!thumb2)
7666 bitsize -= 2;
7667 reloc_signed_max = (1 << (bitsize - 1)) - 1;
7668 reloc_signed_min = ~reloc_signed_max;
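	    /* In other words: with the full 24-bit Thumb-2 encoding the
	       branch may reach roughly +/-16MB; dropping two bits for a
	       non-Thumb-2 core limits it to roughly +/-4MB.  */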
7669
7670 /* Assumes two's complement. */
7671 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7672 overflow = TRUE;
7673
7674 if ((lower_insn & 0x5000) == 0x4000)
7675 /* For a BLX instruction, make sure that the relocation is rounded up
7676 to a word boundary. This follows the semantics of the instruction
7677 which specifies that bit 1 of the target address will come from bit
7678 1 of the base address. */
7679 relocation = (relocation + 2) & ~ 3;
7680
7681 /* Put RELOCATION back into the insn. Assumes two's complement.
7682 We use the Thumb-2 encoding, which is safe even if dealing with
7683 a Thumb-1 instruction by virtue of our overflow check above. */
7684 reloc_sign = (signed_check < 0) ? 1 : 0;
7685 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
7686 | ((relocation >> 12) & 0x3ff)
7687 | (reloc_sign << 10);
7688 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
7689 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
7690 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
7691 | ((relocation >> 1) & 0x7ff);
7692
7693 /* Put the relocated value back in the object file: */
7694 bfd_put_16 (input_bfd, upper_insn, hit_data);
7695 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7696
7697 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7698 }
7699 break;
7700
7701 case R_ARM_THM_JUMP19:
7702 /* Thumb32 conditional branch instruction. */
7703 {
7704 bfd_vma relocation;
7705 bfd_boolean overflow = FALSE;
7706 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7707 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7708 bfd_signed_vma reloc_signed_max = 0xffffe;
7709 bfd_signed_vma reloc_signed_min = -0x100000;
7710 bfd_signed_vma signed_check;
7711
7712 /* Need to refetch the addend, reconstruct the top three bits,
7713 and squish the two 11 bit pieces together. */
7714 if (globals->use_rel)
7715 {
7716 bfd_vma S = (upper_insn & 0x0400) >> 10;
7717 bfd_vma upper = (upper_insn & 0x003f);
7718 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
7719 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
7720 bfd_vma lower = (lower_insn & 0x07ff);
7721
7722 upper |= J1 << 6;
7723 upper |= J2 << 7;
7724 upper |= (!S) << 8;
7725 upper -= 0x0100; /* Sign extend. */
7726
7727 addend = (upper << 12) | (lower << 1);
7728 signed_addend = addend;
7729 }
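	    /* Illustrative decode (made-up instruction): "beq.w" encoded as
	       0xf001/0x8000 has S = 0, imm6 = 1, J1 = J2 = 0 and lower = 0,
	       which the code above reassembles into an addend of 0x1000
	       (+4 KiB).  The limits above give this reloc a range of
	       roughly +/-1MB.  */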
7730
7731 /* Handle calls via the PLT. */
7732 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7733 {
7734 value = (splt->output_section->vma
7735 + splt->output_offset
7736 + h->plt.offset);
7737 /* Target the Thumb stub before the ARM PLT entry. */
7738 value -= PLT_THUMB_STUB_SIZE;
7739 *unresolved_reloc_p = FALSE;
7740 }
7741
7742 /* ??? Should handle interworking? GCC might someday try to
7743 use this for tail calls. */
7744
7745 relocation = value + signed_addend;
7746 relocation -= (input_section->output_section->vma
7747 + input_section->output_offset
7748 + rel->r_offset);
7749 signed_check = (bfd_signed_vma) relocation;
7750
7751 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7752 overflow = TRUE;
7753
7754 /* Put RELOCATION back into the insn. */
7755 {
7756 bfd_vma S = (relocation & 0x00100000) >> 20;
7757 bfd_vma J2 = (relocation & 0x00080000) >> 19;
7758 bfd_vma J1 = (relocation & 0x00040000) >> 18;
7759 bfd_vma hi = (relocation & 0x0003f000) >> 12;
7760 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
7761
7762 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
7763 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
7764 }
7765
7766 /* Put the relocated value back in the object file: */
7767 bfd_put_16 (input_bfd, upper_insn, hit_data);
7768 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7769
7770 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7771 }
7772
7773 case R_ARM_THM_JUMP11:
7774 case R_ARM_THM_JUMP8:
7775 case R_ARM_THM_JUMP6:
7776 	/* Thumb B (branch) instruction.  */
7777 {
7778 bfd_signed_vma relocation;
7779 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
7780 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
7781 bfd_signed_vma signed_check;
7782
7783 	/* CBZ cannot jump backward.  */
7784 if (r_type == R_ARM_THM_JUMP6)
7785 reloc_signed_min = 0;
7786
7787 if (globals->use_rel)
7788 {
7789 /* Need to refetch addend. */
7790 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7791 if (addend & ((howto->src_mask + 1) >> 1))
7792 {
7793 signed_addend = -1;
7794 signed_addend &= ~ howto->src_mask;
7795 signed_addend |= addend;
7796 }
7797 else
7798 signed_addend = addend;
7799 /* The value in the insn has been right shifted. We need to
7800 undo this, so that we can perform the address calculation
7801 in terms of bytes. */
7802 signed_addend <<= howto->rightshift;
7803 }
7804 relocation = value + signed_addend;
7805
7806 relocation -= (input_section->output_section->vma
7807 + input_section->output_offset
7808 + rel->r_offset);
7809
7810 relocation >>= howto->rightshift;
7811 signed_check = relocation;
7812
7813 if (r_type == R_ARM_THM_JUMP6)
7814 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
7815 else
7816 relocation &= howto->dst_mask;
7817 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
7818
7819 bfd_put_16 (input_bfd, relocation, hit_data);
7820
7821 /* Assumes two's complement. */
7822 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7823 return bfd_reloc_overflow;
7824
7825 return bfd_reloc_ok;
7826 }
7827
7828 case R_ARM_ALU_PCREL7_0:
7829 case R_ARM_ALU_PCREL15_8:
7830 case R_ARM_ALU_PCREL23_15:
7831 {
7832 bfd_vma insn;
7833 bfd_vma relocation;
7834
7835 insn = bfd_get_32 (input_bfd, hit_data);
7836 if (globals->use_rel)
7837 {
7838 /* Extract the addend. */
7839 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
7840 signed_addend = addend;
7841 }
7842 relocation = value + signed_addend;
7843
7844 relocation -= (input_section->output_section->vma
7845 + input_section->output_offset
7846 + rel->r_offset);
7847 insn = (insn & ~0xfff)
7848 | ((howto->bitpos << 7) & 0xf00)
7849 | ((relocation >> howto->bitpos) & 0xff);
7850 	bfd_put_32 (input_bfd, insn, hit_data);
7851 }
7852 return bfd_reloc_ok;
7853
7854 case R_ARM_GNU_VTINHERIT:
7855 case R_ARM_GNU_VTENTRY:
7856 return bfd_reloc_ok;
7857
7858 case R_ARM_GOTOFF32:
7859 /* Relocation is relative to the start of the
7860 global offset table. */
7861
7862 BFD_ASSERT (sgot != NULL);
7863 if (sgot == NULL)
7864 return bfd_reloc_notsupported;
7865
7866 /* If we are addressing a Thumb function, we need to adjust the
7867 address by one, so that attempts to call the function pointer will
7868 correctly interpret it as Thumb code. */
7869 if (sym_flags == STT_ARM_TFUNC)
7870 value += 1;
7871
7872 /* Note that sgot->output_offset is not involved in this
7873 calculation. We always want the start of .got. If we
7874 define _GLOBAL_OFFSET_TABLE in a different way, as is
7875 permitted by the ABI, we might have to change this
7876 calculation. */
7877 value -= sgot->output_section->vma;
7878 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7879 contents, rel->r_offset, value,
7880 rel->r_addend);
7881
7882 case R_ARM_GOTPC:
7883 /* Use global offset table as symbol value. */
7884 BFD_ASSERT (sgot != NULL);
7885
7886 if (sgot == NULL)
7887 return bfd_reloc_notsupported;
7888
7889 *unresolved_reloc_p = FALSE;
7890 value = sgot->output_section->vma;
7891 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7892 contents, rel->r_offset, value,
7893 rel->r_addend);
7894
7895 case R_ARM_GOT32:
7896 case R_ARM_GOT_PREL:
7897 /* Relocation is to the entry for this symbol in the
7898 global offset table. */
7899 if (sgot == NULL)
7900 return bfd_reloc_notsupported;
7901
7902 if (h != NULL)
7903 {
7904 bfd_vma off;
7905 bfd_boolean dyn;
7906
7907 off = h->got.offset;
7908 BFD_ASSERT (off != (bfd_vma) -1);
7909 dyn = globals->root.dynamic_sections_created;
7910
7911 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7912 || (info->shared
7913 && SYMBOL_REFERENCES_LOCAL (info, h))
7914 || (ELF_ST_VISIBILITY (h->other)
7915 && h->root.type == bfd_link_hash_undefweak))
7916 {
7917 /* This is actually a static link, or it is a -Bsymbolic link
7918 and the symbol is defined locally. We must initialize this
7919 entry in the global offset table. Since the offset must
7920 always be a multiple of 4, we use the least significant bit
7921 to record whether we have initialized it already.
7922
7923 When doing a dynamic link, we create a .rel(a).got relocation
7924 entry to initialize the value. This is done in the
7925 finish_dynamic_symbol routine. */
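	      /* For instance (illustrative offsets): an h->got.offset of
		 0x19 here means GOT slot 0x18 has already been filled in,
		 while 0x18 means the slot still needs initializing.  */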
7926 if ((off & 1) != 0)
7927 off &= ~1;
7928 else
7929 {
7930 /* If we are addressing a Thumb function, we need to
7931 adjust the address by one, so that attempts to
7932 call the function pointer will correctly
7933 interpret it as Thumb code. */
7934 if (sym_flags == STT_ARM_TFUNC)
7935 value |= 1;
7936
7937 bfd_put_32 (output_bfd, value, sgot->contents + off);
7938 h->got.offset |= 1;
7939 }
7940 }
7941 else
7942 *unresolved_reloc_p = FALSE;
7943
7944 value = sgot->output_offset + off;
7945 }
7946 else
7947 {
7948 bfd_vma off;
7949
7950 BFD_ASSERT (local_got_offsets != NULL &&
7951 local_got_offsets[r_symndx] != (bfd_vma) -1);
7952
7953 off = local_got_offsets[r_symndx];
7954
7955 /* The offset must always be a multiple of 4. We use the
7956 least significant bit to record whether we have already
7957 generated the necessary reloc. */
7958 if ((off & 1) != 0)
7959 off &= ~1;
7960 else
7961 {
7962 /* If we are addressing a Thumb function, we need to
7963 adjust the address by one, so that attempts to
7964 call the function pointer will correctly
7965 interpret it as Thumb code. */
7966 if (sym_flags == STT_ARM_TFUNC)
7967 value |= 1;
7968
7969 if (globals->use_rel)
7970 bfd_put_32 (output_bfd, value, sgot->contents + off);
7971
7972 if (info->shared)
7973 {
7974 asection * srelgot;
7975 Elf_Internal_Rela outrel;
7976 bfd_byte *loc;
7977
7978 srelgot = (bfd_get_section_by_name
7979 (dynobj, RELOC_SECTION (globals, ".got")));
7980 BFD_ASSERT (srelgot != NULL);
7981
7982 outrel.r_addend = addend + value;
7983 outrel.r_offset = (sgot->output_section->vma
7984 + sgot->output_offset
7985 + off);
7986 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
7987 loc = srelgot->contents;
7988 loc += srelgot->reloc_count++ * RELOC_SIZE (globals);
7989 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7990 }
7991
7992 local_got_offsets[r_symndx] |= 1;
7993 }
7994
7995 value = sgot->output_offset + off;
7996 }
7997 if (r_type != R_ARM_GOT32)
7998 value += sgot->output_section->vma;
7999
8000 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8001 contents, rel->r_offset, value,
8002 rel->r_addend);
8003
8004 case R_ARM_TLS_LDO32:
8005 value = value - dtpoff_base (info);
8006
8007 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8008 contents, rel->r_offset, value,
8009 rel->r_addend);
8010
8011 case R_ARM_TLS_LDM32:
8012 {
8013 bfd_vma off;
8014
8015 if (globals->sgot == NULL)
8016 abort ();
8017
8018 off = globals->tls_ldm_got.offset;
8019
8020 if ((off & 1) != 0)
8021 off &= ~1;
8022 else
8023 {
8024 /* If we don't know the module number, create a relocation
8025 for it. */
8026 if (info->shared)
8027 {
8028 Elf_Internal_Rela outrel;
8029 bfd_byte *loc;
8030
8031 if (globals->srelgot == NULL)
8032 abort ();
8033
8034 outrel.r_addend = 0;
8035 outrel.r_offset = (globals->sgot->output_section->vma
8036 + globals->sgot->output_offset + off);
8037 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
8038
8039 if (globals->use_rel)
8040 bfd_put_32 (output_bfd, outrel.r_addend,
8041 globals->sgot->contents + off);
8042
8043 loc = globals->srelgot->contents;
8044 loc += globals->srelgot->reloc_count++ * RELOC_SIZE (globals);
8045 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8046 }
8047 else
8048 bfd_put_32 (output_bfd, 1, globals->sgot->contents + off);
8049
8050 globals->tls_ldm_got.offset |= 1;
8051 }
8052
8053 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
8054 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
8055
8056 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8057 contents, rel->r_offset, value,
8058 rel->r_addend);
8059 }
8060
8061 case R_ARM_TLS_GD32:
8062 case R_ARM_TLS_IE32:
8063 {
8064 bfd_vma off;
8065 int indx;
8066 char tls_type;
8067
8068 if (globals->sgot == NULL)
8069 abort ();
8070
8071 indx = 0;
8072 if (h != NULL)
8073 {
8074 bfd_boolean dyn;
8075 dyn = globals->root.dynamic_sections_created;
8076 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
8077 && (!info->shared
8078 || !SYMBOL_REFERENCES_LOCAL (info, h)))
8079 {
8080 *unresolved_reloc_p = FALSE;
8081 indx = h->dynindx;
8082 }
8083 off = h->got.offset;
8084 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
8085 }
8086 else
8087 {
8088 if (local_got_offsets == NULL)
8089 abort ();
8090 off = local_got_offsets[r_symndx];
8091 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
8092 }
8093
8094 if (tls_type == GOT_UNKNOWN)
8095 abort ();
8096
8097 if ((off & 1) != 0)
8098 off &= ~1;
8099 else
8100 {
8101 bfd_boolean need_relocs = FALSE;
8102 Elf_Internal_Rela outrel;
8103 bfd_byte *loc = NULL;
8104 int cur_off = off;
8105
8106 /* The GOT entries have not been initialized yet. Do it
8107 now, and emit any relocations. If both an IE GOT and a
8108 GD GOT are necessary, we emit the GD first. */
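	      /* Layout illustration (derived from the code below, not part
		 of the original source): a GD entry takes two GOT words,
		 [DTPMOD][DTPOFF], and an IE entry one word, [TPOFF], so a
		 symbol needing both uses twelve bytes with the GD pair
		 first:  off+0: DTPMOD  off+4: DTPOFF  off+8: TPOFF.  */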
8109
8110 if ((info->shared || indx != 0)
8111 && (h == NULL
8112 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8113 || h->root.type != bfd_link_hash_undefweak))
8114 {
8115 need_relocs = TRUE;
8116 if (globals->srelgot == NULL)
8117 abort ();
8118 loc = globals->srelgot->contents;
8119 loc += globals->srelgot->reloc_count * RELOC_SIZE (globals);
8120 }
8121
8122 if (tls_type & GOT_TLS_GD)
8123 {
8124 if (need_relocs)
8125 {
8126 outrel.r_addend = 0;
8127 outrel.r_offset = (globals->sgot->output_section->vma
8128 + globals->sgot->output_offset
8129 + cur_off);
8130 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
8131
8132 if (globals->use_rel)
8133 bfd_put_32 (output_bfd, outrel.r_addend,
8134 globals->sgot->contents + cur_off);
8135
8136 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8137 globals->srelgot->reloc_count++;
8138 loc += RELOC_SIZE (globals);
8139
8140 if (indx == 0)
8141 bfd_put_32 (output_bfd, value - dtpoff_base (info),
8142 globals->sgot->contents + cur_off + 4);
8143 else
8144 {
8145 outrel.r_addend = 0;
8146 outrel.r_info = ELF32_R_INFO (indx,
8147 R_ARM_TLS_DTPOFF32);
8148 outrel.r_offset += 4;
8149
8150 if (globals->use_rel)
8151 bfd_put_32 (output_bfd, outrel.r_addend,
8152 globals->sgot->contents + cur_off + 4);
8153
8154
8155 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8156 globals->srelgot->reloc_count++;
8157 loc += RELOC_SIZE (globals);
8158 }
8159 }
8160 else
8161 {
8162 /* If we are not emitting relocations for a
8163 general dynamic reference, then we must be in a
8164 static link or an executable link with the
8165 symbol binding locally. Mark it as belonging
8166 to module 1, the executable. */
8167 bfd_put_32 (output_bfd, 1,
8168 globals->sgot->contents + cur_off);
8169 bfd_put_32 (output_bfd, value - dtpoff_base (info),
8170 globals->sgot->contents + cur_off + 4);
8171 }
8172
8173 cur_off += 8;
8174 }
8175
8176 if (tls_type & GOT_TLS_IE)
8177 {
8178 if (need_relocs)
8179 {
8180 if (indx == 0)
8181 outrel.r_addend = value - dtpoff_base (info);
8182 else
8183 outrel.r_addend = 0;
8184 outrel.r_offset = (globals->sgot->output_section->vma
8185 + globals->sgot->output_offset
8186 + cur_off);
8187 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
8188
8189 if (globals->use_rel)
8190 bfd_put_32 (output_bfd, outrel.r_addend,
8191 globals->sgot->contents + cur_off);
8192
8193 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8194 globals->srelgot->reloc_count++;
8195 loc += RELOC_SIZE (globals);
8196 }
8197 else
8198 bfd_put_32 (output_bfd, tpoff (info, value),
8199 globals->sgot->contents + cur_off);
8200 cur_off += 4;
8201 }
8202
8203 if (h != NULL)
8204 h->got.offset |= 1;
8205 else
8206 local_got_offsets[r_symndx] |= 1;
8207 }
8208
8209 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
8210 off += 8;
8211 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
8212 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
8213
8214 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8215 contents, rel->r_offset, value,
8216 rel->r_addend);
8217 }
8218
8219 case R_ARM_TLS_LE32:
8220 if (info->shared)
8221 {
8222 (*_bfd_error_handler)
8223 	  (_("%B(%A+0x%lx): %s relocation not permitted in shared object"),
8224 input_bfd, input_section,
8225 (long) rel->r_offset, howto->name);
8226 return (bfd_reloc_status_type) FALSE;
8227 }
8228 else
8229 value = tpoff (info, value);
8230
8231 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8232 contents, rel->r_offset, value,
8233 rel->r_addend);
8234
8235 case R_ARM_V4BX:
8236 if (globals->fix_v4bx)
8237 {
8238 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8239
8240 /* Ensure that we have a BX instruction. */
8241 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
8242
8243 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
8244 {
8245 /* Branch to veneer. */
8246 bfd_vma glue_addr;
8247 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
8248 glue_addr -= input_section->output_section->vma
8249 + input_section->output_offset
8250 + rel->r_offset + 8;
8251 insn = (insn & 0xf0000000) | 0x0a000000
8252 | ((glue_addr >> 2) & 0x00ffffff);
8253 }
8254 else
8255 {
8256 /* Preserve Rm (lowest four bits) and the condition code
8257 (highest four bits). Other bits encode MOV PC,Rm. */
8258 insn = (insn & 0xf000000f) | 0x01a0f000;
8259 }
8260
8261 bfd_put_32 (input_bfd, insn, hit_data);
8262 }
8263 return bfd_reloc_ok;
8264
8265 case R_ARM_MOVW_ABS_NC:
8266 case R_ARM_MOVT_ABS:
8267 case R_ARM_MOVW_PREL_NC:
8268 case R_ARM_MOVT_PREL:
8269 	/* Until we properly support segment-base-relative addressing,
8270 we assume the segment base to be zero, as for the group relocations.
8271 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
8272 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
8273 case R_ARM_MOVW_BREL_NC:
8274 case R_ARM_MOVW_BREL:
8275 case R_ARM_MOVT_BREL:
8276 {
8277 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8278
8279 if (globals->use_rel)
8280 {
8281 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8282 signed_addend = (addend ^ 0x8000) - 0x8000;
8283 }
8284
8285 value += signed_addend;
8286
8287 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
8288 value -= (input_section->output_section->vma
8289 + input_section->output_offset + rel->r_offset);
8290
8291 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
8292 return bfd_reloc_overflow;
8293
8294 if (sym_flags == STT_ARM_TFUNC)
8295 value |= 1;
8296
8297 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
8298 || r_type == R_ARM_MOVT_BREL)
8299 value >>= 16;
8300
8301 insn &= 0xfff0f000;
8302 insn |= value & 0xfff;
8303 insn |= (value & 0xf000) << 4;
8304 bfd_put_32 (input_bfd, insn, hit_data);
8305 }
8306 return bfd_reloc_ok;
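      /* Worked example with a made-up address, applicable to the ARM case
	 above and the Thumb case below: for a final value of 0x12345678,
	 the MOVW half encodes 0x5678 (imm4 = 0x5 in bits 19:16, imm12 =
	 0x678 in bits 11:0) and the MOVT half shifts the value right by 16
	 and encodes 0x1234 in the same imm4:imm12 split.  */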
8307
8308 case R_ARM_THM_MOVW_ABS_NC:
8309 case R_ARM_THM_MOVT_ABS:
8310 case R_ARM_THM_MOVW_PREL_NC:
8311 case R_ARM_THM_MOVT_PREL:
8312 	/* Until we properly support segment-base-relative addressing,
8313 we assume the segment base to be zero, as for the above relocations.
8314 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
8315 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
8316 as R_ARM_THM_MOVT_ABS. */
8317 case R_ARM_THM_MOVW_BREL_NC:
8318 case R_ARM_THM_MOVW_BREL:
8319 case R_ARM_THM_MOVT_BREL:
8320 {
8321 bfd_vma insn;
8322
8323 insn = bfd_get_16 (input_bfd, hit_data) << 16;
8324 insn |= bfd_get_16 (input_bfd, hit_data + 2);
8325
8326 if (globals->use_rel)
8327 {
8328 addend = ((insn >> 4) & 0xf000)
8329 | ((insn >> 15) & 0x0800)
8330 | ((insn >> 4) & 0x0700)
8331 | (insn & 0x00ff);
8332 signed_addend = (addend ^ 0x8000) - 0x8000;
8333 }
8334
8335 value += signed_addend;
8336
8337 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
8338 value -= (input_section->output_section->vma
8339 + input_section->output_offset + rel->r_offset);
8340
8341 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
8342 return bfd_reloc_overflow;
8343
8344 if (sym_flags == STT_ARM_TFUNC)
8345 value |= 1;
8346
8347 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
8348 || r_type == R_ARM_THM_MOVT_BREL)
8349 value >>= 16;
8350
8351 insn &= 0xfbf08f00;
8352 insn |= (value & 0xf000) << 4;
8353 insn |= (value & 0x0800) << 15;
8354 insn |= (value & 0x0700) << 4;
8355 insn |= (value & 0x00ff);
8356
8357 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8358 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8359 }
8360 return bfd_reloc_ok;
8361
8362 case R_ARM_ALU_PC_G0_NC:
8363 case R_ARM_ALU_PC_G1_NC:
8364 case R_ARM_ALU_PC_G0:
8365 case R_ARM_ALU_PC_G1:
8366 case R_ARM_ALU_PC_G2:
8367 case R_ARM_ALU_SB_G0_NC:
8368 case R_ARM_ALU_SB_G1_NC:
8369 case R_ARM_ALU_SB_G0:
8370 case R_ARM_ALU_SB_G1:
8371 case R_ARM_ALU_SB_G2:
8372 {
8373 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8374 bfd_vma pc = input_section->output_section->vma
8375 + input_section->output_offset + rel->r_offset;
8376 /* sb should be the origin of the *segment* containing the symbol.
8377 It is not clear how to obtain this OS-dependent value, so we
8378 make an arbitrary choice of zero. */
8379 bfd_vma sb = 0;
8380 bfd_vma residual;
8381 bfd_vma g_n;
8382 bfd_signed_vma signed_value;
8383 int group = 0;
8384
8385 /* Determine which group of bits to select. */
8386 switch (r_type)
8387 {
8388 case R_ARM_ALU_PC_G0_NC:
8389 case R_ARM_ALU_PC_G0:
8390 case R_ARM_ALU_SB_G0_NC:
8391 case R_ARM_ALU_SB_G0:
8392 group = 0;
8393 break;
8394
8395 case R_ARM_ALU_PC_G1_NC:
8396 case R_ARM_ALU_PC_G1:
8397 case R_ARM_ALU_SB_G1_NC:
8398 case R_ARM_ALU_SB_G1:
8399 group = 1;
8400 break;
8401
8402 case R_ARM_ALU_PC_G2:
8403 case R_ARM_ALU_SB_G2:
8404 group = 2;
8405 break;
8406
8407 default:
8408 abort ();
8409 }
8410
8411 /* If REL, extract the addend from the insn. If RELA, it will
8412 have already been fetched for us. */
8413 if (globals->use_rel)
8414 {
8415 int negative;
8416 bfd_vma constant = insn & 0xff;
8417 bfd_vma rotation = (insn & 0xf00) >> 8;
8418
8419 if (rotation == 0)
8420 signed_addend = constant;
8421 else
8422 {
8423 /* Compensate for the fact that in the instruction, the
8424 rotation is stored in multiples of 2 bits. */
8425 rotation *= 2;
8426
8427 /* Rotate "constant" right by "rotation" bits. */
8428 signed_addend = (constant >> rotation) |
8429 (constant << (8 * sizeof (bfd_vma) - rotation));
8430 }
8431
8432 /* Determine if the instruction is an ADD or a SUB.
8433 (For REL, this determines the sign of the addend.) */
8434 negative = identify_add_or_sub (insn);
8435 if (negative == 0)
8436 {
8437 (*_bfd_error_handler)
8438 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
8439 input_bfd, input_section,
8440 (long) rel->r_offset, howto->name);
8441 return bfd_reloc_overflow;
8442 }
8443
8444 signed_addend *= negative;
8445 }
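	    /* Illustrative decode (made-up instruction): "add r0, pc, #0x7e"
	       stores constant 0x7e with rotation 0, giving a signed_addend
	       of 0x7e; the same constant in a SUB would make the addend
	       -0x7e, as described above.  */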
8446
8447 /* Compute the value (X) to go in the place. */
8448 if (r_type == R_ARM_ALU_PC_G0_NC
8449 || r_type == R_ARM_ALU_PC_G1_NC
8450 || r_type == R_ARM_ALU_PC_G0
8451 || r_type == R_ARM_ALU_PC_G1
8452 || r_type == R_ARM_ALU_PC_G2)
8453 /* PC relative. */
8454 signed_value = value - pc + signed_addend;
8455 else
8456 /* Section base relative. */
8457 signed_value = value - sb + signed_addend;
8458
8459 /* If the target symbol is a Thumb function, then set the
8460 Thumb bit in the address. */
8461 if (sym_flags == STT_ARM_TFUNC)
8462 signed_value |= 1;
8463
8464 /* Calculate the value of the relevant G_n, in encoded
8465 constant-with-rotation format. */
8466 g_n = calculate_group_reloc_mask (abs (signed_value), group,
8467 &residual);
8468
8469 /* Check for overflow if required. */
8470 if ((r_type == R_ARM_ALU_PC_G0
8471 || r_type == R_ARM_ALU_PC_G1
8472 || r_type == R_ARM_ALU_PC_G2
8473 || r_type == R_ARM_ALU_SB_G0
8474 || r_type == R_ARM_ALU_SB_G1
8475 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
8476 {
8477 (*_bfd_error_handler)
8478 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8479 input_bfd, input_section,
8480 (long) rel->r_offset, abs (signed_value), howto->name);
8481 return bfd_reloc_overflow;
8482 }
8483
8484 /* Mask out the value and the ADD/SUB part of the opcode; take care
8485 not to destroy the S bit. */
8486 insn &= 0xff1ff000;
8487
8488 /* Set the opcode according to whether the value to go in the
8489 place is negative. */
8490 if (signed_value < 0)
8491 insn |= 1 << 22;
8492 else
8493 insn |= 1 << 23;
8494
8495 /* Encode the offset. */
8496 insn |= g_n;
8497
8498 bfd_put_32 (input_bfd, insn, hit_data);
8499 }
8500 return bfd_reloc_ok;
8501
8502 case R_ARM_LDR_PC_G0:
8503 case R_ARM_LDR_PC_G1:
8504 case R_ARM_LDR_PC_G2:
8505 case R_ARM_LDR_SB_G0:
8506 case R_ARM_LDR_SB_G1:
8507 case R_ARM_LDR_SB_G2:
8508 {
8509 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8510 bfd_vma pc = input_section->output_section->vma
8511 + input_section->output_offset + rel->r_offset;
8512 bfd_vma sb = 0; /* See note above. */
8513 bfd_vma residual;
8514 bfd_signed_vma signed_value;
8515 int group = 0;
8516
8517 /* Determine which groups of bits to calculate. */
8518 switch (r_type)
8519 {
8520 case R_ARM_LDR_PC_G0:
8521 case R_ARM_LDR_SB_G0:
8522 group = 0;
8523 break;
8524
8525 case R_ARM_LDR_PC_G1:
8526 case R_ARM_LDR_SB_G1:
8527 group = 1;
8528 break;
8529
8530 case R_ARM_LDR_PC_G2:
8531 case R_ARM_LDR_SB_G2:
8532 group = 2;
8533 break;
8534
8535 default:
8536 abort ();
8537 }
8538
8539 /* If REL, extract the addend from the insn. If RELA, it will
8540 have already been fetched for us. */
8541 if (globals->use_rel)
8542 {
8543 int negative = (insn & (1 << 23)) ? 1 : -1;
8544 signed_addend = negative * (insn & 0xfff);
8545 }
8546
8547 /* Compute the value (X) to go in the place. */
8548 if (r_type == R_ARM_LDR_PC_G0
8549 || r_type == R_ARM_LDR_PC_G1
8550 || r_type == R_ARM_LDR_PC_G2)
8551 /* PC relative. */
8552 signed_value = value - pc + signed_addend;
8553 else
8554 /* Section base relative. */
8555 signed_value = value - sb + signed_addend;
8556
8557 /* Calculate the value of the relevant G_{n-1} to obtain
8558 the residual at that stage. */
8559 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8560
8561 /* Check for overflow. */
8562 if (residual >= 0x1000)
8563 {
8564 (*_bfd_error_handler)
8565 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8566 input_bfd, input_section,
8567 (long) rel->r_offset, abs (signed_value), howto->name);
8568 return bfd_reloc_overflow;
8569 }
8570
8571 /* Mask out the value and U bit. */
8572 insn &= 0xff7ff000;
8573
8574 /* Set the U bit if the value to go in the place is non-negative. */
8575 if (signed_value >= 0)
8576 insn |= 1 << 23;
8577
8578 /* Encode the offset. */
8579 insn |= residual;
8580
8581 bfd_put_32 (input_bfd, insn, hit_data);
8582 }
8583 return bfd_reloc_ok;
8584
8585 case R_ARM_LDRS_PC_G0:
8586 case R_ARM_LDRS_PC_G1:
8587 case R_ARM_LDRS_PC_G2:
8588 case R_ARM_LDRS_SB_G0:
8589 case R_ARM_LDRS_SB_G1:
8590 case R_ARM_LDRS_SB_G2:
8591 {
8592 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8593 bfd_vma pc = input_section->output_section->vma
8594 + input_section->output_offset + rel->r_offset;
8595 bfd_vma sb = 0; /* See note above. */
8596 bfd_vma residual;
8597 bfd_signed_vma signed_value;
8598 int group = 0;
8599
8600 /* Determine which groups of bits to calculate. */
8601 switch (r_type)
8602 {
8603 case R_ARM_LDRS_PC_G0:
8604 case R_ARM_LDRS_SB_G0:
8605 group = 0;
8606 break;
8607
8608 case R_ARM_LDRS_PC_G1:
8609 case R_ARM_LDRS_SB_G1:
8610 group = 1;
8611 break;
8612
8613 case R_ARM_LDRS_PC_G2:
8614 case R_ARM_LDRS_SB_G2:
8615 group = 2;
8616 break;
8617
8618 default:
8619 abort ();
8620 }
8621
8622 /* If REL, extract the addend from the insn. If RELA, it will
8623 have already been fetched for us. */
8624 if (globals->use_rel)
8625 {
8626 int negative = (insn & (1 << 23)) ? 1 : -1;
8627 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
8628 }
8629
8630 /* Compute the value (X) to go in the place. */
8631 if (r_type == R_ARM_LDRS_PC_G0
8632 || r_type == R_ARM_LDRS_PC_G1
8633 || r_type == R_ARM_LDRS_PC_G2)
8634 /* PC relative. */
8635 signed_value = value - pc + signed_addend;
8636 else
8637 /* Section base relative. */
8638 signed_value = value - sb + signed_addend;
8639
8640 /* Calculate the value of the relevant G_{n-1} to obtain
8641 the residual at that stage. */
8642 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8643
8644 /* Check for overflow. */
8645 if (residual >= 0x100)
8646 {
8647 (*_bfd_error_handler)
8648 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8649 input_bfd, input_section,
8650 (long) rel->r_offset, abs (signed_value), howto->name);
8651 return bfd_reloc_overflow;
8652 }
8653
8654 /* Mask out the value and U bit. */
8655 insn &= 0xff7ff0f0;
8656
8657 /* Set the U bit if the value to go in the place is non-negative. */
8658 if (signed_value >= 0)
8659 insn |= 1 << 23;
8660
8661 /* Encode the offset. */
8662 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
8663
8664 bfd_put_32 (input_bfd, insn, hit_data);
8665 }
8666 return bfd_reloc_ok;
8667
8668 case R_ARM_LDC_PC_G0:
8669 case R_ARM_LDC_PC_G1:
8670 case R_ARM_LDC_PC_G2:
8671 case R_ARM_LDC_SB_G0:
8672 case R_ARM_LDC_SB_G1:
8673 case R_ARM_LDC_SB_G2:
8674 {
8675 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8676 bfd_vma pc = input_section->output_section->vma
8677 + input_section->output_offset + rel->r_offset;
8678 bfd_vma sb = 0; /* See note above. */
8679 bfd_vma residual;
8680 bfd_signed_vma signed_value;
8681 int group = 0;
8682
8683 /* Determine which groups of bits to calculate. */
8684 switch (r_type)
8685 {
8686 case R_ARM_LDC_PC_G0:
8687 case R_ARM_LDC_SB_G0:
8688 group = 0;
8689 break;
8690
8691 case R_ARM_LDC_PC_G1:
8692 case R_ARM_LDC_SB_G1:
8693 group = 1;
8694 break;
8695
8696 case R_ARM_LDC_PC_G2:
8697 case R_ARM_LDC_SB_G2:
8698 group = 2;
8699 break;
8700
8701 default:
8702 abort ();
8703 }
8704
8705 /* If REL, extract the addend from the insn. If RELA, it will
8706 have already been fetched for us. */
8707 if (globals->use_rel)
8708 {
8709 int negative = (insn & (1 << 23)) ? 1 : -1;
8710 signed_addend = negative * ((insn & 0xff) << 2);
8711 }
8712
8713 /* Compute the value (X) to go in the place. */
8714 if (r_type == R_ARM_LDC_PC_G0
8715 || r_type == R_ARM_LDC_PC_G1
8716 || r_type == R_ARM_LDC_PC_G2)
8717 /* PC relative. */
8718 signed_value = value - pc + signed_addend;
8719 else
8720 /* Section base relative. */
8721 signed_value = value - sb + signed_addend;
8722
8723 /* Calculate the value of the relevant G_{n-1} to obtain
8724 the residual at that stage. */
8725 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8726
8727 /* Check for overflow. (The absolute value to go in the place must be
8728 divisible by four and, after having been divided by four, must
8729 fit in eight bits.) */
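	    /* For instance, a residual of 0x3fc passes (it is encoded below
	       as 0x3fc >> 2 = 0xff), while 0x3fe (not a multiple of four)
	       or 0x400 (0x100 does not fit in eight bits) is rejected.  */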
8730 if ((residual & 0x3) != 0 || residual >= 0x400)
8731 {
8732 (*_bfd_error_handler)
8733 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8734 input_bfd, input_section,
8735 (long) rel->r_offset, abs (signed_value), howto->name);
8736 return bfd_reloc_overflow;
8737 }
8738
8739 /* Mask out the value and U bit. */
8740 insn &= 0xff7fff00;
8741
8742 /* Set the U bit if the value to go in the place is non-negative. */
8743 if (signed_value >= 0)
8744 insn |= 1 << 23;
8745
8746 /* Encode the offset. */
8747 insn |= residual >> 2;
8748
8749 bfd_put_32 (input_bfd, insn, hit_data);
8750 }
8751 return bfd_reloc_ok;
8752
8753 default:
8754 return bfd_reloc_notsupported;
8755 }
8756 }
8757
8758 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
8759 static void
8760 arm_add_to_rel (bfd * abfd,
8761 bfd_byte * address,
8762 reloc_howto_type * howto,
8763 bfd_signed_vma increment)
8764 {
8765 bfd_signed_vma addend;
8766
8767 if (howto->type == R_ARM_THM_CALL
8768 || howto->type == R_ARM_THM_JUMP24)
8769 {
8770 int upper_insn, lower_insn;
8771 int upper, lower;
8772
8773 upper_insn = bfd_get_16 (abfd, address);
8774 lower_insn = bfd_get_16 (abfd, address + 2);
8775 upper = upper_insn & 0x7ff;
8776 lower = lower_insn & 0x7ff;
8777
8778 addend = (upper << 12) | (lower << 1);
8779 addend += increment;
8780 addend >>= 1;
8781
8782 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
8783 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
8784
8785 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
8786 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
8787 }
8788 else
8789 {
8790 bfd_vma contents;
8791
8792 contents = bfd_get_32 (abfd, address);
8793
8794 /* Get the (signed) value from the instruction. */
8795 addend = contents & howto->src_mask;
8796 if (addend & ((howto->src_mask + 1) >> 1))
8797 {
8798 bfd_signed_vma mask;
8799
8800 mask = -1;
8801 mask &= ~ howto->src_mask;
8802 addend |= mask;
8803 }
8804
8805 /* Add in the increment, (which is a byte value). */
8806 switch (howto->type)
8807 {
8808 default:
8809 addend += increment;
8810 break;
8811
8812 case R_ARM_PC24:
8813 case R_ARM_PLT32:
8814 case R_ARM_CALL:
8815 case R_ARM_JUMP24:
8816 addend <<= howto->size;
8817 addend += increment;
8818
8819 /* Should we check for overflow here ? */
8820
8821 /* Drop any undesired bits. */
8822 addend >>= howto->rightshift;
8823 break;
8824 }
8825
8826 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
8827
8828 bfd_put_32 (abfd, contents, address);
8829 }
8830 }
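/* Worked example for the sign-extension in arm_add_to_rel above (made-up
   numbers, not part of the original source): for R_ARM_PC24, src_mask is
   0x00ffffff, so an in-place field of 0xfffffe sign-extends to -2;
   converting to bytes (-2 << 2 = -8), adding an increment of 0x100 gives
   0xf8, and shifting back down by howto->rightshift leaves 0x3e to be
   masked into dst_mask.  */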
8831
8832 #define IS_ARM_TLS_RELOC(R_TYPE) \
8833 ((R_TYPE) == R_ARM_TLS_GD32 \
8834 || (R_TYPE) == R_ARM_TLS_LDO32 \
8835 || (R_TYPE) == R_ARM_TLS_LDM32 \
8836 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
8837 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
8838 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
8839 || (R_TYPE) == R_ARM_TLS_LE32 \
8840 || (R_TYPE) == R_ARM_TLS_IE32)
8841
8842 /* Relocate an ARM ELF section. */
8843
8844 static bfd_boolean
8845 elf32_arm_relocate_section (bfd * output_bfd,
8846 struct bfd_link_info * info,
8847 bfd * input_bfd,
8848 asection * input_section,
8849 bfd_byte * contents,
8850 Elf_Internal_Rela * relocs,
8851 Elf_Internal_Sym * local_syms,
8852 asection ** local_sections)
8853 {
8854 Elf_Internal_Shdr *symtab_hdr;
8855 struct elf_link_hash_entry **sym_hashes;
8856 Elf_Internal_Rela *rel;
8857 Elf_Internal_Rela *relend;
8858 const char *name;
8859 struct elf32_arm_link_hash_table * globals;
8860
8861 globals = elf32_arm_hash_table (info);
8862 if (globals == NULL)
8863 return FALSE;
8864
8865 symtab_hdr = & elf_symtab_hdr (input_bfd);
8866 sym_hashes = elf_sym_hashes (input_bfd);
8867
8868 rel = relocs;
8869 relend = relocs + input_section->reloc_count;
8870 for (; rel < relend; rel++)
8871 {
8872 int r_type;
8873 reloc_howto_type * howto;
8874 unsigned long r_symndx;
8875 Elf_Internal_Sym * sym;
8876 asection * sec;
8877 struct elf_link_hash_entry * h;
8878 bfd_vma relocation;
8879 bfd_reloc_status_type r;
8880 arelent bfd_reloc;
8881 char sym_type;
8882 bfd_boolean unresolved_reloc = FALSE;
8883 char *error_message = NULL;
8884
8885 r_symndx = ELF32_R_SYM (rel->r_info);
8886 r_type = ELF32_R_TYPE (rel->r_info);
8887 r_type = arm_real_reloc_type (globals, r_type);
8888
8889 if ( r_type == R_ARM_GNU_VTENTRY
8890 || r_type == R_ARM_GNU_VTINHERIT)
8891 continue;
8892
8893 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
8894 howto = bfd_reloc.howto;
8895
8896 h = NULL;
8897 sym = NULL;
8898 sec = NULL;
8899
8900 if (r_symndx < symtab_hdr->sh_info)
8901 {
8902 sym = local_syms + r_symndx;
8903 sym_type = ELF32_ST_TYPE (sym->st_info);
8904 sec = local_sections[r_symndx];
8905
8906 /* An object file might have a reference to a local
8907 undefined symbol. This is a daft object file, but we
8908 should at least do something about it. V4BX & NONE
8909 relocations do not use the symbol and are explicitly
8910 allowed to use the undefined symbol, so allow those.
8911 Likewise for relocations against STN_UNDEF. */
8912 if (r_type != R_ARM_V4BX
8913 && r_type != R_ARM_NONE
8914 && r_symndx != STN_UNDEF
8915 && bfd_is_und_section (sec)
8916 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
8917 {
8918 if (!info->callbacks->undefined_symbol
8919 (info, bfd_elf_string_from_elf_section
8920 (input_bfd, symtab_hdr->sh_link, sym->st_name),
8921 input_bfd, input_section,
8922 rel->r_offset, TRUE))
8923 return FALSE;
8924 }
8925
8926 if (globals->use_rel)
8927 {
8928 relocation = (sec->output_section->vma
8929 + sec->output_offset
8930 + sym->st_value);
8931 if (!info->relocatable
8932 && (sec->flags & SEC_MERGE)
8933 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8934 {
8935 asection *msec;
8936 bfd_vma addend, value;
8937
8938 switch (r_type)
8939 {
8940 case R_ARM_MOVW_ABS_NC:
8941 case R_ARM_MOVT_ABS:
8942 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8943 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
8944 addend = (addend ^ 0x8000) - 0x8000;
8945 break;
8946
8947 case R_ARM_THM_MOVW_ABS_NC:
8948 case R_ARM_THM_MOVT_ABS:
8949 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
8950 << 16;
8951 value |= bfd_get_16 (input_bfd,
8952 contents + rel->r_offset + 2);
8953 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
8954 | ((value & 0x04000000) >> 15);
8955 addend = (addend ^ 0x8000) - 0x8000;
8956 break;
8957
8958 default:
8959 if (howto->rightshift
8960 || (howto->src_mask & (howto->src_mask + 1)))
8961 {
8962 (*_bfd_error_handler)
8963 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
8964 input_bfd, input_section,
8965 (long) rel->r_offset, howto->name);
8966 return FALSE;
8967 }
8968
8969 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8970
8971 /* Get the (signed) value from the instruction. */
8972 addend = value & howto->src_mask;
8973 if (addend & ((howto->src_mask + 1) >> 1))
8974 {
8975 bfd_signed_vma mask;
8976
8977 mask = -1;
8978 mask &= ~ howto->src_mask;
8979 addend |= mask;
8980 }
8981 break;
8982 }
8983
8984 msec = sec;
8985 addend =
8986 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
8987 - relocation;
8988 addend += msec->output_section->vma + msec->output_offset;
8989
8990 		  /* Cases here must match those in the preceding
8991 switch statement. */
8992 switch (r_type)
8993 {
8994 case R_ARM_MOVW_ABS_NC:
8995 case R_ARM_MOVT_ABS:
8996 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
8997 | (addend & 0xfff);
8998 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8999 break;
9000
9001 case R_ARM_THM_MOVW_ABS_NC:
9002 case R_ARM_THM_MOVT_ABS:
9003 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
9004 | (addend & 0xff) | ((addend & 0x0800) << 15);
9005 bfd_put_16 (input_bfd, value >> 16,
9006 contents + rel->r_offset);
9007 bfd_put_16 (input_bfd, value,
9008 contents + rel->r_offset + 2);
9009 break;
9010
9011 default:
9012 value = (value & ~ howto->dst_mask)
9013 | (addend & howto->dst_mask);
9014 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
9015 break;
9016 }
9017 }
9018 }
9019 else
9020 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
9021 }
9022 else
9023 {
9024 bfd_boolean warned;
9025
9026 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
9027 r_symndx, symtab_hdr, sym_hashes,
9028 h, sec, relocation,
9029 unresolved_reloc, warned);
9030
9031 sym_type = h->type;
9032 }
9033
9034 if (sec != NULL && elf_discarded_section (sec))
9035 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
9036 rel, relend, howto, contents);
9037
9038 if (info->relocatable)
9039 {
9040 /* This is a relocatable link. We don't have to change
9041 anything, unless the reloc is against a section symbol,
9042 in which case we have to adjust according to where the
9043 section symbol winds up in the output section. */
9044 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
9045 {
9046 if (globals->use_rel)
9047 arm_add_to_rel (input_bfd, contents + rel->r_offset,
9048 howto, (bfd_signed_vma) sec->output_offset);
9049 else
9050 rel->r_addend += sec->output_offset;
9051 }
9052 continue;
9053 }
9054
9055 if (h != NULL)
9056 name = h->root.root.string;
9057 else
9058 {
9059 name = (bfd_elf_string_from_elf_section
9060 (input_bfd, symtab_hdr->sh_link, sym->st_name));
9061 if (name == NULL || *name == '\0')
9062 name = bfd_section_name (input_bfd, sec);
9063 }
9064
9065 if (r_symndx != STN_UNDEF
9066 && r_type != R_ARM_NONE
9067 && (h == NULL
9068 || h->root.type == bfd_link_hash_defined
9069 || h->root.type == bfd_link_hash_defweak)
9070 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
9071 {
9072 (*_bfd_error_handler)
9073 ((sym_type == STT_TLS
9074 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
9075 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
9076 input_bfd,
9077 input_section,
9078 (long) rel->r_offset,
9079 howto->name,
9080 name);
9081 }
9082
9083 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
9084 input_section, contents, rel,
9085 relocation, info, sec, name,
9086 (h ? ELF_ST_TYPE (h->type) :
9087 ELF_ST_TYPE (sym->st_info)), h,
9088 &unresolved_reloc, &error_message);
9089
9090 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
9091 because such sections are not SEC_ALLOC and thus ld.so will
9092 not process them. */
9093 if (unresolved_reloc
9094 && !((input_section->flags & SEC_DEBUGGING) != 0
9095 && h->def_dynamic))
9096 {
9097 (*_bfd_error_handler)
9098 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
9099 input_bfd,
9100 input_section,
9101 (long) rel->r_offset,
9102 howto->name,
9103 h->root.root.string);
9104 return FALSE;
9105 }
9106
9107 if (r != bfd_reloc_ok)
9108 {
9109 switch (r)
9110 {
9111 case bfd_reloc_overflow:
9112 /* If the overflowing reloc was to an undefined symbol,
9113 we have already printed one error message and there
9114 is no point complaining again. */
9115 if ((! h ||
9116 h->root.type != bfd_link_hash_undefined)
9117 && (!((*info->callbacks->reloc_overflow)
9118 (info, (h ? &h->root : NULL), name, howto->name,
9119 (bfd_vma) 0, input_bfd, input_section,
9120 rel->r_offset))))
9121 return FALSE;
9122 break;
9123
9124 case bfd_reloc_undefined:
9125 if (!((*info->callbacks->undefined_symbol)
9126 (info, name, input_bfd, input_section,
9127 rel->r_offset, TRUE)))
9128 return FALSE;
9129 break;
9130
9131 case bfd_reloc_outofrange:
9132 error_message = _("out of range");
9133 goto common_error;
9134
9135 case bfd_reloc_notsupported:
9136 error_message = _("unsupported relocation");
9137 goto common_error;
9138
9139 case bfd_reloc_dangerous:
9140 /* error_message should already be set. */
9141 goto common_error;
9142
9143 default:
9144 error_message = _("unknown error");
9145 /* Fall through. */
9146
9147 common_error:
9148 BFD_ASSERT (error_message != NULL);
9149 if (!((*info->callbacks->reloc_dangerous)
9150 (info, error_message, input_bfd, input_section,
9151 rel->r_offset)))
9152 return FALSE;
9153 break;
9154 }
9155 }
9156 }
9157
9158 return TRUE;
9159 }
9160
9161 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
9162    the edit is added to the start of the list.  (The list must be built in order of
9163 ascending TINDEX: the function's callers are primarily responsible for
9164 maintaining that condition). */
9165
9166 static void
9167 add_unwind_table_edit (arm_unwind_table_edit **head,
9168 arm_unwind_table_edit **tail,
9169 arm_unwind_edit_type type,
9170 asection *linked_section,
9171 unsigned int tindex)
9172 {
9173 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
9174 xmalloc (sizeof (arm_unwind_table_edit));
9175
9176 new_edit->type = type;
9177 new_edit->linked_section = linked_section;
9178 new_edit->index = tindex;
9179
9180 if (tindex > 0)
9181 {
9182 new_edit->next = NULL;
9183
9184 if (*tail)
9185 (*tail)->next = new_edit;
9186
9187 (*tail) = new_edit;
9188
9189 if (!*head)
9190 (*head) = new_edit;
9191 }
9192 else
9193 {
9194 new_edit->next = *head;
9195
9196 if (!*tail)
9197 *tail = new_edit;
9198
9199 *head = new_edit;
9200 }
9201 }
9202
9203 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
9204
9205 /* Increase the size of EXIDX_SEC by ADJUST bytes.  ADJUST may be negative.  */
9206 static void
9207 adjust_exidx_size (asection *exidx_sec, int adjust)
9208 {
9209 asection *out_sec;
9210
9211 if (!exidx_sec->rawsize)
9212 exidx_sec->rawsize = exidx_sec->size;
9213
9214 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
9215 out_sec = exidx_sec->output_section;
9216 /* Adjust size of output section. */
9217 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
9218 }
9219
9220 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
9221 static void
9222 insert_cantunwind_after (asection *text_sec, asection *exidx_sec)
9223 {
9224 struct _arm_elf_section_data *exidx_arm_data;
9225
9226 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9227 add_unwind_table_edit (
9228 &exidx_arm_data->u.exidx.unwind_edit_list,
9229 &exidx_arm_data->u.exidx.unwind_edit_tail,
9230 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
9231
9232 adjust_exidx_size (exidx_sec, 8);
9233 }
9234
9235 /* Scan .ARM.exidx tables, and create a list describing edits which should be
9236 made to those tables, such that:
9237
9238 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
9239 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
9240 codes which have been inlined into the index).
9241
9242 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
9243
9244 The edits are applied when the tables are written
9245 (in elf32_arm_write_section).
9246 */
9247
9248 bfd_boolean
9249 elf32_arm_fix_exidx_coverage (asection **text_section_order,
9250 unsigned int num_text_sections,
9251 struct bfd_link_info *info,
9252 bfd_boolean merge_exidx_entries)
9253 {
9254 bfd *inp;
9255 unsigned int last_second_word = 0, i;
9256 asection *last_exidx_sec = NULL;
9257 asection *last_text_sec = NULL;
9258 int last_unwind_type = -1;
9259
9260 /* Walk over all EXIDX sections, and create backlinks from the corresponding
9261 text sections. */
9262 for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
9263 {
9264 asection *sec;
9265
9266 for (sec = inp->sections; sec != NULL; sec = sec->next)
9267 {
9268 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
9269 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
9270
9271 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
9272 continue;
9273
9274 if (elf_sec->linked_to)
9275 {
9276 Elf_Internal_Shdr *linked_hdr
9277 = &elf_section_data (elf_sec->linked_to)->this_hdr;
9278 struct _arm_elf_section_data *linked_sec_arm_data
9279 = get_arm_elf_section_data (linked_hdr->bfd_section);
9280
9281 if (linked_sec_arm_data == NULL)
9282 continue;
9283
9284 /* Link this .ARM.exidx section back from the text section it
9285 describes. */
9286 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
9287 }
9288 }
9289 }
9290
9291 /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
9292 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
9293 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
9294
9295 for (i = 0; i < num_text_sections; i++)
9296 {
9297 asection *sec = text_section_order[i];
9298 asection *exidx_sec;
9299 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
9300 struct _arm_elf_section_data *exidx_arm_data;
9301 bfd_byte *contents = NULL;
9302 int deleted_exidx_bytes = 0;
9303 bfd_vma j;
9304 arm_unwind_table_edit *unwind_edit_head = NULL;
9305 arm_unwind_table_edit *unwind_edit_tail = NULL;
9306 Elf_Internal_Shdr *hdr;
9307 bfd *ibfd;
9308
9309 if (arm_data == NULL)
9310 continue;
9311
9312 exidx_sec = arm_data->u.text.arm_exidx_sec;
9313 if (exidx_sec == NULL)
9314 {
9315 /* Section has no unwind data. */
9316 if (last_unwind_type == 0 || !last_exidx_sec)
9317 continue;
9318
9319 /* Ignore zero sized sections. */
9320 if (sec->size == 0)
9321 continue;
9322
9323 insert_cantunwind_after (last_text_sec, last_exidx_sec);
9324 last_unwind_type = 0;
9325 continue;
9326 }
9327
9328 /* Skip /DISCARD/ sections. */
9329 if (bfd_is_abs_section (exidx_sec->output_section))
9330 continue;
9331
9332 hdr = &elf_section_data (exidx_sec)->this_hdr;
9333 if (hdr->sh_type != SHT_ARM_EXIDX)
9334 continue;
9335
9336 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9337 if (exidx_arm_data == NULL)
9338 continue;
9339
9340 ibfd = exidx_sec->owner;
9341
9342 if (hdr->contents != NULL)
9343 contents = hdr->contents;
9344 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
9345 /* An error? */
9346 continue;
9347
9348 for (j = 0; j < hdr->sh_size; j += 8)
9349 {
9350 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
9351 int unwind_type;
9352 int elide = 0;
9353
9354 /* An EXIDX_CANTUNWIND entry. */
9355 if (second_word == 1)
9356 {
9357 if (last_unwind_type == 0)
9358 elide = 1;
9359 unwind_type = 0;
9360 }
9361 /* Inlined unwinding data. Merge if equal to previous. */
9362 else if ((second_word & 0x80000000) != 0)
9363 {
9364 if (merge_exidx_entries
9365 && last_second_word == second_word && last_unwind_type == 1)
9366 elide = 1;
9367 unwind_type = 1;
9368 last_second_word = second_word;
9369 }
9370 /* Normal table entry. In theory we could merge these too,
9371 but duplicate entries are likely to be much less common. */
9372 else
9373 unwind_type = 2;
9374
9375 if (elide)
9376 {
9377 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
9378 DELETE_EXIDX_ENTRY, NULL, j / 8);
9379
9380 deleted_exidx_bytes += 8;
9381 }
9382
9383 last_unwind_type = unwind_type;
9384 }
9385
9386 /* Free contents if we allocated it ourselves. */
9387 if (contents != hdr->contents)
9388 free (contents);
9389
9390 /* Record edits to be applied later (in elf32_arm_write_section). */
9391 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
9392 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
9393
9394 if (deleted_exidx_bytes > 0)
9395 adjust_exidx_size (exidx_sec, -deleted_exidx_bytes);
9396
9397 last_exidx_sec = exidx_sec;
9398 last_text_sec = sec;
9399 }
9400
9401 /* Add terminating CANTUNWIND entry. */
9402 if (last_exidx_sec && last_unwind_type != 0)
9403 insert_cantunwind_after (last_text_sec, last_exidx_sec);
9404
9405 return TRUE;
9406 }
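/* Illustrative sketch, not part of the original source: a minimal,
   self-contained model of the elision rules applied by the scanning loop
   in elf32_arm_fix_exidx_coverage above, restricted to a single
   .ARM.exidx table.  Given the second word of each 8-byte entry, it
   counts how many entries would be deleted: a CANTUNWIND entry
   (second word == 1) following another CANTUNWIND is elided, and an
   inlined unwind word (bit 31 set) identical to the previous one is
   elided when MERGE is true.  The function name is hypothetical and the
   helper is marked ATTRIBUTE_UNUSED since nothing in the file calls it.  */

static unsigned int ATTRIBUTE_UNUSED
exidx_count_elided_entries (const unsigned int *second_words,
                            unsigned int count, bfd_boolean merge)
{
  unsigned int i, elided = 0;
  unsigned int last_second_word = 0;
  int last_unwind_type = -1;

  for (i = 0; i < count; i++)
    {
      unsigned int w = second_words[i];

      if (w == 1)
        {
          /* EXIDX_CANTUNWIND entry: redundant after another CANTUNWIND.  */
          if (last_unwind_type == 0)
            elided++;
          last_unwind_type = 0;
        }
      else if ((w & 0x80000000) != 0)
        {
          /* Inlined unwind data: redundant if identical to the previous
             inlined entry and merging is requested.  */
          if (merge && last_unwind_type == 1 && last_second_word == w)
            elided++;
          last_unwind_type = 1;
          last_second_word = w;
        }
      else
        /* Normal (out-of-line) table entry: never elided here.  */
        last_unwind_type = 2;
    }

  return elided;
}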
9407
9408 static bfd_boolean
9409 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
9410 bfd *ibfd, const char *name)
9411 {
9412 asection *sec, *osec;
9413
9414 sec = bfd_get_section_by_name (ibfd, name);
9415 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
9416 return TRUE;
9417
9418 osec = sec->output_section;
9419 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
9420 return TRUE;
9421
9422 if (! bfd_set_section_contents (obfd, osec, sec->contents,
9423 sec->output_offset, sec->size))
9424 return FALSE;
9425
9426 return TRUE;
9427 }
9428
9429 static bfd_boolean
9430 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
9431 {
9432 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
9433 asection *sec, *osec;
9434
9435 if (globals == NULL)
9436 return FALSE;
9437
9438 /* Invoke the regular ELF backend linker to do all the work. */
9439 if (!bfd_elf_final_link (abfd, info))
9440 return FALSE;
9441
9442 /* Process stub sections (e.g. BE8 encoding, ...).  */
9443 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
9444 int i;
9445 for (i = 0; i < htab->top_id; i++)
9446 {
9447 sec = htab->stub_group[i].stub_sec;
9448 /* Only process it once, in its link_sec slot. */
9449 if (sec && i == htab->stub_group[i].link_sec->id)
9450 {
9451 osec = sec->output_section;
9452 elf32_arm_write_section (abfd, info, sec, sec->contents);
9453 if (! bfd_set_section_contents (abfd, osec, sec->contents,
9454 sec->output_offset, sec->size))
9455 return FALSE;
9456 }
9457 }
9458
9459 /* Write out any glue sections now that we have created all the
9460 stubs. */
9461 if (globals->bfd_of_glue_owner != NULL)
9462 {
9463 if (! elf32_arm_output_glue_section (info, abfd,
9464 globals->bfd_of_glue_owner,
9465 ARM2THUMB_GLUE_SECTION_NAME))
9466 return FALSE;
9467
9468 if (! elf32_arm_output_glue_section (info, abfd,
9469 globals->bfd_of_glue_owner,
9470 THUMB2ARM_GLUE_SECTION_NAME))
9471 return FALSE;
9472
9473 if (! elf32_arm_output_glue_section (info, abfd,
9474 globals->bfd_of_glue_owner,
9475 VFP11_ERRATUM_VENEER_SECTION_NAME))
9476 return FALSE;
9477
9478 if (! elf32_arm_output_glue_section (info, abfd,
9479 globals->bfd_of_glue_owner,
9480 ARM_BX_GLUE_SECTION_NAME))
9481 return FALSE;
9482 }
9483
9484 return TRUE;
9485 }
9486
9487 /* Set the right machine number. */
9488
9489 static bfd_boolean
9490 elf32_arm_object_p (bfd *abfd)
9491 {
9492 unsigned int mach;
9493
9494 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
9495
9496 if (mach != bfd_mach_arm_unknown)
9497 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9498
9499 else if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
9500 bfd_default_set_arch_mach (abfd, bfd_arch_arm, bfd_mach_arm_ep9312);
9501
9502 else
9503 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9504
9505 return TRUE;
9506 }
9507
9508 /* Function to keep ARM specific flags in the ELF header. */
9509
9510 static bfd_boolean
9511 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
9512 {
9513 if (elf_flags_init (abfd)
9514 && elf_elfheader (abfd)->e_flags != flags)
9515 {
9516 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
9517 {
9518 if (flags & EF_ARM_INTERWORK)
9519 (*_bfd_error_handler)
9520 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
9521 abfd);
9522 else
9523 _bfd_error_handler
9524 (_("Warning: Clearing the interworking flag of %B due to outside request"),
9525 abfd);
9526 }
9527 }
9528 else
9529 {
9530 elf_elfheader (abfd)->e_flags = flags;
9531 elf_flags_init (abfd) = TRUE;
9532 }
9533
9534 return TRUE;
9535 }
9536
9537 /* Copy backend specific data from one object module to another. */
9538
9539 static bfd_boolean
9540 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
9541 {
9542 flagword in_flags;
9543 flagword out_flags;
9544
9545 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
9546 return TRUE;
9547
9548 in_flags = elf_elfheader (ibfd)->e_flags;
9549 out_flags = elf_elfheader (obfd)->e_flags;
9550
9551 if (elf_flags_init (obfd)
9552 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
9553 && in_flags != out_flags)
9554 {
9555 /* Cannot mix APCS26 and APCS32 code. */
9556 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
9557 return FALSE;
9558
9559 /* Cannot mix float APCS and non-float APCS code. */
9560 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
9561 return FALSE;
9562
9563 /* If the src and dest have different interworking flags
9564 then turn off the interworking bit. */
9565 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
9566 {
9567 if (out_flags & EF_ARM_INTERWORK)
9568 _bfd_error_handler
9569 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
9570 obfd, ibfd);
9571
9572 in_flags &= ~EF_ARM_INTERWORK;
9573 }
9574
9575 /* Likewise for PIC, though don't warn for this case. */
9576 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
9577 in_flags &= ~EF_ARM_PIC;
9578 }
9579
9580 elf_elfheader (obfd)->e_flags = in_flags;
9581 elf_flags_init (obfd) = TRUE;
9582
9583 /* Also copy the EI_OSABI field. */
9584 elf_elfheader (obfd)->e_ident[EI_OSABI] =
9585 elf_elfheader (ibfd)->e_ident[EI_OSABI];
9586
9587 /* Copy object attributes. */
9588 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9589
9590 return TRUE;
9591 }
9592
9593 /* Values for Tag_ABI_PCS_R9_use. */
9594 enum
9595 {
9596 AEABI_R9_V6,
9597 AEABI_R9_SB,
9598 AEABI_R9_TLS,
9599 AEABI_R9_unused
9600 };
9601
9602 /* Values for Tag_ABI_PCS_RW_data. */
9603 enum
9604 {
9605 AEABI_PCS_RW_data_absolute,
9606 AEABI_PCS_RW_data_PCrel,
9607 AEABI_PCS_RW_data_SBrel,
9608 AEABI_PCS_RW_data_unused
9609 };
9610
9611 /* Values for Tag_ABI_enum_size. */
9612 enum
9613 {
9614 AEABI_enum_unused,
9615 AEABI_enum_short,
9616 AEABI_enum_wide,
9617 AEABI_enum_forced_wide
9618 };
9619
9620 /* Determine whether an object attribute tag takes an integer, a
9621 string or both. */
9622
9623 static int
9624 elf32_arm_obj_attrs_arg_type (int tag)
9625 {
9626 if (tag == Tag_compatibility)
9627 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
9628 else if (tag == Tag_nodefaults)
9629 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
9630 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
9631 return ATTR_TYPE_FLAG_STR_VAL;
9632 else if (tag < 32)
9633 return ATTR_TYPE_FLAG_INT_VAL;
9634 else
9635 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
9636 }
9637
9638 /* The ABI defines that Tag_conformance should be emitted first, and that
9639 Tag_nodefaults should be second (if either is defined). This sets those
9640 two positions, and bumps up the position of all the remaining tags to
9641 compensate. */
9642 static int
9643 elf32_arm_obj_attrs_order (int num)
9644 {
9645 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
9646 return Tag_conformance;
9647 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
9648 return Tag_nodefaults;
9649 if ((num - 2) < Tag_nodefaults)
9650 return num - 2;
9651 if ((num - 1) < Tag_conformance)
9652 return num - 1;
9653 return num;
9654 }
9655
9656 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
9657 Returns -1 if no architecture could be read. */
9658
9659 static int
9660 get_secondary_compatible_arch (bfd *abfd)
9661 {
9662 obj_attribute *attr =
9663 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9664
9665 /* Note: the tag and its argument below are uleb128 values, though
9666 currently-defined values fit in one byte for each. */
9667 if (attr->s
9668 && attr->s[0] == Tag_CPU_arch
9669 && (attr->s[1] & 128) != 128
9670 && attr->s[2] == 0)
9671 return attr->s[1];
9672
9673 /* This tag is "safely ignorable", so don't complain if it looks funny. */
9674 return -1;
9675 }
9676
9677 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
9678 The tag is removed if ARCH is -1. */
9679
9680 static void
9681 set_secondary_compatible_arch (bfd *abfd, int arch)
9682 {
9683 obj_attribute *attr =
9684 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9685
9686 if (arch == -1)
9687 {
9688 attr->s = NULL;
9689 return;
9690 }
9691
9692 /* Note: the tag and its argument below are uleb128 values, though
9693 currently-defined values fit in one byte for each. */
9694 if (!attr->s)
9695 attr->s = (char *) bfd_alloc (abfd, 3);
9696 attr->s[0] = Tag_CPU_arch;
9697 attr->s[1] = arch;
9698 attr->s[2] = '\0';
9699 }
9700
9701 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
9702 into account. */
9703
9704 static int
9705 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
9706 int newtag, int secondary_compat)
9707 {
9708 #define T(X) TAG_CPU_ARCH_##X
9709 int tagl, tagh, result;
9710 const int v6t2[] =
9711 {
9712 T(V6T2), /* PRE_V4. */
9713 T(V6T2), /* V4. */
9714 T(V6T2), /* V4T. */
9715 T(V6T2), /* V5T. */
9716 T(V6T2), /* V5TE. */
9717 T(V6T2), /* V5TEJ. */
9718 T(V6T2), /* V6. */
9719 T(V7), /* V6KZ. */
9720 T(V6T2) /* V6T2. */
9721 };
9722 const int v6k[] =
9723 {
9724 T(V6K), /* PRE_V4. */
9725 T(V6K), /* V4. */
9726 T(V6K), /* V4T. */
9727 T(V6K), /* V5T. */
9728 T(V6K), /* V5TE. */
9729 T(V6K), /* V5TEJ. */
9730 T(V6K), /* V6. */
9731 T(V6KZ), /* V6KZ. */
9732 T(V7), /* V6T2. */
9733 T(V6K) /* V6K. */
9734 };
9735 const int v7[] =
9736 {
9737 T(V7), /* PRE_V4. */
9738 T(V7), /* V4. */
9739 T(V7), /* V4T. */
9740 T(V7), /* V5T. */
9741 T(V7), /* V5TE. */
9742 T(V7), /* V5TEJ. */
9743 T(V7), /* V6. */
9744 T(V7), /* V6KZ. */
9745 T(V7), /* V6T2. */
9746 T(V7), /* V6K. */
9747 T(V7) /* V7. */
9748 };
9749 const int v6_m[] =
9750 {
9751 -1, /* PRE_V4. */
9752 -1, /* V4. */
9753 T(V6K), /* V4T. */
9754 T(V6K), /* V5T. */
9755 T(V6K), /* V5TE. */
9756 T(V6K), /* V5TEJ. */
9757 T(V6K), /* V6. */
9758 T(V6KZ), /* V6KZ. */
9759 T(V7), /* V6T2. */
9760 T(V6K), /* V6K. */
9761 T(V7), /* V7. */
9762 T(V6_M) /* V6_M. */
9763 };
9764 const int v6s_m[] =
9765 {
9766 -1, /* PRE_V4. */
9767 -1, /* V4. */
9768 T(V6K), /* V4T. */
9769 T(V6K), /* V5T. */
9770 T(V6K), /* V5TE. */
9771 T(V6K), /* V5TEJ. */
9772 T(V6K), /* V6. */
9773 T(V6KZ), /* V6KZ. */
9774 T(V7), /* V6T2. */
9775 T(V6K), /* V6K. */
9776 T(V7), /* V7. */
9777 T(V6S_M), /* V6_M. */
9778 T(V6S_M) /* V6S_M. */
9779 };
9780 const int v7e_m[] =
9781 {
9782 -1, /* PRE_V4. */
9783 -1, /* V4. */
9784 T(V7E_M), /* V4T. */
9785 T(V7E_M), /* V5T. */
9786 T(V7E_M), /* V5TE. */
9787 T(V7E_M), /* V5TEJ. */
9788 T(V7E_M), /* V6. */
9789 T(V7E_M), /* V6KZ. */
9790 T(V7E_M), /* V6T2. */
9791 T(V7E_M), /* V6K. */
9792 T(V7E_M), /* V7. */
9793 T(V7E_M), /* V6_M. */
9794 T(V7E_M), /* V6S_M. */
9795 T(V7E_M) /* V7E_M. */
9796 };
9797 const int v4t_plus_v6_m[] =
9798 {
9799 -1, /* PRE_V4. */
9800 -1, /* V4. */
9801 T(V4T), /* V4T. */
9802 T(V5T), /* V5T. */
9803 T(V5TE), /* V5TE. */
9804 T(V5TEJ), /* V5TEJ. */
9805 T(V6), /* V6. */
9806 T(V6KZ), /* V6KZ. */
9807 T(V6T2), /* V6T2. */
9808 T(V6K), /* V6K. */
9809 T(V7), /* V7. */
9810 T(V6_M), /* V6_M. */
9811 T(V6S_M), /* V6S_M. */
9812 T(V7E_M), /* V7E_M. */
9813 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
9814 };
9815 const int *comb[] =
9816 {
9817 v6t2,
9818 v6k,
9819 v7,
9820 v6_m,
9821 v6s_m,
9822 v7e_m,
9823 /* Pseudo-architecture. */
9824 v4t_plus_v6_m
9825 };
9826
9827 /* Check we've not got a higher architecture than we know about. */
9828
9829 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
9830 {
9831 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
9832 return -1;
9833 }
9834
9835 /* Override old tag if we have a Tag_also_compatible_with on the output. */
9836
9837 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
9838 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
9839 oldtag = T(V4T_PLUS_V6_M);
9840
9841 /* And override the new tag if we have a Tag_also_compatible_with on the
9842 input. */
9843
9844 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
9845 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
9846 newtag = T(V4T_PLUS_V6_M);
9847
9848 tagl = (oldtag < newtag) ? oldtag : newtag;
9849 result = tagh = (oldtag > newtag) ? oldtag : newtag;
9850
9851 /* Architectures before V6KZ add features monotonically. */
9852 if (tagh <= TAG_CPU_ARCH_V6KZ)
9853 return result;
9854
9855 result = comb[tagh - T(V6T2)][tagl];
9856
9857 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
9858 as the canonical version. */
9859 if (result == T(V4T_PLUS_V6_M))
9860 {
9861 result = T(V4T);
9862 *secondary_compat_out = T(V6_M);
9863 }
9864 else
9865 *secondary_compat_out = -1;
9866
9867 if (result == -1)
9868 {
9869 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
9870 ibfd, oldtag, newtag);
9871 return -1;
9872 }
9873
9874 return result;
9875 #undef T
9876 }
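/* Illustrative note, not part of the original source: a worked example of
   the lookup in tag_cpu_arch_combine above, assuming the usual
   TAG_CPU_ARCH_* numbering (V6KZ == 7, V6T2 == 8, V6K == 9, V7 == 10).
   Combining an output of V6K with an input of V6T2, with no secondary
   compatibility tags, gives

     tagl = T(V6T2), tagh = T(V6K)          (tagh > TAG_CPU_ARCH_V6KZ)
     result = comb[T(V6K) - T(V6T2)][T(V6T2)] = v6k[T(V6T2)] = T(V7)

   i.e. the merged architecture is v7, the first architecture providing
   both v6k and v6t2 features, matching the v6k[] row above.  */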
9877
9878 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
9879 are conflicting attributes. */
9880
9881 static bfd_boolean
9882 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
9883 {
9884 obj_attribute *in_attr;
9885 obj_attribute *out_attr;
9886 obj_attribute_list *in_list;
9887 obj_attribute_list *out_list;
9888 obj_attribute_list **out_listp;
9889 /* Some tags have 0 = don't care, 1 = strong requirement,
9890 2 = weak requirement. */
9891 static const int order_021[3] = {0, 2, 1};
9892 int i;
9893 bfd_boolean result = TRUE;
9894
9895 /* Skip the linker stubs file. This preserves previous behavior
9896 of accepting unknown attributes in the first input file - but
9897 is that a bug? */
9898 if (ibfd->flags & BFD_LINKER_CREATED)
9899 return TRUE;
9900
9901 if (!elf_known_obj_attributes_proc (obfd)[0].i)
9902 {
9903 /* This is the first object. Copy the attributes. */
9904 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9905
9906 out_attr = elf_known_obj_attributes_proc (obfd);
9907
9908 /* Use the Tag_null value to indicate the attributes have been
9909 initialized. */
9910 out_attr[0].i = 1;
9911
9912 /* We do not output objects with Tag_MPextension_use_legacy - we move
9913 the attribute's value to Tag_MPextension_use. */
9914 if (out_attr[Tag_MPextension_use_legacy].i != 0)
9915 {
9916 if (out_attr[Tag_MPextension_use].i != 0
9917 && out_attr[Tag_MPextension_use_legacy].i
9918 != out_attr[Tag_MPextension_use].i)
9919 {
9920 _bfd_error_handler
9921 (_("Error: %B has both the current and legacy "
9922 "Tag_MPextension_use attributes"), ibfd);
9923 result = FALSE;
9924 }
9925
9926 out_attr[Tag_MPextension_use] =
9927 out_attr[Tag_MPextension_use_legacy];
9928 out_attr[Tag_MPextension_use_legacy].type = 0;
9929 out_attr[Tag_MPextension_use_legacy].i = 0;
9930 }
9931
9932 return result;
9933 }
9934
9935 in_attr = elf_known_obj_attributes_proc (ibfd);
9936 out_attr = elf_known_obj_attributes_proc (obfd);
9937 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
9938 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
9939 {
9940 /* Ignore mismatches if the object doesn't use floating point. */
9941 if (out_attr[Tag_ABI_FP_number_model].i == 0)
9942 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
9943 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
9944 {
9945 _bfd_error_handler
9946 (_("error: %B uses VFP register arguments, %B does not"),
9947 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
9948 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
9949 result = FALSE;
9950 }
9951 }
9952
9953 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
9954 {
9955 /* Merge this attribute with existing attributes. */
9956 switch (i)
9957 {
9958 case Tag_CPU_raw_name:
9959 case Tag_CPU_name:
9960 /* These are merged after Tag_CPU_arch. */
9961 break;
9962
9963 case Tag_ABI_optimization_goals:
9964 case Tag_ABI_FP_optimization_goals:
9965 /* Use the first value seen. */
9966 break;
9967
9968 case Tag_CPU_arch:
9969 {
9970 int secondary_compat = -1, secondary_compat_out = -1;
9971 unsigned int saved_out_attr = out_attr[i].i;
9972 static const char *name_table[] = {
9973 /* These aren't real CPU names, but we can't guess
9974 that from the architecture version alone. */
9975 "Pre v4",
9976 "ARM v4",
9977 "ARM v4T",
9978 "ARM v5T",
9979 "ARM v5TE",
9980 "ARM v5TEJ",
9981 "ARM v6",
9982 "ARM v6KZ",
9983 "ARM v6T2",
9984 "ARM v6K",
9985 "ARM v7",
9986 "ARM v6-M",
9987 "ARM v6S-M"
9988 };
9989
9990 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
9991 secondary_compat = get_secondary_compatible_arch (ibfd);
9992 secondary_compat_out = get_secondary_compatible_arch (obfd);
9993 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
9994 &secondary_compat_out,
9995 in_attr[i].i,
9996 secondary_compat);
9997 set_secondary_compatible_arch (obfd, secondary_compat_out);
9998
9999 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
10000 if (out_attr[i].i == saved_out_attr)
10001 ; /* Leave the names alone. */
10002 else if (out_attr[i].i == in_attr[i].i)
10003 {
10004 /* The output architecture has been changed to match the
10005 input architecture. Use the input names. */
10006 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
10007 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
10008 : NULL;
10009 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
10010 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
10011 : NULL;
10012 }
10013 else
10014 {
10015 out_attr[Tag_CPU_name].s = NULL;
10016 out_attr[Tag_CPU_raw_name].s = NULL;
10017 }
10018
10019 /* If we still don't have a value for Tag_CPU_name,
10020 make one up now. Tag_CPU_raw_name remains blank. */
10021 if (out_attr[Tag_CPU_name].s == NULL
10022 && out_attr[i].i < ARRAY_SIZE (name_table))
10023 out_attr[Tag_CPU_name].s =
10024 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
10025 }
10026 break;
10027
10028 case Tag_ARM_ISA_use:
10029 case Tag_THUMB_ISA_use:
10030 case Tag_WMMX_arch:
10031 case Tag_Advanced_SIMD_arch:
10032 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
10033 case Tag_ABI_FP_rounding:
10034 case Tag_ABI_FP_exceptions:
10035 case Tag_ABI_FP_user_exceptions:
10036 case Tag_ABI_FP_number_model:
10037 case Tag_FP_HP_extension:
10038 case Tag_CPU_unaligned_access:
10039 case Tag_T2EE_use:
10040 case Tag_MPextension_use:
10041 /* Use the largest value specified. */
10042 if (in_attr[i].i > out_attr[i].i)
10043 out_attr[i].i = in_attr[i].i;
10044 break;
10045
10046 case Tag_ABI_align_preserved:
10047 case Tag_ABI_PCS_RO_data:
10048 /* Use the smallest value specified. */
10049 if (in_attr[i].i < out_attr[i].i)
10050 out_attr[i].i = in_attr[i].i;
10051 break;
10052
10053 case Tag_ABI_align_needed:
10054 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
10055 && (in_attr[Tag_ABI_align_preserved].i == 0
10056 || out_attr[Tag_ABI_align_preserved].i == 0))
10057 {
10058 /* This error message should be enabled once all non-conformant
10059 binaries in the toolchain have had the attributes set
10060 properly.
10061 _bfd_error_handler
10062 (_("error: %B: 8-byte data alignment conflicts with %B"),
10063 obfd, ibfd);
10064 result = FALSE; */
10065 }
10066 /* Fall through. */
10067 case Tag_ABI_FP_denormal:
10068 case Tag_ABI_PCS_GOT_use:
10069 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
10070 value if greater than 2 (for future-proofing). */
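/* Illustrative note, not part of the original source: with
   order_021 = {0, 2, 1}, a strong requirement (1) beats a weak one (2)
   and a weak requirement beats "don't care" (0); e.g. merging an input
   of 1 with an output of 2 yields 1, while an input of 2 against an
   output of 1 leaves the output at 1.  */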
10071 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
10072 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
10073 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
10074 out_attr[i].i = in_attr[i].i;
10075 break;
10076
10077 case Tag_Virtualization_use:
10078 /* The virtualization tag effectively stores two bits of
10079 information: the intended use of TrustZone (in bit 0), and the
10080 intended use of Virtualization (in bit 1). */
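/* Illustrative note, not part of the original source: e.g. merging an
   input of 1 (TrustZone only) with an output of 2 (Virtualization only)
   yields 3, i.e. both uses are intended.  */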
10081 if (out_attr[i].i == 0)
10082 out_attr[i].i = in_attr[i].i;
10083 else if (in_attr[i].i != 0
10084 && in_attr[i].i != out_attr[i].i)
10085 {
10086 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
10087 out_attr[i].i = 3;
10088 else
10089 {
10090 _bfd_error_handler
10091 (_("error: %B: unable to merge virtualization attributes "
10092 "with %B"),
10093 obfd, ibfd);
10094 result = FALSE;
10095 }
10096 }
10097 break;
10098
10099 case Tag_CPU_arch_profile:
10100 if (out_attr[i].i != in_attr[i].i)
10101 {
10102 /* 0 will merge with anything.
10103 'A' and 'S' merge to 'A'.
10104 'R' and 'S' merge to 'R'.
10105 'M' and 'A|R|S' is an error. */
10106 if (out_attr[i].i == 0
10107 || (out_attr[i].i == 'S'
10108 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
10109 out_attr[i].i = in_attr[i].i;
10110 else if (in_attr[i].i == 0
10111 || (in_attr[i].i == 'S'
10112 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
10113 ; /* Do nothing. */
10114 else
10115 {
10116 _bfd_error_handler
10117 (_("error: %B: Conflicting architecture profiles %c/%c"),
10118 ibfd,
10119 in_attr[i].i ? in_attr[i].i : '0',
10120 out_attr[i].i ? out_attr[i].i : '0');
10121 result = FALSE;
10122 }
10123 }
10124 break;
10125 case Tag_FP_arch:
10126 {
10127 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
10128 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
10129 when it's 0. It might mean absence of FP hardware if
10130 Tag_FP_arch is zero, otherwise it is effectively SP + DP. */
10131
10132 static const struct
10133 {
10134 int ver;
10135 int regs;
10136 } vfp_versions[7] =
10137 {
10138 {0, 0},
10139 {1, 16},
10140 {2, 16},
10141 {3, 32},
10142 {3, 16},
10143 {4, 32},
10144 {4, 16}
10145 };
10146 int ver;
10147 int regs;
10148 int newval;
10149
10150 /* If the output has no requirement about FP hardware,
10151 follow the requirement of the input. */
10152 if (out_attr[i].i == 0)
10153 {
10154 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
10155 out_attr[i].i = in_attr[i].i;
10156 out_attr[Tag_ABI_HardFP_use].i
10157 = in_attr[Tag_ABI_HardFP_use].i;
10158 break;
10159 }
10160 /* If the input has no requirement about FP hardware, do
10161 nothing. */
10162 else if (in_attr[i].i == 0)
10163 {
10164 BFD_ASSERT (in_attr[Tag_ABI_HardFP_use].i == 0);
10165 break;
10166 }
10167
10168 /* Both the input and the output have nonzero Tag_FP_arch.
10169 So Tag_ABI_HardFP_use is (SP & DP) when it's zero. */
10170
10171 /* If both the input and the output have zero Tag_ABI_HardFP_use,
10172 do nothing. */
10173 if (in_attr[Tag_ABI_HardFP_use].i == 0
10174 && out_attr[Tag_ABI_HardFP_use].i == 0)
10175 ;
10176 /* If the input and the output have different Tag_ABI_HardFP_use,
10177 the combination of them is 3 (SP & DP). */
10178 else if (in_attr[Tag_ABI_HardFP_use].i
10179 != out_attr[Tag_ABI_HardFP_use].i)
10180 out_attr[Tag_ABI_HardFP_use].i = 3;
10181
10182 /* Now we can handle Tag_FP_arch. */
10183
10184 /* Values greater than 6 aren't defined, so just pick the
10185 biggest.  */
10186 if (in_attr[i].i > 6 && in_attr[i].i > out_attr[i].i)
10187 {
10188 out_attr[i] = in_attr[i];
10189 break;
10190 }
10191 /* The output uses the superset of input features
10192 (ISA version) and registers. */
10193 ver = vfp_versions[in_attr[i].i].ver;
10194 if (ver < vfp_versions[out_attr[i].i].ver)
10195 ver = vfp_versions[out_attr[i].i].ver;
10196 regs = vfp_versions[in_attr[i].i].regs;
10197 if (regs < vfp_versions[out_attr[i].i].regs)
10198 regs = vfp_versions[out_attr[i].i].regs;
10199 /* This assumes all possible supersets are also valid
10200 options. */
10201 for (newval = 6; newval > 0; newval--)
10202 {
10203 if (regs == vfp_versions[newval].regs
10204 && ver == vfp_versions[newval].ver)
10205 break;
10206 }
10207 out_attr[i].i = newval;
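/* Illustrative note, not part of the original source: with the
   vfp_versions[] encoding above, merging an input of 4 (ver 3, 16 regs,
   i.e. VFPv3-D16) with an output of 2 (ver 2, 16 regs, i.e. VFPv2)
   takes ver = 3 and regs = 16, and the downward search then selects
   newval = 4.  */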
10208 }
10209 break;
10210 case Tag_PCS_config:
10211 if (out_attr[i].i == 0)
10212 out_attr[i].i = in_attr[i].i;
10213 else if (in_attr[i].i != 0 && out_attr[i].i != 0)
10214 {
10215 /* It's sometimes ok to mix different configs, so this is only
10216 a warning. */
10217 _bfd_error_handler
10218 (_("Warning: %B: Conflicting platform configuration"), ibfd);
10219 }
10220 break;
10221 case Tag_ABI_PCS_R9_use:
10222 if (in_attr[i].i != out_attr[i].i
10223 && out_attr[i].i != AEABI_R9_unused
10224 && in_attr[i].i != AEABI_R9_unused)
10225 {
10226 _bfd_error_handler
10227 (_("error: %B: Conflicting use of R9"), ibfd);
10228 result = FALSE;
10229 }
10230 if (out_attr[i].i == AEABI_R9_unused)
10231 out_attr[i].i = in_attr[i].i;
10232 break;
10233 case Tag_ABI_PCS_RW_data:
10234 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
10235 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
10236 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
10237 {
10238 _bfd_error_handler
10239 (_("error: %B: SB relative addressing conflicts with use of R9"),
10240 ibfd);
10241 result = FALSE;
10242 }
10243 /* Use the smallest value specified. */
10244 if (in_attr[i].i < out_attr[i].i)
10245 out_attr[i].i = in_attr[i].i;
10246 break;
10247 case Tag_ABI_PCS_wchar_t:
10248 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
10249 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
10250 {
10251 _bfd_error_handler
10252 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
10253 ibfd, in_attr[i].i, out_attr[i].i);
10254 }
10255 else if (in_attr[i].i && !out_attr[i].i)
10256 out_attr[i].i = in_attr[i].i;
10257 break;
10258 case Tag_ABI_enum_size:
10259 if (in_attr[i].i != AEABI_enum_unused)
10260 {
10261 if (out_attr[i].i == AEABI_enum_unused
10262 || out_attr[i].i == AEABI_enum_forced_wide)
10263 {
10264 /* The existing object is compatible with anything.
10265 Use whatever requirements the new object has. */
10266 out_attr[i].i = in_attr[i].i;
10267 }
10268 else if (in_attr[i].i != AEABI_enum_forced_wide
10269 && out_attr[i].i != in_attr[i].i
10270 && !elf_arm_tdata (obfd)->no_enum_size_warning)
10271 {
10272 static const char *aeabi_enum_names[] =
10273 { "", "variable-size", "32-bit", "" };
10274 const char *in_name =
10275 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
10276 ? aeabi_enum_names[in_attr[i].i]
10277 : "<unknown>";
10278 const char *out_name =
10279 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
10280 ? aeabi_enum_names[out_attr[i].i]
10281 : "<unknown>";
10282 _bfd_error_handler
10283 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
10284 ibfd, in_name, out_name);
10285 }
10286 }
10287 break;
10288 case Tag_ABI_VFP_args:
10289 /* Already done. */
10290 break;
10291 case Tag_ABI_WMMX_args:
10292 if (in_attr[i].i != out_attr[i].i)
10293 {
10294 _bfd_error_handler
10295 (_("error: %B uses iWMMXt register arguments, %B does not"),
10296 ibfd, obfd);
10297 result = FALSE;
10298 }
10299 break;
10300 case Tag_compatibility:
10301 /* Merged in target-independent code. */
10302 break;
10303 case Tag_ABI_HardFP_use:
10304 /* This is handled along with Tag_FP_arch. */
10305 break;
10306 case Tag_ABI_FP_16bit_format:
10307 if (in_attr[i].i != 0 && out_attr[i].i != 0)
10308 {
10309 if (in_attr[i].i != out_attr[i].i)
10310 {
10311 _bfd_error_handler
10312 (_("error: fp16 format mismatch between %B and %B"),
10313 ibfd, obfd);
10314 result = FALSE;
10315 }
10316 }
10317 if (in_attr[i].i != 0)
10318 out_attr[i].i = in_attr[i].i;
10319 break;
10320
10321 case Tag_DIV_use:
10322 /* This tag is set to zero if we can use UDIV and SDIV in Thumb
10323 mode on a v7-M or v7-R CPU; to one if we cannot use UDIV or
10324 SDIV at all; and to two if we can use UDIV or SDIV on a v7-A
10325 CPU.  We will merge as follows: if the input attribute's value
10326 is one then the output attribute's value remains unchanged.  If
10327 the input attribute's value is zero or two then, if the output
10328 attribute's value is one, the output value is set to the input
10329 value; otherwise the output value must be the same as the
10330 input value. */
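/* Illustrative note, not part of the original source: the resulting
   merge behaviour, with the input value down the side and the output
   value across the top (cells marked ! also report a mismatch):

              out 0   out 1   out 2
     in 0       0       0      0 !
     in 1       0       1       2
     in 2      2 !      2       2   */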
10331 if (in_attr[i].i != 1 && out_attr[i].i != 1)
10332 {
10333 if (in_attr[i].i != out_attr[i].i)
10334 {
10335 _bfd_error_handler
10336 (_("DIV usage mismatch between %B and %B"),
10337 ibfd, obfd);
10338 result = FALSE;
10339 }
10340 }
10341
10342 if (in_attr[i].i != 1)
10343 out_attr[i].i = in_attr[i].i;
10344
10345 break;
10346
10347 case Tag_MPextension_use_legacy:
10348 /* We don't output objects with Tag_MPextension_use_legacy - we
10349 move the value to Tag_MPextension_use. */
10350 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
10351 {
10352 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
10353 {
10354 _bfd_error_handler
10355 (_("%B has has both the current and legacy "
10356 "Tag_MPextension_use attributes"),
10357 ibfd);
10358 result = FALSE;
10359 }
10360 }
10361
10362 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
10363 out_attr[Tag_MPextension_use] = in_attr[i];
10364
10365 break;
10366
10367 case Tag_nodefaults:
10368 /* This tag is set if it exists, but the value is unused (and is
10369 typically zero). We don't actually need to do anything here -
10370 the merge happens automatically when the type flags are merged
10371 below. */
10372 break;
10373 case Tag_also_compatible_with:
10374 /* Already done in Tag_CPU_arch. */
10375 break;
10376 case Tag_conformance:
10377 /* Keep the attribute if it matches. Throw it away otherwise.
10378 No attribute means no claim to conform. */
10379 if (!in_attr[i].s || !out_attr[i].s
10380 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
10381 out_attr[i].s = NULL;
10382 break;
10383
10384 default:
10385 {
10386 bfd *err_bfd = NULL;
10387
10388 /* The "known_obj_attributes" table does contain some undefined
10389 attributes.  Ensure that they are unused. */
10390 if (out_attr[i].i != 0 || out_attr[i].s != NULL)
10391 err_bfd = obfd;
10392 else if (in_attr[i].i != 0 || in_attr[i].s != NULL)
10393 err_bfd = ibfd;
10394
10395 if (err_bfd != NULL)
10396 {
10397 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
10398 if ((i & 127) < 64)
10399 {
10400 _bfd_error_handler
10401 (_("%B: Unknown mandatory EABI object attribute %d"),
10402 err_bfd, i);
10403 bfd_set_error (bfd_error_bad_value);
10404 result = FALSE;
10405 }
10406 else
10407 {
10408 _bfd_error_handler
10409 (_("Warning: %B: Unknown EABI object attribute %d"),
10410 err_bfd, i);
10411 }
10412 }
10413
10414 /* Only pass on attributes that match in both inputs. */
10415 if (in_attr[i].i != out_attr[i].i
10416 || in_attr[i].s != out_attr[i].s
10417 || (in_attr[i].s != NULL && out_attr[i].s != NULL
10418 && strcmp (in_attr[i].s, out_attr[i].s) != 0))
10419 {
10420 out_attr[i].i = 0;
10421 out_attr[i].s = NULL;
10422 }
10423 }
10424 }
10425
10426 /* If out_attr was copied from in_attr then it won't have a type yet. */
10427 if (in_attr[i].type && !out_attr[i].type)
10428 out_attr[i].type = in_attr[i].type;
10429 }
10430
10431 /* Merge Tag_compatibility attributes and any common GNU ones. */
10432 if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
10433 return FALSE;
10434
10435 /* Check for any attributes not known on ARM. */
10436 in_list = elf_other_obj_attributes_proc (ibfd);
10437 out_listp = &elf_other_obj_attributes_proc (obfd);
10438 out_list = *out_listp;
10439
10440 for (; in_list || out_list; )
10441 {
10442 bfd *err_bfd = NULL;
10443 int err_tag = 0;
10444
10445 /* The tags for each list are in numerical order. */
10446 /* If the tags are equal, then merge. */
10447 if (out_list && (!in_list || in_list->tag > out_list->tag))
10448 {
10449 /* This attribute only exists in obfd. We can't merge, and we don't
10450 know what the tag means, so delete it. */
10451 err_bfd = obfd;
10452 err_tag = out_list->tag;
10453 *out_listp = out_list->next;
10454 out_list = *out_listp;
10455 }
10456 else if (in_list && (!out_list || in_list->tag < out_list->tag))
10457 {
10458 /* This attribute only exists in ibfd. We can't merge, and we don't
10459 know what the tag means, so ignore it. */
10460 err_bfd = ibfd;
10461 err_tag = in_list->tag;
10462 in_list = in_list->next;
10463 }
10464 else /* The tags are equal. */
10465 {
10466 /* At present, all attributes in the list are unknown, and
10467 therefore can't be merged meaningfully. */
10468 err_bfd = obfd;
10469 err_tag = out_list->tag;
10470
10471 /* Only pass on attributes that match in both inputs. */
10472 if (in_list->attr.i != out_list->attr.i
10473 || in_list->attr.s != out_list->attr.s
10474 || (in_list->attr.s && out_list->attr.s
10475 && strcmp (in_list->attr.s, out_list->attr.s) != 0))
10476 {
10477 /* No match. Delete the attribute. */
10478 *out_listp = out_list->next;
10479 out_list = *out_listp;
10480 }
10481 else
10482 {
10483 /* Matched. Keep the attribute and move to the next. */
10484 out_list = out_list->next;
10485 in_list = in_list->next;
10486 }
10487 }
10488
10489 if (err_bfd)
10490 {
10491 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
10492 if ((err_tag & 127) < 64)
10493 {
10494 _bfd_error_handler
10495 (_("%B: Unknown mandatory EABI object attribute %d"),
10496 err_bfd, err_tag);
10497 bfd_set_error (bfd_error_bad_value);
10498 result = FALSE;
10499 }
10500 else
10501 {
10502 _bfd_error_handler
10503 (_("Warning: %B: Unknown EABI object attribute %d"),
10504 err_bfd, err_tag);
10505 }
10506 }
10507 }
10508 return result;
10509 }
10510
10511
10512 /* Return TRUE if the two EABI versions are incompatible. */
10513
10514 static bfd_boolean
10515 elf32_arm_versions_compatible (unsigned iver, unsigned over)
10516 {
10517 /* v4 and v5 are the same spec before and after it was released,
10518 so allow mixing them. */
10519 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
10520 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
10521 return TRUE;
10522
10523 return (iver == over);
10524 }
10525
10526 /* Merge backend specific data from an object file to the output
10527 object file when linking. */
10528
10529 static bfd_boolean
10530 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
10531
10532 /* Display the flags field. */
10533
10534 static bfd_boolean
10535 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
10536 {
10537 FILE * file = (FILE *) ptr;
10538 unsigned long flags;
10539
10540 BFD_ASSERT (abfd != NULL && ptr != NULL);
10541
10542 /* Print normal ELF private data. */
10543 _bfd_elf_print_private_bfd_data (abfd, ptr);
10544
10545 flags = elf_elfheader (abfd)->e_flags;
10546 /* Ignore init flag - it may not be set, despite the flags field
10547 containing valid data. */
10548
10549 /* xgettext:c-format */
10550 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
10551
10552 switch (EF_ARM_EABI_VERSION (flags))
10553 {
10554 case EF_ARM_EABI_UNKNOWN:
10555 /* The following flag bits are GNU extensions and not part of the
10556 official ARM ELF extended ABI. Hence they are only decoded if
10557 the EABI version is not set. */
10558 if (flags & EF_ARM_INTERWORK)
10559 fprintf (file, _(" [interworking enabled]"));
10560
10561 if (flags & EF_ARM_APCS_26)
10562 fprintf (file, " [APCS-26]");
10563 else
10564 fprintf (file, " [APCS-32]");
10565
10566 if (flags & EF_ARM_VFP_FLOAT)
10567 fprintf (file, _(" [VFP float format]"));
10568 else if (flags & EF_ARM_MAVERICK_FLOAT)
10569 fprintf (file, _(" [Maverick float format]"));
10570 else
10571 fprintf (file, _(" [FPA float format]"));
10572
10573 if (flags & EF_ARM_APCS_FLOAT)
10574 fprintf (file, _(" [floats passed in float registers]"));
10575
10576 if (flags & EF_ARM_PIC)
10577 fprintf (file, _(" [position independent]"));
10578
10579 if (flags & EF_ARM_NEW_ABI)
10580 fprintf (file, _(" [new ABI]"));
10581
10582 if (flags & EF_ARM_OLD_ABI)
10583 fprintf (file, _(" [old ABI]"));
10584
10585 if (flags & EF_ARM_SOFT_FLOAT)
10586 fprintf (file, _(" [software FP]"));
10587
10588 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
10589 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
10590 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
10591 | EF_ARM_MAVERICK_FLOAT);
10592 break;
10593
10594 case EF_ARM_EABI_VER1:
10595 fprintf (file, _(" [Version1 EABI]"));
10596
10597 if (flags & EF_ARM_SYMSARESORTED)
10598 fprintf (file, _(" [sorted symbol table]"));
10599 else
10600 fprintf (file, _(" [unsorted symbol table]"));
10601
10602 flags &= ~ EF_ARM_SYMSARESORTED;
10603 break;
10604
10605 case EF_ARM_EABI_VER2:
10606 fprintf (file, _(" [Version2 EABI]"));
10607
10608 if (flags & EF_ARM_SYMSARESORTED)
10609 fprintf (file, _(" [sorted symbol table]"));
10610 else
10611 fprintf (file, _(" [unsorted symbol table]"));
10612
10613 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
10614 fprintf (file, _(" [dynamic symbols use segment index]"));
10615
10616 if (flags & EF_ARM_MAPSYMSFIRST)
10617 fprintf (file, _(" [mapping symbols precede others]"));
10618
10619 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
10620 | EF_ARM_MAPSYMSFIRST);
10621 break;
10622
10623 case EF_ARM_EABI_VER3:
10624 fprintf (file, _(" [Version3 EABI]"));
10625 break;
10626
10627 case EF_ARM_EABI_VER4:
10628 fprintf (file, _(" [Version4 EABI]"));
10629 goto eabi;
10630
10631 case EF_ARM_EABI_VER5:
10632 fprintf (file, _(" [Version5 EABI]"));
10633 eabi:
10634 if (flags & EF_ARM_BE8)
10635 fprintf (file, _(" [BE8]"));
10636
10637 if (flags & EF_ARM_LE8)
10638 fprintf (file, _(" [LE8]"));
10639
10640 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
10641 break;
10642
10643 default:
10644 fprintf (file, _(" <EABI version unrecognised>"));
10645 break;
10646 }
10647
10648 flags &= ~ EF_ARM_EABIMASK;
10649
10650 if (flags & EF_ARM_RELEXEC)
10651 fprintf (file, _(" [relocatable executable]"));
10652
10653 if (flags & EF_ARM_HASENTRY)
10654 fprintf (file, _(" [has entry point]"));
10655
10656 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);
10657
10658 if (flags)
10659 fprintf (file, _("<Unrecognised flag bits set>"));
10660
10661 fputc ('\n', file);
10662
10663 return TRUE;
10664 }
10665
10666 static int
10667 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
10668 {
10669 switch (ELF_ST_TYPE (elf_sym->st_info))
10670 {
10671 case STT_ARM_TFUNC:
10672 return ELF_ST_TYPE (elf_sym->st_info);
10673
10674 case STT_ARM_16BIT:
10675 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
10676 This allows us to distinguish between data used by Thumb instructions
10677 and non-data (which is probably code) inside Thumb regions of an
10678 executable. */
10679 if (type != STT_OBJECT && type != STT_TLS)
10680 return ELF_ST_TYPE (elf_sym->st_info);
10681 break;
10682
10683 default:
10684 break;
10685 }
10686
10687 return type;
10688 }
10689
10690 static asection *
10691 elf32_arm_gc_mark_hook (asection *sec,
10692 struct bfd_link_info *info,
10693 Elf_Internal_Rela *rel,
10694 struct elf_link_hash_entry *h,
10695 Elf_Internal_Sym *sym)
10696 {
10697 if (h != NULL)
10698 switch (ELF32_R_TYPE (rel->r_info))
10699 {
10700 case R_ARM_GNU_VTINHERIT:
10701 case R_ARM_GNU_VTENTRY:
10702 return NULL;
10703 }
10704
10705 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
10706 }
10707
10708 /* Update the got entry reference counts for the section being removed. */
10709
10710 static bfd_boolean
10711 elf32_arm_gc_sweep_hook (bfd * abfd,
10712 struct bfd_link_info * info,
10713 asection * sec,
10714 const Elf_Internal_Rela * relocs)
10715 {
10716 Elf_Internal_Shdr *symtab_hdr;
10717 struct elf_link_hash_entry **sym_hashes;
10718 bfd_signed_vma *local_got_refcounts;
10719 const Elf_Internal_Rela *rel, *relend;
10720 struct elf32_arm_link_hash_table * globals;
10721
10722 if (info->relocatable)
10723 return TRUE;
10724
10725 globals = elf32_arm_hash_table (info);
10726 if (globals == NULL)
10727 return FALSE;
10728
10729 elf_section_data (sec)->local_dynrel = NULL;
10730
10731 symtab_hdr = & elf_symtab_hdr (abfd);
10732 sym_hashes = elf_sym_hashes (abfd);
10733 local_got_refcounts = elf_local_got_refcounts (abfd);
10734
10735 check_use_blx (globals);
10736
10737 relend = relocs + sec->reloc_count;
10738 for (rel = relocs; rel < relend; rel++)
10739 {
10740 unsigned long r_symndx;
10741 struct elf_link_hash_entry *h = NULL;
10742 int r_type;
10743
10744 r_symndx = ELF32_R_SYM (rel->r_info);
10745 if (r_symndx >= symtab_hdr->sh_info)
10746 {
10747 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10748 while (h->root.type == bfd_link_hash_indirect
10749 || h->root.type == bfd_link_hash_warning)
10750 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10751 }
10752
10753 r_type = ELF32_R_TYPE (rel->r_info);
10754 r_type = arm_real_reloc_type (globals, r_type);
10755 switch (r_type)
10756 {
10757 case R_ARM_GOT32:
10758 case R_ARM_GOT_PREL:
10759 case R_ARM_TLS_GD32:
10760 case R_ARM_TLS_IE32:
10761 if (h != NULL)
10762 {
10763 if (h->got.refcount > 0)
10764 h->got.refcount -= 1;
10765 }
10766 else if (local_got_refcounts != NULL)
10767 {
10768 if (local_got_refcounts[r_symndx] > 0)
10769 local_got_refcounts[r_symndx] -= 1;
10770 }
10771 break;
10772
10773 case R_ARM_TLS_LDM32:
10774 globals->tls_ldm_got.refcount -= 1;
10775 break;
10776
10777 case R_ARM_ABS32:
10778 case R_ARM_ABS32_NOI:
10779 case R_ARM_REL32:
10780 case R_ARM_REL32_NOI:
10781 case R_ARM_PC24:
10782 case R_ARM_PLT32:
10783 case R_ARM_CALL:
10784 case R_ARM_JUMP24:
10785 case R_ARM_PREL31:
10786 case R_ARM_THM_CALL:
10787 case R_ARM_THM_JUMP24:
10788 case R_ARM_THM_JUMP19:
10789 case R_ARM_MOVW_ABS_NC:
10790 case R_ARM_MOVT_ABS:
10791 case R_ARM_MOVW_PREL_NC:
10792 case R_ARM_MOVT_PREL:
10793 case R_ARM_THM_MOVW_ABS_NC:
10794 case R_ARM_THM_MOVT_ABS:
10795 case R_ARM_THM_MOVW_PREL_NC:
10796 case R_ARM_THM_MOVT_PREL:
10797 /* Should the interworking branches be here also? */
10798
10799 if (h != NULL)
10800 {
10801 struct elf32_arm_link_hash_entry *eh;
10802 struct elf32_arm_relocs_copied **pp;
10803 struct elf32_arm_relocs_copied *p;
10804
10805 eh = (struct elf32_arm_link_hash_entry *) h;
10806
10807 if (h->plt.refcount > 0)
10808 {
10809 h->plt.refcount -= 1;
10810 if (r_type == R_ARM_THM_CALL)
10811 eh->plt_maybe_thumb_refcount--;
10812
10813 if (r_type == R_ARM_THM_JUMP24
10814 || r_type == R_ARM_THM_JUMP19)
10815 eh->plt_thumb_refcount--;
10816 }
10817
10818 if (r_type == R_ARM_ABS32
10819 || r_type == R_ARM_REL32
10820 || r_type == R_ARM_ABS32_NOI
10821 || r_type == R_ARM_REL32_NOI)
10822 {
10823 for (pp = &eh->relocs_copied; (p = *pp) != NULL;
10824 pp = &p->next)
10825 if (p->section == sec)
10826 {
10827 p->count -= 1;
10828 if (ELF32_R_TYPE (rel->r_info) == R_ARM_REL32
10829 || ELF32_R_TYPE (rel->r_info) == R_ARM_REL32_NOI)
10830 p->pc_count -= 1;
10831 if (p->count == 0)
10832 *pp = p->next;
10833 break;
10834 }
10835 }
10836 }
10837 break;
10838
10839 default:
10840 break;
10841 }
10842 }
10843
10844 return TRUE;
10845 }
10846
10847 /* Look through the relocs for a section during the first phase. */
10848
10849 static bfd_boolean
10850 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
10851 asection *sec, const Elf_Internal_Rela *relocs)
10852 {
10853 Elf_Internal_Shdr *symtab_hdr;
10854 struct elf_link_hash_entry **sym_hashes;
10855 const Elf_Internal_Rela *rel;
10856 const Elf_Internal_Rela *rel_end;
10857 bfd *dynobj;
10858 asection *sreloc;
10859 struct elf32_arm_link_hash_table *htab;
10860 bfd_boolean needs_plt;
10861 unsigned long nsyms;
10862
10863 if (info->relocatable)
10864 return TRUE;
10865
10866 BFD_ASSERT (is_arm_elf (abfd));
10867
10868 htab = elf32_arm_hash_table (info);
10869 if (htab == NULL)
10870 return FALSE;
10871
10872 sreloc = NULL;
10873
10874 /* Create dynamic sections for relocatable executables so that we can
10875 copy relocations. */
10876 if (htab->root.is_relocatable_executable
10877 && ! htab->root.dynamic_sections_created)
10878 {
10879 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
10880 return FALSE;
10881 }
10882
10883 dynobj = elf_hash_table (info)->dynobj;
10884 symtab_hdr = & elf_symtab_hdr (abfd);
10885 sym_hashes = elf_sym_hashes (abfd);
10886 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
10887
10888 rel_end = relocs + sec->reloc_count;
10889 for (rel = relocs; rel < rel_end; rel++)
10890 {
10891 struct elf_link_hash_entry *h;
10892 struct elf32_arm_link_hash_entry *eh;
10893 unsigned long r_symndx;
10894 int r_type;
10895
10896 r_symndx = ELF32_R_SYM (rel->r_info);
10897 r_type = ELF32_R_TYPE (rel->r_info);
10898 r_type = arm_real_reloc_type (htab, r_type);
10899
10900 if (r_symndx >= nsyms
10901 /* PR 9934: It is possible to have relocations that do not
10902 refer to symbols, thus it is also possible to have an
10903 object file containing relocations but no symbol table. */
10904 && (r_symndx > STN_UNDEF || nsyms > 0))
10905 {
10906 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
10907 r_symndx);
10908 return FALSE;
10909 }
10910
10911 if (nsyms == 0 || r_symndx < symtab_hdr->sh_info)
10912 h = NULL;
10913 else
10914 {
10915 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10916 while (h->root.type == bfd_link_hash_indirect
10917 || h->root.type == bfd_link_hash_warning)
10918 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10919 }
10920
10921 eh = (struct elf32_arm_link_hash_entry *) h;
10922
10923 switch (r_type)
10924 {
10925 case R_ARM_GOT32:
10926 case R_ARM_GOT_PREL:
10927 case R_ARM_TLS_GD32:
10928 case R_ARM_TLS_IE32:
10929 /* This symbol requires a global offset table entry. */
10930 {
10931 int tls_type, old_tls_type;
10932
10933 switch (r_type)
10934 {
10935 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
10936 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
10937 default: tls_type = GOT_NORMAL; break;
10938 }
10939
10940 if (h != NULL)
10941 {
10942 h->got.refcount++;
10943 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
10944 }
10945 else
10946 {
10947 bfd_signed_vma *local_got_refcounts;
10948
10949 /* This is a global offset table entry for a local symbol. */
10950 local_got_refcounts = elf_local_got_refcounts (abfd);
10951 if (local_got_refcounts == NULL)
10952 {
10953 bfd_size_type size;
10954
10955 size = symtab_hdr->sh_info;
10956 size *= (sizeof (bfd_signed_vma) + sizeof (char));
10957 local_got_refcounts = (bfd_signed_vma *)
10958 bfd_zalloc (abfd, size);
10959 if (local_got_refcounts == NULL)
10960 return FALSE;
10961 elf_local_got_refcounts (abfd) = local_got_refcounts;
10962 elf32_arm_local_got_tls_type (abfd)
10963 = (char *) (local_got_refcounts + symtab_hdr->sh_info);
10964 }
10965 local_got_refcounts[r_symndx] += 1;
10966 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
10967 }
10968
10969 /* We will already have issued an error message if there is a
10970 TLS / non-TLS mismatch, based on the symbol type. We don't
10971 support any linker relaxations. So just combine any TLS
10972 types needed. */
10973 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
10974 && tls_type != GOT_NORMAL)
10975 tls_type |= old_tls_type;
10976
10977 if (old_tls_type != tls_type)
10978 {
10979 if (h != NULL)
10980 elf32_arm_hash_entry (h)->tls_type = tls_type;
10981 else
10982 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
10983 }
10984 }
10985 /* Fall through. */
10986
10987 case R_ARM_TLS_LDM32:
10988 if (r_type == R_ARM_TLS_LDM32)
10989 htab->tls_ldm_got.refcount++;
10990 /* Fall through. */
10991
10992 case R_ARM_GOTOFF32:
10993 case R_ARM_GOTPC:
10994 if (htab->sgot == NULL)
10995 {
10996 if (htab->root.dynobj == NULL)
10997 htab->root.dynobj = abfd;
10998 if (!create_got_section (htab->root.dynobj, info))
10999 return FALSE;
11000 }
11001 break;
11002
11003 case R_ARM_ABS12:
11004 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
11005 ldr __GOTT_INDEX__ offsets. */
11006 if (!htab->vxworks_p)
11007 break;
11008 /* Fall through. */
11009
11010 case R_ARM_PC24:
11011 case R_ARM_PLT32:
11012 case R_ARM_CALL:
11013 case R_ARM_JUMP24:
11014 case R_ARM_PREL31:
11015 case R_ARM_THM_CALL:
11016 case R_ARM_THM_JUMP24:
11017 case R_ARM_THM_JUMP19:
11018 needs_plt = 1;
11019 goto normal_reloc;
11020
11021 case R_ARM_MOVW_ABS_NC:
11022 case R_ARM_MOVT_ABS:
11023 case R_ARM_THM_MOVW_ABS_NC:
11024 case R_ARM_THM_MOVT_ABS:
11025 if (info->shared)
11026 {
11027 (*_bfd_error_handler)
11028 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
11029 abfd, elf32_arm_howto_table_1[r_type].name,
11030 (h) ? h->root.root.string : "a local symbol");
11031 bfd_set_error (bfd_error_bad_value);
11032 return FALSE;
11033 }
11034
11035 /* Fall through. */
11036 case R_ARM_ABS32:
11037 case R_ARM_ABS32_NOI:
11038 case R_ARM_REL32:
11039 case R_ARM_REL32_NOI:
11040 case R_ARM_MOVW_PREL_NC:
11041 case R_ARM_MOVT_PREL:
11042 case R_ARM_THM_MOVW_PREL_NC:
11043 case R_ARM_THM_MOVT_PREL:
11044 needs_plt = 0;
11045 normal_reloc:
11046
11047 /* Should the interworking branches be listed here? */
11048 if (h != NULL)
11049 {
11050 /* If this reloc is in a read-only section, we might
11051 need a copy reloc. We can't check reliably at this
11052 stage whether the section is read-only, as input
11053 sections have not yet been mapped to output sections.
11054 Tentatively set the flag for now, and correct in
11055 adjust_dynamic_symbol. */
11056 if (!info->shared)
11057 h->non_got_ref = 1;
11058
11059 /* We may need a .plt entry if the function this reloc
11060 refers to is in a different object. We can't tell for
11061 sure yet, because something later might force the
11062 symbol local. */
11063 if (needs_plt)
11064 h->needs_plt = 1;
11065
11066 /* If we create a PLT entry, this relocation will reference
11067 it, even if it's an ABS32 relocation. */
11068 h->plt.refcount += 1;
11069
11070 /* It's too early to use htab->use_blx here, so we have to
11071 record possible blx references separately from
11072 relocs that definitely need a thumb stub. */
11073
11074 if (r_type == R_ARM_THM_CALL)
11075 eh->plt_maybe_thumb_refcount += 1;
11076
11077 if (r_type == R_ARM_THM_JUMP24
11078 || r_type == R_ARM_THM_JUMP19)
11079 eh->plt_thumb_refcount += 1;
11080 }
11081
11082 /* If we are creating a shared library or relocatable executable,
11083 and this is a reloc against a global symbol, or a non PC
11084 relative reloc against a local symbol, then we need to copy
11085 the reloc into the shared library. However, if we are linking
11086 with -Bsymbolic, we do not need to copy a reloc against a
11087 global symbol which is defined in an object we are
11088 including in the link (i.e., DEF_REGULAR is set). At
11089 this point we have not seen all the input files, so it is
11090 possible that DEF_REGULAR is not set now but will be set
11091 later (it is never cleared). We account for that
11092 possibility below by storing information in the
11093 relocs_copied field of the hash table entry. */
11094 if ((info->shared || htab->root.is_relocatable_executable)
11095 && (sec->flags & SEC_ALLOC) != 0
11096 && ((r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI)
11097 || (h != NULL && ! h->needs_plt
11098 && (! info->symbolic || ! h->def_regular))))
11099 {
11100 struct elf32_arm_relocs_copied *p, **head;
11101
11102 /* When creating a shared object, we must copy these
11103 reloc types into the output file. We create a reloc
11104 section in dynobj and make room for this reloc. */
11105 if (sreloc == NULL)
11106 {
11107 sreloc = _bfd_elf_make_dynamic_reloc_section
11108 (sec, dynobj, 2, abfd, ! htab->use_rel);
11109
11110 if (sreloc == NULL)
11111 return FALSE;
11112
11113 /* BPABI objects never have dynamic relocations mapped. */
11114 if (htab->symbian_p)
11115 {
11116 flagword flags;
11117
11118 flags = bfd_get_section_flags (dynobj, sreloc);
11119 flags &= ~(SEC_LOAD | SEC_ALLOC);
11120 bfd_set_section_flags (dynobj, sreloc, flags);
11121 }
11122 }
11123
11124 /* If this is a global symbol, we count the number of
11125 relocations we need for this symbol. */
11126 if (h != NULL)
11127 {
11128 head = &((struct elf32_arm_link_hash_entry *) h)->relocs_copied;
11129 }
11130 else
11131 {
11132 /* Track dynamic relocs needed for local syms too.
11133 We really need local syms available to do this
11134 easily. Oh well. */
11135 asection *s;
11136 void *vpp;
11137 Elf_Internal_Sym *isym;
11138
11139 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
11140 abfd, r_symndx);
11141 if (isym == NULL)
11142 return FALSE;
11143
11144 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
11145 if (s == NULL)
11146 s = sec;
11147
11148 vpp = &elf_section_data (s)->local_dynrel;
11149 head = (struct elf32_arm_relocs_copied **) vpp;
11150 }
11151
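/* Relocation entries are grouped per input section; since check_relocs
sees a section's relocs consecutively, a new counter is only needed
when the list head does not already describe SEC. */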
11152 p = *head;
11153 if (p == NULL || p->section != sec)
11154 {
11155 bfd_size_type amt = sizeof *p;
11156
11157 p = (struct elf32_arm_relocs_copied *)
11158 bfd_alloc (htab->root.dynobj, amt);
11159 if (p == NULL)
11160 return FALSE;
11161 p->next = *head;
11162 *head = p;
11163 p->section = sec;
11164 p->count = 0;
11165 p->pc_count = 0;
11166 }
11167
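/* PC-relative relocations are counted separately so that
allocate_dynrelocs can discard them later if the symbol turns out
to resolve locally. */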
11168 if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
11169 p->pc_count += 1;
11170 p->count += 1;
11171 }
11172 break;
11173
11174 /* This relocation describes the C++ object vtable hierarchy.
11175 Reconstruct it for later use during GC. */
11176 case R_ARM_GNU_VTINHERIT:
11177 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
11178 return FALSE;
11179 break;
11180
11181 /* This relocation describes which C++ vtable entries are actually
11182 used. Record for later use during GC. */
11183 case R_ARM_GNU_VTENTRY:
11184 BFD_ASSERT (h != NULL);
11185 if (h != NULL
11186 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
11187 return FALSE;
11188 break;
11189 }
11190 }
11191
11192 return TRUE;
11193 }
11194
11195 /* Unwinding tables are not referenced directly. This pass marks them as
11196 required if the corresponding code section is marked. */
11197
11198 static bfd_boolean
11199 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
11200 elf_gc_mark_hook_fn gc_mark_hook)
11201 {
11202 bfd *sub;
11203 Elf_Internal_Shdr **elf_shdrp;
11204 bfd_boolean again;
11205
11206 /* Marking EH data may cause additional code sections to be marked,
11207 requiring multiple passes. */
11208 again = TRUE;
11209 while (again)
11210 {
11211 again = FALSE;
11212 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
11213 {
11214 asection *o;
11215
11216 if (! is_arm_elf (sub))
11217 continue;
11218
11219 elf_shdrp = elf_elfsections (sub);
11220 for (o = sub->sections; o != NULL; o = o->next)
11221 {
11222 Elf_Internal_Shdr *hdr;
11223
11224 hdr = &elf_section_data (o)->this_hdr;
11225 if (hdr->sh_type == SHT_ARM_EXIDX
11226 && hdr->sh_link
11227 && hdr->sh_link < elf_numsections (sub)
11228 && !o->gc_mark
11229 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
11230 {
11231 again = TRUE;
11232 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
11233 return FALSE;
11234 }
11235 }
11236 }
11237 }
11238
11239 return TRUE;
11240 }
11241
11242 /* Treat mapping symbols as special target symbols. */
11243
11244 static bfd_boolean
11245 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
11246 {
11247 return bfd_is_arm_special_symbol_name (sym->name,
11248 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
11249 }
11250
11251 /* This is a copy of elf_find_function() from elf.c except that
11252 ARM mapping symbols are ignored when looking for function names
11253 and STT_ARM_TFUNC is considered to be a function type. */
11254
11255 static bfd_boolean
11256 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
11257 asection * section,
11258 asymbol ** symbols,
11259 bfd_vma offset,
11260 const char ** filename_ptr,
11261 const char ** functionname_ptr)
11262 {
11263 const char * filename = NULL;
11264 asymbol * func = NULL;
11265 bfd_vma low_func = 0;
11266 asymbol ** p;
11267
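/* Walk the symbol table, remembering the most recent STT_FILE name
and the closest function-like symbol that precedes OFFSET in
SECTION. */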
11268 for (p = symbols; *p != NULL; p++)
11269 {
11270 elf_symbol_type *q;
11271
11272 q = (elf_symbol_type *) *p;
11273
11274 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
11275 {
11276 default:
11277 break;
11278 case STT_FILE:
11279 filename = bfd_asymbol_name (&q->symbol);
11280 break;
11281 case STT_FUNC:
11282 case STT_ARM_TFUNC:
11283 case STT_NOTYPE:
11284 /* Skip mapping symbols. */
11285 if ((q->symbol.flags & BSF_LOCAL)
11286 && bfd_is_arm_special_symbol_name (q->symbol.name,
11287 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
11288 continue;
11289 /* Otherwise fall through to the address check below. */
11290 if (bfd_get_section (&q->symbol) == section
11291 && q->symbol.value >= low_func
11292 && q->symbol.value <= offset)
11293 {
11294 func = (asymbol *) q;
11295 low_func = q->symbol.value;
11296 }
11297 break;
11298 }
11299 }
11300
11301 if (func == NULL)
11302 return FALSE;
11303
11304 if (filename_ptr)
11305 *filename_ptr = filename;
11306 if (functionname_ptr)
11307 *functionname_ptr = bfd_asymbol_name (func);
11308
11309 return TRUE;
11310 }
11311
11312
11313 /* Find the nearest line to a particular section and offset, for error
11314 reporting. This code is a duplicate of the code in elf.c, except
11315 that it uses arm_elf_find_function. */
11316
11317 static bfd_boolean
11318 elf32_arm_find_nearest_line (bfd * abfd,
11319 asection * section,
11320 asymbol ** symbols,
11321 bfd_vma offset,
11322 const char ** filename_ptr,
11323 const char ** functionname_ptr,
11324 unsigned int * line_ptr)
11325 {
11326 bfd_boolean found = FALSE;
11327
11328 /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
11329
11330 if (_bfd_dwarf2_find_nearest_line (abfd, section, symbols, offset,
11331 filename_ptr, functionname_ptr,
11332 line_ptr, 0,
11333 & elf_tdata (abfd)->dwarf2_find_line_info))
11334 {
11335 if (!*functionname_ptr)
11336 arm_elf_find_function (abfd, section, symbols, offset,
11337 *filename_ptr ? NULL : filename_ptr,
11338 functionname_ptr);
11339
11340 return TRUE;
11341 }
11342
11343 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
11344 & found, filename_ptr,
11345 functionname_ptr, line_ptr,
11346 & elf_tdata (abfd)->line_info))
11347 return FALSE;
11348
11349 if (found && (*functionname_ptr || *line_ptr))
11350 return TRUE;
11351
11352 if (symbols == NULL)
11353 return FALSE;
11354
11355 if (! arm_elf_find_function (abfd, section, symbols, offset,
11356 filename_ptr, functionname_ptr))
11357 return FALSE;
11358
11359 *line_ptr = 0;
11360 return TRUE;
11361 }
11362
11363 static bfd_boolean
11364 elf32_arm_find_inliner_info (bfd * abfd,
11365 const char ** filename_ptr,
11366 const char ** functionname_ptr,
11367 unsigned int * line_ptr)
11368 {
11369 bfd_boolean found;
11370 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
11371 functionname_ptr, line_ptr,
11372 & elf_tdata (abfd)->dwarf2_find_line_info);
11373 return found;
11374 }
11375
11376 /* Adjust a symbol defined by a dynamic object and referenced by a
11377 regular object. The current definition is in some section of the
11378 dynamic object, but we're not including those sections. We have to
11379 change the definition to something the rest of the link can
11380 understand. */
11381
11382 static bfd_boolean
11383 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
11384 struct elf_link_hash_entry * h)
11385 {
11386 bfd * dynobj;
11387 asection * s;
11388 struct elf32_arm_link_hash_entry * eh;
11389 struct elf32_arm_link_hash_table *globals;
11390
11391 globals = elf32_arm_hash_table (info);
11392 if (globals == NULL)
11393 return FALSE;
11394
11395 dynobj = elf_hash_table (info)->dynobj;
11396
11397 /* Make sure we know what is going on here. */
11398 BFD_ASSERT (dynobj != NULL
11399 && (h->needs_plt
11400 || h->u.weakdef != NULL
11401 || (h->def_dynamic
11402 && h->ref_regular
11403 && !h->def_regular)));
11404
11405 eh = (struct elf32_arm_link_hash_entry *) h;
11406
11407 /* If this is a function, put it in the procedure linkage table. We
11408 will fill in the contents of the procedure linkage table later,
11409 when we know the address of the .got section. */
11410 if (h->type == STT_FUNC || h->type == STT_ARM_TFUNC
11411 || h->needs_plt)
11412 {
11413 if (h->plt.refcount <= 0
11414 || SYMBOL_CALLS_LOCAL (info, h)
11415 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
11416 && h->root.type == bfd_link_hash_undefweak))
11417 {
11418 /* This case can occur if we saw a PLT32 reloc in an input
11419 file, but the symbol was never referred to by a dynamic
11420 object, or if all references were garbage collected. In
11421 such a case, we don't actually need to build a procedure
11422 linkage table, and we can just do a PC24 reloc instead. */
11423 h->plt.offset = (bfd_vma) -1;
11424 eh->plt_thumb_refcount = 0;
11425 eh->plt_maybe_thumb_refcount = 0;
11426 h->needs_plt = 0;
11427 }
11428
11429 return TRUE;
11430 }
11431 else
11432 {
11433 /* It's possible that we incorrectly decided a .plt reloc was
11434 needed for an R_ARM_PC24 or similar reloc to a non-function sym
11435 in check_relocs. We can't decide accurately between function
11436 and non-function syms in check_relocs; objects loaded later in
11437 the link may change h->type. So fix it now. */
11438 h->plt.offset = (bfd_vma) -1;
11439 eh->plt_thumb_refcount = 0;
11440 eh->plt_maybe_thumb_refcount = 0;
11441 }
11442
11443 /* If this is a weak symbol, and there is a real definition, the
11444 processor independent code will have arranged for us to see the
11445 real definition first, and we can just use the same value. */
11446 if (h->u.weakdef != NULL)
11447 {
11448 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
11449 || h->u.weakdef->root.type == bfd_link_hash_defweak);
11450 h->root.u.def.section = h->u.weakdef->root.u.def.section;
11451 h->root.u.def.value = h->u.weakdef->root.u.def.value;
11452 return TRUE;
11453 }
11454
11455 /* If there are no non-GOT references, we do not need a copy
11456 relocation. */
11457 if (!h->non_got_ref)
11458 return TRUE;
11459
11460 /* This is a reference to a symbol defined by a dynamic object which
11461 is not a function. */
11462
11463 /* If we are creating a shared library, we must presume that the
11464 only references to the symbol are via the global offset table.
11465 For such cases we need not do anything here; the relocations will
11466 be handled correctly by relocate_section. Relocatable executables
11467 can reference data in shared objects directly, so we don't need to
11468 do anything here. */
11469 if (info->shared || globals->root.is_relocatable_executable)
11470 return TRUE;
11471
11472 if (h->size == 0)
11473 {
11474 (*_bfd_error_handler) (_("dynamic variable `%s' is zero size"),
11475 h->root.root.string);
11476 return TRUE;
11477 }
11478
11479 /* We must allocate the symbol in our .dynbss section, which will
11480 become part of the .bss section of the executable. There will be
11481 an entry for this symbol in the .dynsym section. The dynamic
11482 object will contain position independent code, so all references
11483 from the dynamic object to this symbol will go through the global
11484 offset table. The dynamic linker will use the .dynsym entry to
11485 determine the address it must put in the global offset table, so
11486 both the dynamic object and the regular object will refer to the
11487 same memory location for the variable. */
11488 s = bfd_get_section_by_name (dynobj, ".dynbss");
11489 BFD_ASSERT (s != NULL);
11490
11491 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
11492 copy the initial value out of the dynamic object and into the
11493 runtime process image. We need to remember the offset into the
11494 .rel(a).bss section we are going to use. */
11495 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0)
11496 {
11497 asection *srel;
11498
11499 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (globals, ".bss"));
11500 BFD_ASSERT (srel != NULL);
11501 srel->size += RELOC_SIZE (globals);
11502 h->needs_copy = 1;
11503 }
11504
11505 return _bfd_elf_adjust_dynamic_copy (h, s);
11506 }
11507
11508 /* Allocate space in .plt, .got and associated reloc sections for
11509 dynamic relocs. */
11510
11511 static bfd_boolean
11512 allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
11513 {
11514 struct bfd_link_info *info;
11515 struct elf32_arm_link_hash_table *htab;
11516 struct elf32_arm_link_hash_entry *eh;
11517 struct elf32_arm_relocs_copied *p;
11518 bfd_signed_vma thumb_refs;
11519
11520 eh = (struct elf32_arm_link_hash_entry *) h;
11521
11522 if (h->root.type == bfd_link_hash_indirect)
11523 return TRUE;
11524
11525 if (h->root.type == bfd_link_hash_warning)
11526 /* When warning symbols are created, they **replace** the "real"
11527 entry in the hash table, thus we never get to see the real
11528 symbol in a hash traversal. So look at it now. */
11529 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11530
11531 info = (struct bfd_link_info *) inf;
11532 htab = elf32_arm_hash_table (info);
11533 if (htab == NULL)
11534 return FALSE;
11535
11536 if (htab->root.dynamic_sections_created
11537 && h->plt.refcount > 0)
11538 {
11539 /* Make sure this symbol is output as a dynamic symbol.
11540 Undefined weak syms won't yet be marked as dynamic. */
11541 if (h->dynindx == -1
11542 && !h->forced_local)
11543 {
11544 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11545 return FALSE;
11546 }
11547
11548 if (info->shared
11549 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
11550 {
11551 asection *s = htab->splt;
11552
11553 /* If this is the first .plt entry, make room for the special
11554 first entry. */
11555 if (s->size == 0)
11556 s->size += htab->plt_header_size;
11557
11558 h->plt.offset = s->size;
11559
11560 /* If we will insert a Thumb trampoline before this PLT, leave room
11561 for it. */
11562 thumb_refs = eh->plt_thumb_refcount;
11563 if (!htab->use_blx)
11564 thumb_refs += eh->plt_maybe_thumb_refcount;
11565
11566 if (thumb_refs > 0)
11567 {
11568 h->plt.offset += PLT_THUMB_STUB_SIZE;
11569 s->size += PLT_THUMB_STUB_SIZE;
11570 }
11571
11572 /* If this symbol is not defined in a regular file, and we are
11573 not generating a shared library, then set the symbol to this
11574 location in the .plt. This is required to make function
11575 pointers compare as equal between the normal executable and
11576 the shared library. */
11577 if (! info->shared
11578 && !h->def_regular)
11579 {
11580 h->root.u.def.section = s;
11581 h->root.u.def.value = h->plt.offset;
11582
11583 /* Make sure the function is not marked as Thumb, in case
11584 it is the target of an ABS32 relocation, which will
11585 point to the PLT entry. */
11586 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
11587 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11588 }
11589
11590 /* Make room for this entry. */
11591 s->size += htab->plt_entry_size;
11592
11593 if (!htab->symbian_p)
11594 {
11595 /* We also need to make an entry in the .got.plt section, which
11596 will be placed in the .got section by the linker script. */
11597 eh->plt_got_offset = htab->sgotplt->size;
11598 htab->sgotplt->size += 4;
11599 }
11600
11601 /* We also need to make an entry in the .rel(a).plt section. */
11602 htab->srelplt->size += RELOC_SIZE (htab);
11603
11604 /* VxWorks executables have a second set of relocations for
11605 each PLT entry. They go in a separate relocation section,
11606 which is processed by the kernel loader. */
11607 if (htab->vxworks_p && !info->shared)
11608 {
11609 /* There is a relocation for the initial PLT entry:
11610 an R_ARM_ABS32 relocation for _GLOBAL_OFFSET_TABLE_. */
11611 if (h->plt.offset == htab->plt_header_size)
11612 htab->srelplt2->size += RELOC_SIZE (htab);
11613
11614 /* There are two extra relocations for each subsequent
11615 PLT entry: an R_ARM_ABS32 relocation for the GOT entry,
11616 and an R_ARM_ABS32 relocation for the PLT entry. */
11617 htab->srelplt2->size += RELOC_SIZE (htab) * 2;
11618 }
11619 }
11620 else
11621 {
11622 h->plt.offset = (bfd_vma) -1;
11623 h->needs_plt = 0;
11624 }
11625 }
11626 else
11627 {
11628 h->plt.offset = (bfd_vma) -1;
11629 h->needs_plt = 0;
11630 }
11631
11632 if (h->got.refcount > 0)
11633 {
11634 asection *s;
11635 bfd_boolean dyn;
11636 int tls_type = elf32_arm_hash_entry (h)->tls_type;
11637 int indx;
11638
11639 /* Make sure this symbol is output as a dynamic symbol.
11640 Undefined weak syms won't yet be marked as dynamic. */
11641 if (h->dynindx == -1
11642 && !h->forced_local)
11643 {
11644 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11645 return FALSE;
11646 }
11647
11648 if (!htab->symbian_p)
11649 {
11650 s = htab->sgot;
11651 h->got.offset = s->size;
11652
11653 if (tls_type == GOT_UNKNOWN)
11654 abort ();
11655
11656 if (tls_type == GOT_NORMAL)
11657 /* Non-TLS symbols need one GOT slot. */
11658 s->size += 4;
11659 else
11660 {
11661 if (tls_type & GOT_TLS_GD)
11662 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. */
11663 s->size += 8;
11664 if (tls_type & GOT_TLS_IE)
11665 /* R_ARM_TLS_IE32 needs one GOT slot. */
11666 s->size += 4;
11667 }
11668
11669 dyn = htab->root.dynamic_sections_created;
11670
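/* Decide whether the GOT entry needs a dynamic relocation against the
symbol itself (indx != 0) rather than a relative one; that is the
case when the symbol may be resolved in another object at run time. */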
11671 indx = 0;
11672 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
11673 && (!info->shared
11674 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11675 indx = h->dynindx;
11676
11677 if (tls_type != GOT_NORMAL
11678 && (info->shared || indx != 0)
11679 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11680 || h->root.type != bfd_link_hash_undefweak))
11681 {
11682 if (tls_type & GOT_TLS_IE)
11683 htab->srelgot->size += RELOC_SIZE (htab);
11684
11685 if (tls_type & GOT_TLS_GD)
11686 htab->srelgot->size += RELOC_SIZE (htab);
11687
11688 if ((tls_type & GOT_TLS_GD) && indx != 0)
11689 htab->srelgot->size += RELOC_SIZE (htab);
11690 }
11691 else if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11692 || h->root.type != bfd_link_hash_undefweak)
11693 && (info->shared
11694 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
11695 htab->srelgot->size += RELOC_SIZE (htab);
11696 }
11697 }
11698 else
11699 h->got.offset = (bfd_vma) -1;
11700
11701 /* Allocate stubs for exported Thumb functions on v4t. */
11702 if (!htab->use_blx && h->dynindx != -1
11703 && h->def_regular
11704 && ELF_ST_TYPE (h->type) == STT_ARM_TFUNC
11705 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
11706 {
11707 struct elf_link_hash_entry * th;
11708 struct bfd_link_hash_entry * bh;
11709 struct elf_link_hash_entry * myh;
11710 char name[1024];
11711 asection *s;
11712 bh = NULL;
11713 /* Create a new symbol to register the real location of the function. */
11714 s = h->root.u.def.section;
11715 sprintf (name, "__real_%s", h->root.root.string);
11716 _bfd_generic_link_add_one_symbol (info, s->owner,
11717 name, BSF_GLOBAL, s,
11718 h->root.u.def.value,
11719 NULL, TRUE, FALSE, &bh);
11720
11721 myh = (struct elf_link_hash_entry *) bh;
11722 myh->type = ELF_ST_INFO (STB_LOCAL, STT_ARM_TFUNC);
11723 myh->forced_local = 1;
11724 eh->export_glue = myh;
11725 th = record_arm_to_thumb_glue (info, h);
11726 /* Point the symbol at the stub. */
11727 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11728 h->root.u.def.section = th->root.u.def.section;
11729 h->root.u.def.value = th->root.u.def.value & ~1;
11730 }
11731
11732 if (eh->relocs_copied == NULL)
11733 return TRUE;
11734
11735 /* In the shared -Bsymbolic case, discard space allocated for
11736 dynamic pc-relative relocs against symbols which turn out to be
11737 defined in regular objects. For the normal shared case, discard
11738 space for pc-relative relocs that have become local due to symbol
11739 visibility changes. */
11740
11741 if (info->shared || htab->root.is_relocatable_executable)
11742 {
11743 /* The only relocs that use pc_count are R_ARM_REL32 and
11744 R_ARM_REL32_NOI, which will appear on something like
11745 ".long foo - .". We want calls to protected symbols to resolve
11746 directly to the function rather than going via the plt. If people
11747 want function pointer comparisons to work as expected then they
11748 should avoid writing assembly like ".long foo - .". */
11749 if (SYMBOL_CALLS_LOCAL (info, h))
11750 {
11751 struct elf32_arm_relocs_copied **pp;
11752
11753 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11754 {
11755 p->count -= p->pc_count;
11756 p->pc_count = 0;
11757 if (p->count == 0)
11758 *pp = p->next;
11759 else
11760 pp = &p->next;
11761 }
11762 }
11763
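/* Relocations against VxWorks .tls_vars sections are handled by the
loader itself, so discard any we have recorded for them. */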
11764 if (htab->vxworks_p)
11765 {
11766 struct elf32_arm_relocs_copied **pp;
11767
11768 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11769 {
11770 if (strcmp (p->section->output_section->name, ".tls_vars") == 0)
11771 *pp = p->next;
11772 else
11773 pp = &p->next;
11774 }
11775 }
11776
11777 /* Also discard relocs on undefined weak syms with non-default
11778 visibility. */
11779 if (eh->relocs_copied != NULL
11780 && h->root.type == bfd_link_hash_undefweak)
11781 {
11782 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
11783 eh->relocs_copied = NULL;
11784
11785 /* Make sure undefined weak symbols are output as a dynamic
11786 symbol in PIEs. */
11787 else if (h->dynindx == -1
11788 && !h->forced_local)
11789 {
11790 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11791 return FALSE;
11792 }
11793 }
11794
11795 else if (htab->root.is_relocatable_executable && h->dynindx == -1
11796 && h->root.type == bfd_link_hash_new)
11797 {
11798 /* Output absolute symbols so that we can create relocations
11799 against them. For normal symbols we output a relocation
11800 against the section that contains them. */
11801 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11802 return FALSE;
11803 }
11804
11805 }
11806 else
11807 {
11808 /* For the non-shared case, discard space for relocs against
11809 symbols which turn out to need copy relocs or are not
11810 dynamic. */
11811
11812 if (!h->non_got_ref
11813 && ((h->def_dynamic
11814 && !h->def_regular)
11815 || (htab->root.dynamic_sections_created
11816 && (h->root.type == bfd_link_hash_undefweak
11817 || h->root.type == bfd_link_hash_undefined))))
11818 {
11819 /* Make sure this symbol is output as a dynamic symbol.
11820 Undefined weak syms won't yet be marked as dynamic. */
11821 if (h->dynindx == -1
11822 && !h->forced_local)
11823 {
11824 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11825 return FALSE;
11826 }
11827
11828 /* If that succeeded, we know we'll be keeping all the
11829 relocs. */
11830 if (h->dynindx != -1)
11831 goto keep;
11832 }
11833
11834 eh->relocs_copied = NULL;
11835
11836 keep: ;
11837 }
11838
11839 /* Finally, allocate space. */
11840 for (p = eh->relocs_copied; p != NULL; p = p->next)
11841 {
11842 asection *sreloc = elf_section_data (p->section)->sreloc;
11843 sreloc->size += p->count * RELOC_SIZE (htab);
11844 }
11845
11846 return TRUE;
11847 }
11848
11849 /* Find any dynamic relocs that apply to read-only sections. */
11850
11851 static bfd_boolean
11852 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
11853 {
11854 struct elf32_arm_link_hash_entry * eh;
11855 struct elf32_arm_relocs_copied * p;
11856
11857 if (h->root.type == bfd_link_hash_warning)
11858 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11859
11860 eh = (struct elf32_arm_link_hash_entry *) h;
11861 for (p = eh->relocs_copied; p != NULL; p = p->next)
11862 {
11863 asection *s = p->section;
11864
11865 if (s != NULL && (s->flags & SEC_READONLY) != 0)
11866 {
11867 struct bfd_link_info *info = (struct bfd_link_info *) inf;
11868
11869 info->flags |= DF_TEXTREL;
11870
11871 /* Not an error, just cut short the traversal. */
11872 return FALSE;
11873 }
11874 }
11875 return TRUE;
11876 }
11877
11878 void
11879 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
11880 int byteswap_code)
11881 {
11882 struct elf32_arm_link_hash_table *globals;
11883
11884 globals = elf32_arm_hash_table (info);
11885 if (globals == NULL)
11886 return;
11887
11888 globals->byteswap_code = byteswap_code;
11889 }
11890
11891 /* Set the sizes of the dynamic sections. */
11892
11893 static bfd_boolean
11894 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
11895 struct bfd_link_info * info)
11896 {
11897 bfd * dynobj;
11898 asection * s;
11899 bfd_boolean plt;
11900 bfd_boolean relocs;
11901 bfd *ibfd;
11902 struct elf32_arm_link_hash_table *htab;
11903
11904 htab = elf32_arm_hash_table (info);
11905 if (htab == NULL)
11906 return FALSE;
11907
11908 dynobj = elf_hash_table (info)->dynobj;
11909 BFD_ASSERT (dynobj != NULL);
11910 check_use_blx (htab);
11911
11912 if (elf_hash_table (info)->dynamic_sections_created)
11913 {
11914 /* Set the contents of the .interp section to the interpreter. */
11915 if (info->executable)
11916 {
11917 s = bfd_get_section_by_name (dynobj, ".interp");
11918 BFD_ASSERT (s != NULL);
11919 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
11920 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
11921 }
11922 }
11923
11924 /* Set up .got offsets for local syms, and space for local dynamic
11925 relocs. */
11926 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11927 {
11928 bfd_signed_vma *local_got;
11929 bfd_signed_vma *end_local_got;
11930 char *local_tls_type;
11931 bfd_size_type locsymcount;
11932 Elf_Internal_Shdr *symtab_hdr;
11933 asection *srel;
11934 bfd_boolean is_vxworks = htab->vxworks_p;
11935
11936 if (! is_arm_elf (ibfd))
11937 continue;
11938
11939 for (s = ibfd->sections; s != NULL; s = s->next)
11940 {
11941 struct elf32_arm_relocs_copied *p;
11942
11943 for (p = (struct elf32_arm_relocs_copied *)
11944 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
11945 {
11946 if (!bfd_is_abs_section (p->section)
11947 && bfd_is_abs_section (p->section->output_section))
11948 {
11949 /* Input section has been discarded, either because
11950 it is a copy of a linkonce section or due to
11951 linker script /DISCARD/, so we'll be discarding
11952 the relocs too. */
11953 }
11954 else if (is_vxworks
11955 && strcmp (p->section->output_section->name,
11956 ".tls_vars") == 0)
11957 {
11958 /* Relocations in vxworks .tls_vars sections are
11959 handled specially by the loader. */
11960 }
11961 else if (p->count != 0)
11962 {
11963 srel = elf_section_data (p->section)->sreloc;
11964 srel->size += p->count * RELOC_SIZE (htab);
11965 if ((p->section->output_section->flags & SEC_READONLY) != 0)
11966 info->flags |= DF_TEXTREL;
11967 }
11968 }
11969 }
11970
11971 local_got = elf_local_got_refcounts (ibfd);
11972 if (!local_got)
11973 continue;
11974
11975 symtab_hdr = & elf_symtab_hdr (ibfd);
11976 locsymcount = symtab_hdr->sh_info;
11977 end_local_got = local_got + locsymcount;
11978 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
11979 s = htab->sgot;
11980 srel = htab->srelgot;
11981 for (; local_got < end_local_got; ++local_got, ++local_tls_type)
11982 {
11983 if (*local_got > 0)
11984 {
11985 *local_got = s->size;
11986 if (*local_tls_type & GOT_TLS_GD)
11987 /* TLS_GD relocs need an 8-byte structure in the GOT. */
11988 s->size += 8;
11989 if (*local_tls_type & GOT_TLS_IE)
11990 s->size += 4;
11991 if (*local_tls_type == GOT_NORMAL)
11992 s->size += 4;
11993
11994 if (info->shared || *local_tls_type == GOT_TLS_GD)
11995 srel->size += RELOC_SIZE (htab);
11996 }
11997 else
11998 *local_got = (bfd_vma) -1;
11999 }
12000 }
12001
12002 if (htab->tls_ldm_got.refcount > 0)
12003 {
12004 /* Allocate two GOT entries and one dynamic relocation (if necessary)
12005 for R_ARM_TLS_LDM32 relocations. */
12006 htab->tls_ldm_got.offset = htab->sgot->size;
12007 htab->sgot->size += 8;
12008 if (info->shared)
12009 htab->srelgot->size += RELOC_SIZE (htab);
12010 }
12011 else
12012 htab->tls_ldm_got.offset = -1;
12013
12014 /* Allocate global sym .plt and .got entries, and space for global
12015 sym dynamic relocs. */
12016 elf_link_hash_traverse (& htab->root, allocate_dynrelocs, info);
12017
12018 /* Here we rummage through the found bfds to collect glue information. */
12019 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
12020 {
12021 if (! is_arm_elf (ibfd))
12022 continue;
12023
12024 /* Initialise mapping tables for code/data. */
12025 bfd_elf32_arm_init_maps (ibfd);
12026
12027 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
12028 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
12029 /* xgettext:c-format */
12030 _bfd_error_handler (_("Errors encountered processing file %s"),
12031 ibfd->filename);
12032 }
12033
12034 /* Allocate space for the glue sections now that we've sized them. */
12035 bfd_elf32_arm_allocate_interworking_sections (info);
12036
12037 /* The check_relocs and adjust_dynamic_symbol entry points have
12038 determined the sizes of the various dynamic sections. Allocate
12039 memory for them. */
12040 plt = FALSE;
12041 relocs = FALSE;
12042 for (s = dynobj->sections; s != NULL; s = s->next)
12043 {
12044 const char * name;
12045
12046 if ((s->flags & SEC_LINKER_CREATED) == 0)
12047 continue;
12048
12049 /* It's OK to base decisions on the section name, because none
12050 of the dynobj section names depend upon the input files. */
12051 name = bfd_get_section_name (dynobj, s);
12052
12053 if (strcmp (name, ".plt") == 0)
12054 {
12055 /* Remember whether there is a PLT. */
12056 plt = s->size != 0;
12057 }
12058 else if (CONST_STRNEQ (name, ".rel"))
12059 {
12060 if (s->size != 0)
12061 {
12062 /* Remember whether there are any reloc sections other
12063 than .rel(a).plt and .rela.plt.unloaded. */
12064 if (s != htab->srelplt && s != htab->srelplt2)
12065 relocs = TRUE;
12066
12067 /* We use the reloc_count field as a counter if we need
12068 to copy relocs into the output file. */
12069 s->reloc_count = 0;
12070 }
12071 }
12072 else if (! CONST_STRNEQ (name, ".got")
12073 && strcmp (name, ".dynbss") != 0)
12074 {
12075 /* It's not one of our sections, so don't allocate space. */
12076 continue;
12077 }
12078
12079 if (s->size == 0)
12080 {
12081 /* If we don't need this section, strip it from the
12082 output file. This is mostly to handle .rel(a).bss and
12083 .rel(a).plt. We must create both sections in
12084 create_dynamic_sections, because they must be created
12085 before the linker maps input sections to output
12086 sections. The linker does that before
12087 adjust_dynamic_symbol is called, and it is that
12088 function which decides whether anything needs to go
12089 into these sections. */
12090 s->flags |= SEC_EXCLUDE;
12091 continue;
12092 }
12093
12094 if ((s->flags & SEC_HAS_CONTENTS) == 0)
12095 continue;
12096
12097 /* Allocate memory for the section contents. */
12098 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
12099 if (s->contents == NULL)
12100 return FALSE;
12101 }
12102
12103 if (elf_hash_table (info)->dynamic_sections_created)
12104 {
12105 /* Add some entries to the .dynamic section. We fill in the
12106 values later, in elf32_arm_finish_dynamic_sections, but we
12107 must add the entries now so that we get the correct size for
12108 the .dynamic section. The DT_DEBUG entry is filled in by the
12109 dynamic linker and used by the debugger. */
12110 #define add_dynamic_entry(TAG, VAL) \
12111 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
12112
12113 if (info->executable)
12114 {
12115 if (!add_dynamic_entry (DT_DEBUG, 0))
12116 return FALSE;
12117 }
12118
12119 if (plt)
12120 {
12121 if ( !add_dynamic_entry (DT_PLTGOT, 0)
12122 || !add_dynamic_entry (DT_PLTRELSZ, 0)
12123 || !add_dynamic_entry (DT_PLTREL,
12124 htab->use_rel ? DT_REL : DT_RELA)
12125 || !add_dynamic_entry (DT_JMPREL, 0))
12126 return FALSE;
12127 }
12128
12129 if (relocs)
12130 {
12131 if (htab->use_rel)
12132 {
12133 if (!add_dynamic_entry (DT_REL, 0)
12134 || !add_dynamic_entry (DT_RELSZ, 0)
12135 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
12136 return FALSE;
12137 }
12138 else
12139 {
12140 if (!add_dynamic_entry (DT_RELA, 0)
12141 || !add_dynamic_entry (DT_RELASZ, 0)
12142 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
12143 return FALSE;
12144 }
12145 }
12146
12147 /* If any dynamic relocs apply to a read-only section,
12148 then we need a DT_TEXTREL entry. */
12149 if ((info->flags & DF_TEXTREL) == 0)
12150 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
12151 info);
12152
12153 if ((info->flags & DF_TEXTREL) != 0)
12154 {
12155 if (!add_dynamic_entry (DT_TEXTREL, 0))
12156 return FALSE;
12157 }
12158 if (htab->vxworks_p
12159 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
12160 return FALSE;
12161 }
12162 #undef add_dynamic_entry
12163
12164 return TRUE;
12165 }
12166
12167 /* Finish up dynamic symbol handling. We set the contents of various
12168 dynamic sections here. */
12169
12170 static bfd_boolean
12171 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
12172 struct bfd_link_info * info,
12173 struct elf_link_hash_entry * h,
12174 Elf_Internal_Sym * sym)
12175 {
12176 bfd * dynobj;
12177 struct elf32_arm_link_hash_table *htab;
12178 struct elf32_arm_link_hash_entry *eh;
12179
12180 dynobj = elf_hash_table (info)->dynobj;
12181 htab = elf32_arm_hash_table (info);
12182 if (htab == NULL)
12183 return FALSE;
12184
12185 eh = (struct elf32_arm_link_hash_entry *) h;
12186
12187 if (h->plt.offset != (bfd_vma) -1)
12188 {
12189 asection * splt;
12190 asection * srel;
12191 bfd_byte *loc;
12192 bfd_vma plt_index;
12193 Elf_Internal_Rela rel;
12194
12195 /* This symbol has an entry in the procedure linkage table. Set
12196 it up. */
12197
12198 BFD_ASSERT (h->dynindx != -1);
12199
12200 splt = bfd_get_section_by_name (dynobj, ".plt");
12201 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".plt"));
12202 BFD_ASSERT (splt != NULL && srel != NULL);
12203
12204 /* Fill in the entry in the procedure linkage table. */
12205 if (htab->symbian_p)
12206 {
12207 put_arm_insn (htab, output_bfd,
12208 elf32_arm_symbian_plt_entry[0],
12209 splt->contents + h->plt.offset);
12210 bfd_put_32 (output_bfd,
12211 elf32_arm_symbian_plt_entry[1],
12212 splt->contents + h->plt.offset + 4);
12213
12214 /* Fill in the entry in the .rel.plt section. */
12215 rel.r_offset = (splt->output_section->vma
12216 + splt->output_offset
12217 + h->plt.offset + 4);
12218 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12219
12220 /* Get the index in the procedure linkage table which
12221 corresponds to this symbol. This is the index of this symbol
12222 in all the symbols for which we are making plt entries. The
12223 first entry in the procedure linkage table is reserved. */
12224 plt_index = ((h->plt.offset - htab->plt_header_size)
12225 / htab->plt_entry_size);
12226 }
12227 else
12228 {
12229 bfd_vma got_offset, got_address, plt_address;
12230 bfd_vma got_displacement;
12231 asection * sgot;
12232 bfd_byte * ptr;
12233
12234 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12235 BFD_ASSERT (sgot != NULL);
12236
12237 /* Get the offset into the .got.plt table of the entry that
12238 corresponds to this function. */
12239 got_offset = eh->plt_got_offset;
12240
12241 /* Get the index in the procedure linkage table which
12242 corresponds to this symbol. This is the index of this symbol
12243 in all the symbols for which we are making plt entries. The
12244 first three entries in .got.plt are reserved; after that
12245 symbols appear in the same order as in .plt. */
12246 plt_index = (got_offset - 12) / 4;
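/* For example, the first non-reserved .got.plt slot is at offset 12,
giving plt_index 0. */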
12247
12248 /* Calculate the address of the GOT entry. */
12249 got_address = (sgot->output_section->vma
12250 + sgot->output_offset
12251 + got_offset);
12252
12253 /* ...and the address of the PLT entry. */
12254 plt_address = (splt->output_section->vma
12255 + splt->output_offset
12256 + h->plt.offset);
12257
12258 ptr = htab->splt->contents + h->plt.offset;
12259 if (htab->vxworks_p && info->shared)
12260 {
12261 unsigned int i;
12262 bfd_vma val;
12263
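/* Copy the template one word at a time. Words 2 and 5 are data (the
GOT-relative offset of this entry and the byte offset of its
relocation in .rel(a).plt); the remaining words are instructions. */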
12264 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12265 {
12266 val = elf32_arm_vxworks_shared_plt_entry[i];
12267 if (i == 2)
12268 val |= got_address - sgot->output_section->vma;
12269 if (i == 5)
12270 val |= plt_index * RELOC_SIZE (htab);
12271 if (i == 2 || i == 5)
12272 bfd_put_32 (output_bfd, val, ptr);
12273 else
12274 put_arm_insn (htab, output_bfd, val, ptr);
12275 }
12276 }
12277 else if (htab->vxworks_p)
12278 {
12279 unsigned int i;
12280 bfd_vma val;
12281
12282 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12283 {
12284 val = elf32_arm_vxworks_exec_plt_entry[i];
12285 if (i == 2)
12286 val |= got_address;
12287 if (i == 4)
12288 val |= 0xffffff & -((h->plt.offset + i * 4 + 8) >> 2);
12289 if (i == 5)
12290 val |= plt_index * RELOC_SIZE (htab);
12291 if (i == 2 || i == 5)
12292 bfd_put_32 (output_bfd, val, ptr);
12293 else
12294 put_arm_insn (htab, output_bfd, val, ptr);
12295 }
12296
12297 loc = (htab->srelplt2->contents
12298 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
12299
12300 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
12301 referencing the GOT for this PLT entry. */
12302 rel.r_offset = plt_address + 8;
12303 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12304 rel.r_addend = got_offset;
12305 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12306 loc += RELOC_SIZE (htab);
12307
12308 /* Create the R_ARM_ABS32 relocation referencing the
12309 beginning of the PLT for this GOT entry. */
12310 rel.r_offset = got_address;
12311 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12312 rel.r_addend = 0;
12313 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12314 }
12315 else
12316 {
12317 bfd_signed_vma thumb_refs;
12318 /* Calculate the displacement between the PLT slot and the
12319 entry in the GOT. The eight-byte offset accounts for the
12320 value produced by adding to pc in the first instruction
12321 of the PLT stub. */
12322 got_displacement = got_address - (plt_address + 8);
12323
12324 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
12325
12326 thumb_refs = eh->plt_thumb_refcount;
12327 if (!htab->use_blx)
12328 thumb_refs += eh->plt_maybe_thumb_refcount;
12329
12330 if (thumb_refs > 0)
12331 {
12332 put_thumb_insn (htab, output_bfd,
12333 elf32_arm_plt_thumb_stub[0], ptr - 4);
12334 put_thumb_insn (htab, output_bfd,
12335 elf32_arm_plt_thumb_stub[1], ptr - 2);
12336 }
12337
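/* Write the standard three-instruction PLT entry, folding the GOT
displacement into the instruction immediates: bits 20-27 go into
the first word, bits 12-19 into the second, and the low 12 bits
into the final load. */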
12338 put_arm_insn (htab, output_bfd,
12339 elf32_arm_plt_entry[0]
12340 | ((got_displacement & 0x0ff00000) >> 20),
12341 ptr + 0);
12342 put_arm_insn (htab, output_bfd,
12343 elf32_arm_plt_entry[1]
12344 | ((got_displacement & 0x000ff000) >> 12),
12345 ptr + 4);
12346 put_arm_insn (htab, output_bfd,
12347 elf32_arm_plt_entry[2]
12348 | (got_displacement & 0x00000fff),
12349 ptr + 8);
12350 #ifdef FOUR_WORD_PLT
12351 bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
12352 #endif
12353 }
12354
12355 /* Fill in the entry in the global offset table. */
12356 bfd_put_32 (output_bfd,
12357 (splt->output_section->vma
12358 + splt->output_offset),
12359 sgot->contents + got_offset);
12360
12361 /* Fill in the entry in the .rel(a).plt section. */
12362 rel.r_addend = 0;
12363 rel.r_offset = got_address;
12364 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_JUMP_SLOT);
12365 }
12366
12367 loc = srel->contents + plt_index * RELOC_SIZE (htab);
12368 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12369
12370 if (!h->def_regular)
12371 {
12372 /* Mark the symbol as undefined, rather than as defined in
12373 the .plt section. Leave the value alone. */
12374 sym->st_shndx = SHN_UNDEF;
12375 /* If the symbol is weak, we do need to clear the value.
12376 Otherwise, the PLT entry would provide a definition for
12377 the symbol even if the symbol wasn't defined anywhere,
12378 and so the symbol would never be NULL. */
12379 if (!h->ref_regular_nonweak)
12380 sym->st_value = 0;
12381 }
12382 }
12383
12384 if (h->got.offset != (bfd_vma) -1
12385 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_GD) == 0
12386 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_IE) == 0)
12387 {
12388 asection * sgot;
12389 asection * srel;
12390 Elf_Internal_Rela rel;
12391 bfd_byte *loc;
12392 bfd_vma offset;
12393
12394 /* This symbol has an entry in the global offset table. Set it
12395 up. */
12396 sgot = bfd_get_section_by_name (dynobj, ".got");
12397 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".got"));
12398 BFD_ASSERT (sgot != NULL && srel != NULL);
12399
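/* The low bit of got.offset records that relocate_section has already
initialized this GOT entry; mask it off to obtain the real offset. */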
12400 offset = (h->got.offset & ~(bfd_vma) 1);
12401 rel.r_addend = 0;
12402 rel.r_offset = (sgot->output_section->vma
12403 + sgot->output_offset
12404 + offset);
12405
12406 /* If this is a static link, or it is a -Bsymbolic link and the
12407 symbol is defined locally or was forced to be local because
12408 of a version file, we just want to emit a RELATIVE reloc.
12409 The entry in the global offset table will already have been
12410 initialized in the relocate_section function. */
12411 if (info->shared
12412 && SYMBOL_REFERENCES_LOCAL (info, h))
12413 {
12414 BFD_ASSERT ((h->got.offset & 1) != 0);
12415 rel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12416 if (!htab->use_rel)
12417 {
12418 rel.r_addend = bfd_get_32 (output_bfd, sgot->contents + offset);
12419 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12420 }
12421 }
12422 else
12423 {
12424 BFD_ASSERT ((h->got.offset & 1) == 0);
12425 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12426 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12427 }
12428
12429 loc = srel->contents + srel->reloc_count++ * RELOC_SIZE (htab);
12430 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12431 }
12432
12433 if (h->needs_copy)
12434 {
12435 asection * s;
12436 Elf_Internal_Rela rel;
12437 bfd_byte *loc;
12438
12439 /* This symbol needs a copy reloc. Set it up. */
12440 BFD_ASSERT (h->dynindx != -1
12441 && (h->root.type == bfd_link_hash_defined
12442 || h->root.type == bfd_link_hash_defweak));
12443
12444 s = bfd_get_section_by_name (h->root.u.def.section->owner,
12445 RELOC_SECTION (htab, ".bss"));
12446 BFD_ASSERT (s != NULL);
12447
12448 rel.r_addend = 0;
12449 rel.r_offset = (h->root.u.def.value
12450 + h->root.u.def.section->output_section->vma
12451 + h->root.u.def.section->output_offset);
12452 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
12453 loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
12454 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12455 }
12456
12457 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
12458 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
12459 to the ".got" section. */
12460 if (strcmp (h->root.root.string, "_DYNAMIC") == 0
12461 || (!htab->vxworks_p && h == htab->root.hgot))
12462 sym->st_shndx = SHN_ABS;
12463
12464 return TRUE;
12465 }
12466
12467 /* Finish up the dynamic sections. */
12468
12469 static bfd_boolean
12470 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
12471 {
12472 bfd * dynobj;
12473 asection * sgot;
12474 asection * sdyn;
12475 struct elf32_arm_link_hash_table *htab;
12476
12477 htab = elf32_arm_hash_table (info);
12478 if (htab == NULL)
12479 return FALSE;
12480
12481 dynobj = elf_hash_table (info)->dynobj;
12482
12483 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12484 BFD_ASSERT (htab->symbian_p || sgot != NULL);
12485 sdyn = bfd_get_section_by_name (dynobj, ".dynamic");
12486
12487 if (elf_hash_table (info)->dynamic_sections_created)
12488 {
12489 asection *splt;
12490 Elf32_External_Dyn *dyncon, *dynconend;
12491
12492 splt = bfd_get_section_by_name (dynobj, ".plt");
12493 BFD_ASSERT (splt != NULL && sdyn != NULL);
12494
12495 dyncon = (Elf32_External_Dyn *) sdyn->contents;
12496 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
12497
12498 for (; dyncon < dynconend; dyncon++)
12499 {
12500 Elf_Internal_Dyn dyn;
12501 const char * name;
12502 asection * s;
12503
12504 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
12505
12506 switch (dyn.d_tag)
12507 {
12508 unsigned int type;
12509
12510 default:
12511 if (htab->vxworks_p
12512 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
12513 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12514 break;
12515
12516 case DT_HASH:
12517 name = ".hash";
12518 goto get_vma_if_bpabi;
12519 case DT_STRTAB:
12520 name = ".dynstr";
12521 goto get_vma_if_bpabi;
12522 case DT_SYMTAB:
12523 name = ".dynsym";
12524 goto get_vma_if_bpabi;
12525 case DT_VERSYM:
12526 name = ".gnu.version";
12527 goto get_vma_if_bpabi;
12528 case DT_VERDEF:
12529 name = ".gnu.version_d";
12530 goto get_vma_if_bpabi;
12531 case DT_VERNEED:
12532 name = ".gnu.version_r";
12533 goto get_vma_if_bpabi;
12534
12535 case DT_PLTGOT:
12536 name = ".got";
12537 goto get_vma;
12538 case DT_JMPREL:
12539 name = RELOC_SECTION (htab, ".plt");
12540 get_vma:
12541 s = bfd_get_section_by_name (output_bfd, name);
12542 BFD_ASSERT (s != NULL);
12543 if (!htab->symbian_p)
12544 dyn.d_un.d_ptr = s->vma;
12545 else
12546 /* In the BPABI, tags in the PT_DYNAMIC section point
12547 at the file offset, not the memory address, for the
12548 convenience of the post linker. */
12549 dyn.d_un.d_ptr = s->filepos;
12550 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12551 break;
12552
12553 get_vma_if_bpabi:
12554 if (htab->symbian_p)
12555 goto get_vma;
12556 break;
12557
12558 case DT_PLTRELSZ:
12559 s = bfd_get_section_by_name (output_bfd,
12560 RELOC_SECTION (htab, ".plt"));
12561 BFD_ASSERT (s != NULL);
12562 dyn.d_un.d_val = s->size;
12563 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12564 break;
12565
12566 case DT_RELSZ:
12567 case DT_RELASZ:
12568 if (!htab->symbian_p)
12569 {
12570 /* My reading of the SVR4 ABI indicates that the
12571 procedure linkage table relocs (DT_JMPREL) should be
12572 included in the overall relocs (DT_REL). This is
12573 what Solaris does. However, UnixWare can not handle
12574 that case. Therefore, we override the DT_RELSZ entry
12575 here to make it not include the JMPREL relocs. Since
12576 the linker script arranges for .rel(a).plt to follow all
12577 other relocation sections, we don't have to worry
12578 about changing the DT_REL entry. */
12579 s = bfd_get_section_by_name (output_bfd,
12580 RELOC_SECTION (htab, ".plt"));
12581 if (s != NULL)
12582 dyn.d_un.d_val -= s->size;
12583 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12584 break;
12585 }
12586 /* Fall through. */
12587
12588 case DT_REL:
12589 case DT_RELA:
12590 /* In the BPABI, the DT_REL tag must point at the file
12591 offset, not the VMA, of the first relocation
12592 section. So, we use code similar to that in
12593 elflink.c, but do not check for SHF_ALLOC on the
12594 relocation section, since relocation sections are
12595 never allocated under the BPABI. The comments above
12596 about UnixWare notwithstanding, we include all of the
12597 relocations here. */
12598 if (htab->symbian_p)
12599 {
12600 unsigned int i;
12601 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
12602 ? SHT_REL : SHT_RELA);
12603 dyn.d_un.d_val = 0;
12604 for (i = 1; i < elf_numsections (output_bfd); i++)
12605 {
12606 Elf_Internal_Shdr *hdr
12607 = elf_elfsections (output_bfd)[i];
12608 if (hdr->sh_type == type)
12609 {
12610 if (dyn.d_tag == DT_RELSZ
12611 || dyn.d_tag == DT_RELASZ)
12612 dyn.d_un.d_val += hdr->sh_size;
12613 else if ((ufile_ptr) hdr->sh_offset
12614 <= dyn.d_un.d_val - 1)
12615 dyn.d_un.d_val = hdr->sh_offset;
12616 }
12617 }
12618 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12619 }
12620 break;
12621
12622 /* Set the bottom bit of DT_INIT/FINI if the
12623 corresponding function is Thumb. */
12624 case DT_INIT:
12625 name = info->init_function;
12626 goto get_sym;
12627 case DT_FINI:
12628 name = info->fini_function;
12629 get_sym:
12630 /* If it wasn't set by elf_bfd_final_link
12631 then there is nothing to adjust. */
12632 if (dyn.d_un.d_val != 0)
12633 {
12634 struct elf_link_hash_entry * eh;
12635
12636 eh = elf_link_hash_lookup (elf_hash_table (info), name,
12637 FALSE, FALSE, TRUE);
12638 if (eh != NULL
12639 && ELF_ST_TYPE (eh->type) == STT_ARM_TFUNC)
12640 {
12641 dyn.d_un.d_val |= 1;
12642 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12643 }
12644 }
12645 break;
12646 }
12647 }
12648
12649 /* Fill in the first entry in the procedure linkage table. */
12650 if (splt->size > 0 && htab->plt_header_size)
12651 {
12652 const bfd_vma *plt0_entry;
12653 bfd_vma got_address, plt_address, got_displacement;
12654
12655 /* Calculate the addresses of the GOT and PLT. */
12656 got_address = sgot->output_section->vma + sgot->output_offset;
12657 plt_address = splt->output_section->vma + splt->output_offset;
12658
12659 if (htab->vxworks_p)
12660 {
12661 /* The VxWorks GOT is relocated by the dynamic linker.
12662 Therefore, we must emit relocations rather than simply
12663 computing the values now. */
12664 Elf_Internal_Rela rel;
12665
12666 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
12667 put_arm_insn (htab, output_bfd, plt0_entry[0],
12668 splt->contents + 0);
12669 put_arm_insn (htab, output_bfd, plt0_entry[1],
12670 splt->contents + 4);
12671 put_arm_insn (htab, output_bfd, plt0_entry[2],
12672 splt->contents + 8);
12673 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
12674
12675 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
12676 rel.r_offset = plt_address + 12;
12677 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12678 rel.r_addend = 0;
12679 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
12680 htab->srelplt2->contents);
12681 }
12682 else
12683 {
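/* The PLT header loads the GOT address PC-relative; the extra 16
bytes allow for the pipeline-adjusted PC (instruction address + 8)
seen by the instruction at offset 8 of the header. */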
12684 got_displacement = got_address - (plt_address + 16);
12685
12686 plt0_entry = elf32_arm_plt0_entry;
12687 put_arm_insn (htab, output_bfd, plt0_entry[0],
12688 splt->contents + 0);
12689 put_arm_insn (htab, output_bfd, plt0_entry[1],
12690 splt->contents + 4);
12691 put_arm_insn (htab, output_bfd, plt0_entry[2],
12692 splt->contents + 8);
12693 put_arm_insn (htab, output_bfd, plt0_entry[3],
12694 splt->contents + 12);
12695
12696 #ifdef FOUR_WORD_PLT
12697 /* The displacement value goes in the otherwise-unused
12698 last word of the second entry. */
12699 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
12700 #else
12701 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
12702 #endif
12703 }
12704 }
12705
12706 /* UnixWare sets the entsize of .plt to 4, although that doesn't
12707 really seem like the right value. */
12708 if (splt->output_section->owner == output_bfd)
12709 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
12710
12711 if (htab->vxworks_p && !info->shared && htab->splt->size > 0)
12712 {
12713 /* Correct the .rel(a).plt.unloaded relocations. They will have
12714 incorrect symbol indexes. */
12715 int num_plts;
12716 unsigned char *p;
12717
12718 num_plts = ((htab->splt->size - htab->plt_header_size)
12719 / htab->plt_entry_size);
12720 p = htab->srelplt2->contents + RELOC_SIZE (htab);
12721
12722 for (; num_plts; num_plts--)
12723 {
12724 Elf_Internal_Rela rel;
12725
12726 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12727 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12728 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12729 p += RELOC_SIZE (htab);
12730
12731 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12732 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12733 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12734 p += RELOC_SIZE (htab);
12735 }
12736 }
12737 }
12738
12739 /* Fill in the first three entries in the global offset table. */
12740 if (sgot)
12741 {
12742 if (sgot->size > 0)
12743 {
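/* The first GOT word holds the address of the .dynamic section (or
zero when there is none); the following two words are reserved for
the dynamic linker. */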
12744 if (sdyn == NULL)
12745 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
12746 else
12747 bfd_put_32 (output_bfd,
12748 sdyn->output_section->vma + sdyn->output_offset,
12749 sgot->contents);
12750 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
12751 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
12752 }
12753
12754 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
12755 }
12756
12757 return TRUE;
12758 }
12759
12760 static void
12761 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info)
12762 {
12763 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
12764 struct elf32_arm_link_hash_table *globals;
12765
12766 i_ehdrp = elf_elfheader (abfd);
12767
12768 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
12769 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
12770 else
12771 i_ehdrp->e_ident[EI_OSABI] = 0;
12772 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
12773
12774 if (link_info)
12775 {
12776 globals = elf32_arm_hash_table (link_info);
12777 if (globals != NULL && globals->byteswap_code)
12778 i_ehdrp->e_flags |= EF_ARM_BE8;
12779 }
12780 }
12781
12782 static enum elf_reloc_type_class
12783 elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela)
12784 {
12785 switch ((int) ELF32_R_TYPE (rela->r_info))
12786 {
12787 case R_ARM_RELATIVE:
12788 return reloc_class_relative;
12789 case R_ARM_JUMP_SLOT:
12790 return reloc_class_plt;
12791 case R_ARM_COPY:
12792 return reloc_class_copy;
12793 default:
12794 return reloc_class_normal;
12795 }
12796 }
12797
12798 /* Set extra section flags for an ARM ELF section: treat note sections
as link-once, merging duplicates with the same contents. */
12799
12800 static bfd_boolean
12801 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
12802 {
12803 if (hdr->sh_type == SHT_NOTE)
12804 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
12805
12806 return TRUE;
12807 }
12808
12809 static void
12810 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
12811 {
12812 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
12813 }
12814
12815 /* Return TRUE if this is an unwinding table entry. */
12816
12817 static bfd_boolean
12818 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
12819 {
12820 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
12821 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
12822 }
12823
12824
12825 /* Set the type and flags for an ARM section. We do this by
12826 the section name, which is a hack, but ought to work. */
12827
12828 static bfd_boolean
12829 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
12830 {
12831 const char * name;
12832
12833 name = bfd_get_section_name (abfd, sec);
12834
12835 if (is_arm_elf_unwind_section_name (abfd, name))
12836 {
12837 hdr->sh_type = SHT_ARM_EXIDX;
12838 hdr->sh_flags |= SHF_LINK_ORDER;
12839 }
12840 return TRUE;
12841 }
12842
12843 /* Handle an ARM specific section when reading an object file. This is
12844 called when bfd_section_from_shdr finds a section with an unknown
12845 type. */
12846
12847 static bfd_boolean
12848 elf32_arm_section_from_shdr (bfd *abfd,
12849 Elf_Internal_Shdr * hdr,
12850 const char *name,
12851 int shindex)
12852 {
12853 /* There ought to be a place to keep ELF backend specific flags, but
12854 at the moment there isn't one. We just keep track of the
12855 sections by their name, instead. Fortunately, the ABI gives
12856 names for all the ARM specific sections, so we will probably get
12857 away with this. */
12858 switch (hdr->sh_type)
12859 {
12860 case SHT_ARM_EXIDX:
12861 case SHT_ARM_PREEMPTMAP:
12862 case SHT_ARM_ATTRIBUTES:
12863 break;
12864
12865 default:
12866 return FALSE;
12867 }
12868
12869 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
12870 return FALSE;
12871
12872 return TRUE;
12873 }
12874
12875 static _arm_elf_section_data *
12876 get_arm_elf_section_data (asection * sec)
12877 {
12878 if (sec && sec->owner && is_arm_elf (sec->owner))
12879 return elf32_arm_section_data (sec);
12880 else
12881 return NULL;
12882 }
12883
12884 typedef struct
12885 {
12886 void *finfo;
12887 struct bfd_link_info *info;
12888 asection *sec;
12889 int sec_shndx;
12890 int (*func) (void *, const char *, Elf_Internal_Sym *,
12891 asection *, struct elf_link_hash_entry *);
12892 } output_arch_syminfo;
12893
12894 enum map_symbol_type
12895 {
12896 ARM_MAP_ARM,
12897 ARM_MAP_THUMB,
12898 ARM_MAP_DATA
12899 };
12900
12901
12902 /* Output a single mapping symbol. */
12903
12904 static bfd_boolean
12905 elf32_arm_output_map_sym (output_arch_syminfo *osi,
12906 enum map_symbol_type type,
12907 bfd_vma offset)
12908 {
12909 static const char *names[3] = {"$a", "$t", "$d"};
12910 Elf_Internal_Sym sym;
12911
12912 sym.st_value = osi->sec->output_section->vma
12913 + osi->sec->output_offset
12914 + offset;
12915 sym.st_size = 0;
12916 sym.st_other = 0;
12917 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
12918 sym.st_shndx = osi->sec_shndx;
12919 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
12920 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
12921 }
12922
12923
12924 /* Output mapping symbols for PLT entries associated with H. */
12925
12926 static bfd_boolean
12927 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
12928 {
12929 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
12930 struct elf32_arm_link_hash_table *htab;
12931 struct elf32_arm_link_hash_entry *eh;
12932 bfd_vma addr;
12933
12934 if (h->root.type == bfd_link_hash_indirect)
12935 return TRUE;
12936
12937 if (h->root.type == bfd_link_hash_warning)
12938 /* When warning symbols are created, they **replace** the "real"
12939 entry in the hash table, thus we never get to see the real
12940 symbol in a hash traversal. So look at it now. */
12941 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12942
12943 if (h->plt.offset == (bfd_vma) -1)
12944 return TRUE;
12945
12946 htab = elf32_arm_hash_table (osi->info);
12947 if (htab == NULL)
12948 return FALSE;
12949
12950 eh = (struct elf32_arm_link_hash_entry *) h;
12951 addr = h->plt.offset;
12952 if (htab->symbian_p)
12953 {
12954 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12955 return FALSE;
12956 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
12957 return FALSE;
12958 }
12959 else if (htab->vxworks_p)
12960 {
12961 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12962 return FALSE;
12963 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
12964 return FALSE;
12965 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
12966 return FALSE;
12967 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
12968 return FALSE;
12969 }
12970 else
12971 {
12972 bfd_signed_vma thumb_refs;
12973
12974 thumb_refs = eh->plt_thumb_refcount;
12975 if (!htab->use_blx)
12976 thumb_refs += eh->plt_maybe_thumb_refcount;
12977
12978 if (thumb_refs > 0)
12979 {
12980 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
12981 return FALSE;
12982 }
12983 #ifdef FOUR_WORD_PLT
12984 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12985 return FALSE;
12986 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
12987 return FALSE;
12988 #else
12989 /* A three-word PLT with no Thumb thunk contains only ARM code, so
12990 we only need to output a mapping symbol for the first PLT entry and
12991 for entries with Thumb thunks.  */
12992 if (thumb_refs > 0 || addr == 20)
12993 {
12994 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12995 return FALSE;
12996 }
12997 #endif
12998 }
12999
13000 return TRUE;
13001 }
13002
13003 /* Output a single local symbol for a generated stub. */
13004
13005 static bfd_boolean
13006 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
13007 bfd_vma offset, bfd_vma size)
13008 {
13009 Elf_Internal_Sym sym;
13010
13011 sym.st_value = osi->sec->output_section->vma
13012 + osi->sec->output_offset
13013 + offset;
13014 sym.st_size = size;
13015 sym.st_other = 0;
13016 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
13017 sym.st_shndx = osi->sec_shndx;
13018 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
13019 }
13020
13021 static bfd_boolean
13022 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
13023 void * in_arg)
13024 {
13025 struct elf32_arm_stub_hash_entry *stub_entry;
13026 asection *stub_sec;
13027 bfd_vma addr;
13028 char *stub_name;
13029 output_arch_syminfo *osi;
13030 const insn_sequence *template_sequence;
13031 enum stub_insn_type prev_type;
13032 int size;
13033 int i;
13034 enum map_symbol_type sym_type;
13035
13036 /* Massage our args to the form they really have. */
13037 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
13038 osi = (output_arch_syminfo *) in_arg;
13039
13040 stub_sec = stub_entry->stub_sec;
13041
13042 /* Ensure this stub is attached to the current section being
13043 processed. */
13044 if (stub_sec != osi->sec)
13045 return TRUE;
13046
13047 addr = (bfd_vma) stub_entry->stub_offset;
13048 stub_name = stub_entry->output_name;
13049
13050 template_sequence = stub_entry->stub_template;
13051 switch (template_sequence[0].type)
13052 {
13053 case ARM_TYPE:
13054 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
13055 return FALSE;
13056 break;
13057 case THUMB16_TYPE:
13058 case THUMB32_TYPE:
13059 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
13060 stub_entry->stub_size))
13061 return FALSE;
13062 break;
13063 default:
13064 BFD_FAIL ();
13065 return 0;
13066 }
13067
13068 prev_type = DATA_TYPE;
13069 size = 0;
13070 for (i = 0; i < stub_entry->stub_template_size; i++)
13071 {
13072 switch (template_sequence[i].type)
13073 {
13074 case ARM_TYPE:
13075 sym_type = ARM_MAP_ARM;
13076 break;
13077
13078 case THUMB16_TYPE:
13079 case THUMB32_TYPE:
13080 sym_type = ARM_MAP_THUMB;
13081 break;
13082
13083 case DATA_TYPE:
13084 sym_type = ARM_MAP_DATA;
13085 break;
13086
13087 default:
13088 BFD_FAIL ();
13089 return FALSE;
13090 }
13091
13092 if (template_sequence[i].type != prev_type)
13093 {
13094 prev_type = template_sequence[i].type;
13095 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
13096 return FALSE;
13097 }
13098
13099 switch (template_sequence[i].type)
13100 {
13101 case ARM_TYPE:
13102 case THUMB32_TYPE:
13103 size += 4;
13104 break;
13105
13106 case THUMB16_TYPE:
13107 size += 2;
13108 break;
13109
13110 case DATA_TYPE:
13111 size += 4;
13112 break;
13113
13114 default:
13115 BFD_FAIL ();
13116 return FALSE;
13117 }
13118 }
13119
13120 return TRUE;
13121 }
13122
13123 /* Output mapping symbols for linker-generated sections and for
13124 those data-only sections that do not already have a $d mapping
13125 symbol.  */
13126
13127 static bfd_boolean
13128 elf32_arm_output_arch_local_syms (bfd *output_bfd,
13129 struct bfd_link_info *info,
13130 void *finfo,
13131 int (*func) (void *, const char *,
13132 Elf_Internal_Sym *,
13133 asection *,
13134 struct elf_link_hash_entry *))
13135 {
13136 output_arch_syminfo osi;
13137 struct elf32_arm_link_hash_table *htab;
13138 bfd_vma offset;
13139 bfd_size_type size;
13140 bfd *input_bfd;
13141
13142 htab = elf32_arm_hash_table (info);
13143 if (htab == NULL)
13144 return FALSE;
13145
13146 check_use_blx (htab);
13147
13148 osi.finfo = finfo;
13149 osi.info = info;
13150 osi.func = func;
13151
13152 /* Add a $d mapping symbol to data-only sections that
13153 don't have any mapping symbol. This may result in (harmless) redundant
13154 mapping symbols. */
13155 for (input_bfd = info->input_bfds;
13156 input_bfd != NULL;
13157 input_bfd = input_bfd->link_next)
13158 {
13159 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
13160 for (osi.sec = input_bfd->sections;
13161 osi.sec != NULL;
13162 osi.sec = osi.sec->next)
13163 {
13164 if (osi.sec->output_section != NULL
13165 && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
13166 != 0)
13167 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
13168 == SEC_HAS_CONTENTS
13169 && get_arm_elf_section_data (osi.sec) != NULL
13170 && get_arm_elf_section_data (osi.sec)->mapcount == 0
13171 && osi.sec->size > 0)
13172 {
13173 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13174 (output_bfd, osi.sec->output_section);
13175 if (osi.sec_shndx != (int)SHN_BAD)
13176 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
13177 }
13178 }
13179 }
13180
13181 /* ARM->Thumb glue. */
13182 if (htab->arm_glue_size > 0)
13183 {
13184 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13185 ARM2THUMB_GLUE_SECTION_NAME);
13186
13187 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13188 (output_bfd, osi.sec->output_section);
13189 if (info->shared || htab->root.is_relocatable_executable
13190 || htab->pic_veneer)
13191 size = ARM2THUMB_PIC_GLUE_SIZE;
13192 else if (htab->use_blx)
13193 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
13194 else
13195 size = ARM2THUMB_STATIC_GLUE_SIZE;
13196
13197 for (offset = 0; offset < htab->arm_glue_size; offset += size)
13198 {
13199 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
13200 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
13201 }
13202 }
13203
13204 /* Thumb->ARM glue. */
13205 if (htab->thumb_glue_size > 0)
13206 {
13207 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13208 THUMB2ARM_GLUE_SECTION_NAME);
13209
13210 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13211 (output_bfd, osi.sec->output_section);
13212 size = THUMB2ARM_GLUE_SIZE;
13213
13214 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
13215 {
13216 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
13217 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
13218 }
13219 }
13220
13221 /* ARMv4 BX veneers. */
13222 if (htab->bx_glue_size > 0)
13223 {
13224 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13225 ARM_BX_GLUE_SECTION_NAME);
13226
13227 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13228 (output_bfd, osi.sec->output_section);
13229
13230 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
13231 }
13232
13233 /* Long-call stubs.  */
13234 if (htab->stub_bfd && htab->stub_bfd->sections)
13235 {
13236 asection* stub_sec;
13237
13238 for (stub_sec = htab->stub_bfd->sections;
13239 stub_sec != NULL;
13240 stub_sec = stub_sec->next)
13241 {
13242 /* Ignore non-stub sections. */
13243 if (!strstr (stub_sec->name, STUB_SUFFIX))
13244 continue;
13245
13246 osi.sec = stub_sec;
13247
13248 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13249 (output_bfd, osi.sec->output_section);
13250
13251 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
13252 }
13253 }
13254
13255 /* Finally, output mapping symbols for the PLT. */
13256 if (!htab->splt || htab->splt->size == 0)
13257 return TRUE;
13258
13259 osi.sec_shndx = _bfd_elf_section_from_bfd_section (output_bfd,
13260 htab->splt->output_section);
13261 osi.sec = htab->splt;
13262 /* Output mapping symbols for the PLT header.  SymbianOS does not have a
13263 PLT header.  */
13264 if (htab->vxworks_p)
13265 {
13266 /* VxWorks shared libraries have no PLT header. */
13267 if (!info->shared)
13268 {
13269 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13270 return FALSE;
13271 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
13272 return FALSE;
13273 }
13274 }
13275 else if (!htab->symbian_p)
13276 {
13277 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13278 return FALSE;
13279 #ifndef FOUR_WORD_PLT
13280 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
13281 return FALSE;
13282 #endif
13283 }
13284
13285 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, (void *) &osi);
13286 return TRUE;
13287 }
13288
13289 /* Allocate target specific section data. */
13290
13291 static bfd_boolean
13292 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
13293 {
13294 if (!sec->used_by_bfd)
13295 {
13296 _arm_elf_section_data *sdata;
13297 bfd_size_type amt = sizeof (*sdata);
13298
13299 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
13300 if (sdata == NULL)
13301 return FALSE;
13302 sec->used_by_bfd = sdata;
13303 }
13304
13305 return _bfd_elf_new_section_hook (abfd, sec);
13306 }
13307
13308
13309 /* Used to order a list of mapping symbols by address. */
13310
13311 static int
13312 elf32_arm_compare_mapping (const void * a, const void * b)
13313 {
13314 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
13315 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
13316
13317 if (amap->vma > bmap->vma)
13318 return 1;
13319 else if (amap->vma < bmap->vma)
13320 return -1;
13321 else if (amap->type > bmap->type)
13322 /* Ensure results do not depend on the host qsort for objects with
13323 multiple mapping symbols at the same address by sorting on type
13324 after vma. */
13325 return 1;
13326 else if (amap->type < bmap->type)
13327 return -1;
13328 else
13329 return 0;
13330 }
13331
13332 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
13333
13334 static unsigned long
13335 offset_prel31 (unsigned long addr, bfd_vma offset)
13336 {
13337 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
13338 }
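/* A quick worked example of offset_prel31 (values chosen purely for
   illustration): with addr = 0x80000010 and offset = 0x20, bit 31 of addr
   is preserved, while the low 31 bits become
   (0x80000010 + 0x20) & 0x7fffffff = 0x30, giving a result of 0x80000030.  */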
13339
13340 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
13341 relocations. */
13342
13343 static void
13344 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
13345 {
13346 unsigned long first_word = bfd_get_32 (output_bfd, from);
13347 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
13348
13349 /* High bit of first word is supposed to be zero. */
13350 if ((first_word & 0x80000000ul) == 0)
13351 first_word = offset_prel31 (first_word, offset);
13352
13353 /* If the high bit of the second word is clear, and the bit pattern is not 0x1
13354 (EXIDX_CANTUNWIND), it is a prel31 offset to an .ARM.extab entry.  */
13355 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
13356 second_word = offset_prel31 (second_word, offset);
13357
13358 bfd_put_32 (output_bfd, first_word, to);
13359 bfd_put_32 (output_bfd, second_word, to + 4);
13360 }
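/* For reference (a condensed, informal summary of the ARM EHABI): each
   .ARM.exidx entry is a pair of words.  The first word is a prel31 offset
   to the function the entry describes.  The second word is either the
   literal 0x1 (EXIDX_CANTUNWIND), an inline unwind description with the
   high bit set, or a prel31 offset to an .ARM.extab entry.
   copy_exidx_entry above rebiases both prel31 fields by OFFSET when an
   entry is moved within the output section.  */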
13361
13362 /* Data for make_branch_to_a8_stub(). */
13363
13364 struct a8_branch_to_stub_data {
13365 asection *writing_section;
13366 bfd_byte *contents;
13367 };
13368
13369
13370 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
13371 places for a particular section. */
13372
13373 static bfd_boolean
13374 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
13375 void *in_arg)
13376 {
13377 struct elf32_arm_stub_hash_entry *stub_entry;
13378 struct a8_branch_to_stub_data *data;
13379 bfd_byte *contents;
13380 unsigned long branch_insn;
13381 bfd_vma veneered_insn_loc, veneer_entry_loc;
13382 bfd_signed_vma branch_offset;
13383 bfd *abfd;
13384 unsigned int target;
13385
13386 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
13387 data = (struct a8_branch_to_stub_data *) in_arg;
13388
13389 if (stub_entry->target_section != data->writing_section
13390 || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
13391 return TRUE;
13392
13393 contents = data->contents;
13394
13395 veneered_insn_loc = stub_entry->target_section->output_section->vma
13396 + stub_entry->target_section->output_offset
13397 + stub_entry->target_value;
13398
13399 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
13400 + stub_entry->stub_sec->output_offset
13401 + stub_entry->stub_offset;
13402
13403 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
13404 veneered_insn_loc &= ~3u;
13405
13406 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
13407
13408 abfd = stub_entry->target_section->owner;
13409 target = stub_entry->target_value;
13410
13411 /* We attempt to avoid this condition by setting stubs_always_after_branch
13412 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
13413 This check is just to be on the safe side... */
13414 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
13415 {
13416 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
13417 "allocated in unsafe location"), abfd);
13418 return FALSE;
13419 }
13420
13421 switch (stub_entry->stub_type)
13422 {
13423 case arm_stub_a8_veneer_b:
13424 case arm_stub_a8_veneer_b_cond:
13425 branch_insn = 0xf0009000;
13426 goto jump24;
13427
13428 case arm_stub_a8_veneer_blx:
13429 branch_insn = 0xf000e800;
13430 goto jump24;
13431
13432 case arm_stub_a8_veneer_bl:
13433 {
13434 unsigned int i1, j1, i2, j2, s;
13435
13436 branch_insn = 0xf000d000;
13437
13438 jump24:
13439 if (branch_offset < -16777216 || branch_offset > 16777214)
13440 {
13441 /* There's not much we can do apart from complain if this
13442 happens. */
13443 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
13444 "of range (input file too large)"), abfd);
13445 return FALSE;
13446 }
13447
13448 /* i1 = not(j1 eor s), so:
13449 not i1 = j1 eor s
13450 j1 = (not i1) eor s. */
13451
13452 branch_insn |= (branch_offset >> 1) & 0x7ff;
13453 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
13454 i2 = (branch_offset >> 22) & 1;
13455 i1 = (branch_offset >> 23) & 1;
13456 s = (branch_offset >> 24) & 1;
13457 j1 = (!i1) ^ s;
13458 j2 = (!i2) ^ s;
13459 branch_insn |= j2 << 11;
13460 branch_insn |= j1 << 13;
13461 branch_insn |= s << 26;
13462 }
13463 break;
13464
13465 default:
13466 BFD_FAIL ();
13467 return FALSE;
13468 }
13469
13470 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[target]);
13471 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[target + 2]);
13472
13473 return TRUE;
13474 }
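/* An informal note on the Thumb-2 encodings assembled above: B.W, BL and
   BLX split a signed 25-bit (halfword-aligned) branch offset across the
   two halfwords roughly as

       halfword 1:  1 1 1 1 0 S  imm10
       halfword 2:  1 x J1 x J2  imm11

   with I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S), so the branch target is
   PC + SignExtend (S:I1:I2:imm10:imm11:'0').  That is why the code
   extracts i1, i2 and s from branch_offset and then stores j1 and j2.  */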
13475
13476 /* Do code byteswapping. Return FALSE afterwards so that the section is
13477 written out as normal. */
13478
13479 static bfd_boolean
13480 elf32_arm_write_section (bfd *output_bfd,
13481 struct bfd_link_info *link_info,
13482 asection *sec,
13483 bfd_byte *contents)
13484 {
13485 unsigned int mapcount, errcount;
13486 _arm_elf_section_data *arm_data;
13487 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
13488 elf32_arm_section_map *map;
13489 elf32_vfp11_erratum_list *errnode;
13490 bfd_vma ptr;
13491 bfd_vma end;
13492 bfd_vma offset = sec->output_section->vma + sec->output_offset;
13493 bfd_byte tmp;
13494 unsigned int i;
13495
13496 if (globals == NULL)
13497 return FALSE;
13498
13499 /* If this section has not been allocated an _arm_elf_section_data
13500 structure then we cannot record anything. */
13501 arm_data = get_arm_elf_section_data (sec);
13502 if (arm_data == NULL)
13503 return FALSE;
13504
13505 mapcount = arm_data->mapcount;
13506 map = arm_data->map;
13507 errcount = arm_data->erratumcount;
13508
13509 if (errcount != 0)
13510 {
13511 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
13512
13513 for (errnode = arm_data->erratumlist; errnode != 0;
13514 errnode = errnode->next)
13515 {
13516 bfd_vma target = errnode->vma - offset;
13517
13518 switch (errnode->type)
13519 {
13520 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
13521 {
13522 bfd_vma branch_to_veneer;
13523 /* Original condition code of instruction, plus bit mask for
13524 ARM B instruction. */
13525 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
13526 | 0x0a000000;
13527
13528 /* The instruction is before the label. */
13529 target -= 4;
13530
13531 /* Above offset included in -4 below. */
13532 branch_to_veneer = errnode->u.b.veneer->vma
13533 - errnode->vma - 4;
13534
13535 if ((signed) branch_to_veneer < -(1 << 25)
13536 || (signed) branch_to_veneer >= (1 << 25))
13537 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13538 "range"), output_bfd);
13539
13540 insn |= (branch_to_veneer >> 2) & 0xffffff;
13541 contents[endianflip ^ target] = insn & 0xff;
13542 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
13543 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
13544 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
13545 }
13546 break;
13547
13548 case VFP11_ERRATUM_ARM_VENEER:
13549 {
13550 bfd_vma branch_from_veneer;
13551 unsigned int insn;
13552
13553 /* Take size of veneer into account. */
13554 branch_from_veneer = errnode->u.v.branch->vma
13555 - errnode->vma - 12;
13556
13557 if ((signed) branch_from_veneer < -(1 << 25)
13558 || (signed) branch_from_veneer >= (1 << 25))
13559 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13560 "range"), output_bfd);
13561
13562 /* Original instruction. */
13563 insn = errnode->u.v.branch->u.b.vfp_insn;
13564 contents[endianflip ^ target] = insn & 0xff;
13565 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
13566 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
13567 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
13568
13569 /* Branch back to insn after original insn. */
13570 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
13571 contents[endianflip ^ (target + 4)] = insn & 0xff;
13572 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
13573 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
13574 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
13575 }
13576 break;
13577
13578 default:
13579 abort ();
13580 }
13581 }
13582 }
13583
13584 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
13585 {
13586 arm_unwind_table_edit *edit_node
13587 = arm_data->u.exidx.unwind_edit_list;
13588 /* Now, sec->size is the size of the section we will write.  The original
13589 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
13590 markers) was sec->rawsize.  (If we performed no edits, rawsize will be
13591 zero and we should use size instead.)  */
13592 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
13593 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
13594 unsigned int in_index, out_index;
13595 bfd_vma add_to_offsets = 0;
13596
13597 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
13598 {
13599 if (edit_node)
13600 {
13601 unsigned int edit_index = edit_node->index;
13602
13603 if (in_index < edit_index && in_index * 8 < input_size)
13604 {
13605 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13606 contents + in_index * 8, add_to_offsets);
13607 out_index++;
13608 in_index++;
13609 }
13610 else if (in_index == edit_index
13611 || (in_index * 8 >= input_size
13612 && edit_index == UINT_MAX))
13613 {
13614 switch (edit_node->type)
13615 {
13616 case DELETE_EXIDX_ENTRY:
13617 in_index++;
13618 add_to_offsets += 8;
13619 break;
13620
13621 case INSERT_EXIDX_CANTUNWIND_AT_END:
13622 {
13623 asection *text_sec = edit_node->linked_section;
13624 bfd_vma text_offset = text_sec->output_section->vma
13625 + text_sec->output_offset
13626 + text_sec->size;
13627 bfd_vma exidx_offset = offset + out_index * 8;
13628 unsigned long prel31_offset;
13629
13630 /* Note: this is meant to be equivalent to an
13631 R_ARM_PREL31 relocation. These synthetic
13632 EXIDX_CANTUNWIND markers are not relocated by the
13633 usual BFD method. */
13634 prel31_offset = (text_offset - exidx_offset)
13635 & 0x7ffffffful;
13636
13637 /* First address we can't unwind. */
13638 bfd_put_32 (output_bfd, prel31_offset,
13639 &edited_contents[out_index * 8]);
13640
13641 /* Code for EXIDX_CANTUNWIND. */
13642 bfd_put_32 (output_bfd, 0x1,
13643 &edited_contents[out_index * 8 + 4]);
13644
13645 out_index++;
13646 add_to_offsets -= 8;
13647 }
13648 break;
13649 }
13650
13651 edit_node = edit_node->next;
13652 }
13653 }
13654 else
13655 {
13656 /* No more edits, copy remaining entries verbatim. */
13657 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13658 contents + in_index * 8, add_to_offsets);
13659 out_index++;
13660 in_index++;
13661 }
13662 }
13663
13664 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
13665 bfd_set_section_contents (output_bfd, sec->output_section,
13666 edited_contents,
13667 (file_ptr) sec->output_offset, sec->size);
13668
13669 return TRUE;
13670 }
13671
13672 /* Fix code to point to Cortex-A8 erratum stubs. */
13673 if (globals->fix_cortex_a8)
13674 {
13675 struct a8_branch_to_stub_data data;
13676
13677 data.writing_section = sec;
13678 data.contents = contents;
13679
13680 bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
13681 &data);
13682 }
13683
13684 if (mapcount == 0)
13685 return FALSE;
13686
13687 if (globals->byteswap_code)
13688 {
13689 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
13690
13691 ptr = map[0].vma;
13692 for (i = 0; i < mapcount; i++)
13693 {
13694 if (i == mapcount - 1)
13695 end = sec->size;
13696 else
13697 end = map[i + 1].vma;
13698
13699 switch (map[i].type)
13700 {
13701 case 'a':
13702 /* Byte swap code words. */
13703 while (ptr + 3 < end)
13704 {
13705 tmp = contents[ptr];
13706 contents[ptr] = contents[ptr + 3];
13707 contents[ptr + 3] = tmp;
13708 tmp = contents[ptr + 1];
13709 contents[ptr + 1] = contents[ptr + 2];
13710 contents[ptr + 2] = tmp;
13711 ptr += 4;
13712 }
13713 break;
13714
13715 case 't':
13716 /* Byte swap code halfwords. */
13717 while (ptr + 1 < end)
13718 {
13719 tmp = contents[ptr];
13720 contents[ptr] = contents[ptr + 1];
13721 contents[ptr + 1] = tmp;
13722 ptr += 2;
13723 }
13724 break;
13725
13726 case 'd':
13727 /* Leave data alone. */
13728 break;
13729 }
13730 ptr = end;
13731 }
13732 }
13733
13734 free (map);
13735 arm_data->mapcount = -1;
13736 arm_data->mapsize = 0;
13737 arm_data->map = NULL;
13738
13739 return FALSE;
13740 }
13741
13742 /* Display STT_ARM_TFUNC symbols as functions. */
13743
13744 static void
13745 elf32_arm_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
13746 asymbol *asym)
13747 {
13748 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
13749
13750 if (ELF_ST_TYPE (elfsym->internal_elf_sym.st_info) == STT_ARM_TFUNC)
13751 elfsym->symbol.flags |= BSF_FUNCTION;
13752 }
13753
13754
13755 /* Mangle thumb function symbols as we read them in. */
13756
13757 static bfd_boolean
13758 elf32_arm_swap_symbol_in (bfd * abfd,
13759 const void *psrc,
13760 const void *pshn,
13761 Elf_Internal_Sym *dst)
13762 {
13763 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
13764 return FALSE;
13765
13766 /* New EABI objects mark thumb function symbols by setting the low bit of
13767 the address. Turn these into STT_ARM_TFUNC. */
13768 if ((ELF_ST_TYPE (dst->st_info) == STT_FUNC)
13769 && (dst->st_value & 1))
13770 {
13771 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_ARM_TFUNC);
13772 dst->st_value &= ~(bfd_vma) 1;
13773 }
13774 return TRUE;
13775 }
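/* Sketch of the effect, using a made-up symbol: an EABI object that
   defines a Thumb function "foo" with st_value 0x8001 (STT_FUNC, low bit
   set) is read in here as "foo" at 0x8000 with type STT_ARM_TFUNC.
   elf32_arm_swap_symbol_out below applies the reverse transformation when
   the symbol table is written out again.  */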
13776
13777
13778 /* Mangle thumb function symbols as we write them out. */
13779
13780 static void
13781 elf32_arm_swap_symbol_out (bfd *abfd,
13782 const Elf_Internal_Sym *src,
13783 void *cdst,
13784 void *shndx)
13785 {
13786 Elf_Internal_Sym newsym;
13787
13788 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
13789 of the address set, as per the new EABI. We do this unconditionally
13790 because objcopy does not set the elf header flags until after
13791 it writes out the symbol table. */
13792 if (ELF_ST_TYPE (src->st_info) == STT_ARM_TFUNC)
13793 {
13794 newsym = *src;
13795 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
13796 if (newsym.st_shndx != SHN_UNDEF)
13797 {
13798 /* Do this only for defined symbols.  At link time, the static
13799 linker will simulate the work of the dynamic linker by resolving
13800 symbols, and will carry over the Thumb-ness of the symbols it
13801 finds to the output symbol table.  It is not clear exactly how
13802 this happens, but the Thumb-ness of undefined symbols may well be
13803 different at runtime, and writing '1' for them would be confusing
13804 for users and possibly for the dynamic linker itself.
13805 */
13806 newsym.st_value |= 1;
13807 }
13808
13809 src = &newsym;
13810 }
13811 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
13812 }
13813
13814 /* Add the PT_ARM_EXIDX program header. */
13815
13816 static bfd_boolean
13817 elf32_arm_modify_segment_map (bfd *abfd,
13818 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13819 {
13820 struct elf_segment_map *m;
13821 asection *sec;
13822
13823 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13824 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13825 {
13826 /* If there is already a PT_ARM_EXIDX header, then we do not
13827 want to add another one. This situation arises when running
13828 "strip"; the input binary already has the header. */
13829 m = elf_tdata (abfd)->segment_map;
13830 while (m && m->p_type != PT_ARM_EXIDX)
13831 m = m->next;
13832 if (!m)
13833 {
13834 m = (struct elf_segment_map *)
13835 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
13836 if (m == NULL)
13837 return FALSE;
13838 m->p_type = PT_ARM_EXIDX;
13839 m->count = 1;
13840 m->sections[0] = sec;
13841
13842 m->next = elf_tdata (abfd)->segment_map;
13843 elf_tdata (abfd)->segment_map = m;
13844 }
13845 }
13846
13847 return TRUE;
13848 }
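/* Rationale, stated informally: the PT_ARM_EXIDX program header lets a
   runtime unwinder locate the exception index table from the program
   headers alone (for instance via dl_iterate_phdr), without needing
   access to the section headers.  */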
13849
13850 /* We may add a PT_ARM_EXIDX program header. */
13851
13852 static int
13853 elf32_arm_additional_program_headers (bfd *abfd,
13854 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13855 {
13856 asection *sec;
13857
13858 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13859 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13860 return 1;
13861 else
13862 return 0;
13863 }
13864
13865 /* We have two function types: STT_FUNC and STT_ARM_TFUNC. */
13866
13867 static bfd_boolean
13868 elf32_arm_is_function_type (unsigned int type)
13869 {
13870 return (type == STT_FUNC) || (type == STT_ARM_TFUNC);
13871 }
13872
13873 /* We use this to override swap_symbol_in and swap_symbol_out. */
13874 const struct elf_size_info elf32_arm_size_info =
13875 {
13876 sizeof (Elf32_External_Ehdr),
13877 sizeof (Elf32_External_Phdr),
13878 sizeof (Elf32_External_Shdr),
13879 sizeof (Elf32_External_Rel),
13880 sizeof (Elf32_External_Rela),
13881 sizeof (Elf32_External_Sym),
13882 sizeof (Elf32_External_Dyn),
13883 sizeof (Elf_External_Note),
13884 4,
13885 1,
13886 32, 2,
13887 ELFCLASS32, EV_CURRENT,
13888 bfd_elf32_write_out_phdrs,
13889 bfd_elf32_write_shdrs_and_ehdr,
13890 bfd_elf32_checksum_contents,
13891 bfd_elf32_write_relocs,
13892 elf32_arm_swap_symbol_in,
13893 elf32_arm_swap_symbol_out,
13894 bfd_elf32_slurp_reloc_table,
13895 bfd_elf32_slurp_symbol_table,
13896 bfd_elf32_swap_dyn_in,
13897 bfd_elf32_swap_dyn_out,
13898 bfd_elf32_swap_reloc_in,
13899 bfd_elf32_swap_reloc_out,
13900 bfd_elf32_swap_reloca_in,
13901 bfd_elf32_swap_reloca_out
13902 };
13903
13904 #define ELF_ARCH bfd_arch_arm
13905 #define ELF_TARGET_ID ARM_ELF_DATA
13906 #define ELF_MACHINE_CODE EM_ARM
13907 #ifdef __QNXTARGET__
13908 #define ELF_MAXPAGESIZE 0x1000
13909 #else
13910 #define ELF_MAXPAGESIZE 0x8000
13911 #endif
13912 #define ELF_MINPAGESIZE 0x1000
13913 #define ELF_COMMONPAGESIZE 0x1000
13914
13915 #define bfd_elf32_mkobject elf32_arm_mkobject
13916
13917 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
13918 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
13919 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
13920 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
13921 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
13922 #define bfd_elf32_bfd_link_hash_table_free elf32_arm_hash_table_free
13923 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
13924 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
13925 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
13926 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
13927 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
13928 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
13929 #define bfd_elf32_bfd_final_link elf32_arm_final_link
13930
13931 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
13932 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
13933 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
13934 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
13935 #define elf_backend_check_relocs elf32_arm_check_relocs
13936 #define elf_backend_relocate_section elf32_arm_relocate_section
13937 #define elf_backend_write_section elf32_arm_write_section
13938 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
13939 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
13940 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
13941 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
13942 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
13943 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
13944 #define elf_backend_post_process_headers elf32_arm_post_process_headers
13945 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
13946 #define elf_backend_object_p elf32_arm_object_p
13947 #define elf_backend_section_flags elf32_arm_section_flags
13948 #define elf_backend_fake_sections elf32_arm_fake_sections
13949 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
13950 #define elf_backend_final_write_processing elf32_arm_final_write_processing
13951 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
13952 #define elf_backend_symbol_processing elf32_arm_symbol_processing
13953 #define elf_backend_size_info elf32_arm_size_info
13954 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
13955 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
13956 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
13957 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
13958 #define elf_backend_is_function_type elf32_arm_is_function_type
13959
13960 #define elf_backend_can_refcount 1
13961 #define elf_backend_can_gc_sections 1
13962 #define elf_backend_plt_readonly 1
13963 #define elf_backend_want_got_plt 1
13964 #define elf_backend_want_plt_sym 0
13965 #define elf_backend_may_use_rel_p 1
13966 #define elf_backend_may_use_rela_p 0
13967 #define elf_backend_default_use_rela_p 0
13968
13969 #define elf_backend_got_header_size 12
13970
13971 #undef elf_backend_obj_attrs_vendor
13972 #define elf_backend_obj_attrs_vendor "aeabi"
13973 #undef elf_backend_obj_attrs_section
13974 #define elf_backend_obj_attrs_section ".ARM.attributes"
13975 #undef elf_backend_obj_attrs_arg_type
13976 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
13977 #undef elf_backend_obj_attrs_section_type
13978 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
13979 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
13980
13981 #include "elf32-target.h"
13982
13983 /* VxWorks Targets. */
13984
13985 #undef TARGET_LITTLE_SYM
13986 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vxworks_vec
13987 #undef TARGET_LITTLE_NAME
13988 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
13989 #undef TARGET_BIG_SYM
13990 #define TARGET_BIG_SYM bfd_elf32_bigarm_vxworks_vec
13991 #undef TARGET_BIG_NAME
13992 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
13993
13994 /* Like elf32_arm_link_hash_table_create -- but overrides
13995 appropriately for VxWorks. */
13996
13997 static struct bfd_link_hash_table *
13998 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
13999 {
14000 struct bfd_link_hash_table *ret;
14001
14002 ret = elf32_arm_link_hash_table_create (abfd);
14003 if (ret)
14004 {
14005 struct elf32_arm_link_hash_table *htab
14006 = (struct elf32_arm_link_hash_table *) ret;
14007 htab->use_rel = 0;
14008 htab->vxworks_p = 1;
14009 }
14010 return ret;
14011 }
14012
14013 static void
14014 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
14015 {
14016 elf32_arm_final_write_processing (abfd, linker);
14017 elf_vxworks_final_write_processing (abfd, linker);
14018 }
14019
14020 #undef elf32_bed
14021 #define elf32_bed elf32_arm_vxworks_bed
14022
14023 #undef bfd_elf32_bfd_link_hash_table_create
14024 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
14025 #undef elf_backend_add_symbol_hook
14026 #define elf_backend_add_symbol_hook elf_vxworks_add_symbol_hook
14027 #undef elf_backend_final_write_processing
14028 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
14029 #undef elf_backend_emit_relocs
14030 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
14031
14032 #undef elf_backend_may_use_rel_p
14033 #define elf_backend_may_use_rel_p 0
14034 #undef elf_backend_may_use_rela_p
14035 #define elf_backend_may_use_rela_p 1
14036 #undef elf_backend_default_use_rela_p
14037 #define elf_backend_default_use_rela_p 1
14038 #undef elf_backend_want_plt_sym
14039 #define elf_backend_want_plt_sym 1
14040 #undef ELF_MAXPAGESIZE
14041 #define ELF_MAXPAGESIZE 0x1000
14042
14043 #include "elf32-target.h"
14044
14045
14046 /* Merge backend specific data from an object file to the output
14047 object file when linking. */
14048
14049 static bfd_boolean
14050 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
14051 {
14052 flagword out_flags;
14053 flagword in_flags;
14054 bfd_boolean flags_compatible = TRUE;
14055 asection *sec;
14056
14057 /* Check that we have the same endianness.  */
14058 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
14059 return FALSE;
14060
14061 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
14062 return TRUE;
14063
14064 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
14065 return FALSE;
14066
14067 /* The input BFD must have had its flags initialised. */
14068 /* The following seems bogus to me -- the flags are initialised in
14069 the assembler, but I don't think an elf_flags_init field is
14070 written into the object. */
14071 /* BFD_ASSERT (elf_flags_init (ibfd)); */
14072
14073 in_flags = elf_elfheader (ibfd)->e_flags;
14074 out_flags = elf_elfheader (obfd)->e_flags;
14075
14076 /* In theory there is no reason why we couldn't handle this. However
14077 in practice it isn't even close to working and there is no real
14078 reason to want it. */
14079 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
14080 && !(ibfd->flags & DYNAMIC)
14081 && (in_flags & EF_ARM_BE8))
14082 {
14083 _bfd_error_handler (_("error: %B is already in final BE8 format"),
14084 ibfd);
14085 return FALSE;
14086 }
14087
14088 if (!elf_flags_init (obfd))
14089 {
14090 /* If the input is the default architecture and had the default
14091 flags then do not bother setting the flags for the output
14092 architecture, instead allow future merges to do this. If no
14093 future merges ever set these flags then they will retain their
14094 uninitialised values, which, surprise surprise, correspond
14095 to the default values. */
14096 if (bfd_get_arch_info (ibfd)->the_default
14097 && elf_elfheader (ibfd)->e_flags == 0)
14098 return TRUE;
14099
14100 elf_flags_init (obfd) = TRUE;
14101 elf_elfheader (obfd)->e_flags = in_flags;
14102
14103 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
14104 && bfd_get_arch_info (obfd)->the_default)
14105 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
14106
14107 return TRUE;
14108 }
14109
14110 /* Determine what should happen if the input ARM architecture
14111 does not match the output ARM architecture. */
14112 if (! bfd_arm_merge_machines (ibfd, obfd))
14113 return FALSE;
14114
14115 /* Identical flags must be compatible. */
14116 if (in_flags == out_flags)
14117 return TRUE;
14118
14119 /* Check to see if the input BFD actually contains any sections. If
14120 not, its flags may not have been initialised either, but it
14121 cannot actually cause any incompatibility. Do not short-circuit
14122 dynamic objects; their section list may be emptied by
14123 elf_link_add_object_symbols.
14124
14125 Also check to see if there are no code sections in the input.
14126 In this case there is no need to check for code specific flags.
14127 XXX - do we need to worry about floating-point format compatibility
14128 in data sections ? */
14129 if (!(ibfd->flags & DYNAMIC))
14130 {
14131 bfd_boolean null_input_bfd = TRUE;
14132 bfd_boolean only_data_sections = TRUE;
14133
14134 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
14135 {
14136 /* Ignore synthetic glue sections. */
14137 if (strcmp (sec->name, ".glue_7")
14138 && strcmp (sec->name, ".glue_7t"))
14139 {
14140 if ((bfd_get_section_flags (ibfd, sec)
14141 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
14142 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
14143 only_data_sections = FALSE;
14144
14145 null_input_bfd = FALSE;
14146 break;
14147 }
14148 }
14149
14150 if (null_input_bfd || only_data_sections)
14151 return TRUE;
14152 }
14153
14154 /* Complain about various flag mismatches. */
14155 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
14156 EF_ARM_EABI_VERSION (out_flags)))
14157 {
14158 _bfd_error_handler
14159 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
14160 ibfd, obfd,
14161 (in_flags & EF_ARM_EABIMASK) >> 24,
14162 (out_flags & EF_ARM_EABIMASK) >> 24);
14163 return FALSE;
14164 }
14165
14166 /* Not sure what needs to be checked for EABI versions >= 1. */
14167 /* VxWorks libraries do not use these flags. */
14168 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
14169 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
14170 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
14171 {
14172 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
14173 {
14174 _bfd_error_handler
14175 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
14176 ibfd, obfd,
14177 in_flags & EF_ARM_APCS_26 ? 26 : 32,
14178 out_flags & EF_ARM_APCS_26 ? 26 : 32);
14179 flags_compatible = FALSE;
14180 }
14181
14182 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
14183 {
14184 if (in_flags & EF_ARM_APCS_FLOAT)
14185 _bfd_error_handler
14186 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
14187 ibfd, obfd);
14188 else
14189 _bfd_error_handler
14190 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
14191 ibfd, obfd);
14192
14193 flags_compatible = FALSE;
14194 }
14195
14196 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
14197 {
14198 if (in_flags & EF_ARM_VFP_FLOAT)
14199 _bfd_error_handler
14200 (_("error: %B uses VFP instructions, whereas %B does not"),
14201 ibfd, obfd);
14202 else
14203 _bfd_error_handler
14204 (_("error: %B uses FPA instructions, whereas %B does not"),
14205 ibfd, obfd);
14206
14207 flags_compatible = FALSE;
14208 }
14209
14210 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
14211 {
14212 if (in_flags & EF_ARM_MAVERICK_FLOAT)
14213 _bfd_error_handler
14214 (_("error: %B uses Maverick instructions, whereas %B does not"),
14215 ibfd, obfd);
14216 else
14217 _bfd_error_handler
14218 (_("error: %B does not use Maverick instructions, whereas %B does"),
14219 ibfd, obfd);
14220
14221 flags_compatible = FALSE;
14222 }
14223
14224 #ifdef EF_ARM_SOFT_FLOAT
14225 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
14226 {
14227 /* We can allow interworking between code that is VFP format
14228 layout, and uses either soft float or integer regs for
14229 passing floating point arguments and results. We already
14230 know that the APCS_FLOAT flags match; similarly for VFP
14231 flags. */
14232 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
14233 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
14234 {
14235 if (in_flags & EF_ARM_SOFT_FLOAT)
14236 _bfd_error_handler
14237 (_("error: %B uses software FP, whereas %B uses hardware FP"),
14238 ibfd, obfd);
14239 else
14240 _bfd_error_handler
14241 (_("error: %B uses hardware FP, whereas %B uses software FP"),
14242 ibfd, obfd);
14243
14244 flags_compatible = FALSE;
14245 }
14246 }
14247 #endif
14248
14249 /* Interworking mismatch is only a warning. */
14250 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
14251 {
14252 if (in_flags & EF_ARM_INTERWORK)
14253 {
14254 _bfd_error_handler
14255 (_("Warning: %B supports interworking, whereas %B does not"),
14256 ibfd, obfd);
14257 }
14258 else
14259 {
14260 _bfd_error_handler
14261 (_("Warning: %B does not support interworking, whereas %B does"),
14262 ibfd, obfd);
14263 }
14264 }
14265 }
14266
14267 return flags_compatible;
14268 }
14269
14270
14271 /* Symbian OS Targets. */
14272
14273 #undef TARGET_LITTLE_SYM
14274 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_symbian_vec
14275 #undef TARGET_LITTLE_NAME
14276 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
14277 #undef TARGET_BIG_SYM
14278 #define TARGET_BIG_SYM bfd_elf32_bigarm_symbian_vec
14279 #undef TARGET_BIG_NAME
14280 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
14281
14282 /* Like elf32_arm_link_hash_table_create -- but overrides
14283 appropriately for Symbian OS. */
14284
14285 static struct bfd_link_hash_table *
14286 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
14287 {
14288 struct bfd_link_hash_table *ret;
14289
14290 ret = elf32_arm_link_hash_table_create (abfd);
14291 if (ret)
14292 {
14293 struct elf32_arm_link_hash_table *htab
14294 = (struct elf32_arm_link_hash_table *)ret;
14295 /* There is no PLT header for Symbian OS. */
14296 htab->plt_header_size = 0;
14297 /* The PLT entries are each one instruction and one word. */
14298 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
14299 htab->symbian_p = 1;
14300 /* Symbian uses armv5t or above, so use_blx is always true. */
14301 htab->use_blx = 1;
14302 htab->root.is_relocatable_executable = 1;
14303 }
14304 return ret;
14305 }
14306
14307 static const struct bfd_elf_special_section
14308 elf32_arm_symbian_special_sections[] =
14309 {
14310 /* In a BPABI executable, the dynamic linking sections do not go in
14311 the loadable read-only segment. The post-linker may wish to
14312 refer to these sections, but they are not part of the final
14313 program image. */
14314 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
14315 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
14316 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
14317 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
14318 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
14319 /* These sections do not need to be writable as the SymbianOS
14320 postlinker will arrange things so that no dynamic relocation is
14321 required. */
14322 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
14323 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
14324 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
14325 { NULL, 0, 0, 0, 0 }
14326 };
14327
14328 static void
14329 elf32_arm_symbian_begin_write_processing (bfd *abfd,
14330 struct bfd_link_info *link_info)
14331 {
14332 /* BPABI objects are never loaded directly by an OS kernel; they are
14333 processed by a postlinker first, into an OS-specific format. If
14334 the D_PAGED bit is set on the file, BFD will align segments on
14335 page boundaries, so that an OS can directly map the file. With
14336 BPABI objects, that just results in wasted space. In addition,
14337 because we clear the D_PAGED bit, map_sections_to_segments will
14338 recognize that the program headers should not be mapped into any
14339 loadable segment. */
14340 abfd->flags &= ~D_PAGED;
14341 elf32_arm_begin_write_processing (abfd, link_info);
14342 }
14343
14344 static bfd_boolean
14345 elf32_arm_symbian_modify_segment_map (bfd *abfd,
14346 struct bfd_link_info *info)
14347 {
14348 struct elf_segment_map *m;
14349 asection *dynsec;
14350
14351 /* BPABI shared libraries and executables should have a PT_DYNAMIC
14352 segment. However, because the .dynamic section is not marked
14353 with SEC_LOAD, the generic ELF code will not create such a
14354 segment. */
14355 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
14356 if (dynsec)
14357 {
14358 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
14359 if (m->p_type == PT_DYNAMIC)
14360 break;
14361
14362 if (m == NULL)
14363 {
14364 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
14365 m->next = elf_tdata (abfd)->segment_map;
14366 elf_tdata (abfd)->segment_map = m;
14367 }
14368 }
14369
14370 /* Also call the generic arm routine. */
14371 return elf32_arm_modify_segment_map (abfd, info);
14372 }
14373
14374 /* Return address for Ith PLT stub in section PLT, for relocation REL
14375 or (bfd_vma) -1 if it should not be included. */
14376
14377 static bfd_vma
14378 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
14379 const arelent *rel ATTRIBUTE_UNUSED)
14380 {
14381 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
14382 }
14383
14384
14385 #undef elf32_bed
14386 #define elf32_bed elf32_arm_symbian_bed
14387
14388 /* The dynamic sections are not allocated on SymbianOS; the postlinker
14389 will process them and then discard them. */
14390 #undef ELF_DYNAMIC_SEC_FLAGS
14391 #define ELF_DYNAMIC_SEC_FLAGS \
14392 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
14393
14394 #undef elf_backend_add_symbol_hook
14395 #undef elf_backend_emit_relocs
14396
14397 #undef bfd_elf32_bfd_link_hash_table_create
14398 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
14399 #undef elf_backend_special_sections
14400 #define elf_backend_special_sections elf32_arm_symbian_special_sections
14401 #undef elf_backend_begin_write_processing
14402 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
14403 #undef elf_backend_final_write_processing
14404 #define elf_backend_final_write_processing elf32_arm_final_write_processing
14405
14406 #undef elf_backend_modify_segment_map
14407 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
14408
14409 /* There is no .got section for BPABI objects, and hence no header. */
14410 #undef elf_backend_got_header_size
14411 #define elf_backend_got_header_size 0
14412
14413 /* Similarly, there is no .got.plt section. */
14414 #undef elf_backend_want_got_plt
14415 #define elf_backend_want_got_plt 0
14416
14417 #undef elf_backend_plt_sym_val
14418 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
14419
14420 #undef elf_backend_may_use_rel_p
14421 #define elf_backend_may_use_rel_p 1
14422 #undef elf_backend_may_use_rela_p
14423 #define elf_backend_may_use_rela_p 0
14424 #undef elf_backend_default_use_rela_p
14425 #define elf_backend_default_use_rela_p 0
14426 #undef elf_backend_want_plt_sym
14427 #define elf_backend_want_plt_sym 0
14428 #undef ELF_MAXPAGESIZE
14429 #define ELF_MAXPAGESIZE 0x8000
14430
14431 #include "elf32-target.h"