1 /* 32-bit ELF support for ARM
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008, 2009 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include <limits.h>
24
25 #include "bfd.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-vxworks.h"
30 #include "elf/arm.h"
31
32 /* Return the relocation section associated with NAME. HTAB is the
33 bfd's elf32_arm_link_hash_table. */
34 #define RELOC_SECTION(HTAB, NAME) \
35 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
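/* Illustrative note (not part of the original source): for a REL-style
   target, where use_rel is set in the hash table, RELOC_SECTION (htab,
   ".text") evaluates to ".rel.text"; for a RELA-style target it
   evaluates to ".rela.text".  */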
36
37 /* Return size of a relocation entry. HTAB is the bfd's
38 elf32_arm_link_hash_table. */
39 #define RELOC_SIZE(HTAB) \
40 ((HTAB)->use_rel \
41 ? sizeof (Elf32_External_Rel) \
42 : sizeof (Elf32_External_Rela))
43
44 /* Return function to swap relocations in. HTAB is the bfd's
45 elf32_arm_link_hash_table. */
46 #define SWAP_RELOC_IN(HTAB) \
47 ((HTAB)->use_rel \
48 ? bfd_elf32_swap_reloc_in \
49 : bfd_elf32_swap_reloca_in)
50
51 /* Return function to swap relocations out. HTAB is the bfd's
52 elf32_arm_link_hash_table. */
53 #define SWAP_RELOC_OUT(HTAB) \
54 ((HTAB)->use_rel \
55 ? bfd_elf32_swap_reloc_out \
56 : bfd_elf32_swap_reloca_out)
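/* Usage sketch (illustrative, not part of the original source): emitting
   one dynamic relocation with the helpers above.  Here SRELOC is the
   relocation section picked via RELOC_SECTION and OUTREL is a filled-in
   Elf_Internal_Rela:

     bfd_byte *loc = sreloc->contents
                     + sreloc->reloc_count++ * RELOC_SIZE (htab);
     SWAP_RELOC_OUT (htab) (output_bfd, &outrel, loc);  */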
57
58 #define elf_info_to_howto 0
59 #define elf_info_to_howto_rel elf32_arm_info_to_howto
60
61 #define ARM_ELF_ABI_VERSION 0
62 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
63
64 static struct elf_backend_data elf32_arm_vxworks_bed;
65
66 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
67 struct bfd_link_info *link_info,
68 asection *sec,
69 bfd_byte *contents);
70
71 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
72 R_ARM_PC24 as an index into this table, and to find the R_ARM_PC24 HOWTO
73 in that slot. */
74
75 static reloc_howto_type elf32_arm_howto_table_1[] =
76 {
77 /* No relocation. */
78 HOWTO (R_ARM_NONE, /* type */
79 0, /* rightshift */
80 0, /* size (0 = byte, 1 = short, 2 = long) */
81 0, /* bitsize */
82 FALSE, /* pc_relative */
83 0, /* bitpos */
84 complain_overflow_dont,/* complain_on_overflow */
85 bfd_elf_generic_reloc, /* special_function */
86 "R_ARM_NONE", /* name */
87 FALSE, /* partial_inplace */
88 0, /* src_mask */
89 0, /* dst_mask */
90 FALSE), /* pcrel_offset */
91
92 HOWTO (R_ARM_PC24, /* type */
93 2, /* rightshift */
94 2, /* size (0 = byte, 1 = short, 2 = long) */
95 24, /* bitsize */
96 TRUE, /* pc_relative */
97 0, /* bitpos */
98 complain_overflow_signed,/* complain_on_overflow */
99 bfd_elf_generic_reloc, /* special_function */
100 "R_ARM_PC24", /* name */
101 FALSE, /* partial_inplace */
102 0x00ffffff, /* src_mask */
103 0x00ffffff, /* dst_mask */
104 TRUE), /* pcrel_offset */
105
106 /* 32 bit absolute */
107 HOWTO (R_ARM_ABS32, /* type */
108 0, /* rightshift */
109 2, /* size (0 = byte, 1 = short, 2 = long) */
110 32, /* bitsize */
111 FALSE, /* pc_relative */
112 0, /* bitpos */
113 complain_overflow_bitfield,/* complain_on_overflow */
114 bfd_elf_generic_reloc, /* special_function */
115 "R_ARM_ABS32", /* name */
116 FALSE, /* partial_inplace */
117 0xffffffff, /* src_mask */
118 0xffffffff, /* dst_mask */
119 FALSE), /* pcrel_offset */
120
121 /* standard 32bit pc-relative reloc */
122 HOWTO (R_ARM_REL32, /* type */
123 0, /* rightshift */
124 2, /* size (0 = byte, 1 = short, 2 = long) */
125 32, /* bitsize */
126 TRUE, /* pc_relative */
127 0, /* bitpos */
128 complain_overflow_bitfield,/* complain_on_overflow */
129 bfd_elf_generic_reloc, /* special_function */
130 "R_ARM_REL32", /* name */
131 FALSE, /* partial_inplace */
132 0xffffffff, /* src_mask */
133 0xffffffff, /* dst_mask */
134 TRUE), /* pcrel_offset */
135
136 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
137 HOWTO (R_ARM_LDR_PC_G0, /* type */
138 0, /* rightshift */
139 0, /* size (0 = byte, 1 = short, 2 = long) */
140 32, /* bitsize */
141 TRUE, /* pc_relative */
142 0, /* bitpos */
143 complain_overflow_dont,/* complain_on_overflow */
144 bfd_elf_generic_reloc, /* special_function */
145 "R_ARM_LDR_PC_G0", /* name */
146 FALSE, /* partial_inplace */
147 0xffffffff, /* src_mask */
148 0xffffffff, /* dst_mask */
149 TRUE), /* pcrel_offset */
150
151 /* 16 bit absolute */
152 HOWTO (R_ARM_ABS16, /* type */
153 0, /* rightshift */
154 1, /* size (0 = byte, 1 = short, 2 = long) */
155 16, /* bitsize */
156 FALSE, /* pc_relative */
157 0, /* bitpos */
158 complain_overflow_bitfield,/* complain_on_overflow */
159 bfd_elf_generic_reloc, /* special_function */
160 "R_ARM_ABS16", /* name */
161 FALSE, /* partial_inplace */
162 0x0000ffff, /* src_mask */
163 0x0000ffff, /* dst_mask */
164 FALSE), /* pcrel_offset */
165
166 /* 12 bit absolute */
167 HOWTO (R_ARM_ABS12, /* type */
168 0, /* rightshift */
169 2, /* size (0 = byte, 1 = short, 2 = long) */
170 12, /* bitsize */
171 FALSE, /* pc_relative */
172 0, /* bitpos */
173 complain_overflow_bitfield,/* complain_on_overflow */
174 bfd_elf_generic_reloc, /* special_function */
175 "R_ARM_ABS12", /* name */
176 FALSE, /* partial_inplace */
177 0x00000fff, /* src_mask */
178 0x00000fff, /* dst_mask */
179 FALSE), /* pcrel_offset */
180
181 HOWTO (R_ARM_THM_ABS5, /* type */
182 6, /* rightshift */
183 1, /* size (0 = byte, 1 = short, 2 = long) */
184 5, /* bitsize */
185 FALSE, /* pc_relative */
186 0, /* bitpos */
187 complain_overflow_bitfield,/* complain_on_overflow */
188 bfd_elf_generic_reloc, /* special_function */
189 "R_ARM_THM_ABS5", /* name */
190 FALSE, /* partial_inplace */
191 0x000007e0, /* src_mask */
192 0x000007e0, /* dst_mask */
193 FALSE), /* pcrel_offset */
194
195 /* 8 bit absolute */
196 HOWTO (R_ARM_ABS8, /* type */
197 0, /* rightshift */
198 0, /* size (0 = byte, 1 = short, 2 = long) */
199 8, /* bitsize */
200 FALSE, /* pc_relative */
201 0, /* bitpos */
202 complain_overflow_bitfield,/* complain_on_overflow */
203 bfd_elf_generic_reloc, /* special_function */
204 "R_ARM_ABS8", /* name */
205 FALSE, /* partial_inplace */
206 0x000000ff, /* src_mask */
207 0x000000ff, /* dst_mask */
208 FALSE), /* pcrel_offset */
209
210 HOWTO (R_ARM_SBREL32, /* type */
211 0, /* rightshift */
212 2, /* size (0 = byte, 1 = short, 2 = long) */
213 32, /* bitsize */
214 FALSE, /* pc_relative */
215 0, /* bitpos */
216 complain_overflow_dont,/* complain_on_overflow */
217 bfd_elf_generic_reloc, /* special_function */
218 "R_ARM_SBREL32", /* name */
219 FALSE, /* partial_inplace */
220 0xffffffff, /* src_mask */
221 0xffffffff, /* dst_mask */
222 FALSE), /* pcrel_offset */
223
224 HOWTO (R_ARM_THM_CALL, /* type */
225 1, /* rightshift */
226 2, /* size (0 = byte, 1 = short, 2 = long) */
227 25, /* bitsize */
228 TRUE, /* pc_relative */
229 0, /* bitpos */
230 complain_overflow_signed,/* complain_on_overflow */
231 bfd_elf_generic_reloc, /* special_function */
232 "R_ARM_THM_CALL", /* name */
233 FALSE, /* partial_inplace */
234 0x07ff07ff, /* src_mask */
235 0x07ff07ff, /* dst_mask */
236 TRUE), /* pcrel_offset */
237
238 HOWTO (R_ARM_THM_PC8, /* type */
239 1, /* rightshift */
240 1, /* size (0 = byte, 1 = short, 2 = long) */
241 8, /* bitsize */
242 TRUE, /* pc_relative */
243 0, /* bitpos */
244 complain_overflow_signed,/* complain_on_overflow */
245 bfd_elf_generic_reloc, /* special_function */
246 "R_ARM_THM_PC8", /* name */
247 FALSE, /* partial_inplace */
248 0x000000ff, /* src_mask */
249 0x000000ff, /* dst_mask */
250 TRUE), /* pcrel_offset */
251
252 HOWTO (R_ARM_BREL_ADJ, /* type */
253 1, /* rightshift */
254 1, /* size (0 = byte, 1 = short, 2 = long) */
255 32, /* bitsize */
256 FALSE, /* pc_relative */
257 0, /* bitpos */
258 complain_overflow_signed,/* complain_on_overflow */
259 bfd_elf_generic_reloc, /* special_function */
260 "R_ARM_BREL_ADJ", /* name */
261 FALSE, /* partial_inplace */
262 0xffffffff, /* src_mask */
263 0xffffffff, /* dst_mask */
264 FALSE), /* pcrel_offset */
265
266 HOWTO (R_ARM_SWI24, /* type */
267 0, /* rightshift */
268 0, /* size (0 = byte, 1 = short, 2 = long) */
269 0, /* bitsize */
270 FALSE, /* pc_relative */
271 0, /* bitpos */
272 complain_overflow_signed,/* complain_on_overflow */
273 bfd_elf_generic_reloc, /* special_function */
274 "R_ARM_SWI24", /* name */
275 FALSE, /* partial_inplace */
276 0x00000000, /* src_mask */
277 0x00000000, /* dst_mask */
278 FALSE), /* pcrel_offset */
279
280 HOWTO (R_ARM_THM_SWI8, /* type */
281 0, /* rightshift */
282 0, /* size (0 = byte, 1 = short, 2 = long) */
283 0, /* bitsize */
284 FALSE, /* pc_relative */
285 0, /* bitpos */
286 complain_overflow_signed,/* complain_on_overflow */
287 bfd_elf_generic_reloc, /* special_function */
288 "R_ARM_SWI8", /* name */
289 FALSE, /* partial_inplace */
290 0x00000000, /* src_mask */
291 0x00000000, /* dst_mask */
292 FALSE), /* pcrel_offset */
293
294 /* BLX instruction for the ARM. */
295 HOWTO (R_ARM_XPC25, /* type */
296 2, /* rightshift */
297 2, /* size (0 = byte, 1 = short, 2 = long) */
298 25, /* bitsize */
299 TRUE, /* pc_relative */
300 0, /* bitpos */
301 complain_overflow_signed,/* complain_on_overflow */
302 bfd_elf_generic_reloc, /* special_function */
303 "R_ARM_XPC25", /* name */
304 FALSE, /* partial_inplace */
305 0x00ffffff, /* src_mask */
306 0x00ffffff, /* dst_mask */
307 TRUE), /* pcrel_offset */
308
309 /* BLX instruction for the Thumb. */
310 HOWTO (R_ARM_THM_XPC22, /* type */
311 2, /* rightshift */
312 2, /* size (0 = byte, 1 = short, 2 = long) */
313 22, /* bitsize */
314 TRUE, /* pc_relative */
315 0, /* bitpos */
316 complain_overflow_signed,/* complain_on_overflow */
317 bfd_elf_generic_reloc, /* special_function */
318 "R_ARM_THM_XPC22", /* name */
319 FALSE, /* partial_inplace */
320 0x07ff07ff, /* src_mask */
321 0x07ff07ff, /* dst_mask */
322 TRUE), /* pcrel_offset */
323
324 /* Dynamic TLS relocations. */
325
326 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
327 0, /* rightshift */
328 2, /* size (0 = byte, 1 = short, 2 = long) */
329 32, /* bitsize */
330 FALSE, /* pc_relative */
331 0, /* bitpos */
332 complain_overflow_bitfield,/* complain_on_overflow */
333 bfd_elf_generic_reloc, /* special_function */
334 "R_ARM_TLS_DTPMOD32", /* name */
335 TRUE, /* partial_inplace */
336 0xffffffff, /* src_mask */
337 0xffffffff, /* dst_mask */
338 FALSE), /* pcrel_offset */
339
340 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
341 0, /* rightshift */
342 2, /* size (0 = byte, 1 = short, 2 = long) */
343 32, /* bitsize */
344 FALSE, /* pc_relative */
345 0, /* bitpos */
346 complain_overflow_bitfield,/* complain_on_overflow */
347 bfd_elf_generic_reloc, /* special_function */
348 "R_ARM_TLS_DTPOFF32", /* name */
349 TRUE, /* partial_inplace */
350 0xffffffff, /* src_mask */
351 0xffffffff, /* dst_mask */
352 FALSE), /* pcrel_offset */
353
354 HOWTO (R_ARM_TLS_TPOFF32, /* type */
355 0, /* rightshift */
356 2, /* size (0 = byte, 1 = short, 2 = long) */
357 32, /* bitsize */
358 FALSE, /* pc_relative */
359 0, /* bitpos */
360 complain_overflow_bitfield,/* complain_on_overflow */
361 bfd_elf_generic_reloc, /* special_function */
362 "R_ARM_TLS_TPOFF32", /* name */
363 TRUE, /* partial_inplace */
364 0xffffffff, /* src_mask */
365 0xffffffff, /* dst_mask */
366 FALSE), /* pcrel_offset */
367
368 /* Relocs used in ARM Linux */
369
370 HOWTO (R_ARM_COPY, /* type */
371 0, /* rightshift */
372 2, /* size (0 = byte, 1 = short, 2 = long) */
373 32, /* bitsize */
374 FALSE, /* pc_relative */
375 0, /* bitpos */
376 complain_overflow_bitfield,/* complain_on_overflow */
377 bfd_elf_generic_reloc, /* special_function */
378 "R_ARM_COPY", /* name */
379 TRUE, /* partial_inplace */
380 0xffffffff, /* src_mask */
381 0xffffffff, /* dst_mask */
382 FALSE), /* pcrel_offset */
383
384 HOWTO (R_ARM_GLOB_DAT, /* type */
385 0, /* rightshift */
386 2, /* size (0 = byte, 1 = short, 2 = long) */
387 32, /* bitsize */
388 FALSE, /* pc_relative */
389 0, /* bitpos */
390 complain_overflow_bitfield,/* complain_on_overflow */
391 bfd_elf_generic_reloc, /* special_function */
392 "R_ARM_GLOB_DAT", /* name */
393 TRUE, /* partial_inplace */
394 0xffffffff, /* src_mask */
395 0xffffffff, /* dst_mask */
396 FALSE), /* pcrel_offset */
397
398 HOWTO (R_ARM_JUMP_SLOT, /* type */
399 0, /* rightshift */
400 2, /* size (0 = byte, 1 = short, 2 = long) */
401 32, /* bitsize */
402 FALSE, /* pc_relative */
403 0, /* bitpos */
404 complain_overflow_bitfield,/* complain_on_overflow */
405 bfd_elf_generic_reloc, /* special_function */
406 "R_ARM_JUMP_SLOT", /* name */
407 TRUE, /* partial_inplace */
408 0xffffffff, /* src_mask */
409 0xffffffff, /* dst_mask */
410 FALSE), /* pcrel_offset */
411
412 HOWTO (R_ARM_RELATIVE, /* type */
413 0, /* rightshift */
414 2, /* size (0 = byte, 1 = short, 2 = long) */
415 32, /* bitsize */
416 FALSE, /* pc_relative */
417 0, /* bitpos */
418 complain_overflow_bitfield,/* complain_on_overflow */
419 bfd_elf_generic_reloc, /* special_function */
420 "R_ARM_RELATIVE", /* name */
421 TRUE, /* partial_inplace */
422 0xffffffff, /* src_mask */
423 0xffffffff, /* dst_mask */
424 FALSE), /* pcrel_offset */
425
426 HOWTO (R_ARM_GOTOFF32, /* type */
427 0, /* rightshift */
428 2, /* size (0 = byte, 1 = short, 2 = long) */
429 32, /* bitsize */
430 FALSE, /* pc_relative */
431 0, /* bitpos */
432 complain_overflow_bitfield,/* complain_on_overflow */
433 bfd_elf_generic_reloc, /* special_function */
434 "R_ARM_GOTOFF32", /* name */
435 TRUE, /* partial_inplace */
436 0xffffffff, /* src_mask */
437 0xffffffff, /* dst_mask */
438 FALSE), /* pcrel_offset */
439
440 HOWTO (R_ARM_GOTPC, /* type */
441 0, /* rightshift */
442 2, /* size (0 = byte, 1 = short, 2 = long) */
443 32, /* bitsize */
444 TRUE, /* pc_relative */
445 0, /* bitpos */
446 complain_overflow_bitfield,/* complain_on_overflow */
447 bfd_elf_generic_reloc, /* special_function */
448 "R_ARM_GOTPC", /* name */
449 TRUE, /* partial_inplace */
450 0xffffffff, /* src_mask */
451 0xffffffff, /* dst_mask */
452 TRUE), /* pcrel_offset */
453
454 HOWTO (R_ARM_GOT32, /* type */
455 0, /* rightshift */
456 2, /* size (0 = byte, 1 = short, 2 = long) */
457 32, /* bitsize */
458 FALSE, /* pc_relative */
459 0, /* bitpos */
460 complain_overflow_bitfield,/* complain_on_overflow */
461 bfd_elf_generic_reloc, /* special_function */
462 "R_ARM_GOT32", /* name */
463 TRUE, /* partial_inplace */
464 0xffffffff, /* src_mask */
465 0xffffffff, /* dst_mask */
466 FALSE), /* pcrel_offset */
467
468 HOWTO (R_ARM_PLT32, /* type */
469 2, /* rightshift */
470 2, /* size (0 = byte, 1 = short, 2 = long) */
471 24, /* bitsize */
472 TRUE, /* pc_relative */
473 0, /* bitpos */
474 complain_overflow_bitfield,/* complain_on_overflow */
475 bfd_elf_generic_reloc, /* special_function */
476 "R_ARM_PLT32", /* name */
477 FALSE, /* partial_inplace */
478 0x00ffffff, /* src_mask */
479 0x00ffffff, /* dst_mask */
480 TRUE), /* pcrel_offset */
481
482 HOWTO (R_ARM_CALL, /* type */
483 2, /* rightshift */
484 2, /* size (0 = byte, 1 = short, 2 = long) */
485 24, /* bitsize */
486 TRUE, /* pc_relative */
487 0, /* bitpos */
488 complain_overflow_signed,/* complain_on_overflow */
489 bfd_elf_generic_reloc, /* special_function */
490 "R_ARM_CALL", /* name */
491 FALSE, /* partial_inplace */
492 0x00ffffff, /* src_mask */
493 0x00ffffff, /* dst_mask */
494 TRUE), /* pcrel_offset */
495
496 HOWTO (R_ARM_JUMP24, /* type */
497 2, /* rightshift */
498 2, /* size (0 = byte, 1 = short, 2 = long) */
499 24, /* bitsize */
500 TRUE, /* pc_relative */
501 0, /* bitpos */
502 complain_overflow_signed,/* complain_on_overflow */
503 bfd_elf_generic_reloc, /* special_function */
504 "R_ARM_JUMP24", /* name */
505 FALSE, /* partial_inplace */
506 0x00ffffff, /* src_mask */
507 0x00ffffff, /* dst_mask */
508 TRUE), /* pcrel_offset */
509
510 HOWTO (R_ARM_THM_JUMP24, /* type */
511 1, /* rightshift */
512 2, /* size (0 = byte, 1 = short, 2 = long) */
513 24, /* bitsize */
514 TRUE, /* pc_relative */
515 0, /* bitpos */
516 complain_overflow_signed,/* complain_on_overflow */
517 bfd_elf_generic_reloc, /* special_function */
518 "R_ARM_THM_JUMP24", /* name */
519 FALSE, /* partial_inplace */
520 0x07ff2fff, /* src_mask */
521 0x07ff2fff, /* dst_mask */
522 TRUE), /* pcrel_offset */
523
524 HOWTO (R_ARM_BASE_ABS, /* type */
525 0, /* rightshift */
526 2, /* size (0 = byte, 1 = short, 2 = long) */
527 32, /* bitsize */
528 FALSE, /* pc_relative */
529 0, /* bitpos */
530 complain_overflow_dont,/* complain_on_overflow */
531 bfd_elf_generic_reloc, /* special_function */
532 "R_ARM_BASE_ABS", /* name */
533 FALSE, /* partial_inplace */
534 0xffffffff, /* src_mask */
535 0xffffffff, /* dst_mask */
536 FALSE), /* pcrel_offset */
537
538 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
539 0, /* rightshift */
540 2, /* size (0 = byte, 1 = short, 2 = long) */
541 12, /* bitsize */
542 TRUE, /* pc_relative */
543 0, /* bitpos */
544 complain_overflow_dont,/* complain_on_overflow */
545 bfd_elf_generic_reloc, /* special_function */
546 "R_ARM_ALU_PCREL_7_0", /* name */
547 FALSE, /* partial_inplace */
548 0x00000fff, /* src_mask */
549 0x00000fff, /* dst_mask */
550 TRUE), /* pcrel_offset */
551
552 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
553 0, /* rightshift */
554 2, /* size (0 = byte, 1 = short, 2 = long) */
555 12, /* bitsize */
556 TRUE, /* pc_relative */
557 8, /* bitpos */
558 complain_overflow_dont,/* complain_on_overflow */
559 bfd_elf_generic_reloc, /* special_function */
560 "R_ARM_ALU_PCREL_15_8",/* name */
561 FALSE, /* partial_inplace */
562 0x00000fff, /* src_mask */
563 0x00000fff, /* dst_mask */
564 TRUE), /* pcrel_offset */
565
566 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
567 0, /* rightshift */
568 2, /* size (0 = byte, 1 = short, 2 = long) */
569 12, /* bitsize */
570 TRUE, /* pc_relative */
571 16, /* bitpos */
572 complain_overflow_dont,/* complain_on_overflow */
573 bfd_elf_generic_reloc, /* special_function */
574 "R_ARM_ALU_PCREL_23_15",/* name */
575 FALSE, /* partial_inplace */
576 0x00000fff, /* src_mask */
577 0x00000fff, /* dst_mask */
578 TRUE), /* pcrel_offset */
579
580 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
581 0, /* rightshift */
582 2, /* size (0 = byte, 1 = short, 2 = long) */
583 12, /* bitsize */
584 FALSE, /* pc_relative */
585 0, /* bitpos */
586 complain_overflow_dont,/* complain_on_overflow */
587 bfd_elf_generic_reloc, /* special_function */
588 "R_ARM_LDR_SBREL_11_0",/* name */
589 FALSE, /* partial_inplace */
590 0x00000fff, /* src_mask */
591 0x00000fff, /* dst_mask */
592 FALSE), /* pcrel_offset */
593
594 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
595 0, /* rightshift */
596 2, /* size (0 = byte, 1 = short, 2 = long) */
597 8, /* bitsize */
598 FALSE, /* pc_relative */
599 12, /* bitpos */
600 complain_overflow_dont,/* complain_on_overflow */
601 bfd_elf_generic_reloc, /* special_function */
602 "R_ARM_ALU_SBREL_19_12",/* name */
603 FALSE, /* partial_inplace */
604 0x000ff000, /* src_mask */
605 0x000ff000, /* dst_mask */
606 FALSE), /* pcrel_offset */
607
608 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
609 0, /* rightshift */
610 2, /* size (0 = byte, 1 = short, 2 = long) */
611 8, /* bitsize */
612 FALSE, /* pc_relative */
613 20, /* bitpos */
614 complain_overflow_dont,/* complain_on_overflow */
615 bfd_elf_generic_reloc, /* special_function */
616 "R_ARM_ALU_SBREL_27_20",/* name */
617 FALSE, /* partial_inplace */
618 0x0ff00000, /* src_mask */
619 0x0ff00000, /* dst_mask */
620 FALSE), /* pcrel_offset */
621
622 HOWTO (R_ARM_TARGET1, /* type */
623 0, /* rightshift */
624 2, /* size (0 = byte, 1 = short, 2 = long) */
625 32, /* bitsize */
626 FALSE, /* pc_relative */
627 0, /* bitpos */
628 complain_overflow_dont,/* complain_on_overflow */
629 bfd_elf_generic_reloc, /* special_function */
630 "R_ARM_TARGET1", /* name */
631 FALSE, /* partial_inplace */
632 0xffffffff, /* src_mask */
633 0xffffffff, /* dst_mask */
634 FALSE), /* pcrel_offset */
635
636 HOWTO (R_ARM_ROSEGREL32, /* type */
637 0, /* rightshift */
638 2, /* size (0 = byte, 1 = short, 2 = long) */
639 32, /* bitsize */
640 FALSE, /* pc_relative */
641 0, /* bitpos */
642 complain_overflow_dont,/* complain_on_overflow */
643 bfd_elf_generic_reloc, /* special_function */
644 "R_ARM_ROSEGREL32", /* name */
645 FALSE, /* partial_inplace */
646 0xffffffff, /* src_mask */
647 0xffffffff, /* dst_mask */
648 FALSE), /* pcrel_offset */
649
650 HOWTO (R_ARM_V4BX, /* type */
651 0, /* rightshift */
652 2, /* size (0 = byte, 1 = short, 2 = long) */
653 32, /* bitsize */
654 FALSE, /* pc_relative */
655 0, /* bitpos */
656 complain_overflow_dont,/* complain_on_overflow */
657 bfd_elf_generic_reloc, /* special_function */
658 "R_ARM_V4BX", /* name */
659 FALSE, /* partial_inplace */
660 0xffffffff, /* src_mask */
661 0xffffffff, /* dst_mask */
662 FALSE), /* pcrel_offset */
663
664 HOWTO (R_ARM_TARGET2, /* type */
665 0, /* rightshift */
666 2, /* size (0 = byte, 1 = short, 2 = long) */
667 32, /* bitsize */
668 FALSE, /* pc_relative */
669 0, /* bitpos */
670 complain_overflow_signed,/* complain_on_overflow */
671 bfd_elf_generic_reloc, /* special_function */
672 "R_ARM_TARGET2", /* name */
673 FALSE, /* partial_inplace */
674 0xffffffff, /* src_mask */
675 0xffffffff, /* dst_mask */
676 TRUE), /* pcrel_offset */
677
678 HOWTO (R_ARM_PREL31, /* type */
679 0, /* rightshift */
680 2, /* size (0 = byte, 1 = short, 2 = long) */
681 31, /* bitsize */
682 TRUE, /* pc_relative */
683 0, /* bitpos */
684 complain_overflow_signed,/* complain_on_overflow */
685 bfd_elf_generic_reloc, /* special_function */
686 "R_ARM_PREL31", /* name */
687 FALSE, /* partial_inplace */
688 0x7fffffff, /* src_mask */
689 0x7fffffff, /* dst_mask */
690 TRUE), /* pcrel_offset */
691
692 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
693 0, /* rightshift */
694 2, /* size (0 = byte, 1 = short, 2 = long) */
695 16, /* bitsize */
696 FALSE, /* pc_relative */
697 0, /* bitpos */
698 complain_overflow_dont,/* complain_on_overflow */
699 bfd_elf_generic_reloc, /* special_function */
700 "R_ARM_MOVW_ABS_NC", /* name */
701 FALSE, /* partial_inplace */
702 0x000f0fff, /* src_mask */
703 0x000f0fff, /* dst_mask */
704 FALSE), /* pcrel_offset */
705
706 HOWTO (R_ARM_MOVT_ABS, /* type */
707 0, /* rightshift */
708 2, /* size (0 = byte, 1 = short, 2 = long) */
709 16, /* bitsize */
710 FALSE, /* pc_relative */
711 0, /* bitpos */
712 complain_overflow_bitfield,/* complain_on_overflow */
713 bfd_elf_generic_reloc, /* special_function */
714 "R_ARM_MOVT_ABS", /* name */
715 FALSE, /* partial_inplace */
716 0x000f0fff, /* src_mask */
717 0x000f0fff, /* dst_mask */
718 FALSE), /* pcrel_offset */
719
720 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
721 0, /* rightshift */
722 2, /* size (0 = byte, 1 = short, 2 = long) */
723 16, /* bitsize */
724 TRUE, /* pc_relative */
725 0, /* bitpos */
726 complain_overflow_dont,/* complain_on_overflow */
727 bfd_elf_generic_reloc, /* special_function */
728 "R_ARM_MOVW_PREL_NC", /* name */
729 FALSE, /* partial_inplace */
730 0x000f0fff, /* src_mask */
731 0x000f0fff, /* dst_mask */
732 TRUE), /* pcrel_offset */
733
734 HOWTO (R_ARM_MOVT_PREL, /* type */
735 0, /* rightshift */
736 2, /* size (0 = byte, 1 = short, 2 = long) */
737 16, /* bitsize */
738 TRUE, /* pc_relative */
739 0, /* bitpos */
740 complain_overflow_bitfield,/* complain_on_overflow */
741 bfd_elf_generic_reloc, /* special_function */
742 "R_ARM_MOVT_PREL", /* name */
743 FALSE, /* partial_inplace */
744 0x000f0fff, /* src_mask */
745 0x000f0fff, /* dst_mask */
746 TRUE), /* pcrel_offset */
747
748 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
749 0, /* rightshift */
750 2, /* size (0 = byte, 1 = short, 2 = long) */
751 16, /* bitsize */
752 FALSE, /* pc_relative */
753 0, /* bitpos */
754 complain_overflow_dont,/* complain_on_overflow */
755 bfd_elf_generic_reloc, /* special_function */
756 "R_ARM_THM_MOVW_ABS_NC",/* name */
757 FALSE, /* partial_inplace */
758 0x040f70ff, /* src_mask */
759 0x040f70ff, /* dst_mask */
760 FALSE), /* pcrel_offset */
761
762 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
763 0, /* rightshift */
764 2, /* size (0 = byte, 1 = short, 2 = long) */
765 16, /* bitsize */
766 FALSE, /* pc_relative */
767 0, /* bitpos */
768 complain_overflow_bitfield,/* complain_on_overflow */
769 bfd_elf_generic_reloc, /* special_function */
770 "R_ARM_THM_MOVT_ABS", /* name */
771 FALSE, /* partial_inplace */
772 0x040f70ff, /* src_mask */
773 0x040f70ff, /* dst_mask */
774 FALSE), /* pcrel_offset */
775
776 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
777 0, /* rightshift */
778 2, /* size (0 = byte, 1 = short, 2 = long) */
779 16, /* bitsize */
780 TRUE, /* pc_relative */
781 0, /* bitpos */
782 complain_overflow_dont,/* complain_on_overflow */
783 bfd_elf_generic_reloc, /* special_function */
784 "R_ARM_THM_MOVW_PREL_NC",/* name */
785 FALSE, /* partial_inplace */
786 0x040f70ff, /* src_mask */
787 0x040f70ff, /* dst_mask */
788 TRUE), /* pcrel_offset */
789
790 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
791 0, /* rightshift */
792 2, /* size (0 = byte, 1 = short, 2 = long) */
793 16, /* bitsize */
794 TRUE, /* pc_relative */
795 0, /* bitpos */
796 complain_overflow_bitfield,/* complain_on_overflow */
797 bfd_elf_generic_reloc, /* special_function */
798 "R_ARM_THM_MOVT_PREL", /* name */
799 FALSE, /* partial_inplace */
800 0x040f70ff, /* src_mask */
801 0x040f70ff, /* dst_mask */
802 TRUE), /* pcrel_offset */
803
804 HOWTO (R_ARM_THM_JUMP19, /* type */
805 1, /* rightshift */
806 2, /* size (0 = byte, 1 = short, 2 = long) */
807 19, /* bitsize */
808 TRUE, /* pc_relative */
809 0, /* bitpos */
810 complain_overflow_signed,/* complain_on_overflow */
811 bfd_elf_generic_reloc, /* special_function */
812 "R_ARM_THM_JUMP19", /* name */
813 FALSE, /* partial_inplace */
814 0x043f2fff, /* src_mask */
815 0x043f2fff, /* dst_mask */
816 TRUE), /* pcrel_offset */
817
818 HOWTO (R_ARM_THM_JUMP6, /* type */
819 1, /* rightshift */
820 1, /* size (0 = byte, 1 = short, 2 = long) */
821 6, /* bitsize */
822 TRUE, /* pc_relative */
823 0, /* bitpos */
824 complain_overflow_unsigned,/* complain_on_overflow */
825 bfd_elf_generic_reloc, /* special_function */
826 "R_ARM_THM_JUMP6", /* name */
827 FALSE, /* partial_inplace */
828 0x02f8, /* src_mask */
829 0x02f8, /* dst_mask */
830 TRUE), /* pcrel_offset */
831
832 /* These are declared as 13-bit signed relocations because we can
833 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
834 versa. */
835 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
836 0, /* rightshift */
837 2, /* size (0 = byte, 1 = short, 2 = long) */
838 13, /* bitsize */
839 TRUE, /* pc_relative */
840 0, /* bitpos */
841 complain_overflow_dont,/* complain_on_overflow */
842 bfd_elf_generic_reloc, /* special_function */
843 "R_ARM_THM_ALU_PREL_11_0",/* name */
844 FALSE, /* partial_inplace */
845 0xffffffff, /* src_mask */
846 0xffffffff, /* dst_mask */
847 TRUE), /* pcrel_offset */
848
849 HOWTO (R_ARM_THM_PC12, /* type */
850 0, /* rightshift */
851 2, /* size (0 = byte, 1 = short, 2 = long) */
852 13, /* bitsize */
853 TRUE, /* pc_relative */
854 0, /* bitpos */
855 complain_overflow_dont,/* complain_on_overflow */
856 bfd_elf_generic_reloc, /* special_function */
857 "R_ARM_THM_PC12", /* name */
858 FALSE, /* partial_inplace */
859 0xffffffff, /* src_mask */
860 0xffffffff, /* dst_mask */
861 TRUE), /* pcrel_offset */
862
863 HOWTO (R_ARM_ABS32_NOI, /* type */
864 0, /* rightshift */
865 2, /* size (0 = byte, 1 = short, 2 = long) */
866 32, /* bitsize */
867 FALSE, /* pc_relative */
868 0, /* bitpos */
869 complain_overflow_dont,/* complain_on_overflow */
870 bfd_elf_generic_reloc, /* special_function */
871 "R_ARM_ABS32_NOI", /* name */
872 FALSE, /* partial_inplace */
873 0xffffffff, /* src_mask */
874 0xffffffff, /* dst_mask */
875 FALSE), /* pcrel_offset */
876
877 HOWTO (R_ARM_REL32_NOI, /* type */
878 0, /* rightshift */
879 2, /* size (0 = byte, 1 = short, 2 = long) */
880 32, /* bitsize */
881 TRUE, /* pc_relative */
882 0, /* bitpos */
883 complain_overflow_dont,/* complain_on_overflow */
884 bfd_elf_generic_reloc, /* special_function */
885 "R_ARM_REL32_NOI", /* name */
886 FALSE, /* partial_inplace */
887 0xffffffff, /* src_mask */
888 0xffffffff, /* dst_mask */
889 FALSE), /* pcrel_offset */
890
891 /* Group relocations. */
892
893 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
894 0, /* rightshift */
895 2, /* size (0 = byte, 1 = short, 2 = long) */
896 32, /* bitsize */
897 TRUE, /* pc_relative */
898 0, /* bitpos */
899 complain_overflow_dont,/* complain_on_overflow */
900 bfd_elf_generic_reloc, /* special_function */
901 "R_ARM_ALU_PC_G0_NC", /* name */
902 FALSE, /* partial_inplace */
903 0xffffffff, /* src_mask */
904 0xffffffff, /* dst_mask */
905 TRUE), /* pcrel_offset */
906
907 HOWTO (R_ARM_ALU_PC_G0, /* type */
908 0, /* rightshift */
909 2, /* size (0 = byte, 1 = short, 2 = long) */
910 32, /* bitsize */
911 TRUE, /* pc_relative */
912 0, /* bitpos */
913 complain_overflow_dont,/* complain_on_overflow */
914 bfd_elf_generic_reloc, /* special_function */
915 "R_ARM_ALU_PC_G0", /* name */
916 FALSE, /* partial_inplace */
917 0xffffffff, /* src_mask */
918 0xffffffff, /* dst_mask */
919 TRUE), /* pcrel_offset */
920
921 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
922 0, /* rightshift */
923 2, /* size (0 = byte, 1 = short, 2 = long) */
924 32, /* bitsize */
925 TRUE, /* pc_relative */
926 0, /* bitpos */
927 complain_overflow_dont,/* complain_on_overflow */
928 bfd_elf_generic_reloc, /* special_function */
929 "R_ARM_ALU_PC_G1_NC", /* name */
930 FALSE, /* partial_inplace */
931 0xffffffff, /* src_mask */
932 0xffffffff, /* dst_mask */
933 TRUE), /* pcrel_offset */
934
935 HOWTO (R_ARM_ALU_PC_G1, /* type */
936 0, /* rightshift */
937 2, /* size (0 = byte, 1 = short, 2 = long) */
938 32, /* bitsize */
939 TRUE, /* pc_relative */
940 0, /* bitpos */
941 complain_overflow_dont,/* complain_on_overflow */
942 bfd_elf_generic_reloc, /* special_function */
943 "R_ARM_ALU_PC_G1", /* name */
944 FALSE, /* partial_inplace */
945 0xffffffff, /* src_mask */
946 0xffffffff, /* dst_mask */
947 TRUE), /* pcrel_offset */
948
949 HOWTO (R_ARM_ALU_PC_G2, /* type */
950 0, /* rightshift */
951 2, /* size (0 = byte, 1 = short, 2 = long) */
952 32, /* bitsize */
953 TRUE, /* pc_relative */
954 0, /* bitpos */
955 complain_overflow_dont,/* complain_on_overflow */
956 bfd_elf_generic_reloc, /* special_function */
957 "R_ARM_ALU_PC_G2", /* name */
958 FALSE, /* partial_inplace */
959 0xffffffff, /* src_mask */
960 0xffffffff, /* dst_mask */
961 TRUE), /* pcrel_offset */
962
963 HOWTO (R_ARM_LDR_PC_G1, /* type */
964 0, /* rightshift */
965 2, /* size (0 = byte, 1 = short, 2 = long) */
966 32, /* bitsize */
967 TRUE, /* pc_relative */
968 0, /* bitpos */
969 complain_overflow_dont,/* complain_on_overflow */
970 bfd_elf_generic_reloc, /* special_function */
971 "R_ARM_LDR_PC_G1", /* name */
972 FALSE, /* partial_inplace */
973 0xffffffff, /* src_mask */
974 0xffffffff, /* dst_mask */
975 TRUE), /* pcrel_offset */
976
977 HOWTO (R_ARM_LDR_PC_G2, /* type */
978 0, /* rightshift */
979 2, /* size (0 = byte, 1 = short, 2 = long) */
980 32, /* bitsize */
981 TRUE, /* pc_relative */
982 0, /* bitpos */
983 complain_overflow_dont,/* complain_on_overflow */
984 bfd_elf_generic_reloc, /* special_function */
985 "R_ARM_LDR_PC_G2", /* name */
986 FALSE, /* partial_inplace */
987 0xffffffff, /* src_mask */
988 0xffffffff, /* dst_mask */
989 TRUE), /* pcrel_offset */
990
991 HOWTO (R_ARM_LDRS_PC_G0, /* type */
992 0, /* rightshift */
993 2, /* size (0 = byte, 1 = short, 2 = long) */
994 32, /* bitsize */
995 TRUE, /* pc_relative */
996 0, /* bitpos */
997 complain_overflow_dont,/* complain_on_overflow */
998 bfd_elf_generic_reloc, /* special_function */
999 "R_ARM_LDRS_PC_G0", /* name */
1000 FALSE, /* partial_inplace */
1001 0xffffffff, /* src_mask */
1002 0xffffffff, /* dst_mask */
1003 TRUE), /* pcrel_offset */
1004
1005 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1006 0, /* rightshift */
1007 2, /* size (0 = byte, 1 = short, 2 = long) */
1008 32, /* bitsize */
1009 TRUE, /* pc_relative */
1010 0, /* bitpos */
1011 complain_overflow_dont,/* complain_on_overflow */
1012 bfd_elf_generic_reloc, /* special_function */
1013 "R_ARM_LDRS_PC_G1", /* name */
1014 FALSE, /* partial_inplace */
1015 0xffffffff, /* src_mask */
1016 0xffffffff, /* dst_mask */
1017 TRUE), /* pcrel_offset */
1018
1019 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1020 0, /* rightshift */
1021 2, /* size (0 = byte, 1 = short, 2 = long) */
1022 32, /* bitsize */
1023 TRUE, /* pc_relative */
1024 0, /* bitpos */
1025 complain_overflow_dont,/* complain_on_overflow */
1026 bfd_elf_generic_reloc, /* special_function */
1027 "R_ARM_LDRS_PC_G2", /* name */
1028 FALSE, /* partial_inplace */
1029 0xffffffff, /* src_mask */
1030 0xffffffff, /* dst_mask */
1031 TRUE), /* pcrel_offset */
1032
1033 HOWTO (R_ARM_LDC_PC_G0, /* type */
1034 0, /* rightshift */
1035 2, /* size (0 = byte, 1 = short, 2 = long) */
1036 32, /* bitsize */
1037 TRUE, /* pc_relative */
1038 0, /* bitpos */
1039 complain_overflow_dont,/* complain_on_overflow */
1040 bfd_elf_generic_reloc, /* special_function */
1041 "R_ARM_LDC_PC_G0", /* name */
1042 FALSE, /* partial_inplace */
1043 0xffffffff, /* src_mask */
1044 0xffffffff, /* dst_mask */
1045 TRUE), /* pcrel_offset */
1046
1047 HOWTO (R_ARM_LDC_PC_G1, /* type */
1048 0, /* rightshift */
1049 2, /* size (0 = byte, 1 = short, 2 = long) */
1050 32, /* bitsize */
1051 TRUE, /* pc_relative */
1052 0, /* bitpos */
1053 complain_overflow_dont,/* complain_on_overflow */
1054 bfd_elf_generic_reloc, /* special_function */
1055 "R_ARM_LDC_PC_G1", /* name */
1056 FALSE, /* partial_inplace */
1057 0xffffffff, /* src_mask */
1058 0xffffffff, /* dst_mask */
1059 TRUE), /* pcrel_offset */
1060
1061 HOWTO (R_ARM_LDC_PC_G2, /* type */
1062 0, /* rightshift */
1063 2, /* size (0 = byte, 1 = short, 2 = long) */
1064 32, /* bitsize */
1065 TRUE, /* pc_relative */
1066 0, /* bitpos */
1067 complain_overflow_dont,/* complain_on_overflow */
1068 bfd_elf_generic_reloc, /* special_function */
1069 "R_ARM_LDC_PC_G2", /* name */
1070 FALSE, /* partial_inplace */
1071 0xffffffff, /* src_mask */
1072 0xffffffff, /* dst_mask */
1073 TRUE), /* pcrel_offset */
1074
1075 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1076 0, /* rightshift */
1077 2, /* size (0 = byte, 1 = short, 2 = long) */
1078 32, /* bitsize */
1079 TRUE, /* pc_relative */
1080 0, /* bitpos */
1081 complain_overflow_dont,/* complain_on_overflow */
1082 bfd_elf_generic_reloc, /* special_function */
1083 "R_ARM_ALU_SB_G0_NC", /* name */
1084 FALSE, /* partial_inplace */
1085 0xffffffff, /* src_mask */
1086 0xffffffff, /* dst_mask */
1087 TRUE), /* pcrel_offset */
1088
1089 HOWTO (R_ARM_ALU_SB_G0, /* type */
1090 0, /* rightshift */
1091 2, /* size (0 = byte, 1 = short, 2 = long) */
1092 32, /* bitsize */
1093 TRUE, /* pc_relative */
1094 0, /* bitpos */
1095 complain_overflow_dont,/* complain_on_overflow */
1096 bfd_elf_generic_reloc, /* special_function */
1097 "R_ARM_ALU_SB_G0", /* name */
1098 FALSE, /* partial_inplace */
1099 0xffffffff, /* src_mask */
1100 0xffffffff, /* dst_mask */
1101 TRUE), /* pcrel_offset */
1102
1103 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1104 0, /* rightshift */
1105 2, /* size (0 = byte, 1 = short, 2 = long) */
1106 32, /* bitsize */
1107 TRUE, /* pc_relative */
1108 0, /* bitpos */
1109 complain_overflow_dont,/* complain_on_overflow */
1110 bfd_elf_generic_reloc, /* special_function */
1111 "R_ARM_ALU_SB_G1_NC", /* name */
1112 FALSE, /* partial_inplace */
1113 0xffffffff, /* src_mask */
1114 0xffffffff, /* dst_mask */
1115 TRUE), /* pcrel_offset */
1116
1117 HOWTO (R_ARM_ALU_SB_G1, /* type */
1118 0, /* rightshift */
1119 2, /* size (0 = byte, 1 = short, 2 = long) */
1120 32, /* bitsize */
1121 TRUE, /* pc_relative */
1122 0, /* bitpos */
1123 complain_overflow_dont,/* complain_on_overflow */
1124 bfd_elf_generic_reloc, /* special_function */
1125 "R_ARM_ALU_SB_G1", /* name */
1126 FALSE, /* partial_inplace */
1127 0xffffffff, /* src_mask */
1128 0xffffffff, /* dst_mask */
1129 TRUE), /* pcrel_offset */
1130
1131 HOWTO (R_ARM_ALU_SB_G2, /* type */
1132 0, /* rightshift */
1133 2, /* size (0 = byte, 1 = short, 2 = long) */
1134 32, /* bitsize */
1135 TRUE, /* pc_relative */
1136 0, /* bitpos */
1137 complain_overflow_dont,/* complain_on_overflow */
1138 bfd_elf_generic_reloc, /* special_function */
1139 "R_ARM_ALU_SB_G2", /* name */
1140 FALSE, /* partial_inplace */
1141 0xffffffff, /* src_mask */
1142 0xffffffff, /* dst_mask */
1143 TRUE), /* pcrel_offset */
1144
1145 HOWTO (R_ARM_LDR_SB_G0, /* type */
1146 0, /* rightshift */
1147 2, /* size (0 = byte, 1 = short, 2 = long) */
1148 32, /* bitsize */
1149 TRUE, /* pc_relative */
1150 0, /* bitpos */
1151 complain_overflow_dont,/* complain_on_overflow */
1152 bfd_elf_generic_reloc, /* special_function */
1153 "R_ARM_LDR_SB_G0", /* name */
1154 FALSE, /* partial_inplace */
1155 0xffffffff, /* src_mask */
1156 0xffffffff, /* dst_mask */
1157 TRUE), /* pcrel_offset */
1158
1159 HOWTO (R_ARM_LDR_SB_G1, /* type */
1160 0, /* rightshift */
1161 2, /* size (0 = byte, 1 = short, 2 = long) */
1162 32, /* bitsize */
1163 TRUE, /* pc_relative */
1164 0, /* bitpos */
1165 complain_overflow_dont,/* complain_on_overflow */
1166 bfd_elf_generic_reloc, /* special_function */
1167 "R_ARM_LDR_SB_G1", /* name */
1168 FALSE, /* partial_inplace */
1169 0xffffffff, /* src_mask */
1170 0xffffffff, /* dst_mask */
1171 TRUE), /* pcrel_offset */
1172
1173 HOWTO (R_ARM_LDR_SB_G2, /* type */
1174 0, /* rightshift */
1175 2, /* size (0 = byte, 1 = short, 2 = long) */
1176 32, /* bitsize */
1177 TRUE, /* pc_relative */
1178 0, /* bitpos */
1179 complain_overflow_dont,/* complain_on_overflow */
1180 bfd_elf_generic_reloc, /* special_function */
1181 "R_ARM_LDR_SB_G2", /* name */
1182 FALSE, /* partial_inplace */
1183 0xffffffff, /* src_mask */
1184 0xffffffff, /* dst_mask */
1185 TRUE), /* pcrel_offset */
1186
1187 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1188 0, /* rightshift */
1189 2, /* size (0 = byte, 1 = short, 2 = long) */
1190 32, /* bitsize */
1191 TRUE, /* pc_relative */
1192 0, /* bitpos */
1193 complain_overflow_dont,/* complain_on_overflow */
1194 bfd_elf_generic_reloc, /* special_function */
1195 "R_ARM_LDRS_SB_G0", /* name */
1196 FALSE, /* partial_inplace */
1197 0xffffffff, /* src_mask */
1198 0xffffffff, /* dst_mask */
1199 TRUE), /* pcrel_offset */
1200
1201 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1202 0, /* rightshift */
1203 2, /* size (0 = byte, 1 = short, 2 = long) */
1204 32, /* bitsize */
1205 TRUE, /* pc_relative */
1206 0, /* bitpos */
1207 complain_overflow_dont,/* complain_on_overflow */
1208 bfd_elf_generic_reloc, /* special_function */
1209 "R_ARM_LDRS_SB_G1", /* name */
1210 FALSE, /* partial_inplace */
1211 0xffffffff, /* src_mask */
1212 0xffffffff, /* dst_mask */
1213 TRUE), /* pcrel_offset */
1214
1215 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1216 0, /* rightshift */
1217 2, /* size (0 = byte, 1 = short, 2 = long) */
1218 32, /* bitsize */
1219 TRUE, /* pc_relative */
1220 0, /* bitpos */
1221 complain_overflow_dont,/* complain_on_overflow */
1222 bfd_elf_generic_reloc, /* special_function */
1223 "R_ARM_LDRS_SB_G2", /* name */
1224 FALSE, /* partial_inplace */
1225 0xffffffff, /* src_mask */
1226 0xffffffff, /* dst_mask */
1227 TRUE), /* pcrel_offset */
1228
1229 HOWTO (R_ARM_LDC_SB_G0, /* type */
1230 0, /* rightshift */
1231 2, /* size (0 = byte, 1 = short, 2 = long) */
1232 32, /* bitsize */
1233 TRUE, /* pc_relative */
1234 0, /* bitpos */
1235 complain_overflow_dont,/* complain_on_overflow */
1236 bfd_elf_generic_reloc, /* special_function */
1237 "R_ARM_LDC_SB_G0", /* name */
1238 FALSE, /* partial_inplace */
1239 0xffffffff, /* src_mask */
1240 0xffffffff, /* dst_mask */
1241 TRUE), /* pcrel_offset */
1242
1243 HOWTO (R_ARM_LDC_SB_G1, /* type */
1244 0, /* rightshift */
1245 2, /* size (0 = byte, 1 = short, 2 = long) */
1246 32, /* bitsize */
1247 TRUE, /* pc_relative */
1248 0, /* bitpos */
1249 complain_overflow_dont,/* complain_on_overflow */
1250 bfd_elf_generic_reloc, /* special_function */
1251 "R_ARM_LDC_SB_G1", /* name */
1252 FALSE, /* partial_inplace */
1253 0xffffffff, /* src_mask */
1254 0xffffffff, /* dst_mask */
1255 TRUE), /* pcrel_offset */
1256
1257 HOWTO (R_ARM_LDC_SB_G2, /* type */
1258 0, /* rightshift */
1259 2, /* size (0 = byte, 1 = short, 2 = long) */
1260 32, /* bitsize */
1261 TRUE, /* pc_relative */
1262 0, /* bitpos */
1263 complain_overflow_dont,/* complain_on_overflow */
1264 bfd_elf_generic_reloc, /* special_function */
1265 "R_ARM_LDC_SB_G2", /* name */
1266 FALSE, /* partial_inplace */
1267 0xffffffff, /* src_mask */
1268 0xffffffff, /* dst_mask */
1269 TRUE), /* pcrel_offset */
1270
1271 /* End of group relocations. */
1272
1273 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1274 0, /* rightshift */
1275 2, /* size (0 = byte, 1 = short, 2 = long) */
1276 16, /* bitsize */
1277 FALSE, /* pc_relative */
1278 0, /* bitpos */
1279 complain_overflow_dont,/* complain_on_overflow */
1280 bfd_elf_generic_reloc, /* special_function */
1281 "R_ARM_MOVW_BREL_NC", /* name */
1282 FALSE, /* partial_inplace */
1283 0x0000ffff, /* src_mask */
1284 0x0000ffff, /* dst_mask */
1285 FALSE), /* pcrel_offset */
1286
1287 HOWTO (R_ARM_MOVT_BREL, /* type */
1288 0, /* rightshift */
1289 2, /* size (0 = byte, 1 = short, 2 = long) */
1290 16, /* bitsize */
1291 FALSE, /* pc_relative */
1292 0, /* bitpos */
1293 complain_overflow_bitfield,/* complain_on_overflow */
1294 bfd_elf_generic_reloc, /* special_function */
1295 "R_ARM_MOVT_BREL", /* name */
1296 FALSE, /* partial_inplace */
1297 0x0000ffff, /* src_mask */
1298 0x0000ffff, /* dst_mask */
1299 FALSE), /* pcrel_offset */
1300
1301 HOWTO (R_ARM_MOVW_BREL, /* type */
1302 0, /* rightshift */
1303 2, /* size (0 = byte, 1 = short, 2 = long) */
1304 16, /* bitsize */
1305 FALSE, /* pc_relative */
1306 0, /* bitpos */
1307 complain_overflow_dont,/* complain_on_overflow */
1308 bfd_elf_generic_reloc, /* special_function */
1309 "R_ARM_MOVW_BREL", /* name */
1310 FALSE, /* partial_inplace */
1311 0x0000ffff, /* src_mask */
1312 0x0000ffff, /* dst_mask */
1313 FALSE), /* pcrel_offset */
1314
1315 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1316 0, /* rightshift */
1317 2, /* size (0 = byte, 1 = short, 2 = long) */
1318 16, /* bitsize */
1319 FALSE, /* pc_relative */
1320 0, /* bitpos */
1321 complain_overflow_dont,/* complain_on_overflow */
1322 bfd_elf_generic_reloc, /* special_function */
1323 "R_ARM_THM_MOVW_BREL_NC",/* name */
1324 FALSE, /* partial_inplace */
1325 0x040f70ff, /* src_mask */
1326 0x040f70ff, /* dst_mask */
1327 FALSE), /* pcrel_offset */
1328
1329 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1330 0, /* rightshift */
1331 2, /* size (0 = byte, 1 = short, 2 = long) */
1332 16, /* bitsize */
1333 FALSE, /* pc_relative */
1334 0, /* bitpos */
1335 complain_overflow_bitfield,/* complain_on_overflow */
1336 bfd_elf_generic_reloc, /* special_function */
1337 "R_ARM_THM_MOVT_BREL", /* name */
1338 FALSE, /* partial_inplace */
1339 0x040f70ff, /* src_mask */
1340 0x040f70ff, /* dst_mask */
1341 FALSE), /* pcrel_offset */
1342
1343 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1344 0, /* rightshift */
1345 2, /* size (0 = byte, 1 = short, 2 = long) */
1346 16, /* bitsize */
1347 FALSE, /* pc_relative */
1348 0, /* bitpos */
1349 complain_overflow_dont,/* complain_on_overflow */
1350 bfd_elf_generic_reloc, /* special_function */
1351 "R_ARM_THM_MOVW_BREL", /* name */
1352 FALSE, /* partial_inplace */
1353 0x040f70ff, /* src_mask */
1354 0x040f70ff, /* dst_mask */
1355 FALSE), /* pcrel_offset */
1356
1357 EMPTY_HOWTO (90), /* Unallocated. */
1358 EMPTY_HOWTO (91),
1359 EMPTY_HOWTO (92),
1360 EMPTY_HOWTO (93),
1361
1362 HOWTO (R_ARM_PLT32_ABS, /* type */
1363 0, /* rightshift */
1364 2, /* size (0 = byte, 1 = short, 2 = long) */
1365 32, /* bitsize */
1366 FALSE, /* pc_relative */
1367 0, /* bitpos */
1368 complain_overflow_dont,/* complain_on_overflow */
1369 bfd_elf_generic_reloc, /* special_function */
1370 "R_ARM_PLT32_ABS", /* name */
1371 FALSE, /* partial_inplace */
1372 0xffffffff, /* src_mask */
1373 0xffffffff, /* dst_mask */
1374 FALSE), /* pcrel_offset */
1375
1376 HOWTO (R_ARM_GOT_ABS, /* type */
1377 0, /* rightshift */
1378 2, /* size (0 = byte, 1 = short, 2 = long) */
1379 32, /* bitsize */
1380 FALSE, /* pc_relative */
1381 0, /* bitpos */
1382 complain_overflow_dont,/* complain_on_overflow */
1383 bfd_elf_generic_reloc, /* special_function */
1384 "R_ARM_GOT_ABS", /* name */
1385 FALSE, /* partial_inplace */
1386 0xffffffff, /* src_mask */
1387 0xffffffff, /* dst_mask */
1388 FALSE), /* pcrel_offset */
1389
1390 HOWTO (R_ARM_GOT_PREL, /* type */
1391 0, /* rightshift */
1392 2, /* size (0 = byte, 1 = short, 2 = long) */
1393 32, /* bitsize */
1394 TRUE, /* pc_relative */
1395 0, /* bitpos */
1396 complain_overflow_dont, /* complain_on_overflow */
1397 bfd_elf_generic_reloc, /* special_function */
1398 "R_ARM_GOT_PREL", /* name */
1399 FALSE, /* partial_inplace */
1400 0xffffffff, /* src_mask */
1401 0xffffffff, /* dst_mask */
1402 TRUE), /* pcrel_offset */
1403
1404 HOWTO (R_ARM_GOT_BREL12, /* type */
1405 0, /* rightshift */
1406 2, /* size (0 = byte, 1 = short, 2 = long) */
1407 12, /* bitsize */
1408 FALSE, /* pc_relative */
1409 0, /* bitpos */
1410 complain_overflow_bitfield,/* complain_on_overflow */
1411 bfd_elf_generic_reloc, /* special_function */
1412 "R_ARM_GOT_BREL12", /* name */
1413 FALSE, /* partial_inplace */
1414 0x00000fff, /* src_mask */
1415 0x00000fff, /* dst_mask */
1416 FALSE), /* pcrel_offset */
1417
1418 HOWTO (R_ARM_GOTOFF12, /* type */
1419 0, /* rightshift */
1420 2, /* size (0 = byte, 1 = short, 2 = long) */
1421 12, /* bitsize */
1422 FALSE, /* pc_relative */
1423 0, /* bitpos */
1424 complain_overflow_bitfield,/* complain_on_overflow */
1425 bfd_elf_generic_reloc, /* special_function */
1426 "R_ARM_GOTOFF12", /* name */
1427 FALSE, /* partial_inplace */
1428 0x00000fff, /* src_mask */
1429 0x00000fff, /* dst_mask */
1430 FALSE), /* pcrel_offset */
1431
1432 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1433
1434 /* GNU extension to record C++ vtable member usage */
1435 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1436 0, /* rightshift */
1437 2, /* size (0 = byte, 1 = short, 2 = long) */
1438 0, /* bitsize */
1439 FALSE, /* pc_relative */
1440 0, /* bitpos */
1441 complain_overflow_dont, /* complain_on_overflow */
1442 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1443 "R_ARM_GNU_VTENTRY", /* name */
1444 FALSE, /* partial_inplace */
1445 0, /* src_mask */
1446 0, /* dst_mask */
1447 FALSE), /* pcrel_offset */
1448
1449 /* GNU extension to record C++ vtable hierarchy */
1450 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1451 0, /* rightshift */
1452 2, /* size (0 = byte, 1 = short, 2 = long) */
1453 0, /* bitsize */
1454 FALSE, /* pc_relative */
1455 0, /* bitpos */
1456 complain_overflow_dont, /* complain_on_overflow */
1457 NULL, /* special_function */
1458 "R_ARM_GNU_VTINHERIT", /* name */
1459 FALSE, /* partial_inplace */
1460 0, /* src_mask */
1461 0, /* dst_mask */
1462 FALSE), /* pcrel_offset */
1463
1464 HOWTO (R_ARM_THM_JUMP11, /* type */
1465 1, /* rightshift */
1466 1, /* size (0 = byte, 1 = short, 2 = long) */
1467 11, /* bitsize */
1468 TRUE, /* pc_relative */
1469 0, /* bitpos */
1470 complain_overflow_signed, /* complain_on_overflow */
1471 bfd_elf_generic_reloc, /* special_function */
1472 "R_ARM_THM_JUMP11", /* name */
1473 FALSE, /* partial_inplace */
1474 0x000007ff, /* src_mask */
1475 0x000007ff, /* dst_mask */
1476 TRUE), /* pcrel_offset */
1477
1478 HOWTO (R_ARM_THM_JUMP8, /* type */
1479 1, /* rightshift */
1480 1, /* size (0 = byte, 1 = short, 2 = long) */
1481 8, /* bitsize */
1482 TRUE, /* pc_relative */
1483 0, /* bitpos */
1484 complain_overflow_signed, /* complain_on_overflow */
1485 bfd_elf_generic_reloc, /* special_function */
1486 "R_ARM_THM_JUMP8", /* name */
1487 FALSE, /* partial_inplace */
1488 0x000000ff, /* src_mask */
1489 0x000000ff, /* dst_mask */
1490 TRUE), /* pcrel_offset */
1491
1492 /* TLS relocations */
1493 HOWTO (R_ARM_TLS_GD32, /* type */
1494 0, /* rightshift */
1495 2, /* size (0 = byte, 1 = short, 2 = long) */
1496 32, /* bitsize */
1497 FALSE, /* pc_relative */
1498 0, /* bitpos */
1499 complain_overflow_bitfield,/* complain_on_overflow */
1500 NULL, /* special_function */
1501 "R_ARM_TLS_GD32", /* name */
1502 TRUE, /* partial_inplace */
1503 0xffffffff, /* src_mask */
1504 0xffffffff, /* dst_mask */
1505 FALSE), /* pcrel_offset */
1506
1507 HOWTO (R_ARM_TLS_LDM32, /* type */
1508 0, /* rightshift */
1509 2, /* size (0 = byte, 1 = short, 2 = long) */
1510 32, /* bitsize */
1511 FALSE, /* pc_relative */
1512 0, /* bitpos */
1513 complain_overflow_bitfield,/* complain_on_overflow */
1514 bfd_elf_generic_reloc, /* special_function */
1515 "R_ARM_TLS_LDM32", /* name */
1516 TRUE, /* partial_inplace */
1517 0xffffffff, /* src_mask */
1518 0xffffffff, /* dst_mask */
1519 FALSE), /* pcrel_offset */
1520
1521 HOWTO (R_ARM_TLS_LDO32, /* type */
1522 0, /* rightshift */
1523 2, /* size (0 = byte, 1 = short, 2 = long) */
1524 32, /* bitsize */
1525 FALSE, /* pc_relative */
1526 0, /* bitpos */
1527 complain_overflow_bitfield,/* complain_on_overflow */
1528 bfd_elf_generic_reloc, /* special_function */
1529 "R_ARM_TLS_LDO32", /* name */
1530 TRUE, /* partial_inplace */
1531 0xffffffff, /* src_mask */
1532 0xffffffff, /* dst_mask */
1533 FALSE), /* pcrel_offset */
1534
1535 HOWTO (R_ARM_TLS_IE32, /* type */
1536 0, /* rightshift */
1537 2, /* size (0 = byte, 1 = short, 2 = long) */
1538 32, /* bitsize */
1539 FALSE, /* pc_relative */
1540 0, /* bitpos */
1541 complain_overflow_bitfield,/* complain_on_overflow */
1542 NULL, /* special_function */
1543 "R_ARM_TLS_IE32", /* name */
1544 TRUE, /* partial_inplace */
1545 0xffffffff, /* src_mask */
1546 0xffffffff, /* dst_mask */
1547 FALSE), /* pcrel_offset */
1548
1549 HOWTO (R_ARM_TLS_LE32, /* type */
1550 0, /* rightshift */
1551 2, /* size (0 = byte, 1 = short, 2 = long) */
1552 32, /* bitsize */
1553 FALSE, /* pc_relative */
1554 0, /* bitpos */
1555 complain_overflow_bitfield,/* complain_on_overflow */
1556 bfd_elf_generic_reloc, /* special_function */
1557 "R_ARM_TLS_LE32", /* name */
1558 TRUE, /* partial_inplace */
1559 0xffffffff, /* src_mask */
1560 0xffffffff, /* dst_mask */
1561 FALSE), /* pcrel_offset */
1562
1563 HOWTO (R_ARM_TLS_LDO12, /* type */
1564 0, /* rightshift */
1565 2, /* size (0 = byte, 1 = short, 2 = long) */
1566 12, /* bitsize */
1567 FALSE, /* pc_relative */
1568 0, /* bitpos */
1569 complain_overflow_bitfield,/* complain_on_overflow */
1570 bfd_elf_generic_reloc, /* special_function */
1571 "R_ARM_TLS_LDO12", /* name */
1572 FALSE, /* partial_inplace */
1573 0x00000fff, /* src_mask */
1574 0x00000fff, /* dst_mask */
1575 FALSE), /* pcrel_offset */
1576
1577 HOWTO (R_ARM_TLS_LE12, /* type */
1578 0, /* rightshift */
1579 2, /* size (0 = byte, 1 = short, 2 = long) */
1580 12, /* bitsize */
1581 FALSE, /* pc_relative */
1582 0, /* bitpos */
1583 complain_overflow_bitfield,/* complain_on_overflow */
1584 bfd_elf_generic_reloc, /* special_function */
1585 "R_ARM_TLS_LE12", /* name */
1586 FALSE, /* partial_inplace */
1587 0x00000fff, /* src_mask */
1588 0x00000fff, /* dst_mask */
1589 FALSE), /* pcrel_offset */
1590
1591 HOWTO (R_ARM_TLS_IE12GP, /* type */
1592 0, /* rightshift */
1593 2, /* size (0 = byte, 1 = short, 2 = long) */
1594 12, /* bitsize */
1595 FALSE, /* pc_relative */
1596 0, /* bitpos */
1597 complain_overflow_bitfield,/* complain_on_overflow */
1598 bfd_elf_generic_reloc, /* special_function */
1599 "R_ARM_TLS_IE12GP", /* name */
1600 FALSE, /* partial_inplace */
1601 0x00000fff, /* src_mask */
1602 0x00000fff, /* dst_mask */
1603 FALSE), /* pcrel_offset */
1604 };
1605
1606 /* 112-127 private relocations
1607 128 R_ARM_ME_TOO, obsolete
1608 129-255 unallocated in AAELF.
1609
1610 249-255 extended, currently unused, relocations: */
1611
1612 static reloc_howto_type elf32_arm_howto_table_2[4] =
1613 {
1614 HOWTO (R_ARM_RREL32, /* type */
1615 0, /* rightshift */
1616 0, /* size (0 = byte, 1 = short, 2 = long) */
1617 0, /* bitsize */
1618 FALSE, /* pc_relative */
1619 0, /* bitpos */
1620 complain_overflow_dont,/* complain_on_overflow */
1621 bfd_elf_generic_reloc, /* special_function */
1622 "R_ARM_RREL32", /* name */
1623 FALSE, /* partial_inplace */
1624 0, /* src_mask */
1625 0, /* dst_mask */
1626 FALSE), /* pcrel_offset */
1627
1628 HOWTO (R_ARM_RABS32, /* type */
1629 0, /* rightshift */
1630 0, /* size (0 = byte, 1 = short, 2 = long) */
1631 0, /* bitsize */
1632 FALSE, /* pc_relative */
1633 0, /* bitpos */
1634 complain_overflow_dont,/* complain_on_overflow */
1635 bfd_elf_generic_reloc, /* special_function */
1636 "R_ARM_RABS32", /* name */
1637 FALSE, /* partial_inplace */
1638 0, /* src_mask */
1639 0, /* dst_mask */
1640 FALSE), /* pcrel_offset */
1641
1642 HOWTO (R_ARM_RPC24, /* type */
1643 0, /* rightshift */
1644 0, /* size (0 = byte, 1 = short, 2 = long) */
1645 0, /* bitsize */
1646 FALSE, /* pc_relative */
1647 0, /* bitpos */
1648 complain_overflow_dont,/* complain_on_overflow */
1649 bfd_elf_generic_reloc, /* special_function */
1650 "R_ARM_RPC24", /* name */
1651 FALSE, /* partial_inplace */
1652 0, /* src_mask */
1653 0, /* dst_mask */
1654 FALSE), /* pcrel_offset */
1655
1656 HOWTO (R_ARM_RBASE, /* type */
1657 0, /* rightshift */
1658 0, /* size (0 = byte, 1 = short, 2 = long) */
1659 0, /* bitsize */
1660 FALSE, /* pc_relative */
1661 0, /* bitpos */
1662 complain_overflow_dont,/* complain_on_overflow */
1663 bfd_elf_generic_reloc, /* special_function */
1664 "R_ARM_RBASE", /* name */
1665 FALSE, /* partial_inplace */
1666 0, /* src_mask */
1667 0, /* dst_mask */
1668 FALSE) /* pcrel_offset */
1669 };
1670
1671 static reloc_howto_type *
1672 elf32_arm_howto_from_type (unsigned int r_type)
1673 {
1674 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1675 return &elf32_arm_howto_table_1[r_type];
1676
1677 if (r_type >= R_ARM_RREL32
1678 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_2))
1679 return &elf32_arm_howto_table_2[r_type - R_ARM_RREL32];
1680
1681 return NULL;
1682 }
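/* Illustrative note (not part of the original source): an ordinary type
   such as R_ARM_ABS32 is looked up directly in elf32_arm_howto_table_1,
   the legacy R_ARM_RREL32 .. R_ARM_RBASE relocations fall into
   elf32_arm_howto_table_2, and any type in the unallocated gap between
   the two tables yields NULL.  */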
1683
1684 static void
1685 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1686 Elf_Internal_Rela * elf_reloc)
1687 {
1688 unsigned int r_type;
1689
1690 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1691 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1692 }
1693
1694 struct elf32_arm_reloc_map
1695 {
1696 bfd_reloc_code_real_type bfd_reloc_val;
1697 unsigned char elf_reloc_val;
1698 };
1699
1700 /* All entries in this list must also be present in elf32_arm_howto_table_1. */
1701 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1702 {
1703 {BFD_RELOC_NONE, R_ARM_NONE},
1704 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1705 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1706 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1707 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1708 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1709 {BFD_RELOC_32, R_ARM_ABS32},
1710 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1711 {BFD_RELOC_8, R_ARM_ABS8},
1712 {BFD_RELOC_16, R_ARM_ABS16},
1713 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1714 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1715 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1716 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1717 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1718 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1719 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1720 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1721 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1722 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1723 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1724 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1725 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1726 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1727 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1728 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1729 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1730 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1731 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1732 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1733 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1734 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1735 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1736 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1737 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1738 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1739 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1740 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1741 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1742 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1743 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1744 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1745 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1746 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1747 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1748 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1749 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1750 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1751 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1752 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1753 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1754 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1755 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1756 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1757 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1758 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1759 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1760 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1761 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1762 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1763 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1764 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1765 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1766 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1767 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1768 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1769 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1770 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1771 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1772 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1773 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1774 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1775 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1776 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1777 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1778 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1779 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1780 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1781 };
1782
1783 static reloc_howto_type *
1784 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1785 bfd_reloc_code_real_type code)
1786 {
1787 unsigned int i;
1788
1789 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1790 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1791 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1792
1793 return NULL;
1794 }
1795
1796 static reloc_howto_type *
1797 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1798 const char *r_name)
1799 {
1800 unsigned int i;
1801
1802 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1803 if (elf32_arm_howto_table_1[i].name != NULL
1804 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1805 return &elf32_arm_howto_table_1[i];
1806
1807 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1808 if (elf32_arm_howto_table_2[i].name != NULL
1809 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1810 return &elf32_arm_howto_table_2[i];
1811
1812 return NULL;
1813 }
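/* Sketch (not built) of how a consumer might use the two lookup
   routines above; "example_lookups" is only illustrative.  */
#if 0
static void
example_lookups (bfd *abfd)
{
  /* By generic BFD code: BFD_RELOC_32 maps to R_ARM_ABS32 via
     elf32_arm_reloc_map.  */
  reloc_howto_type *h1 = elf32_arm_reloc_type_lookup (abfd, BFD_RELOC_32);

  /* By name, matched case-insensitively against the howto tables.  */
  reloc_howto_type *h2 = elf32_arm_reloc_name_lookup (abfd, "r_arm_abs32");

  BFD_ASSERT (h1 == h2);
}
#endif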
1814
1815 /* Support for core dump NOTE sections. */
1816
1817 static bfd_boolean
1818 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1819 {
1820 int offset;
1821 size_t size;
1822
1823 switch (note->descsz)
1824 {
1825 default:
1826 return FALSE;
1827
1828 case 148: /* Linux/ARM 32-bit. */
1829 /* pr_cursig */
1830 elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
1831
1832 /* pr_pid */
1833 elf_tdata (abfd)->core_pid = bfd_get_32 (abfd, note->descdata + 24);
1834
1835 /* pr_reg */
1836 offset = 72;
1837 size = 72;
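      /* The register block is the 18-word Linux/ARM register set
         (r0-r15, cpsr and the original r0), i.e. 18 * 4 == 72 bytes,
         starting 72 bytes into the 148-byte elf_prstatus note.  */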
1838
1839 break;
1840 }
1841
1842 /* Make a ".reg/999" section. */
1843 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1844 size, note->descpos + offset);
1845 }
1846
1847 static bfd_boolean
1848 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1849 {
1850 switch (note->descsz)
1851 {
1852 default:
1853 return FALSE;
1854
1855 case 124: /* Linux/ARM elf_prpsinfo. */
1856 elf_tdata (abfd)->core_program
1857 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1858 elf_tdata (abfd)->core_command
1859 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1860 }
1861
1862 /* Note that some implementations (at least one, anyway) tack a
1863 spurious space onto the end of the args, so strip it off if it
1864 exists.  */
1865 {
1866 char *command = elf_tdata (abfd)->core_command;
1867 int n = strlen (command);
1868
1869 if (0 < n && command[n - 1] == ' ')
1870 command[n - 1] = '\0';
1871 }
1872
1873 return TRUE;
1874 }
1875
1876 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec
1877 #define TARGET_LITTLE_NAME "elf32-littlearm"
1878 #define TARGET_BIG_SYM bfd_elf32_bigarm_vec
1879 #define TARGET_BIG_NAME "elf32-bigarm"
1880
1881 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
1882 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
1883
1884 typedef unsigned long int insn32;
1885 typedef unsigned short int insn16;
1886
1887 /* In lieu of proper flags, assume all EABIv4 or later objects are
1888 interworkable. */
1889 #define INTERWORK_FLAG(abfd) \
1890 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
1891 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
1892 || ((abfd)->flags & BFD_LINKER_CREATED))
1893
1894 /* The linker script knows the section names for placement.
1895 The entry_names are used to do simple name mangling on the stubs.
1896 Given a function name, and its type, the stub can be found. The
1897 name can be changed.  The only requirement is that the %s be present.  */
1898 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
1899 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
1900
1901 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
1902 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
1903
1904 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
1905 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
1906
1907 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
1908 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
1909
1910 #define STUB_ENTRY_NAME "__%s_veneer"
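/* Illustration only (not built; "example_glue_name" is hypothetical):
   the templates above are simple printf-style patterns, so for a
   function "foo" the ARM->Thumb glue entry is named "__foo_from_arm",
   the Thumb->ARM glue entry "__foo_from_thumb" and a generic
   long-branch veneer "__foo_veneer".  */
#if 0
static char *
example_glue_name (const char *func_name)
{
  /* "%s" is replaced by FUNC_NAME; +1 for the trailing NUL.  */
  size_t len = strlen (ARM2THUMB_GLUE_ENTRY_NAME) + strlen (func_name) + 1;
  char *name = bfd_malloc (len);

  if (name != NULL)
    sprintf (name, ARM2THUMB_GLUE_ENTRY_NAME, func_name);
  return name;
}
#endif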
1911
1912 /* The name of the dynamic interpreter. This is put in the .interp
1913 section. */
1914 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
1915
1916 #ifdef FOUR_WORD_PLT
1917
1918 /* The first entry in a procedure linkage table looks like
1919 this. It is set up so that any shared library function that is
1920 called before the relocation has been set up calls the dynamic
1921 linker first. */
1922 static const bfd_vma elf32_arm_plt0_entry [] =
1923 {
1924 0xe52de004, /* str lr, [sp, #-4]! */
1925 0xe59fe010, /* ldr lr, [pc, #16] */
1926 0xe08fe00e, /* add lr, pc, lr */
1927 0xe5bef008, /* ldr pc, [lr, #8]! */
1928 };
1929
1930 /* Subsequent entries in a procedure linkage table look like
1931 this. */
1932 static const bfd_vma elf32_arm_plt_entry [] =
1933 {
1934 0xe28fc600, /* add ip, pc, #NN */
1935 0xe28cca00, /* add ip, ip, #NN */
1936 0xe5bcf000, /* ldr pc, [ip, #NN]! */
1937 0x00000000, /* unused */
1938 };
1939
1940 #else
1941
1942 /* The first entry in a procedure linkage table looks like
1943 this. It is set up so that any shared library function that is
1944 called before the relocation has been set up calls the dynamic
1945 linker first. */
1946 static const bfd_vma elf32_arm_plt0_entry [] =
1947 {
1948 0xe52de004, /* str lr, [sp, #-4]! */
1949 0xe59fe004, /* ldr lr, [pc, #4] */
1950 0xe08fe00e, /* add lr, pc, lr */
1951 0xe5bef008, /* ldr pc, [lr, #8]! */
1952 0x00000000, /* &GOT[0] - . */
1953 };
1954
1955 /* Subsequent entries in a procedure linkage table look like
1956 this. */
1957 static const bfd_vma elf32_arm_plt_entry [] =
1958 {
1959 0xe28fc600, /* add ip, pc, #0xNN00000 */
1960 0xe28cca00, /* add ip, ip, #0xNN000 */
1961 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
1962 };
1963
1964 #endif
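/* Sketch (not built) of how the three immediates in the entry above
   get filled in when the PLT is laid out: the rotated ARM immediates
   of the two ADDs receive bits [27:20] and [19:12] of the displacement
   from the entry's pc + 8 to its GOT slot, and the LDR's 12-bit offset
   receives bits [11:0].  "example_fill_plt_entry" is illustrative only.  */
#if 0
static void
example_fill_plt_entry (struct elf32_arm_link_hash_table *htab,
                        bfd *output_bfd, bfd_byte *ptr,
                        bfd_vma got_displacement)
{
  put_arm_insn (htab, output_bfd,
                elf32_arm_plt_entry[0]
                | ((got_displacement & 0x0ff00000) >> 20), ptr + 0);
  put_arm_insn (htab, output_bfd,
                elf32_arm_plt_entry[1]
                | ((got_displacement & 0x000ff000) >> 12), ptr + 4);
  put_arm_insn (htab, output_bfd,
                elf32_arm_plt_entry[2]
                | (got_displacement & 0x00000fff), ptr + 8);
}
#endif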
1965
1966 /* The format of the first entry in the procedure linkage table
1967 for a VxWorks executable. */
1968 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
1969 {
1970 0xe52dc008, /* str ip,[sp,#-8]! */
1971 0xe59fc000, /* ldr ip,[pc] */
1972 0xe59cf008, /* ldr pc,[ip,#8] */
1973 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
1974 };
1975
1976 /* The format of subsequent entries in a VxWorks executable. */
1977 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
1978 {
1979 0xe59fc000, /* ldr ip,[pc] */
1980 0xe59cf000, /* ldr pc,[ip] */
1981 0x00000000, /* .long @got */
1982 0xe59fc000, /* ldr ip,[pc] */
1983 0xea000000, /* b _PLT */
1984 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1985 };
1986
1987 /* The format of entries in a VxWorks shared library. */
1988 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
1989 {
1990 0xe59fc000, /* ldr ip,[pc] */
1991 0xe79cf009, /* ldr pc,[ip,r9] */
1992 0x00000000, /* .long @got */
1993 0xe59fc000, /* ldr ip,[pc] */
1994 0xe599f008, /* ldr pc,[r9,#8] */
1995 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1996 };
1997
1998 /* An initial stub used if the PLT entry is referenced from Thumb code. */
1999 #define PLT_THUMB_STUB_SIZE 4
2000 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2001 {
2002 0x4778, /* bx pc */
2003 0x46c0 /* nop */
2004 };
2005
2006 /* The entries in a PLT when using a DLL-based target with multiple
2007 address spaces. */
2008 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2009 {
2010 0xe51ff004, /* ldr pc, [pc, #-4] */
2011 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2012 };
2013
2014 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2015 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2016 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) - 2 + 4)
2017 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2018 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2019 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
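/* Rough ranges implied by the macros above (the +8 and +4 terms are
   the ARM and Thumb pipeline offsets of the PC): ARM B/BL reaches
   about +/-32MB ((2^23 - 1) * 4 + 8 forwards), Thumb-1 BL about
   +/-4MB and Thumb-2 B.W/BL about +/-16MB.  Branches whose offsets
   fall outside these windows need one of the stubs defined below.  */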
2020
2021 enum stub_insn_type
2022 {
2023 THUMB16_TYPE = 1,
2024 THUMB32_TYPE,
2025 ARM_TYPE,
2026 DATA_TYPE
2027 };
2028
2029 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2030 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2031 is inserted in arm_build_one_stub(). */
2032 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2033 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2034 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2035 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2036 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2037 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2038
2039 typedef struct
2040 {
2041 bfd_vma data;
2042 enum stub_insn_type type;
2043 unsigned int r_type;
2044 int reloc_addend;
2045 } insn_sequence;
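/* Illustration only: the helper macros above expand into
   insn_sequence initialisers, e.g. for the "any -> any" stub below
     ARM_INSN (0xe51ff004)         ==> { 0xe51ff004, ARM_TYPE,  R_ARM_NONE,  0 }
     DATA_WORD (0, R_ARM_ABS32, 0) ==> { 0x0,        DATA_TYPE, R_ARM_ABS32, 0 }
   i.e. one ARM instruction followed by a literal word whose
   R_ARM_ABS32-style fixup is resolved in arm_build_one_stub.  */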
2046
2047 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2048 to reach the stub if necessary. */
2049 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2050 {
2051 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2052 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2053 };
2054
2055 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2056 available. */
2057 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2058 {
2059 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2060 ARM_INSN(0xe12fff1c), /* bx ip */
2061 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2062 };
2063
2064 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2065 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2066 {
2067 THUMB16_INSN(0xb401), /* push {r0} */
2068 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2069 THUMB16_INSN(0x4684), /* mov ip, r0 */
2070 THUMB16_INSN(0xbc01), /* pop {r0} */
2071 THUMB16_INSN(0x4760), /* bx ip */
2072 THUMB16_INSN(0xbf00), /* nop */
2073 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2074 };
2075
2076 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2077 allowed. */
2078 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2079 {
2080 THUMB16_INSN(0x4778), /* bx pc */
2081 THUMB16_INSN(0x46c0), /* nop */
2082 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2083 ARM_INSN(0xe12fff1c), /* bx ip */
2084 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2085 };
2086
2087 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2088 available. */
2089 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2090 {
2091 THUMB16_INSN(0x4778), /* bx pc */
2092 THUMB16_INSN(0x46c0), /* nop */
2093 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2094 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2095 };
2096
2097 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2098 one, when the destination is close enough. */
2099 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2100 {
2101 THUMB16_INSN(0x4778), /* bx pc */
2102 THUMB16_INSN(0x46c0), /* nop */
2103 ARM_REL_INSN(0xea000000, -8), /* b (X-8) */
2104 };
2105
2106 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2107 blx to reach the stub if necessary. */
2108 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2109 {
2110 ARM_INSN(0xe59fc000), /* ldr r12, [pc] */
2111 ARM_INSN(0xe08ff00c), /* add pc, pc, ip */
2112 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2113 };
2114
2115 /* ARM/Thumb -> Thumb long branch stub, PIC.  On V5T and above, use
2116 blx to reach the stub if necessary.  We cannot use an add into pc
2117 here, because it is not guaranteed to perform a mode switch (the
2118 behaviour differs between ARMv6 and ARMv7).  */
2119 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2120 {
2121 ARM_INSN(0xe59fc004), /* ldr r12, [pc, #4] */
2122 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2123 ARM_INSN(0xe12fff1c), /* bx ip */
2124 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2125 };
2126
2127 /* V4T ARM -> Thumb long branch stub, PIC.  */
2128 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2129 {
2130 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2131 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2132 ARM_INSN(0xe12fff1c), /* bx ip */
2133 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2134 };
2135
2136 /* V4T Thumb -> ARM long branch stub, PIC. */
2137 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2138 {
2139 THUMB16_INSN(0x4778), /* bx pc */
2140 THUMB16_INSN(0x46c0), /* nop */
2141 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2142 ARM_INSN(0xe08cf00f), /* add pc, ip, pc */
2143 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2144 };
2145
2146 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2147 architectures. */
2148 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2149 {
2150 THUMB16_INSN(0xb401), /* push {r0} */
2151 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2152 THUMB16_INSN(0x46fc), /* mov ip, pc */
2153 THUMB16_INSN(0x4484), /* add ip, r0 */
2154 THUMB16_INSN(0xbc01), /* pop {r0} */
2155 THUMB16_INSN(0x4760), /* bx ip */
2156 DATA_WORD(0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2157 };
2158
2159 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2160 allowed. */
2161 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2162 {
2163 THUMB16_INSN(0x4778), /* bx pc */
2164 THUMB16_INSN(0x46c0), /* nop */
2165 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2166 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2167 ARM_INSN(0xe12fff1c), /* bx ip */
2168 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2169 };
2170
2171 /* Cortex-A8 erratum-workaround stubs. */
2172
2173 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2174 can't use a conditional branch to reach this stub). */
2175
2176 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2177 {
2178 THUMB16_BCOND_INSN(0xd001), /* b<cond>.n true. */
2179 THUMB32_B_INSN(0xf000b800, -4), /* b.w insn_after_original_branch. */
2180 THUMB32_B_INSN(0xf000b800, -4) /* true: b.w original_branch_dest. */
2181 };
2182
2183 /* Stub used for b.w and bl.w instructions. */
2184
2185 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2186 {
2187 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2188 };
2189
2190 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2191 {
2192 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2193 };
2194
2195 /* Stub used for Thumb-2 blx.w instructions.  The original blx.w
2196 instruction (which switches to ARM mode) is modified to point to this
2197 stub; the stub then jumps to the real destination using an ARM-mode branch.  */
2198
2199 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2200 {
2201 ARM_REL_INSN(0xea000000, -8) /* b original_branch_dest. */
2202 };
2203
2204 /* Section name for stubs is the associated section name plus this
2205 string. */
2206 #define STUB_SUFFIX ".stub"
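/* So, for example, stubs serving input sections grouped under ".text"
   end up in a stub section named ".text.stub".  */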
2207
2208 /* One entry per long/short branch stub defined above. */
2209 #define DEF_STUBS \
2210 DEF_STUB(long_branch_any_any) \
2211 DEF_STUB(long_branch_v4t_arm_thumb) \
2212 DEF_STUB(long_branch_thumb_only) \
2213 DEF_STUB(long_branch_v4t_thumb_thumb) \
2214 DEF_STUB(long_branch_v4t_thumb_arm) \
2215 DEF_STUB(short_branch_v4t_thumb_arm) \
2216 DEF_STUB(long_branch_any_arm_pic) \
2217 DEF_STUB(long_branch_any_thumb_pic) \
2218 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2219 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2220 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2221 DEF_STUB(long_branch_thumb_only_pic) \
2222 DEF_STUB(a8_veneer_b_cond) \
2223 DEF_STUB(a8_veneer_b) \
2224 DEF_STUB(a8_veneer_bl) \
2225 DEF_STUB(a8_veneer_blx)
2226
2227 #define DEF_STUB(x) arm_stub_##x,
2228 enum elf32_arm_stub_type {
2229 arm_stub_none,
2230 DEF_STUBS
2231 };
2232 #undef DEF_STUB
2233
2234 typedef struct
2235 {
2236 const insn_sequence* template;
2237 int template_size;
2238 } stub_def;
2239
2240 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2241 static const stub_def stub_definitions[] = {
2242 {NULL, 0},
2243 DEF_STUBS
2244 };
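/* Sketch of the X-macro expansion above (illustrative): with
     #define DEF_STUB(x) arm_stub_##x,
   DEF_STUBS yields the enumerators arm_stub_long_branch_any_any,
   arm_stub_long_branch_v4t_arm_thumb, ..., while the second expansion,
     #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
   yields the matching rows of stub_definitions.  The leading {NULL, 0}
   row keeps stub_definitions[stub_type] aligned with the enum, whose
   first value is arm_stub_none.  */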
2245
2246 struct elf32_arm_stub_hash_entry
2247 {
2248 /* Base hash table entry structure. */
2249 struct bfd_hash_entry root;
2250
2251 /* The stub section. */
2252 asection *stub_sec;
2253
2254 /* Offset within stub_sec of the beginning of this stub. */
2255 bfd_vma stub_offset;
2256
2257 /* Given the symbol's value and its section we can determine its final
2258 value when building the stubs (so the stub knows where to jump). */
2259 bfd_vma target_value;
2260 asection *target_section;
2261
2262 /* Offset to apply to relocation referencing target_value. */
2263 bfd_vma target_addend;
2264
2265 /* The instruction which caused this stub to be generated (only valid for
2266 Cortex-A8 erratum workaround stubs at present). */
2267 unsigned long orig_insn;
2268
2269 /* The stub type. */
2270 enum elf32_arm_stub_type stub_type;
2271 /* Its encoding size in bytes. */
2272 int stub_size;
2273 /* Its template. */
2274 const insn_sequence *stub_template;
2275 /* The size of the template (number of entries). */
2276 int stub_template_size;
2277
2278 /* The symbol table entry, if any, that this was derived from. */
2279 struct elf32_arm_link_hash_entry *h;
2280
2281 /* Destination symbol type (STT_ARM_TFUNC, ...) */
2282 unsigned char st_type;
2283
2284 /* Where this stub is being called from, or, in the case of combined
2285 stub sections, the first input section in the group. */
2286 asection *id_sec;
2287
2288 /* The name for the local symbol at the start of this stub. The
2289 stub name in the hash table has to be unique; this does not, so
2290 it can be friendlier. */
2291 char *output_name;
2292 };
2293
2294 /* Used to build a map of a section. This is required for mixed-endian
2295 code/data. */
2296
2297 typedef struct elf32_elf_section_map
2298 {
2299 bfd_vma vma;
2300 char type;
2301 }
2302 elf32_arm_section_map;
2303
2304 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2305
2306 typedef enum
2307 {
2308 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2309 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2310 VFP11_ERRATUM_ARM_VENEER,
2311 VFP11_ERRATUM_THUMB_VENEER
2312 }
2313 elf32_vfp11_erratum_type;
2314
2315 typedef struct elf32_vfp11_erratum_list
2316 {
2317 struct elf32_vfp11_erratum_list *next;
2318 bfd_vma vma;
2319 union
2320 {
2321 struct
2322 {
2323 struct elf32_vfp11_erratum_list *veneer;
2324 unsigned int vfp_insn;
2325 } b;
2326 struct
2327 {
2328 struct elf32_vfp11_erratum_list *branch;
2329 unsigned int id;
2330 } v;
2331 } u;
2332 elf32_vfp11_erratum_type type;
2333 }
2334 elf32_vfp11_erratum_list;
2335
2336 typedef enum
2337 {
2338 DELETE_EXIDX_ENTRY,
2339 INSERT_EXIDX_CANTUNWIND_AT_END
2340 }
2341 arm_unwind_edit_type;
2342
2343 /* A (sorted) list of edits to apply to an unwind table. */
2344 typedef struct arm_unwind_table_edit
2345 {
2346 arm_unwind_edit_type type;
2347 /* Note: we sometimes want to insert an unwind entry corresponding to a
2348 section different from the one we're currently writing out, so record the
2349 (text) section this edit relates to here. */
2350 asection *linked_section;
2351 unsigned int index;
2352 struct arm_unwind_table_edit *next;
2353 }
2354 arm_unwind_table_edit;
2355
2356 typedef struct _arm_elf_section_data
2357 {
2358 /* Information about mapping symbols. */
2359 struct bfd_elf_section_data elf;
2360 unsigned int mapcount;
2361 unsigned int mapsize;
2362 elf32_arm_section_map *map;
2363 /* Information about CPU errata. */
2364 unsigned int erratumcount;
2365 elf32_vfp11_erratum_list *erratumlist;
2366 /* Information about unwind tables. */
2367 union
2368 {
2369 /* Unwind info attached to a text section. */
2370 struct
2371 {
2372 asection *arm_exidx_sec;
2373 } text;
2374
2375 /* Unwind info attached to an .ARM.exidx section. */
2376 struct
2377 {
2378 arm_unwind_table_edit *unwind_edit_list;
2379 arm_unwind_table_edit *unwind_edit_tail;
2380 } exidx;
2381 } u;
2382 }
2383 _arm_elf_section_data;
2384
2385 #define elf32_arm_section_data(sec) \
2386 ((_arm_elf_section_data *) elf_section_data (sec))
2387
2388 /* A fix which might be required for the Cortex-A8 Thumb-2 branch/TLB erratum.
2389 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2390 so they may be created multiple times: whilst relaxing we keep an array of
2391 these entries, which is easy to refresh, and only create stubs for each
2392 potentially erratum-triggering instruction once we've settled on a solution.  */
2393
2394 struct a8_erratum_fix {
2395 bfd *input_bfd;
2396 asection *section;
2397 bfd_vma offset;
2398 bfd_vma addend;
2399 unsigned long orig_insn;
2400 char *stub_name;
2401 enum elf32_arm_stub_type stub_type;
2402 };
2403
2404 /* A table of relocs applied to branches which might trigger the
2405 Cortex-A8 erratum.  */
2406
2407 struct a8_erratum_reloc {
2408 bfd_vma from;
2409 bfd_vma destination;
2410 unsigned int r_type;
2411 unsigned char st_type;
2412 const char *sym_name;
2413 bfd_boolean non_a8_stub;
2414 };
2415
2416 /* The size of the thread control block. */
2417 #define TCB_SIZE 8
2418
2419 struct elf_arm_obj_tdata
2420 {
2421 struct elf_obj_tdata root;
2422
2423 /* tls_type for each local got entry. */
2424 char *local_got_tls_type;
2425
2426 /* Zero to warn when linking objects with incompatible enum sizes. */
2427 int no_enum_size_warning;
2428
2429 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2430 int no_wchar_size_warning;
2431 };
2432
2433 #define elf_arm_tdata(bfd) \
2434 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2435
2436 #define elf32_arm_local_got_tls_type(bfd) \
2437 (elf_arm_tdata (bfd)->local_got_tls_type)
2438
2439 #define is_arm_elf(bfd) \
2440 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2441 && elf_tdata (bfd) != NULL \
2442 && elf_object_id (bfd) == ARM_ELF_TDATA)
2443
2444 static bfd_boolean
2445 elf32_arm_mkobject (bfd *abfd)
2446 {
2447 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2448 ARM_ELF_TDATA);
2449 }
2450
2451 /* The ARM linker needs to keep track of the number of relocs that it
2452 decides to copy in check_relocs for each symbol. This is so that
2453 it can discard PC relative relocs if it doesn't need them when
2454 linking with -Bsymbolic. We store the information in a field
2455 extending the regular ELF linker hash table. */
2456
2457 /* This structure keeps track of the number of relocs we have copied
2458 for a given symbol. */
2459 struct elf32_arm_relocs_copied
2460 {
2461 /* Next section. */
2462 struct elf32_arm_relocs_copied * next;
2463 /* A section in dynobj. */
2464 asection * section;
2465 /* Number of relocs copied in this section. */
2466 bfd_size_type count;
2467 /* Number of PC-relative relocs copied in this section. */
2468 bfd_size_type pc_count;
2469 };
2470
2471 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2472
2473 /* Arm ELF linker hash entry. */
2474 struct elf32_arm_link_hash_entry
2475 {
2476 struct elf_link_hash_entry root;
2477
2478 /* Number of PC relative relocs copied for this symbol. */
2479 struct elf32_arm_relocs_copied * relocs_copied;
2480
2481 /* We reference count Thumb references to a PLT entry separately,
2482 so that we can emit the Thumb trampoline only if needed. */
2483 bfd_signed_vma plt_thumb_refcount;
2484
2485 /* Some references from Thumb code may be eliminated by BL->BLX
2486 conversion, so record them separately. */
2487 bfd_signed_vma plt_maybe_thumb_refcount;
2488
2489 /* Since PLT entries have variable size if the Thumb prologue is
2490 used, we need to record the index into .got.plt instead of
2491 recomputing it from the PLT offset. */
2492 bfd_signed_vma plt_got_offset;
2493
2494 #define GOT_UNKNOWN 0
2495 #define GOT_NORMAL 1
2496 #define GOT_TLS_GD 2
2497 #define GOT_TLS_IE 4
2498 unsigned char tls_type;
2499
2500 /* The symbol marking the real symbol location for exported thumb
2501 symbols with Arm stubs. */
2502 struct elf_link_hash_entry *export_glue;
2503
2504 /* A pointer to the most recently used stub hash entry against this
2505 symbol. */
2506 struct elf32_arm_stub_hash_entry *stub_cache;
2507 };
2508
2509 /* Traverse an arm ELF linker hash table. */
2510 #define elf32_arm_link_hash_traverse(table, func, info) \
2511 (elf_link_hash_traverse \
2512 (&(table)->root, \
2513 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2514 (info)))
2515
2516 /* Get the ARM elf linker hash table from a link_info structure. */
2517 #define elf32_arm_hash_table(info) \
2518 ((struct elf32_arm_link_hash_table *) ((info)->hash))
2519
2520 #define arm_stub_hash_lookup(table, string, create, copy) \
2521 ((struct elf32_arm_stub_hash_entry *) \
2522 bfd_hash_lookup ((table), (string), (create), (copy)))
2523
2524 /* ARM ELF linker hash table. */
2525 struct elf32_arm_link_hash_table
2526 {
2527 /* The main hash table. */
2528 struct elf_link_hash_table root;
2529
2530 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
2531 bfd_size_type thumb_glue_size;
2532
2533 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
2534 bfd_size_type arm_glue_size;
2535
2536 /* The size in bytes of section containing the ARMv4 BX veneers. */
2537 bfd_size_type bx_glue_size;
2538
2539 /* Offsets of ARMv4 BX veneers.  Bit 1 is set if the veneer is present,
2540 and bit 0 is set once the veneer has been populated.  */
2541 bfd_vma bx_glue_offset[15];
2542
2543 /* The size in bytes of the section containing glue for VFP11 erratum
2544 veneers. */
2545 bfd_size_type vfp11_erratum_glue_size;
2546
2547 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
2548 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
2549 elf32_arm_write_section(). */
2550 struct a8_erratum_fix *a8_erratum_fixes;
2551 unsigned int num_a8_erratum_fixes;
2552
2553 /* An arbitrary input BFD chosen to hold the glue sections. */
2554 bfd * bfd_of_glue_owner;
2555
2556 /* Nonzero to output a BE8 image. */
2557 int byteswap_code;
2558
2559 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
2560 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
2561 int target1_is_rel;
2562
2563 /* The relocation to use for R_ARM_TARGET2 relocations. */
2564 int target2_reloc;
2565
2566 /* 0 = Ignore R_ARM_V4BX.
2567 1 = Convert BX to MOV PC.
2568 2 = Generate v4 interworking stubs.  */
2569 int fix_v4bx;
2570
2571 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2572 int fix_cortex_a8;
2573
2574 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2575 int use_blx;
2576
2577 /* What sort of code sequences we should look for which may trigger the
2578 VFP11 denorm erratum. */
2579 bfd_arm_vfp11_fix vfp11_fix;
2580
2581 /* Global counter for the number of fixes we have emitted. */
2582 int num_vfp11_fixes;
2583
2584 /* Nonzero to force PIC branch veneers. */
2585 int pic_veneer;
2586
2587 /* The number of bytes in the initial entry in the PLT. */
2588 bfd_size_type plt_header_size;
2589
2590 /* The number of bytes in the subsequent PLT entries.  */
2591 bfd_size_type plt_entry_size;
2592
2593 /* True if the target system is VxWorks. */
2594 int vxworks_p;
2595
2596 /* True if the target system is Symbian OS. */
2597 int symbian_p;
2598
2599 /* True if the target uses REL relocations. */
2600 int use_rel;
2601
2602 /* Short-cuts to get to dynamic linker sections. */
2603 asection *sgot;
2604 asection *sgotplt;
2605 asection *srelgot;
2606 asection *splt;
2607 asection *srelplt;
2608 asection *sdynbss;
2609 asection *srelbss;
2610
2611 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
2612 asection *srelplt2;
2613
2614 /* Data for R_ARM_TLS_LDM32 relocations. */
2615 union
2616 {
2617 bfd_signed_vma refcount;
2618 bfd_vma offset;
2619 } tls_ldm_got;
2620
2621 /* Small local sym cache. */
2622 struct sym_cache sym_cache;
2623
2624 /* For convenience in allocate_dynrelocs. */
2625 bfd * obfd;
2626
2627 /* The stub hash table. */
2628 struct bfd_hash_table stub_hash_table;
2629
2630 /* Linker stub bfd. */
2631 bfd *stub_bfd;
2632
2633 /* Linker call-backs. */
2634 asection * (*add_stub_section) (const char *, asection *);
2635 void (*layout_sections_again) (void);
2636
2637 /* Array to keep track of which stub sections have been created, and
2638 information on stub grouping. */
2639 struct map_stub
2640 {
2641 /* This is the section to which stubs in the group will be
2642 attached. */
2643 asection *link_sec;
2644 /* The stub section. */
2645 asection *stub_sec;
2646 } *stub_group;
2647
2648 /* Assorted information used by elf32_arm_size_stubs. */
2649 unsigned int bfd_count;
2650 int top_index;
2651 asection **input_list;
2652 };
2653
2654 /* Create an entry in an ARM ELF linker hash table. */
2655
2656 static struct bfd_hash_entry *
2657 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
2658 struct bfd_hash_table * table,
2659 const char * string)
2660 {
2661 struct elf32_arm_link_hash_entry * ret =
2662 (struct elf32_arm_link_hash_entry *) entry;
2663
2664 /* Allocate the structure if it has not already been allocated by a
2665 subclass. */
2666 if (ret == NULL)
2667 ret = bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
2668 if (ret == NULL)
2669 return (struct bfd_hash_entry *) ret;
2670
2671 /* Call the allocation method of the superclass. */
2672 ret = ((struct elf32_arm_link_hash_entry *)
2673 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2674 table, string));
2675 if (ret != NULL)
2676 {
2677 ret->relocs_copied = NULL;
2678 ret->tls_type = GOT_UNKNOWN;
2679 ret->plt_thumb_refcount = 0;
2680 ret->plt_maybe_thumb_refcount = 0;
2681 ret->plt_got_offset = -1;
2682 ret->export_glue = NULL;
2683
2684 ret->stub_cache = NULL;
2685 }
2686
2687 return (struct bfd_hash_entry *) ret;
2688 }
2689
2690 /* Initialize an entry in the stub hash table. */
2691
2692 static struct bfd_hash_entry *
2693 stub_hash_newfunc (struct bfd_hash_entry *entry,
2694 struct bfd_hash_table *table,
2695 const char *string)
2696 {
2697 /* Allocate the structure if it has not already been allocated by a
2698 subclass. */
2699 if (entry == NULL)
2700 {
2701 entry = bfd_hash_allocate (table,
2702 sizeof (struct elf32_arm_stub_hash_entry));
2703 if (entry == NULL)
2704 return entry;
2705 }
2706
2707 /* Call the allocation method of the superclass. */
2708 entry = bfd_hash_newfunc (entry, table, string);
2709 if (entry != NULL)
2710 {
2711 struct elf32_arm_stub_hash_entry *eh;
2712
2713 /* Initialize the local fields. */
2714 eh = (struct elf32_arm_stub_hash_entry *) entry;
2715 eh->stub_sec = NULL;
2716 eh->stub_offset = 0;
2717 eh->target_value = 0;
2718 eh->target_section = NULL;
2719 eh->stub_type = arm_stub_none;
2720 eh->stub_size = 0;
2721 eh->stub_template = NULL;
2722 eh->stub_template_size = 0;
2723 eh->h = NULL;
2724 eh->id_sec = NULL;
2725 }
2726
2727 return entry;
2728 }
2729
2730 /* Create .got, .got.plt, and .rel(a).got sections in DYNOBJ, and set up
2731 shortcuts to them in our hash table. */
2732
2733 static bfd_boolean
2734 create_got_section (bfd *dynobj, struct bfd_link_info *info)
2735 {
2736 struct elf32_arm_link_hash_table *htab;
2737
2738 htab = elf32_arm_hash_table (info);
2739 /* BPABI objects never have a GOT, or associated sections. */
2740 if (htab->symbian_p)
2741 return TRUE;
2742
2743 if (! _bfd_elf_create_got_section (dynobj, info))
2744 return FALSE;
2745
2746 htab->sgot = bfd_get_section_by_name (dynobj, ".got");
2747 htab->sgotplt = bfd_get_section_by_name (dynobj, ".got.plt");
2748 if (!htab->sgot || !htab->sgotplt)
2749 abort ();
2750
2751 htab->srelgot = bfd_get_section_by_name (dynobj,
2752 RELOC_SECTION (htab, ".got"));
2753 if (htab->srelgot == NULL)
2754 return FALSE;
2755 return TRUE;
2756 }
2757
2758 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
2759 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
2760 hash table. */
2761
2762 static bfd_boolean
2763 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
2764 {
2765 struct elf32_arm_link_hash_table *htab;
2766
2767 htab = elf32_arm_hash_table (info);
2768 if (!htab->sgot && !create_got_section (dynobj, info))
2769 return FALSE;
2770
2771 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
2772 return FALSE;
2773
2774 htab->splt = bfd_get_section_by_name (dynobj, ".plt");
2775 htab->srelplt = bfd_get_section_by_name (dynobj,
2776 RELOC_SECTION (htab, ".plt"));
2777 htab->sdynbss = bfd_get_section_by_name (dynobj, ".dynbss");
2778 if (!info->shared)
2779 htab->srelbss = bfd_get_section_by_name (dynobj,
2780 RELOC_SECTION (htab, ".bss"));
2781
2782 if (htab->vxworks_p)
2783 {
2784 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
2785 return FALSE;
2786
2787 if (info->shared)
2788 {
2789 htab->plt_header_size = 0;
2790 htab->plt_entry_size
2791 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
2792 }
2793 else
2794 {
2795 htab->plt_header_size
2796 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
2797 htab->plt_entry_size
2798 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
2799 }
2800 }
2801
2802 if (!htab->splt
2803 || !htab->srelplt
2804 || !htab->sdynbss
2805 || (!info->shared && !htab->srelbss))
2806 abort ();
2807
2808 return TRUE;
2809 }
2810
2811 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2812
2813 static void
2814 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
2815 struct elf_link_hash_entry *dir,
2816 struct elf_link_hash_entry *ind)
2817 {
2818 struct elf32_arm_link_hash_entry *edir, *eind;
2819
2820 edir = (struct elf32_arm_link_hash_entry *) dir;
2821 eind = (struct elf32_arm_link_hash_entry *) ind;
2822
2823 if (eind->relocs_copied != NULL)
2824 {
2825 if (edir->relocs_copied != NULL)
2826 {
2827 struct elf32_arm_relocs_copied **pp;
2828 struct elf32_arm_relocs_copied *p;
2829
2830 /* Add reloc counts against the indirect sym to the direct sym
2831 list. Merge any entries against the same section. */
2832 for (pp = &eind->relocs_copied; (p = *pp) != NULL; )
2833 {
2834 struct elf32_arm_relocs_copied *q;
2835
2836 for (q = edir->relocs_copied; q != NULL; q = q->next)
2837 if (q->section == p->section)
2838 {
2839 q->pc_count += p->pc_count;
2840 q->count += p->count;
2841 *pp = p->next;
2842 break;
2843 }
2844 if (q == NULL)
2845 pp = &p->next;
2846 }
2847 *pp = edir->relocs_copied;
2848 }
2849
2850 edir->relocs_copied = eind->relocs_copied;
2851 eind->relocs_copied = NULL;
2852 }
2853
2854 if (ind->root.type == bfd_link_hash_indirect)
2855 {
2856 /* Copy over PLT info. */
2857 edir->plt_thumb_refcount += eind->plt_thumb_refcount;
2858 eind->plt_thumb_refcount = 0;
2859 edir->plt_maybe_thumb_refcount += eind->plt_maybe_thumb_refcount;
2860 eind->plt_maybe_thumb_refcount = 0;
2861
2862 if (dir->got.refcount <= 0)
2863 {
2864 edir->tls_type = eind->tls_type;
2865 eind->tls_type = GOT_UNKNOWN;
2866 }
2867 }
2868
2869 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2870 }
2871
2872 /* Create an ARM elf linker hash table. */
2873
2874 static struct bfd_link_hash_table *
2875 elf32_arm_link_hash_table_create (bfd *abfd)
2876 {
2877 struct elf32_arm_link_hash_table *ret;
2878 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
2879
2880 ret = bfd_malloc (amt);
2881 if (ret == NULL)
2882 return NULL;
2883
2884 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
2885 elf32_arm_link_hash_newfunc,
2886 sizeof (struct elf32_arm_link_hash_entry)))
2887 {
2888 free (ret);
2889 return NULL;
2890 }
2891
2892 ret->sgot = NULL;
2893 ret->sgotplt = NULL;
2894 ret->srelgot = NULL;
2895 ret->splt = NULL;
2896 ret->srelplt = NULL;
2897 ret->sdynbss = NULL;
2898 ret->srelbss = NULL;
2899 ret->srelplt2 = NULL;
2900 ret->thumb_glue_size = 0;
2901 ret->arm_glue_size = 0;
2902 ret->bx_glue_size = 0;
2903 memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset));
2904 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
2905 ret->vfp11_erratum_glue_size = 0;
2906 ret->num_vfp11_fixes = 0;
2907 ret->fix_cortex_a8 = 0;
2908 ret->bfd_of_glue_owner = NULL;
2909 ret->byteswap_code = 0;
2910 ret->target1_is_rel = 0;
2911 ret->target2_reloc = R_ARM_NONE;
2912 #ifdef FOUR_WORD_PLT
2913 ret->plt_header_size = 16;
2914 ret->plt_entry_size = 16;
2915 #else
2916 ret->plt_header_size = 20;
2917 ret->plt_entry_size = 12;
2918 #endif
2919 ret->fix_v4bx = 0;
2920 ret->use_blx = 0;
2921 ret->vxworks_p = 0;
2922 ret->symbian_p = 0;
2923 ret->use_rel = 1;
2924 ret->sym_cache.abfd = NULL;
2925 ret->obfd = abfd;
2926 ret->tls_ldm_got.refcount = 0;
2927 ret->stub_bfd = NULL;
2928 ret->add_stub_section = NULL;
2929 ret->layout_sections_again = NULL;
2930 ret->stub_group = NULL;
2931 ret->bfd_count = 0;
2932 ret->top_index = 0;
2933 ret->input_list = NULL;
2934
2935 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2936 sizeof (struct elf32_arm_stub_hash_entry)))
2937 {
2938 free (ret);
2939 return NULL;
2940 }
2941
2942 return &ret->root.root;
2943 }
2944
2945 /* Free the derived linker hash table. */
2946
2947 static void
2948 elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
2949 {
2950 struct elf32_arm_link_hash_table *ret
2951 = (struct elf32_arm_link_hash_table *) hash;
2952
2953 bfd_hash_table_free (&ret->stub_hash_table);
2954 _bfd_generic_link_hash_table_free (hash);
2955 }
2956
2957 /* Determine if we're dealing with a Thumb-only architecture.  */
2958
2959 static bfd_boolean
2960 using_thumb_only (struct elf32_arm_link_hash_table *globals)
2961 {
2962 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2963 Tag_CPU_arch);
2964 int profile;
2965
2966 if (arch != TAG_CPU_ARCH_V7)
2967 return FALSE;
2968
2969 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2970 Tag_CPU_arch_profile);
2971
2972 return profile == 'M';
2973 }
2974
2975 /* Determine if we're dealing with a Thumb-2 object. */
2976
2977 static bfd_boolean
2978 using_thumb2 (struct elf32_arm_link_hash_table *globals)
2979 {
2980 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2981 Tag_CPU_arch);
2982 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
2983 }
2984
2985 static bfd_boolean
2986 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
2987 {
2988 switch (stub_type)
2989 {
2990 case arm_stub_long_branch_thumb_only:
2991 case arm_stub_long_branch_v4t_thumb_arm:
2992 case arm_stub_short_branch_v4t_thumb_arm:
2993 case arm_stub_long_branch_v4t_thumb_arm_pic:
2994 case arm_stub_long_branch_thumb_only_pic:
2995 return TRUE;
2996 case arm_stub_none:
2997 BFD_FAIL ();
2998 return FALSE;
2999 break;
3000 default:
3001 return FALSE;
3002 }
3003 }
3004
3005 /* Determine the type of stub needed, if any, for a call. */
3006
3007 static enum elf32_arm_stub_type
3008 arm_type_of_stub (struct bfd_link_info *info,
3009 asection *input_sec,
3010 const Elf_Internal_Rela *rel,
3011 unsigned char st_type,
3012 struct elf32_arm_link_hash_entry *hash,
3013 bfd_vma destination,
3014 asection *sym_sec,
3015 bfd *input_bfd,
3016 const char *name)
3017 {
3018 bfd_vma location;
3019 bfd_signed_vma branch_offset;
3020 unsigned int r_type;
3021 struct elf32_arm_link_hash_table * globals;
3022 int thumb2;
3023 int thumb_only;
3024 enum elf32_arm_stub_type stub_type = arm_stub_none;
3025 int use_plt = 0;
3026
3027 /* If the destination is of type STT_SECTION we don't know its actual
3028 type, so give up.  */
3029 if (st_type == STT_SECTION)
3030 return stub_type;
3031
3032 globals = elf32_arm_hash_table (info);
3033
3034 thumb_only = using_thumb_only (globals);
3035
3036 thumb2 = using_thumb2 (globals);
3037
3038 /* Determine where the call point is. */
3039 location = (input_sec->output_offset
3040 + input_sec->output_section->vma
3041 + rel->r_offset);
3042
3043 branch_offset = (bfd_signed_vma)(destination - location);
3044
3045 r_type = ELF32_R_TYPE (rel->r_info);
3046
3047 /* Keep a simpler condition, for the sake of clarity. */
3048 if (globals->splt != NULL && hash != NULL && hash->root.plt.offset != (bfd_vma) -1)
3049 {
3050 use_plt = 1;
3051 /* Note when dealing with PLT entries: the main PLT stub is in
3052 ARM mode, so if the branch is in Thumb mode, another
3053 Thumb->ARM stub will be inserted later just before the ARM
3054 PLT stub.  We don't take this extra distance into account
3055 here, because if a long branch stub is needed, we'll add a
3056 Thumb->ARM one and branch directly to the ARM PLT entry,
3057 since that avoids spreading offset corrections across several
3058 places.  */
3059 }
3060
3061 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
3062 {
3063 /* Handle cases where:
3064 - this call goes too far (different Thumb/Thumb2 max
3065 distance)
3066 - it's a Thumb->Arm call and blx is not available, or it's a
3067 Thumb->Arm branch (not bl). A stub is needed in this case,
3068 but only if this call is not through a PLT entry. Indeed,
3069 PLT stubs handle mode switching already.
3070 */
3071 if ((!thumb2
3072 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3073 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3074 || (thumb2
3075 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3076 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3077 || ((st_type != STT_ARM_TFUNC)
3078 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
3079 || (r_type == R_ARM_THM_JUMP24))
3080 && !use_plt))
3081 {
3082 if (st_type == STT_ARM_TFUNC)
3083 {
3084 /* Thumb to thumb. */
3085 if (!thumb_only)
3086 {
3087 stub_type = (info->shared | globals->pic_veneer)
3088 /* PIC stubs. */
3089 ? ((globals->use_blx
3090 && (r_type == R_ARM_THM_CALL))
3091 /* V5T and above. Stub starts with ARM code, so
3092 we must be able to switch mode before
3093 reaching it, which is only possible for 'bl'
3094 (ie R_ARM_THM_CALL relocation). */
3095 ? arm_stub_long_branch_any_thumb_pic
3096 /* On V4T, use Thumb code only. */
3097 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3098
3099 /* non-PIC stubs. */
3100 : ((globals->use_blx
3101 && (r_type == R_ARM_THM_CALL))
3102 /* V5T and above. */
3103 ? arm_stub_long_branch_any_any
3104 /* V4T. */
3105 : arm_stub_long_branch_v4t_thumb_thumb);
3106 }
3107 else
3108 {
3109 stub_type = (info->shared | globals->pic_veneer)
3110 /* PIC stub. */
3111 ? arm_stub_long_branch_thumb_only_pic
3112 /* non-PIC stub. */
3113 : arm_stub_long_branch_thumb_only;
3114 }
3115 }
3116 else
3117 {
3118 /* Thumb to arm. */
3119 if (sym_sec != NULL
3120 && sym_sec->owner != NULL
3121 && !INTERWORK_FLAG (sym_sec->owner))
3122 {
3123 (*_bfd_error_handler)
3124 (_("%B(%s): warning: interworking not enabled.\n"
3125 " first occurrence: %B: Thumb call to ARM"),
3126 sym_sec->owner, name, input_bfd);
3127 }
3128
3129 stub_type = (info->shared | globals->pic_veneer)
3130 /* PIC stubs. */
3131 ? ((globals->use_blx
3132 && (r_type == R_ARM_THM_CALL))
3133 /* V5T and above. */
3134 ? arm_stub_long_branch_any_arm_pic
3135 /* V4T PIC stub. */
3136 : arm_stub_long_branch_v4t_thumb_arm_pic)
3137
3138 /* non-PIC stubs. */
3139 : ((globals->use_blx
3140 && (r_type == R_ARM_THM_CALL))
3141 /* V5T and above. */
3142 ? arm_stub_long_branch_any_any
3143 /* V4T. */
3144 : arm_stub_long_branch_v4t_thumb_arm);
3145
3146 /* Handle v4t short branches. */
3147 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3148 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3149 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3150 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3151 }
3152 }
3153 }
3154 else if (r_type == R_ARM_CALL || r_type == R_ARM_JUMP24 || r_type == R_ARM_PLT32)
3155 {
3156 if (st_type == STT_ARM_TFUNC)
3157 {
3158 /* Arm to thumb. */
3159
3160 if (sym_sec != NULL
3161 && sym_sec->owner != NULL
3162 && !INTERWORK_FLAG (sym_sec->owner))
3163 {
3164 (*_bfd_error_handler)
3165 (_("%B(%s): warning: interworking not enabled.\n"
3166 " first occurrence: %B: ARM call to Thumb"),
3167 sym_sec->owner, name, input_bfd);
3168 }
3169
3170 /* We have an extra 2-bytes reach because of
3171 the mode change (bit 24 (H) of BLX encoding). */
3172 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3173 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3174 || ((r_type == R_ARM_CALL) && !globals->use_blx)
3175 || (r_type == R_ARM_JUMP24)
3176 || (r_type == R_ARM_PLT32))
3177 {
3178 stub_type = (info->shared | globals->pic_veneer)
3179 /* PIC stubs. */
3180 ? ((globals->use_blx)
3181 /* V5T and above. */
3182 ? arm_stub_long_branch_any_thumb_pic
3183 /* V4T stub. */
3184 : arm_stub_long_branch_v4t_arm_thumb_pic)
3185
3186 /* non-PIC stubs. */
3187 : ((globals->use_blx)
3188 /* V5T and above. */
3189 ? arm_stub_long_branch_any_any
3190 /* V4T. */
3191 : arm_stub_long_branch_v4t_arm_thumb);
3192 }
3193 }
3194 else
3195 {
3196 /* Arm to arm. */
3197 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3198 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3199 {
3200 stub_type = (info->shared | globals->pic_veneer)
3201 /* PIC stubs. */
3202 ? arm_stub_long_branch_any_arm_pic
3203 /* non-PIC stubs. */
3204 : arm_stub_long_branch_any_any;
3205 }
3206 }
3207 }
3208
3209 return stub_type;
3210 }
3211
3212 /* Build a name for an entry in the stub hash table. */
3213
3214 static char *
3215 elf32_arm_stub_name (const asection *input_section,
3216 const asection *sym_sec,
3217 const struct elf32_arm_link_hash_entry *hash,
3218 const Elf_Internal_Rela *rel)
3219 {
3220 char *stub_name;
3221 bfd_size_type len;
3222
3223 if (hash)
3224 {
3225 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1;
3226 stub_name = bfd_malloc (len);
3227 if (stub_name != NULL)
3228 sprintf (stub_name, "%08x_%s+%x",
3229 input_section->id & 0xffffffff,
3230 hash->root.root.root.string,
3231 (int) rel->r_addend & 0xffffffff);
3232 }
3233 else
3234 {
3235 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1;
3236 stub_name = bfd_malloc (len);
3237 if (stub_name != NULL)
3238 sprintf (stub_name, "%08x_%x:%x+%x",
3239 input_section->id & 0xffffffff,
3240 sym_sec->id & 0xffffffff,
3241 (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3242 (int) rel->r_addend & 0xffffffff);
3243 }
3244
3245 return stub_name;
3246 }
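/* Worked example of the names built above (ids and indices are only
   illustrative): a call to the global symbol "printf" from the section
   with id 0x12 and a zero addend is named "00000012_printf+0", while a
   call to local symbol number 5 defined in the section with id 0x1a is
   named "00000012_1a:5+0".  */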
3247
3248 /* Look up an entry in the stub hash. Stub entries are cached because
3249 creating the stub name takes a bit of time. */
3250
3251 static struct elf32_arm_stub_hash_entry *
3252 elf32_arm_get_stub_entry (const asection *input_section,
3253 const asection *sym_sec,
3254 struct elf_link_hash_entry *hash,
3255 const Elf_Internal_Rela *rel,
3256 struct elf32_arm_link_hash_table *htab)
3257 {
3258 struct elf32_arm_stub_hash_entry *stub_entry;
3259 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3260 const asection *id_sec;
3261
3262 if ((input_section->flags & SEC_CODE) == 0)
3263 return NULL;
3264
3265 /* If this input section is part of a group of sections sharing one
3266 stub section, then use the id of the first section in the group.
3267 Stub names need to include a section id, as there may well be
3268 more than one stub used to reach say, printf, and we need to
3269 distinguish between them. */
3270 id_sec = htab->stub_group[input_section->id].link_sec;
3271
3272 if (h != NULL && h->stub_cache != NULL
3273 && h->stub_cache->h == h
3274 && h->stub_cache->id_sec == id_sec)
3275 {
3276 stub_entry = h->stub_cache;
3277 }
3278 else
3279 {
3280 char *stub_name;
3281
3282 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel);
3283 if (stub_name == NULL)
3284 return NULL;
3285
3286 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3287 stub_name, FALSE, FALSE);
3288 if (h != NULL)
3289 h->stub_cache = stub_entry;
3290
3291 free (stub_name);
3292 }
3293
3294 return stub_entry;
3295 }
3296
3297 /* Find or create a stub section. Returns a pointer to the stub section, and
3298 the section to which the stub section will be attached (in *LINK_SEC_P).
3299 LINK_SEC_P may be NULL. */
3300
3301 static asection *
3302 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3303 struct elf32_arm_link_hash_table *htab)
3304 {
3305 asection *link_sec;
3306 asection *stub_sec;
3307
3308 link_sec = htab->stub_group[section->id].link_sec;
3309 stub_sec = htab->stub_group[section->id].stub_sec;
3310 if (stub_sec == NULL)
3311 {
3312 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3313 if (stub_sec == NULL)
3314 {
3315 size_t namelen;
3316 bfd_size_type len;
3317 char *s_name;
3318
3319 namelen = strlen (link_sec->name);
3320 len = namelen + sizeof (STUB_SUFFIX);
3321 s_name = bfd_alloc (htab->stub_bfd, len);
3322 if (s_name == NULL)
3323 return NULL;
3324
3325 memcpy (s_name, link_sec->name, namelen);
3326 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3327 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3328 if (stub_sec == NULL)
3329 return NULL;
3330 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3331 }
3332 htab->stub_group[section->id].stub_sec = stub_sec;
3333 }
3334
3335 if (link_sec_p)
3336 *link_sec_p = link_sec;
3337
3338 return stub_sec;
3339 }
3340
3341 /* Add a new stub entry to the stub hash. Not all fields of the new
3342 stub entry are initialised. */
3343
3344 static struct elf32_arm_stub_hash_entry *
3345 elf32_arm_add_stub (const char *stub_name,
3346 asection *section,
3347 struct elf32_arm_link_hash_table *htab)
3348 {
3349 asection *link_sec;
3350 asection *stub_sec;
3351 struct elf32_arm_stub_hash_entry *stub_entry;
3352
3353 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
3354 if (stub_sec == NULL)
3355 return NULL;
3356
3357 /* Enter this entry into the linker stub hash table. */
3358 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3359 TRUE, FALSE);
3360 if (stub_entry == NULL)
3361 {
3362 (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
3363 section->owner,
3364 stub_name);
3365 return NULL;
3366 }
3367
3368 stub_entry->stub_sec = stub_sec;
3369 stub_entry->stub_offset = 0;
3370 stub_entry->id_sec = link_sec;
3371
3372 return stub_entry;
3373 }
3374
3375 /* Store an Arm insn into an output section not processed by
3376 elf32_arm_write_section. */
3377
3378 static void
3379 put_arm_insn (struct elf32_arm_link_hash_table * htab,
3380 bfd * output_bfd, bfd_vma val, void * ptr)
3381 {
3382 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3383 bfd_putl32 (val, ptr);
3384 else
3385 bfd_putb32 (val, ptr);
3386 }
3387
3388 /* Store a 16-bit Thumb insn into an output section not processed by
3389 elf32_arm_write_section. */
3390
3391 static void
3392 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
3393 bfd * output_bfd, bfd_vma val, void * ptr)
3394 {
3395 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3396 bfd_putl16 (val, ptr);
3397 else
3398 bfd_putb16 (val, ptr);
3399 }
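/* Usage sketch: in a BE8 link (byteswap_code set) instructions are
   stored little-endian even though the output BFD is big-endian, so
   put_arm_insn (htab, output_bfd, 0xe12fff1c, ptr), a "bx ip", is
   written as 1c ff 2f e1 in a BE8 image but as e1 2f ff 1c in a
   classic big-endian image; literal data words are still emitted with
   bfd_put_32 in the output byte order.  */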
3400
3401 static bfd_reloc_status_type elf32_arm_final_link_relocate
3402 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
3403 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
3404 const char *, int, struct elf_link_hash_entry *, bfd_boolean *, char **);
3405
3406 static bfd_boolean
3407 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
3408 void * in_arg)
3409 {
3410 #define MAXRELOCS 2
3411 struct elf32_arm_stub_hash_entry *stub_entry;
3412 struct bfd_link_info *info;
3413 struct elf32_arm_link_hash_table *htab;
3414 asection *stub_sec;
3415 bfd *stub_bfd;
3416 bfd_vma stub_addr;
3417 bfd_byte *loc;
3418 bfd_vma sym_value;
3419 int template_size;
3420 int size;
3421 const insn_sequence *template;
3422 int i;
3423 struct elf32_arm_link_hash_table * globals;
3424 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
3425 int stub_reloc_offset[MAXRELOCS] = {0, 0};
3426 int nrelocs = 0;
3427
3428 /* Massage our args to the form they really have. */
3429 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3430 info = (struct bfd_link_info *) in_arg;
3431
3432 globals = elf32_arm_hash_table (info);
3433
3434 htab = elf32_arm_hash_table (info);
3435 stub_sec = stub_entry->stub_sec;
3436
3437 /* Make a note of the offset within the stubs for this entry. */
3438 stub_entry->stub_offset = stub_sec->size;
3439 loc = stub_sec->contents + stub_entry->stub_offset;
3440
3441 stub_bfd = stub_sec->owner;
3442
3443 /* This is the address of the start of the stub. */
3444 stub_addr = stub_sec->output_section->vma + stub_sec->output_offset
3445 + stub_entry->stub_offset;
3446
3447 /* This is the address of the stub destination. */
3448 sym_value = (stub_entry->target_value
3449 + stub_entry->target_section->output_offset
3450 + stub_entry->target_section->output_section->vma);
3451
3452 template = stub_entry->stub_template;
3453 template_size = stub_entry->stub_template_size;
3454
3455 size = 0;
3456 for (i = 0; i < template_size; i++)
3457 {
3458 switch (template[i].type)
3459 {
3460 case THUMB16_TYPE:
3461 {
3462 bfd_vma data = template[i].data;
3463 if (template[i].reloc_addend != 0)
3464 {
3465 /* We've borrowed the reloc_addend field to mean we should
3466 insert a condition code into this (Thumb-1 branch)
3467 instruction. See THUMB16_BCOND_INSN. */
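		  /* The original Thumb-2 B<cond>.W holds its condition in
		     bits [25:22]; the 16-bit B<cond> encoding expects it
		     in bits [11:8].  */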
3468 BFD_ASSERT ((data & 0xff00) == 0xd000);
3469 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
3470 }
3471 put_thumb_insn (globals, stub_bfd, data, loc + size);
3472 size += 2;
3473 }
3474 break;
3475
3476 case THUMB32_TYPE:
3477 put_thumb_insn (globals, stub_bfd, (template[i].data >> 16) & 0xffff,
3478 loc + size);
3479 put_thumb_insn (globals, stub_bfd, template[i].data & 0xffff,
3480 loc + size + 2);
3481 if (template[i].r_type != R_ARM_NONE)
3482 {
3483 stub_reloc_idx[nrelocs] = i;
3484 stub_reloc_offset[nrelocs++] = size;
3485 }
3486 size += 4;
3487 break;
3488
3489 case ARM_TYPE:
3490 put_arm_insn (globals, stub_bfd, template[i].data, loc + size);
3491 /* Handle cases where the target is encoded within the
3492 instruction. */
3493 if (template[i].r_type == R_ARM_JUMP24)
3494 {
3495 stub_reloc_idx[nrelocs] = i;
3496 stub_reloc_offset[nrelocs++] = size;
3497 }
3498 size += 4;
3499 break;
3500
3501 case DATA_TYPE:
3502 bfd_put_32 (stub_bfd, template[i].data, loc + size);
3503 stub_reloc_idx[nrelocs] = i;
3504 stub_reloc_offset[nrelocs++] = size;
3505 size += 4;
3506 break;
3507
3508 default:
3509 BFD_FAIL ();
3510 return FALSE;
3511 }
3512 }
3513
3514 stub_sec->size += size;
3515
3516 /* Stub size has already been computed in arm_size_one_stub. Check
3517 consistency. */
3518 BFD_ASSERT (size == stub_entry->stub_size);
3519
3520 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
3521 if (stub_entry->st_type == STT_ARM_TFUNC)
3522 sym_value |= 1;
3523
3524 /* Assume each stub has at least one and at most MAXRELOCS entries
3525 to relocate. */
3526 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
3527
3528 for (i = 0; i < nrelocs; i++)
3529 if (template[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
3530 || template[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
3531 || template[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
3532 || template[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
3533 {
3534 Elf_Internal_Rela rel;
3535 bfd_boolean unresolved_reloc;
3536 char *error_message;
3537 int sym_flags
3538 = (template[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22)
3539 ? STT_ARM_TFUNC : 0;
3540 bfd_vma points_to = sym_value + stub_entry->target_addend;
3541
3542 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3543 rel.r_info = ELF32_R_INFO (0, template[stub_reloc_idx[i]].r_type);
3544 rel.r_addend = template[stub_reloc_idx[i]].reloc_addend;
3545
3546 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
3547 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
3548 template should refer back to the instruction after the original
3549 branch. */
3550 points_to = sym_value;
3551
3552 /* There may be unintended consequences if this is not true. */
3553 BFD_ASSERT (stub_entry->h == NULL);
3554
3555 /* Note: _bfd_final_link_relocate doesn't handle these relocations
3556 properly. We should probably use elf32_arm_final_link_relocate
3557 unconditionally, rather than only for the relocation types listed
3558 in the enclosing conditional, for the sake of consistency. */
3559 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3560 (template[stub_reloc_idx[i]].r_type),
3561 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3562 points_to, info, stub_entry->target_section, "", sym_flags,
3563 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
3564 &error_message);
3565 }
3566 else
3567 {
3568 _bfd_final_link_relocate (elf32_arm_howto_from_type
3569 (template[stub_reloc_idx[i]].r_type), stub_bfd, stub_sec,
3570 stub_sec->contents, stub_entry->stub_offset + stub_reloc_offset[i],
3571 sym_value + stub_entry->target_addend,
3572 template[stub_reloc_idx[i]].reloc_addend);
3573 }
3574
3575 return TRUE;
3576 #undef MAXRELOCS
3577 }
3578
3579 /* Calculate the template, template size and instruction size for a stub.
3580 Return value is the instruction size. */
3581
3582 static unsigned int
3583 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
3584 const insn_sequence **stub_template,
3585 int *stub_template_size)
3586 {
3587 const insn_sequence *template = NULL;
3588 int template_size = 0, i;
3589 unsigned int size;
3590
3591 template = stub_definitions[stub_type].template;
3592 template_size = stub_definitions[stub_type].template_size;
3593
3594 size = 0;
3595 for (i = 0; i < template_size; i++)
3596 {
3597 switch (template[i].type)
3598 {
3599 case THUMB16_TYPE:
3600 size += 2;
3601 break;
3602
3603 case ARM_TYPE:
3604 case THUMB32_TYPE:
3605 case DATA_TYPE:
3606 size += 4;
3607 break;
3608
3609 default:
3610 BFD_FAIL ();
3611 return 0;
3612 }
3613 }
3614
3615 if (stub_template)
3616 *stub_template = template;
3617
3618 if (stub_template_size)
3619 *stub_template_size = template_size;
3620
3621 return size;
3622 }
3623
3624 /* As above, but don't actually build the stub. Just bump offset so
3625 we know stub section sizes. */
3626
3627 static bfd_boolean
3628 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
3629 void * in_arg)
3630 {
3631 struct elf32_arm_stub_hash_entry *stub_entry;
3632 struct elf32_arm_link_hash_table *htab;
3633 const insn_sequence *template;
3634 int template_size, size;
3635
3636 /* Massage our args to the form they really have. */
3637 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3638 htab = (struct elf32_arm_link_hash_table *) in_arg;
3639
3640 BFD_ASSERT ((stub_entry->stub_type > arm_stub_none)
3641 && stub_entry->stub_type < ARRAY_SIZE (stub_definitions));
3642
3643 size = find_stub_size_and_template (stub_entry->stub_type, &template,
3644 &template_size);
3645
3646 stub_entry->stub_size = size;
3647 stub_entry->stub_template = template;
3648 stub_entry->stub_template_size = template_size;
3649
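/* Reserve the stub's space rounded up to a multiple of 8 bytes.  Note
   that arm_build_one_stub packs the stubs without this padding, so this
   is a slight over-estimate of the space actually used.  */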
3650 size = (size + 7) & ~7;
3651 stub_entry->stub_sec->size += size;
3652
3653 return TRUE;
3654 }
3655
3656 /* External entry points for sizing and building linker stubs. */
3657
3658 /* Set up various things so that we can make a list of input sections
3659 for each output section included in the link. Returns -1 on error,
3660 0 when no stubs will be needed, and 1 on success. */
3661
3662 int
3663 elf32_arm_setup_section_lists (bfd *output_bfd,
3664 struct bfd_link_info *info)
3665 {
3666 bfd *input_bfd;
3667 unsigned int bfd_count;
3668 int top_id, top_index;
3669 asection *section;
3670 asection **input_list, **list;
3671 bfd_size_type amt;
3672 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3673
3674 if (! is_elf_hash_table (htab))
3675 return 0;
3676
3677 /* Count the number of input BFDs and find the top input section id. */
3678 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
3679 input_bfd != NULL;
3680 input_bfd = input_bfd->link_next)
3681 {
3682 bfd_count += 1;
3683 for (section = input_bfd->sections;
3684 section != NULL;
3685 section = section->next)
3686 {
3687 if (top_id < section->id)
3688 top_id = section->id;
3689 }
3690 }
3691 htab->bfd_count = bfd_count;
3692
3693 amt = sizeof (struct map_stub) * (top_id + 1);
3694 htab->stub_group = bfd_zmalloc (amt);
3695 if (htab->stub_group == NULL)
3696 return -1;
3697
3698 /* We can't use output_bfd->section_count here to find the top output
3699 section index as some sections may have been removed, and
3700 _bfd_strip_section_from_output doesn't renumber the indices. */
3701 for (section = output_bfd->sections, top_index = 0;
3702 section != NULL;
3703 section = section->next)
3704 {
3705 if (top_index < section->index)
3706 top_index = section->index;
3707 }
3708
3709 htab->top_index = top_index;
3710 amt = sizeof (asection *) * (top_index + 1);
3711 input_list = bfd_malloc (amt);
3712 htab->input_list = input_list;
3713 if (input_list == NULL)
3714 return -1;
3715
3716 /* For sections we aren't interested in, mark their entries with a
3717 value we can check later. */
3718 list = input_list + top_index;
3719 do
3720 *list = bfd_abs_section_ptr;
3721 while (list-- != input_list);
3722
3723 for (section = output_bfd->sections;
3724 section != NULL;
3725 section = section->next)
3726 {
3727 if ((section->flags & SEC_CODE) != 0)
3728 input_list[section->index] = NULL;
3729 }
3730
3731 return 1;
3732 }
3733
3734 /* The linker repeatedly calls this function for each input section,
3735 in the order that input sections are linked into output sections.
3736 Build lists of input sections to determine groupings between which
3737 we may insert linker stubs. */
3738
3739 void
3740 elf32_arm_next_input_section (struct bfd_link_info *info,
3741 asection *isec)
3742 {
3743 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3744
3745 if (isec->output_section->index <= htab->top_index)
3746 {
3747 asection **list = htab->input_list + isec->output_section->index;
3748
3749 if (*list != bfd_abs_section_ptr)
3750 {
3751 /* Steal the link_sec pointer for our list. */
3752 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
3753 /* This happens to make the list in reverse order,
3754 which we reverse later. */
3755 PREV_SEC (isec) = *list;
3756 *list = isec;
3757 }
3758 }
3759 }
3760
3761 /* See whether we can group stub sections together. Grouping stub
3762 sections may result in fewer stubs. More importantly, we need to
3763 put all .init* and .fini* stubs at the end of the .init or
3764 .fini output sections respectively, because glibc splits the
3765 _init and _fini functions into multiple parts. Putting a stub in
3766 the middle of a function is not a good idea. */
3767
3768 static void
3769 group_sections (struct elf32_arm_link_hash_table *htab,
3770 bfd_size_type stub_group_size,
3771 bfd_boolean stubs_always_after_branch)
3772 {
3773 asection **list = htab->input_list;
3774
3775 do
3776 {
3777 asection *tail = *list;
3778 asection *head;
3779
3780 if (tail == bfd_abs_section_ptr)
3781 continue;
3782
3783 /* Reverse the list: we must avoid placing stubs at the
3784 beginning of the section because the beginning of the text
3785 section may be required for an interrupt vector in bare metal
3786 code. */
3787 #define NEXT_SEC PREV_SEC
3788 head = NULL;
3789 while (tail != NULL)
3790 {
3791 /* Pop from tail. */
3792 asection *item = tail;
3793 tail = PREV_SEC (item);
3794
3795 /* Push on head. */
3796 NEXT_SEC (item) = head;
3797 head = item;
3798 }
3799
3800 while (head != NULL)
3801 {
3802 asection *curr;
3803 asection *next;
3804 bfd_vma stub_group_start = head->output_offset;
3805 bfd_vma end_of_next;
3806
3807 curr = head;
3808 while (NEXT_SEC (curr) != NULL)
3809 {
3810 next = NEXT_SEC (curr);
3811 end_of_next = next->output_offset + next->size;
3812 if (end_of_next - stub_group_start >= stub_group_size)
3813 /* End of NEXT is too far from start, so stop. */
3814 break;
3815 /* Add NEXT to the group. */
3816 curr = next;
3817 }
3818
3819 /* OK, the size from the start to the start of CURR is less
3820 than stub_group_size and thus can be handled by one stub
3821 section. (Or the head section is itself larger than
3822 stub_group_size, in which case we may be toast.)
3823 We should really be keeping track of the total size of
3824 stubs added here, as stubs contribute to the final output
3825 section size. */
3826 do
3827 {
3828 next = NEXT_SEC (head);
3829 /* Set up this stub group. */
3830 htab->stub_group[head->id].link_sec = curr;
3831 }
3832 while (head != curr && (head = next) != NULL);
3833
3834 /* But wait, there's more! Input sections up to stub_group_size
3835 bytes after the stub section can be handled by it too. */
3836 if (!stubs_always_after_branch)
3837 {
3838 stub_group_start = curr->output_offset + curr->size;
3839
3840 while (next != NULL)
3841 {
3842 end_of_next = next->output_offset + next->size;
3843 if (end_of_next - stub_group_start >= stub_group_size)
3844 /* End of NEXT is too far from stubs, so stop. */
3845 break;
3846 /* Add NEXT to the stub group. */
3847 head = next;
3848 next = NEXT_SEC (head);
3849 htab->stub_group[head->id].link_sec = curr;
3850 }
3851 }
3852 head = next;
3853 }
3854 }
3855 while (list++ != htab->input_list + htab->top_index);
3856
3857 free (htab->input_list);
3858 #undef PREV_SEC
3859 #undef NEXT_SEC
3860 }
3861
3862 /* Comparison function for sorting/searching relocations relating to Cortex-A8
3863 erratum fix. */
3864
3865 static int
3866 a8_reloc_compare (const void *a, const void *b)
3867 {
3868 const struct a8_erratum_reloc *ra = a, *rb = b;
3869
3870 if (ra->from < rb->from)
3871 return -1;
3872 else if (ra->from > rb->from)
3873 return 1;
3874 else
3875 return 0;
3876 }
3877
3878 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
3879 const char *, char **);
3880
3881 /* Helper function to scan code for sequences which might trigger the Cortex-A8
3882 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
3883 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
3884 otherwise. */
3885
3886 static bfd_boolean
3887 cortex_a8_erratum_scan (bfd *input_bfd,
3888 struct bfd_link_info *info,
3889 struct a8_erratum_fix **a8_fixes_p,
3890 unsigned int *num_a8_fixes_p,
3891 unsigned int *a8_fix_table_size_p,
3892 struct a8_erratum_reloc *a8_relocs,
3893 unsigned int num_a8_relocs)
3894 {
3895 asection *section;
3896 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3897 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
3898 unsigned int num_a8_fixes = *num_a8_fixes_p;
3899 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
3900
3901 for (section = input_bfd->sections;
3902 section != NULL;
3903 section = section->next)
3904 {
3905 bfd_byte *contents = NULL;
3906 struct _arm_elf_section_data *sec_data;
3907 unsigned int span;
3908 bfd_vma base_vma;
3909
3910 if (elf_section_type (section) != SHT_PROGBITS
3911 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3912 || (section->flags & SEC_EXCLUDE) != 0
3913 || (section->sec_info_type == ELF_INFO_TYPE_JUST_SYMS)
3914 || (section->output_section == bfd_abs_section_ptr))
3915 continue;
3916
3917 base_vma = section->output_section->vma + section->output_offset;
3918
3919 if (elf_section_data (section)->this_hdr.contents != NULL)
3920 contents = elf_section_data (section)->this_hdr.contents;
3921 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3922 return TRUE;
3923
3924 sec_data = elf32_arm_section_data (section);
3925
3926 for (span = 0; span < sec_data->mapcount; span++)
3927 {
3928 unsigned int span_start = sec_data->map[span].vma;
3929 unsigned int span_end = (span == sec_data->mapcount - 1)
3930 ? section->size : sec_data->map[span + 1].vma;
3931 unsigned int i;
3932 char span_type = sec_data->map[span].type;
3933 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
3934
3935 if (span_type != 't')
3936 continue;
3937
3938 /* Span is entirely within a single 4KB region: skip scanning. */
3939 if (((base_vma + span_start) & ~0xfff)
3940 == ((base_vma + span_end) & ~0xfff))
3941 continue;
3942
3943 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
3944
3945 * The opcode is BLX.W, BL.W, B.W, Bcc.W
3946 * The branch target is in the same 4KB region as the
3947 first half of the branch.
3948 * The instruction before the branch is a 32-bit
3949 length non-branch instruction. */
3950 for (i = span_start; i < span_end;)
3951 {
3952 unsigned int insn = bfd_getl16 (&contents[i]);
3953 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
3954 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
3955
3956 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
3957 insn_32bit = TRUE;
3958
3959 if (insn_32bit)
3960 {
3961 /* Load the rest of the insn (in manual-friendly order). */
3962 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
3963
3964 /* Encoding T4: B<c>.W. */
3965 is_b = (insn & 0xf800d000) == 0xf0009000;
3966 /* Encoding T1: BL<c>.W. */
3967 is_bl = (insn & 0xf800d000) == 0xf000d000;
3968 /* Encoding T2: BLX<c>.W. */
3969 is_blx = (insn & 0xf800d000) == 0xf000c000;
3970 /* Encoding T3: B<c>.W (not permitted in IT block). */
3971 is_bcc = (insn & 0xf800d000) == 0xf0008000
3972 && (insn & 0x07f00000) != 0x03800000;
3973 }
3974
3975 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
3976
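/* The erratum can only be triggered by a 32-bit branch whose first
   halfword occupies the last two bytes of a 4KB region (address ending
   in 0xffe), i.e. the branch straddles the 4KB boundary, and which is
   preceded by a 32-bit non-branch instruction.  */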
3977 if (((base_vma + i) & 0xfff) == 0xffe
3978 && insn_32bit
3979 && is_32bit_branch
3980 && last_was_32bit
3981 && ! last_was_branch)
3982 {
3983 bfd_signed_vma offset;
3984 bfd_boolean force_target_arm = FALSE;
3985 bfd_boolean force_target_thumb = FALSE;
3986 bfd_vma target;
3987 enum elf32_arm_stub_type stub_type = arm_stub_none;
3988 struct a8_erratum_reloc key, *found;
3989
3990 key.from = base_vma + i;
3991 found = bsearch (&key, a8_relocs, num_a8_relocs,
3992 sizeof (struct a8_erratum_reloc),
3993 &a8_reloc_compare);
3994
3995 if (found)
3996 {
3997 char *error_message = NULL;
3998 struct elf_link_hash_entry *entry;
3999
4000 /* We don't care about the error returned from this
4001 function, only if there is glue or not. */
4002 entry = find_thumb_glue (info, found->sym_name,
4003 &error_message);
4004
4005 if (entry)
4006 found->non_a8_stub = TRUE;
4007
4008 if (found->r_type == R_ARM_THM_CALL
4009 && found->st_type != STT_ARM_TFUNC)
4010 force_target_arm = TRUE;
4011 else if (found->r_type == R_ARM_THM_CALL
4012 && found->st_type == STT_ARM_TFUNC)
4013 force_target_thumb = TRUE;
4014 }
4015
4016 /* Check if we have an offending branch instruction. */
4017
4018 if (found && found->non_a8_stub)
4019 /* We've already made a stub for this instruction, e.g.
4020 it's a long branch or a Thumb->ARM stub. Assume that
4021 stub will suffice to work around the A8 erratum (see
4022 setting of always_after_branch above). */
4023 ;
4024 else if (is_bcc)
4025 {
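/* Reconstruct the branch offset from the B<c>.W (encoding T3) fields:
   offset = sign_extend (S:J2:J1:imm6:imm11:'0'), a 21-bit signed
   result.  */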
4026 offset = (insn & 0x7ff) << 1;
4027 offset |= (insn & 0x3f0000) >> 4;
4028 offset |= (insn & 0x2000) ? 0x40000 : 0;
4029 offset |= (insn & 0x800) ? 0x80000 : 0;
4030 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4031 if (offset & 0x100000)
4032 offset |= ~ ((bfd_signed_vma) 0xfffff);
4033 stub_type = arm_stub_a8_veneer_b_cond;
4034 }
4035 else if (is_b || is_bl || is_blx)
4036 {
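/* Reconstruct the branch offset from the B.W/BL/BLX fields (encodings
   T4/T1/T2): I1 = NOT (J1 EOR S), I2 = NOT (J2 EOR S),
   offset = sign_extend (S:I1:I2:imm10:imm11:'0'), a 25-bit signed
   result.  */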
4037 int s = (insn & 0x4000000) != 0;
4038 int j1 = (insn & 0x2000) != 0;
4039 int j2 = (insn & 0x800) != 0;
4040 int i1 = !(j1 ^ s);
4041 int i2 = !(j2 ^ s);
4042
4043 offset = (insn & 0x7ff) << 1;
4044 offset |= (insn & 0x3ff0000) >> 4;
4045 offset |= i2 << 22;
4046 offset |= i1 << 23;
4047 offset |= s << 24;
4048 if (offset & 0x1000000)
4049 offset |= ~ ((bfd_signed_vma) 0xffffff);
4050
4051 if (is_blx)
4052 offset &= ~ ((bfd_signed_vma) 3);
4053
4054 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4055 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4056 }
4057
4058 if (stub_type != arm_stub_none)
4059 {
4060 bfd_vma pc_for_insn = base_vma + i + 4;
4061
4062 /* The original instruction is a BL, but the target is
4063 an ARM instruction. If we were not making a stub,
4064 the BL would have been converted to a BLX. Use the
4065 BLX stub instead in that case. */
4066 if (htab->use_blx && force_target_arm
4067 && stub_type == arm_stub_a8_veneer_bl)
4068 {
4069 stub_type = arm_stub_a8_veneer_blx;
4070 is_blx = TRUE;
4071 is_bl = FALSE;
4072 }
4073 /* Conversely, if the original instruction was
4074 BLX but the target is Thumb mode, use the BL
4075 stub. */
4076 else if (force_target_thumb
4077 && stub_type == arm_stub_a8_veneer_blx)
4078 {
4079 stub_type = arm_stub_a8_veneer_bl;
4080 is_blx = FALSE;
4081 is_bl = TRUE;
4082 }
4083
4084 if (is_blx)
4085 pc_for_insn &= ~ ((bfd_vma) 3);
4086
4087 /* If we found a relocation, use the proper destination,
4088 not the offset in the (unrelocated) instruction.
4089 Note this is always done if we switched the stub type
4090 above. */
4091 if (found)
4092 offset =
4093 (bfd_signed_vma) (found->destination - pc_for_insn);
4094
4095 target = pc_for_insn + offset;
4096
4097 /* The BLX stub is ARM-mode code. Adjust the offset to
4098 take the different PC value (+8 instead of +4) into
4099 account. */
4100 if (stub_type == arm_stub_a8_veneer_blx)
4101 offset += 4;
4102
4103 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4104 {
4105 char *stub_name;
4106
4107 if (num_a8_fixes == a8_fix_table_size)
4108 {
4109 a8_fix_table_size *= 2;
4110 a8_fixes = bfd_realloc (a8_fixes,
4111 sizeof (struct a8_erratum_fix)
4112 * a8_fix_table_size);
4113 }
4114
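/* Name the fix after the section id and the offset of the offending
   branch, both in hex: two 8-digit values, a colon and a NUL fit in
   8 + 1 + 8 + 1 bytes.  */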
4115 stub_name = bfd_malloc (8 + 1 + 8 + 1);
4116 if (stub_name != NULL)
4117 sprintf (stub_name, "%x:%x", section->id, i);
4118
4119 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4120 a8_fixes[num_a8_fixes].section = section;
4121 a8_fixes[num_a8_fixes].offset = i;
4122 a8_fixes[num_a8_fixes].addend = offset;
4123 a8_fixes[num_a8_fixes].orig_insn = insn;
4124 a8_fixes[num_a8_fixes].stub_name = stub_name;
4125 a8_fixes[num_a8_fixes].stub_type = stub_type;
4126
4127 num_a8_fixes++;
4128 }
4129 }
4130 }
4131
4132 i += insn_32bit ? 4 : 2;
4133 last_was_32bit = insn_32bit;
4134 last_was_branch = is_32bit_branch;
4135 }
4136 }
4137
4138 if (elf_section_data (section)->this_hdr.contents == NULL)
4139 free (contents);
4140 }
4141
4142 *a8_fixes_p = a8_fixes;
4143 *num_a8_fixes_p = num_a8_fixes;
4144 *a8_fix_table_size_p = a8_fix_table_size;
4145
4146 return FALSE;
4147 }
4148
4149 /* Determine and set the size of the stub section for a final link.
4150
4151 The basic idea here is to examine all the relocations looking for
4152 PC-relative calls to a target that is unreachable with a "bl"
4153 instruction. */
4154
4155 bfd_boolean
4156 elf32_arm_size_stubs (bfd *output_bfd,
4157 bfd *stub_bfd,
4158 struct bfd_link_info *info,
4159 bfd_signed_vma group_size,
4160 asection * (*add_stub_section) (const char *, asection *),
4161 void (*layout_sections_again) (void))
4162 {
4163 bfd_size_type stub_group_size;
4164 bfd_boolean stubs_always_after_branch;
4165 bfd_boolean stub_changed = 0;
4166 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4167 struct a8_erratum_fix *a8_fixes = NULL;
4168 unsigned int num_a8_fixes = 0, prev_num_a8_fixes = 0, a8_fix_table_size = 10;
4169 struct a8_erratum_reloc *a8_relocs = NULL;
4170 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
4171
4172 if (htab->fix_cortex_a8)
4173 {
4174 a8_fixes = bfd_zmalloc (sizeof (struct a8_erratum_fix)
4175 * a8_fix_table_size);
4176 a8_relocs = bfd_zmalloc (sizeof (struct a8_erratum_reloc)
4177 * a8_reloc_table_size);
4178 }
4179
4180 /* Propagate mach to stub bfd, because it may not have been
4181 finalized when we created stub_bfd. */
4182 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4183 bfd_get_mach (output_bfd));
4184
4185 /* Stash our params away. */
4186 htab->stub_bfd = stub_bfd;
4187 htab->add_stub_section = add_stub_section;
4188 htab->layout_sections_again = layout_sections_again;
4189 stubs_always_after_branch = group_size < 0;
4190
4191 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
4192 as the first half of a 32-bit branch straddling two 4K pages. This is a
4193 crude way of enforcing that. */
4194 if (htab->fix_cortex_a8)
4195 stubs_always_after_branch = 1;
4196
4197 if (group_size < 0)
4198 stub_group_size = -group_size;
4199 else
4200 stub_group_size = group_size;
4201
4202 if (stub_group_size == 1)
4203 {
4204 /* Default values. */
4205 /* The Thumb branch range of +-4MB has to be used as the default
4206 maximum stub group size (a given section can contain both ARM and
4207 Thumb code, so the worst case has to be taken into account).
4208
4209 This value is 24K less than that, which allows for 2025
4210 12-byte stubs. If we exceed that, then we will fail to link.
4211 The user will have to relink with an explicit group size
4212 option. */
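/* (Arithmetic check: 4MB = 4194304 bytes; 4194304 - 4170000 = 24304,
   roughly 24K, and 24304 / 12 = 2025 stubs.)  */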
4213 stub_group_size = 4170000;
4214 }
4215
4216 group_sections (htab, stub_group_size, stubs_always_after_branch);
4217
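/* Iterate until the set of required stubs (and, when enabled, of
   Cortex-A8 erratum fixes) stops changing: each pass re-lays out the
   sections via layout_sections_again, which can move addresses and
   hence change which branches need stubs.  */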
4218 while (1)
4219 {
4220 bfd *input_bfd;
4221 unsigned int bfd_indx;
4222 asection *stub_sec;
4223
4224 num_a8_fixes = 0;
4225
4226 for (input_bfd = info->input_bfds, bfd_indx = 0;
4227 input_bfd != NULL;
4228 input_bfd = input_bfd->link_next, bfd_indx++)
4229 {
4230 Elf_Internal_Shdr *symtab_hdr;
4231 asection *section;
4232 Elf_Internal_Sym *local_syms = NULL;
4233
4234 num_a8_relocs = 0;
4235
4236 /* We'll need the symbol table in a second. */
4237 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4238 if (symtab_hdr->sh_info == 0)
4239 continue;
4240
4241 /* Walk over each section attached to the input bfd. */
4242 for (section = input_bfd->sections;
4243 section != NULL;
4244 section = section->next)
4245 {
4246 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
4247
4248 /* If there aren't any relocs, then there's nothing more
4249 to do. */
4250 if ((section->flags & SEC_RELOC) == 0
4251 || section->reloc_count == 0
4252 || (section->flags & SEC_CODE) == 0)
4253 continue;
4254
4255 /* If this section is a link-once section that will be
4256 discarded, then don't create any stubs. */
4257 if (section->output_section == NULL
4258 || section->output_section->owner != output_bfd)
4259 continue;
4260
4261 /* Get the relocs. */
4262 internal_relocs
4263 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
4264 NULL, info->keep_memory);
4265 if (internal_relocs == NULL)
4266 goto error_ret_free_local;
4267
4268 /* Now examine each relocation. */
4269 irela = internal_relocs;
4270 irelaend = irela + section->reloc_count;
4271 for (; irela < irelaend; irela++)
4272 {
4273 unsigned int r_type, r_indx;
4274 enum elf32_arm_stub_type stub_type;
4275 struct elf32_arm_stub_hash_entry *stub_entry;
4276 asection *sym_sec;
4277 bfd_vma sym_value;
4278 bfd_vma destination;
4279 struct elf32_arm_link_hash_entry *hash;
4280 const char *sym_name;
4281 char *stub_name;
4282 const asection *id_sec;
4283 unsigned char st_type;
4284 bfd_boolean created_stub = FALSE;
4285
4286 r_type = ELF32_R_TYPE (irela->r_info);
4287 r_indx = ELF32_R_SYM (irela->r_info);
4288
4289 if (r_type >= (unsigned int) R_ARM_max)
4290 {
4291 bfd_set_error (bfd_error_bad_value);
4292 error_ret_free_internal:
4293 if (elf_section_data (section)->relocs == NULL)
4294 free (internal_relocs);
4295 goto error_ret_free_local;
4296 }
4297
4298 /* Only look for stubs on branch instructions. */
4299 if ((r_type != (unsigned int) R_ARM_CALL)
4300 && (r_type != (unsigned int) R_ARM_THM_CALL)
4301 && (r_type != (unsigned int) R_ARM_JUMP24)
4302 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
4303 && (r_type != (unsigned int) R_ARM_THM_XPC22)
4304 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
4305 && (r_type != (unsigned int) R_ARM_PLT32))
4306 continue;
4307
4308 /* Now determine the call target, its name, value,
4309 section. */
4310 sym_sec = NULL;
4311 sym_value = 0;
4312 destination = 0;
4313 hash = NULL;
4314 sym_name = NULL;
4315 if (r_indx < symtab_hdr->sh_info)
4316 {
4317 /* It's a local symbol. */
4318 Elf_Internal_Sym *sym;
4319 Elf_Internal_Shdr *hdr;
4320
4321 if (local_syms == NULL)
4322 {
4323 local_syms
4324 = (Elf_Internal_Sym *) symtab_hdr->contents;
4325 if (local_syms == NULL)
4326 local_syms
4327 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
4328 symtab_hdr->sh_info, 0,
4329 NULL, NULL, NULL);
4330 if (local_syms == NULL)
4331 goto error_ret_free_internal;
4332 }
4333
4334 sym = local_syms + r_indx;
4335 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
4336 sym_sec = hdr->bfd_section;
4337 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
4338 sym_value = sym->st_value;
4339 destination = (sym_value + irela->r_addend
4340 + sym_sec->output_offset
4341 + sym_sec->output_section->vma);
4342 st_type = ELF_ST_TYPE (sym->st_info);
4343 sym_name
4344 = bfd_elf_string_from_elf_section (input_bfd,
4345 symtab_hdr->sh_link,
4346 sym->st_name);
4347 }
4348 else
4349 {
4350 /* It's an external symbol. */
4351 int e_indx;
4352
4353 e_indx = r_indx - symtab_hdr->sh_info;
4354 hash = ((struct elf32_arm_link_hash_entry *)
4355 elf_sym_hashes (input_bfd)[e_indx]);
4356
4357 while (hash->root.root.type == bfd_link_hash_indirect
4358 || hash->root.root.type == bfd_link_hash_warning)
4359 hash = ((struct elf32_arm_link_hash_entry *)
4360 hash->root.root.u.i.link);
4361
4362 if (hash->root.root.type == bfd_link_hash_defined
4363 || hash->root.root.type == bfd_link_hash_defweak)
4364 {
4365 struct elf32_arm_link_hash_table *globals
4366 = elf32_arm_hash_table (info);
4367 
4368 sym_sec = hash->root.root.u.def.section;
4369 sym_value = hash->root.root.u.def.value;
4370
4371 /* For a destination in a shared library,
4372 use the PLT stub as target address to
4373 decide whether a branch stub is
4374 needed. */
4375 if (globals->splt != NULL && hash != NULL
4376 && hash->root.plt.offset != (bfd_vma) -1)
4377 {
4378 sym_sec = globals->splt;
4379 sym_value = hash->root.plt.offset;
4380 if (sym_sec->output_section != NULL)
4381 destination = (sym_value
4382 + sym_sec->output_offset
4383 + sym_sec->output_section->vma);
4384 }
4385 else if (sym_sec->output_section != NULL)
4386 destination = (sym_value + irela->r_addend
4387 + sym_sec->output_offset
4388 + sym_sec->output_section->vma);
4389 }
4390 else if ((hash->root.root.type == bfd_link_hash_undefined)
4391 || (hash->root.root.type == bfd_link_hash_undefweak))
4392 {
4393 /* For a shared library, use the PLT stub as
4394 target address to decide whether a long
4395 branch stub is needed.
4396 For absolute code, such undefined symbols cannot be handled. */
4397 struct elf32_arm_link_hash_table *globals =
4398 elf32_arm_hash_table (info);
4399
4400 if (globals->splt != NULL && hash != NULL
4401 && hash->root.plt.offset != (bfd_vma) -1)
4402 {
4403 sym_sec = globals->splt;
4404 sym_value = hash->root.plt.offset;
4405 if (sym_sec->output_section != NULL)
4406 destination = (sym_value
4407 + sym_sec->output_offset
4408 + sym_sec->output_section->vma);
4409 }
4410 else
4411 continue;
4412 }
4413 else
4414 {
4415 bfd_set_error (bfd_error_bad_value);
4416 goto error_ret_free_internal;
4417 }
4418 st_type = ELF_ST_TYPE (hash->root.type);
4419 sym_name = hash->root.root.root.string;
4420 }
4421
4422 do
4423 {
4424 /* Determine what (if any) linker stub is needed. */
4425 stub_type = arm_type_of_stub (info, section, irela,
4426 st_type, hash,
4427 destination, sym_sec,
4428 input_bfd, sym_name);
4429 if (stub_type == arm_stub_none)
4430 break;
4431
4432 /* Support for grouping stub sections. */
4433 id_sec = htab->stub_group[section->id].link_sec;
4434
4435 /* Get the name of this stub. */
4436 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
4437 irela);
4438 if (!stub_name)
4439 goto error_ret_free_internal;
4440
4441 /* We've either created a stub for this reloc already,
4442 or we are about to. */
4443 created_stub = TRUE;
4444
4445 stub_entry = arm_stub_hash_lookup
4446 (&htab->stub_hash_table, stub_name,
4447 FALSE, FALSE);
4448 if (stub_entry != NULL)
4449 {
4450 /* The proper stub has already been created. */
4451 free (stub_name);
4452 break;
4453 }
4454
4455 stub_entry = elf32_arm_add_stub (stub_name, section,
4456 htab);
4457 if (stub_entry == NULL)
4458 {
4459 free (stub_name);
4460 goto error_ret_free_internal;
4461 }
4462
4463 stub_entry->target_value = sym_value;
4464 stub_entry->target_section = sym_sec;
4465 stub_entry->stub_type = stub_type;
4466 stub_entry->h = hash;
4467 stub_entry->st_type = st_type;
4468
4469 if (sym_name == NULL)
4470 sym_name = "unnamed";
4471 stub_entry->output_name
4472 = bfd_alloc (htab->stub_bfd,
4473 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
4474 + strlen (sym_name));
4475 if (stub_entry->output_name == NULL)
4476 {
4477 free (stub_name);
4478 goto error_ret_free_internal;
4479 }
4480
4481 /* For historical reasons, use the existing names for
4482 ARM-to-Thumb and Thumb-to-ARM stubs. */
4483 if ( ((r_type == (unsigned int) R_ARM_THM_CALL)
4484 || (r_type == (unsigned int) R_ARM_THM_JUMP24))
4485 && st_type != STT_ARM_TFUNC)
4486 sprintf (stub_entry->output_name,
4487 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
4488 else if ( ((r_type == (unsigned int) R_ARM_CALL)
4489 || (r_type == (unsigned int) R_ARM_JUMP24))
4490 && st_type == STT_ARM_TFUNC)
4491 sprintf (stub_entry->output_name,
4492 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
4493 else
4494 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
4495 sym_name);
4496
4497 stub_changed = TRUE;
4498 }
4499 while (0);
4500
4501 /* Look for relocations which might trigger the Cortex-A8
4502 erratum. */
4503 if (htab->fix_cortex_a8
4504 && (r_type == (unsigned int) R_ARM_THM_JUMP24
4505 || r_type == (unsigned int) R_ARM_THM_JUMP19
4506 || r_type == (unsigned int) R_ARM_THM_CALL
4507 || r_type == (unsigned int) R_ARM_THM_XPC22))
4508 {
4509 bfd_vma from = section->output_section->vma
4510 + section->output_offset
4511 + irela->r_offset;
4512
4513 if ((from & 0xfff) == 0xffe)
4514 {
4515 /* Found a candidate. Note we haven't checked the
4516 destination is within 4K here: if we do so (and
4517 don't create an entry in a8_relocs) we can't tell
4518 that a branch should have been relocated when
4519 scanning later. */
4520 if (num_a8_relocs == a8_reloc_table_size)
4521 {
4522 a8_reloc_table_size *= 2;
4523 a8_relocs = bfd_realloc (a8_relocs,
4524 sizeof (struct a8_erratum_reloc)
4525 * a8_reloc_table_size);
4526 }
4527
4528 a8_relocs[num_a8_relocs].from = from;
4529 a8_relocs[num_a8_relocs].destination = destination;
4530 a8_relocs[num_a8_relocs].r_type = r_type;
4531 a8_relocs[num_a8_relocs].st_type = st_type;
4532 a8_relocs[num_a8_relocs].sym_name = sym_name;
4533 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
4534
4535 num_a8_relocs++;
4536 }
4537 }
4538 }
4539
4540 /* We're done with the internal relocs, free them. */
4541 if (elf_section_data (section)->relocs == NULL)
4542 free (internal_relocs);
4543 }
4544
4545 if (htab->fix_cortex_a8)
4546 {
4547 /* Sort relocs which might apply to the Cortex-A8 erratum. */
4548 qsort (a8_relocs, num_a8_relocs, sizeof (struct a8_erratum_reloc),
4549 &a8_reloc_compare);
4550
4551 /* Scan for branches which might trigger the Cortex-A8 erratum. */
4552 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
4553 &num_a8_fixes, &a8_fix_table_size,
4554 a8_relocs, num_a8_relocs) != 0)
4555 goto error_ret_free_local;
4556 }
4557 }
4558
4559 if (htab->fix_cortex_a8 && num_a8_fixes != prev_num_a8_fixes)
4560 stub_changed = TRUE;
4561
4562 if (!stub_changed)
4563 break;
4564
4565 /* OK, we've added some stubs. Find out the new size of the
4566 stub sections. */
4567 for (stub_sec = htab->stub_bfd->sections;
4568 stub_sec != NULL;
4569 stub_sec = stub_sec->next)
4570 {
4571 /* Ignore non-stub sections. */
4572 if (!strstr (stub_sec->name, STUB_SUFFIX))
4573 continue;
4574
4575 stub_sec->size = 0;
4576 }
4577
4578 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
4579
4580 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
4581 if (htab->fix_cortex_a8)
4582 for (i = 0; i < num_a8_fixes; i++)
4583 {
4584 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
4585 a8_fixes[i].section, htab);
4586
4587 if (stub_sec == NULL)
4588 goto error_ret_free_local;
4589
4590 stub_sec->size
4591 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
4592 NULL);
4593 }
4594
4595
4596 /* Ask the linker to do its stuff. */
4597 (*htab->layout_sections_again) ();
4598 stub_changed = FALSE;
4599 prev_num_a8_fixes = num_a8_fixes;
4600 }
4601
4602 /* Add stubs for Cortex-A8 erratum fixes now. */
4603 if (htab->fix_cortex_a8)
4604 {
4605 for (i = 0; i < num_a8_fixes; i++)
4606 {
4607 struct elf32_arm_stub_hash_entry *stub_entry;
4608 char *stub_name = a8_fixes[i].stub_name;
4609 asection *section = a8_fixes[i].section;
4610 unsigned int section_id = a8_fixes[i].section->id;
4611 asection *link_sec = htab->stub_group[section_id].link_sec;
4612 asection *stub_sec = htab->stub_group[section_id].stub_sec;
4613 const insn_sequence *template;
4614 int template_size, size = 0;
4615
4616 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4617 TRUE, FALSE);
4618 if (stub_entry == NULL)
4619 {
4620 (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
4621 section->owner,
4622 stub_name);
4623 return FALSE;
4624 }
4625
4626 stub_entry->stub_sec = stub_sec;
4627 stub_entry->stub_offset = 0;
4628 stub_entry->id_sec = link_sec;
4629 stub_entry->stub_type = a8_fixes[i].stub_type;
4630 stub_entry->target_section = a8_fixes[i].section;
4631 stub_entry->target_value = a8_fixes[i].offset;
4632 stub_entry->target_addend = a8_fixes[i].addend;
4633 stub_entry->orig_insn = a8_fixes[i].orig_insn;
4634 stub_entry->st_type = STT_ARM_TFUNC;
4635
4636 size = find_stub_size_and_template (a8_fixes[i].stub_type, &template,
4637 &template_size);
4638
4639 stub_entry->stub_size = size;
4640 stub_entry->stub_template = template;
4641 stub_entry->stub_template_size = template_size;
4642 }
4643
4644 /* Stash the Cortex-A8 erratum fix array for use later in
4645 elf32_arm_write_section(). */
4646 htab->a8_erratum_fixes = a8_fixes;
4647 htab->num_a8_erratum_fixes = num_a8_fixes;
4648 }
4649 else
4650 {
4651 htab->a8_erratum_fixes = NULL;
4652 htab->num_a8_erratum_fixes = 0;
4653 }
4654 return TRUE;
4655
4656 error_ret_free_local:
4657 return FALSE;
4658 }
4659
4660 /* Build all the stubs associated with the current output file. The
4661 stubs are kept in a hash table attached to the main linker hash
4662 table. We also set up the .plt entries for statically linked PIC
4663 functions here. This function is called via arm_elf_finish in the
4664 linker. */
4665
4666 bfd_boolean
4667 elf32_arm_build_stubs (struct bfd_link_info *info)
4668 {
4669 asection *stub_sec;
4670 struct bfd_hash_table *table;
4671 struct elf32_arm_link_hash_table *htab;
4672
4673 htab = elf32_arm_hash_table (info);
4674
4675 for (stub_sec = htab->stub_bfd->sections;
4676 stub_sec != NULL;
4677 stub_sec = stub_sec->next)
4678 {
4679 bfd_size_type size;
4680
4681 /* Ignore non-stub sections. */
4682 if (!strstr (stub_sec->name, STUB_SUFFIX))
4683 continue;
4684
4685 /* Allocate memory to hold the linker stubs. */
4686 size = stub_sec->size;
4687 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
4688 if (stub_sec->contents == NULL && size != 0)
4689 return FALSE;
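/* Reset the size: arm_build_one_stub uses stub_sec->size as the
   running offset at which to place each stub, incrementing it again
   as the stubs are emitted.  */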
4690 stub_sec->size = 0;
4691 }
4692
4693 /* Build the stubs as directed by the stub hash table. */
4694 table = &htab->stub_hash_table;
4695 bfd_hash_traverse (table, arm_build_one_stub, info);
4696
4697 return TRUE;
4698 }
4699
4700 /* Locate the Thumb encoded calling stub for NAME. */
4701
4702 static struct elf_link_hash_entry *
4703 find_thumb_glue (struct bfd_link_info *link_info,
4704 const char *name,
4705 char **error_message)
4706 {
4707 char *tmp_name;
4708 struct elf_link_hash_entry *hash;
4709 struct elf32_arm_link_hash_table *hash_table;
4710
4711 /* We need a pointer to the armelf specific hash table. */
4712 hash_table = elf32_arm_hash_table (link_info);
4713
4714 tmp_name = bfd_malloc ((bfd_size_type) strlen (name)
4715 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
4716
4717 BFD_ASSERT (tmp_name);
4718
4719 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
4720
4721 hash = elf_link_hash_lookup
4722 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4723
4724 if (hash == NULL
4725 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
4726 tmp_name, name) == -1)
4727 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4728
4729 free (tmp_name);
4730
4731 return hash;
4732 }
4733
4734 /* Locate the ARM encoded calling stub for NAME. */
4735
4736 static struct elf_link_hash_entry *
4737 find_arm_glue (struct bfd_link_info *link_info,
4738 const char *name,
4739 char **error_message)
4740 {
4741 char *tmp_name;
4742 struct elf_link_hash_entry *myh;
4743 struct elf32_arm_link_hash_table *hash_table;
4744
4745 /* We need a pointer to the elfarm specific hash table. */
4746 hash_table = elf32_arm_hash_table (link_info);
4747
4748 tmp_name = bfd_malloc ((bfd_size_type) strlen (name)
4749 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4750
4751 BFD_ASSERT (tmp_name);
4752
4753 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4754
4755 myh = elf_link_hash_lookup
4756 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4757
4758 if (myh == NULL
4759 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
4760 tmp_name, name) == -1)
4761 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4762
4763 free (tmp_name);
4764
4765 return myh;
4766 }
4767
4768 /* ARM->Thumb glue (static images):
4769
4770 .arm
4771 __func_from_arm:
4772 ldr r12, __func_addr
4773 bx r12
4774 __func_addr:
4775 .word func @ behave as if you saw an ARM_32 reloc.
4776
4777 (v5t static images)
4778 .arm
4779 __func_from_arm:
4780 ldr pc, __func_addr
4781 __func_addr:
4782 .word func @ behave as if you saw an ARM_32 reloc.
4783
4784 (relocatable images)
4785 .arm
4786 __func_from_arm:
4787 ldr r12, __func_offset
4788 add r12, r12, pc
4789 bx r12
4790 __func_offset:
4791 .word func - . */
4792
4793 #define ARM2THUMB_STATIC_GLUE_SIZE 12
4794 static const insn32 a2t1_ldr_insn = 0xe59fc000;
4795 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
4796 static const insn32 a2t3_func_addr_insn = 0x00000001;
4797
4798 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
4799 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
4800 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
4801
4802 #define ARM2THUMB_PIC_GLUE_SIZE 16
4803 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
4804 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
4805 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
4806
4807 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
4808
4809 .thumb .thumb
4810 .align 2 .align 2
4811 __func_from_thumb: __func_from_thumb:
4812 bx pc push {r6, lr}
4813 nop ldr r6, __func_addr
4814 .arm mov lr, pc
4815 b func bx r6
4816 .arm
4817 ;; back_to_thumb
4818 ldmia r13!, {r6, lr}
4819 bx lr
4820 __func_addr:
4821 .word func */
4822
4823 #define THUMB2ARM_GLUE_SIZE 8
4824 static const insn16 t2a1_bx_pc_insn = 0x4778;
4825 static const insn16 t2a2_noop_insn = 0x46c0;
4826 static const insn32 t2a3_b_insn = 0xea000000;
4827
4828 #define VFP11_ERRATUM_VENEER_SIZE 8
4829
4830 #define ARM_BX_VENEER_SIZE 12
4831 static const insn32 armbx1_tst_insn = 0xe3100001;
4832 static const insn32 armbx2_moveq_insn = 0x01a0f000;
4833 static const insn32 armbx3_bx_insn = 0xe12fff10;
4834
4835 #ifndef ELFARM_NABI_C_INCLUDED
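/* Allocate SIZE bytes of contents for the glue section NAME in ABFD,
   or exclude the section from the output if SIZE is zero.  */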
4836 static void
4837 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
4838 {
4839 asection * s;
4840 bfd_byte * contents;
4841
4842 if (size == 0)
4843 {
4844 /* Do not include empty glue sections in the output. */
4845 if (abfd != NULL)
4846 {
4847 s = bfd_get_section_by_name (abfd, name);
4848 if (s != NULL)
4849 s->flags |= SEC_EXCLUDE;
4850 }
4851 return;
4852 }
4853
4854 BFD_ASSERT (abfd != NULL);
4855
4856 s = bfd_get_section_by_name (abfd, name);
4857 BFD_ASSERT (s != NULL);
4858
4859 contents = bfd_alloc (abfd, size);
4860
4861 BFD_ASSERT (s->size == size);
4862 s->contents = contents;
4863 }
4864
4865 bfd_boolean
4866 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
4867 {
4868 struct elf32_arm_link_hash_table * globals;
4869
4870 globals = elf32_arm_hash_table (info);
4871 BFD_ASSERT (globals != NULL);
4872
4873 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4874 globals->arm_glue_size,
4875 ARM2THUMB_GLUE_SECTION_NAME);
4876
4877 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4878 globals->thumb_glue_size,
4879 THUMB2ARM_GLUE_SECTION_NAME);
4880
4881 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4882 globals->vfp11_erratum_glue_size,
4883 VFP11_ERRATUM_VENEER_SECTION_NAME);
4884
4885 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4886 globals->bx_glue_size,
4887 ARM_BX_GLUE_SECTION_NAME);
4888
4889 return TRUE;
4890 }
4891
4892 /* Allocate space and symbols for calling a Thumb function from ARM mode.
4893 Returns the symbol identifying the stub. */
4894
4895 static struct elf_link_hash_entry *
4896 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
4897 struct elf_link_hash_entry * h)
4898 {
4899 const char * name = h->root.root.string;
4900 asection * s;
4901 char * tmp_name;
4902 struct elf_link_hash_entry * myh;
4903 struct bfd_link_hash_entry * bh;
4904 struct elf32_arm_link_hash_table * globals;
4905 bfd_vma val;
4906 bfd_size_type size;
4907
4908 globals = elf32_arm_hash_table (link_info);
4909
4910 BFD_ASSERT (globals != NULL);
4911 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
4912
4913 s = bfd_get_section_by_name
4914 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
4915
4916 BFD_ASSERT (s != NULL);
4917
4918 tmp_name = bfd_malloc ((bfd_size_type) strlen (name) + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4919
4920 BFD_ASSERT (tmp_name);
4921
4922 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4923
4924 myh = elf_link_hash_lookup
4925 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
4926
4927 if (myh != NULL)
4928 {
4929 /* We've already seen this guy. */
4930 free (tmp_name);
4931 return myh;
4932 }
4933
4934 /* The only trick here is using globals->arm_glue_size as the value.
4935 Even though the section isn't allocated yet, this is where we will be
4936 putting it. The +1 on the value marks that the stub has not been
4937 output yet - not that it is a Thumb function. */
4938 bh = NULL;
4939 val = globals->arm_glue_size + 1;
4940 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
4941 tmp_name, BSF_GLOBAL, s, val,
4942 NULL, TRUE, FALSE, &bh);
4943
4944 myh = (struct elf_link_hash_entry *) bh;
4945 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
4946 myh->forced_local = 1;
4947
4948 free (tmp_name);
4949
4950 if (link_info->shared || globals->root.is_relocatable_executable
4951 || globals->pic_veneer)
4952 size = ARM2THUMB_PIC_GLUE_SIZE;
4953 else if (globals->use_blx)
4954 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
4955 else
4956 size = ARM2THUMB_STATIC_GLUE_SIZE;
4957
4958 s->size += size;
4959 globals->arm_glue_size += size;
4960
4961 return myh;
4962 }
4963
4964 /* Allocate space for ARMv4 BX veneers. */
4965
4966 static void
4967 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
4968 {
4969 asection * s;
4970 struct elf32_arm_link_hash_table *globals;
4971 char *tmp_name;
4972 struct elf_link_hash_entry *myh;
4973 struct bfd_link_hash_entry *bh;
4974 bfd_vma val;
4975
4976 /* BX PC does not need a veneer. */
4977 if (reg == 15)
4978 return;
4979
4980 globals = elf32_arm_hash_table (link_info);
4981
4982 BFD_ASSERT (globals != NULL);
4983 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
4984
4985 /* Check if this veneer has already been allocated. */
4986 if (globals->bx_glue_offset[reg])
4987 return;
4988
4989 s = bfd_get_section_by_name
4990 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
4991
4992 BFD_ASSERT (s != NULL);
4993
4994 /* Add symbol for veneer. */
4995 tmp_name = bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
4996
4997 BFD_ASSERT (tmp_name);
4998
4999 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
5000
5001 myh = elf_link_hash_lookup
5002 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
5003
5004 BFD_ASSERT (myh == NULL);
5005
5006 bh = NULL;
5007 val = globals->bx_glue_size;
5008 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5009 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5010 NULL, TRUE, FALSE, &bh);
5011
5012 myh = (struct elf_link_hash_entry *) bh;
5013 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5014 myh->forced_local = 1;
5015
5016 s->size += ARM_BX_VENEER_SIZE;
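/* Record the veneer's offset.  Bit 1 is set so that the entry is
   non-zero even for the first veneer (offset zero); the check near the
   top of this function treats a non-zero entry as "already
   allocated".  */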
5017 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
5018 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
5019 }
5020
5021
5022 /* Add an entry to the code/data map for section SEC. */
5023
5024 static void
5025 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5026 {
5027 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5028 unsigned int newidx;
5029
5030 if (sec_data->map == NULL)
5031 {
5032 sec_data->map = bfd_malloc (sizeof (elf32_arm_section_map));
5033 sec_data->mapcount = 0;
5034 sec_data->mapsize = 1;
5035 }
5036
5037 newidx = sec_data->mapcount++;
5038
5039 if (sec_data->mapcount > sec_data->mapsize)
5040 {
5041 sec_data->mapsize *= 2;
5042 sec_data->map = bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5043 * sizeof (elf32_arm_section_map));
5044 }
5045
5046 if (sec_data->map)
5047 {
5048 sec_data->map[newidx].vma = vma;
5049 sec_data->map[newidx].type = type;
5050 }
5051 }
5052
5053
5054 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5055 veneers are handled for now. */
5056
5057 static bfd_vma
5058 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5059 elf32_vfp11_erratum_list *branch,
5060 bfd *branch_bfd,
5061 asection *branch_sec,
5062 unsigned int offset)
5063 {
5064 asection *s;
5065 struct elf32_arm_link_hash_table *hash_table;
5066 char *tmp_name;
5067 struct elf_link_hash_entry *myh;
5068 struct bfd_link_hash_entry *bh;
5069 bfd_vma val;
5070 struct _arm_elf_section_data *sec_data;
5071 int errcount;
5072 elf32_vfp11_erratum_list *newerr;
5073
5074 hash_table = elf32_arm_hash_table (link_info);
5075
5076 BFD_ASSERT (hash_table != NULL);
5077 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5078
5079 s = bfd_get_section_by_name
5080 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5081
5082 BFD_ASSERT (s != NULL);
5083 
5084 sec_data = elf32_arm_section_data (s);
5085
5086 tmp_name = bfd_malloc ((bfd_size_type) strlen
5087 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5088
5089 BFD_ASSERT (tmp_name);
5090
5091 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5092 hash_table->num_vfp11_fixes);
5093
5094 myh = elf_link_hash_lookup
5095 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5096
5097 BFD_ASSERT (myh == NULL);
5098
5099 bh = NULL;
5100 val = hash_table->vfp11_erratum_glue_size;
5101 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5102 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5103 NULL, TRUE, FALSE, &bh);
5104
5105 myh = (struct elf_link_hash_entry *) bh;
5106 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5107 myh->forced_local = 1;
5108
5109 /* Link veneer back to calling location. */
5110 errcount = ++(sec_data->erratumcount);
5111 newerr = bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5112
5113 newerr->type = VFP11_ERRATUM_ARM_VENEER;
5114 newerr->vma = -1;
5115 newerr->u.v.branch = branch;
5116 newerr->u.v.id = hash_table->num_vfp11_fixes;
5117 branch->u.b.veneer = newerr;
5118
5119 newerr->next = sec_data->erratumlist;
5120 sec_data->erratumlist = newerr;
5121
5122 /* A symbol for the return from the veneer. */
5123 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
5124 hash_table->num_vfp11_fixes);
5125
5126 myh = elf_link_hash_lookup
5127 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5128
5129 if (myh != NULL)
5130 abort ();
5131
5132 bh = NULL;
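/* The return point for the veneer is 4 bytes past OFFSET, i.e. just
   after the (4-byte, ARM-mode) instruction at OFFSET that the veneer
   stands in for.  */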
5133 val = offset + 4;
5134 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
5135 branch_sec, val, NULL, TRUE, FALSE, &bh);
5136
5137 myh = (struct elf_link_hash_entry *) bh;
5138 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5139 myh->forced_local = 1;
5140
5141 free (tmp_name);
5142
5143 /* Generate a mapping symbol for the veneer section, and explicitly add an
5144 entry for that symbol to the code/data map for the section. */
5145 if (hash_table->vfp11_erratum_glue_size == 0)
5146 {
5147 bh = NULL;
5148 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
5149 ever requires this erratum fix. */
5150 _bfd_generic_link_add_one_symbol (link_info,
5151 hash_table->bfd_of_glue_owner, "$a",
5152 BSF_LOCAL, s, 0, NULL,
5153 TRUE, FALSE, &bh);
5154
5155 myh = (struct elf_link_hash_entry *) bh;
5156 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5157 myh->forced_local = 1;
5158
5159 /* The elf32_arm_init_maps function only cares about symbols from input
5160 BFDs. We must make a note of this generated mapping symbol
5161 ourselves so that code byteswapping works properly in
5162 elf32_arm_write_section. */
5163 elf32_arm_section_map_add (s, 'a', 0);
5164 }
5165
5166 s->size += VFP11_ERRATUM_VENEER_SIZE;
5167 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
5168 hash_table->num_vfp11_fixes++;
5169
5170 /* The offset of the veneer. */
5171 return val;
5172 }
5173
5174 #define ARM_GLUE_SECTION_FLAGS \
5175 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
5176 | SEC_READONLY | SEC_LINKER_CREATED)
5177
5178 /* Create a fake section for use by the ARM backend of the linker. */
5179
5180 static bfd_boolean
5181 arm_make_glue_section (bfd * abfd, const char * name)
5182 {
5183 asection * sec;
5184
5185 sec = bfd_get_section_by_name (abfd, name);
5186 if (sec != NULL)
5187 /* Already made. */
5188 return TRUE;
5189
5190 sec = bfd_make_section_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
5191
5192 if (sec == NULL
5193 || !bfd_set_section_alignment (abfd, sec, 2))
5194 return FALSE;
5195
5196 /* Set the gc mark to prevent the section from being removed by garbage
5197 collection, despite the fact that no relocs refer to this section. */
5198 sec->gc_mark = 1;
5199
5200 return TRUE;
5201 }
5202
5203 /* Add the glue sections to ABFD. This function is called from the
5204 linker scripts in ld/emultempl/{armelf}.em. */
5205
5206 bfd_boolean
5207 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
5208 struct bfd_link_info *info)
5209 {
5210 /* If we are only performing a partial
5211 link do not bother adding the glue. */
5212 if (info->relocatable)
5213 return TRUE;
5214
5215 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
5216 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
5217 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
5218 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
5219 }
5220
5221 /* Select a BFD to be used to hold the sections used by the glue code.
5222 This function is called from the linker scripts in ld/emultempl/
5223 {armelf/pe}.em. */
5224
5225 bfd_boolean
5226 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
5227 {
5228 struct elf32_arm_link_hash_table *globals;
5229
5230 /* If we are only performing a partial link
5231 do not bother getting a bfd to hold the glue. */
5232 if (info->relocatable)
5233 return TRUE;
5234
5235 /* Make sure we don't attach the glue sections to a dynamic object. */
5236 BFD_ASSERT (!(abfd->flags & DYNAMIC));
5237
5238 globals = elf32_arm_hash_table (info);
5239
5240 BFD_ASSERT (globals != NULL);
5241
5242 if (globals->bfd_of_glue_owner != NULL)
5243 return TRUE;
5244
5245 /* Save the bfd for later use. */
5246 globals->bfd_of_glue_owner = abfd;
5247
5248 return TRUE;
5249 }
5250
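/* Decide whether BLX may be used, based on the Tag_CPU_arch build
   attribute: values 0, 1 and 2 correspond to Pre-v4, v4 and v4T, and
   BLX is available from ARMv5T (value 3) onwards, hence the "> 2"
   test below.  */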
5251 static void
5252 check_use_blx (struct elf32_arm_link_hash_table *globals)
5253 {
5254 if (bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
5255 Tag_CPU_arch) > 2)
5256 globals->use_blx = 1;
5257 }
5258
5259 bfd_boolean
5260 bfd_elf32_arm_process_before_allocation (bfd *abfd,
5261 struct bfd_link_info *link_info)
5262 {
5263 Elf_Internal_Shdr *symtab_hdr;
5264 Elf_Internal_Rela *internal_relocs = NULL;
5265 Elf_Internal_Rela *irel, *irelend;
5266 bfd_byte *contents = NULL;
5267
5268 asection *sec;
5269 struct elf32_arm_link_hash_table *globals;
5270
5271 /* If we are only performing a partial link do not bother
5272 to construct any glue. */
5273 if (link_info->relocatable)
5274 return TRUE;
5275
5276 /* Here we have a bfd that is to be included on the link. We have a
5277 hook to do reloc rummaging, before section sizes are nailed down. */
5278 globals = elf32_arm_hash_table (link_info);
5279
5280 BFD_ASSERT (globals != NULL);
5281
5282 check_use_blx (globals);
5283
5284 if (globals->byteswap_code && !bfd_big_endian (abfd))
5285 {
5286 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
5287 abfd);
5288 return FALSE;
5289 }
5290
5291 /* PR 5398: If we have not decided to include any loadable sections in
5292 the output then we will not have a glue owner bfd. This is OK, it
5293 just means that there is nothing else for us to do here. */
5294 if (globals->bfd_of_glue_owner == NULL)
5295 return TRUE;
5296
5297 /* Rummage around all the relocs and map the glue vectors. */
5298 sec = abfd->sections;
5299
5300 if (sec == NULL)
5301 return TRUE;
5302
5303 for (; sec != NULL; sec = sec->next)
5304 {
5305 if (sec->reloc_count == 0)
5306 continue;
5307
5308 if ((sec->flags & SEC_EXCLUDE) != 0)
5309 continue;
5310
5311 symtab_hdr = & elf_symtab_hdr (abfd);
5312
5313 /* Load the relocs. */
5314 internal_relocs
5315 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
5316
5317 if (internal_relocs == NULL)
5318 goto error_return;
5319
5320 irelend = internal_relocs + sec->reloc_count;
5321 for (irel = internal_relocs; irel < irelend; irel++)
5322 {
5323 long r_type;
5324 unsigned long r_index;
5325
5326 struct elf_link_hash_entry *h;
5327
5328 r_type = ELF32_R_TYPE (irel->r_info);
5329 r_index = ELF32_R_SYM (irel->r_info);
5330
5331 /* These are the only relocation types we care about. */
5332 if ( r_type != R_ARM_PC24
5333 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
5334 continue;
5335
5336 /* Get the section contents if we haven't done so already. */
5337 if (contents == NULL)
5338 {
5339 /* Get cached copy if it exists. */
5340 if (elf_section_data (sec)->this_hdr.contents != NULL)
5341 contents = elf_section_data (sec)->this_hdr.contents;
5342 else
5343 {
5344 /* Go get them off disk. */
5345 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5346 goto error_return;
5347 }
5348 }
5349
5350 if (r_type == R_ARM_V4BX)
5351 {
5352 int reg;
5353
5354 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
5355 record_arm_bx_glue (link_info, reg);
5356 continue;
5357 }
5358
5359 /* If the relocation is not against a symbol it cannot concern us. */
5360 h = NULL;
5361
5362 /* We don't care about local symbols. */
5363 if (r_index < symtab_hdr->sh_info)
5364 continue;
5365
5366 /* This is an external symbol. */
5367 r_index -= symtab_hdr->sh_info;
5368 h = (struct elf_link_hash_entry *)
5369 elf_sym_hashes (abfd)[r_index];
5370
5371 /* If the relocation is against a static symbol it must be within
5372 the current section and so cannot be a cross ARM/Thumb relocation. */
5373 if (h == NULL)
5374 continue;
5375
5376 /* If the call will go through a PLT entry then we do not need
5377 glue. */
5378 if (globals->splt != NULL && h->plt.offset != (bfd_vma) -1)
5379 continue;
5380
5381 switch (r_type)
5382 {
5383 case R_ARM_PC24:
5384 /* This one is a call from arm code. We need to look up
5385 the target of the call. If it is a thumb target, we
5386 insert glue. */
5387 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
5388 record_arm_to_thumb_glue (link_info, h);
5389 break;
5390
5391 default:
5392 abort ();
5393 }
5394 }
5395
5396 if (contents != NULL
5397 && elf_section_data (sec)->this_hdr.contents != contents)
5398 free (contents);
5399 contents = NULL;
5400
5401 if (internal_relocs != NULL
5402 && elf_section_data (sec)->relocs != internal_relocs)
5403 free (internal_relocs);
5404 internal_relocs = NULL;
5405 }
5406
5407 return TRUE;
5408
5409 error_return:
5410 if (contents != NULL
5411 && elf_section_data (sec)->this_hdr.contents != contents)
5412 free (contents);
5413 if (internal_relocs != NULL
5414 && elf_section_data (sec)->relocs != internal_relocs)
5415 free (internal_relocs);
5416
5417 return FALSE;
5418 }
5419 #endif
5420
5421
5422 /* Initialise maps of ARM/Thumb/data for input BFDs. */
5423
5424 void
5425 bfd_elf32_arm_init_maps (bfd *abfd)
5426 {
5427 Elf_Internal_Sym *isymbuf;
5428 Elf_Internal_Shdr *hdr;
5429 unsigned int i, localsyms;
5430
5431 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
5432 if (! is_arm_elf (abfd))
5433 return;
5434
5435 if ((abfd->flags & DYNAMIC) != 0)
5436 return;
5437
5438 hdr = & elf_symtab_hdr (abfd);
5439 localsyms = hdr->sh_info;
5440
5441 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
5442 should contain the number of local symbols, which should come before any
5443 global symbols. Mapping symbols are always local. */
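  /* ARM mapping symbols are named $a (ARM code), $t (Thumb code) and $d
     (data); the character after the '$' is what gets recorded in the
     section map below.  */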
5444 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
5445 NULL);
5446
5447 /* No internal symbols read? Skip this BFD. */
5448 if (isymbuf == NULL)
5449 return;
5450
5451 for (i = 0; i < localsyms; i++)
5452 {
5453 Elf_Internal_Sym *isym = &isymbuf[i];
5454 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
5455 const char *name;
5456
5457 if (sec != NULL
5458 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
5459 {
5460 name = bfd_elf_string_from_elf_section (abfd,
5461 hdr->sh_link, isym->st_name);
5462
5463 if (bfd_is_arm_special_symbol_name (name,
5464 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
5465 elf32_arm_section_map_add (sec, name[1], isym->st_value);
5466 }
5467 }
5468 }
5469
5470
5471 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
5472 say what they wanted. */
5473
5474 void
5475 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
5476 {
5477 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5478 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5479
5480 if (globals->fix_cortex_a8 == -1)
5481 {
5482 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
5483 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
5484 && (out_attr[Tag_CPU_arch_profile].i == 'A'
5485 || out_attr[Tag_CPU_arch_profile].i == 0))
5486 globals->fix_cortex_a8 = 1;
5487 else
5488 globals->fix_cortex_a8 = 0;
5489 }
5490 }
5491
5492
5493 void
5494 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
5495 {
5496 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5497 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5498
5499 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
5500 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
5501 {
5502 switch (globals->vfp11_fix)
5503 {
5504 case BFD_ARM_VFP11_FIX_DEFAULT:
5505 case BFD_ARM_VFP11_FIX_NONE:
5506 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5507 break;
5508
5509 default:
5510 /* Give a warning, but do as the user requests anyway. */
5511 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
5512 "workaround is not necessary for target architecture"), obfd);
5513 }
5514 }
5515 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
5516 /* For earlier architectures, we might need the workaround, but do not
5517 enable it by default. If users are running with broken hardware, they
5518 must enable the erratum fix explicitly. */
5519 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5520 }
5521
5522
5523 enum bfd_arm_vfp11_pipe
5524 {
5525 VFP11_FMAC,
5526 VFP11_LS,
5527 VFP11_DS,
5528 VFP11_BAD
5529 };
5530
5531 /* Return a VFP register number. This is encoded as RX:X for single-precision
5532 registers, or X:RX for double-precision registers, where RX is the group of
5533 four bits in the instruction encoding and X is the single extension bit.
5534 RX and X fields are specified using their lowest (starting) bit. The return
5535 value is:
5536
5537 0...31: single-precision registers s0...s31
5538 32...63: double-precision registers d0...d31.
5539
5540 Although X should be zero for VFP11 (encoding d0...d15 only), we might
5541 encounter VFP3 instructions, so we allow the full range for DP registers. */
5542
5543 static unsigned int
5544 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
5545 unsigned int x)
5546 {
5547 if (is_double)
5548 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
5549 else
5550 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
5551 }
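/* For example (following the code above): with RX = 12 and X = 22, an
   instruction whose bits [15:12] are 0x3 and whose bit 22 is set decodes to
   (3 << 1) | 1 = 7, i.e. s7, in single precision, or to (3 | (1 << 4)) + 32
   = 51, i.e. d19, in double precision.  */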
5552
5553 /* Set bits in *WMASK according to a register number REG as encoded by
5554 bfd_arm_vfp11_regno(). Ignore d16-d31. */
5555
5556 static void
5557 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
5558 {
5559 if (reg < 32)
5560 *wmask |= 1 << reg;
5561 else if (reg < 48)
5562 *wmask |= 3 << ((reg - 32) * 2);
5563 }
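/* For example, s3 (reg 3) sets bit 3 of the mask, while d5 (reg 37) sets
   bits 10 and 11, i.e. the slots of the two SP registers (s10/s11) that the
   DP register overlaps.  */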
5564
5565 /* Return TRUE if WMASK overwrites anything in REGS. */
5566
5567 static bfd_boolean
5568 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
5569 {
5570 int i;
5571
5572 for (i = 0; i < numregs; i++)
5573 {
5574 unsigned int reg = regs[i];
5575
5576 if (reg < 32 && (wmask & (1 << reg)) != 0)
5577 return TRUE;
5578
5579 reg -= 32;
5580
5581 if (reg >= 16)
5582 continue;
5583
5584 if ((wmask & (3 << (reg * 2))) != 0)
5585 return TRUE;
5586 }
5587
5588 return FALSE;
5589 }
5590
5591 /* In this function, we're interested in two things: finding input registers
5592 for VFP data-processing instructions, and finding the set of registers which
5593 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
5594 hold the written set, so FLDM etc. are easy to deal with (we're only
5595 interested in 32 SP registers or 16 DP registers, due to the VFP version
5596 implemented by the chip in question). DP registers are marked by setting
5597 both SP registers in the write mask. */
5598
5599 static enum bfd_arm_vfp11_pipe
5600 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
5601 int *numregs)
5602 {
5603 enum bfd_arm_vfp11_pipe pipe = VFP11_BAD;
5604 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
5605
5606 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
5607 {
5608 unsigned int pqrs;
5609 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5610 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5611
5612 pqrs = ((insn & 0x00800000) >> 20)
5613 | ((insn & 0x00300000) >> 19)
5614 | ((insn & 0x00000040) >> 6);
5615
5616 switch (pqrs)
5617 {
5618 case 0: /* fmac[sd]. */
5619 case 1: /* fnmac[sd]. */
5620 case 2: /* fmsc[sd]. */
5621 case 3: /* fnmsc[sd]. */
5622 pipe = VFP11_FMAC;
5623 bfd_arm_vfp11_write_mask (destmask, fd);
5624 regs[0] = fd;
5625 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5626 regs[2] = fm;
5627 *numregs = 3;
5628 break;
5629
5630 case 4: /* fmul[sd]. */
5631 case 5: /* fnmul[sd]. */
5632 case 6: /* fadd[sd]. */
5633 case 7: /* fsub[sd]. */
5634 pipe = VFP11_FMAC;
5635 goto vfp_binop;
5636
5637 case 8: /* fdiv[sd]. */
5638 pipe = VFP11_DS;
5639 vfp_binop:
5640 bfd_arm_vfp11_write_mask (destmask, fd);
5641 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5642 regs[1] = fm;
5643 *numregs = 2;
5644 break;
5645
5646 case 15: /* extended opcode. */
5647 {
5648 unsigned int extn = ((insn >> 15) & 0x1e)
5649 | ((insn >> 7) & 1);
5650
5651 switch (extn)
5652 {
5653 case 0: /* fcpy[sd]. */
5654 case 1: /* fabs[sd]. */
5655 case 2: /* fneg[sd]. */
5656 case 8: /* fcmp[sd]. */
5657 case 9: /* fcmpe[sd]. */
5658 case 10: /* fcmpz[sd]. */
5659 case 11: /* fcmpez[sd]. */
5660 case 16: /* fuito[sd]. */
5661 case 17: /* fsito[sd]. */
5662 case 24: /* ftoui[sd]. */
5663 case 25: /* ftouiz[sd]. */
5664 case 26: /* ftosi[sd]. */
5665 case 27: /* ftosiz[sd]. */
5666 /* These instructions will not bounce due to underflow. */
5667 *numregs = 0;
5668 pipe = VFP11_FMAC;
5669 break;
5670
5671 case 3: /* fsqrt[sd]. */
5672 /* fsqrt cannot underflow, but it can (perhaps) overwrite
5673 registers to cause the erratum in previous instructions. */
5674 bfd_arm_vfp11_write_mask (destmask, fd);
5675 pipe = VFP11_DS;
5676 break;
5677
5678 case 15: /* fcvt{ds,sd}. */
5679 {
5680 int rnum = 0;
5681
5682 bfd_arm_vfp11_write_mask (destmask, fd);
5683
5684 /* Only FCVTSD can underflow. */
5685 if ((insn & 0x100) != 0)
5686 regs[rnum++] = fm;
5687
5688 *numregs = rnum;
5689
5690 pipe = VFP11_FMAC;
5691 }
5692 break;
5693
5694 default:
5695 return VFP11_BAD;
5696 }
5697 }
5698 break;
5699
5700 default:
5701 return VFP11_BAD;
5702 }
5703 }
5704 /* Two-register transfer. */
5705 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
5706 {
5707 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5708
5709 if ((insn & 0x100000) == 0)
5710 {
5711 if (is_double)
5712 bfd_arm_vfp11_write_mask (destmask, fm);
5713 else
5714 {
5715 bfd_arm_vfp11_write_mask (destmask, fm);
5716 bfd_arm_vfp11_write_mask (destmask, fm + 1);
5717 }
5718 }
5719
5720 pipe = VFP11_LS;
5721 }
5722 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
5723 {
5724 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5725 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
5726
5727 switch (puw)
5728 {
5729 case 0: /* Two-reg transfer. We should catch these above. */
5730 abort ();
5731
5732 case 2: /* fldm[sdx]. */
5733 case 3:
5734 case 5:
5735 {
5736 unsigned int i, offset = insn & 0xff;
5737
5738 if (is_double)
5739 offset >>= 1;
5740
5741 for (i = fd; i < fd + offset; i++)
5742 bfd_arm_vfp11_write_mask (destmask, i);
5743 }
5744 break;
5745
5746 case 4: /* fld[sd]. */
5747 case 6:
5748 bfd_arm_vfp11_write_mask (destmask, fd);
5749 break;
5750
5751 default:
5752 return VFP11_BAD;
5753 }
5754
5755 pipe = VFP11_LS;
5756 }
5757 /* Single-register transfer. Note L==0. */
5758 else if ((insn & 0x0f100e10) == 0x0e000a10)
5759 {
5760 unsigned int opcode = (insn >> 21) & 7;
5761 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
5762
5763 switch (opcode)
5764 {
5765 case 0: /* fmsr/fmdlr. */
5766 case 1: /* fmdhr. */
5767 /* Mark fmdhr and fmdlr as writing to the whole of the DP
5768 destination register. I don't know if this is exactly right,
5769 but it is the conservative choice. */
5770 bfd_arm_vfp11_write_mask (destmask, fn);
5771 break;
5772
5773 case 7: /* fmxr. */
5774 break;
5775 }
5776
5777 pipe = VFP11_LS;
5778 }
5779
5780 return pipe;
5781 }
5782
5783
5784 static int elf32_arm_compare_mapping (const void * a, const void * b);
5785
5786
5787 /* Look for potentially-troublesome code sequences which might trigger the
5788 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
5789 (available from ARM) for details of the erratum. A short version is
5790 described in ld.texinfo. */
5791
5792 bfd_boolean
5793 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
5794 {
5795 asection *sec;
5796 bfd_byte *contents = NULL;
5797 int state = 0;
5798 int regs[3], numregs = 0;
5799 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5800 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
5801
5802 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
5803 The states transition as follows:
5804
5805 0 -> 1 (vector) or 0 -> 2 (scalar)
5806 A VFP FMAC-pipeline instruction has been seen. Fill
5807 regs[0]..regs[numregs-1] with its input operands. Remember this
5808 instruction in 'first_fmac'.
5809
5810 1 -> 2
5811 Any instruction, except for a VFP instruction which overwrites
5812 regs[*].
5813
5814 1 -> 3 [ -> 0 ] or
5815 2 -> 3 [ -> 0 ]
5816 A VFP instruction has been seen which overwrites any of regs[*].
5817 We must make a veneer! Reset state to 0 before examining next
5818 instruction.
5819
5820 2 -> 0
5821 If we fail to match anything in state 2, reset to state 0 and reset
5822 the instruction pointer to the instruction after 'first_fmac'.
5823
5824 If the VFP11 vector mode is in use, there must be at least two unrelated
5825 instructions between anti-dependent VFP11 instructions to properly avoid
5826 triggering the erratum, hence the use of the extra state 1. */
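  /* A minimal scalar-mode example: an FMAC-pipeline instruction such as
     fmuls s1, s2, s3 moves us to state 2 with regs[] = {s2, s3}; if a later
     VFP instruction writes either of those registers (e.g. flds s2, [r0]),
     the antidependency test fires, state 3 is entered and a veneer is
     recorded for the fmuls.  */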
5827
5828 /* If we are only performing a partial link do not bother
5829 to construct any glue. */
5830 if (link_info->relocatable)
5831 return TRUE;
5832
5833 /* Skip if this bfd does not correspond to an ELF image. */
5834 if (! is_arm_elf (abfd))
5835 return TRUE;
5836
5837 /* We should have chosen a fix type by the time we get here. */
5838 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
5839
5840 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
5841 return TRUE;
5842
5843 /* Skip this BFD if it corresponds to an executable or dynamic object. */
5844 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
5845 return TRUE;
5846
5847 for (sec = abfd->sections; sec != NULL; sec = sec->next)
5848 {
5849 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
5850 struct _arm_elf_section_data *sec_data;
5851
5852 /* If we don't have executable progbits, we're not interested in this
5853 section. Also skip if section is to be excluded. */
5854 if (elf_section_type (sec) != SHT_PROGBITS
5855 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
5856 || (sec->flags & SEC_EXCLUDE) != 0
5857 || sec->sec_info_type == ELF_INFO_TYPE_JUST_SYMS
5858 || sec->output_section == bfd_abs_section_ptr
5859 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
5860 continue;
5861
5862 sec_data = elf32_arm_section_data (sec);
5863
5864 if (sec_data->mapcount == 0)
5865 continue;
5866
5867 if (elf_section_data (sec)->this_hdr.contents != NULL)
5868 contents = elf_section_data (sec)->this_hdr.contents;
5869 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5870 goto error_return;
5871
5872 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
5873 elf32_arm_compare_mapping);
5874
5875 for (span = 0; span < sec_data->mapcount; span++)
5876 {
5877 unsigned int span_start = sec_data->map[span].vma;
5878 unsigned int span_end = (span == sec_data->mapcount - 1)
5879 ? sec->size : sec_data->map[span + 1].vma;
5880 char span_type = sec_data->map[span].type;
5881
5882 /* FIXME: Only ARM mode is supported at present. We may need to
5883 support Thumb-2 mode also at some point. */
5884 if (span_type != 'a')
5885 continue;
5886
5887 for (i = span_start; i < span_end;)
5888 {
5889 unsigned int next_i = i + 4;
5890 unsigned int insn = bfd_big_endian (abfd)
5891 ? (contents[i] << 24)
5892 | (contents[i + 1] << 16)
5893 | (contents[i + 2] << 8)
5894 | contents[i + 3]
5895 : (contents[i + 3] << 24)
5896 | (contents[i + 2] << 16)
5897 | (contents[i + 1] << 8)
5898 | contents[i];
5899 unsigned int writemask = 0;
5900 enum bfd_arm_vfp11_pipe pipe;
5901
5902 switch (state)
5903 {
5904 case 0:
5905 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
5906 &numregs);
5907 /* I'm assuming the VFP11 erratum can trigger with denorm
5908 operands on either the FMAC or the DS pipeline. This might
5909 lead to slightly overenthusiastic veneer insertion. */
5910 if (pipe == VFP11_FMAC || pipe == VFP11_DS)
5911 {
5912 state = use_vector ? 1 : 2;
5913 first_fmac = i;
5914 veneer_of_insn = insn;
5915 }
5916 break;
5917
5918 case 1:
5919 {
5920 int other_regs[3], other_numregs;
5921 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
5922 other_regs,
5923 &other_numregs);
5924 if (pipe != VFP11_BAD
5925 && bfd_arm_vfp11_antidependency (writemask, regs,
5926 numregs))
5927 state = 3;
5928 else
5929 state = 2;
5930 }
5931 break;
5932
5933 case 2:
5934 {
5935 int other_regs[3], other_numregs;
5936 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
5937 other_regs,
5938 &other_numregs);
5939 if (pipe != VFP11_BAD
5940 && bfd_arm_vfp11_antidependency (writemask, regs,
5941 numregs))
5942 state = 3;
5943 else
5944 {
5945 state = 0;
5946 next_i = first_fmac + 4;
5947 }
5948 }
5949 break;
5950
5951 case 3:
5952 abort (); /* Should be unreachable. */
5953 }
5954
5955 if (state == 3)
5956 {
5957 elf32_vfp11_erratum_list *newerr
5958 = bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5959 int errcount;
5960
5961 errcount = ++(elf32_arm_section_data (sec)->erratumcount);
5962
5963 newerr->u.b.vfp_insn = veneer_of_insn;
5964
5965 switch (span_type)
5966 {
5967 case 'a':
5968 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
5969 break;
5970
5971 default:
5972 abort ();
5973 }
5974
5975 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
5976 first_fmac);
5977
5978 newerr->vma = -1;
5979
5980 newerr->next = sec_data->erratumlist;
5981 sec_data->erratumlist = newerr;
5982
5983 state = 0;
5984 }
5985
5986 i = next_i;
5987 }
5988 }
5989
5990 if (contents != NULL
5991 && elf_section_data (sec)->this_hdr.contents != contents)
5992 free (contents);
5993 contents = NULL;
5994 }
5995
5996 return TRUE;
5997
5998 error_return:
5999 if (contents != NULL
6000 && elf_section_data (sec)->this_hdr.contents != contents)
6001 free (contents);
6002
6003 return FALSE;
6004 }
6005
6006 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
6007 after sections have been laid out, using specially-named symbols. */
6008
6009 void
6010 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
6011 struct bfd_link_info *link_info)
6012 {
6013 asection *sec;
6014 struct elf32_arm_link_hash_table *globals;
6015 char *tmp_name;
6016
6017 if (link_info->relocatable)
6018 return;
6019
6020 /* Skip if this bfd does not correspond to an ELF image. */
6021 if (! is_arm_elf (abfd))
6022 return;
6023
6024 globals = elf32_arm_hash_table (link_info);
6025
6026 tmp_name = bfd_malloc ((bfd_size_type) strlen
6027 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6028
6029 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6030 {
6031 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6032 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6033
6034 for (; errnode != NULL; errnode = errnode->next)
6035 {
6036 struct elf_link_hash_entry *myh;
6037 bfd_vma vma;
6038
6039 switch (errnode->type)
6040 {
6041 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6042 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6043 /* Find veneer symbol. */
6044 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6045 errnode->u.b.veneer->u.v.id);
6046
6047 myh = elf_link_hash_lookup
6048 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6049
6050 if (myh == NULL)
6051 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6052 "`%s'"), abfd, tmp_name);
6053
6054 vma = myh->root.u.def.section->output_section->vma
6055 + myh->root.u.def.section->output_offset
6056 + myh->root.u.def.value;
6057
6058 errnode->u.b.veneer->vma = vma;
6059 break;
6060
6061 case VFP11_ERRATUM_ARM_VENEER:
6062 case VFP11_ERRATUM_THUMB_VENEER:
6063 /* Find return location. */
6064 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6065 errnode->u.v.id);
6066
6067 myh = elf_link_hash_lookup
6068 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6069
6070 if (myh == NULL)
6071 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6072 "`%s'"), abfd, tmp_name);
6073
6074 vma = myh->root.u.def.section->output_section->vma
6075 + myh->root.u.def.section->output_offset
6076 + myh->root.u.def.value;
6077
6078 errnode->u.v.branch->vma = vma;
6079 break;
6080
6081 default:
6082 abort ();
6083 }
6084 }
6085 }
6086
6087 free (tmp_name);
6088 }
6089
6090
6091 /* Set target relocation values needed during linking. */
6092
6093 void
6094 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
6095 struct bfd_link_info *link_info,
6096 int target1_is_rel,
6097 char * target2_type,
6098 int fix_v4bx,
6099 int use_blx,
6100 bfd_arm_vfp11_fix vfp11_fix,
6101 int no_enum_warn, int no_wchar_warn,
6102 int pic_veneer, int fix_cortex_a8)
6103 {
6104 struct elf32_arm_link_hash_table *globals;
6105
6106 globals = elf32_arm_hash_table (link_info);
6107
6108 globals->target1_is_rel = target1_is_rel;
6109 if (strcmp (target2_type, "rel") == 0)
6110 globals->target2_reloc = R_ARM_REL32;
6111 else if (strcmp (target2_type, "abs") == 0)
6112 globals->target2_reloc = R_ARM_ABS32;
6113 else if (strcmp (target2_type, "got-rel") == 0)
6114 globals->target2_reloc = R_ARM_GOT_PREL;
6115 else
6116 {
6117 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
6118 target2_type);
6119 }
6120 globals->fix_v4bx = fix_v4bx;
6121 globals->use_blx |= use_blx;
6122 globals->vfp11_fix = vfp11_fix;
6123 globals->pic_veneer = pic_veneer;
6124 globals->fix_cortex_a8 = fix_cortex_a8;
6125
6126 BFD_ASSERT (is_arm_elf (output_bfd));
6127 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
6128 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
6129 }
6130
6131 /* Replace the target offset of a Thumb bl or b.w instruction. */
6132
6133 static void
6134 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
6135 {
6136 bfd_vma upper;
6137 bfd_vma lower;
6138 int reloc_sign;
6139
6140 BFD_ASSERT ((offset & 1) == 0);
6141
6142 upper = bfd_get_16 (abfd, insn);
6143 lower = bfd_get_16 (abfd, insn + 2);
6144 reloc_sign = (offset < 0) ? 1 : 0;
6145 upper = (upper & ~(bfd_vma) 0x7ff)
6146 | ((offset >> 12) & 0x3ff)
6147 | (reloc_sign << 10);
6148 lower = (lower & ~(bfd_vma) 0x2fff)
6149 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
6150 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
6151 | ((offset >> 1) & 0x7ff);
6152 bfd_put_16 (abfd, upper, insn);
6153 bfd_put_16 (abfd, lower, insn + 2);
6154 }
6155
6156 /* Thumb code calling an ARM function. */
6157
6158 static int
6159 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
6160 const char * name,
6161 bfd * input_bfd,
6162 bfd * output_bfd,
6163 asection * input_section,
6164 bfd_byte * hit_data,
6165 asection * sym_sec,
6166 bfd_vma offset,
6167 bfd_signed_vma addend,
6168 bfd_vma val,
6169 char **error_message)
6170 {
6171 asection * s = 0;
6172 bfd_vma my_offset;
6173 long int ret_offset;
6174 struct elf_link_hash_entry * myh;
6175 struct elf32_arm_link_hash_table * globals;
6176
6177 myh = find_thumb_glue (info, name, error_message);
6178 if (myh == NULL)
6179 return FALSE;
6180
6181 globals = elf32_arm_hash_table (info);
6182
6183 BFD_ASSERT (globals != NULL);
6184 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6185
6186 my_offset = myh->root.u.def.value;
6187
6188 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6189 THUMB2ARM_GLUE_SECTION_NAME);
6190
6191 BFD_ASSERT (s != NULL);
6192 BFD_ASSERT (s->contents != NULL);
6193 BFD_ASSERT (s->output_section != NULL);
6194
6195 if ((my_offset & 0x01) == 0x01)
6196 {
6197 if (sym_sec != NULL
6198 && sym_sec->owner != NULL
6199 && !INTERWORK_FLAG (sym_sec->owner))
6200 {
6201 (*_bfd_error_handler)
6202 (_("%B(%s): warning: interworking not enabled.\n"
6203 " first occurrence: %B: thumb call to arm"),
6204 sym_sec->owner, input_bfd, name);
6205
6206 return FALSE;
6207 }
6208
6209 --my_offset;
6210 myh->root.u.def.value = my_offset;
6211
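          /* The glue sequence written below is bx pc; nop; b <target>.  The
             bx pc switches to ARM state at the following word-aligned ARM
             branch, which then jumps to the real destination.  */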
6212 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
6213 s->contents + my_offset);
6214
6215 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
6216 s->contents + my_offset + 2);
6217
6218 ret_offset =
6219 /* Address of destination of the stub. */
6220 ((bfd_signed_vma) val)
6221 - ((bfd_signed_vma)
6222 /* Offset from the start of the current section
6223 to the start of the stubs. */
6224 (s->output_offset
6225 /* Offset of the start of this stub from the start of the stubs. */
6226 + my_offset
6227 /* Address of the start of the current section. */
6228 + s->output_section->vma)
6229 /* The branch instruction is 4 bytes into the stub. */
6230 + 4
6231 /* ARM branches work from the pc of the instruction + 8. */
6232 + 8);
6233
6234 put_arm_insn (globals, output_bfd,
6235 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
6236 s->contents + my_offset + 4);
6237 }
6238
6239 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
6240
6241 /* Now go back and fix up the original BL insn to point to here. */
6242 ret_offset =
6243 /* Address of where the stub is located. */
6244 (s->output_section->vma + s->output_offset + my_offset)
6245 /* Address of where the BL is located. */
6246 - (input_section->output_section->vma + input_section->output_offset
6247 + offset)
6248 /* Addend in the relocation. */
6249 - addend
6250 /* Biasing for PC-relative addressing. */
6251 - 8;
6252
6253 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
6254
6255 return TRUE;
6256 }
6257
6258 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
6259
6260 static struct elf_link_hash_entry *
6261 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
6262 const char * name,
6263 bfd * input_bfd,
6264 bfd * output_bfd,
6265 asection * sym_sec,
6266 bfd_vma val,
6267 asection * s,
6268 char ** error_message)
6269 {
6270 bfd_vma my_offset;
6271 long int ret_offset;
6272 struct elf_link_hash_entry * myh;
6273 struct elf32_arm_link_hash_table * globals;
6274
6275 myh = find_arm_glue (info, name, error_message);
6276 if (myh == NULL)
6277 return NULL;
6278
6279 globals = elf32_arm_hash_table (info);
6280
6281 BFD_ASSERT (globals != NULL);
6282 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6283
6284 my_offset = myh->root.u.def.value;
6285
6286 if ((my_offset & 0x01) == 0x01)
6287 {
6288 if (sym_sec != NULL
6289 && sym_sec->owner != NULL
6290 && !INTERWORK_FLAG (sym_sec->owner))
6291 {
6292 (*_bfd_error_handler)
6293 (_("%B(%s): warning: interworking not enabled.\n"
6294 " first occurrence: %B: arm call to thumb"),
6295 sym_sec->owner, input_bfd, name);
6296 }
6297
6298 --my_offset;
6299 myh->root.u.def.value = my_offset;
6300
6301 if (info->shared || globals->root.is_relocatable_executable
6302 || globals->pic_veneer)
6303 {
6304 /* For relocatable objects we can't use absolute addresses,
6305 so construct the address from a relative offset. */
6306 /* TODO: If the offset is small it's probably worth
6307 constructing the address with adds. */
6308 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
6309 s->contents + my_offset);
6310 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
6311 s->contents + my_offset + 4);
6312 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
6313 s->contents + my_offset + 8);
6314 /* Adjust the offset by 4 for the position of the add,
6315 and 8 for the pipeline offset. */
6316 ret_offset = (val - (s->output_offset
6317 + s->output_section->vma
6318 + my_offset + 12))
6319 | 1;
6320 bfd_put_32 (output_bfd, ret_offset,
6321 s->contents + my_offset + 12);
6322 }
6323 else if (globals->use_blx)
6324 {
6325 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
6326 s->contents + my_offset);
6327
6328 /* It's a thumb address. Add the low order bit. */
6329 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
6330 s->contents + my_offset + 4);
6331 }
6332 else
6333 {
6334 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
6335 s->contents + my_offset);
6336
6337 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
6338 s->contents + my_offset + 4);
6339
6340 /* It's a thumb address. Add the low order bit. */
6341 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
6342 s->contents + my_offset + 8);
6343
6344 my_offset += 12;
6345 }
6346 }
6347
6348 BFD_ASSERT (my_offset <= globals->arm_glue_size);
6349
6350 return myh;
6351 }
6352
6353 /* Arm code calling a Thumb function. */
6354
6355 static int
6356 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
6357 const char * name,
6358 bfd * input_bfd,
6359 bfd * output_bfd,
6360 asection * input_section,
6361 bfd_byte * hit_data,
6362 asection * sym_sec,
6363 bfd_vma offset,
6364 bfd_signed_vma addend,
6365 bfd_vma val,
6366 char **error_message)
6367 {
6368 unsigned long int tmp;
6369 bfd_vma my_offset;
6370 asection * s;
6371 long int ret_offset;
6372 struct elf_link_hash_entry * myh;
6373 struct elf32_arm_link_hash_table * globals;
6374
6375 globals = elf32_arm_hash_table (info);
6376
6377 BFD_ASSERT (globals != NULL);
6378 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6379
6380 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6381 ARM2THUMB_GLUE_SECTION_NAME);
6382 BFD_ASSERT (s != NULL);
6383 BFD_ASSERT (s->contents != NULL);
6384 BFD_ASSERT (s->output_section != NULL);
6385
6386 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
6387 sym_sec, val, s, error_message);
6388 if (!myh)
6389 return FALSE;
6390
6391 my_offset = myh->root.u.def.value;
6392 tmp = bfd_get_32 (input_bfd, hit_data);
6393 tmp = tmp & 0xFF000000;
6394
6395 /* Somehow these are both 4 too far, so subtract 8. */
6396 ret_offset = (s->output_offset
6397 + my_offset
6398 + s->output_section->vma
6399 - (input_section->output_offset
6400 + input_section->output_section->vma
6401 + offset + addend)
6402 - 8);
6403
6404 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
6405
6406 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
6407
6408 return TRUE;
6409 }
6410
6411 /* Populate Arm stub for an exported Thumb function. */
6412
6413 static bfd_boolean
6414 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
6415 {
6416 struct bfd_link_info * info = (struct bfd_link_info *) inf;
6417 asection * s;
6418 struct elf_link_hash_entry * myh;
6419 struct elf32_arm_link_hash_entry *eh;
6420 struct elf32_arm_link_hash_table * globals;
6421 asection *sec;
6422 bfd_vma val;
6423 char *error_message;
6424
6425 eh = elf32_arm_hash_entry (h);
6426 /* Allocate stubs for exported Thumb functions on v4t. */
6427 if (eh->export_glue == NULL)
6428 return TRUE;
6429
6430 globals = elf32_arm_hash_table (info);
6431
6432 BFD_ASSERT (globals != NULL);
6433 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6434
6435 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6436 ARM2THUMB_GLUE_SECTION_NAME);
6437 BFD_ASSERT (s != NULL);
6438 BFD_ASSERT (s->contents != NULL);
6439 BFD_ASSERT (s->output_section != NULL);
6440
6441 sec = eh->export_glue->root.u.def.section;
6442
6443 BFD_ASSERT (sec->output_section != NULL);
6444
6445 val = eh->export_glue->root.u.def.value + sec->output_offset
6446 + sec->output_section->vma;
6447
6448 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
6449 h->root.u.def.section->owner,
6450 globals->obfd, sec, val, s,
6451 &error_message);
6452 BFD_ASSERT (myh);
6453 return TRUE;
6454 }
6455
6456 /* Populate ARMv4 BX veneers. Returns the absolute address of the veneer. */
6457
6458 static bfd_vma
6459 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
6460 {
6461 bfd_byte *p;
6462 bfd_vma glue_addr;
6463 asection *s;
6464 struct elf32_arm_link_hash_table *globals;
6465
6466 globals = elf32_arm_hash_table (info);
6467
6468 BFD_ASSERT (globals != NULL);
6469 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6470
6471 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6472 ARM_BX_GLUE_SECTION_NAME);
6473 BFD_ASSERT (s != NULL);
6474 BFD_ASSERT (s->contents != NULL);
6475 BFD_ASSERT (s->output_section != NULL);
6476
6477 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
6478
6479 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
6480
6481 if ((globals->bx_glue_offset[reg] & 1) == 0)
6482 {
6483 p = s->contents + glue_addr;
6484 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
6485 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
6486 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
6487 globals->bx_glue_offset[reg] |= 1;
6488 }
6489
6490 return glue_addr + s->output_section->vma + s->output_offset;
6491 }
6492
6493 /* Generate Arm stubs for exported Thumb symbols. */
6494 static void
6495 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
6496 struct bfd_link_info *link_info)
6497 {
6498 struct elf32_arm_link_hash_table * globals;
6499
6500 if (link_info == NULL)
6501 /* Ignore this if we are not called by the ELF backend linker. */
6502 return;
6503
6504 globals = elf32_arm_hash_table (link_info);
6505 /* If blx is available then exported Thumb symbols are OK and there is
6506 nothing to do. */
6507 if (globals->use_blx)
6508 return;
6509
6510 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
6511 link_info);
6512 }
6513
6514 /* Some relocations map to different relocations depending on the
6515 target. Return the real relocation. */
6516
6517 static int
6518 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
6519 int r_type)
6520 {
6521 switch (r_type)
6522 {
6523 case R_ARM_TARGET1:
6524 if (globals->target1_is_rel)
6525 return R_ARM_REL32;
6526 else
6527 return R_ARM_ABS32;
6528
6529 case R_ARM_TARGET2:
6530 return globals->target2_reloc;
6531
6532 default:
6533 return r_type;
6534 }
6535 }
6536
6537 /* Return the base VMA address which should be subtracted from real addresses
6538 when resolving @dtpoff relocation.
6539 This is PT_TLS segment p_vaddr. */
6540
6541 static bfd_vma
6542 dtpoff_base (struct bfd_link_info *info)
6543 {
6544 /* If tls_sec is NULL, we should have signalled an error already. */
6545 if (elf_hash_table (info)->tls_sec == NULL)
6546 return 0;
6547 return elf_hash_table (info)->tls_sec->vma;
6548 }
6549
6550 /* Return the relocation value for @tpoff relocation
6551 if STT_TLS virtual address is ADDRESS. */
6552
6553 static bfd_vma
6554 tpoff (struct bfd_link_info *info, bfd_vma address)
6555 {
6556 struct elf_link_hash_table *htab = elf_hash_table (info);
6557 bfd_vma base;
6558
6559 /* If tls_sec is NULL, we should have signalled an error already. */
6560 if (htab->tls_sec == NULL)
6561 return 0;
6562 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
6563 return address - htab->tls_sec->vma + base;
6564 }
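/* For example, with TCB_SIZE being 8 bytes on ARM and a TLS segment aligned
   to 8 bytes or less, tpoff (info, addr) is simply
   addr - tls_sec->vma + 8.  */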
6565
6566 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
6567 VALUE is the relocation value. */
6568
6569 static bfd_reloc_status_type
6570 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
6571 {
6572 if (value > 0xfff)
6573 return bfd_reloc_overflow;
6574
6575 value |= bfd_get_32 (abfd, data) & 0xfffff000;
6576 bfd_put_32 (abfd, value, data);
6577 return bfd_reloc_ok;
6578 }
6579
6580 /* For a given value of n, calculate the value of G_n as required to
6581 deal with group relocations. We return it in the form of an
6582 encoded constant-and-rotation, together with the final residual. If n is
6583 specified as less than zero, then final_residual is filled with the
6584 input value and no further action is performed. */
6585
6586 static bfd_vma
6587 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
6588 {
6589 int current_n;
6590 bfd_vma g_n;
6591 bfd_vma encoded_g_n = 0;
6592 bfd_vma residual = value; /* Also known as Y_n. */
6593
6594 for (current_n = 0; current_n <= n; current_n++)
6595 {
6596 int shift;
6597
6598 /* Calculate which part of the value to mask. */
6599 if (residual == 0)
6600 shift = 0;
6601 else
6602 {
6603 int msb;
6604
6605 /* Determine the most significant bit in the residual and
6606 align the resulting value to a 2-bit boundary. */
6607 for (msb = 30; msb >= 0; msb -= 2)
6608 if (residual & (3 << msb))
6609 break;
6610
6611 /* The desired shift is now (msb - 6), or zero, whichever
6612 is the greater. */
6613 shift = msb - 6;
6614 if (shift < 0)
6615 shift = 0;
6616 }
6617
6618 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
6619 g_n = residual & (0xff << shift);
6620 encoded_g_n = (g_n >> shift)
6621 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
6622
6623 /* Calculate the residual for the next time around. */
6624 residual &= ~g_n;
6625 }
6626
6627 *final_residual = residual;
6628
6629 return encoded_g_n;
6630 }
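/* Worked example: for VALUE = 0x1234 the first pass selects the byte-wide
   chunk 0x1200 as G_0 (encoded as 0xd48: constant 0x48 with rotation 13),
   leaving a residual of 0x34; a second pass then yields G_1 = 0x34 with no
   rotation and a final residual of 0.  */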
6631
6632 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
6633 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
6634
6635 static int
6636 identify_add_or_sub (bfd_vma insn)
6637 {
6638 int opcode = insn & 0x1e00000;
6639
6640 if (opcode == 1 << 23) /* ADD */
6641 return 1;
6642
6643 if (opcode == 1 << 22) /* SUB */
6644 return -1;
6645
6646 return 0;
6647 }
6648
6649 /* Perform a relocation as part of a final link. */
6650
6651 static bfd_reloc_status_type
6652 elf32_arm_final_link_relocate (reloc_howto_type * howto,
6653 bfd * input_bfd,
6654 bfd * output_bfd,
6655 asection * input_section,
6656 bfd_byte * contents,
6657 Elf_Internal_Rela * rel,
6658 bfd_vma value,
6659 struct bfd_link_info * info,
6660 asection * sym_sec,
6661 const char * sym_name,
6662 int sym_flags,
6663 struct elf_link_hash_entry * h,
6664 bfd_boolean * unresolved_reloc_p,
6665 char ** error_message)
6666 {
6667 unsigned long r_type = howto->type;
6668 unsigned long r_symndx;
6669 bfd_byte * hit_data = contents + rel->r_offset;
6670 bfd * dynobj = NULL;
6671 Elf_Internal_Shdr * symtab_hdr;
6672 struct elf_link_hash_entry ** sym_hashes;
6673 bfd_vma * local_got_offsets;
6674 asection * sgot = NULL;
6675 asection * splt = NULL;
6676 asection * sreloc = NULL;
6677 bfd_vma addend;
6678 bfd_signed_vma signed_addend;
6679 struct elf32_arm_link_hash_table * globals;
6680
6681 globals = elf32_arm_hash_table (info);
6682
6683 BFD_ASSERT (is_arm_elf (input_bfd));
6684
6685 /* Some relocation types map to different relocations depending on the
6686 target. We pick the right one here. */
6687 r_type = arm_real_reloc_type (globals, r_type);
6688 if (r_type != howto->type)
6689 howto = elf32_arm_howto_from_type (r_type);
6690
6691 /* If the start address has been set, then set the EF_ARM_HASENTRY
6692 flag. Setting this more than once is redundant, but the cost is
6693 not too high, and it keeps the code simple.
6694
6695 The test is done here, rather than somewhere else, because the
6696 start address is only set just before the final link commences.
6697
6698 Note - if the user deliberately sets a start address of 0, the
6699 flag will not be set. */
6700 if (bfd_get_start_address (output_bfd) != 0)
6701 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
6702
6703 dynobj = elf_hash_table (info)->dynobj;
6704 if (dynobj)
6705 {
6706 sgot = bfd_get_section_by_name (dynobj, ".got");
6707 splt = bfd_get_section_by_name (dynobj, ".plt");
6708 }
6709 symtab_hdr = & elf_symtab_hdr (input_bfd);
6710 sym_hashes = elf_sym_hashes (input_bfd);
6711 local_got_offsets = elf_local_got_offsets (input_bfd);
6712 r_symndx = ELF32_R_SYM (rel->r_info);
6713
6714 if (globals->use_rel)
6715 {
6716 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
6717
6718 if (addend & ((howto->src_mask + 1) >> 1))
6719 {
6720 signed_addend = -1;
6721 signed_addend &= ~ howto->src_mask;
6722 signed_addend |= addend;
6723 }
6724 else
6725 signed_addend = addend;
6726 }
6727 else
6728 addend = signed_addend = rel->r_addend;
6729
6730 switch (r_type)
6731 {
6732 case R_ARM_NONE:
6733 /* We don't need to find a value for this symbol. It's just a
6734 marker. */
6735 *unresolved_reloc_p = FALSE;
6736 return bfd_reloc_ok;
6737
6738 case R_ARM_ABS12:
6739 if (!globals->vxworks_p)
6740 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
6741
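      /* Fall through for VxWorks; the relocation is then handled together
	 with the group below (see the R_ARM_ABS12 case in the inner switch).  */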
6742 case R_ARM_PC24:
6743 case R_ARM_ABS32:
6744 case R_ARM_ABS32_NOI:
6745 case R_ARM_REL32:
6746 case R_ARM_REL32_NOI:
6747 case R_ARM_CALL:
6748 case R_ARM_JUMP24:
6749 case R_ARM_XPC25:
6750 case R_ARM_PREL31:
6751 case R_ARM_PLT32:
6752 /* Handle relocations which should use the PLT entry. ABS32/REL32
6753 will use the symbol's value, which may point to a PLT entry, but we
6754 don't need to handle that here. If we created a PLT entry, all
6755 branches in this object should go to it, except if the PLT is too
6756 far away, in which case a long branch stub should be inserted. */
6757 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
6758 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
6759 && r_type != R_ARM_CALL
6760 && r_type != R_ARM_JUMP24
6761 && r_type != R_ARM_PLT32)
6762 && h != NULL
6763 && splt != NULL
6764 && h->plt.offset != (bfd_vma) -1)
6765 {
6766 /* If we've created a .plt section, and assigned a PLT entry to
6767 this function, it should not be known to bind locally. If
6768 it were, we would have cleared the PLT entry. */
6769 BFD_ASSERT (!SYMBOL_CALLS_LOCAL (info, h));
6770
6771 value = (splt->output_section->vma
6772 + splt->output_offset
6773 + h->plt.offset);
6774 *unresolved_reloc_p = FALSE;
6775 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6776 contents, rel->r_offset, value,
6777 rel->r_addend);
6778 }
6779
6780 /* When generating a shared object or relocatable executable, these
6781 relocations are copied into the output file to be resolved at
6782 run time. */
6783 if ((info->shared || globals->root.is_relocatable_executable)
6784 && (input_section->flags & SEC_ALLOC)
6785 && !(elf32_arm_hash_table (info)->vxworks_p
6786 && strcmp (input_section->output_section->name,
6787 ".tls_vars") == 0)
6788 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
6789 || !SYMBOL_CALLS_LOCAL (info, h))
6790 && (h == NULL
6791 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6792 || h->root.type != bfd_link_hash_undefweak)
6793 && r_type != R_ARM_PC24
6794 && r_type != R_ARM_CALL
6795 && r_type != R_ARM_JUMP24
6796 && r_type != R_ARM_PREL31
6797 && r_type != R_ARM_PLT32)
6798 {
6799 Elf_Internal_Rela outrel;
6800 bfd_byte *loc;
6801 bfd_boolean skip, relocate;
6802
6803 *unresolved_reloc_p = FALSE;
6804
6805 if (sreloc == NULL)
6806 {
6807 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
6808 ! globals->use_rel);
6809
6810 if (sreloc == NULL)
6811 return bfd_reloc_notsupported;
6812 }
6813
6814 skip = FALSE;
6815 relocate = FALSE;
6816
6817 outrel.r_addend = addend;
6818 outrel.r_offset =
6819 _bfd_elf_section_offset (output_bfd, info, input_section,
6820 rel->r_offset);
6821 if (outrel.r_offset == (bfd_vma) -1)
6822 skip = TRUE;
6823 else if (outrel.r_offset == (bfd_vma) -2)
6824 skip = TRUE, relocate = TRUE;
6825 outrel.r_offset += (input_section->output_section->vma
6826 + input_section->output_offset);
6827
6828 if (skip)
6829 memset (&outrel, 0, sizeof outrel);
6830 else if (h != NULL
6831 && h->dynindx != -1
6832 && (!info->shared
6833 || !info->symbolic
6834 || !h->def_regular))
6835 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
6836 else
6837 {
6838 int symbol;
6839
6840 /* This symbol is local, or marked to become local. */
6841 if (sym_flags == STT_ARM_TFUNC)
6842 value |= 1;
6843 if (globals->symbian_p)
6844 {
6845 asection *osec;
6846
6847 /* On Symbian OS, the data segment and text segment
6848 can be relocated independently. Therefore, we
6849 must indicate the segment to which this
6850 relocation is relative. The BPABI allows us to
6851 use any symbol in the right segment; we just use
6852 the section symbol as it is convenient. (We
6853 cannot use the symbol given by "h" directly as it
6854 will not appear in the dynamic symbol table.)
6855
6856 Note that the dynamic linker ignores the section
6857 symbol value, so we don't subtract osec->vma
6858 from the emitted reloc addend. */
6859 if (sym_sec)
6860 osec = sym_sec->output_section;
6861 else
6862 osec = input_section->output_section;
6863 symbol = elf_section_data (osec)->dynindx;
6864 if (symbol == 0)
6865 {
6866 struct elf_link_hash_table *htab = elf_hash_table (info);
6867
6868 if ((osec->flags & SEC_READONLY) == 0
6869 && htab->data_index_section != NULL)
6870 osec = htab->data_index_section;
6871 else
6872 osec = htab->text_index_section;
6873 symbol = elf_section_data (osec)->dynindx;
6874 }
6875 BFD_ASSERT (symbol != 0);
6876 }
6877 else
6878 /* On SVR4-ish systems, the dynamic loader cannot
6879 relocate the text and data segments independently,
6880 so the symbol does not matter. */
6881 symbol = 0;
6882 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
6883 if (globals->use_rel)
6884 relocate = TRUE;
6885 else
6886 outrel.r_addend += value;
6887 }
6888
6889 loc = sreloc->contents;
6890 loc += sreloc->reloc_count++ * RELOC_SIZE (globals);
6891 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
6892
6893 /* If this reloc is against an external symbol, we do not want to
6894 fiddle with the addend. Otherwise, we need to include the symbol
6895 value so that it becomes an addend for the dynamic reloc. */
6896 if (! relocate)
6897 return bfd_reloc_ok;
6898
6899 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6900 contents, rel->r_offset, value,
6901 (bfd_vma) 0);
6902 }
6903 else switch (r_type)
6904 {
6905 case R_ARM_ABS12:
6906 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
6907
6908 case R_ARM_XPC25: /* Arm BLX instruction. */
6909 case R_ARM_CALL:
6910 case R_ARM_JUMP24:
6911 case R_ARM_PC24: /* Arm B/BL instruction. */
6912 case R_ARM_PLT32:
6913 {
6914 bfd_vma from;
6915 bfd_signed_vma branch_offset;
6916 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
6917
6918 if (r_type == R_ARM_XPC25)
6919 {
6920 /* Check for Arm calling Arm function. */
6921 /* FIXME: Should we translate the instruction into a BL
6922 instruction instead ? */
6923 if (sym_flags != STT_ARM_TFUNC)
6924 (*_bfd_error_handler)
6925 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
6926 input_bfd,
6927 h ? h->root.root.string : "(local)");
6928 }
6929 else if (r_type == R_ARM_PC24)
6930 {
6931 /* Check for Arm calling Thumb function. */
6932 if (sym_flags == STT_ARM_TFUNC)
6933 {
6934 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
6935 output_bfd, input_section,
6936 hit_data, sym_sec, rel->r_offset,
6937 signed_addend, value,
6938 error_message))
6939 return bfd_reloc_ok;
6940 else
6941 return bfd_reloc_dangerous;
6942 }
6943 }
6944
6945 /* Check if a stub has to be inserted because the
6946 destination is too far or we are changing mode. */
6947 if ( r_type == R_ARM_CALL
6948 || r_type == R_ARM_JUMP24
6949 || r_type == R_ARM_PLT32)
6950 {
6951 /* If the call goes through a PLT entry, make sure to
6952 check distance to the right destination address. */
6953 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
6954 {
6955 value = (splt->output_section->vma
6956 + splt->output_offset
6957 + h->plt.offset);
6958 *unresolved_reloc_p = FALSE;
6959 }
6960
6961 from = (input_section->output_section->vma
6962 + input_section->output_offset
6963 + rel->r_offset);
6964 branch_offset = (bfd_signed_vma)(value - from);
6965
6966 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
6967 || branch_offset < ARM_MAX_BWD_BRANCH_OFFSET
6968 || ((sym_flags == STT_ARM_TFUNC)
6969 && (((r_type == R_ARM_CALL) && !globals->use_blx)
6970 || (r_type == R_ARM_JUMP24)
6971 || (r_type == R_ARM_PLT32) ))
6972 )
6973 {
6974 /* The target is out of reach, so redirect the
6975 branch to the local stub for this function. */
6976
6977 stub_entry = elf32_arm_get_stub_entry (input_section,
6978 sym_sec, h,
6979 rel, globals);
6980 if (stub_entry != NULL)
6981 value = (stub_entry->stub_offset
6982 + stub_entry->stub_sec->output_offset
6983 + stub_entry->stub_sec->output_section->vma);
6984 }
6985 }
6986
6987 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
6988 where:
6989 S is the address of the symbol in the relocation.
6990 P is address of the instruction being relocated.
6991 A is the addend (extracted from the instruction) in bytes.
6992
6993 S is held in 'value'.
6994 P is the base address of the section containing the
6995 instruction plus the offset of the reloc into that
6996 section, ie:
6997 (input_section->output_section->vma +
6998 input_section->output_offset +
6999 rel->r_offset).
7000 A is the addend, converted into bytes, ie:
7001 (signed_addend * 4)
7002
7003 Note: None of these operations have knowledge of the pipeline
7004 size of the processor, thus it is up to the assembler to
7005 encode this information into the addend. */
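	  /* With REL-style objects the assembler typically pre-biases the
	     stored 24-bit field by -2 (so A becomes -8 after the shift
	     below), cancelling the PC + 8 that the processor adds.  */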
7006 value -= (input_section->output_section->vma
7007 + input_section->output_offset);
7008 value -= rel->r_offset;
7009 if (globals->use_rel)
7010 value += (signed_addend << howto->size);
7011 else
7012 /* RELA addends do not have to be adjusted by howto->size. */
7013 value += signed_addend;
7014
7015 signed_addend = value;
7016 signed_addend >>= howto->rightshift;
7017
7018 /* A branch to an undefined weak symbol is turned into a jump to
7019 the next instruction unless a PLT entry will be created. */
7020 if (h && h->root.type == bfd_link_hash_undefweak
7021 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7022 {
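	      /* 0x0affffff keeps the original condition bits and substitutes
		 a B with a 24-bit offset of -1, i.e. a branch to PC + 8 - 4,
		 the instruction immediately following this one.  */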
7023 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000)
7024 | 0x0affffff;
7025 }
7026 else
7027 {
7028 /* Perform a signed range check. */
7029 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
7030 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
7031 return bfd_reloc_overflow;
7032
7033 addend = (value & 2);
7034
7035 value = (signed_addend & howto->dst_mask)
7036 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
7037
7038 if (r_type == R_ARM_CALL)
7039 {
7040 /* Set the H bit in the BLX instruction. */
7041 if (sym_flags == STT_ARM_TFUNC)
7042 {
7043 if (addend)
7044 value |= (1 << 24);
7045 else
7046 value &= ~(bfd_vma)(1 << 24);
7047 }
7048
7049 /* Select the correct instruction (BL or BLX). */
7050 /* Only if we are not handling a BL to a stub. In this
7051 case, mode switching is performed by the stub. */
7052 if (sym_flags == STT_ARM_TFUNC && !stub_entry)
7053 value |= (1 << 28);
7054 else
7055 {
7056 value &= ~(bfd_vma)(1 << 28);
7057 value |= (1 << 24);
7058 }
7059 }
7060 }
7061 }
7062 break;
7063
7064 case R_ARM_ABS32:
7065 value += addend;
7066 if (sym_flags == STT_ARM_TFUNC)
7067 value |= 1;
7068 break;
7069
7070 case R_ARM_ABS32_NOI:
7071 value += addend;
7072 break;
7073
7074 case R_ARM_REL32:
7075 value += addend;
7076 if (sym_flags == STT_ARM_TFUNC)
7077 value |= 1;
7078 value -= (input_section->output_section->vma
7079 + input_section->output_offset + rel->r_offset);
7080 break;
7081
7082 case R_ARM_REL32_NOI:
7083 value += addend;
7084 value -= (input_section->output_section->vma
7085 + input_section->output_offset + rel->r_offset);
7086 break;
7087
7088 case R_ARM_PREL31:
7089 value -= (input_section->output_section->vma
7090 + input_section->output_offset + rel->r_offset);
7091 value += signed_addend;
7092 if (! h || h->root.type != bfd_link_hash_undefweak)
7093 {
7094 /* Check for overflow. */
7095 if ((value ^ (value >> 1)) & (1 << 30))
7096 return bfd_reloc_overflow;
7097 }
7098 value &= 0x7fffffff;
7099 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
7100 if (sym_flags == STT_ARM_TFUNC)
7101 value |= 1;
7102 break;
7103 }
7104
7105 bfd_put_32 (input_bfd, value, hit_data);
7106 return bfd_reloc_ok;
7107
7108 case R_ARM_ABS8:
7109 value += addend;
7110 if ((long) value > 0x7f || (long) value < -0x80)
7111 return bfd_reloc_overflow;
7112
7113 bfd_put_8 (input_bfd, value, hit_data);
7114 return bfd_reloc_ok;
7115
7116 case R_ARM_ABS16:
7117 value += addend;
7118
7119 if ((long) value > 0x7fff || (long) value < -0x8000)
7120 return bfd_reloc_overflow;
7121
7122 bfd_put_16 (input_bfd, value, hit_data);
7123 return bfd_reloc_ok;
7124
7125 case R_ARM_THM_ABS5:
7126 /* Support ldr and str instructions for the thumb. */
7127 if (globals->use_rel)
7128 {
7129 /* Need to refetch addend. */
7130 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7131 /* ??? Need to determine shift amount from operand size. */
7132 addend >>= howto->rightshift;
7133 }
7134 value += addend;
7135
7136 /* ??? Isn't value unsigned? */
7137 if ((long) value > 0x1f || (long) value < -0x10)
7138 return bfd_reloc_overflow;
7139
7140 /* ??? Value needs to be properly shifted into place first. */
7141 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
7142 bfd_put_16 (input_bfd, value, hit_data);
7143 return bfd_reloc_ok;
7144
7145 case R_ARM_THM_ALU_PREL_11_0:
7146 /* Corresponds to: addw reg, pc, #offset (and similarly for subw). */
7147 {
7148 bfd_vma insn;
7149 bfd_signed_vma relocation;
7150
7151 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7152 | bfd_get_16 (input_bfd, hit_data + 2);
7153
7154 if (globals->use_rel)
7155 {
7156 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
7157 | ((insn & (1 << 26)) >> 15);
7158 if (insn & 0xf00000)
7159 signed_addend = -signed_addend;
7160 }
7161
7162 relocation = value + signed_addend;
7163 relocation -= (input_section->output_section->vma
7164 + input_section->output_offset
7165 + rel->r_offset);
7166
7167 value = abs (relocation);
7168
7169 if (value >= 0x1000)
7170 return bfd_reloc_overflow;
7171
7172 insn = (insn & 0xfb0f8f00) | (value & 0xff)
7173 | ((value & 0x700) << 4)
7174 | ((value & 0x800) << 15);
7175 if (relocation < 0)
7176 insn |= 0xa00000;
7177
7178 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7179 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7180
7181 return bfd_reloc_ok;
7182 }
7183
7184 case R_ARM_THM_PC8:
7185 /* PR 10073: This reloc is not generated by the GNU toolchain,
7186 but it is supported for compatibility with third party libraries
7187 generated by other compilers, specifically the ARM and IAR compilers. */
7188 {
7189 bfd_vma insn;
7190 bfd_signed_vma relocation;
7191
7192 insn = bfd_get_16 (input_bfd, hit_data);
7193
7194 if (globals->use_rel)
7195 addend = (insn & 0x00ff) << 2;
7196
7197 relocation = value + addend;
7198 relocation -= (input_section->output_section->vma
7199 + input_section->output_offset
7200 + rel->r_offset);
7201
7202 value = abs (relocation);
7203
7204 /* We do not check for overflow of this reloc. Although strictly
7205 speaking this is incorrect, it appears to be necessary in order
7206 to work with IAR generated relocs. Since GCC and GAS do not
7207 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
7208 a problem for them. */
7209 value &= 0x3fc;
7210
7211 insn = (insn & 0xff00) | (value >> 2);
7212
7213 bfd_put_16 (input_bfd, insn, hit_data);
7214
7215 return bfd_reloc_ok;
7216 }
7217
7218 case R_ARM_THM_PC12:
7219 /* Corresponds to: ldr.w reg, [pc, #offset]. */
7220 {
7221 bfd_vma insn;
7222 bfd_signed_vma relocation;
7223
7224 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7225 | bfd_get_16 (input_bfd, hit_data + 2);
7226
7227 if (globals->use_rel)
7228 {
7229 signed_addend = insn & 0xfff;
7230 if (!(insn & (1 << 23)))
7231 signed_addend = -signed_addend;
7232 }
7233
7234 relocation = value + signed_addend;
7235 relocation -= (input_section->output_section->vma
7236 + input_section->output_offset
7237 + rel->r_offset);
7238
7239 value = abs (relocation);
7240
7241 if (value >= 0x1000)
7242 return bfd_reloc_overflow;
7243
7244 insn = (insn & 0xff7ff000) | value;
7245 if (relocation >= 0)
7246 insn |= (1 << 23);
7247
7248 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7249 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7250
7251 return bfd_reloc_ok;
7252 }
7253
7254 case R_ARM_THM_XPC22:
7255 case R_ARM_THM_CALL:
7256 case R_ARM_THM_JUMP24:
7257 /* Thumb BL (branch long instruction). */
7258 {
7259 bfd_vma relocation;
7260 bfd_vma reloc_sign;
7261 bfd_boolean overflow = FALSE;
7262 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7263 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7264 bfd_signed_vma reloc_signed_max;
7265 bfd_signed_vma reloc_signed_min;
7266 bfd_vma check;
7267 bfd_signed_vma signed_check;
7268 int bitsize;
7269 int thumb2 = using_thumb2 (globals);
7270
7271 /* A branch to an undefined weak symbol is turned into a jump to
7272 the next instruction unless a PLT entry will be created. */
7273 if (h && h->root.type == bfd_link_hash_undefweak
7274 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7275 {
7276 bfd_put_16 (input_bfd, 0xe000, hit_data);
7277 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
7278 return bfd_reloc_ok;
7279 }
7280
7281 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
7282 with Thumb-1) involving the J1 and J2 bits. */
7283 if (globals->use_rel)
7284 {
7285 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
7286 bfd_vma upper = upper_insn & 0x3ff;
7287 bfd_vma lower = lower_insn & 0x7ff;
7288 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
7289 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
7290 bfd_vma i1 = j1 ^ s ? 0 : 1;
7291 bfd_vma i2 = j2 ^ s ? 0 : 1;
7292
7293 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
7294 /* Sign extend. */
7295 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
7296
7297 signed_addend = addend;
7298 }
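/* For illustration, the offset recovered above is
   S:I1:I2:imm10:imm11:'0' sign-extended to 25 bits, where
   I1 = NOT (J1 XOR S) and I2 = NOT (J2 XOR S).  E.g. with S = 1,
   J1 = J2 = 1 and all-ones imm10/imm11 fields the computation gives
   0xfffffe - 0x1000000 = -2, i.e. a branch two bytes backwards.  */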
7299
7300 if (r_type == R_ARM_THM_XPC22)
7301 {
7302 /* Check for Thumb to Thumb call. */
7303 /* FIXME: Should we translate the instruction into a BL
7304 instruction instead ? */
7305 if (sym_flags == STT_ARM_TFUNC)
7306 (*_bfd_error_handler)
7307 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
7308 input_bfd,
7309 h ? h->root.root.string : "(local)");
7310 }
7311 else
7312 {
7313 /* If it is not a call to Thumb, assume call to Arm.
7314 If it is a call relative to a section name, then it is not a
7315 function call at all, but rather a long jump. Calls through
7316 the PLT do not require stubs. */
7317 if (sym_flags != STT_ARM_TFUNC && sym_flags != STT_SECTION
7318 && (h == NULL || splt == NULL
7319 || h->plt.offset == (bfd_vma) -1))
7320 {
7321 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7322 {
7323 /* Convert BL to BLX. */
7324 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7325 }
7326 else if ((r_type != R_ARM_THM_CALL)
7327 && (r_type != R_ARM_THM_JUMP24))
7328 {
7329 if (elf32_thumb_to_arm_stub
7330 (info, sym_name, input_bfd, output_bfd, input_section,
7331 hit_data, sym_sec, rel->r_offset, signed_addend, value,
7332 error_message))
7333 return bfd_reloc_ok;
7334 else
7335 return bfd_reloc_dangerous;
7336 }
7337 }
7338 else if (sym_flags == STT_ARM_TFUNC && globals->use_blx
7339 && r_type == R_ARM_THM_CALL)
7340 {
7341 /* Make sure this is a BL. */
7342 lower_insn |= 0x1800;
7343 }
7344 }
7345
7346 /* Handle calls via the PLT. */
7347 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7348 {
7349 value = (splt->output_section->vma
7350 + splt->output_offset
7351 + h->plt.offset);
7352 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7353 {
7354 /* If the Thumb BLX instruction is available, convert the
7355 BL to a BLX instruction to call the ARM-mode PLT entry. */
7356 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7357 }
7358 else
7359 /* Target the Thumb stub before the ARM PLT entry. */
7360 value -= PLT_THUMB_STUB_SIZE;
7361 *unresolved_reloc_p = FALSE;
7362 }
7363
7364 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
7365 {
7366 /* Check if a stub has to be inserted because the destination
7367 is too far. */
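/* As a rough guide, a Thumb-1 BL reaches about +/-4 MB and a Thumb-2
   BL about +/-16 MB (see the THM_MAX_*_BRANCH_OFFSET and
   THM2_MAX_*_BRANCH_OFFSET limits used below); a stub is also needed
   for a Thumb to ARM transition when BLX is not available.  */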
7368 bfd_vma from;
7369 bfd_signed_vma branch_offset;
7370 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
7371
7372 from = (input_section->output_section->vma
7373 + input_section->output_offset
7374 + rel->r_offset);
7375 branch_offset = (bfd_signed_vma)(value - from);
7376
7377 if ((!thumb2
7378 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
7379 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
7380 ||
7381 (thumb2
7382 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
7383 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
7384 || ((sym_flags != STT_ARM_TFUNC)
7385 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
7386 || r_type == R_ARM_THM_JUMP24)))
7387 {
7388 /* The target is out of reach or we are changing modes, so
7389 redirect the branch to the local stub for this
7390 function. */
7391 stub_entry = elf32_arm_get_stub_entry (input_section,
7392 sym_sec, h,
7393 rel, globals);
7394 if (stub_entry != NULL)
7395 value = (stub_entry->stub_offset
7396 + stub_entry->stub_sec->output_offset
7397 + stub_entry->stub_sec->output_section->vma);
7398
7399 /* If this call becomes a call to Arm, force BLX. */
7400 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
7401 {
7402 if ((stub_entry
7403 && !arm_stub_is_thumb (stub_entry->stub_type))
7404 || (sym_flags != STT_ARM_TFUNC))
7405 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7406 }
7407 }
7408 }
7409
7410 relocation = value + signed_addend;
7411
7412 relocation -= (input_section->output_section->vma
7413 + input_section->output_offset
7414 + rel->r_offset);
7415
7416 check = relocation >> howto->rightshift;
7417
7418 /* If this is a signed value, the rightshift just dropped
7419 leading 1 bits (assuming twos complement). */
7420 if ((bfd_signed_vma) relocation >= 0)
7421 signed_check = check;
7422 else
7423 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
7424
7425 /* Calculate the permissible maximum and minimum values for
7426 this relocation according to whether we're relocating for
7427 Thumb-2 or not. */
7428 bitsize = howto->bitsize;
7429 if (!thumb2)
7430 bitsize -= 2;
7431 reloc_signed_max = ((1 << (bitsize - 1)) - 1) >> howto->rightshift;
7432 reloc_signed_min = ~reloc_signed_max;
7433
7434 /* Assumes two's complement. */
7435 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7436 overflow = TRUE;
7437
7438 if ((lower_insn & 0x5000) == 0x4000)
7439 /* For a BLX instruction, make sure that the relocation is rounded up
7440 to a word boundary. This follows the semantics of the instruction
7441 which specifies that bit 1 of the target address will come from bit
7442 1 of the base address. */
7443 relocation = (relocation + 2) & ~ 3;
7444
7445 /* Put RELOCATION back into the insn. Assumes two's complement.
7446 We use the Thumb-2 encoding, which is safe even if dealing with
7447 a Thumb-1 instruction by virtue of our overflow check above. */
7448 reloc_sign = (signed_check < 0) ? 1 : 0;
7449 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
7450 | ((relocation >> 12) & 0x3ff)
7451 | (reloc_sign << 10);
7452 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
7453 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
7454 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
7455 | ((relocation >> 1) & 0x7ff);
7456
7457 /* Put the relocated value back in the object file: */
7458 bfd_put_16 (input_bfd, upper_insn, hit_data);
7459 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7460
7461 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7462 }
7463 break;
7464
7465 case R_ARM_THM_JUMP19:
7466 /* Thumb32 conditional branch instruction. */
7467 {
7468 bfd_vma relocation;
7469 bfd_boolean overflow = FALSE;
7470 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7471 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7472 bfd_signed_vma reloc_signed_max = 0xffffe;
7473 bfd_signed_vma reloc_signed_min = -0x100000;
7474 bfd_signed_vma signed_check;
7475
7476 /* Need to refetch the addend, reconstruct the top three bits,
7477 and squish the two 11 bit pieces together. */
7478 if (globals->use_rel)
7479 {
7480 bfd_vma S = (upper_insn & 0x0400) >> 10;
7481 bfd_vma upper = (upper_insn & 0x003f);
7482 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
7483 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
7484 bfd_vma lower = (lower_insn & 0x07ff);
7485
7486 upper |= J1 << 6;
7487 upper |= J2 << 7;
7488 upper |= (!S) << 8;
7489 upper -= 0x0100; /* Sign extend. */
7490
7491 addend = (upper << 12) | (lower << 1);
7492 signed_addend = addend;
7493 }
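/* For illustration: the offset reconstructed above is
   S:J2:J1:imm6:imm11:'0', a 21-bit signed value; unlike BL, the J1
   and J2 bits are used directly rather than being XORed with S.  */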
7494
7495 /* Handle calls via the PLT. */
7496 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7497 {
7498 value = (splt->output_section->vma
7499 + splt->output_offset
7500 + h->plt.offset);
7501 /* Target the Thumb stub before the ARM PLT entry. */
7502 value -= PLT_THUMB_STUB_SIZE;
7503 *unresolved_reloc_p = FALSE;
7504 }
7505
7506 /* ??? Should handle interworking? GCC might someday try to
7507 use this for tail calls. */
7508
7509 relocation = value + signed_addend;
7510 relocation -= (input_section->output_section->vma
7511 + input_section->output_offset
7512 + rel->r_offset);
7513 signed_check = (bfd_signed_vma) relocation;
7514
7515 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7516 overflow = TRUE;
7517
7518 /* Put RELOCATION back into the insn. */
7519 {
7520 bfd_vma S = (relocation & 0x00100000) >> 20;
7521 bfd_vma J2 = (relocation & 0x00080000) >> 19;
7522 bfd_vma J1 = (relocation & 0x00040000) >> 18;
7523 bfd_vma hi = (relocation & 0x0003f000) >> 12;
7524 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
7525
7526 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
7527 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
7528 }
7529
7530 /* Put the relocated value back in the object file: */
7531 bfd_put_16 (input_bfd, upper_insn, hit_data);
7532 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7533
7534 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7535 }
7536
7537 case R_ARM_THM_JUMP11:
7538 case R_ARM_THM_JUMP8:
7539 case R_ARM_THM_JUMP6:
7540 /* Thumb B (branch) instruction. */
7541 {
7542 bfd_signed_vma relocation;
7543 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
7544 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
7545 bfd_signed_vma signed_check;
7546
7547 /* CBZ/CBNZ cannot jump backward. */
7548 if (r_type == R_ARM_THM_JUMP6)
7549 reloc_signed_min = 0;
7550
7551 if (globals->use_rel)
7552 {
7553 /* Need to refetch addend. */
7554 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7555 if (addend & ((howto->src_mask + 1) >> 1))
7556 {
7557 signed_addend = -1;
7558 signed_addend &= ~ howto->src_mask;
7559 signed_addend |= addend;
7560 }
7561 else
7562 signed_addend = addend;
7563 /* The value in the insn has been right shifted. We need to
7564 undo this, so that we can perform the address calculation
7565 in terms of bytes. */
7566 signed_addend <<= howto->rightshift;
7567 }
7568 relocation = value + signed_addend;
7569
7570 relocation -= (input_section->output_section->vma
7571 + input_section->output_offset
7572 + rel->r_offset);
7573
7574 relocation >>= howto->rightshift;
7575 signed_check = relocation;
7576
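/* For R_ARM_THM_JUMP6 (CBZ/CBNZ) the halfword offset computed above
   is split below into the i bit (insn bit 9) and imm5 (insn bits
   7:3); the other relocs simply mask the offset into dst_mask.  */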
7577 if (r_type == R_ARM_THM_JUMP6)
7578 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
7579 else
7580 relocation &= howto->dst_mask;
7581 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
7582
7583 bfd_put_16 (input_bfd, relocation, hit_data);
7584
7585 /* Assumes two's complement. */
7586 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7587 return bfd_reloc_overflow;
7588
7589 return bfd_reloc_ok;
7590 }
7591
7592 case R_ARM_ALU_PCREL7_0:
7593 case R_ARM_ALU_PCREL15_8:
7594 case R_ARM_ALU_PCREL23_15:
7595 {
7596 bfd_vma insn;
7597 bfd_vma relocation;
7598
7599 insn = bfd_get_32 (input_bfd, hit_data);
7600 if (globals->use_rel)
7601 {
7602 /* Extract the addend. */
7603 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
7604 signed_addend = addend;
7605 }
7606 relocation = value + signed_addend;
7607
7608 relocation -= (input_section->output_section->vma
7609 + input_section->output_offset
7610 + rel->r_offset);
7611 insn = (insn & ~0xfff)
7612 | ((howto->bitpos << 7) & 0xf00)
7613 | ((relocation >> howto->bitpos) & 0xff);
7614 bfd_put_32 (input_bfd, insn, hit_data);
7615 }
7616 return bfd_reloc_ok;
7617
7618 case R_ARM_GNU_VTINHERIT:
7619 case R_ARM_GNU_VTENTRY:
7620 return bfd_reloc_ok;
7621
7622 case R_ARM_GOTOFF32:
7623 /* Relocation is relative to the start of the
7624 global offset table. */
7625
7626 BFD_ASSERT (sgot != NULL);
7627 if (sgot == NULL)
7628 return bfd_reloc_notsupported;
7629
7630 /* If we are addressing a Thumb function, we need to adjust the
7631 address by one, so that attempts to call the function pointer will
7632 correctly interpret it as Thumb code. */
7633 if (sym_flags == STT_ARM_TFUNC)
7634 value += 1;
7635
7636 /* Note that sgot->output_offset is not involved in this
7637 calculation. We always want the start of .got. If we
7638 define _GLOBAL_OFFSET_TABLE in a different way, as is
7639 permitted by the ABI, we might have to change this
7640 calculation. */
7641 value -= sgot->output_section->vma;
7642 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7643 contents, rel->r_offset, value,
7644 rel->r_addend);
7645
7646 case R_ARM_GOTPC:
7647 /* Use global offset table as symbol value. */
7648 BFD_ASSERT (sgot != NULL);
7649
7650 if (sgot == NULL)
7651 return bfd_reloc_notsupported;
7652
7653 *unresolved_reloc_p = FALSE;
7654 value = sgot->output_section->vma;
7655 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7656 contents, rel->r_offset, value,
7657 rel->r_addend);
7658
7659 case R_ARM_GOT32:
7660 case R_ARM_GOT_PREL:
7661 /* Relocation is to the entry for this symbol in the
7662 global offset table. */
7663 if (sgot == NULL)
7664 return bfd_reloc_notsupported;
7665
7666 if (h != NULL)
7667 {
7668 bfd_vma off;
7669 bfd_boolean dyn;
7670
7671 off = h->got.offset;
7672 BFD_ASSERT (off != (bfd_vma) -1);
7673 dyn = globals->root.dynamic_sections_created;
7674
7675 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7676 || (info->shared
7677 && SYMBOL_REFERENCES_LOCAL (info, h))
7678 || (ELF_ST_VISIBILITY (h->other)
7679 && h->root.type == bfd_link_hash_undefweak))
7680 {
7681 /* This is actually a static link, or it is a -Bsymbolic link
7682 and the symbol is defined locally. We must initialize this
7683 entry in the global offset table. Since the offset must
7684 always be a multiple of 4, we use the least significant bit
7685 to record whether we have initialized it already.
7686
7687 When doing a dynamic link, we create a .rel(a).got relocation
7688 entry to initialize the value. This is done in the
7689 finish_dynamic_symbol routine. */
7690 if ((off & 1) != 0)
7691 off &= ~1;
7692 else
7693 {
7694 /* If we are addressing a Thumb function, we need to
7695 adjust the address by one, so that attempts to
7696 call the function pointer will correctly
7697 interpret it as Thumb code. */
7698 if (sym_flags == STT_ARM_TFUNC)
7699 value |= 1;
7700
7701 bfd_put_32 (output_bfd, value, sgot->contents + off);
7702 h->got.offset |= 1;
7703 }
7704 }
7705 else
7706 *unresolved_reloc_p = FALSE;
7707
7708 value = sgot->output_offset + off;
7709 }
7710 else
7711 {
7712 bfd_vma off;
7713
7714 BFD_ASSERT (local_got_offsets != NULL &&
7715 local_got_offsets[r_symndx] != (bfd_vma) -1);
7716
7717 off = local_got_offsets[r_symndx];
7718
7719 /* The offset must always be a multiple of 4. We use the
7720 least significant bit to record whether we have already
7721 generated the necessary reloc. */
7722 if ((off & 1) != 0)
7723 off &= ~1;
7724 else
7725 {
7726 /* If we are addressing a Thumb function, we need to
7727 adjust the address by one, so that attempts to
7728 call the function pointer will correctly
7729 interpret it as Thumb code. */
7730 if (sym_flags == STT_ARM_TFUNC)
7731 value |= 1;
7732
7733 if (globals->use_rel)
7734 bfd_put_32 (output_bfd, value, sgot->contents + off);
7735
7736 if (info->shared)
7737 {
7738 asection * srelgot;
7739 Elf_Internal_Rela outrel;
7740 bfd_byte *loc;
7741
7742 srelgot = (bfd_get_section_by_name
7743 (dynobj, RELOC_SECTION (globals, ".got")));
7744 BFD_ASSERT (srelgot != NULL);
7745
7746 outrel.r_addend = addend + value;
7747 outrel.r_offset = (sgot->output_section->vma
7748 + sgot->output_offset
7749 + off);
7750 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
7751 loc = srelgot->contents;
7752 loc += srelgot->reloc_count++ * RELOC_SIZE (globals);
7753 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7754 }
7755
7756 local_got_offsets[r_symndx] |= 1;
7757 }
7758
7759 value = sgot->output_offset + off;
7760 }
7761 if (r_type != R_ARM_GOT32)
7762 value += sgot->output_section->vma;
7763
7764 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7765 contents, rel->r_offset, value,
7766 rel->r_addend);
7767
7768 case R_ARM_TLS_LDO32:
7769 value = value - dtpoff_base (info);
7770
7771 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7772 contents, rel->r_offset, value,
7773 rel->r_addend);
7774
7775 case R_ARM_TLS_LDM32:
7776 {
7777 bfd_vma off;
7778
7779 if (globals->sgot == NULL)
7780 abort ();
7781
7782 off = globals->tls_ldm_got.offset;
7783
7784 if ((off & 1) != 0)
7785 off &= ~1;
7786 else
7787 {
7788 /* If we don't know the module number, create a relocation
7789 for it. */
7790 if (info->shared)
7791 {
7792 Elf_Internal_Rela outrel;
7793 bfd_byte *loc;
7794
7795 if (globals->srelgot == NULL)
7796 abort ();
7797
7798 outrel.r_addend = 0;
7799 outrel.r_offset = (globals->sgot->output_section->vma
7800 + globals->sgot->output_offset + off);
7801 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
7802
7803 if (globals->use_rel)
7804 bfd_put_32 (output_bfd, outrel.r_addend,
7805 globals->sgot->contents + off);
7806
7807 loc = globals->srelgot->contents;
7808 loc += globals->srelgot->reloc_count++ * RELOC_SIZE (globals);
7809 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7810 }
7811 else
7812 bfd_put_32 (output_bfd, 1, globals->sgot->contents + off);
7813
7814 globals->tls_ldm_got.offset |= 1;
7815 }
7816
7817 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
7818 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
7819
7820 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7821 contents, rel->r_offset, value,
7822 rel->r_addend);
7823 }
7824
7825 case R_ARM_TLS_GD32:
7826 case R_ARM_TLS_IE32:
7827 {
7828 bfd_vma off;
7829 int indx;
7830 char tls_type;
7831
7832 if (globals->sgot == NULL)
7833 abort ();
7834
7835 indx = 0;
7836 if (h != NULL)
7837 {
7838 bfd_boolean dyn;
7839 dyn = globals->root.dynamic_sections_created;
7840 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7841 && (!info->shared
7842 || !SYMBOL_REFERENCES_LOCAL (info, h)))
7843 {
7844 *unresolved_reloc_p = FALSE;
7845 indx = h->dynindx;
7846 }
7847 off = h->got.offset;
7848 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
7849 }
7850 else
7851 {
7852 if (local_got_offsets == NULL)
7853 abort ();
7854 off = local_got_offsets[r_symndx];
7855 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
7856 }
7857
7858 if (tls_type == GOT_UNKNOWN)
7859 abort ();
7860
7861 if ((off & 1) != 0)
7862 off &= ~1;
7863 else
7864 {
7865 bfd_boolean need_relocs = FALSE;
7866 Elf_Internal_Rela outrel;
7867 bfd_byte *loc = NULL;
7868 int cur_off = off;
7869
7870 /* The GOT entries have not been initialized yet. Do it
7871 now, and emit any relocations. If both an IE GOT and a
7872 GD GOT are necessary, we emit the GD first. */
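/* A GD entry occupies two GOT words (the module index, then the
   offset within that module's TLS block), while an IE entry occupies
   a single word holding the TP-relative offset; hence the cur_off
   increments of 8 and 4 below.  */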
7873
7874 if ((info->shared || indx != 0)
7875 && (h == NULL
7876 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7877 || h->root.type != bfd_link_hash_undefweak))
7878 {
7879 need_relocs = TRUE;
7880 if (globals->srelgot == NULL)
7881 abort ();
7882 loc = globals->srelgot->contents;
7883 loc += globals->srelgot->reloc_count * RELOC_SIZE (globals);
7884 }
7885
7886 if (tls_type & GOT_TLS_GD)
7887 {
7888 if (need_relocs)
7889 {
7890 outrel.r_addend = 0;
7891 outrel.r_offset = (globals->sgot->output_section->vma
7892 + globals->sgot->output_offset
7893 + cur_off);
7894 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
7895
7896 if (globals->use_rel)
7897 bfd_put_32 (output_bfd, outrel.r_addend,
7898 globals->sgot->contents + cur_off);
7899
7900 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7901 globals->srelgot->reloc_count++;
7902 loc += RELOC_SIZE (globals);
7903
7904 if (indx == 0)
7905 bfd_put_32 (output_bfd, value - dtpoff_base (info),
7906 globals->sgot->contents + cur_off + 4);
7907 else
7908 {
7909 outrel.r_addend = 0;
7910 outrel.r_info = ELF32_R_INFO (indx,
7911 R_ARM_TLS_DTPOFF32);
7912 outrel.r_offset += 4;
7913
7914 if (globals->use_rel)
7915 bfd_put_32 (output_bfd, outrel.r_addend,
7916 globals->sgot->contents + cur_off + 4);
7917
7918
7919 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7920 globals->srelgot->reloc_count++;
7921 loc += RELOC_SIZE (globals);
7922 }
7923 }
7924 else
7925 {
7926 /* If we are not emitting relocations for a
7927 general dynamic reference, then we must be in a
7928 static link or an executable link with the
7929 symbol binding locally. Mark it as belonging
7930 to module 1, the executable. */
7931 bfd_put_32 (output_bfd, 1,
7932 globals->sgot->contents + cur_off);
7933 bfd_put_32 (output_bfd, value - dtpoff_base (info),
7934 globals->sgot->contents + cur_off + 4);
7935 }
7936
7937 cur_off += 8;
7938 }
7939
7940 if (tls_type & GOT_TLS_IE)
7941 {
7942 if (need_relocs)
7943 {
7944 if (indx == 0)
7945 outrel.r_addend = value - dtpoff_base (info);
7946 else
7947 outrel.r_addend = 0;
7948 outrel.r_offset = (globals->sgot->output_section->vma
7949 + globals->sgot->output_offset
7950 + cur_off);
7951 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
7952
7953 if (globals->use_rel)
7954 bfd_put_32 (output_bfd, outrel.r_addend,
7955 globals->sgot->contents + cur_off);
7956
7957 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7958 globals->srelgot->reloc_count++;
7959 loc += RELOC_SIZE (globals);
7960 }
7961 else
7962 bfd_put_32 (output_bfd, tpoff (info, value),
7963 globals->sgot->contents + cur_off);
7964 cur_off += 4;
7965 }
7966
7967 if (h != NULL)
7968 h->got.offset |= 1;
7969 else
7970 local_got_offsets[r_symndx] |= 1;
7971 }
7972
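/* The GD pair, if present, was emitted first (see above), so an
   IE-style reloc against a symbol that also has a GD entry must skip
   past those two words.  */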
7973 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
7974 off += 8;
7975 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
7976 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
7977
7978 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7979 contents, rel->r_offset, value,
7980 rel->r_addend);
7981 }
7982
7983 case R_ARM_TLS_LE32:
7984 if (info->shared)
7985 {
7986 (*_bfd_error_handler)
7987 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
7988 input_bfd, input_section,
7989 (long) rel->r_offset, howto->name);
7990 return FALSE;
7991 }
7992 else
7993 value = tpoff (info, value);
7994
7995 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7996 contents, rel->r_offset, value,
7997 rel->r_addend);
7998
7999 case R_ARM_V4BX:
8000 if (globals->fix_v4bx)
8001 {
8002 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8003
8004 /* Ensure that we have a BX instruction. */
8005 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
8006
8007 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
8008 {
8009 /* Branch to veneer. */
8010 bfd_vma glue_addr;
8011 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
8012 glue_addr -= input_section->output_section->vma
8013 + input_section->output_offset
8014 + rel->r_offset + 8;
8015 insn = (insn & 0xf0000000) | 0x0a000000
8016 | ((glue_addr >> 2) & 0x00ffffff);
8017 }
8018 else
8019 {
8020 /* Preserve Rm (lowest four bits) and the condition code
8021 (highest four bits). Other bits encode MOV PC,Rm. */
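/* For example, "BX r3" (0xe12fff13) is rewritten here as
   "MOV pc, r3" (0xe1a0f003): the condition field and Rm are kept
   and every other bit is replaced.  */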
8022 insn = (insn & 0xf000000f) | 0x01a0f000;
8023 }
8024
8025 bfd_put_32 (input_bfd, insn, hit_data);
8026 }
8027 return bfd_reloc_ok;
8028
8029 case R_ARM_MOVW_ABS_NC:
8030 case R_ARM_MOVT_ABS:
8031 case R_ARM_MOVW_PREL_NC:
8032 case R_ARM_MOVT_PREL:
8033 /* Until we properly support segment-base-relative addressing,
8034 we assume the segment base to be zero, as for the group relocations.
8035 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
8036 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
8037 case R_ARM_MOVW_BREL_NC:
8038 case R_ARM_MOVW_BREL:
8039 case R_ARM_MOVT_BREL:
8040 {
8041 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8042
8043 if (globals->use_rel)
8044 {
8045 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8046 signed_addend = (addend ^ 0x8000) - 0x8000;
8047 }
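/* (addend ^ 0x8000) - 0x8000 sign-extends the 16-bit immediate
   without a branch: e.g. 0xfffe becomes -2 while 0x7ffe is left
   unchanged.  */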
8048
8049 value += signed_addend;
8050
8051 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
8052 value -= (input_section->output_section->vma
8053 + input_section->output_offset + rel->r_offset);
8054
8055 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
8056 return bfd_reloc_overflow;
8057
8058 if (sym_flags == STT_ARM_TFUNC)
8059 value |= 1;
8060
8061 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
8062 || r_type == R_ARM_MOVT_BREL)
8063 value >>= 16;
8064
8065 insn &= 0xfff0f000;
8066 insn |= value & 0xfff;
8067 insn |= (value & 0xf000) << 4;
8068 bfd_put_32 (input_bfd, insn, hit_data);
8069 }
8070 return bfd_reloc_ok;
8071
8072 case R_ARM_THM_MOVW_ABS_NC:
8073 case R_ARM_THM_MOVT_ABS:
8074 case R_ARM_THM_MOVW_PREL_NC:
8075 case R_ARM_THM_MOVT_PREL:
8076 /* Until we properly support segment-base-relative addressing,
8077 we assume the segment base to be zero, as for the above relocations.
8078 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
8079 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
8080 as R_ARM_THM_MOVT_ABS. */
8081 case R_ARM_THM_MOVW_BREL_NC:
8082 case R_ARM_THM_MOVW_BREL:
8083 case R_ARM_THM_MOVT_BREL:
8084 {
8085 bfd_vma insn;
8086
8087 insn = bfd_get_16 (input_bfd, hit_data) << 16;
8088 insn |= bfd_get_16 (input_bfd, hit_data + 2);
8089
8090 if (globals->use_rel)
8091 {
8092 addend = ((insn >> 4) & 0xf000)
8093 | ((insn >> 15) & 0x0800)
8094 | ((insn >> 4) & 0x0700)
8095 | (insn & 0x00ff);
8096 signed_addend = (addend ^ 0x8000) - 0x8000;
8097 }
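/* The 16-bit immediate of Thumb-2 MOVW/MOVT is scattered across the
   instruction as imm4:i:imm3:imm8; the shifts above gather those
   fields back into a contiguous value before sign-extending it in
   the same way as the ARM variant.  */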
8098
8099 value += signed_addend;
8100
8101 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
8102 value -= (input_section->output_section->vma
8103 + input_section->output_offset + rel->r_offset);
8104
8105 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
8106 return bfd_reloc_overflow;
8107
8108 if (sym_flags == STT_ARM_TFUNC)
8109 value |= 1;
8110
8111 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
8112 || r_type == R_ARM_THM_MOVT_BREL)
8113 value >>= 16;
8114
8115 insn &= 0xfbf08f00;
8116 insn |= (value & 0xf000) << 4;
8117 insn |= (value & 0x0800) << 15;
8118 insn |= (value & 0x0700) << 4;
8119 insn |= (value & 0x00ff);
8120
8121 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8122 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8123 }
8124 return bfd_reloc_ok;
8125
8126 case R_ARM_ALU_PC_G0_NC:
8127 case R_ARM_ALU_PC_G1_NC:
8128 case R_ARM_ALU_PC_G0:
8129 case R_ARM_ALU_PC_G1:
8130 case R_ARM_ALU_PC_G2:
8131 case R_ARM_ALU_SB_G0_NC:
8132 case R_ARM_ALU_SB_G1_NC:
8133 case R_ARM_ALU_SB_G0:
8134 case R_ARM_ALU_SB_G1:
8135 case R_ARM_ALU_SB_G2:
8136 {
8137 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8138 bfd_vma pc = input_section->output_section->vma
8139 + input_section->output_offset + rel->r_offset;
8140 /* sb should be the origin of the *segment* containing the symbol.
8141 It is not clear how to obtain this OS-dependent value, so we
8142 make an arbitrary choice of zero. */
8143 bfd_vma sb = 0;
8144 bfd_vma residual;
8145 bfd_vma g_n;
8146 bfd_signed_vma signed_value;
8147 int group = 0;
8148
8149 /* Determine which group of bits to select. */
8150 switch (r_type)
8151 {
8152 case R_ARM_ALU_PC_G0_NC:
8153 case R_ARM_ALU_PC_G0:
8154 case R_ARM_ALU_SB_G0_NC:
8155 case R_ARM_ALU_SB_G0:
8156 group = 0;
8157 break;
8158
8159 case R_ARM_ALU_PC_G1_NC:
8160 case R_ARM_ALU_PC_G1:
8161 case R_ARM_ALU_SB_G1_NC:
8162 case R_ARM_ALU_SB_G1:
8163 group = 1;
8164 break;
8165
8166 case R_ARM_ALU_PC_G2:
8167 case R_ARM_ALU_SB_G2:
8168 group = 2;
8169 break;
8170
8171 default:
8172 abort ();
8173 }
8174
8175 /* If REL, extract the addend from the insn. If RELA, it will
8176 have already been fetched for us. */
8177 if (globals->use_rel)
8178 {
8179 int negative;
8180 bfd_vma constant = insn & 0xff;
8181 bfd_vma rotation = (insn & 0xf00) >> 8;
8182
8183 if (rotation == 0)
8184 signed_addend = constant;
8185 else
8186 {
8187 /* Compensate for the fact that in the instruction, the
8188 rotation is stored in multiples of 2 bits. */
8189 rotation *= 2;
8190
8191 /* Rotate "constant" right by "rotation" bits. */
8192 signed_addend = (constant >> rotation) |
8193 (constant << (8 * sizeof (bfd_vma) - rotation));
8194 }
8195
8196 /* Determine if the instruction is an ADD or a SUB.
8197 (For REL, this determines the sign of the addend.) */
8198 negative = identify_add_or_sub (insn);
8199 if (negative == 0)
8200 {
8201 (*_bfd_error_handler)
8202 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
8203 input_bfd, input_section,
8204 (long) rel->r_offset, howto->name);
8205 return bfd_reloc_overflow;
8206 }
8207
8208 signed_addend *= negative;
8209 }
8210
8211 /* Compute the value (X) to go in the place. */
8212 if (r_type == R_ARM_ALU_PC_G0_NC
8213 || r_type == R_ARM_ALU_PC_G1_NC
8214 || r_type == R_ARM_ALU_PC_G0
8215 || r_type == R_ARM_ALU_PC_G1
8216 || r_type == R_ARM_ALU_PC_G2)
8217 /* PC relative. */
8218 signed_value = value - pc + signed_addend;
8219 else
8220 /* Section base relative. */
8221 signed_value = value - sb + signed_addend;
8222
8223 /* If the target symbol is a Thumb function, then set the
8224 Thumb bit in the address. */
8225 if (sym_flags == STT_ARM_TFUNC)
8226 signed_value |= 1;
8227
8228 /* Calculate the value of the relevant G_n, in encoded
8229 constant-with-rotation format. */
8230 g_n = calculate_group_reloc_mask (abs (signed_value), group,
8231 &residual);
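/* For illustration, a value of 0x12345 splits into the chunks
   G0 = 0x12000, G1 = 0x344 and G2 = 0x1 (each representable as an
   8-bit constant with an even rotation); a G0/G1/G2 ALU relocation
   chain then adds or subtracts these chunks in turn.  */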
8232
8233 /* Check for overflow if required. */
8234 if ((r_type == R_ARM_ALU_PC_G0
8235 || r_type == R_ARM_ALU_PC_G1
8236 || r_type == R_ARM_ALU_PC_G2
8237 || r_type == R_ARM_ALU_SB_G0
8238 || r_type == R_ARM_ALU_SB_G1
8239 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
8240 {
8241 (*_bfd_error_handler)
8242 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8243 input_bfd, input_section,
8244 (long) rel->r_offset, abs (signed_value), howto->name);
8245 return bfd_reloc_overflow;
8246 }
8247
8248 /* Mask out the value and the ADD/SUB part of the opcode; take care
8249 not to destroy the S bit. */
8250 insn &= 0xff1ff000;
8251
8252 /* Set the opcode according to whether the value to go in the
8253 place is negative. */
8254 if (signed_value < 0)
8255 insn |= 1 << 22;
8256 else
8257 insn |= 1 << 23;
8258
8259 /* Encode the offset. */
8260 insn |= g_n;
8261
8262 bfd_put_32 (input_bfd, insn, hit_data);
8263 }
8264 return bfd_reloc_ok;
8265
8266 case R_ARM_LDR_PC_G0:
8267 case R_ARM_LDR_PC_G1:
8268 case R_ARM_LDR_PC_G2:
8269 case R_ARM_LDR_SB_G0:
8270 case R_ARM_LDR_SB_G1:
8271 case R_ARM_LDR_SB_G2:
8272 {
8273 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8274 bfd_vma pc = input_section->output_section->vma
8275 + input_section->output_offset + rel->r_offset;
8276 bfd_vma sb = 0; /* See note above. */
8277 bfd_vma residual;
8278 bfd_signed_vma signed_value;
8279 int group = 0;
8280
8281 /* Determine which groups of bits to calculate. */
8282 switch (r_type)
8283 {
8284 case R_ARM_LDR_PC_G0:
8285 case R_ARM_LDR_SB_G0:
8286 group = 0;
8287 break;
8288
8289 case R_ARM_LDR_PC_G1:
8290 case R_ARM_LDR_SB_G1:
8291 group = 1;
8292 break;
8293
8294 case R_ARM_LDR_PC_G2:
8295 case R_ARM_LDR_SB_G2:
8296 group = 2;
8297 break;
8298
8299 default:
8300 abort ();
8301 }
8302
8303 /* If REL, extract the addend from the insn. If RELA, it will
8304 have already been fetched for us. */
8305 if (globals->use_rel)
8306 {
8307 int negative = (insn & (1 << 23)) ? 1 : -1;
8308 signed_addend = negative * (insn & 0xfff);
8309 }
8310
8311 /* Compute the value (X) to go in the place. */
8312 if (r_type == R_ARM_LDR_PC_G0
8313 || r_type == R_ARM_LDR_PC_G1
8314 || r_type == R_ARM_LDR_PC_G2)
8315 /* PC relative. */
8316 signed_value = value - pc + signed_addend;
8317 else
8318 /* Section base relative. */
8319 signed_value = value - sb + signed_addend;
8320
8321 /* Calculate the value of the relevant G_{n-1} to obtain
8322 the residual at that stage. */
8323 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
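/* For example, with a value of 0x12345 and R_ARM_LDR_PC_G1 the G0
   chunk (0x12000) is peeled off first, leaving a residual of 0x345,
   which fits the 12-bit offset field checked below.  */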
8324
8325 /* Check for overflow. */
8326 if (residual >= 0x1000)
8327 {
8328 (*_bfd_error_handler)
8329 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8330 input_bfd, input_section,
8331 (long) rel->r_offset, abs (signed_value), howto->name);
8332 return bfd_reloc_overflow;
8333 }
8334
8335 /* Mask out the value and U bit. */
8336 insn &= 0xff7ff000;
8337
8338 /* Set the U bit if the value to go in the place is non-negative. */
8339 if (signed_value >= 0)
8340 insn |= 1 << 23;
8341
8342 /* Encode the offset. */
8343 insn |= residual;
8344
8345 bfd_put_32 (input_bfd, insn, hit_data);
8346 }
8347 return bfd_reloc_ok;
8348
8349 case R_ARM_LDRS_PC_G0:
8350 case R_ARM_LDRS_PC_G1:
8351 case R_ARM_LDRS_PC_G2:
8352 case R_ARM_LDRS_SB_G0:
8353 case R_ARM_LDRS_SB_G1:
8354 case R_ARM_LDRS_SB_G2:
8355 {
8356 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8357 bfd_vma pc = input_section->output_section->vma
8358 + input_section->output_offset + rel->r_offset;
8359 bfd_vma sb = 0; /* See note above. */
8360 bfd_vma residual;
8361 bfd_signed_vma signed_value;
8362 int group = 0;
8363
8364 /* Determine which groups of bits to calculate. */
8365 switch (r_type)
8366 {
8367 case R_ARM_LDRS_PC_G0:
8368 case R_ARM_LDRS_SB_G0:
8369 group = 0;
8370 break;
8371
8372 case R_ARM_LDRS_PC_G1:
8373 case R_ARM_LDRS_SB_G1:
8374 group = 1;
8375 break;
8376
8377 case R_ARM_LDRS_PC_G2:
8378 case R_ARM_LDRS_SB_G2:
8379 group = 2;
8380 break;
8381
8382 default:
8383 abort ();
8384 }
8385
8386 /* If REL, extract the addend from the insn. If RELA, it will
8387 have already been fetched for us. */
8388 if (globals->use_rel)
8389 {
8390 int negative = (insn & (1 << 23)) ? 1 : -1;
8391 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
8392 }
8393
8394 /* Compute the value (X) to go in the place. */
8395 if (r_type == R_ARM_LDRS_PC_G0
8396 || r_type == R_ARM_LDRS_PC_G1
8397 || r_type == R_ARM_LDRS_PC_G2)
8398 /* PC relative. */
8399 signed_value = value - pc + signed_addend;
8400 else
8401 /* Section base relative. */
8402 signed_value = value - sb + signed_addend;
8403
8404 /* Calculate the value of the relevant G_{n-1} to obtain
8405 the residual at that stage. */
8406 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8407
8408 /* Check for overflow. */
8409 if (residual >= 0x100)
8410 {
8411 (*_bfd_error_handler)
8412 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8413 input_bfd, input_section,
8414 (long) rel->r_offset, abs (signed_value), howto->name);
8415 return bfd_reloc_overflow;
8416 }
8417
8418 /* Mask out the value and U bit. */
8419 insn &= 0xff7ff0f0;
8420
8421 /* Set the U bit if the value to go in the place is non-negative. */
8422 if (signed_value >= 0)
8423 insn |= 1 << 23;
8424
8425 /* Encode the offset. */
8426 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
8427
8428 bfd_put_32 (input_bfd, insn, hit_data);
8429 }
8430 return bfd_reloc_ok;
8431
8432 case R_ARM_LDC_PC_G0:
8433 case R_ARM_LDC_PC_G1:
8434 case R_ARM_LDC_PC_G2:
8435 case R_ARM_LDC_SB_G0:
8436 case R_ARM_LDC_SB_G1:
8437 case R_ARM_LDC_SB_G2:
8438 {
8439 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8440 bfd_vma pc = input_section->output_section->vma
8441 + input_section->output_offset + rel->r_offset;
8442 bfd_vma sb = 0; /* See note above. */
8443 bfd_vma residual;
8444 bfd_signed_vma signed_value;
8445 int group = 0;
8446
8447 /* Determine which groups of bits to calculate. */
8448 switch (r_type)
8449 {
8450 case R_ARM_LDC_PC_G0:
8451 case R_ARM_LDC_SB_G0:
8452 group = 0;
8453 break;
8454
8455 case R_ARM_LDC_PC_G1:
8456 case R_ARM_LDC_SB_G1:
8457 group = 1;
8458 break;
8459
8460 case R_ARM_LDC_PC_G2:
8461 case R_ARM_LDC_SB_G2:
8462 group = 2;
8463 break;
8464
8465 default:
8466 abort ();
8467 }
8468
8469 /* If REL, extract the addend from the insn. If RELA, it will
8470 have already been fetched for us. */
8471 if (globals->use_rel)
8472 {
8473 int negative = (insn & (1 << 23)) ? 1 : -1;
8474 signed_addend = negative * ((insn & 0xff) << 2);
8475 }
8476
8477 /* Compute the value (X) to go in the place. */
8478 if (r_type == R_ARM_LDC_PC_G0
8479 || r_type == R_ARM_LDC_PC_G1
8480 || r_type == R_ARM_LDC_PC_G2)
8481 /* PC relative. */
8482 signed_value = value - pc + signed_addend;
8483 else
8484 /* Section base relative. */
8485 signed_value = value - sb + signed_addend;
8486
8487 /* Calculate the value of the relevant G_{n-1} to obtain
8488 the residual at that stage. */
8489 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8490
8491 /* Check for overflow. (The absolute value to go in the place must be
8492 divisible by four and, after having been divided by four, must
8493 fit in eight bits.) */
8494 if ((residual & 0x3) != 0 || residual >= 0x400)
8495 {
8496 (*_bfd_error_handler)
8497 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8498 input_bfd, input_section,
8499 (long) rel->r_offset, abs (signed_value), howto->name);
8500 return bfd_reloc_overflow;
8501 }
8502
8503 /* Mask out the value and U bit. */
8504 insn &= 0xff7fff00;
8505
8506 /* Set the U bit if the value to go in the place is non-negative. */
8507 if (signed_value >= 0)
8508 insn |= 1 << 23;
8509
8510 /* Encode the offset. */
8511 insn |= residual >> 2;
8512
8513 bfd_put_32 (input_bfd, insn, hit_data);
8514 }
8515 return bfd_reloc_ok;
8516
8517 default:
8518 return bfd_reloc_notsupported;
8519 }
8520 }
8521
8522 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
8523 static void
8524 arm_add_to_rel (bfd * abfd,
8525 bfd_byte * address,
8526 reloc_howto_type * howto,
8527 bfd_signed_vma increment)
8528 {
8529 bfd_signed_vma addend;
8530
8531 if (howto->type == R_ARM_THM_CALL
8532 || howto->type == R_ARM_THM_JUMP24)
8533 {
8534 int upper_insn, lower_insn;
8535 int upper, lower;
8536
8537 upper_insn = bfd_get_16 (abfd, address);
8538 lower_insn = bfd_get_16 (abfd, address + 2);
8539 upper = upper_insn & 0x7ff;
8540 lower = lower_insn & 0x7ff;
8541
8542 addend = (upper << 12) | (lower << 1);
8543 addend += increment;
8544 addend >>= 1;
8545
8546 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
8547 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
8548
8549 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
8550 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
8551 }
8552 else
8553 {
8554 bfd_vma contents;
8555
8556 contents = bfd_get_32 (abfd, address);
8557
8558 /* Get the (signed) value from the instruction. */
8559 addend = contents & howto->src_mask;
8560 if (addend & ((howto->src_mask + 1) >> 1))
8561 {
8562 bfd_signed_vma mask;
8563
8564 mask = -1;
8565 mask &= ~ howto->src_mask;
8566 addend |= mask;
8567 }
8568
8569 /* Add in the increment (which is a byte value). */
8570 switch (howto->type)
8571 {
8572 default:
8573 addend += increment;
8574 break;
8575
8576 case R_ARM_PC24:
8577 case R_ARM_PLT32:
8578 case R_ARM_CALL:
8579 case R_ARM_JUMP24:
8580 addend <<= howto->size;
8581 addend += increment;
8582
8583 /* Should we check for overflow here ? */
8584
8585 /* Drop any undesired bits. */
8586 addend >>= howto->rightshift;
8587 break;
8588 }
8589
8590 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
8591
8592 bfd_put_32 (abfd, contents, address);
8593 }
8594 }
8595
8596 #define IS_ARM_TLS_RELOC(R_TYPE) \
8597 ((R_TYPE) == R_ARM_TLS_GD32 \
8598 || (R_TYPE) == R_ARM_TLS_LDO32 \
8599 || (R_TYPE) == R_ARM_TLS_LDM32 \
8600 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
8601 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
8602 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
8603 || (R_TYPE) == R_ARM_TLS_LE32 \
8604 || (R_TYPE) == R_ARM_TLS_IE32)
8605
8606 /* Relocate an ARM ELF section. */
8607
8608 static bfd_boolean
8609 elf32_arm_relocate_section (bfd * output_bfd,
8610 struct bfd_link_info * info,
8611 bfd * input_bfd,
8612 asection * input_section,
8613 bfd_byte * contents,
8614 Elf_Internal_Rela * relocs,
8615 Elf_Internal_Sym * local_syms,
8616 asection ** local_sections)
8617 {
8618 Elf_Internal_Shdr *symtab_hdr;
8619 struct elf_link_hash_entry **sym_hashes;
8620 Elf_Internal_Rela *rel;
8621 Elf_Internal_Rela *relend;
8622 const char *name;
8623 struct elf32_arm_link_hash_table * globals;
8624
8625 globals = elf32_arm_hash_table (info);
8626
8627 symtab_hdr = & elf_symtab_hdr (input_bfd);
8628 sym_hashes = elf_sym_hashes (input_bfd);
8629
8630 rel = relocs;
8631 relend = relocs + input_section->reloc_count;
8632 for (; rel < relend; rel++)
8633 {
8634 int r_type;
8635 reloc_howto_type * howto;
8636 unsigned long r_symndx;
8637 Elf_Internal_Sym * sym;
8638 asection * sec;
8639 struct elf_link_hash_entry * h;
8640 bfd_vma relocation;
8641 bfd_reloc_status_type r;
8642 arelent bfd_reloc;
8643 char sym_type;
8644 bfd_boolean unresolved_reloc = FALSE;
8645 char *error_message = NULL;
8646
8647 r_symndx = ELF32_R_SYM (rel->r_info);
8648 r_type = ELF32_R_TYPE (rel->r_info);
8649 r_type = arm_real_reloc_type (globals, r_type);
8650
8651 if ( r_type == R_ARM_GNU_VTENTRY
8652 || r_type == R_ARM_GNU_VTINHERIT)
8653 continue;
8654
8655 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
8656 howto = bfd_reloc.howto;
8657
8658 h = NULL;
8659 sym = NULL;
8660 sec = NULL;
8661
8662 if (r_symndx < symtab_hdr->sh_info)
8663 {
8664 sym = local_syms + r_symndx;
8665 sym_type = ELF32_ST_TYPE (sym->st_info);
8666 sec = local_sections[r_symndx];
8667 if (globals->use_rel)
8668 {
8669 relocation = (sec->output_section->vma
8670 + sec->output_offset
8671 + sym->st_value);
8672 if (!info->relocatable
8673 && (sec->flags & SEC_MERGE)
8674 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8675 {
8676 asection *msec;
8677 bfd_vma addend, value;
8678
8679 switch (r_type)
8680 {
8681 case R_ARM_MOVW_ABS_NC:
8682 case R_ARM_MOVT_ABS:
8683 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8684 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
8685 addend = (addend ^ 0x8000) - 0x8000;
8686 break;
8687
8688 case R_ARM_THM_MOVW_ABS_NC:
8689 case R_ARM_THM_MOVT_ABS:
8690 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
8691 << 16;
8692 value |= bfd_get_16 (input_bfd,
8693 contents + rel->r_offset + 2);
8694 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
8695 | ((value & 0x04000000) >> 15);
8696 addend = (addend ^ 0x8000) - 0x8000;
8697 break;
8698
8699 default:
8700 if (howto->rightshift
8701 || (howto->src_mask & (howto->src_mask + 1)))
8702 {
8703 (*_bfd_error_handler)
8704 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
8705 input_bfd, input_section,
8706 (long) rel->r_offset, howto->name);
8707 return FALSE;
8708 }
8709
8710 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8711
8712 /* Get the (signed) value from the instruction. */
8713 addend = value & howto->src_mask;
8714 if (addend & ((howto->src_mask + 1) >> 1))
8715 {
8716 bfd_signed_vma mask;
8717
8718 mask = -1;
8719 mask &= ~ howto->src_mask;
8720 addend |= mask;
8721 }
8722 break;
8723 }
8724
8725 msec = sec;
8726 addend =
8727 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
8728 - relocation;
8729 addend += msec->output_section->vma + msec->output_offset;
8730
8731 /* Cases here must match those in the preceding
8732 switch statement. */
8733 switch (r_type)
8734 {
8735 case R_ARM_MOVW_ABS_NC:
8736 case R_ARM_MOVT_ABS:
8737 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
8738 | (addend & 0xfff);
8739 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8740 break;
8741
8742 case R_ARM_THM_MOVW_ABS_NC:
8743 case R_ARM_THM_MOVT_ABS:
8744 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
8745 | (addend & 0xff) | ((addend & 0x0800) << 15);
8746 bfd_put_16 (input_bfd, value >> 16,
8747 contents + rel->r_offset);
8748 bfd_put_16 (input_bfd, value,
8749 contents + rel->r_offset + 2);
8750 break;
8751
8752 default:
8753 value = (value & ~ howto->dst_mask)
8754 | (addend & howto->dst_mask);
8755 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8756 break;
8757 }
8758 }
8759 }
8760 else
8761 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
8762 }
8763 else
8764 {
8765 bfd_boolean warned;
8766
8767 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
8768 r_symndx, symtab_hdr, sym_hashes,
8769 h, sec, relocation,
8770 unresolved_reloc, warned);
8771
8772 sym_type = h->type;
8773 }
8774
8775 if (sec != NULL && elf_discarded_section (sec))
8776 {
8777 /* For relocs against symbols from removed linkonce sections,
8778 or sections discarded by a linker script, we just want the
8779 section contents zeroed. Avoid any special processing. */
8780 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
8781 rel->r_info = 0;
8782 rel->r_addend = 0;
8783 continue;
8784 }
8785
8786 if (info->relocatable)
8787 {
8788 /* This is a relocatable link. We don't have to change
8789 anything, unless the reloc is against a section symbol,
8790 in which case we have to adjust according to where the
8791 section symbol winds up in the output section. */
8792 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8793 {
8794 if (globals->use_rel)
8795 arm_add_to_rel (input_bfd, contents + rel->r_offset,
8796 howto, (bfd_signed_vma) sec->output_offset);
8797 else
8798 rel->r_addend += sec->output_offset;
8799 }
8800 continue;
8801 }
8802
8803 if (h != NULL)
8804 name = h->root.root.string;
8805 else
8806 {
8807 name = (bfd_elf_string_from_elf_section
8808 (input_bfd, symtab_hdr->sh_link, sym->st_name));
8809 if (name == NULL || *name == '\0')
8810 name = bfd_section_name (input_bfd, sec);
8811 }
8812
8813 if (r_symndx != 0
8814 && r_type != R_ARM_NONE
8815 && (h == NULL
8816 || h->root.type == bfd_link_hash_defined
8817 || h->root.type == bfd_link_hash_defweak)
8818 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
8819 {
8820 (*_bfd_error_handler)
8821 ((sym_type == STT_TLS
8822 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
8823 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
8824 input_bfd,
8825 input_section,
8826 (long) rel->r_offset,
8827 howto->name,
8828 name);
8829 }
8830
8831 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
8832 input_section, contents, rel,
8833 relocation, info, sec, name,
8834 (h ? ELF_ST_TYPE (h->type) :
8835 ELF_ST_TYPE (sym->st_info)), h,
8836 &unresolved_reloc, &error_message);
8837
8838 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
8839 because such sections are not SEC_ALLOC and thus ld.so will
8840 not process them. */
8841 if (unresolved_reloc
8842 && !((input_section->flags & SEC_DEBUGGING) != 0
8843 && h->def_dynamic))
8844 {
8845 (*_bfd_error_handler)
8846 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
8847 input_bfd,
8848 input_section,
8849 (long) rel->r_offset,
8850 howto->name,
8851 h->root.root.string);
8852 return FALSE;
8853 }
8854
8855 if (r != bfd_reloc_ok)
8856 {
8857 switch (r)
8858 {
8859 case bfd_reloc_overflow:
8860 /* If the overflowing reloc was to an undefined symbol,
8861 we have already printed one error message and there
8862 is no point complaining again. */
8863 if ((! h ||
8864 h->root.type != bfd_link_hash_undefined)
8865 && (!((*info->callbacks->reloc_overflow)
8866 (info, (h ? &h->root : NULL), name, howto->name,
8867 (bfd_vma) 0, input_bfd, input_section,
8868 rel->r_offset))))
8869 return FALSE;
8870 break;
8871
8872 case bfd_reloc_undefined:
8873 if (!((*info->callbacks->undefined_symbol)
8874 (info, name, input_bfd, input_section,
8875 rel->r_offset, TRUE)))
8876 return FALSE;
8877 break;
8878
8879 case bfd_reloc_outofrange:
8880 error_message = _("out of range");
8881 goto common_error;
8882
8883 case bfd_reloc_notsupported:
8884 error_message = _("unsupported relocation");
8885 goto common_error;
8886
8887 case bfd_reloc_dangerous:
8888 /* error_message should already be set. */
8889 goto common_error;
8890
8891 default:
8892 error_message = _("unknown error");
8893 /* Fall through. */
8894
8895 common_error:
8896 BFD_ASSERT (error_message != NULL);
8897 if (!((*info->callbacks->reloc_dangerous)
8898 (info, error_message, input_bfd, input_section,
8899 rel->r_offset)))
8900 return FALSE;
8901 break;
8902 }
8903 }
8904 }
8905
8906 return TRUE;
8907 }
8908
8909 /* Add a new unwind edit to the list described by HEAD, TAIL. If INDEX is zero,
8910 adds the edit to the start of the list. (The list must be built in order of
8911 ascending INDEX: the function's callers are primarily responsible for
8912 maintaining that condition). */
8913
8914 static void
8915 add_unwind_table_edit (arm_unwind_table_edit **head,
8916 arm_unwind_table_edit **tail,
8917 arm_unwind_edit_type type,
8918 asection *linked_section,
8919 unsigned int index)
8920 {
8921 arm_unwind_table_edit *new_edit = xmalloc (sizeof (arm_unwind_table_edit));
8922
8923 new_edit->type = type;
8924 new_edit->linked_section = linked_section;
8925 new_edit->index = index;
8926
8927 if (index > 0)
8928 {
8929 new_edit->next = NULL;
8930
8931 if (*tail)
8932 (*tail)->next = new_edit;
8933
8934 (*tail) = new_edit;
8935
8936 if (!*head)
8937 (*head) = new_edit;
8938 }
8939 else
8940 {
8941 new_edit->next = *head;
8942
8943 if (!*tail)
8944 *tail = new_edit;
8945
8946 *head = new_edit;
8947 }
8948 }
8949
8950 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
8951
8952 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST may be negative. */
8953 static void
8954 adjust_exidx_size(asection *exidx_sec, int adjust)
8955 {
8956 asection *out_sec;
8957
8958 if (!exidx_sec->rawsize)
8959 exidx_sec->rawsize = exidx_sec->size;
8960
8961 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
8962 out_sec = exidx_sec->output_section;
8963 /* Adjust size of output section. */
8964 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
8965 }
8966
8967 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
8968 static void
8969 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
8970 {
8971 struct _arm_elf_section_data *exidx_arm_data;
8972
8973 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
8974 add_unwind_table_edit (
8975 &exidx_arm_data->u.exidx.unwind_edit_list,
8976 &exidx_arm_data->u.exidx.unwind_edit_tail,
8977 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
8978
8979 adjust_exidx_size(exidx_sec, 8);
8980 }
8981
8982 /* Scan .ARM.exidx tables, and create a list describing edits which should be
8983 made to those tables, such that:
8984
8985 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
8986 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
8987 codes which have been inlined into the index).
8988
8989 The edits are applied when the tables are written
8990 (in elf32_arm_write_section).
8991 */
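/* For example, a run of consecutive functions whose index entries are
   all EXIDX_CANTUNWIND (or all share the same inlined unwind data)
   only needs its first entry, because an index entry covers every
   address up to the start of the next one; the later duplicates are
   deleted by these edits.  */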
8992
8993 bfd_boolean
8994 elf32_arm_fix_exidx_coverage (asection **text_section_order,
8995 unsigned int num_text_sections,
8996 struct bfd_link_info *info)
8997 {
8998 bfd *inp;
8999 unsigned int last_second_word = 0, i;
9000 asection *last_exidx_sec = NULL;
9001 asection *last_text_sec = NULL;
9002 int last_unwind_type = -1;
9003
9004 /* Walk over all EXIDX sections, and create backlinks from the corresponding
9005 text sections. */
9006 for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
9007 {
9008 asection *sec;
9009
9010 for (sec = inp->sections; sec != NULL; sec = sec->next)
9011 {
9012 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
9013 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
9014
9015 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
9016 continue;
9017
9018 if (elf_sec->linked_to)
9019 {
9020 Elf_Internal_Shdr *linked_hdr
9021 = &elf_section_data (elf_sec->linked_to)->this_hdr;
9022 struct _arm_elf_section_data *linked_sec_arm_data
9023 = get_arm_elf_section_data (linked_hdr->bfd_section);
9024
9025 if (linked_sec_arm_data == NULL)
9026 continue;
9027
9028 /* Link this .ARM.exidx section back from the text section it
9029 describes. */
9030 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
9031 }
9032 }
9033 }
9034
9035 /* Walk all text sections in order of increasing VMA. Eliminate duplicate
9036 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
9037 and add EXIDX_CANTUNWIND entries for sections with no unwind table data.
9038 */
9039
9040 for (i = 0; i < num_text_sections; i++)
9041 {
9042 asection *sec = text_section_order[i];
9043 asection *exidx_sec;
9044 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
9045 struct _arm_elf_section_data *exidx_arm_data;
9046 bfd_byte *contents = NULL;
9047 int deleted_exidx_bytes = 0;
9048 bfd_vma j;
9049 arm_unwind_table_edit *unwind_edit_head = NULL;
9050 arm_unwind_table_edit *unwind_edit_tail = NULL;
9051 Elf_Internal_Shdr *hdr;
9052 bfd *ibfd;
9053
9054 if (arm_data == NULL)
9055 continue;
9056
9057 exidx_sec = arm_data->u.text.arm_exidx_sec;
9058 if (exidx_sec == NULL)
9059 {
9060 /* Section has no unwind data. */
9061 if (last_unwind_type == 0 || !last_exidx_sec)
9062 continue;
9063
9064 /* Ignore zero sized sections. */
9065 if (sec->size == 0)
9066 continue;
9067
9068 insert_cantunwind_after(last_text_sec, last_exidx_sec);
9069 last_unwind_type = 0;
9070 continue;
9071 }
9072
9073 /* Skip /DISCARD/ sections. */
9074 if (bfd_is_abs_section (exidx_sec->output_section))
9075 continue;
9076
9077 hdr = &elf_section_data (exidx_sec)->this_hdr;
9078 if (hdr->sh_type != SHT_ARM_EXIDX)
9079 continue;
9080
9081 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9082 if (exidx_arm_data == NULL)
9083 continue;
9084
9085 ibfd = exidx_sec->owner;
9086
9087 if (hdr->contents != NULL)
9088 contents = hdr->contents;
9089 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
9090 /* An error? */
9091 continue;
9092
9093 for (j = 0; j < hdr->sh_size; j += 8)
9094 {
9095 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
9096 int unwind_type;
9097 int elide = 0;
9098
9099 /* An EXIDX_CANTUNWIND entry. */
9100 if (second_word == 1)
9101 {
9102 if (last_unwind_type == 0)
9103 elide = 1;
9104 unwind_type = 0;
9105 }
9106 /* Inlined unwinding data. Merge if equal to previous. */
9107 else if ((second_word & 0x80000000) != 0)
9108 {
9109 if (last_second_word == second_word && last_unwind_type == 1)
9110 elide = 1;
9111 unwind_type = 1;
9112 last_second_word = second_word;
9113 }
9114 /* Normal table entry. In theory we could merge these too,
9115 but duplicate entries are likely to be much less common. */
9116 else
9117 unwind_type = 2;
9118
9119 if (elide)
9120 {
9121 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
9122 DELETE_EXIDX_ENTRY, NULL, j / 8);
9123
9124 deleted_exidx_bytes += 8;
9125 }
9126
9127 last_unwind_type = unwind_type;
9128 }
9129
9130 /* Free contents if we allocated it ourselves. */
9131 if (contents != hdr->contents)
9132 free (contents);
9133
9134 /* Record edits to be applied later (in elf32_arm_write_section). */
9135 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
9136 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
9137
9138 if (deleted_exidx_bytes > 0)
9139 adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);
9140
9141 last_exidx_sec = exidx_sec;
9142 last_text_sec = sec;
9143 }
9144
9145 /* Add terminating CANTUNWIND entry. */
9146 if (last_exidx_sec && last_unwind_type != 0)
9147 insert_cantunwind_after(last_text_sec, last_exidx_sec);
9148
9149 return TRUE;
9150 }
9151
9152 static bfd_boolean
9153 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
9154 bfd *ibfd, const char *name)
9155 {
9156 asection *sec, *osec;
9157
9158 sec = bfd_get_section_by_name (ibfd, name);
9159 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
9160 return TRUE;
9161
9162 osec = sec->output_section;
9163 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
9164 return TRUE;
9165
9166 if (! bfd_set_section_contents (obfd, osec, sec->contents,
9167 sec->output_offset, sec->size))
9168 return FALSE;
9169
9170 return TRUE;
9171 }
9172
9173 static bfd_boolean
9174 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
9175 {
9176 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
9177
9178 /* Invoke the regular ELF backend linker to do all the work. */
9179 if (!bfd_elf_final_link (abfd, info))
9180 return FALSE;
9181
9182 /* Write out any glue sections now that we have created all the
9183 stubs. */
9184 if (globals->bfd_of_glue_owner != NULL)
9185 {
9186 if (! elf32_arm_output_glue_section (info, abfd,
9187 globals->bfd_of_glue_owner,
9188 ARM2THUMB_GLUE_SECTION_NAME))
9189 return FALSE;
9190
9191 if (! elf32_arm_output_glue_section (info, abfd,
9192 globals->bfd_of_glue_owner,
9193 THUMB2ARM_GLUE_SECTION_NAME))
9194 return FALSE;
9195
9196 if (! elf32_arm_output_glue_section (info, abfd,
9197 globals->bfd_of_glue_owner,
9198 VFP11_ERRATUM_VENEER_SECTION_NAME))
9199 return FALSE;
9200
9201 if (! elf32_arm_output_glue_section (info, abfd,
9202 globals->bfd_of_glue_owner,
9203 ARM_BX_GLUE_SECTION_NAME))
9204 return FALSE;
9205 }
9206
9207 return TRUE;
9208 }
9209
9210 /* Set the right machine number. */
9211
9212 static bfd_boolean
9213 elf32_arm_object_p (bfd *abfd)
9214 {
9215 unsigned int mach;
9216
9217 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
9218
9219 if (mach != bfd_mach_arm_unknown)
9220 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9221
9222 else if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
9223 bfd_default_set_arch_mach (abfd, bfd_arch_arm, bfd_mach_arm_ep9312);
9224
9225 else
9226 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9227
9228 return TRUE;
9229 }
9230
9231 /* Function to keep ARM specific flags in the ELF header. */
9232
9233 static bfd_boolean
9234 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
9235 {
9236 if (elf_flags_init (abfd)
9237 && elf_elfheader (abfd)->e_flags != flags)
9238 {
9239 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
9240 {
9241 if (flags & EF_ARM_INTERWORK)
9242 (*_bfd_error_handler)
9243 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
9244 abfd);
9245 else
9246 _bfd_error_handler
9247 (_("Warning: Clearing the interworking flag of %B due to outside request"),
9248 abfd);
9249 }
9250 }
9251 else
9252 {
9253 elf_elfheader (abfd)->e_flags = flags;
9254 elf_flags_init (abfd) = TRUE;
9255 }
9256
9257 return TRUE;
9258 }
9259
9260 /* Copy backend specific data from one object module to another. */
9261
9262 static bfd_boolean
9263 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
9264 {
9265 flagword in_flags;
9266 flagword out_flags;
9267
9268 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
9269 return TRUE;
9270
9271 in_flags = elf_elfheader (ibfd)->e_flags;
9272 out_flags = elf_elfheader (obfd)->e_flags;
9273
9274 if (elf_flags_init (obfd)
9275 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
9276 && in_flags != out_flags)
9277 {
9278 /* Cannot mix APCS26 and APCS32 code. */
9279 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
9280 return FALSE;
9281
9282 /* Cannot mix float APCS and non-float APCS code. */
9283 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
9284 return FALSE;
9285
9286 /* If the src and dest have different interworking flags
9287 then turn off the interworking bit. */
9288 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
9289 {
9290 if (out_flags & EF_ARM_INTERWORK)
9291 _bfd_error_handler
9292 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
9293 obfd, ibfd);
9294
9295 in_flags &= ~EF_ARM_INTERWORK;
9296 }
9297
9298 /* Likewise for PIC, though don't warn for this case. */
9299 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
9300 in_flags &= ~EF_ARM_PIC;
9301 }
9302
9303 elf_elfheader (obfd)->e_flags = in_flags;
9304 elf_flags_init (obfd) = TRUE;
9305
9306 /* Also copy the EI_OSABI field. */
9307 elf_elfheader (obfd)->e_ident[EI_OSABI] =
9308 elf_elfheader (ibfd)->e_ident[EI_OSABI];
9309
9310 /* Copy object attributes. */
9311 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9312
9313 return TRUE;
9314 }
9315
9316 /* Values for Tag_ABI_PCS_R9_use. */
9317 enum
9318 {
9319 AEABI_R9_V6,
9320 AEABI_R9_SB,
9321 AEABI_R9_TLS,
9322 AEABI_R9_unused
9323 };
9324
9325 /* Values for Tag_ABI_PCS_RW_data. */
9326 enum
9327 {
9328 AEABI_PCS_RW_data_absolute,
9329 AEABI_PCS_RW_data_PCrel,
9330 AEABI_PCS_RW_data_SBrel,
9331 AEABI_PCS_RW_data_unused
9332 };
9333
9334 /* Values for Tag_ABI_enum_size. */
9335 enum
9336 {
9337 AEABI_enum_unused,
9338 AEABI_enum_short,
9339 AEABI_enum_wide,
9340 AEABI_enum_forced_wide
9341 };
9342
9343 /* Determine whether an object attribute tag takes an integer, a
9344 string or both. */
9345
9346 static int
9347 elf32_arm_obj_attrs_arg_type (int tag)
9348 {
9349 if (tag == Tag_compatibility)
9350 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
9351 else if (tag == Tag_nodefaults)
9352 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
9353 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
9354 return ATTR_TYPE_FLAG_STR_VAL;
9355 else if (tag < 32)
9356 return ATTR_TYPE_FLAG_INT_VAL;
9357 else
9358 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
9359 }
9360
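/* For example, Tag_CPU_name yields ATTR_TYPE_FLAG_STR_VAL, Tag_CPU_arch
   (which is below 32) yields ATTR_TYPE_FLAG_INT_VAL, and an odd-numbered
   tag of 32 or above that is not special-cased, such as
   Tag_also_compatible_with, also yields ATTR_TYPE_FLAG_STR_VAL. */
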
9361 /* The ABI defines that Tag_conformance should be emitted first, and that
9362 Tag_nodefaults should be second (if either is defined). This sets those
9363 two positions, and bumps up the position of all the remaining tags to
9364 compensate. */
9365 static int
9366 elf32_arm_obj_attrs_order (int num)
9367 {
9368 if (num == 4)
9369 return Tag_conformance;
9370 if (num == 5)
9371 return Tag_nodefaults;
9372 if ((num - 2) < Tag_nodefaults)
9373 return num - 2;
9374 if ((num - 1) < Tag_conformance)
9375 return num - 1;
9376 return num;
9377 }
9378
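/* For example, with the usual numbering (Tag_nodefaults is 64 and
   Tag_conformance is 67), positions 4 and 5 emit those two tags, a
   position such as 6 falls back to tag 4 (6 - 2), and positions at or
   beyond 68 are left unchanged because both moved tags have already
   been accounted for. */
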
9379 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
9380 Returns -1 if no architecture could be read. */
9381
9382 static int
9383 get_secondary_compatible_arch (bfd *abfd)
9384 {
9385 obj_attribute *attr =
9386 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9387
9388 /* Note: the tag and its argument below are uleb128 values, though
9389 currently-defined values fit in one byte for each. */
9390 if (attr->s
9391 && attr->s[0] == Tag_CPU_arch
9392 && (attr->s[1] & 128) != 128
9393 && attr->s[2] == 0)
9394 return attr->s[1];
9395
9396 /* This tag is "safely ignorable", so don't complain if it looks funny. */
9397 return -1;
9398 }
9399
9400 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
9401 The tag is removed if ARCH is -1. */
9402
9403 static void
9404 set_secondary_compatible_arch (bfd *abfd, int arch)
9405 {
9406 obj_attribute *attr =
9407 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9408
9409 if (arch == -1)
9410 {
9411 attr->s = NULL;
9412 return;
9413 }
9414
9415 /* Note: the tag and its argument below are uleb128 values, though
9416 currently-defined values fit in one byte for each. */
9417 if (!attr->s)
9418 attr->s = bfd_alloc (abfd, 3);
9419 attr->s[0] = Tag_CPU_arch;
9420 attr->s[1] = arch;
9421 attr->s[2] = '\0';
9422 }
9423
9424 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
9425 into account. */
9426
9427 static int
9428 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
9429 int newtag, int secondary_compat)
9430 {
9431 #define T(X) TAG_CPU_ARCH_##X
9432 int tagl, tagh, result;
9433 const int v6t2[] =
9434 {
9435 T(V6T2), /* PRE_V4. */
9436 T(V6T2), /* V4. */
9437 T(V6T2), /* V4T. */
9438 T(V6T2), /* V5T. */
9439 T(V6T2), /* V5TE. */
9440 T(V6T2), /* V5TEJ. */
9441 T(V6T2), /* V6. */
9442 T(V7), /* V6KZ. */
9443 T(V6T2) /* V6T2. */
9444 };
9445 const int v6k[] =
9446 {
9447 T(V6K), /* PRE_V4. */
9448 T(V6K), /* V4. */
9449 T(V6K), /* V4T. */
9450 T(V6K), /* V5T. */
9451 T(V6K), /* V5TE. */
9452 T(V6K), /* V5TEJ. */
9453 T(V6K), /* V6. */
9454 T(V6KZ), /* V6KZ. */
9455 T(V7), /* V6T2. */
9456 T(V6K) /* V6K. */
9457 };
9458 const int v7[] =
9459 {
9460 T(V7), /* PRE_V4. */
9461 T(V7), /* V4. */
9462 T(V7), /* V4T. */
9463 T(V7), /* V5T. */
9464 T(V7), /* V5TE. */
9465 T(V7), /* V5TEJ. */
9466 T(V7), /* V6. */
9467 T(V7), /* V6KZ. */
9468 T(V7), /* V6T2. */
9469 T(V7), /* V6K. */
9470 T(V7) /* V7. */
9471 };
9472 const int v6_m[] =
9473 {
9474 -1, /* PRE_V4. */
9475 -1, /* V4. */
9476 T(V6K), /* V4T. */
9477 T(V6K), /* V5T. */
9478 T(V6K), /* V5TE. */
9479 T(V6K), /* V5TEJ. */
9480 T(V6K), /* V6. */
9481 T(V6KZ), /* V6KZ. */
9482 T(V7), /* V6T2. */
9483 T(V6K), /* V6K. */
9484 T(V7), /* V7. */
9485 T(V6_M) /* V6_M. */
9486 };
9487 const int v6s_m[] =
9488 {
9489 -1, /* PRE_V4. */
9490 -1, /* V4. */
9491 T(V6K), /* V4T. */
9492 T(V6K), /* V5T. */
9493 T(V6K), /* V5TE. */
9494 T(V6K), /* V5TEJ. */
9495 T(V6K), /* V6. */
9496 T(V6KZ), /* V6KZ. */
9497 T(V7), /* V6T2. */
9498 T(V6K), /* V6K. */
9499 T(V7), /* V7. */
9500 T(V6S_M), /* V6_M. */
9501 T(V6S_M) /* V6S_M. */
9502 };
9503 const int v4t_plus_v6_m[] =
9504 {
9505 -1, /* PRE_V4. */
9506 -1, /* V4. */
9507 T(V4T), /* V4T. */
9508 T(V5T), /* V5T. */
9509 T(V5TE), /* V5TE. */
9510 T(V5TEJ), /* V5TEJ. */
9511 T(V6), /* V6. */
9512 T(V6KZ), /* V6KZ. */
9513 T(V6T2), /* V6T2. */
9514 T(V6K), /* V6K. */
9515 T(V7), /* V7. */
9516 T(V6_M), /* V6_M. */
9517 T(V6S_M), /* V6S_M. */
9518 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
9519 };
9520 const int *comb[] =
9521 {
9522 v6t2,
9523 v6k,
9524 v7,
9525 v6_m,
9526 v6s_m,
9527 /* Pseudo-architecture. */
9528 v4t_plus_v6_m
9529 };
9530
9531 /* Check we've not got a higher architecture than we know about. */
9532
9533 if (oldtag >= MAX_TAG_CPU_ARCH || newtag >= MAX_TAG_CPU_ARCH)
9534 {
9535 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
9536 return -1;
9537 }
9538
9539 /* Override old tag if we have a Tag_also_compatible_with on the output. */
9540
9541 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
9542 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
9543 oldtag = T(V4T_PLUS_V6_M);
9544
9545 /* And override the new tag if we have a Tag_also_compatible_with on the
9546 input. */
9547
9548 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
9549 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
9550 newtag = T(V4T_PLUS_V6_M);
9551
9552 tagl = (oldtag < newtag) ? oldtag : newtag;
9553 result = tagh = (oldtag > newtag) ? oldtag : newtag;
9554
9555 /* Architectures before V6KZ add features monotonically. */
9556 if (tagh <= TAG_CPU_ARCH_V6KZ)
9557 return result;
9558
9559 result = comb[tagh - T(V6T2)][tagl];
9560
9561 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
9562 as the canonical version. */
9563 if (result == T(V4T_PLUS_V6_M))
9564 {
9565 result = T(V4T);
9566 *secondary_compat_out = T(V6_M);
9567 }
9568 else
9569 *secondary_compat_out = -1;
9570
9571 if (result == -1)
9572 {
9573 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
9574 ibfd, oldtag, newtag);
9575 return -1;
9576 }
9577
9578 return result;
9579 #undef T
9580 }
9581
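/* Worked examples of the combination above: for architectures up to v6KZ
   the higher tag simply wins (v5TE merged with v6 gives v6), while later
   architectures use the tables, so v6T2 merged with v6K gives v7 because
   neither is a subset of the other. */
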
9582 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
9583 are conflicting attributes. */
9584
9585 static bfd_boolean
9586 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
9587 {
9588 obj_attribute *in_attr;
9589 obj_attribute *out_attr;
9590 obj_attribute_list *in_list;
9591 obj_attribute_list *out_list;
9592 obj_attribute_list **out_listp;
9593 /* Some tags have 0 = don't care, 1 = strong requirement,
9594 2 = weak requirement. */
9595 static const int order_021[3] = {0, 2, 1};
9596 /* For use with Tag_VFP_arch. */
9597 static const int order_01243[5] = {0, 1, 2, 4, 3};
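  /* These tables give the precedence of each value rather than the value
     itself: order_021 ranks 0 < 2 < 1, so a strong requirement (1) wins
     over a weak one (2); order_01243 ranks 0 < 1 < 2 < 4 < 3, so VFPv3
     (value 3) wins over its 16-double-register subset (value 4). */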
9598 int i;
9599 bfd_boolean result = TRUE;
9600
9601 /* Skip the linker stubs file. This preserves previous behavior
9602 of accepting unknown attributes in the first input file - but
9603 is that a bug? */
9604 if (ibfd->flags & BFD_LINKER_CREATED)
9605 return TRUE;
9606
9607 if (!elf_known_obj_attributes_proc (obfd)[0].i)
9608 {
9609 /* This is the first object. Copy the attributes. */
9610 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9611
9612 /* Use the Tag_null value to indicate the attributes have been
9613 initialized. */
9614 elf_known_obj_attributes_proc (obfd)[0].i = 1;
9615
9616 return TRUE;
9617 }
9618
9619 in_attr = elf_known_obj_attributes_proc (ibfd);
9620 out_attr = elf_known_obj_attributes_proc (obfd);
9621 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
9622 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
9623 {
9624 /* Ignore mismatches if the object doesn't use floating point. */
9625 if (out_attr[Tag_ABI_FP_number_model].i == 0)
9626 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
9627 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
9628 {
9629 _bfd_error_handler
9630 (_("error: %B uses VFP register arguments, %B does not"),
9631 ibfd, obfd);
9632 result = FALSE;
9633 }
9634 }
9635
9636 for (i = 4; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
9637 {
9638 /* Merge this attribute with existing attributes. */
9639 switch (i)
9640 {
9641 case Tag_CPU_raw_name:
9642 case Tag_CPU_name:
9643 /* These are merged after Tag_CPU_arch. */
9644 break;
9645
9646 case Tag_ABI_optimization_goals:
9647 case Tag_ABI_FP_optimization_goals:
9648 /* Use the first value seen. */
9649 break;
9650
9651 case Tag_CPU_arch:
9652 {
9653 int secondary_compat = -1, secondary_compat_out = -1;
9654 unsigned int saved_out_attr = out_attr[i].i;
9655 static const char *name_table[] = {
9656 /* These aren't real CPU names, but we can't guess
9657 the real names from the architecture version alone. */
9658 "Pre v4",
9659 "ARM v4",
9660 "ARM v4T",
9661 "ARM v5T",
9662 "ARM v5TE",
9663 "ARM v5TEJ",
9664 "ARM v6",
9665 "ARM v6KZ",
9666 "ARM v6T2",
9667 "ARM v6K",
9668 "ARM v7",
9669 "ARM v6-M",
9670 "ARM v6S-M"
9671 };
9672
9673 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
9674 secondary_compat = get_secondary_compatible_arch (ibfd);
9675 secondary_compat_out = get_secondary_compatible_arch (obfd);
9676 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
9677 &secondary_compat_out,
9678 in_attr[i].i,
9679 secondary_compat);
9680 set_secondary_compatible_arch (obfd, secondary_compat_out);
9681
9682 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
9683 if (out_attr[i].i == saved_out_attr)
9684 ; /* Leave the names alone. */
9685 else if (out_attr[i].i == in_attr[i].i)
9686 {
9687 /* The output architecture has been changed to match the
9688 input architecture. Use the input names. */
9689 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
9690 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
9691 : NULL;
9692 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
9693 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
9694 : NULL;
9695 }
9696 else
9697 {
9698 out_attr[Tag_CPU_name].s = NULL;
9699 out_attr[Tag_CPU_raw_name].s = NULL;
9700 }
9701
9702 /* If we still don't have a value for Tag_CPU_name,
9703 make one up now. Tag_CPU_raw_name remains blank. */
9704 if (out_attr[Tag_CPU_name].s == NULL
9705 && out_attr[i].i < ARRAY_SIZE (name_table))
9706 out_attr[Tag_CPU_name].s =
9707 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
9708 }
9709 break;
9710
9711 case Tag_ARM_ISA_use:
9712 case Tag_THUMB_ISA_use:
9713 case Tag_WMMX_arch:
9714 case Tag_Advanced_SIMD_arch:
9715 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
9716 case Tag_ABI_FP_rounding:
9717 case Tag_ABI_FP_exceptions:
9718 case Tag_ABI_FP_user_exceptions:
9719 case Tag_ABI_FP_number_model:
9720 case Tag_VFP_HP_extension:
9721 case Tag_CPU_unaligned_access:
9722 case Tag_T2EE_use:
9723 case Tag_Virtualization_use:
9724 case Tag_MPextension_use:
9725 /* Use the largest value specified. */
9726 if (in_attr[i].i > out_attr[i].i)
9727 out_attr[i].i = in_attr[i].i;
9728 break;
9729
9730 case Tag_ABI_align8_preserved:
9731 case Tag_ABI_PCS_RO_data:
9732 /* Use the smallest value specified. */
9733 if (in_attr[i].i < out_attr[i].i)
9734 out_attr[i].i = in_attr[i].i;
9735 break;
9736
9737 case Tag_ABI_align8_needed:
9738 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
9739 && (in_attr[Tag_ABI_align8_preserved].i == 0
9740 || out_attr[Tag_ABI_align8_preserved].i == 0))
9741 {
9742 /* This error message should be enabled once all non-conformant
9743 binaries in the toolchain have had the attributes set
9744 properly.
9745 _bfd_error_handler
9746 (_("error: %B: 8-byte data alignment conflicts with %B"),
9747 obfd, ibfd);
9748 result = FALSE; */
9749 }
9750 /* Fall through. */
9751 case Tag_ABI_FP_denormal:
9752 case Tag_ABI_PCS_GOT_use:
9753 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
9754 value if greater than 2 (for future-proofing). */
9755 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
9756 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
9757 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
9758 out_attr[i].i = in_attr[i].i;
9759 break;
9760
9761
9762 case Tag_CPU_arch_profile:
9763 if (out_attr[i].i != in_attr[i].i)
9764 {
9765 /* 0 will merge with anything.
9766 'A' and 'S' merge to 'A'.
9767 'R' and 'S' merge to 'R'.
9768 'M' and 'A|R|S' is an error. */
9769 if (out_attr[i].i == 0
9770 || (out_attr[i].i == 'S'
9771 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
9772 out_attr[i].i = in_attr[i].i;
9773 else if (in_attr[i].i == 0
9774 || (in_attr[i].i == 'S'
9775 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
9776 ; /* Do nothing. */
9777 else
9778 {
9779 _bfd_error_handler
9780 (_("error: %B: Conflicting architecture profiles %c/%c"),
9781 ibfd,
9782 in_attr[i].i ? in_attr[i].i : '0',
9783 out_attr[i].i ? out_attr[i].i : '0');
9784 result = FALSE;
9785 }
9786 }
9787 break;
9788 case Tag_VFP_arch:
9789 /* Use the "greatest" from the sequence 0, 1, 2, 4, 3, or the
9790 largest value if greater than 4 (for future-proofing). */
9791 if ((in_attr[i].i > 4 && in_attr[i].i > out_attr[i].i)
9792 || (in_attr[i].i <= 4 && out_attr[i].i <= 4
9793 && order_01243[in_attr[i].i] > order_01243[out_attr[i].i]))
9794 out_attr[i].i = in_attr[i].i;
9795 break;
9796 case Tag_PCS_config:
9797 if (out_attr[i].i == 0)
9798 out_attr[i].i = in_attr[i].i;
9799 else if (in_attr[i].i != 0 && out_attr[i].i != 0)
9800 {
9801 /* It's sometimes ok to mix different configs, so this is only
9802 a warning. */
9803 _bfd_error_handler
9804 (_("Warning: %B: Conflicting platform configuration"), ibfd);
9805 }
9806 break;
9807 case Tag_ABI_PCS_R9_use:
9808 if (in_attr[i].i != out_attr[i].i
9809 && out_attr[i].i != AEABI_R9_unused
9810 && in_attr[i].i != AEABI_R9_unused)
9811 {
9812 _bfd_error_handler
9813 (_("error: %B: Conflicting use of R9"), ibfd);
9814 result = FALSE;
9815 }
9816 if (out_attr[i].i == AEABI_R9_unused)
9817 out_attr[i].i = in_attr[i].i;
9818 break;
9819 case Tag_ABI_PCS_RW_data:
9820 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
9821 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
9822 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
9823 {
9824 _bfd_error_handler
9825 (_("error: %B: SB relative addressing conflicts with use of R9"),
9826 ibfd);
9827 result = FALSE;
9828 }
9829 /* Use the smallest value specified. */
9830 if (in_attr[i].i < out_attr[i].i)
9831 out_attr[i].i = in_attr[i].i;
9832 break;
9833 case Tag_ABI_PCS_wchar_t:
9834 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
9835 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
9836 {
9837 _bfd_error_handler
9838 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
9839 ibfd, in_attr[i].i, out_attr[i].i);
9840 }
9841 else if (in_attr[i].i && !out_attr[i].i)
9842 out_attr[i].i = in_attr[i].i;
9843 break;
9844 case Tag_ABI_enum_size:
9845 if (in_attr[i].i != AEABI_enum_unused)
9846 {
9847 if (out_attr[i].i == AEABI_enum_unused
9848 || out_attr[i].i == AEABI_enum_forced_wide)
9849 {
9850 /* The existing object is compatible with anything.
9851 Use whatever requirements the new object has. */
9852 out_attr[i].i = in_attr[i].i;
9853 }
9854 else if (in_attr[i].i != AEABI_enum_forced_wide
9855 && out_attr[i].i != in_attr[i].i
9856 && !elf_arm_tdata (obfd)->no_enum_size_warning)
9857 {
9858 static const char *aeabi_enum_names[] =
9859 { "", "variable-size", "32-bit", "" };
9860 const char *in_name =
9861 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
9862 ? aeabi_enum_names[in_attr[i].i]
9863 : "<unknown>";
9864 const char *out_name =
9865 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
9866 ? aeabi_enum_names[out_attr[i].i]
9867 : "<unknown>";
9868 _bfd_error_handler
9869 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
9870 ibfd, in_name, out_name);
9871 }
9872 }
9873 break;
9874 case Tag_ABI_VFP_args:
9875 /* Already done. */
9876 break;
9877 case Tag_ABI_WMMX_args:
9878 if (in_attr[i].i != out_attr[i].i)
9879 {
9880 _bfd_error_handler
9881 (_("error: %B uses iWMMXt register arguments, %B does not"),
9882 ibfd, obfd);
9883 result = FALSE;
9884 }
9885 break;
9886 case Tag_compatibility:
9887 /* Merged in target-independent code. */
9888 break;
9889 case Tag_ABI_HardFP_use:
9890 /* 1 (SP) and 2 (DP) conflict, so combine to 3 (SP & DP). */
9891 if ((in_attr[i].i == 1 && out_attr[i].i == 2)
9892 || (in_attr[i].i == 2 && out_attr[i].i == 1))
9893 out_attr[i].i = 3;
9894 else if (in_attr[i].i > out_attr[i].i)
9895 out_attr[i].i = in_attr[i].i;
9896 break;
9897 case Tag_ABI_FP_16bit_format:
9898 if (in_attr[i].i != 0 && out_attr[i].i != 0)
9899 {
9900 if (in_attr[i].i != out_attr[i].i)
9901 {
9902 _bfd_error_handler
9903 (_("error: fp16 format mismatch between %B and %B"),
9904 ibfd, obfd);
9905 result = FALSE;
9906 }
9907 }
9908 if (in_attr[i].i != 0)
9909 out_attr[i].i = in_attr[i].i;
9910 break;
9911
9912 case Tag_nodefaults:
9913 /* This tag is set if it exists, but the value is unused (and is
9914 typically zero). We don't actually need to do anything here -
9915 the merge happens automatically when the type flags are merged
9916 below. */
9917 break;
9918 case Tag_also_compatible_with:
9919 /* Already done in Tag_CPU_arch. */
9920 break;
9921 case Tag_conformance:
9922 /* Keep the attribute if it matches. Throw it away otherwise.
9923 No attribute means no claim to conform. */
9924 if (!in_attr[i].s || !out_attr[i].s
9925 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
9926 out_attr[i].s = NULL;
9927 break;
9928
9929 default:
9930 {
9931 bfd *err_bfd = NULL;
9932
9933 /* The "known_obj_attributes" table does contain some undefined
9934 attributes. Ensure that they are unused. */
9935 if (out_attr[i].i != 0 || out_attr[i].s != NULL)
9936 err_bfd = obfd;
9937 else if (in_attr[i].i != 0 || in_attr[i].s != NULL)
9938 err_bfd = ibfd;
9939
9940 if (err_bfd != NULL)
9941 {
9942 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
9943 if ((i & 127) < 64)
9944 {
9945 _bfd_error_handler
9946 (_("%B: Unknown mandatory EABI object attribute %d"),
9947 err_bfd, i);
9948 bfd_set_error (bfd_error_bad_value);
9949 result = FALSE;
9950 }
9951 else
9952 {
9953 _bfd_error_handler
9954 (_("Warning: %B: Unknown EABI object attribute %d"),
9955 err_bfd, i);
9956 }
9957 }
9958
9959 /* Only pass on attributes that match in both inputs. */
9960 if (in_attr[i].i != out_attr[i].i
9961 || in_attr[i].s != out_attr[i].s
9962 || (in_attr[i].s != NULL && out_attr[i].s != NULL
9963 && strcmp (in_attr[i].s, out_attr[i].s) != 0))
9964 {
9965 out_attr[i].i = 0;
9966 out_attr[i].s = NULL;
9967 }
9968 }
9969 }
9970
9971 /* If out_attr was copied from in_attr then it won't have a type yet. */
9972 if (in_attr[i].type && !out_attr[i].type)
9973 out_attr[i].type = in_attr[i].type;
9974 }
9975
9976 /* Merge Tag_compatibility attributes and any common GNU ones. */
9977 _bfd_elf_merge_object_attributes (ibfd, obfd);
9978
9979 /* Check for any attributes not known on ARM. */
9980 in_list = elf_other_obj_attributes_proc (ibfd);
9981 out_listp = &elf_other_obj_attributes_proc (obfd);
9982 out_list = *out_listp;
9983
9984 for (; in_list || out_list; )
9985 {
9986 bfd *err_bfd = NULL;
9987 int err_tag = 0;
9988
9989 /* The tags for each list are in numerical order. */
9990 /* If the tags are equal, then merge. */
9991 if (out_list && (!in_list || in_list->tag > out_list->tag))
9992 {
9993 /* This attribute only exists in obfd. We can't merge, and we don't
9994 know what the tag means, so delete it. */
9995 err_bfd = obfd;
9996 err_tag = out_list->tag;
9997 *out_listp = out_list->next;
9998 out_list = *out_listp;
9999 }
10000 else if (in_list && (!out_list || in_list->tag < out_list->tag))
10001 {
10002 /* This attribute only exists in ibfd. We can't merge, and we don't
10003 know what the tag means, so ignore it. */
10004 err_bfd = ibfd;
10005 err_tag = in_list->tag;
10006 in_list = in_list->next;
10007 }
10008 else /* The tags are equal. */
10009 {
10010 /* At present, all attributes in the list are unknown, and
10011 therefore can't be merged meaningfully. */
10012 err_bfd = obfd;
10013 err_tag = out_list->tag;
10014
10015 /* Only pass on attributes that match in both inputs. */
10016 if (in_list->attr.i != out_list->attr.i
10017 || in_list->attr.s != out_list->attr.s
10018 || (in_list->attr.s && out_list->attr.s
10019 && strcmp (in_list->attr.s, out_list->attr.s) != 0))
10020 {
10021 /* No match. Delete the attribute. */
10022 *out_listp = out_list->next;
10023 out_list = *out_listp;
10024 }
10025 else
10026 {
10027 /* Matched. Keep the attribute and move to the next. */
10028 out_list = out_list->next;
10029 in_list = in_list->next;
10030 }
10031 }
10032
10033 if (err_bfd)
10034 {
10035 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
10036 if ((err_tag & 127) < 64)
10037 {
10038 _bfd_error_handler
10039 (_("%B: Unknown mandatory EABI object attribute %d"),
10040 err_bfd, err_tag);
10041 bfd_set_error (bfd_error_bad_value);
10042 result = FALSE;
10043 }
10044 else
10045 {
10046 _bfd_error_handler
10047 (_("Warning: %B: Unknown EABI object attribute %d"),
10048 err_bfd, err_tag);
10049 }
10050 }
10051 }
10052 return result;
10053 }
10054
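/* A minimal standalone sketch (hypothetical helper, not called anywhere)
   of the "greatest of the sequence 0, 2, 1" merge used above for tags
   such as Tag_ABI_FP_denormal and Tag_ABI_PCS_GOT_use; values above 2
   are compared numerically for future-proofing, exactly as in
   elf32_arm_merge_eabi_attributes. */

static int
merge_021_attribute (int out, int in)
{
  static const int rank[3] = {0, 2, 1};

  if (in > 2 || out > 2)
    return in > out ? in : out;
  return rank[in] > rank[out] ? in : out;
}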
10055
10056 /* Return TRUE if the two EABI versions are incompatible. */
10057
10058 static bfd_boolean
10059 elf32_arm_versions_compatible (unsigned iver, unsigned over)
10060 {
10061 /* v4 and v5 are the same spec before and after it was released,
10062 so allow mixing them. */
10063 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
10064 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
10065 return TRUE;
10066
10067 return (iver == over);
10068 }
10069
10070 /* Merge backend specific data from an object file to the output
10071 object file when linking. */
10072
10073 static bfd_boolean
10074 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
10075 {
10076 flagword out_flags;
10077 flagword in_flags;
10078 bfd_boolean flags_compatible = TRUE;
10079 asection *sec;
10080
10081 /* Check if we have the same endianness. */
10082 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
10083 return FALSE;
10084
10085 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
10086 return TRUE;
10087
10088 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
10089 return FALSE;
10090
10091 /* The input BFD must have had its flags initialised. */
10092 /* The following seems bogus to me -- The flags are initialized in
10093 the assembler but I don't think an elf_flags_init field is
10094 written into the object. */
10095 /* BFD_ASSERT (elf_flags_init (ibfd)); */
10096
10097 in_flags = elf_elfheader (ibfd)->e_flags;
10098 out_flags = elf_elfheader (obfd)->e_flags;
10099
10100 /* In theory there is no reason why we couldn't handle this. However
10101 in practice it isn't even close to working and there is no real
10102 reason to want it. */
10103 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
10104 && !(ibfd->flags & DYNAMIC)
10105 && (in_flags & EF_ARM_BE8))
10106 {
10107 _bfd_error_handler (_("error: %B is already in final BE8 format"),
10108 ibfd);
10109 return FALSE;
10110 }
10111
10112 if (!elf_flags_init (obfd))
10113 {
10114 /* If the input is the default architecture and had the default
10115 flags then do not bother setting the flags for the output
10116 architecture, instead allow future merges to do this. If no
10117 future merges ever set these flags then they will retain their
10118 uninitialised values, which, surprise surprise, correspond
10119 to the default values. */
10120 if (bfd_get_arch_info (ibfd)->the_default
10121 && elf_elfheader (ibfd)->e_flags == 0)
10122 return TRUE;
10123
10124 elf_flags_init (obfd) = TRUE;
10125 elf_elfheader (obfd)->e_flags = in_flags;
10126
10127 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
10128 && bfd_get_arch_info (obfd)->the_default)
10129 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
10130
10131 return TRUE;
10132 }
10133
10134 /* Determine what should happen if the input ARM architecture
10135 does not match the output ARM architecture. */
10136 if (! bfd_arm_merge_machines (ibfd, obfd))
10137 return FALSE;
10138
10139 /* Identical flags must be compatible. */
10140 if (in_flags == out_flags)
10141 return TRUE;
10142
10143 /* Check to see if the input BFD actually contains any sections. If
10144 not, its flags may not have been initialised either, but it
10145 cannot actually cause any incompatibility. Do not short-circuit
10146 dynamic objects; their section list may be emptied by
10147 elf_link_add_object_symbols.
10148
10149 Also check to see if there are no code sections in the input.
10150 In this case there is no need to check for code specific flags.
10151 XXX - do we need to worry about floating-point format compatibility
10152 in data sections ? */
10153 if (!(ibfd->flags & DYNAMIC))
10154 {
10155 bfd_boolean null_input_bfd = TRUE;
10156 bfd_boolean only_data_sections = TRUE;
10157
10158 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
10159 {
10160 /* Ignore synthetic glue sections. */
10161 if (strcmp (sec->name, ".glue_7")
10162 && strcmp (sec->name, ".glue_7t"))
10163 {
10164 if ((bfd_get_section_flags (ibfd, sec)
10165 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
10166 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
10167 only_data_sections = FALSE;
10168
10169 null_input_bfd = FALSE;
10170 break;
10171 }
10172 }
10173
10174 if (null_input_bfd || only_data_sections)
10175 return TRUE;
10176 }
10177
10178 /* Complain about various flag mismatches. */
10179 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
10180 EF_ARM_EABI_VERSION (out_flags)))
10181 {
10182 _bfd_error_handler
10183 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
10184 ibfd, obfd,
10185 (in_flags & EF_ARM_EABIMASK) >> 24,
10186 (out_flags & EF_ARM_EABIMASK) >> 24);
10187 return FALSE;
10188 }
10189
10190 /* Not sure what needs to be checked for EABI versions >= 1. */
10191 /* VxWorks libraries do not use these flags. */
10192 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
10193 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
10194 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
10195 {
10196 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
10197 {
10198 _bfd_error_handler
10199 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
10200 ibfd, obfd,
10201 in_flags & EF_ARM_APCS_26 ? 26 : 32,
10202 out_flags & EF_ARM_APCS_26 ? 26 : 32);
10203 flags_compatible = FALSE;
10204 }
10205
10206 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
10207 {
10208 if (in_flags & EF_ARM_APCS_FLOAT)
10209 _bfd_error_handler
10210 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
10211 ibfd, obfd);
10212 else
10213 _bfd_error_handler
10214 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
10215 ibfd, obfd);
10216
10217 flags_compatible = FALSE;
10218 }
10219
10220 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
10221 {
10222 if (in_flags & EF_ARM_VFP_FLOAT)
10223 _bfd_error_handler
10224 (_("error: %B uses VFP instructions, whereas %B does not"),
10225 ibfd, obfd);
10226 else
10227 _bfd_error_handler
10228 (_("error: %B uses FPA instructions, whereas %B does not"),
10229 ibfd, obfd);
10230
10231 flags_compatible = FALSE;
10232 }
10233
10234 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
10235 {
10236 if (in_flags & EF_ARM_MAVERICK_FLOAT)
10237 _bfd_error_handler
10238 (_("error: %B uses Maverick instructions, whereas %B does not"),
10239 ibfd, obfd);
10240 else
10241 _bfd_error_handler
10242 (_("error: %B does not use Maverick instructions, whereas %B does"),
10243 ibfd, obfd);
10244
10245 flags_compatible = FALSE;
10246 }
10247
10248 #ifdef EF_ARM_SOFT_FLOAT
10249 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
10250 {
10251 /* We can allow interworking between code that is VFP format
10252 layout, and uses either soft float or integer regs for
10253 passing floating point arguments and results. We already
10254 know that the APCS_FLOAT flags match; similarly for VFP
10255 flags. */
10256 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
10257 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
10258 {
10259 if (in_flags & EF_ARM_SOFT_FLOAT)
10260 _bfd_error_handler
10261 (_("error: %B uses software FP, whereas %B uses hardware FP"),
10262 ibfd, obfd);
10263 else
10264 _bfd_error_handler
10265 (_("error: %B uses hardware FP, whereas %B uses software FP"),
10266 ibfd, obfd);
10267
10268 flags_compatible = FALSE;
10269 }
10270 }
10271 #endif
10272
10273 /* Interworking mismatch is only a warning. */
10274 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
10275 {
10276 if (in_flags & EF_ARM_INTERWORK)
10277 {
10278 _bfd_error_handler
10279 (_("Warning: %B supports interworking, whereas %B does not"),
10280 ibfd, obfd);
10281 }
10282 else
10283 {
10284 _bfd_error_handler
10285 (_("Warning: %B does not support interworking, whereas %B does"),
10286 ibfd, obfd);
10287 }
10288 }
10289 }
10290
10291 return flags_compatible;
10292 }
10293
10294 /* Display the flags field. */
10295
10296 static bfd_boolean
10297 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
10298 {
10299 FILE * file = (FILE *) ptr;
10300 unsigned long flags;
10301
10302 BFD_ASSERT (abfd != NULL && ptr != NULL);
10303
10304 /* Print normal ELF private data. */
10305 _bfd_elf_print_private_bfd_data (abfd, ptr);
10306
10307 flags = elf_elfheader (abfd)->e_flags;
10308 /* Ignore init flag - it may not be set, despite the flags field
10309 containing valid data. */
10310
10311 /* xgettext:c-format */
10312 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
10313
10314 switch (EF_ARM_EABI_VERSION (flags))
10315 {
10316 case EF_ARM_EABI_UNKNOWN:
10317 /* The following flag bits are GNU extensions and not part of the
10318 official ARM ELF extended ABI. Hence they are only decoded if
10319 the EABI version is not set. */
10320 if (flags & EF_ARM_INTERWORK)
10321 fprintf (file, _(" [interworking enabled]"));
10322
10323 if (flags & EF_ARM_APCS_26)
10324 fprintf (file, " [APCS-26]");
10325 else
10326 fprintf (file, " [APCS-32]");
10327
10328 if (flags & EF_ARM_VFP_FLOAT)
10329 fprintf (file, _(" [VFP float format]"));
10330 else if (flags & EF_ARM_MAVERICK_FLOAT)
10331 fprintf (file, _(" [Maverick float format]"));
10332 else
10333 fprintf (file, _(" [FPA float format]"));
10334
10335 if (flags & EF_ARM_APCS_FLOAT)
10336 fprintf (file, _(" [floats passed in float registers]"));
10337
10338 if (flags & EF_ARM_PIC)
10339 fprintf (file, _(" [position independent]"));
10340
10341 if (flags & EF_ARM_NEW_ABI)
10342 fprintf (file, _(" [new ABI]"));
10343
10344 if (flags & EF_ARM_OLD_ABI)
10345 fprintf (file, _(" [old ABI]"));
10346
10347 if (flags & EF_ARM_SOFT_FLOAT)
10348 fprintf (file, _(" [software FP]"));
10349
10350 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
10351 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
10352 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
10353 | EF_ARM_MAVERICK_FLOAT);
10354 break;
10355
10356 case EF_ARM_EABI_VER1:
10357 fprintf (file, _(" [Version1 EABI]"));
10358
10359 if (flags & EF_ARM_SYMSARESORTED)
10360 fprintf (file, _(" [sorted symbol table]"));
10361 else
10362 fprintf (file, _(" [unsorted symbol table]"));
10363
10364 flags &= ~ EF_ARM_SYMSARESORTED;
10365 break;
10366
10367 case EF_ARM_EABI_VER2:
10368 fprintf (file, _(" [Version2 EABI]"));
10369
10370 if (flags & EF_ARM_SYMSARESORTED)
10371 fprintf (file, _(" [sorted symbol table]"));
10372 else
10373 fprintf (file, _(" [unsorted symbol table]"));
10374
10375 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
10376 fprintf (file, _(" [dynamic symbols use segment index]"));
10377
10378 if (flags & EF_ARM_MAPSYMSFIRST)
10379 fprintf (file, _(" [mapping symbols precede others]"));
10380
10381 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
10382 | EF_ARM_MAPSYMSFIRST);
10383 break;
10384
10385 case EF_ARM_EABI_VER3:
10386 fprintf (file, _(" [Version3 EABI]"));
10387 break;
10388
10389 case EF_ARM_EABI_VER4:
10390 fprintf (file, _(" [Version4 EABI]"));
10391 goto eabi;
10392
10393 case EF_ARM_EABI_VER5:
10394 fprintf (file, _(" [Version5 EABI]"));
10395 eabi:
10396 if (flags & EF_ARM_BE8)
10397 fprintf (file, _(" [BE8]"));
10398
10399 if (flags & EF_ARM_LE8)
10400 fprintf (file, _(" [LE8]"));
10401
10402 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
10403 break;
10404
10405 default:
10406 fprintf (file, _(" <EABI version unrecognised>"));
10407 break;
10408 }
10409
10410 flags &= ~ EF_ARM_EABIMASK;
10411
10412 if (flags & EF_ARM_RELEXEC)
10413 fprintf (file, _(" [relocatable executable]"));
10414
10415 if (flags & EF_ARM_HASENTRY)
10416 fprintf (file, _(" [has entry point]"));
10417
10418 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);
10419
10420 if (flags)
10421 fprintf (file, _("<Unrecognised flag bits set>"));
10422
10423 fputc ('\n', file);
10424
10425 return TRUE;
10426 }
10427
10428 static int
10429 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
10430 {
10431 switch (ELF_ST_TYPE (elf_sym->st_info))
10432 {
10433 case STT_ARM_TFUNC:
10434 return ELF_ST_TYPE (elf_sym->st_info);
10435
10436 case STT_ARM_16BIT:
10437 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
10438 This allows us to distinguish between data used by Thumb instructions
10439 and non-data (which is probably code) inside Thumb regions of an
10440 executable. */
10441 if (type != STT_OBJECT && type != STT_TLS)
10442 return ELF_ST_TYPE (elf_sym->st_info);
10443 break;
10444
10445 default:
10446 break;
10447 }
10448
10449 return type;
10450 }
10451
10452 static asection *
10453 elf32_arm_gc_mark_hook (asection *sec,
10454 struct bfd_link_info *info,
10455 Elf_Internal_Rela *rel,
10456 struct elf_link_hash_entry *h,
10457 Elf_Internal_Sym *sym)
10458 {
10459 if (h != NULL)
10460 switch (ELF32_R_TYPE (rel->r_info))
10461 {
10462 case R_ARM_GNU_VTINHERIT:
10463 case R_ARM_GNU_VTENTRY:
10464 return NULL;
10465 }
10466
10467 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
10468 }
10469
10470 /* Update the got entry reference counts for the section being removed. */
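10471 The decrements here mirror the increments made in
10472 elf32_arm_check_relocs below, so that GOT and PLT entries whose only
10473 references were garbage-collected are not emitted. */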
10471
10472 static bfd_boolean
10473 elf32_arm_gc_sweep_hook (bfd * abfd,
10474 struct bfd_link_info * info,
10475 asection * sec,
10476 const Elf_Internal_Rela * relocs)
10477 {
10478 Elf_Internal_Shdr *symtab_hdr;
10479 struct elf_link_hash_entry **sym_hashes;
10480 bfd_signed_vma *local_got_refcounts;
10481 const Elf_Internal_Rela *rel, *relend;
10482 struct elf32_arm_link_hash_table * globals;
10483
10484 if (info->relocatable)
10485 return TRUE;
10486
10487 globals = elf32_arm_hash_table (info);
10488
10489 elf_section_data (sec)->local_dynrel = NULL;
10490
10491 symtab_hdr = & elf_symtab_hdr (abfd);
10492 sym_hashes = elf_sym_hashes (abfd);
10493 local_got_refcounts = elf_local_got_refcounts (abfd);
10494
10495 check_use_blx (globals);
10496
10497 relend = relocs + sec->reloc_count;
10498 for (rel = relocs; rel < relend; rel++)
10499 {
10500 unsigned long r_symndx;
10501 struct elf_link_hash_entry *h = NULL;
10502 int r_type;
10503
10504 r_symndx = ELF32_R_SYM (rel->r_info);
10505 if (r_symndx >= symtab_hdr->sh_info)
10506 {
10507 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10508 while (h->root.type == bfd_link_hash_indirect
10509 || h->root.type == bfd_link_hash_warning)
10510 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10511 }
10512
10513 r_type = ELF32_R_TYPE (rel->r_info);
10514 r_type = arm_real_reloc_type (globals, r_type);
10515 switch (r_type)
10516 {
10517 case R_ARM_GOT32:
10518 case R_ARM_GOT_PREL:
10519 case R_ARM_TLS_GD32:
10520 case R_ARM_TLS_IE32:
10521 if (h != NULL)
10522 {
10523 if (h->got.refcount > 0)
10524 h->got.refcount -= 1;
10525 }
10526 else if (local_got_refcounts != NULL)
10527 {
10528 if (local_got_refcounts[r_symndx] > 0)
10529 local_got_refcounts[r_symndx] -= 1;
10530 }
10531 break;
10532
10533 case R_ARM_TLS_LDM32:
10534 elf32_arm_hash_table (info)->tls_ldm_got.refcount -= 1;
10535 break;
10536
10537 case R_ARM_ABS32:
10538 case R_ARM_ABS32_NOI:
10539 case R_ARM_REL32:
10540 case R_ARM_REL32_NOI:
10541 case R_ARM_PC24:
10542 case R_ARM_PLT32:
10543 case R_ARM_CALL:
10544 case R_ARM_JUMP24:
10545 case R_ARM_PREL31:
10546 case R_ARM_THM_CALL:
10547 case R_ARM_THM_JUMP24:
10548 case R_ARM_THM_JUMP19:
10549 case R_ARM_MOVW_ABS_NC:
10550 case R_ARM_MOVT_ABS:
10551 case R_ARM_MOVW_PREL_NC:
10552 case R_ARM_MOVT_PREL:
10553 case R_ARM_THM_MOVW_ABS_NC:
10554 case R_ARM_THM_MOVT_ABS:
10555 case R_ARM_THM_MOVW_PREL_NC:
10556 case R_ARM_THM_MOVT_PREL:
10557 /* Should the interworking branches be here also? */
10558
10559 if (h != NULL)
10560 {
10561 struct elf32_arm_link_hash_entry *eh;
10562 struct elf32_arm_relocs_copied **pp;
10563 struct elf32_arm_relocs_copied *p;
10564
10565 eh = (struct elf32_arm_link_hash_entry *) h;
10566
10567 if (h->plt.refcount > 0)
10568 {
10569 h->plt.refcount -= 1;
10570 if (r_type == R_ARM_THM_CALL)
10571 eh->plt_maybe_thumb_refcount--;
10572
10573 if (r_type == R_ARM_THM_JUMP24
10574 || r_type == R_ARM_THM_JUMP19)
10575 eh->plt_thumb_refcount--;
10576 }
10577
10578 if (r_type == R_ARM_ABS32
10579 || r_type == R_ARM_REL32
10580 || r_type == R_ARM_ABS32_NOI
10581 || r_type == R_ARM_REL32_NOI)
10582 {
10583 for (pp = &eh->relocs_copied; (p = *pp) != NULL;
10584 pp = &p->next)
10585 if (p->section == sec)
10586 {
10587 p->count -= 1;
10588 if (ELF32_R_TYPE (rel->r_info) == R_ARM_REL32
10589 || ELF32_R_TYPE (rel->r_info) == R_ARM_REL32_NOI)
10590 p->pc_count -= 1;
10591 if (p->count == 0)
10592 *pp = p->next;
10593 break;
10594 }
10595 }
10596 }
10597 break;
10598
10599 default:
10600 break;
10601 }
10602 }
10603
10604 return TRUE;
10605 }
10606
10607 /* Look through the relocs for a section during the first phase. */
10608
10609 static bfd_boolean
10610 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
10611 asection *sec, const Elf_Internal_Rela *relocs)
10612 {
10613 Elf_Internal_Shdr *symtab_hdr;
10614 struct elf_link_hash_entry **sym_hashes;
10615 const Elf_Internal_Rela *rel;
10616 const Elf_Internal_Rela *rel_end;
10617 bfd *dynobj;
10618 asection *sreloc;
10619 bfd_vma *local_got_offsets;
10620 struct elf32_arm_link_hash_table *htab;
10621 bfd_boolean needs_plt;
10622 unsigned long nsyms;
10623
10624 if (info->relocatable)
10625 return TRUE;
10626
10627 BFD_ASSERT (is_arm_elf (abfd));
10628
10629 htab = elf32_arm_hash_table (info);
10630 sreloc = NULL;
10631
10632 /* Create dynamic sections for relocatable executables so that we can
10633 copy relocations. */
10634 if (htab->root.is_relocatable_executable
10635 && ! htab->root.dynamic_sections_created)
10636 {
10637 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
10638 return FALSE;
10639 }
10640
10641 dynobj = elf_hash_table (info)->dynobj;
10642 local_got_offsets = elf_local_got_offsets (abfd);
10643
10644 symtab_hdr = & elf_symtab_hdr (abfd);
10645 sym_hashes = elf_sym_hashes (abfd);
10646 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
10647
10648 rel_end = relocs + sec->reloc_count;
10649 for (rel = relocs; rel < rel_end; rel++)
10650 {
10651 struct elf_link_hash_entry *h;
10652 struct elf32_arm_link_hash_entry *eh;
10653 unsigned long r_symndx;
10654 int r_type;
10655
10656 r_symndx = ELF32_R_SYM (rel->r_info);
10657 r_type = ELF32_R_TYPE (rel->r_info);
10658 r_type = arm_real_reloc_type (htab, r_type);
10659
10660 if (r_symndx >= nsyms
10661 /* PR 9934: It is possible to have relocations that do not
10662 refer to symbols, thus it is also possible to have an
10663 object file containing relocations but no symbol table. */
10664 && (r_symndx > 0 || nsyms > 0))
10665 {
10666 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
10667 r_symndx);
10668 return FALSE;
10669 }
10670
10671 if (nsyms == 0 || r_symndx < symtab_hdr->sh_info)
10672 h = NULL;
10673 else
10674 {
10675 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10676 while (h->root.type == bfd_link_hash_indirect
10677 || h->root.type == bfd_link_hash_warning)
10678 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10679 }
10680
10681 eh = (struct elf32_arm_link_hash_entry *) h;
10682
10683 switch (r_type)
10684 {
10685 case R_ARM_GOT32:
10686 case R_ARM_GOT_PREL:
10687 case R_ARM_TLS_GD32:
10688 case R_ARM_TLS_IE32:
10689 /* This symbol requires a global offset table entry. */
10690 {
10691 int tls_type, old_tls_type;
10692
10693 switch (r_type)
10694 {
10695 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
10696 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
10697 default: tls_type = GOT_NORMAL; break;
10698 }
10699
10700 if (h != NULL)
10701 {
10702 h->got.refcount++;
10703 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
10704 }
10705 else
10706 {
10707 bfd_signed_vma *local_got_refcounts;
10708
10709 /* This is a global offset table entry for a local symbol. */
10710 local_got_refcounts = elf_local_got_refcounts (abfd);
10711 if (local_got_refcounts == NULL)
10712 {
10713 bfd_size_type size;
10714
10715 size = symtab_hdr->sh_info;
10716 size *= (sizeof (bfd_signed_vma) + sizeof (char));
10717 local_got_refcounts = bfd_zalloc (abfd, size);
10718 if (local_got_refcounts == NULL)
10719 return FALSE;
10720 elf_local_got_refcounts (abfd) = local_got_refcounts;
10721 elf32_arm_local_got_tls_type (abfd)
10722 = (char *) (local_got_refcounts + symtab_hdr->sh_info);
10723 }
10724 local_got_refcounts[r_symndx] += 1;
10725 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
10726 }
10727
10728 /* We will already have issued an error message if there is a
10729 TLS / non-TLS mismatch, based on the symbol type. We don't
10730 support any linker relaxations. So just combine any TLS
10731 types needed. */
10732 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
10733 && tls_type != GOT_NORMAL)
10734 tls_type |= old_tls_type;
10735
10736 if (old_tls_type != tls_type)
10737 {
10738 if (h != NULL)
10739 elf32_arm_hash_entry (h)->tls_type = tls_type;
10740 else
10741 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
10742 }
10743 }
10744 /* Fall through. */
10745
10746 case R_ARM_TLS_LDM32:
10747 if (r_type == R_ARM_TLS_LDM32)
10748 htab->tls_ldm_got.refcount++;
10749 /* Fall through. */
10750
10751 case R_ARM_GOTOFF32:
10752 case R_ARM_GOTPC:
10753 if (htab->sgot == NULL)
10754 {
10755 if (htab->root.dynobj == NULL)
10756 htab->root.dynobj = abfd;
10757 if (!create_got_section (htab->root.dynobj, info))
10758 return FALSE;
10759 }
10760 break;
10761
10762 case R_ARM_ABS12:
10763 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
10764 ldr __GOTT_INDEX__ offsets. */
10765 if (!htab->vxworks_p)
10766 break;
10767 /* Fall through. */
10768
10769 case R_ARM_PC24:
10770 case R_ARM_PLT32:
10771 case R_ARM_CALL:
10772 case R_ARM_JUMP24:
10773 case R_ARM_PREL31:
10774 case R_ARM_THM_CALL:
10775 case R_ARM_THM_JUMP24:
10776 case R_ARM_THM_JUMP19:
10777 needs_plt = 1;
10778 goto normal_reloc;
10779
10780 case R_ARM_MOVW_ABS_NC:
10781 case R_ARM_MOVT_ABS:
10782 case R_ARM_THM_MOVW_ABS_NC:
10783 case R_ARM_THM_MOVT_ABS:
10784 if (info->shared)
10785 {
10786 (*_bfd_error_handler)
10787 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
10788 abfd, elf32_arm_howto_table_1[r_type].name,
10789 (h) ? h->root.root.string : "a local symbol");
10790 bfd_set_error (bfd_error_bad_value);
10791 return FALSE;
10792 }
10793
10794 /* Fall through. */
10795 case R_ARM_ABS32:
10796 case R_ARM_ABS32_NOI:
10797 case R_ARM_REL32:
10798 case R_ARM_REL32_NOI:
10799 case R_ARM_MOVW_PREL_NC:
10800 case R_ARM_MOVT_PREL:
10801 case R_ARM_THM_MOVW_PREL_NC:
10802 case R_ARM_THM_MOVT_PREL:
10803 needs_plt = 0;
10804 normal_reloc:
10805
10806 /* Should the interworking branches be listed here? */
10807 if (h != NULL)
10808 {
10809 /* If this reloc is in a read-only section, we might
10810 need a copy reloc. We can't check reliably at this
10811 stage whether the section is read-only, as input
10812 sections have not yet been mapped to output sections.
10813 Tentatively set the flag for now, and correct in
10814 adjust_dynamic_symbol. */
10815 if (!info->shared)
10816 h->non_got_ref = 1;
10817
10818 /* We may need a .plt entry if the function this reloc
10819 refers to is in a different object. We can't tell for
10820 sure yet, because something later might force the
10821 symbol local. */
10822 if (needs_plt)
10823 h->needs_plt = 1;
10824
10825 /* If we create a PLT entry, this relocation will reference
10826 it, even if it's an ABS32 relocation. */
10827 h->plt.refcount += 1;
10828
10829 /* It's too early to use htab->use_blx here, so we have to
10830 record possible blx references separately from
10831 relocs that definitely need a thumb stub. */
10832
10833 if (r_type == R_ARM_THM_CALL)
10834 eh->plt_maybe_thumb_refcount += 1;
10835
10836 if (r_type == R_ARM_THM_JUMP24
10837 || r_type == R_ARM_THM_JUMP19)
10838 eh->plt_thumb_refcount += 1;
10839 }
10840
10841 /* If we are creating a shared library or relocatable executable,
10842 and this is a reloc against a global symbol, or a non PC
10843 relative reloc against a local symbol, then we need to copy
10844 the reloc into the shared library. However, if we are linking
10845 with -Bsymbolic, we do not need to copy a reloc against a
10846 global symbol which is defined in an object we are
10847 including in the link (i.e., DEF_REGULAR is set). At
10848 this point we have not seen all the input files, so it is
10849 possible that DEF_REGULAR is not set now but will be set
10850 later (it is never cleared). We account for that
10851 possibility below by storing information in the
10852 relocs_copied field of the hash table entry. */
10853 if ((info->shared || htab->root.is_relocatable_executable)
10854 && (sec->flags & SEC_ALLOC) != 0
10855 && ((r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI)
10856 || (h != NULL && ! h->needs_plt
10857 && (! info->symbolic || ! h->def_regular))))
10858 {
10859 struct elf32_arm_relocs_copied *p, **head;
10860
10861 /* When creating a shared object, we must copy these
10862 reloc types into the output file. We create a reloc
10863 section in dynobj and make room for this reloc. */
10864 if (sreloc == NULL)
10865 {
10866 sreloc = _bfd_elf_make_dynamic_reloc_section
10867 (sec, dynobj, 2, abfd, ! htab->use_rel);
10868
10869 if (sreloc == NULL)
10870 return FALSE;
10871
10872 /* BPABI objects never have dynamic relocations mapped. */
10873 if (htab->symbian_p)
10874 {
10875 flagword flags;
10876
10877 flags = bfd_get_section_flags (dynobj, sreloc);
10878 flags &= ~(SEC_LOAD | SEC_ALLOC);
10879 bfd_set_section_flags (dynobj, sreloc, flags);
10880 }
10881 }
10882
10883 /* If this is a global symbol, we count the number of
10884 relocations we need for this symbol. */
10885 if (h != NULL)
10886 {
10887 head = &((struct elf32_arm_link_hash_entry *) h)->relocs_copied;
10888 }
10889 else
10890 {
10891 /* Track dynamic relocs needed for local syms too.
10892 We really need local syms available to do this
10893 easily. Oh well. */
10894 asection *s;
10895 void *vpp;
10896 Elf_Internal_Sym *isym;
10897
10898 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
10899 abfd, r_symndx);
10900 if (isym == NULL)
10901 return FALSE;
10902
10903 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
10904 if (s == NULL)
10905 s = sec;
10906
10907 vpp = &elf_section_data (s)->local_dynrel;
10908 head = (struct elf32_arm_relocs_copied **) vpp;
10909 }
10910
10911 p = *head;
10912 if (p == NULL || p->section != sec)
10913 {
10914 bfd_size_type amt = sizeof *p;
10915
10916 p = bfd_alloc (htab->root.dynobj, amt);
10917 if (p == NULL)
10918 return FALSE;
10919 p->next = *head;
10920 *head = p;
10921 p->section = sec;
10922 p->count = 0;
10923 p->pc_count = 0;
10924 }
10925
10926 if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
10927 p->pc_count += 1;
10928 p->count += 1;
10929 }
10930 break;
10931
10932 /* This relocation describes the C++ object vtable hierarchy.
10933 Reconstruct it for later use during GC. */
10934 case R_ARM_GNU_VTINHERIT:
10935 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
10936 return FALSE;
10937 break;
10938
10939 /* This relocation describes which C++ vtable entries are actually
10940 used. Record for later use during GC. */
10941 case R_ARM_GNU_VTENTRY:
10942 BFD_ASSERT (h != NULL);
10943 if (h != NULL
10944 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
10945 return FALSE;
10946 break;
10947 }
10948 }
10949
10950 return TRUE;
10951 }
10952
10953 /* Unwinding tables are not referenced directly. This pass marks them as
10954 required if the corresponding code section is marked. */
10955
10956 static bfd_boolean
10957 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
10958 elf_gc_mark_hook_fn gc_mark_hook)
10959 {
10960 bfd *sub;
10961 Elf_Internal_Shdr **elf_shdrp;
10962 bfd_boolean again;
10963
10964 /* Marking EH data may cause additional code sections to be marked,
10965 requiring multiple passes. */
10966 again = TRUE;
10967 while (again)
10968 {
10969 again = FALSE;
10970 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
10971 {
10972 asection *o;
10973
10974 if (! is_arm_elf (sub))
10975 continue;
10976
10977 elf_shdrp = elf_elfsections (sub);
10978 for (o = sub->sections; o != NULL; o = o->next)
10979 {
10980 Elf_Internal_Shdr *hdr;
10981
10982 hdr = &elf_section_data (o)->this_hdr;
10983 if (hdr->sh_type == SHT_ARM_EXIDX
10984 && hdr->sh_link
10985 && hdr->sh_link < elf_numsections (sub)
10986 && !o->gc_mark
10987 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
10988 {
10989 again = TRUE;
10990 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
10991 return FALSE;
10992 }
10993 }
10994 }
10995 }
10996
10997 return TRUE;
10998 }
10999
11000 /* Treat mapping symbols as special target symbols. */
11001
11002 static bfd_boolean
11003 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
11004 {
11005 return bfd_is_arm_special_symbol_name (sym->name,
11006 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
11007 }
11008
11009 /* This is a copy of elf_find_function() from elf.c except that
11010 ARM mapping symbols are ignored when looking for function names
11011 and STT_ARM_TFUNC is considered to be a function type. */
11012
11013 static bfd_boolean
11014 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
11015 asection * section,
11016 asymbol ** symbols,
11017 bfd_vma offset,
11018 const char ** filename_ptr,
11019 const char ** functionname_ptr)
11020 {
11021 const char * filename = NULL;
11022 asymbol * func = NULL;
11023 bfd_vma low_func = 0;
11024 asymbol ** p;
11025
11026 for (p = symbols; *p != NULL; p++)
11027 {
11028 elf_symbol_type *q;
11029
11030 q = (elf_symbol_type *) *p;
11031
11032 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
11033 {
11034 default:
11035 break;
11036 case STT_FILE:
11037 filename = bfd_asymbol_name (&q->symbol);
11038 break;
11039 case STT_FUNC:
11040 case STT_ARM_TFUNC:
11041 case STT_NOTYPE:
11042 /* Skip mapping symbols. */
11043 if ((q->symbol.flags & BSF_LOCAL)
11044 && bfd_is_arm_special_symbol_name (q->symbol.name,
11045 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
11046 continue;
11047 /* Fall through. */
11048 if (bfd_get_section (&q->symbol) == section
11049 && q->symbol.value >= low_func
11050 && q->symbol.value <= offset)
11051 {
11052 func = (asymbol *) q;
11053 low_func = q->symbol.value;
11054 }
11055 break;
11056 }
11057 }
11058
11059 if (func == NULL)
11060 return FALSE;
11061
11062 if (filename_ptr)
11063 *filename_ptr = filename;
11064 if (functionname_ptr)
11065 *functionname_ptr = bfd_asymbol_name (func);
11066
11067 return TRUE;
11068 }
11069
11070
11071 /* Find the nearest line to a particular section and offset, for error
11072 reporting. This code is a duplicate of the code in elf.c, except
11073 that it uses arm_elf_find_function. */
11074
11075 static bfd_boolean
11076 elf32_arm_find_nearest_line (bfd * abfd,
11077 asection * section,
11078 asymbol ** symbols,
11079 bfd_vma offset,
11080 const char ** filename_ptr,
11081 const char ** functionname_ptr,
11082 unsigned int * line_ptr)
11083 {
11084 bfd_boolean found = FALSE;
11085
11086 /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
11087
11088 if (_bfd_dwarf2_find_nearest_line (abfd, section, symbols, offset,
11089 filename_ptr, functionname_ptr,
11090 line_ptr, 0,
11091 & elf_tdata (abfd)->dwarf2_find_line_info))
11092 {
11093 if (!*functionname_ptr)
11094 arm_elf_find_function (abfd, section, symbols, offset,
11095 *filename_ptr ? NULL : filename_ptr,
11096 functionname_ptr);
11097
11098 return TRUE;
11099 }
11100
11101 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
11102 & found, filename_ptr,
11103 functionname_ptr, line_ptr,
11104 & elf_tdata (abfd)->line_info))
11105 return FALSE;
11106
11107 if (found && (*functionname_ptr || *line_ptr))
11108 return TRUE;
11109
11110 if (symbols == NULL)
11111 return FALSE;
11112
11113 if (! arm_elf_find_function (abfd, section, symbols, offset,
11114 filename_ptr, functionname_ptr))
11115 return FALSE;
11116
11117 *line_ptr = 0;
11118 return TRUE;
11119 }
11120
11121 static bfd_boolean
11122 elf32_arm_find_inliner_info (bfd * abfd,
11123 const char ** filename_ptr,
11124 const char ** functionname_ptr,
11125 unsigned int * line_ptr)
11126 {
11127 bfd_boolean found;
11128 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
11129 functionname_ptr, line_ptr,
11130 & elf_tdata (abfd)->dwarf2_find_line_info);
11131 return found;
11132 }
11133
11134 /* Adjust a symbol defined by a dynamic object and referenced by a
11135 regular object. The current definition is in some section of the
11136 dynamic object, but we're not including those sections. We have to
11137 change the definition to something the rest of the link can
11138 understand. */
11139
11140 static bfd_boolean
11141 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
11142 struct elf_link_hash_entry * h)
11143 {
11144 bfd * dynobj;
11145 asection * s;
11146 struct elf32_arm_link_hash_entry * eh;
11147 struct elf32_arm_link_hash_table *globals;
11148
11149 globals = elf32_arm_hash_table (info);
11150 dynobj = elf_hash_table (info)->dynobj;
11151
11152 /* Make sure we know what is going on here. */
11153 BFD_ASSERT (dynobj != NULL
11154 && (h->needs_plt
11155 || h->u.weakdef != NULL
11156 || (h->def_dynamic
11157 && h->ref_regular
11158 && !h->def_regular)));
11159
11160 eh = (struct elf32_arm_link_hash_entry *) h;
11161
11162 /* If this is a function, put it in the procedure linkage table. We
11163 will fill in the contents of the procedure linkage table later,
11164 when we know the address of the .got section. */
11165 if (h->type == STT_FUNC || h->type == STT_ARM_TFUNC
11166 || h->needs_plt)
11167 {
11168 if (h->plt.refcount <= 0
11169 || SYMBOL_CALLS_LOCAL (info, h)
11170 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
11171 && h->root.type == bfd_link_hash_undefweak))
11172 {
11173 /* This case can occur if we saw a PLT32 reloc in an input
11174 file, but the symbol was never referred to by a dynamic
11175 object, or if all references were garbage collected. In
11176 such a case, we don't actually need to build a procedure
11177 linkage table, and we can just do a PC24 reloc instead. */
11178 h->plt.offset = (bfd_vma) -1;
11179 eh->plt_thumb_refcount = 0;
11180 eh->plt_maybe_thumb_refcount = 0;
11181 h->needs_plt = 0;
11182 }
11183
11184 return TRUE;
11185 }
11186 else
11187 {
11188 /* It's possible that we incorrectly decided a .plt reloc was
11189 needed for an R_ARM_PC24 or similar reloc to a non-function sym
11190 in check_relocs. We can't decide accurately between function
11191 and non-function syms in check_relocs; objects loaded later in
11192 the link may change h->type. So fix it now. */
11193 h->plt.offset = (bfd_vma) -1;
11194 eh->plt_thumb_refcount = 0;
11195 eh->plt_maybe_thumb_refcount = 0;
11196 }
11197
11198 /* If this is a weak symbol, and there is a real definition, the
11199 processor independent code will have arranged for us to see the
11200 real definition first, and we can just use the same value. */
11201 if (h->u.weakdef != NULL)
11202 {
11203 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
11204 || h->u.weakdef->root.type == bfd_link_hash_defweak);
11205 h->root.u.def.section = h->u.weakdef->root.u.def.section;
11206 h->root.u.def.value = h->u.weakdef->root.u.def.value;
11207 return TRUE;
11208 }
11209
11210 /* If there are no non-GOT references, we do not need a copy
11211 relocation. */
11212 if (!h->non_got_ref)
11213 return TRUE;
11214
11215 /* This is a reference to a symbol defined by a dynamic object which
11216 is not a function. */
11217
11218 /* If we are creating a shared library, we must presume that the
11219 only references to the symbol are via the global offset table.
11220 For such cases we need not do anything here; the relocations will
11221 be handled correctly by relocate_section. Relocatable executables
11222 can reference data in shared objects directly, so we don't need to
11223 do anything here. */
11224 if (info->shared || globals->root.is_relocatable_executable)
11225 return TRUE;
11226
11227 if (h->size == 0)
11228 {
11229 (*_bfd_error_handler) (_("dynamic variable `%s' is zero size"),
11230 h->root.root.string);
11231 return TRUE;
11232 }
11233
11234 /* We must allocate the symbol in our .dynbss section, which will
11235 become part of the .bss section of the executable. There will be
11236 an entry for this symbol in the .dynsym section. The dynamic
11237 object will contain position independent code, so all references
11238 from the dynamic object to this symbol will go through the global
11239 offset table. The dynamic linker will use the .dynsym entry to
11240 determine the address it must put in the global offset table, so
11241 both the dynamic object and the regular object will refer to the
11242 same memory location for the variable. */
11243 s = bfd_get_section_by_name (dynobj, ".dynbss");
11244 BFD_ASSERT (s != NULL);
11245
11246 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
11247 copy the initial value out of the dynamic object and into the
11248 runtime process image. We need to remember the offset into the
11249 .rel(a).bss section we are going to use. */
11250 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0)
11251 {
11252 asection *srel;
11253
11254 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (globals, ".bss"));
11255 BFD_ASSERT (srel != NULL);
11256 srel->size += RELOC_SIZE (globals);
11257 h->needs_copy = 1;
11258 }
11259
11260 return _bfd_elf_adjust_dynamic_copy (h, s);
11261 }
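/* Editorial example (not part of the original source; identifiers are
   hypothetical): the copy-reloc path above is what lets a non-PIC
   executable refer directly to data defined in a shared object, e.g.

       extern int shared_counter;              -- defined in libfoo.so
       int read_counter (void) { return shared_counter; }

   Space for shared_counter is reserved in .dynbss and an R_ARM_COPY
   relocation is emitted; at run time the dynamic linker copies the
   initial value out of the shared object, and both the executable and
   the shared object then refer to the executable's copy.  */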
11262
11263 /* Allocate space in .plt, .got and associated reloc sections for
11264 dynamic relocs. */
11265
11266 static bfd_boolean
11267 allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
11268 {
11269 struct bfd_link_info *info;
11270 struct elf32_arm_link_hash_table *htab;
11271 struct elf32_arm_link_hash_entry *eh;
11272 struct elf32_arm_relocs_copied *p;
11273 bfd_signed_vma thumb_refs;
11274
11275 eh = (struct elf32_arm_link_hash_entry *) h;
11276
11277 if (h->root.type == bfd_link_hash_indirect)
11278 return TRUE;
11279
11280 if (h->root.type == bfd_link_hash_warning)
11281 /* When warning symbols are created, they **replace** the "real"
11282 entry in the hash table, thus we never get to see the real
11283 symbol in a hash traversal. So look at it now. */
11284 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11285
11286 info = (struct bfd_link_info *) inf;
11287 htab = elf32_arm_hash_table (info);
11288
11289 if (htab->root.dynamic_sections_created
11290 && h->plt.refcount > 0)
11291 {
11292 /* Make sure this symbol is output as a dynamic symbol.
11293 Undefined weak syms won't yet be marked as dynamic. */
11294 if (h->dynindx == -1
11295 && !h->forced_local)
11296 {
11297 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11298 return FALSE;
11299 }
11300
11301 if (info->shared
11302 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
11303 {
11304 asection *s = htab->splt;
11305
11306 /* If this is the first .plt entry, make room for the special
11307 first entry. */
11308 if (s->size == 0)
11309 s->size += htab->plt_header_size;
11310
11311 h->plt.offset = s->size;
11312
11313 /* If we will insert a Thumb trampoline before this PLT, leave room
11314 for it. */
11315 thumb_refs = eh->plt_thumb_refcount;
11316 if (!htab->use_blx)
11317 thumb_refs += eh->plt_maybe_thumb_refcount;
11318
11319 if (thumb_refs > 0)
11320 {
11321 h->plt.offset += PLT_THUMB_STUB_SIZE;
11322 s->size += PLT_THUMB_STUB_SIZE;
11323 }
11324
11325 /* If this symbol is not defined in a regular file, and we are
11326 not generating a shared library, then set the symbol to this
11327 location in the .plt. This is required to make function
11328 pointers compare as equal between the normal executable and
11329 the shared library. */
11330 if (! info->shared
11331 && !h->def_regular)
11332 {
11333 h->root.u.def.section = s;
11334 h->root.u.def.value = h->plt.offset;
11335 }
11336
11337 /* Make sure the function is not marked as Thumb, in case
11338 it is the target of an ABS32 relocation, which will
11339 point to the PLT entry. */
11340 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
11341 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11342
11343 /* Make room for this entry. */
11344 s->size += htab->plt_entry_size;
11345
11346 if (!htab->symbian_p)
11347 {
11348 /* We also need to make an entry in the .got.plt section, which
11349 will be placed in the .got section by the linker script. */
11350 eh->plt_got_offset = htab->sgotplt->size;
11351 htab->sgotplt->size += 4;
11352 }
11353
11354 /* We also need to make an entry in the .rel(a).plt section. */
11355 htab->srelplt->size += RELOC_SIZE (htab);
11356
11357 /* VxWorks executables have a second set of relocations for
11358 each PLT entry. They go in a separate relocation section,
11359 which is processed by the kernel loader. */
11360 if (htab->vxworks_p && !info->shared)
11361 {
11362 /* There is a relocation for the initial PLT entry:
11363 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
11364 if (h->plt.offset == htab->plt_header_size)
11365 htab->srelplt2->size += RELOC_SIZE (htab);
11366
11367 /* There are two extra relocations for each subsequent
11368 PLT entry: an R_ARM_32 relocation for the GOT entry,
11369 and an R_ARM_32 relocation for the PLT entry. */
11370 htab->srelplt2->size += RELOC_SIZE (htab) * 2;
11371 }
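/* Editorial note (not in the original source): a quick sizing check for
   the VxWorks branch above.  Every PLT entry contributes two
   .rela.plt.unloaded (srelplt2) relocations, and the first entry
   contributes one more for _GLOBAL_OFFSET_TABLE_, so with N entries
   srelplt2 grows by (2 * N + 1) * RELOC_SIZE (htab); e.g. three PLT
   entries need room for seven unloaded relocations.  */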
11372 }
11373 else
11374 {
11375 h->plt.offset = (bfd_vma) -1;
11376 h->needs_plt = 0;
11377 }
11378 }
11379 else
11380 {
11381 h->plt.offset = (bfd_vma) -1;
11382 h->needs_plt = 0;
11383 }
11384
11385 if (h->got.refcount > 0)
11386 {
11387 asection *s;
11388 bfd_boolean dyn;
11389 int tls_type = elf32_arm_hash_entry (h)->tls_type;
11390 int indx;
11391
11392 /* Make sure this symbol is output as a dynamic symbol.
11393 Undefined weak syms won't yet be marked as dynamic. */
11394 if (h->dynindx == -1
11395 && !h->forced_local)
11396 {
11397 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11398 return FALSE;
11399 }
11400
11401 if (!htab->symbian_p)
11402 {
11403 s = htab->sgot;
11404 h->got.offset = s->size;
11405
11406 if (tls_type == GOT_UNKNOWN)
11407 abort ();
11408
11409 if (tls_type == GOT_NORMAL)
11410 /* Non-TLS symbols need one GOT slot. */
11411 s->size += 4;
11412 else
11413 {
11414 if (tls_type & GOT_TLS_GD)
11415 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. */
11416 s->size += 8;
11417 if (tls_type & GOT_TLS_IE)
11418 /* R_ARM_TLS_IE32 needs one GOT slot. */
11419 s->size += 4;
11420 }
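/* Editorial note (not in the original source): as a worked example of
   the sizing above, a symbol whose tls_type is GOT_TLS_GD | GOT_TLS_IE
   (referenced through both R_ARM_TLS_GD32 and R_ARM_TLS_IE32) reserves
   8 + 4 = 12 bytes of .got space, while a plain GOT_NORMAL symbol
   reserves a single 4-byte slot.  */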
11421
11422 dyn = htab->root.dynamic_sections_created;
11423
11424 indx = 0;
11425 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
11426 && (!info->shared
11427 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11428 indx = h->dynindx;
11429
11430 if (tls_type != GOT_NORMAL
11431 && (info->shared || indx != 0)
11432 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11433 || h->root.type != bfd_link_hash_undefweak))
11434 {
11435 if (tls_type & GOT_TLS_IE)
11436 htab->srelgot->size += RELOC_SIZE (htab);
11437
11438 if (tls_type & GOT_TLS_GD)
11439 htab->srelgot->size += RELOC_SIZE (htab);
11440
11441 if ((tls_type & GOT_TLS_GD) && indx != 0)
11442 htab->srelgot->size += RELOC_SIZE (htab);
11443 }
11444 else if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11445 || h->root.type != bfd_link_hash_undefweak)
11446 && (info->shared
11447 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
11448 htab->srelgot->size += RELOC_SIZE (htab);
11449 }
11450 }
11451 else
11452 h->got.offset = (bfd_vma) -1;
11453
11454 /* Allocate stubs for exported Thumb functions on v4t. */
11455 if (!htab->use_blx && h->dynindx != -1
11456 && h->def_regular
11457 && ELF_ST_TYPE (h->type) == STT_ARM_TFUNC
11458 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
11459 {
11460 struct elf_link_hash_entry * th;
11461 struct bfd_link_hash_entry * bh;
11462 struct elf_link_hash_entry * myh;
11463 char name[1024];
11464 asection *s;
11465 bh = NULL;
11466 /* Create a new symbol to register the real location of the function. */
11467 s = h->root.u.def.section;
11468 sprintf (name, "__real_%s", h->root.root.string);
11469 _bfd_generic_link_add_one_symbol (info, s->owner,
11470 name, BSF_GLOBAL, s,
11471 h->root.u.def.value,
11472 NULL, TRUE, FALSE, &bh);
11473
11474 myh = (struct elf_link_hash_entry *) bh;
11475 myh->type = ELF_ST_INFO (STB_LOCAL, STT_ARM_TFUNC);
11476 myh->forced_local = 1;
11477 eh->export_glue = myh;
11478 th = record_arm_to_thumb_glue (info, h);
11479 /* Point the symbol at the stub. */
11480 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11481 h->root.u.def.section = th->root.u.def.section;
11482 h->root.u.def.value = th->root.u.def.value & ~1;
11483 }
11484
11485 if (eh->relocs_copied == NULL)
11486 return TRUE;
11487
11488 /* In the shared -Bsymbolic case, discard space allocated for
11489 dynamic pc-relative relocs against symbols which turn out to be
11490 defined in regular objects. For the normal shared case, discard
11491 space for pc-relative relocs that have become local due to symbol
11492 visibility changes. */
11493
11494 if (info->shared || htab->root.is_relocatable_executable)
11495 {
11496 /* The only relocs that use pc_count are R_ARM_REL32 and
11497 R_ARM_REL32_NOI, which will appear on something like
11498 ".long foo - .". We want calls to protected symbols to resolve
11499 directly to the function rather than going via the plt. If people
11500 want function pointer comparisons to work as expected then they
11501 should avoid writing assembly like ".long foo - .". */
11502 if (SYMBOL_CALLS_LOCAL (info, h))
11503 {
11504 struct elf32_arm_relocs_copied **pp;
11505
11506 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11507 {
11508 p->count -= p->pc_count;
11509 p->pc_count = 0;
11510 if (p->count == 0)
11511 *pp = p->next;
11512 else
11513 pp = &p->next;
11514 }
11515 }
11516
11517 if (elf32_arm_hash_table (info)->vxworks_p)
11518 {
11519 struct elf32_arm_relocs_copied **pp;
11520
11521 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11522 {
11523 if (strcmp (p->section->output_section->name, ".tls_vars") == 0)
11524 *pp = p->next;
11525 else
11526 pp = &p->next;
11527 }
11528 }
11529
11530 /* Also discard relocs on undefined weak syms with non-default
11531 visibility. */
11532 if (eh->relocs_copied != NULL
11533 && h->root.type == bfd_link_hash_undefweak)
11534 {
11535 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
11536 eh->relocs_copied = NULL;
11537
11538 /* Make sure undefined weak symbols are output as dynamic
11539 symbols in PIEs. */
11540 else if (h->dynindx == -1
11541 && !h->forced_local)
11542 {
11543 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11544 return FALSE;
11545 }
11546 }
11547
11548 else if (htab->root.is_relocatable_executable && h->dynindx == -1
11549 && h->root.type == bfd_link_hash_new)
11550 {
11551 /* Output absolute symbols so that we can create relocations
11552 against them. For normal symbols we output a relocation
11553 against the section that contains them. */
11554 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11555 return FALSE;
11556 }
11557
11558 }
11559 else
11560 {
11561 /* For the non-shared case, discard space for relocs against
11562 symbols which turn out to need copy relocs or are not
11563 dynamic. */
11564
11565 if (!h->non_got_ref
11566 && ((h->def_dynamic
11567 && !h->def_regular)
11568 || (htab->root.dynamic_sections_created
11569 && (h->root.type == bfd_link_hash_undefweak
11570 || h->root.type == bfd_link_hash_undefined))))
11571 {
11572 /* Make sure this symbol is output as a dynamic symbol.
11573 Undefined weak syms won't yet be marked as dynamic. */
11574 if (h->dynindx == -1
11575 && !h->forced_local)
11576 {
11577 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11578 return FALSE;
11579 }
11580
11581 /* If that succeeded, we know we'll be keeping all the
11582 relocs. */
11583 if (h->dynindx != -1)
11584 goto keep;
11585 }
11586
11587 eh->relocs_copied = NULL;
11588
11589 keep: ;
11590 }
11591
11592 /* Finally, allocate space. */
11593 for (p = eh->relocs_copied; p != NULL; p = p->next)
11594 {
11595 asection *sreloc = elf_section_data (p->section)->sreloc;
11596 sreloc->size += p->count * RELOC_SIZE (htab);
11597 }
11598
11599 return TRUE;
11600 }
11601
11602 /* Find any dynamic relocs that apply to read-only sections. */
11603
11604 static bfd_boolean
11605 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
11606 {
11607 struct elf32_arm_link_hash_entry * eh;
11608 struct elf32_arm_relocs_copied * p;
11609
11610 if (h->root.type == bfd_link_hash_warning)
11611 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11612
11613 eh = (struct elf32_arm_link_hash_entry *) h;
11614 for (p = eh->relocs_copied; p != NULL; p = p->next)
11615 {
11616 asection *s = p->section;
11617
11618 if (s != NULL && (s->flags & SEC_READONLY) != 0)
11619 {
11620 struct bfd_link_info *info = (struct bfd_link_info *) inf;
11621
11622 info->flags |= DF_TEXTREL;
11623
11624 /* Not an error, just cut short the traversal. */
11625 return FALSE;
11626 }
11627 }
11628 return TRUE;
11629 }
11630
11631 void
11632 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
11633 int byteswap_code)
11634 {
11635 struct elf32_arm_link_hash_table *globals;
11636
11637 globals = elf32_arm_hash_table (info);
11638 globals->byteswap_code = byteswap_code;
11639 }
11640
11641 /* Set the sizes of the dynamic sections. */
11642
11643 static bfd_boolean
11644 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
11645 struct bfd_link_info * info)
11646 {
11647 bfd * dynobj;
11648 asection * s;
11649 bfd_boolean plt;
11650 bfd_boolean relocs;
11651 bfd *ibfd;
11652 struct elf32_arm_link_hash_table *htab;
11653
11654 htab = elf32_arm_hash_table (info);
11655 dynobj = elf_hash_table (info)->dynobj;
11656 BFD_ASSERT (dynobj != NULL);
11657 check_use_blx (htab);
11658
11659 if (elf_hash_table (info)->dynamic_sections_created)
11660 {
11661 /* Set the contents of the .interp section to the interpreter. */
11662 if (info->executable)
11663 {
11664 s = bfd_get_section_by_name (dynobj, ".interp");
11665 BFD_ASSERT (s != NULL);
11666 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
11667 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
11668 }
11669 }
11670
11671 /* Set up .got offsets for local syms, and space for local dynamic
11672 relocs. */
11673 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11674 {
11675 bfd_signed_vma *local_got;
11676 bfd_signed_vma *end_local_got;
11677 char *local_tls_type;
11678 bfd_size_type locsymcount;
11679 Elf_Internal_Shdr *symtab_hdr;
11680 asection *srel;
11681 bfd_boolean is_vxworks = elf32_arm_hash_table (info)->vxworks_p;
11682
11683 if (! is_arm_elf (ibfd))
11684 continue;
11685
11686 for (s = ibfd->sections; s != NULL; s = s->next)
11687 {
11688 struct elf32_arm_relocs_copied *p;
11689
11690 for (p = elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
11691 {
11692 if (!bfd_is_abs_section (p->section)
11693 && bfd_is_abs_section (p->section->output_section))
11694 {
11695 /* Input section has been discarded, either because
11696 it is a copy of a linkonce section or due to
11697 linker script /DISCARD/, so we'll be discarding
11698 the relocs too. */
11699 }
11700 else if (is_vxworks
11701 && strcmp (p->section->output_section->name,
11702 ".tls_vars") == 0)
11703 {
11704 /* Relocations in vxworks .tls_vars sections are
11705 handled specially by the loader. */
11706 }
11707 else if (p->count != 0)
11708 {
11709 srel = elf_section_data (p->section)->sreloc;
11710 srel->size += p->count * RELOC_SIZE (htab);
11711 if ((p->section->output_section->flags & SEC_READONLY) != 0)
11712 info->flags |= DF_TEXTREL;
11713 }
11714 }
11715 }
11716
11717 local_got = elf_local_got_refcounts (ibfd);
11718 if (!local_got)
11719 continue;
11720
11721 symtab_hdr = & elf_symtab_hdr (ibfd);
11722 locsymcount = symtab_hdr->sh_info;
11723 end_local_got = local_got + locsymcount;
11724 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
11725 s = htab->sgot;
11726 srel = htab->srelgot;
11727 for (; local_got < end_local_got; ++local_got, ++local_tls_type)
11728 {
11729 if (*local_got > 0)
11730 {
11731 *local_got = s->size;
11732 if (*local_tls_type & GOT_TLS_GD)
11733 /* TLS_GD relocs need an 8-byte structure in the GOT. */
11734 s->size += 8;
11735 if (*local_tls_type & GOT_TLS_IE)
11736 s->size += 4;
11737 if (*local_tls_type == GOT_NORMAL)
11738 s->size += 4;
11739
11740 if (info->shared || *local_tls_type == GOT_TLS_GD)
11741 srel->size += RELOC_SIZE (htab);
11742 }
11743 else
11744 *local_got = (bfd_vma) -1;
11745 }
11746 }
11747
11748 if (htab->tls_ldm_got.refcount > 0)
11749 {
11750 /* Allocate two GOT entries and one dynamic relocation (if necessary)
11751 for R_ARM_TLS_LDM32 relocations. */
11752 htab->tls_ldm_got.offset = htab->sgot->size;
11753 htab->sgot->size += 8;
11754 if (info->shared)
11755 htab->srelgot->size += RELOC_SIZE (htab);
11756 }
11757 else
11758 htab->tls_ldm_got.offset = -1;
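/* Editorial note (not in the original source): the local-dynamic module
   entry is shared, so however many R_ARM_TLS_LDM32 relocations appear
   across the link, the code above reserves exactly one 8-byte GOT pair
   at htab->tls_ldm_got.offset, plus a single dynamic relocation when
   building a shared object.  */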
11759
11760 /* Allocate global sym .plt and .got entries, and space for global
11761 sym dynamic relocs. */
11762 elf_link_hash_traverse (& htab->root, allocate_dynrelocs, info);
11763
11764 /* Here we rummage through the found bfds to collect glue information. */
11765 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11766 {
11767 if (! is_arm_elf (ibfd))
11768 continue;
11769
11770 /* Initialise mapping tables for code/data. */
11771 bfd_elf32_arm_init_maps (ibfd);
11772
11773 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
11774 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
11775 /* xgettext:c-format */
11776 _bfd_error_handler (_("Errors encountered processing file %s"),
11777 ibfd->filename);
11778 }
11779
11780 /* Allocate space for the glue sections now that we've sized them. */
11781 bfd_elf32_arm_allocate_interworking_sections (info);
11782
11783 /* The check_relocs and adjust_dynamic_symbol entry points have
11784 determined the sizes of the various dynamic sections. Allocate
11785 memory for them. */
11786 plt = FALSE;
11787 relocs = FALSE;
11788 for (s = dynobj->sections; s != NULL; s = s->next)
11789 {
11790 const char * name;
11791
11792 if ((s->flags & SEC_LINKER_CREATED) == 0)
11793 continue;
11794
11795 /* It's OK to base decisions on the section name, because none
11796 of the dynobj section names depend upon the input files. */
11797 name = bfd_get_section_name (dynobj, s);
11798
11799 if (strcmp (name, ".plt") == 0)
11800 {
11801 /* Remember whether there is a PLT. */
11802 plt = s->size != 0;
11803 }
11804 else if (CONST_STRNEQ (name, ".rel"))
11805 {
11806 if (s->size != 0)
11807 {
11808 /* Remember whether there are any reloc sections other
11809 than .rel(a).plt and .rela.plt.unloaded. */
11810 if (s != htab->srelplt && s != htab->srelplt2)
11811 relocs = TRUE;
11812
11813 /* We use the reloc_count field as a counter if we need
11814 to copy relocs into the output file. */
11815 s->reloc_count = 0;
11816 }
11817 }
11818 else if (! CONST_STRNEQ (name, ".got")
11819 && strcmp (name, ".dynbss") != 0)
11820 {
11821 /* It's not one of our sections, so don't allocate space. */
11822 continue;
11823 }
11824
11825 if (s->size == 0)
11826 {
11827 /* If we don't need this section, strip it from the
11828 output file. This is mostly to handle .rel(a).bss and
11829 .rel(a).plt. We must create both sections in
11830 create_dynamic_sections, because they must be created
11831 before the linker maps input sections to output
11832 sections. The linker does that before
11833 adjust_dynamic_symbol is called, and it is that
11834 function which decides whether anything needs to go
11835 into these sections. */
11836 s->flags |= SEC_EXCLUDE;
11837 continue;
11838 }
11839
11840 if ((s->flags & SEC_HAS_CONTENTS) == 0)
11841 continue;
11842
11843 /* Allocate memory for the section contents. */
11844 s->contents = bfd_zalloc (dynobj, s->size);
11845 if (s->contents == NULL)
11846 return FALSE;
11847 }
11848
11849 if (elf_hash_table (info)->dynamic_sections_created)
11850 {
11851 /* Add some entries to the .dynamic section. We fill in the
11852 values later, in elf32_arm_finish_dynamic_sections, but we
11853 must add the entries now so that we get the correct size for
11854 the .dynamic section. The DT_DEBUG entry is filled in by the
11855 dynamic linker and used by the debugger. */
11856 #define add_dynamic_entry(TAG, VAL) \
11857 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
11858
11859 if (info->executable)
11860 {
11861 if (!add_dynamic_entry (DT_DEBUG, 0))
11862 return FALSE;
11863 }
11864
11865 if (plt)
11866 {
11867 if ( !add_dynamic_entry (DT_PLTGOT, 0)
11868 || !add_dynamic_entry (DT_PLTRELSZ, 0)
11869 || !add_dynamic_entry (DT_PLTREL,
11870 htab->use_rel ? DT_REL : DT_RELA)
11871 || !add_dynamic_entry (DT_JMPREL, 0))
11872 return FALSE;
11873 }
11874
11875 if (relocs)
11876 {
11877 if (htab->use_rel)
11878 {
11879 if (!add_dynamic_entry (DT_REL, 0)
11880 || !add_dynamic_entry (DT_RELSZ, 0)
11881 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
11882 return FALSE;
11883 }
11884 else
11885 {
11886 if (!add_dynamic_entry (DT_RELA, 0)
11887 || !add_dynamic_entry (DT_RELASZ, 0)
11888 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
11889 return FALSE;
11890 }
11891 }
11892
11893 /* If any dynamic relocs apply to a read-only section,
11894 then we need a DT_TEXTREL entry. */
11895 if ((info->flags & DF_TEXTREL) == 0)
11896 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
11897 info);
11898
11899 if ((info->flags & DF_TEXTREL) != 0)
11900 {
11901 if (!add_dynamic_entry (DT_TEXTREL, 0))
11902 return FALSE;
11903 }
11904 if (htab->vxworks_p
11905 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
11906 return FALSE;
11907 }
11908 #undef add_dynamic_entry
11909
11910 return TRUE;
11911 }
11912
11913 /* Finish up dynamic symbol handling. We set the contents of various
11914 dynamic sections here. */
11915
11916 static bfd_boolean
11917 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
11918 struct bfd_link_info * info,
11919 struct elf_link_hash_entry * h,
11920 Elf_Internal_Sym * sym)
11921 {
11922 bfd * dynobj;
11923 struct elf32_arm_link_hash_table *htab;
11924 struct elf32_arm_link_hash_entry *eh;
11925
11926 dynobj = elf_hash_table (info)->dynobj;
11927 htab = elf32_arm_hash_table (info);
11928 eh = (struct elf32_arm_link_hash_entry *) h;
11929
11930 if (h->plt.offset != (bfd_vma) -1)
11931 {
11932 asection * splt;
11933 asection * srel;
11934 bfd_byte *loc;
11935 bfd_vma plt_index;
11936 Elf_Internal_Rela rel;
11937
11938 /* This symbol has an entry in the procedure linkage table. Set
11939 it up. */
11940
11941 BFD_ASSERT (h->dynindx != -1);
11942
11943 splt = bfd_get_section_by_name (dynobj, ".plt");
11944 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".plt"));
11945 BFD_ASSERT (splt != NULL && srel != NULL);
11946
11947 /* Fill in the entry in the procedure linkage table. */
11948 if (htab->symbian_p)
11949 {
11950 put_arm_insn (htab, output_bfd,
11951 elf32_arm_symbian_plt_entry[0],
11952 splt->contents + h->plt.offset);
11953 bfd_put_32 (output_bfd,
11954 elf32_arm_symbian_plt_entry[1],
11955 splt->contents + h->plt.offset + 4);
11956
11957 /* Fill in the entry in the .rel.plt section. */
11958 rel.r_offset = (splt->output_section->vma
11959 + splt->output_offset
11960 + h->plt.offset + 4);
11961 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
11962
11963 /* Get the index in the procedure linkage table which
11964 corresponds to this symbol. This is the index of this symbol
11965 in all the symbols for which we are making plt entries. The
11966 first entry in the procedure linkage table is reserved. */
11967 plt_index = ((h->plt.offset - htab->plt_header_size)
11968 / htab->plt_entry_size);
11969 }
11970 else
11971 {
11972 bfd_vma got_offset, got_address, plt_address;
11973 bfd_vma got_displacement;
11974 asection * sgot;
11975 bfd_byte * ptr;
11976
11977 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
11978 BFD_ASSERT (sgot != NULL);
11979
11980 /* Get the offset into the .got.plt table of the entry that
11981 corresponds to this function. */
11982 got_offset = eh->plt_got_offset;
11983
11984 /* Get the index in the procedure linkage table which
11985 corresponds to this symbol. This is the index of this symbol
11986 in all the symbols for which we are making plt entries. The
11987 first three entries in .got.plt are reserved; after that
11988 symbols appear in the same order as in .plt. */
11989 plt_index = (got_offset - 12) / 4;
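/* Editorial example (not part of the original source): with the three
   reserved words at the start of .got.plt, got_offset 12 maps to
   plt_index 0, got_offset 16 to plt_index 1, got_offset 20 to
   plt_index 2, and so on, matching the order in which PLT slots were
   handed out in allocate_dynrelocs.  */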
11990
11991 /* Calculate the address of the GOT entry. */
11992 got_address = (sgot->output_section->vma
11993 + sgot->output_offset
11994 + got_offset);
11995
11996 /* ...and the address of the PLT entry. */
11997 plt_address = (splt->output_section->vma
11998 + splt->output_offset
11999 + h->plt.offset);
12000
12001 ptr = htab->splt->contents + h->plt.offset;
12002 if (htab->vxworks_p && info->shared)
12003 {
12004 unsigned int i;
12005 bfd_vma val;
12006
12007 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12008 {
12009 val = elf32_arm_vxworks_shared_plt_entry[i];
12010 if (i == 2)
12011 val |= got_address - sgot->output_section->vma;
12012 if (i == 5)
12013 val |= plt_index * RELOC_SIZE (htab);
12014 if (i == 2 || i == 5)
12015 bfd_put_32 (output_bfd, val, ptr);
12016 else
12017 put_arm_insn (htab, output_bfd, val, ptr);
12018 }
12019 }
12020 else if (htab->vxworks_p)
12021 {
12022 unsigned int i;
12023 bfd_vma val;
12024
12025 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12026 {
12027 val = elf32_arm_vxworks_exec_plt_entry[i];
12028 if (i == 2)
12029 val |= got_address;
12030 if (i == 4)
12031 val |= 0xffffff & -((h->plt.offset + i * 4 + 8) >> 2);
12032 if (i == 5)
12033 val |= plt_index * RELOC_SIZE (htab);
12034 if (i == 2 || i == 5)
12035 bfd_put_32 (output_bfd, val, ptr);
12036 else
12037 put_arm_insn (htab, output_bfd, val, ptr);
12038 }
12039
12040 loc = (htab->srelplt2->contents
12041 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
12042
12043 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
12044 referencing the GOT for this PLT entry. */
12045 rel.r_offset = plt_address + 8;
12046 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12047 rel.r_addend = got_offset;
12048 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12049 loc += RELOC_SIZE (htab);
12050
12051 /* Create the R_ARM_ABS32 relocation referencing the
12052 beginning of the PLT for this GOT entry. */
12053 rel.r_offset = got_address;
12054 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12055 rel.r_addend = 0;
12056 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12057 }
12058 else
12059 {
12060 bfd_signed_vma thumb_refs;
12061 /* Calculate the displacement between the PLT slot and the
12062 entry in the GOT. The eight-byte bias accounts for the ARM
12063 pipeline: the first instruction of the PLT stub reads pc as
12064 its own address plus eight. */
12065 got_displacement = got_address - (plt_address + 8);
12066
12067 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
12068
12069 thumb_refs = eh->plt_thumb_refcount;
12070 if (!htab->use_blx)
12071 thumb_refs += eh->plt_maybe_thumb_refcount;
12072
12073 if (thumb_refs > 0)
12074 {
12075 put_thumb_insn (htab, output_bfd,
12076 elf32_arm_plt_thumb_stub[0], ptr - 4);
12077 put_thumb_insn (htab, output_bfd,
12078 elf32_arm_plt_thumb_stub[1], ptr - 2);
12079 }
12080
12081 put_arm_insn (htab, output_bfd,
12082 elf32_arm_plt_entry[0]
12083 | ((got_displacement & 0x0ff00000) >> 20),
12084 ptr + 0);
12085 put_arm_insn (htab, output_bfd,
12086 elf32_arm_plt_entry[1]
12087 | ((got_displacement & 0x000ff000) >> 12),
12088 ptr + 4);
12089 put_arm_insn (htab, output_bfd,
12090 elf32_arm_plt_entry[2]
12091 | (got_displacement & 0x00000fff),
12092 ptr + 8);
12093 #ifdef FOUR_WORD_PLT
12094 bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
12095 #endif
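/* Editorial example (not part of the original source; assumes the usual
   add/add/ldr sequence encoded by elf32_arm_plt_entry): the three
   instructions written above split got_displacement into its top,
   middle and bottom groups.  A hypothetical displacement of 0x00123456
   is applied as 0x00100000 by the first instruction, 0x00023000 by the
   second, and 0x00000456 by the final load, so the address reached is
   plt_address + 8 + 0x00123456 == got_address.  */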
12096 }
12097
12098 /* Fill in the entry in the global offset table. */
12099 bfd_put_32 (output_bfd,
12100 (splt->output_section->vma
12101 + splt->output_offset),
12102 sgot->contents + got_offset);
12103
12104 /* Fill in the entry in the .rel(a).plt section. */
12105 rel.r_addend = 0;
12106 rel.r_offset = got_address;
12107 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_JUMP_SLOT);
12108 }
12109
12110 loc = srel->contents + plt_index * RELOC_SIZE (htab);
12111 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12112
12113 if (!h->def_regular)
12114 {
12115 /* Mark the symbol as undefined, rather than as defined in
12116 the .plt section. Leave the value alone. */
12117 sym->st_shndx = SHN_UNDEF;
12118 /* If the symbol is weak, we do need to clear the value.
12119 Otherwise, the PLT entry would provide a definition for
12120 the symbol even if the symbol wasn't defined anywhere,
12121 and so the symbol would never be NULL. */
12122 if (!h->ref_regular_nonweak)
12123 sym->st_value = 0;
12124 }
12125 }
12126
12127 if (h->got.offset != (bfd_vma) -1
12128 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_GD) == 0
12129 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_IE) == 0)
12130 {
12131 asection * sgot;
12132 asection * srel;
12133 Elf_Internal_Rela rel;
12134 bfd_byte *loc;
12135 bfd_vma offset;
12136
12137 /* This symbol has an entry in the global offset table. Set it
12138 up. */
12139 sgot = bfd_get_section_by_name (dynobj, ".got");
12140 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".got"));
12141 BFD_ASSERT (sgot != NULL && srel != NULL);
12142
12143 offset = (h->got.offset & ~(bfd_vma) 1);
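/* Editorial note (not in the original source, describing behaviour
   assumed from relocate_section): the low bit of h->got.offset is used
   as a flag recording that the GOT entry has already been initialised
   during relocation, which is why it is masked off here and tested by
   the assertions below.  */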
12144 rel.r_addend = 0;
12145 rel.r_offset = (sgot->output_section->vma
12146 + sgot->output_offset
12147 + offset);
12148
12149 /* If this is a static link, or it is a -Bsymbolic link and the
12150 symbol is defined locally or was forced to be local because
12151 of a version file, we just want to emit a RELATIVE reloc.
12152 The entry in the global offset table will already have been
12153 initialized in the relocate_section function. */
12154 if (info->shared
12155 && SYMBOL_REFERENCES_LOCAL (info, h))
12156 {
12157 BFD_ASSERT ((h->got.offset & 1) != 0);
12158 rel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12159 if (!htab->use_rel)
12160 {
12161 rel.r_addend = bfd_get_32 (output_bfd, sgot->contents + offset);
12162 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12163 }
12164 }
12165 else
12166 {
12167 BFD_ASSERT ((h->got.offset & 1) == 0);
12168 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12169 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12170 }
12171
12172 loc = srel->contents + srel->reloc_count++ * RELOC_SIZE (htab);
12173 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12174 }
12175
12176 if (h->needs_copy)
12177 {
12178 asection * s;
12179 Elf_Internal_Rela rel;
12180 bfd_byte *loc;
12181
12182 /* This symbol needs a copy reloc. Set it up. */
12183 BFD_ASSERT (h->dynindx != -1
12184 && (h->root.type == bfd_link_hash_defined
12185 || h->root.type == bfd_link_hash_defweak));
12186
12187 s = bfd_get_section_by_name (h->root.u.def.section->owner,
12188 RELOC_SECTION (htab, ".bss"));
12189 BFD_ASSERT (s != NULL);
12190
12191 rel.r_addend = 0;
12192 rel.r_offset = (h->root.u.def.value
12193 + h->root.u.def.section->output_section->vma
12194 + h->root.u.def.section->output_offset);
12195 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
12196 loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
12197 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12198 }
12199
12200 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
12201 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
12202 to the ".got" section. */
12203 if (strcmp (h->root.root.string, "_DYNAMIC") == 0
12204 || (!htab->vxworks_p && h == htab->root.hgot))
12205 sym->st_shndx = SHN_ABS;
12206
12207 return TRUE;
12208 }
12209
12210 /* Finish up the dynamic sections. */
12211
12212 static bfd_boolean
12213 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
12214 {
12215 bfd * dynobj;
12216 asection * sgot;
12217 asection * sdyn;
12218
12219 dynobj = elf_hash_table (info)->dynobj;
12220
12221 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12222 BFD_ASSERT (elf32_arm_hash_table (info)->symbian_p || sgot != NULL);
12223 sdyn = bfd_get_section_by_name (dynobj, ".dynamic");
12224
12225 if (elf_hash_table (info)->dynamic_sections_created)
12226 {
12227 asection *splt;
12228 Elf32_External_Dyn *dyncon, *dynconend;
12229 struct elf32_arm_link_hash_table *htab;
12230
12231 htab = elf32_arm_hash_table (info);
12232 splt = bfd_get_section_by_name (dynobj, ".plt");
12233 BFD_ASSERT (splt != NULL && sdyn != NULL);
12234
12235 dyncon = (Elf32_External_Dyn *) sdyn->contents;
12236 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
12237
12238 for (; dyncon < dynconend; dyncon++)
12239 {
12240 Elf_Internal_Dyn dyn;
12241 const char * name;
12242 asection * s;
12243
12244 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
12245
12246 switch (dyn.d_tag)
12247 {
12248 unsigned int type;
12249
12250 default:
12251 if (htab->vxworks_p
12252 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
12253 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12254 break;
12255
12256 case DT_HASH:
12257 name = ".hash";
12258 goto get_vma_if_bpabi;
12259 case DT_STRTAB:
12260 name = ".dynstr";
12261 goto get_vma_if_bpabi;
12262 case DT_SYMTAB:
12263 name = ".dynsym";
12264 goto get_vma_if_bpabi;
12265 case DT_VERSYM:
12266 name = ".gnu.version";
12267 goto get_vma_if_bpabi;
12268 case DT_VERDEF:
12269 name = ".gnu.version_d";
12270 goto get_vma_if_bpabi;
12271 case DT_VERNEED:
12272 name = ".gnu.version_r";
12273 goto get_vma_if_bpabi;
12274
12275 case DT_PLTGOT:
12276 name = ".got";
12277 goto get_vma;
12278 case DT_JMPREL:
12279 name = RELOC_SECTION (htab, ".plt");
12280 get_vma:
12281 s = bfd_get_section_by_name (output_bfd, name);
12282 BFD_ASSERT (s != NULL);
12283 if (!htab->symbian_p)
12284 dyn.d_un.d_ptr = s->vma;
12285 else
12286 /* In the BPABI, tags in the PT_DYNAMIC section point
12287 at the file offset, not the memory address, for the
12288 convenience of the post linker. */
12289 dyn.d_un.d_ptr = s->filepos;
12290 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12291 break;
12292
12293 get_vma_if_bpabi:
12294 if (htab->symbian_p)
12295 goto get_vma;
12296 break;
12297
12298 case DT_PLTRELSZ:
12299 s = bfd_get_section_by_name (output_bfd,
12300 RELOC_SECTION (htab, ".plt"));
12301 BFD_ASSERT (s != NULL);
12302 dyn.d_un.d_val = s->size;
12303 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12304 break;
12305
12306 case DT_RELSZ:
12307 case DT_RELASZ:
12308 if (!htab->symbian_p)
12309 {
12310 /* My reading of the SVR4 ABI indicates that the
12311 procedure linkage table relocs (DT_JMPREL) should be
12312 included in the overall relocs (DT_REL). This is
12313 what Solaris does. However, UnixWare can not handle
12314 that case. Therefore, we override the DT_RELSZ entry
12315 here to make it not include the JMPREL relocs. Since
12316 the linker script arranges for .rel(a).plt to follow all
12317 other relocation sections, we don't have to worry
12318 about changing the DT_REL entry. */
12319 s = bfd_get_section_by_name (output_bfd,
12320 RELOC_SECTION (htab, ".plt"));
12321 if (s != NULL)
12322 dyn.d_un.d_val -= s->size;
12323 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12324 break;
12325 }
12326 /* Fall through. */
12327
12328 case DT_REL:
12329 case DT_RELA:
12330 /* In the BPABI, the DT_REL tag must point at the file
12331 offset, not the VMA, of the first relocation
12332 section. So, we use code similar to that in
12333 elflink.c, but do not check for SHF_ALLOC on the
12334 relocation section, since relocation sections are
12335 never allocated under the BPABI. The comments above
12336 about UnixWare notwithstanding, we include all of the
12337 relocations here. */
12338 if (htab->symbian_p)
12339 {
12340 unsigned int i;
12341 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
12342 ? SHT_REL : SHT_RELA);
12343 dyn.d_un.d_val = 0;
12344 for (i = 1; i < elf_numsections (output_bfd); i++)
12345 {
12346 Elf_Internal_Shdr *hdr
12347 = elf_elfsections (output_bfd)[i];
12348 if (hdr->sh_type == type)
12349 {
12350 if (dyn.d_tag == DT_RELSZ
12351 || dyn.d_tag == DT_RELASZ)
12352 dyn.d_un.d_val += hdr->sh_size;
12353 else if ((ufile_ptr) hdr->sh_offset
12354 <= dyn.d_un.d_val - 1)
12355 dyn.d_un.d_val = hdr->sh_offset;
12356 }
12357 }
12358 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12359 }
12360 break;
12361
12362 /* Set the bottom bit of DT_INIT/FINI if the
12363 corresponding function is Thumb. */
12364 case DT_INIT:
12365 name = info->init_function;
12366 goto get_sym;
12367 case DT_FINI:
12368 name = info->fini_function;
12369 get_sym:
12370 /* If it wasn't set by elf_bfd_final_link
12371 then there is nothing to adjust. */
12372 if (dyn.d_un.d_val != 0)
12373 {
12374 struct elf_link_hash_entry * eh;
12375
12376 eh = elf_link_hash_lookup (elf_hash_table (info), name,
12377 FALSE, FALSE, TRUE);
12378 if (eh != NULL
12379 && ELF_ST_TYPE (eh->type) == STT_ARM_TFUNC)
12380 {
12381 dyn.d_un.d_val |= 1;
12382 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12383 }
12384 }
12385 break;
12386 }
12387 }
12388
12389 /* Fill in the first entry in the procedure linkage table. */
12390 if (splt->size > 0 && elf32_arm_hash_table (info)->plt_header_size)
12391 {
12392 const bfd_vma *plt0_entry;
12393 bfd_vma got_address, plt_address, got_displacement;
12394
12395 /* Calculate the addresses of the GOT and PLT. */
12396 got_address = sgot->output_section->vma + sgot->output_offset;
12397 plt_address = splt->output_section->vma + splt->output_offset;
12398
12399 if (htab->vxworks_p)
12400 {
12401 /* The VxWorks GOT is relocated by the dynamic linker.
12402 Therefore, we must emit relocations rather than simply
12403 computing the values now. */
12404 Elf_Internal_Rela rel;
12405
12406 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
12407 put_arm_insn (htab, output_bfd, plt0_entry[0],
12408 splt->contents + 0);
12409 put_arm_insn (htab, output_bfd, plt0_entry[1],
12410 splt->contents + 4);
12411 put_arm_insn (htab, output_bfd, plt0_entry[2],
12412 splt->contents + 8);
12413 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
12414
12415 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
12416 rel.r_offset = plt_address + 12;
12417 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12418 rel.r_addend = 0;
12419 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
12420 htab->srelplt2->contents);
12421 }
12422 else
12423 {
12424 got_displacement = got_address - (plt_address + 16);
12425
12426 plt0_entry = elf32_arm_plt0_entry;
12427 put_arm_insn (htab, output_bfd, plt0_entry[0],
12428 splt->contents + 0);
12429 put_arm_insn (htab, output_bfd, plt0_entry[1],
12430 splt->contents + 4);
12431 put_arm_insn (htab, output_bfd, plt0_entry[2],
12432 splt->contents + 8);
12433 put_arm_insn (htab, output_bfd, plt0_entry[3],
12434 splt->contents + 12);
12435
12436 #ifdef FOUR_WORD_PLT
12437 /* The displacement value goes in the otherwise-unused
12438 last word of the second entry. */
12439 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
12440 #else
12441 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
12442 #endif
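/* Editorial note (not in the original source; assumes the usual four
   instruction PLT0 header): the displacement stored above is biased by
   16 because the instruction that consumes it sits eight bytes into
   the header and reads pc as its own address plus eight, giving a base
   of plt_address + 16; adding got_displacement to that base yields
   got_address exactly.  */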
12443 }
12444 }
12445
12446 /* UnixWare sets the entsize of .plt to 4, although that doesn't
12447 really seem like the right value. */
12448 if (splt->output_section->owner == output_bfd)
12449 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
12450
12451 if (htab->vxworks_p && !info->shared && htab->splt->size > 0)
12452 {
12453 /* Correct the .rel(a).plt.unloaded relocations. They will have
12454 incorrect symbol indexes. */
12455 int num_plts;
12456 unsigned char *p;
12457
12458 num_plts = ((htab->splt->size - htab->plt_header_size)
12459 / htab->plt_entry_size);
12460 p = htab->srelplt2->contents + RELOC_SIZE (htab);
12461
12462 for (; num_plts; num_plts--)
12463 {
12464 Elf_Internal_Rela rel;
12465
12466 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12467 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12468 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12469 p += RELOC_SIZE (htab);
12470
12471 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12472 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12473 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12474 p += RELOC_SIZE (htab);
12475 }
12476 }
12477 }
12478
12479 /* Fill in the first three entries in the global offset table. */
12480 if (sgot)
12481 {
12482 if (sgot->size > 0)
12483 {
12484 if (sdyn == NULL)
12485 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
12486 else
12487 bfd_put_32 (output_bfd,
12488 sdyn->output_section->vma + sdyn->output_offset,
12489 sgot->contents);
12490 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
12491 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
12492 }
12493
12494 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
12495 }
12496
12497 return TRUE;
12498 }
12499
12500 static void
12501 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
12502 {
12503 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
12504 struct elf32_arm_link_hash_table *globals;
12505
12506 i_ehdrp = elf_elfheader (abfd);
12507
12508 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
12509 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
12510 else
12511 i_ehdrp->e_ident[EI_OSABI] = 0;
12512 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
12513
12514 if (link_info)
12515 {
12516 globals = elf32_arm_hash_table (link_info);
12517 if (globals->byteswap_code)
12518 i_ehdrp->e_flags |= EF_ARM_BE8;
12519 }
12520 }
12521
12522 static enum elf_reloc_type_class
12523 elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela)
12524 {
12525 switch ((int) ELF32_R_TYPE (rela->r_info))
12526 {
12527 case R_ARM_RELATIVE:
12528 return reloc_class_relative;
12529 case R_ARM_JUMP_SLOT:
12530 return reloc_class_plt;
12531 case R_ARM_COPY:
12532 return reloc_class_copy;
12533 default:
12534 return reloc_class_normal;
12535 }
12536 }
12537
12538 /* Mark SHT_NOTE sections link-once so that duplicate notes are merged. */
12539
12540 static bfd_boolean
12541 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
12542 {
12543 if (hdr->sh_type == SHT_NOTE)
12544 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
12545
12546 return TRUE;
12547 }
12548
12549 static void
12550 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
12551 {
12552 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
12553 }
12554
12555 /* Return TRUE if this is an unwinding table entry. */
12556
12557 static bfd_boolean
12558 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
12559 {
12560 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
12561 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
12562 }
12563
12564
12565 /* Set the type and flags for an ARM section. We do this based on
12566 the section name, which is a hack, but ought to work. */
12567
12568 static bfd_boolean
12569 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
12570 {
12571 const char * name;
12572
12573 name = bfd_get_section_name (abfd, sec);
12574
12575 if (is_arm_elf_unwind_section_name (abfd, name))
12576 {
12577 hdr->sh_type = SHT_ARM_EXIDX;
12578 hdr->sh_flags |= SHF_LINK_ORDER;
12579 }
12580 return TRUE;
12581 }
12582
12583 /* Handle an ARM specific section when reading an object file. This is
12584 called when bfd_section_from_shdr finds a section with an unknown
12585 type. */
12586
12587 static bfd_boolean
12588 elf32_arm_section_from_shdr (bfd *abfd,
12589 Elf_Internal_Shdr * hdr,
12590 const char *name,
12591 int shindex)
12592 {
12593 /* There ought to be a place to keep ELF backend specific flags, but
12594 at the moment there isn't one. We just keep track of the
12595 sections by their name, instead. Fortunately, the ABI gives
12596 names for all the ARM specific sections, so we will probably get
12597 away with this. */
12598 switch (hdr->sh_type)
12599 {
12600 case SHT_ARM_EXIDX:
12601 case SHT_ARM_PREEMPTMAP:
12602 case SHT_ARM_ATTRIBUTES:
12603 break;
12604
12605 default:
12606 return FALSE;
12607 }
12608
12609 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
12610 return FALSE;
12611
12612 return TRUE;
12613 }
12614
12615 /* A structure used to record a list of sections, independently
12616 of the next and prev fields in the asection structure. */
12617 typedef struct section_list
12618 {
12619 asection * sec;
12620 struct section_list * next;
12621 struct section_list * prev;
12622 }
12623 section_list;
12624
12625 /* Unfortunately we need to keep a list of sections for which
12626 an _arm_elf_section_data structure has been allocated. This
12627 is because it is possible for functions like elf32_arm_write_section
12628 to be called on a section which has had an elf_data_structure
12629 allocated for it (and so the used_by_bfd field is valid) but
12630 for which the ARM extended version of this structure - the
12631 _arm_elf_section_data structure - has not been allocated. */
12632 static section_list * sections_with_arm_elf_section_data = NULL;
12633
12634 static void
12635 record_section_with_arm_elf_section_data (asection * sec)
12636 {
12637 struct section_list * entry;
12638
12639 entry = bfd_malloc (sizeof (* entry));
12640 if (entry == NULL)
12641 return;
12642 entry->sec = sec;
12643 entry->next = sections_with_arm_elf_section_data;
12644 entry->prev = NULL;
12645 if (entry->next != NULL)
12646 entry->next->prev = entry;
12647 sections_with_arm_elf_section_data = entry;
12648 }
12649
12650 static struct section_list *
12651 find_arm_elf_section_entry (asection * sec)
12652 {
12653 struct section_list * entry;
12654 static struct section_list * last_entry = NULL;
12655
12656 /* This is a short cut for the typical case where the sections are added
12657 to the sections_with_arm_elf_section_data list in forward order and
12658 then looked up here in backwards order. This makes a real difference
12659 to the ld-srec/sec64k.exp linker test. */
12660 entry = sections_with_arm_elf_section_data;
12661 if (last_entry != NULL)
12662 {
12663 if (last_entry->sec == sec)
12664 entry = last_entry;
12665 else if (last_entry->next != NULL
12666 && last_entry->next->sec == sec)
12667 entry = last_entry->next;
12668 }
12669
12670 for (; entry; entry = entry->next)
12671 if (entry->sec == sec)
12672 break;
12673
12674 if (entry)
12675 /* Record the entry prior to this one - it is the entry we are most
12676 likely to want to locate next time. Also this way if we have been
12677 called from unrecord_section_with_arm_elf_section_data() we will not
12678 be caching a pointer that is about to be freed. */
12679 last_entry = entry->prev;
12680
12681 return entry;
12682 }
12683
12684 static _arm_elf_section_data *
12685 get_arm_elf_section_data (asection * sec)
12686 {
12687 struct section_list * entry;
12688
12689 entry = find_arm_elf_section_entry (sec);
12690
12691 if (entry)
12692 return elf32_arm_section_data (entry->sec);
12693 else
12694 return NULL;
12695 }
12696
12697 static void
12698 unrecord_section_with_arm_elf_section_data (asection * sec)
12699 {
12700 struct section_list * entry;
12701
12702 entry = find_arm_elf_section_entry (sec);
12703
12704 if (entry)
12705 {
12706 if (entry->prev != NULL)
12707 entry->prev->next = entry->next;
12708 if (entry->next != NULL)
12709 entry->next->prev = entry->prev;
12710 if (entry == sections_with_arm_elf_section_data)
12711 sections_with_arm_elf_section_data = entry->next;
12712 free (entry);
12713 }
12714 }
12715
12716
12717 typedef struct
12718 {
12719 void *finfo;
12720 struct bfd_link_info *info;
12721 asection *sec;
12722 int sec_shndx;
12723 int (*func) (void *, const char *, Elf_Internal_Sym *,
12724 asection *, struct elf_link_hash_entry *);
12725 } output_arch_syminfo;
12726
12727 enum map_symbol_type
12728 {
12729 ARM_MAP_ARM,
12730 ARM_MAP_THUMB,
12731 ARM_MAP_DATA
12732 };
12733
12734
12735 /* Output a single mapping symbol. */
12736
12737 static bfd_boolean
12738 elf32_arm_output_map_sym (output_arch_syminfo *osi,
12739 enum map_symbol_type type,
12740 bfd_vma offset)
12741 {
12742 static const char *names[3] = {"$a", "$t", "$d"};
12743 struct elf32_arm_link_hash_table *htab;
12744 Elf_Internal_Sym sym;
12745
12746 htab = elf32_arm_hash_table (osi->info);
12747 sym.st_value = osi->sec->output_section->vma
12748 + osi->sec->output_offset
12749 + offset;
12750 sym.st_size = 0;
12751 sym.st_other = 0;
12752 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
12753 sym.st_shndx = osi->sec_shndx;
12754 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
12755 }
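/* Editorial example (not part of the original source): callers emit one
   of the three mapping symbols by passing the region type and the
   offset within osi->sec, e.g.

       elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr);       -- "$a"
       elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4);  -- "$d"

   exactly as elf32_arm_output_plt_map does below for Symbian PLT
   entries.  */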
12756
12757
12758 /* Output mapping symbols for PLT entries associated with H. */
12759
12760 static bfd_boolean
12761 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
12762 {
12763 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
12764 struct elf32_arm_link_hash_table *htab;
12765 struct elf32_arm_link_hash_entry *eh;
12766 bfd_vma addr;
12767
12768 htab = elf32_arm_hash_table (osi->info);
12769
12770 if (h->root.type == bfd_link_hash_indirect)
12771 return TRUE;
12772
12773 if (h->root.type == bfd_link_hash_warning)
12774 /* When warning symbols are created, they **replace** the "real"
12775 entry in the hash table, thus we never get to see the real
12776 symbol in a hash traversal. So look at it now. */
12777 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12778
12779 if (h->plt.offset == (bfd_vma) -1)
12780 return TRUE;
12781
12782 eh = (struct elf32_arm_link_hash_entry *) h;
12783 addr = h->plt.offset;
12784 if (htab->symbian_p)
12785 {
12786 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12787 return FALSE;
12788 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
12789 return FALSE;
12790 }
12791 else if (htab->vxworks_p)
12792 {
12793 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12794 return FALSE;
12795 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
12796 return FALSE;
12797 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
12798 return FALSE;
12799 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
12800 return FALSE;
12801 }
12802 else
12803 {
12804 bfd_signed_vma thumb_refs;
12805
12806 thumb_refs = eh->plt_thumb_refcount;
12807 if (!htab->use_blx)
12808 thumb_refs += eh->plt_maybe_thumb_refcount;
12809
12810 if (thumb_refs > 0)
12811 {
12812 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
12813 return FALSE;
12814 }
12815 #ifdef FOUR_WORD_PLT
12816 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12817 return FALSE;
12818 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
12819 return FALSE;
12820 #else
12821 /* A three-word PLT with no Thumb thunk contains only ARM code,
12822 so we only need to output a mapping symbol for the first PLT entry
12823 and for entries with Thumb thunks. */
12824 if (thumb_refs > 0 || addr == 20)
12825 {
12826 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12827 return FALSE;
12828 }
12829 #endif
12830 }
12831
12832 return TRUE;
12833 }
12834
12835 /* Output a single local symbol for a generated stub. */
12836
12837 static bfd_boolean
12838 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
12839 bfd_vma offset, bfd_vma size)
12840 {
12841 struct elf32_arm_link_hash_table *htab;
12842 Elf_Internal_Sym sym;
12843
12844 htab = elf32_arm_hash_table (osi->info);
12845 sym.st_value = osi->sec->output_section->vma
12846 + osi->sec->output_offset
12847 + offset;
12848 sym.st_size = size;
12849 sym.st_other = 0;
12850 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
12851 sym.st_shndx = osi->sec_shndx;
12852 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
12853 }
12854
12855 static bfd_boolean
12856 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
12857 void * in_arg)
12858 {
12859 struct elf32_arm_stub_hash_entry *stub_entry;
12860 struct bfd_link_info *info;
12861 struct elf32_arm_link_hash_table *htab;
12862 asection *stub_sec;
12863 bfd_vma addr;
12864 char *stub_name;
12865 output_arch_syminfo *osi;
12866 const insn_sequence *template;
12867 enum stub_insn_type prev_type;
12868 int size;
12869 int i;
12870 enum map_symbol_type sym_type;
12871
12872 /* Massage our args to the form they really have. */
12873 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
12874 osi = (output_arch_syminfo *) in_arg;
12875
12876 info = osi->info;
12877
12878 htab = elf32_arm_hash_table (info);
12879 stub_sec = stub_entry->stub_sec;
12880
12881 /* Ensure this stub is attached to the current section being
12882 processed. */
12883 if (stub_sec != osi->sec)
12884 return TRUE;
12885
12886 addr = (bfd_vma) stub_entry->stub_offset;
12887 stub_name = stub_entry->output_name;
12888
12889 template = stub_entry->stub_template;
12890 switch (template[0].type)
12891 {
12892 case ARM_TYPE:
12893 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
12894 return FALSE;
12895 break;
12896 case THUMB16_TYPE:
12897 case THUMB32_TYPE:
12898 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
12899 stub_entry->stub_size))
12900 return FALSE;
12901 break;
12902 default:
12903 BFD_FAIL ();
12904 return FALSE;
12905 }
12906
12907 prev_type = DATA_TYPE;
12908 size = 0;
12909 for (i = 0; i < stub_entry->stub_template_size; i++)
12910 {
12911 switch (template[i].type)
12912 {
12913 case ARM_TYPE:
12914 sym_type = ARM_MAP_ARM;
12915 break;
12916
12917 case THUMB16_TYPE:
12918 case THUMB32_TYPE:
12919 sym_type = ARM_MAP_THUMB;
12920 break;
12921
12922 case DATA_TYPE:
12923 sym_type = ARM_MAP_DATA;
12924 break;
12925
12926 default:
12927 BFD_FAIL ();
12928 return FALSE;
12929 }
12930
12931 if (template[i].type != prev_type)
12932 {
12933 prev_type = template[i].type;
12934 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
12935 return FALSE;
12936 }
12937
12938 switch (template[i].type)
12939 {
12940 case ARM_TYPE:
12941 case THUMB32_TYPE:
12942 size += 4;
12943 break;
12944
12945 case THUMB16_TYPE:
12946 size += 2;
12947 break;
12948
12949 case DATA_TYPE:
12950 size += 4;
12951 break;
12952
12953 default:
12954 BFD_FAIL ();
12955 return FALSE;
12956 }
12957 }
12958
12959 return TRUE;
12960 }
12961
12962 /* Output mapping symbols for linker generated sections. */
12963
12964 static bfd_boolean
12965 elf32_arm_output_arch_local_syms (bfd *output_bfd,
12966 struct bfd_link_info *info,
12967 void *finfo,
12968 int (*func) (void *, const char *,
12969 Elf_Internal_Sym *,
12970 asection *,
12971 struct elf_link_hash_entry *))
12972 {
12973 output_arch_syminfo osi;
12974 struct elf32_arm_link_hash_table *htab;
12975 bfd_vma offset;
12976 bfd_size_type size;
12977
12978 htab = elf32_arm_hash_table (info);
12979 check_use_blx (htab);
12980
12981 osi.finfo = finfo;
12982 osi.info = info;
12983 osi.func = func;
12984
12985 /* ARM->Thumb glue. */
12986 if (htab->arm_glue_size > 0)
12987 {
12988 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
12989 ARM2THUMB_GLUE_SECTION_NAME);
12990
12991 osi.sec_shndx = _bfd_elf_section_from_bfd_section
12992 (output_bfd, osi.sec->output_section);
12993 if (info->shared || htab->root.is_relocatable_executable
12994 || htab->pic_veneer)
12995 size = ARM2THUMB_PIC_GLUE_SIZE;
12996 else if (htab->use_blx)
12997 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
12998 else
12999 size = ARM2THUMB_STATIC_GLUE_SIZE;
13000
13001 for (offset = 0; offset < htab->arm_glue_size; offset += size)
13002 {
13003 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
13004 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
13005 }
13006 }
13007
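/* Each ARM->Thumb glue sequence defined earlier in this file ends with a
   literal data word holding the destination address, which is why the
   loop above places a $d mapping symbol four bytes before the end of
   every glue entry.  */
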
13008 /* Thumb->ARM glue. */
13009 if (htab->thumb_glue_size > 0)
13010 {
13011 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13012 THUMB2ARM_GLUE_SECTION_NAME);
13013
13014 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13015 (output_bfd, osi.sec->output_section);
13016 size = THUMB2ARM_GLUE_SIZE;
13017
13018 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
13019 {
13020 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
13021 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
13022 }
13023 }
13024
13025 /* ARMv4 BX veneers. */
13026 if (htab->bx_glue_size > 0)
13027 {
13028 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13029 ARM_BX_GLUE_SECTION_NAME);
13030
13031 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13032 (output_bfd, osi.sec->output_section);
13033
13034 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
13035 }
13036
13037 /* Long call stubs. */
13038 if (htab->stub_bfd && htab->stub_bfd->sections)
13039 {
13040 asection* stub_sec;
13041
13042 for (stub_sec = htab->stub_bfd->sections;
13043 stub_sec != NULL;
13044 stub_sec = stub_sec->next)
13045 {
13046 /* Ignore non-stub sections. */
13047 if (!strstr (stub_sec->name, STUB_SUFFIX))
13048 continue;
13049
13050 osi.sec = stub_sec;
13051
13052 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13053 (output_bfd, osi.sec->output_section);
13054
13055 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
13056 }
13057 }
13058
13059 /* Finally, output mapping symbols for the PLT. */
13060 if (!htab->splt || htab->splt->size == 0)
13061 return TRUE;
13062
13063 osi.sec_shndx = _bfd_elf_section_from_bfd_section (output_bfd,
13064 htab->splt->output_section);
13065 osi.sec = htab->splt;
13066 /* Output mapping symbols for the PLT header. SymbianOS does not have a
13067 PLT header. */
13068 if (htab->vxworks_p)
13069 {
13070 /* VxWorks shared libraries have no PLT header. */
13071 if (!info->shared)
13072 {
13073 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13074 return FALSE;
13075 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
13076 return FALSE;
13077 }
13078 }
13079 else if (!htab->symbian_p)
13080 {
13081 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13082 return FALSE;
13083 #ifndef FOUR_WORD_PLT
13084 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
13085 return FALSE;
13086 #endif
13087 }
13088
13089 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, (void *) &osi);
13090 return TRUE;
13091 }
13092
13093 /* Allocate target specific section data. */
13094
13095 static bfd_boolean
13096 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
13097 {
13098 if (!sec->used_by_bfd)
13099 {
13100 _arm_elf_section_data *sdata;
13101 bfd_size_type amt = sizeof (*sdata);
13102
13103 sdata = bfd_zalloc (abfd, amt);
13104 if (sdata == NULL)
13105 return FALSE;
13106 sec->used_by_bfd = sdata;
13107 }
13108
13109 record_section_with_arm_elf_section_data (sec);
13110
13111 return _bfd_elf_new_section_hook (abfd, sec);
13112 }
13113
13114
13115 /* Used to order a list of mapping symbols by address. */
13116
13117 static int
13118 elf32_arm_compare_mapping (const void * a, const void * b)
13119 {
13120 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
13121 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
13122
13123 if (amap->vma > bmap->vma)
13124 return 1;
13125 else if (amap->vma < bmap->vma)
13126 return -1;
13127 else if (amap->type > bmap->type)
13128 /* Ensure results do not depend on the host qsort for objects with
13129 multiple mapping symbols at the same address by sorting on type
13130 after vma. */
13131 return 1;
13132 else if (amap->type < bmap->type)
13133 return -1;
13134 else
13135 return 0;
13136 }
13137
13138 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
13139
13140 static unsigned long
13141 offset_prel31 (unsigned long addr, bfd_vma offset)
13142 {
13143 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
13144 }
13145
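/* For example, a word of 0x7ffffff8 encodes a prel31 offset of -8;
   with OFFSET = 0x10 the addition wraps within the low 31 bits and
   yields 0x00000008, i.e. an offset of +8, while bit 31 is left
   untouched.  */
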
13146 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
13147 relocations. */
13148
13149 static void
13150 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
13151 {
13152 unsigned long first_word = bfd_get_32 (output_bfd, from);
13153 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
13154
13155 /* High bit of first word is supposed to be zero. */
13156 if ((first_word & 0x80000000ul) == 0)
13157 first_word = offset_prel31 (first_word, offset);
13158
13159 /* If the high bit of the second word is clear, and the bit pattern is not 0x1
13160 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
13161 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
13162 second_word = offset_prel31 (second_word, offset);
13163
13164 bfd_put_32 (output_bfd, first_word, to);
13165 bfd_put_32 (output_bfd, second_word, to + 4);
13166 }
13167
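/* An .ARM.exidx entry is two words: the first is an R_ARM_PREL31 offset
   to the start of the function it describes; the second is either 0x1
   (EXIDX_CANTUNWIND), an inline unwind descriptor with bit 31 set, or a
   prel31 offset to an .ARM.extab entry.  Only the prel31 forms need
   rebasing when the entry moves, which is what the tests above do.  */
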
13168 /* Data for make_branch_to_a8_stub(). */
13169
13170 struct a8_branch_to_stub_data {
13171 asection *writing_section;
13172 bfd_byte *contents;
13173 };
13174
13175
13176 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
13177 places for a particular section. */
13178
13179 static bfd_boolean
13180 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
13181 void *in_arg)
13182 {
13183 struct elf32_arm_stub_hash_entry *stub_entry;
13184 struct a8_branch_to_stub_data *data;
13185 bfd_byte *contents;
13186 unsigned long branch_insn;
13187 bfd_vma veneered_insn_loc, veneer_entry_loc;
13188 bfd_signed_vma branch_offset;
13189 bfd *abfd;
13190 unsigned int index;
13191
13192 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
13193 data = (struct a8_branch_to_stub_data *) in_arg;
13194
13195 if (stub_entry->target_section != data->writing_section
13196 || stub_entry->stub_type < arm_stub_a8_veneer_b_cond)
13197 return TRUE;
13198
13199 contents = data->contents;
13200
13201 veneered_insn_loc = stub_entry->target_section->output_section->vma
13202 + stub_entry->target_section->output_offset
13203 + stub_entry->target_value;
13204
13205 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
13206 + stub_entry->stub_sec->output_offset
13207 + stub_entry->stub_offset;
13208
13209 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
13210 veneered_insn_loc &= ~3u;
13211
13212 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
13213
13214 abfd = stub_entry->target_section->owner;
13215 index = stub_entry->target_value;
13216
13217 /* We attempt to avoid this condition by setting stubs_always_after_branch
13218 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
13219 This check is just to be on the safe side... */
13220 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
13221 {
13222 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
13223 "allocated in unsafe location"), abfd);
13224 return FALSE;
13225 }
13226
13227 switch (stub_entry->stub_type)
13228 {
13229 case arm_stub_a8_veneer_b:
13230 case arm_stub_a8_veneer_b_cond:
13231 branch_insn = 0xf0009000;
13232 goto jump24;
13233
13234 case arm_stub_a8_veneer_blx:
13235 branch_insn = 0xf000e800;
13236 goto jump24;
13237
13238 case arm_stub_a8_veneer_bl:
13239 {
13240 unsigned int i1, j1, i2, j2, s;
13241
13242 branch_insn = 0xf000d000;
13243
13244 jump24:
13245 if (branch_offset < -16777216 || branch_offset > 16777214)
13246 {
13247 /* There's not much we can do apart from complain if this
13248 happens. */
13249 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
13250 "of range (input file too large)"), abfd);
13251 return FALSE;
13252 }
13253
13254 /* i1 = not(j1 eor s), so:
13255 not i1 = j1 eor s
13256 j1 = (not i1) eor s. */
13257
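/* Worked example (illustrative value): branch_offset = 0x1000 gives
   imm11 = 0, imm10 = 1 and i1 = i2 = s = 0, hence j1 = j2 = 1; with the
   BL opcode (0xf000d000) the encoded halfwords are 0xf001 0xf800, a
   forward Thumb-2 BL of 0x1000 bytes.  */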
13258 branch_insn |= (branch_offset >> 1) & 0x7ff;
13259 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
13260 i2 = (branch_offset >> 22) & 1;
13261 i1 = (branch_offset >> 23) & 1;
13262 s = (branch_offset >> 24) & 1;
13263 j1 = (!i1) ^ s;
13264 j2 = (!i2) ^ s;
13265 branch_insn |= j2 << 11;
13266 branch_insn |= j1 << 13;
13267 branch_insn |= s << 26;
13268 }
13269 break;
13270
13271 default:
13272 BFD_FAIL ();
13273 return FALSE;
13274 }
13275
13276 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[index]);
13277 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[index + 2]);
13278
13279 return TRUE;
13280 }
13281
13282 /* Do code byteswapping. Return FALSE afterwards so that the section is
13283 written out as normal. */
13284
13285 static bfd_boolean
13286 elf32_arm_write_section (bfd *output_bfd,
13287 struct bfd_link_info *link_info,
13288 asection *sec,
13289 bfd_byte *contents)
13290 {
13291 unsigned int mapcount, errcount;
13292 _arm_elf_section_data *arm_data;
13293 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
13294 elf32_arm_section_map *map;
13295 elf32_vfp11_erratum_list *errnode;
13296 bfd_vma ptr;
13297 bfd_vma end;
13298 bfd_vma offset = sec->output_section->vma + sec->output_offset;
13299 bfd_byte tmp;
13300 unsigned int i;
13301
13302 /* If this section has not been allocated an _arm_elf_section_data
13303 structure then we cannot record anything. */
13304 arm_data = get_arm_elf_section_data (sec);
13305 if (arm_data == NULL)
13306 return FALSE;
13307
13308 mapcount = arm_data->mapcount;
13309 map = arm_data->map;
13310 errcount = arm_data->erratumcount;
13311
13312 if (errcount != 0)
13313 {
13314 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
13315
13316 for (errnode = arm_data->erratumlist; errnode != 0;
13317 errnode = errnode->next)
13318 {
13319 bfd_vma index = errnode->vma - offset;
13320
13321 switch (errnode->type)
13322 {
13323 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
13324 {
13325 bfd_vma branch_to_veneer;
13326 /* Original condition code of instruction, plus bit mask for
13327 ARM B instruction. */
13328 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
13329 | 0x0a000000;
13330
13331 /* The instruction is before the label. */
13332 index -= 4;
13333
13334 /* The branch sits at errnode->vma - 4, and ARM branches are relative to PC (insn + 8), hence the -4 below. */
13335 branch_to_veneer = errnode->u.b.veneer->vma
13336 - errnode->vma - 4;
13337
13338 if ((signed) branch_to_veneer < -(1 << 25)
13339 || (signed) branch_to_veneer >= (1 << 25))
13340 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13341 "range"), output_bfd);
13342
13343 insn |= (branch_to_veneer >> 2) & 0xffffff;
13344 contents[endianflip ^ index] = insn & 0xff;
13345 contents[endianflip ^ (index + 1)] = (insn >> 8) & 0xff;
13346 contents[endianflip ^ (index + 2)] = (insn >> 16) & 0xff;
13347 contents[endianflip ^ (index + 3)] = (insn >> 24) & 0xff;
13348 }
13349 break;
13350
13351 case VFP11_ERRATUM_ARM_VENEER:
13352 {
13353 bfd_vma branch_from_veneer;
13354 unsigned int insn;
13355
13356 /* Take size of veneer into account. */
13357 branch_from_veneer = errnode->u.v.branch->vma
13358 - errnode->vma - 12;
13359
13360 if ((signed) branch_from_veneer < -(1 << 25)
13361 || (signed) branch_from_veneer >= (1 << 25))
13362 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13363 "range"), output_bfd);
13364
13365 /* Original instruction. */
13366 insn = errnode->u.v.branch->u.b.vfp_insn;
13367 contents[endianflip ^ index] = insn & 0xff;
13368 contents[endianflip ^ (index + 1)] = (insn >> 8) & 0xff;
13369 contents[endianflip ^ (index + 2)] = (insn >> 16) & 0xff;
13370 contents[endianflip ^ (index + 3)] = (insn >> 24) & 0xff;
13371
13372 /* Branch back to insn after original insn. */
13373 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
13374 contents[endianflip ^ (index + 4)] = insn & 0xff;
13375 contents[endianflip ^ (index + 5)] = (insn >> 8) & 0xff;
13376 contents[endianflip ^ (index + 6)] = (insn >> 16) & 0xff;
13377 contents[endianflip ^ (index + 7)] = (insn >> 24) & 0xff;
13378 }
13379 break;
13380
13381 default:
13382 abort ();
13383 }
13384 }
13385 }
13386
13387 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
13388 {
13389 arm_unwind_table_edit *edit_node
13390 = arm_data->u.exidx.unwind_edit_list;
13391 /* Now, sec->size is the size of the section we will write. The original
13392 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
13393 markers) was sec->rawsize. (If we performed no edits, rawsize will be
13394 zero and we should use size instead.) */
13395 bfd_byte *edited_contents = bfd_malloc (sec->size);
13396 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
13397 unsigned int in_index, out_index;
13398 bfd_vma add_to_offsets = 0;
13399
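/* add_to_offsets tracks how far the entries still to be copied have
   moved relative to their input position: deleting an entry moves the
   following entries eight bytes lower, so their prel31 values must grow
   by 8, while inserting an EXIDX_CANTUNWIND terminator moves them eight
   bytes higher, so the values must shrink by 8.  copy_exidx_entry
   applies the accumulated adjustment.  */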
13400 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
13401 {
13402 if (edit_node)
13403 {
13404 unsigned int edit_index = edit_node->index;
13405
13406 if (in_index < edit_index && in_index * 8 < input_size)
13407 {
13408 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13409 contents + in_index * 8, add_to_offsets);
13410 out_index++;
13411 in_index++;
13412 }
13413 else if (in_index == edit_index
13414 || (in_index * 8 >= input_size
13415 && edit_index == UINT_MAX))
13416 {
13417 switch (edit_node->type)
13418 {
13419 case DELETE_EXIDX_ENTRY:
13420 in_index++;
13421 add_to_offsets += 8;
13422 break;
13423
13424 case INSERT_EXIDX_CANTUNWIND_AT_END:
13425 {
13426 asection *text_sec = edit_node->linked_section;
13427 bfd_vma text_offset = text_sec->output_section->vma
13428 + text_sec->output_offset
13429 + text_sec->size;
13430 bfd_vma exidx_offset = offset + out_index * 8;
13431 unsigned long prel31_offset;
13432
13433 /* Note: this is meant to be equivalent to an
13434 R_ARM_PREL31 relocation. These synthetic
13435 EXIDX_CANTUNWIND markers are not relocated by the
13436 usual BFD method. */
13437 prel31_offset = (text_offset - exidx_offset)
13438 & 0x7ffffffful;
13439
13440 /* First address we can't unwind. */
13441 bfd_put_32 (output_bfd, prel31_offset,
13442 &edited_contents[out_index * 8]);
13443
13444 /* Code for EXIDX_CANTUNWIND. */
13445 bfd_put_32 (output_bfd, 0x1,
13446 &edited_contents[out_index * 8 + 4]);
13447
13448 out_index++;
13449 add_to_offsets -= 8;
13450 }
13451 break;
13452 }
13453
13454 edit_node = edit_node->next;
13455 }
13456 }
13457 else
13458 {
13459 /* No more edits, copy remaining entries verbatim. */
13460 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13461 contents + in_index * 8, add_to_offsets);
13462 out_index++;
13463 in_index++;
13464 }
13465 }
13466
13467 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
13468 bfd_set_section_contents (output_bfd, sec->output_section,
13469 edited_contents,
13470 (file_ptr) sec->output_offset, sec->size);
13471
13472 return TRUE;
13473 }
13474
13475 /* Fix code to point to Cortex-A8 erratum stubs. */
13476 if (globals->fix_cortex_a8)
13477 {
13478 struct a8_branch_to_stub_data data;
13479
13480 data.writing_section = sec;
13481 data.contents = contents;
13482
13483 bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
13484 &data);
13485 }
13486
13487 if (mapcount == 0)
13488 return FALSE;
13489
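/* Byte-swap the code regions described by the mapping symbols: within
   an ARM ('a') region each 32-bit word is reversed in place, within a
   Thumb ('t') region each 16-bit halfword is swapped, and data ('d')
   regions are left untouched.  */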
13490 if (globals->byteswap_code)
13491 {
13492 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
13493
13494 ptr = map[0].vma;
13495 for (i = 0; i < mapcount; i++)
13496 {
13497 if (i == mapcount - 1)
13498 end = sec->size;
13499 else
13500 end = map[i + 1].vma;
13501
13502 switch (map[i].type)
13503 {
13504 case 'a':
13505 /* Byte swap code words. */
13506 while (ptr + 3 < end)
13507 {
13508 tmp = contents[ptr];
13509 contents[ptr] = contents[ptr + 3];
13510 contents[ptr + 3] = tmp;
13511 tmp = contents[ptr + 1];
13512 contents[ptr + 1] = contents[ptr + 2];
13513 contents[ptr + 2] = tmp;
13514 ptr += 4;
13515 }
13516 break;
13517
13518 case 't':
13519 /* Byte swap code halfwords. */
13520 while (ptr + 1 < end)
13521 {
13522 tmp = contents[ptr];
13523 contents[ptr] = contents[ptr + 1];
13524 contents[ptr + 1] = tmp;
13525 ptr += 2;
13526 }
13527 break;
13528
13529 case 'd':
13530 /* Leave data alone. */
13531 break;
13532 }
13533 ptr = end;
13534 }
13535 }
13536
13537 free (map);
13538 arm_data->mapcount = 0;
13539 arm_data->mapsize = 0;
13540 arm_data->map = NULL;
13541 unrecord_section_with_arm_elf_section_data (sec);
13542
13543 return FALSE;
13544 }
13545
13546 static void
13547 unrecord_section_via_map_over_sections (bfd * abfd ATTRIBUTE_UNUSED,
13548 asection * sec,
13549 void * ignore ATTRIBUTE_UNUSED)
13550 {
13551 unrecord_section_with_arm_elf_section_data (sec);
13552 }
13553
13554 static bfd_boolean
13555 elf32_arm_close_and_cleanup (bfd * abfd)
13556 {
13557 if (abfd->sections)
13558 bfd_map_over_sections (abfd,
13559 unrecord_section_via_map_over_sections,
13560 NULL);
13561
13562 return _bfd_elf_close_and_cleanup (abfd);
13563 }
13564
13565 static bfd_boolean
13566 elf32_arm_bfd_free_cached_info (bfd * abfd)
13567 {
13568 if (abfd->sections)
13569 bfd_map_over_sections (abfd,
13570 unrecord_section_via_map_over_sections,
13571 NULL);
13572
13573 return _bfd_free_cached_info (abfd);
13574 }
13575
13576 /* Display STT_ARM_TFUNC symbols as functions. */
13577
13578 static void
13579 elf32_arm_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
13580 asymbol *asym)
13581 {
13582 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
13583
13584 if (ELF_ST_TYPE (elfsym->internal_elf_sym.st_info) == STT_ARM_TFUNC)
13585 elfsym->symbol.flags |= BSF_FUNCTION;
13586 }
13587
13588
13589 /* Mangle thumb function symbols as we read them in. */
13590
13591 static bfd_boolean
13592 elf32_arm_swap_symbol_in (bfd * abfd,
13593 const void *psrc,
13594 const void *pshn,
13595 Elf_Internal_Sym *dst)
13596 {
13597 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
13598 return FALSE;
13599
13600 /* New EABI objects mark thumb function symbols by setting the low bit of
13601 the address. Turn these into STT_ARM_TFUNC. */
13602 if ((ELF_ST_TYPE (dst->st_info) == STT_FUNC)
13603 && (dst->st_value & 1))
13604 {
13605 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_ARM_TFUNC);
13606 dst->st_value &= ~(bfd_vma) 1;
13607 }
13608 return TRUE;
13609 }
13610
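/* For example, a symbol read in as STT_FUNC with st_value 0x8001 is
   stored internally as STT_ARM_TFUNC with st_value 0x8000;
   elf32_arm_swap_symbol_out below applies the complementary
   transformation when the symbol table is written back out.  */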
13611
13612 /* Mangle thumb function symbols as we write them out. */
13613
13614 static void
13615 elf32_arm_swap_symbol_out (bfd *abfd,
13616 const Elf_Internal_Sym *src,
13617 void *cdst,
13618 void *shndx)
13619 {
13620 Elf_Internal_Sym newsym;
13621
13622 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
13623 of the address set, as per the new EABI. We do this unconditionally
13624 because objcopy does not set the elf header flags until after
13625 it writes out the symbol table. */
13626 if (ELF_ST_TYPE (src->st_info) == STT_ARM_TFUNC)
13627 {
13628 newsym = *src;
13629 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
13630 if (newsym.st_shndx != SHN_UNDEF)
13631 {
13632 /* Do this only for defined symbols. At link time, the static
13633 linker simulates the work of the dynamic linker by resolving
13634 symbols, and carries over the Thumb-ness of the symbols it finds
13635 to the output symbol table. It is not entirely clear how this
13636 happens, but the Thumb-ness of undefined symbols can well be
13637 different at runtime, and writing '1' for them would be confusing
13638 for users and possibly for the dynamic linker itself.
13639 */
13640 newsym.st_value |= 1;
13641 }
13642
13643 src = &newsym;
13644 }
13645 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
13646 }
13647
13648 /* Add the PT_ARM_EXIDX program header. */
13649
13650 static bfd_boolean
13651 elf32_arm_modify_segment_map (bfd *abfd,
13652 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13653 {
13654 struct elf_segment_map *m;
13655 asection *sec;
13656
13657 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13658 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13659 {
13660 /* If there is already a PT_ARM_EXIDX header, then we do not
13661 want to add another one. This situation arises when running
13662 "strip"; the input binary already has the header. */
13663 m = elf_tdata (abfd)->segment_map;
13664 while (m && m->p_type != PT_ARM_EXIDX)
13665 m = m->next;
13666 if (!m)
13667 {
13668 m = bfd_zalloc (abfd, sizeof (struct elf_segment_map));
13669 if (m == NULL)
13670 return FALSE;
13671 m->p_type = PT_ARM_EXIDX;
13672 m->count = 1;
13673 m->sections[0] = sec;
13674
13675 m->next = elf_tdata (abfd)->segment_map;
13676 elf_tdata (abfd)->segment_map = m;
13677 }
13678 }
13679
13680 return TRUE;
13681 }
13682
13683 /* We may add a PT_ARM_EXIDX program header. */
13684
13685 static int
13686 elf32_arm_additional_program_headers (bfd *abfd,
13687 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13688 {
13689 asection *sec;
13690
13691 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13692 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13693 return 1;
13694 else
13695 return 0;
13696 }
13697
13698 /* We have two function types: STT_FUNC and STT_ARM_TFUNC. */
13699
13700 static bfd_boolean
13701 elf32_arm_is_function_type (unsigned int type)
13702 {
13703 return (type == STT_FUNC) || (type == STT_ARM_TFUNC);
13704 }
13705
13706 /* We use this to override swap_symbol_in and swap_symbol_out. */
13707 const struct elf_size_info elf32_arm_size_info =
13708 {
13709 sizeof (Elf32_External_Ehdr),
13710 sizeof (Elf32_External_Phdr),
13711 sizeof (Elf32_External_Shdr),
13712 sizeof (Elf32_External_Rel),
13713 sizeof (Elf32_External_Rela),
13714 sizeof (Elf32_External_Sym),
13715 sizeof (Elf32_External_Dyn),
13716 sizeof (Elf_External_Note),
13717 4,
13718 1,
13719 32, 2,
13720 ELFCLASS32, EV_CURRENT,
13721 bfd_elf32_write_out_phdrs,
13722 bfd_elf32_write_shdrs_and_ehdr,
13723 bfd_elf32_checksum_contents,
13724 bfd_elf32_write_relocs,
13725 elf32_arm_swap_symbol_in,
13726 elf32_arm_swap_symbol_out,
13727 bfd_elf32_slurp_reloc_table,
13728 bfd_elf32_slurp_symbol_table,
13729 bfd_elf32_swap_dyn_in,
13730 bfd_elf32_swap_dyn_out,
13731 bfd_elf32_swap_reloc_in,
13732 bfd_elf32_swap_reloc_out,
13733 bfd_elf32_swap_reloca_in,
13734 bfd_elf32_swap_reloca_out
13735 };
13736
13737 #define ELF_ARCH bfd_arch_arm
13738 #define ELF_MACHINE_CODE EM_ARM
13739 #ifdef __QNXTARGET__
13740 #define ELF_MAXPAGESIZE 0x1000
13741 #else
13742 #define ELF_MAXPAGESIZE 0x8000
13743 #endif
13744 #define ELF_MINPAGESIZE 0x1000
13745 #define ELF_COMMONPAGESIZE 0x1000
13746
13747 #define bfd_elf32_mkobject elf32_arm_mkobject
13748
13749 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
13750 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
13751 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
13752 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
13753 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
13754 #define bfd_elf32_bfd_link_hash_table_free elf32_arm_hash_table_free
13755 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
13756 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
13757 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
13758 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
13759 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
13760 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
13761 #define bfd_elf32_close_and_cleanup elf32_arm_close_and_cleanup
13762 #define bfd_elf32_bfd_free_cached_info elf32_arm_bfd_free_cached_info
13763 #define bfd_elf32_bfd_final_link elf32_arm_final_link
13764
13765 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
13766 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
13767 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
13768 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
13769 #define elf_backend_check_relocs elf32_arm_check_relocs
13770 #define elf_backend_relocate_section elf32_arm_relocate_section
13771 #define elf_backend_write_section elf32_arm_write_section
13772 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
13773 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
13774 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
13775 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
13776 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
13777 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
13778 #define elf_backend_post_process_headers elf32_arm_post_process_headers
13779 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
13780 #define elf_backend_object_p elf32_arm_object_p
13781 #define elf_backend_section_flags elf32_arm_section_flags
13782 #define elf_backend_fake_sections elf32_arm_fake_sections
13783 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
13784 #define elf_backend_final_write_processing elf32_arm_final_write_processing
13785 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
13786 #define elf_backend_symbol_processing elf32_arm_symbol_processing
13787 #define elf_backend_size_info elf32_arm_size_info
13788 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
13789 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
13790 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
13791 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
13792 #define elf_backend_is_function_type elf32_arm_is_function_type
13793
13794 #define elf_backend_can_refcount 1
13795 #define elf_backend_can_gc_sections 1
13796 #define elf_backend_plt_readonly 1
13797 #define elf_backend_want_got_plt 1
13798 #define elf_backend_want_plt_sym 0
13799 #define elf_backend_may_use_rel_p 1
13800 #define elf_backend_may_use_rela_p 0
13801 #define elf_backend_default_use_rela_p 0
13802
13803 #define elf_backend_got_header_size 12
13804
13805 #undef elf_backend_obj_attrs_vendor
13806 #define elf_backend_obj_attrs_vendor "aeabi"
13807 #undef elf_backend_obj_attrs_section
13808 #define elf_backend_obj_attrs_section ".ARM.attributes"
13809 #undef elf_backend_obj_attrs_arg_type
13810 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
13811 #undef elf_backend_obj_attrs_section_type
13812 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
13813 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
13814
13815 #include "elf32-target.h"
13816
13817 /* VxWorks Targets. */
13818
13819 #undef TARGET_LITTLE_SYM
13820 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vxworks_vec
13821 #undef TARGET_LITTLE_NAME
13822 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
13823 #undef TARGET_BIG_SYM
13824 #define TARGET_BIG_SYM bfd_elf32_bigarm_vxworks_vec
13825 #undef TARGET_BIG_NAME
13826 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
13827
13828 /* Like elf32_arm_link_hash_table_create -- but overrides
13829 appropriately for VxWorks. */
13830
13831 static struct bfd_link_hash_table *
13832 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
13833 {
13834 struct bfd_link_hash_table *ret;
13835
13836 ret = elf32_arm_link_hash_table_create (abfd);
13837 if (ret)
13838 {
13839 struct elf32_arm_link_hash_table *htab
13840 = (struct elf32_arm_link_hash_table *) ret;
13841 htab->use_rel = 0;
13842 htab->vxworks_p = 1;
13843 }
13844 return ret;
13845 }
13846
13847 static void
13848 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
13849 {
13850 elf32_arm_final_write_processing (abfd, linker);
13851 elf_vxworks_final_write_processing (abfd, linker);
13852 }
13853
13854 #undef elf32_bed
13855 #define elf32_bed elf32_arm_vxworks_bed
13856
13857 #undef bfd_elf32_bfd_link_hash_table_create
13858 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
13859 #undef elf_backend_add_symbol_hook
13860 #define elf_backend_add_symbol_hook elf_vxworks_add_symbol_hook
13861 #undef elf_backend_final_write_processing
13862 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
13863 #undef elf_backend_emit_relocs
13864 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
13865
13866 #undef elf_backend_may_use_rel_p
13867 #define elf_backend_may_use_rel_p 0
13868 #undef elf_backend_may_use_rela_p
13869 #define elf_backend_may_use_rela_p 1
13870 #undef elf_backend_default_use_rela_p
13871 #define elf_backend_default_use_rela_p 1
13872 #undef elf_backend_want_plt_sym
13873 #define elf_backend_want_plt_sym 1
13874 #undef ELF_MAXPAGESIZE
13875 #define ELF_MAXPAGESIZE 0x1000
13876
13877 #include "elf32-target.h"
13878
13879
13880 /* Symbian OS Targets. */
13881
13882 #undef TARGET_LITTLE_SYM
13883 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_symbian_vec
13884 #undef TARGET_LITTLE_NAME
13885 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
13886 #undef TARGET_BIG_SYM
13887 #define TARGET_BIG_SYM bfd_elf32_bigarm_symbian_vec
13888 #undef TARGET_BIG_NAME
13889 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
13890
13891 /* Like elf32_arm_link_hash_table_create -- but overrides
13892 appropriately for Symbian OS. */
13893
13894 static struct bfd_link_hash_table *
13895 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
13896 {
13897 struct bfd_link_hash_table *ret;
13898
13899 ret = elf32_arm_link_hash_table_create (abfd);
13900 if (ret)
13901 {
13902 struct elf32_arm_link_hash_table *htab
13903 = (struct elf32_arm_link_hash_table *)ret;
13904 /* There is no PLT header for Symbian OS. */
13905 htab->plt_header_size = 0;
13906 /* The PLT entries are each one instruction and one word. */
13907 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
13908 htab->symbian_p = 1;
13909 /* Symbian uses armv5t or above, so use_blx is always true. */
13910 htab->use_blx = 1;
13911 htab->root.is_relocatable_executable = 1;
13912 }
13913 return ret;
13914 }
13915
13916 static const struct bfd_elf_special_section
13917 elf32_arm_symbian_special_sections[] =
13918 {
13919 /* In a BPABI executable, the dynamic linking sections do not go in
13920 the loadable read-only segment. The post-linker may wish to
13921 refer to these sections, but they are not part of the final
13922 program image. */
13923 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
13924 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
13925 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
13926 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
13927 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
13928 /* These sections do not need to be writable as the SymbianOS
13929 postlinker will arrange things so that no dynamic relocation is
13930 required. */
13931 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
13932 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
13933 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
13934 { NULL, 0, 0, 0, 0 }
13935 };
13936
13937 static void
13938 elf32_arm_symbian_begin_write_processing (bfd *abfd,
13939 struct bfd_link_info *link_info)
13940 {
13941 /* BPABI objects are never loaded directly by an OS kernel; they are
13942 processed by a postlinker first, into an OS-specific format. If
13943 the D_PAGED bit is set on the file, BFD will align segments on
13944 page boundaries, so that an OS can directly map the file. With
13945 BPABI objects, that just results in wasted space. In addition,
13946 because we clear the D_PAGED bit, map_sections_to_segments will
13947 recognize that the program headers should not be mapped into any
13948 loadable segment. */
13949 abfd->flags &= ~D_PAGED;
13950 elf32_arm_begin_write_processing (abfd, link_info);
13951 }
13952
13953 static bfd_boolean
13954 elf32_arm_symbian_modify_segment_map (bfd *abfd,
13955 struct bfd_link_info *info)
13956 {
13957 struct elf_segment_map *m;
13958 asection *dynsec;
13959
13960 /* BPABI shared libraries and executables should have a PT_DYNAMIC
13961 segment. However, because the .dynamic section is not marked
13962 with SEC_LOAD, the generic ELF code will not create such a
13963 segment. */
13964 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
13965 if (dynsec)
13966 {
13967 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
13968 if (m->p_type == PT_DYNAMIC)
13969 break;
13970
13971 if (m == NULL)
13972 {
13973 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
13974 m->next = elf_tdata (abfd)->segment_map;
13975 elf_tdata (abfd)->segment_map = m;
13976 }
13977 }
13978
13979 /* Also call the generic arm routine. */
13980 return elf32_arm_modify_segment_map (abfd, info);
13981 }
13982
13983 /* Return address for Ith PLT stub in section PLT, for relocation REL
13984 or (bfd_vma) -1 if it should not be included. */
13985
13986 static bfd_vma
13987 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
13988 const arelent *rel ATTRIBUTE_UNUSED)
13989 {
13990 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
13991 }
13992
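/* For example, assuming the two-word (8-byte) PLT entry noted in
   elf32_arm_symbian_link_hash_table_create above, stub 0 resolves to
   plt->vma, stub 1 to plt->vma + 8, and so on.  */
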
13993
13994 #undef elf32_bed
13995 #define elf32_bed elf32_arm_symbian_bed
13996
13997 /* The dynamic sections are not allocated on SymbianOS; the postlinker
13998 will process them and then discard them. */
13999 #undef ELF_DYNAMIC_SEC_FLAGS
14000 #define ELF_DYNAMIC_SEC_FLAGS \
14001 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
14002
14003 #undef elf_backend_add_symbol_hook
14004 #undef elf_backend_emit_relocs
14005
14006 #undef bfd_elf32_bfd_link_hash_table_create
14007 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
14008 #undef elf_backend_special_sections
14009 #define elf_backend_special_sections elf32_arm_symbian_special_sections
14010 #undef elf_backend_begin_write_processing
14011 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
14012 #undef elf_backend_final_write_processing
14013 #define elf_backend_final_write_processing elf32_arm_final_write_processing
14014
14015 #undef elf_backend_modify_segment_map
14016 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
14017
14018 /* There is no .got section for BPABI objects, and hence no header. */
14019 #undef elf_backend_got_header_size
14020 #define elf_backend_got_header_size 0
14021
14022 /* Similarly, there is no .got.plt section. */
14023 #undef elf_backend_want_got_plt
14024 #define elf_backend_want_got_plt 0
14025
14026 #undef elf_backend_plt_sym_val
14027 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
14028
14029 #undef elf_backend_may_use_rel_p
14030 #define elf_backend_may_use_rel_p 1
14031 #undef elf_backend_may_use_rela_p
14032 #define elf_backend_may_use_rela_p 0
14033 #undef elf_backend_default_use_rela_p
14034 #define elf_backend_default_use_rela_p 0
14035 #undef elf_backend_want_plt_sym
14036 #define elf_backend_want_plt_sym 0
14037 #undef ELF_MAXPAGESIZE
14038 #define ELF_MAXPAGESIZE 0x8000
14039
14040 #include "elf32-target.h"