1 /* BFD back-end for Hitachi Super-H COFF binaries.
2 Copyright 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001
3 Free Software Foundation, Inc.
4 Contributed by Cygnus Support.
5 Written by Steve Chamberlain, <sac@cygnus.com>.
6 Relaxing code written by Ian Lance Taylor, <ian@cygnus.com>.
7
8 This file is part of BFD, the Binary File Descriptor library.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
23
24 #include "bfd.h"
25 #include "sysdep.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "bfdlink.h"
29 #include "coff/sh.h"
30 #include "coff/internal.h"
31
32 #ifdef COFF_WITH_PE
33 #include "coff/pe.h"
34
35 #ifndef COFF_IMAGE_WITH_PE
36 static boolean sh_align_load_span
37 PARAMS ((bfd *, asection *, bfd_byte *,
38 boolean (*) (bfd *, asection *, PTR, bfd_byte *, bfd_vma),
39 PTR, bfd_vma **, bfd_vma *, bfd_vma, bfd_vma, boolean *));
40
41 #define _bfd_sh_align_load_span sh_align_load_span
42 #endif
43 #endif
44
45 #include "libcoff.h"
46
47 /* Internal functions. */
48 static bfd_reloc_status_type sh_reloc
49 PARAMS ((bfd *, arelent *, asymbol *, PTR, asection *, bfd *, char **));
50 static long get_symbol_value PARAMS ((asymbol *));
51 static boolean sh_relax_section
52 PARAMS ((bfd *, asection *, struct bfd_link_info *, boolean *));
53 static boolean sh_relax_delete_bytes
54 PARAMS ((bfd *, asection *, bfd_vma, int));
55 #ifndef COFF_IMAGE_WITH_PE
56 static const struct sh_opcode *sh_insn_info PARAMS ((unsigned int));
57 #endif
58 static boolean sh_align_loads
59 PARAMS ((bfd *, asection *, struct internal_reloc *, bfd_byte *, boolean *));
60 static boolean sh_swap_insns
61 PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
62 static boolean sh_relocate_section
63 PARAMS ((bfd *, struct bfd_link_info *, bfd *, asection *, bfd_byte *,
64 struct internal_reloc *, struct internal_syment *, asection **));
65 static bfd_byte *sh_coff_get_relocated_section_contents
66 PARAMS ((bfd *, struct bfd_link_info *, struct bfd_link_order *,
67 bfd_byte *, boolean, asymbol **));
68 static reloc_howto_type * sh_coff_reloc_type_lookup PARAMS ((bfd *, bfd_reloc_code_real_type));
69
70 #ifdef COFF_WITH_PE
71 /* Can't build import tables with 2**4 alignment. */
72 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER 2
73 #else
74 /* Default section alignment to 2**4. */
75 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER 4
76 #endif
77
78 #ifdef COFF_IMAGE_WITH_PE
79 /* Align PE executables. */
80 #define COFF_PAGE_SIZE 0x1000
81 #endif
82
83 /* Generate long file names. */
84 #define COFF_LONG_FILENAMES
85
86 #ifdef COFF_WITH_PE
87 static boolean in_reloc_p PARAMS ((bfd *, reloc_howto_type *));
88 /* Return true if this relocation should
89 appear in the output .reloc section. */
90 static boolean in_reloc_p (abfd, howto)
91 bfd * abfd ATTRIBUTE_UNUSED;
92 reloc_howto_type * howto;
93 {
94 return ! howto->pc_relative && howto->type != R_SH_IMAGEBASE;
95 }
96 #endif
97
98 /* The supported relocations. There are a lot of relocations defined
99 in coff/internal.h which we do not expect to ever see. */
100 static reloc_howto_type sh_coff_howtos[] =
101 {
102 EMPTY_HOWTO (0),
103 EMPTY_HOWTO (1),
104 #ifdef COFF_WITH_PE
105 /* Windows CE */
106 HOWTO (R_SH_IMM32CE, /* type */
107 0, /* rightshift */
108 2, /* size (0 = byte, 1 = short, 2 = long) */
109 32, /* bitsize */
110 false, /* pc_relative */
111 0, /* bitpos */
112 complain_overflow_bitfield, /* complain_on_overflow */
113 sh_reloc, /* special_function */
114 "r_imm32ce", /* name */
115 true, /* partial_inplace */
116 0xffffffff, /* src_mask */
117 0xffffffff, /* dst_mask */
118 false), /* pcrel_offset */
119 #else
120 EMPTY_HOWTO (2),
121 #endif
122 EMPTY_HOWTO (3), /* R_SH_PCREL8 */
123 EMPTY_HOWTO (4), /* R_SH_PCREL16 */
124 EMPTY_HOWTO (5), /* R_SH_HIGH8 */
125 EMPTY_HOWTO (6), /* R_SH_IMM24 */
126 EMPTY_HOWTO (7), /* R_SH_LOW16 */
127 EMPTY_HOWTO (8),
128 EMPTY_HOWTO (9), /* R_SH_PCDISP8BY4 */
129
130 HOWTO (R_SH_PCDISP8BY2, /* type */
131 1, /* rightshift */
132 1, /* size (0 = byte, 1 = short, 2 = long) */
133 8, /* bitsize */
134 true, /* pc_relative */
135 0, /* bitpos */
136 complain_overflow_signed, /* complain_on_overflow */
137 sh_reloc, /* special_function */
138 "r_pcdisp8by2", /* name */
139 true, /* partial_inplace */
140 0xff, /* src_mask */
141 0xff, /* dst_mask */
142 true), /* pcrel_offset */
143
144 EMPTY_HOWTO (11), /* R_SH_PCDISP8 */
145
146 HOWTO (R_SH_PCDISP, /* type */
147 1, /* rightshift */
148 1, /* size (0 = byte, 1 = short, 2 = long) */
149 12, /* bitsize */
150 true, /* pc_relative */
151 0, /* bitpos */
152 complain_overflow_signed, /* complain_on_overflow */
153 sh_reloc, /* special_function */
154 "r_pcdisp12by2", /* name */
155 true, /* partial_inplace */
156 0xfff, /* src_mask */
157 0xfff, /* dst_mask */
158 true), /* pcrel_offset */
159
160 EMPTY_HOWTO (13),
161
162 HOWTO (R_SH_IMM32, /* type */
163 0, /* rightshift */
164 2, /* size (0 = byte, 1 = short, 2 = long) */
165 32, /* bitsize */
166 false, /* pc_relative */
167 0, /* bitpos */
168 complain_overflow_bitfield, /* complain_on_overflow */
169 sh_reloc, /* special_function */
170 "r_imm32", /* name */
171 true, /* partial_inplace */
172 0xffffffff, /* src_mask */
173 0xffffffff, /* dst_mask */
174 false), /* pcrel_offset */
175
176 EMPTY_HOWTO (15),
177 #ifdef COFF_WITH_PE
178 HOWTO (R_SH_IMAGEBASE, /* type */
179 0, /* rightshift */
180 2, /* size (0 = byte, 1 = short, 2 = long) */
181 32, /* bitsize */
182 false, /* pc_relative */
183 0, /* bitpos */
184 complain_overflow_bitfield, /* complain_on_overflow */
185 sh_reloc, /* special_function */
186 "rva32", /* name */
187 true, /* partial_inplace */
188 0xffffffff, /* src_mask */
189 0xffffffff, /* dst_mask */
190 false), /* pcrel_offset */
191 #else
192 EMPTY_HOWTO (16), /* R_SH_IMM8 */
193 #endif
194 EMPTY_HOWTO (17), /* R_SH_IMM8BY2 */
195 EMPTY_HOWTO (18), /* R_SH_IMM8BY4 */
196 EMPTY_HOWTO (19), /* R_SH_IMM4 */
197 EMPTY_HOWTO (20), /* R_SH_IMM4BY2 */
198 EMPTY_HOWTO (21), /* R_SH_IMM4BY4 */
199
200 HOWTO (R_SH_PCRELIMM8BY2, /* type */
201 1, /* rightshift */
202 1, /* size (0 = byte, 1 = short, 2 = long) */
203 8, /* bitsize */
204 true, /* pc_relative */
205 0, /* bitpos */
206 complain_overflow_unsigned, /* complain_on_overflow */
207 sh_reloc, /* special_function */
208 "r_pcrelimm8by2", /* name */
209 true, /* partial_inplace */
210 0xff, /* src_mask */
211 0xff, /* dst_mask */
212 true), /* pcrel_offset */
213
214 HOWTO (R_SH_PCRELIMM8BY4, /* type */
215 2, /* rightshift */
216 1, /* size (0 = byte, 1 = short, 2 = long) */
217 8, /* bitsize */
218 true, /* pc_relative */
219 0, /* bitpos */
220 complain_overflow_unsigned, /* complain_on_overflow */
221 sh_reloc, /* special_function */
222 "r_pcrelimm8by4", /* name */
223 true, /* partial_inplace */
224 0xff, /* src_mask */
225 0xff, /* dst_mask */
226 true), /* pcrel_offset */
227
228 HOWTO (R_SH_IMM16, /* type */
229 0, /* rightshift */
230 1, /* size (0 = byte, 1 = short, 2 = long) */
231 16, /* bitsize */
232 false, /* pc_relative */
233 0, /* bitpos */
234 complain_overflow_bitfield, /* complain_on_overflow */
235 sh_reloc, /* special_function */
236 "r_imm16", /* name */
237 true, /* partial_inplace */
238 0xffff, /* src_mask */
239 0xffff, /* dst_mask */
240 false), /* pcrel_offset */
241
242 HOWTO (R_SH_SWITCH16, /* type */
243 0, /* rightshift */
244 1, /* size (0 = byte, 1 = short, 2 = long) */
245 16, /* bitsize */
246 false, /* pc_relative */
247 0, /* bitpos */
248 complain_overflow_bitfield, /* complain_on_overflow */
249 sh_reloc, /* special_function */
250 "r_switch16", /* name */
251 true, /* partial_inplace */
252 0xffff, /* src_mask */
253 0xffff, /* dst_mask */
254 false), /* pcrel_offset */
255
256 HOWTO (R_SH_SWITCH32, /* type */
257 0, /* rightshift */
258 2, /* size (0 = byte, 1 = short, 2 = long) */
259 32, /* bitsize */
260 false, /* pc_relative */
261 0, /* bitpos */
262 complain_overflow_bitfield, /* complain_on_overflow */
263 sh_reloc, /* special_function */
264 "r_switch32", /* name */
265 true, /* partial_inplace */
266 0xffffffff, /* src_mask */
267 0xffffffff, /* dst_mask */
268 false), /* pcrel_offset */
269
270 HOWTO (R_SH_USES, /* type */
271 0, /* rightshift */
272 1, /* size (0 = byte, 1 = short, 2 = long) */
273 16, /* bitsize */
274 false, /* pc_relative */
275 0, /* bitpos */
276 complain_overflow_bitfield, /* complain_on_overflow */
277 sh_reloc, /* special_function */
278 "r_uses", /* name */
279 true, /* partial_inplace */
280 0xffff, /* src_mask */
281 0xffff, /* dst_mask */
282 false), /* pcrel_offset */
283
284 HOWTO (R_SH_COUNT, /* type */
285 0, /* rightshift */
286 2, /* size (0 = byte, 1 = short, 2 = long) */
287 32, /* bitsize */
288 false, /* pc_relative */
289 0, /* bitpos */
290 complain_overflow_bitfield, /* complain_on_overflow */
291 sh_reloc, /* special_function */
292 "r_count", /* name */
293 true, /* partial_inplace */
294 0xffffffff, /* src_mask */
295 0xffffffff, /* dst_mask */
296 false), /* pcrel_offset */
297
298 HOWTO (R_SH_ALIGN, /* type */
299 0, /* rightshift */
300 2, /* size (0 = byte, 1 = short, 2 = long) */
301 32, /* bitsize */
302 false, /* pc_relative */
303 0, /* bitpos */
304 complain_overflow_bitfield, /* complain_on_overflow */
305 sh_reloc, /* special_function */
306 "r_align", /* name */
307 true, /* partial_inplace */
308 0xffffffff, /* src_mask */
309 0xffffffff, /* dst_mask */
310 false), /* pcrel_offset */
311
312 HOWTO (R_SH_CODE, /* type */
313 0, /* rightshift */
314 2, /* size (0 = byte, 1 = short, 2 = long) */
315 32, /* bitsize */
316 false, /* pc_relative */
317 0, /* bitpos */
318 complain_overflow_bitfield, /* complain_on_overflow */
319 sh_reloc, /* special_function */
320 "r_code", /* name */
321 true, /* partial_inplace */
322 0xffffffff, /* src_mask */
323 0xffffffff, /* dst_mask */
324 false), /* pcrel_offset */
325
326 HOWTO (R_SH_DATA, /* type */
327 0, /* rightshift */
328 2, /* size (0 = byte, 1 = short, 2 = long) */
329 32, /* bitsize */
330 false, /* pc_relative */
331 0, /* bitpos */
332 complain_overflow_bitfield, /* complain_on_overflow */
333 sh_reloc, /* special_function */
334 "r_data", /* name */
335 true, /* partial_inplace */
336 0xffffffff, /* src_mask */
337 0xffffffff, /* dst_mask */
338 false), /* pcrel_offset */
339
340 HOWTO (R_SH_LABEL, /* type */
341 0, /* rightshift */
342 2, /* size (0 = byte, 1 = short, 2 = long) */
343 32, /* bitsize */
344 false, /* pc_relative */
345 0, /* bitpos */
346 complain_overflow_bitfield, /* complain_on_overflow */
347 sh_reloc, /* special_function */
348 "r_label", /* name */
349 true, /* partial_inplace */
350 0xffffffff, /* src_mask */
351 0xffffffff, /* dst_mask */
352 false), /* pcrel_offset */
353
354 HOWTO (R_SH_SWITCH8, /* type */
355 0, /* rightshift */
356 0, /* size (0 = byte, 1 = short, 2 = long) */
357 8, /* bitsize */
358 false, /* pc_relative */
359 0, /* bitpos */
360 complain_overflow_bitfield, /* complain_on_overflow */
361 sh_reloc, /* special_function */
362 "r_switch8", /* name */
363 true, /* partial_inplace */
364 0xff, /* src_mask */
365 0xff, /* dst_mask */
366 false) /* pcrel_offset */
367 };
368
369 #define SH_COFF_HOWTO_COUNT (sizeof sh_coff_howtos / sizeof sh_coff_howtos[0])
370
371 /* Check for a bad magic number. */
372 #define BADMAG(x) SHBADMAG(x)
373
374 /* Customize coffcode.h (this is not currently used). */
375 #define SH 1
376
377 /* FIXME: This should not be set here. */
378 #define __A_MAGIC_SET__
379
380 #ifndef COFF_WITH_PE
381 /* Swap the r_offset field in and out. */
382 #define SWAP_IN_RELOC_OFFSET H_GET_32
383 #define SWAP_OUT_RELOC_OFFSET H_PUT_32
384
385 /* Swap out extra information in the reloc structure. */
386 #define SWAP_OUT_RELOC_EXTRA(abfd, src, dst) \
387 do \
388 { \
389 dst->r_stuff[0] = 'S'; \
390 dst->r_stuff[1] = 'C'; \
391 } \
392 while (0)
393 #endif
394
395 /* Get the value of a symbol, when performing a relocation. */
396
397 static long
398 get_symbol_value (symbol)
399 asymbol *symbol;
400 {
401 bfd_vma relocation;
402
403 if (bfd_is_com_section (symbol->section))
404 relocation = 0;
405 else
406 relocation = (symbol->value +
407 symbol->section->output_section->vma +
408 symbol->section->output_offset);
409
410 return relocation;
411 }
412
413 #ifdef COFF_WITH_PE
414 /* Convert an rtype to howto for the COFF backend linker.
415 Copied from coff-i386. */
416 #define coff_rtype_to_howto coff_sh_rtype_to_howto
417 static reloc_howto_type * coff_sh_rtype_to_howto PARAMS ((bfd *, asection *, struct internal_reloc *, struct coff_link_hash_entry *, struct internal_syment *, bfd_vma *));
418
419 static reloc_howto_type *
420 coff_sh_rtype_to_howto (abfd, sec, rel, h, sym, addendp)
421 bfd * abfd ATTRIBUTE_UNUSED;
422 asection * sec;
423 struct internal_reloc * rel;
424 struct coff_link_hash_entry * h;
425 struct internal_syment * sym;
426 bfd_vma * addendp;
427 {
428 reloc_howto_type * howto;
429
430 howto = sh_coff_howtos + rel->r_type;
431
432 *addendp = 0;
433
434 if (howto->pc_relative)
435 *addendp += sec->vma;
436
437 if (sym != NULL && sym->n_scnum == 0 && sym->n_value != 0)
438 {
439 /* This is a common symbol. The section contents include the
440 size (sym->n_value) as an addend. The relocate_section
441 function will be adding in the final value of the symbol. We
442 need to subtract out the current size in order to get the
443 correct result. */
444 BFD_ASSERT (h != NULL);
445 }
446
447 if (howto->pc_relative)
448 {
449 *addendp -= 4;
450
451 /* If the symbol is defined, then the generic code is going to
452 add back the symbol value in order to cancel out an
453 adjustment it made to the addend. However, we set the addend
454 to 0 at the start of this function. We need to adjust here,
455 to avoid the adjustment the generic code will make. FIXME:
456 This is getting a bit hackish. */
457 if (sym != NULL && sym->n_scnum != 0)
458 *addendp -= sym->n_value;
459 }
460
461 if (rel->r_type == R_SH_IMAGEBASE)
462 *addendp -= pe_data (sec->output_section->owner)->pe_opthdr.ImageBase;
463
464 return howto;
465 }
466
467 #endif /* COFF_WITH_PE */
468
469 /* This structure is used to map BFD reloc codes to SH PE relocs. */
470 struct shcoff_reloc_map
471 {
472 bfd_reloc_code_real_type bfd_reloc_val;
473 unsigned char shcoff_reloc_val;
474 };
475
476 #ifdef COFF_WITH_PE
477 /* An array mapping BFD reloc codes to SH PE relocs. */
478 static const struct shcoff_reloc_map sh_reloc_map[] =
479 {
480 { BFD_RELOC_32, R_SH_IMM32CE },
481 { BFD_RELOC_RVA, R_SH_IMAGEBASE },
482 { BFD_RELOC_CTOR, R_SH_IMM32CE },
483 };
484 #else
485 /* An array mapping BFD reloc codes to SH PE relocs. */
486 static const struct shcoff_reloc_map sh_reloc_map[] =
487 {
488 { BFD_RELOC_32, R_SH_IMM32 },
489 { BFD_RELOC_CTOR, R_SH_IMM32 },
490 };
491 #endif
492
493 /* Given a BFD reloc code, return the howto structure for the
494 corresponding SH PE reloc. */
495 #define coff_bfd_reloc_type_lookup sh_coff_reloc_type_lookup
496
497 static reloc_howto_type *
498 sh_coff_reloc_type_lookup (abfd, code)
499 bfd * abfd ATTRIBUTE_UNUSED;
500 bfd_reloc_code_real_type code;
501 {
502 unsigned int i;
503
504 for (i = ARRAY_SIZE (sh_reloc_map); i--;)
505 if (sh_reloc_map[i].bfd_reloc_val == code)
506 return &sh_coff_howtos[(int) sh_reloc_map[i].shcoff_reloc_val];
507
508 fprintf (stderr, "SH Error: unknown reloc type %d\n", code);
509 return NULL;
510 }
511
512 /* This macro is used in coffcode.h to get the howto corresponding to
513 an internal reloc. */
514
515 #define RTYPE2HOWTO(relent, internal) \
516 ((relent)->howto = \
517 ((internal)->r_type < SH_COFF_HOWTO_COUNT \
518 ? &sh_coff_howtos[(internal)->r_type] \
519 : (reloc_howto_type *) NULL))
520
521 /* This is the same as the macro in coffcode.h, except that it copies
522 r_offset into reloc_entry->addend for some relocs. */
523 #define CALC_ADDEND(abfd, ptr, reloc, cache_ptr) \
524 { \
525 coff_symbol_type *coffsym = (coff_symbol_type *) NULL; \
526 if (ptr && bfd_asymbol_bfd (ptr) != abfd) \
527 coffsym = (obj_symbols (abfd) \
528 + (cache_ptr->sym_ptr_ptr - symbols)); \
529 else if (ptr) \
530 coffsym = coff_symbol_from (abfd, ptr); \
531 if (coffsym != (coff_symbol_type *) NULL \
532 && coffsym->native->u.syment.n_scnum == 0) \
533 cache_ptr->addend = 0; \
534 else if (ptr && bfd_asymbol_bfd (ptr) == abfd \
535 && ptr->section != (asection *) NULL) \
536 cache_ptr->addend = - (ptr->section->vma + ptr->value); \
537 else \
538 cache_ptr->addend = 0; \
539 if ((reloc).r_type == R_SH_SWITCH8 \
540 || (reloc).r_type == R_SH_SWITCH16 \
541 || (reloc).r_type == R_SH_SWITCH32 \
542 || (reloc).r_type == R_SH_USES \
543 || (reloc).r_type == R_SH_COUNT \
544 || (reloc).r_type == R_SH_ALIGN) \
545 cache_ptr->addend = (reloc).r_offset; \
546 }
547
548 /* This is the howto function for the SH relocations. */
549
550 static bfd_reloc_status_type
551 sh_reloc (abfd, reloc_entry, symbol_in, data, input_section, output_bfd,
552 error_message)
553 bfd *abfd;
554 arelent *reloc_entry;
555 asymbol *symbol_in;
556 PTR data;
557 asection *input_section;
558 bfd *output_bfd;
559 char **error_message ATTRIBUTE_UNUSED;
560 {
561 unsigned long insn;
562 bfd_vma sym_value;
563 unsigned short r_type;
564 bfd_vma addr = reloc_entry->address;
565 bfd_byte *hit_data = addr + (bfd_byte *) data;
566
567 r_type = reloc_entry->howto->type;
568
569 if (output_bfd != NULL)
570 {
571 /* Partial linking--do nothing. */
572 reloc_entry->address += input_section->output_offset;
573 return bfd_reloc_ok;
574 }
575
576 /* Almost all relocs have to do with relaxing. If any work must be
577 done for them, it has been done in sh_relax_section. */
578 if (r_type != R_SH_IMM32
579 #ifdef COFF_WITH_PE
580 && r_type != R_SH_IMM32CE
581 && r_type != R_SH_IMAGEBASE
582 #endif
583 && (r_type != R_SH_PCDISP
584 || (symbol_in->flags & BSF_LOCAL) != 0))
585 return bfd_reloc_ok;
586
587 if (symbol_in != NULL
588 && bfd_is_und_section (symbol_in->section))
589 return bfd_reloc_undefined;
590
591 sym_value = get_symbol_value (symbol_in);
592
593 switch (r_type)
594 {
595 case R_SH_IMM32:
596 #ifdef COFF_WITH_PE
597 case R_SH_IMM32CE:
598 #endif
599 insn = bfd_get_32 (abfd, hit_data);
600 insn += sym_value + reloc_entry->addend;
601 bfd_put_32 (abfd, (bfd_vma) insn, hit_data);
602 break;
603 #ifdef COFF_WITH_PE
604 case R_SH_IMAGEBASE:
605 insn = bfd_get_32 (abfd, hit_data);
606 insn += sym_value + reloc_entry->addend;
607 insn -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
608 bfd_put_32 (abfd, (bfd_vma) insn, hit_data);
609 break;
610 #endif
611 case R_SH_PCDISP:
612 insn = bfd_get_16 (abfd, hit_data);
613 sym_value += reloc_entry->addend;
614 sym_value -= (input_section->output_section->vma
615 + input_section->output_offset
616 + addr
617 + 4);
618 sym_value += (insn & 0xfff) << 1;
619 if (insn & 0x800)
620 sym_value -= 0x1000;
621 insn = (insn & 0xf000) | (sym_value & 0xfff);
622 bfd_put_16 (abfd, (bfd_vma) insn, hit_data);
623 if (sym_value < (bfd_vma) -0x1000 || sym_value >= 0x1000)
624 return bfd_reloc_overflow;
625 break;
626 default:
627 abort ();
628 break;
629 }
630
631 return bfd_reloc_ok;
632 }
633
634 #define coff_bfd_merge_private_bfd_data _bfd_generic_verify_endian_match
635
636 /* We can do relaxing. */
637 #define coff_bfd_relax_section sh_relax_section
638
639 /* We use the special COFF backend linker. */
640 #define coff_relocate_section sh_relocate_section
641
642 /* When relaxing, we need to use special code to get the relocated
643 section contents. */
644 #define coff_bfd_get_relocated_section_contents \
645 sh_coff_get_relocated_section_contents
646
647 #include "coffcode.h"
648 \f
649 /* This function handles relaxing on the SH.
650
651 Function calls on the SH look like this:
652
653 movl L1,r0
654 ...
655 jsr @r0
656 ...
657 L1:
658 .long function
659
660 The compiler and assembler will cooperate to create R_SH_USES
661 relocs on the jsr instructions. The r_offset field of the
662 R_SH_USES reloc is the PC relative offset to the instruction which
663 loads the register (the r_offset field is computed as though it
664 were a jump instruction, so the offset value is actually from four
665 bytes past the instruction). The linker can use this reloc to
666 determine just which function is being called, and thus decide
667 whether it is possible to replace the jsr with a bsr.
668
669 If multiple function calls are all based on a single register load
670 (i.e., the same function is called multiple times), the compiler
671 guarantees that each function call will have an R_SH_USES reloc.
672 Therefore, if the linker is able to convert each R_SH_USES reloc
673 which refers to that address, it can safely eliminate the register
674 load.
675
676 When the assembler creates an R_SH_USES reloc, it examines it to
677 determine which address is being loaded (L1 in the above example).
678 It then counts the number of references to that address, and
679 creates an R_SH_COUNT reloc at that address. The r_offset field of
680 the R_SH_COUNT reloc will be the number of references. If the
681 linker is able to eliminate a register load, it can use the
682 R_SH_COUNT reloc to see whether it can also eliminate the function
683 address.
684
685 SH relaxing also handles another, unrelated, matter. On the SH, if
686 a load or store instruction is not aligned on a four byte boundary,
687 the memory cycle interferes with the 32 bit instruction fetch,
688 causing a one cycle bubble in the pipeline. Therefore, we try to
689 align load and store instructions on four byte boundaries if we
690 can, by swapping them with one of the adjacent instructions. */
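
/* For illustration only (a sketch of the intended effect, not verbatim
   linker output): when every R_SH_USES reloc referring to L1 above can
   be converted, the sequence becomes roughly

   	...
   	bsr	function
   	...

   i.e. the jsr @r0 is rewritten as a bsr (opcode 0xb000), the
   mov.l L1,r0 is deleted, and once the R_SH_COUNT use count drops to
   zero the four byte constant at L1 is deleted as well.  */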
691
692 static boolean
693 sh_relax_section (abfd, sec, link_info, again)
694 bfd *abfd;
695 asection *sec;
696 struct bfd_link_info *link_info;
697 boolean *again;
698 {
699 struct internal_reloc *internal_relocs;
700 struct internal_reloc *free_relocs = NULL;
701 boolean have_code;
702 struct internal_reloc *irel, *irelend;
703 bfd_byte *contents = NULL;
704 bfd_byte *free_contents = NULL;
705
706 *again = false;
707
708 if (link_info->relocateable
709 || (sec->flags & SEC_RELOC) == 0
710 || sec->reloc_count == 0)
711 return true;
712
713 /* If this is the first time we have been called for this section,
714 initialize the cooked size. */
715 if (sec->_cooked_size == 0)
716 sec->_cooked_size = sec->_raw_size;
717
718 internal_relocs = (_bfd_coff_read_internal_relocs
719 (abfd, sec, link_info->keep_memory,
720 (bfd_byte *) NULL, false,
721 (struct internal_reloc *) NULL));
722 if (internal_relocs == NULL)
723 goto error_return;
724 if (! link_info->keep_memory)
725 free_relocs = internal_relocs;
726
727 have_code = false;
728
729 irelend = internal_relocs + sec->reloc_count;
730 for (irel = internal_relocs; irel < irelend; irel++)
731 {
732 bfd_vma laddr, paddr, symval;
733 unsigned short insn;
734 struct internal_reloc *irelfn, *irelscan, *irelcount;
735 struct internal_syment sym;
736 bfd_signed_vma foff;
737
738 if (irel->r_type == R_SH_CODE)
739 have_code = true;
740
741 if (irel->r_type != R_SH_USES)
742 continue;
743
744 /* Get the section contents. */
745 if (contents == NULL)
746 {
747 if (coff_section_data (abfd, sec) != NULL
748 && coff_section_data (abfd, sec)->contents != NULL)
749 contents = coff_section_data (abfd, sec)->contents;
750 else
751 {
752 contents = (bfd_byte *) bfd_malloc (sec->_raw_size);
753 if (contents == NULL)
754 goto error_return;
755 free_contents = contents;
756
757 if (! bfd_get_section_contents (abfd, sec, contents,
758 (file_ptr) 0, sec->_raw_size))
759 goto error_return;
760 }
761 }
762
763 /* The r_offset field of the R_SH_USES reloc will point us to
764 the register load. The 4 is because the r_offset field is
 766 computed as though it were a jump offset, which is measured
766 from 4 bytes after the jump instruction. */
767 laddr = irel->r_vaddr - sec->vma + 4;
768 /* Careful to sign extend the 32-bit offset. */
769 laddr += ((irel->r_offset & 0xffffffff) ^ 0x80000000) - 0x80000000;
770 if (laddr >= sec->_raw_size)
771 {
772 (*_bfd_error_handler) ("%s: 0x%lx: warning: bad R_SH_USES offset",
773 bfd_archive_filename (abfd),
774 (unsigned long) irel->r_vaddr);
775 continue;
776 }
777 insn = bfd_get_16 (abfd, contents + laddr);
778
779 /* If the instruction is not mov.l NN,rN, we don't know what to do. */
780 if ((insn & 0xf000) != 0xd000)
781 {
782 ((*_bfd_error_handler)
783 ("%s: 0x%lx: warning: R_SH_USES points to unrecognized insn 0x%x",
784 bfd_archive_filename (abfd), (unsigned long) irel->r_vaddr, insn));
785 continue;
786 }
787
788 /* Get the address from which the register is being loaded. The
789 displacement in the mov.l instruction is quadrupled. It is a
790 displacement from four bytes after the movl instruction, but,
 791 before adding in the PC address, the two least significant bits
792 of the PC are cleared. We assume that the section is aligned
793 on a four byte boundary. */
794 paddr = insn & 0xff;
795 paddr *= 4;
796 paddr += (laddr + 4) &~ (bfd_vma) 3;
797 if (paddr >= sec->_raw_size)
798 {
799 ((*_bfd_error_handler)
800 ("%s: 0x%lx: warning: bad R_SH_USES load offset",
801 bfd_archive_filename (abfd), (unsigned long) irel->r_vaddr));
802 continue;
803 }
804
805 /* Get the reloc for the address from which the register is
806 being loaded. This reloc will tell us which function is
807 actually being called. */
808 paddr += sec->vma;
809 for (irelfn = internal_relocs; irelfn < irelend; irelfn++)
810 if (irelfn->r_vaddr == paddr
811 #ifdef COFF_WITH_PE
812 && (irelfn->r_type == R_SH_IMM32
813 || irelfn->r_type == R_SH_IMM32CE
814 || irelfn->r_type == R_SH_IMAGEBASE))
815
816 #else
817 && irelfn->r_type == R_SH_IMM32)
818 #endif
819 break;
820 if (irelfn >= irelend)
821 {
822 ((*_bfd_error_handler)
823 ("%s: 0x%lx: warning: could not find expected reloc",
824 bfd_archive_filename (abfd), (unsigned long) paddr));
825 continue;
826 }
827
828 /* Get the value of the symbol referred to by the reloc. */
829 if (! _bfd_coff_get_external_symbols (abfd))
830 goto error_return;
831 bfd_coff_swap_sym_in (abfd,
832 ((bfd_byte *) obj_coff_external_syms (abfd)
833 + (irelfn->r_symndx
834 * bfd_coff_symesz (abfd))),
835 &sym);
836 if (sym.n_scnum != 0 && sym.n_scnum != sec->target_index)
837 {
838 ((*_bfd_error_handler)
839 ("%s: 0x%lx: warning: symbol in unexpected section",
840 bfd_archive_filename (abfd), (unsigned long) paddr));
841 continue;
842 }
843
844 if (sym.n_sclass != C_EXT)
845 {
846 symval = (sym.n_value
847 - sec->vma
848 + sec->output_section->vma
849 + sec->output_offset);
850 }
851 else
852 {
853 struct coff_link_hash_entry *h;
854
855 h = obj_coff_sym_hashes (abfd)[irelfn->r_symndx];
856 BFD_ASSERT (h != NULL);
857 if (h->root.type != bfd_link_hash_defined
858 && h->root.type != bfd_link_hash_defweak)
859 {
860 /* This appears to be a reference to an undefined
861 symbol. Just ignore it--it will be caught by the
862 regular reloc processing. */
863 continue;
864 }
865
866 symval = (h->root.u.def.value
867 + h->root.u.def.section->output_section->vma
868 + h->root.u.def.section->output_offset);
869 }
870
871 symval += bfd_get_32 (abfd, contents + paddr - sec->vma);
872
873 /* See if this function call can be shortened. */
874 foff = (symval
875 - (irel->r_vaddr
876 - sec->vma
877 + sec->output_section->vma
878 + sec->output_offset
879 + 4));
880 if (foff < -0x1000 || foff >= 0x1000)
881 {
882 /* After all that work, we can't shorten this function call. */
883 continue;
884 }
885
886 /* Shorten the function call. */
887
888 /* For simplicity of coding, we are going to modify the section
889 contents, the section relocs, and the BFD symbol table. We
890 must tell the rest of the code not to free up this
891 information. It would be possible to instead create a table
892 of changes which have to be made, as is done in coff-mips.c;
893 that would be more work, but would require less memory when
894 the linker is run. */
895
896 if (coff_section_data (abfd, sec) == NULL)
897 {
898 bfd_size_type amt = sizeof (struct coff_section_tdata);
899 sec->used_by_bfd = (PTR) bfd_zalloc (abfd, amt);
900 if (sec->used_by_bfd == NULL)
901 goto error_return;
902 }
903
904 coff_section_data (abfd, sec)->relocs = internal_relocs;
905 coff_section_data (abfd, sec)->keep_relocs = true;
906 free_relocs = NULL;
907
908 coff_section_data (abfd, sec)->contents = contents;
909 coff_section_data (abfd, sec)->keep_contents = true;
910 free_contents = NULL;
911
912 obj_coff_keep_syms (abfd) = true;
913
914 /* Replace the jsr with a bsr. */
915
916 /* Change the R_SH_USES reloc into an R_SH_PCDISP reloc, and
917 replace the jsr with a bsr. */
918 irel->r_type = R_SH_PCDISP;
919 irel->r_symndx = irelfn->r_symndx;
920 if (sym.n_sclass != C_EXT)
921 {
922 /* If this needs to be changed because of future relaxing,
923 it will be handled here like other internal PCDISP
924 relocs. */
925 bfd_put_16 (abfd,
926 (bfd_vma) 0xb000 | ((foff >> 1) & 0xfff),
927 contents + irel->r_vaddr - sec->vma);
928 }
929 else
930 {
931 /* We can't fully resolve this yet, because the external
932 symbol value may be changed by future relaxing. We let
933 the final link phase handle it. */
934 bfd_put_16 (abfd, (bfd_vma) 0xb000,
935 contents + irel->r_vaddr - sec->vma);
936 }
937
938 /* See if there is another R_SH_USES reloc referring to the same
939 register load. */
940 for (irelscan = internal_relocs; irelscan < irelend; irelscan++)
941 if (irelscan->r_type == R_SH_USES
942 && laddr == irelscan->r_vaddr - sec->vma + 4 + irelscan->r_offset)
943 break;
944 if (irelscan < irelend)
945 {
946 /* Some other function call depends upon this register load,
947 and we have not yet converted that function call.
948 Indeed, we may never be able to convert it. There is
949 nothing else we can do at this point. */
950 continue;
951 }
952
953 /* Look for a R_SH_COUNT reloc on the location where the
954 function address is stored. Do this before deleting any
955 bytes, to avoid confusion about the address. */
956 for (irelcount = internal_relocs; irelcount < irelend; irelcount++)
957 if (irelcount->r_vaddr == paddr
958 && irelcount->r_type == R_SH_COUNT)
959 break;
960
961 /* Delete the register load. */
962 if (! sh_relax_delete_bytes (abfd, sec, laddr, 2))
963 goto error_return;
964
965 /* That will change things, so, just in case it permits some
966 other function call to come within range, we should relax
967 again. Note that this is not required, and it may be slow. */
968 *again = true;
969
970 /* Now check whether we got a COUNT reloc. */
971 if (irelcount >= irelend)
972 {
973 ((*_bfd_error_handler)
974 ("%s: 0x%lx: warning: could not find expected COUNT reloc",
975 bfd_archive_filename (abfd), (unsigned long) paddr));
976 continue;
977 }
978
979 /* The number of uses is stored in the r_offset field. We've
980 just deleted one. */
981 if (irelcount->r_offset == 0)
982 {
983 ((*_bfd_error_handler) ("%s: 0x%lx: warning: bad count",
984 bfd_archive_filename (abfd),
985 (unsigned long) paddr));
986 continue;
987 }
988
989 --irelcount->r_offset;
990
991 /* If there are no more uses, we can delete the address. Reload
992 the address from irelfn, in case it was changed by the
993 previous call to sh_relax_delete_bytes. */
994 if (irelcount->r_offset == 0)
995 {
996 if (! sh_relax_delete_bytes (abfd, sec,
997 irelfn->r_vaddr - sec->vma, 4))
998 goto error_return;
999 }
1000
1001 /* We've done all we can with that function call. */
1002 }
1003
1004 /* Look for load and store instructions that we can align on four
1005 byte boundaries. */
1006 if (have_code)
1007 {
1008 boolean swapped;
1009
1010 /* Get the section contents. */
1011 if (contents == NULL)
1012 {
1013 if (coff_section_data (abfd, sec) != NULL
1014 && coff_section_data (abfd, sec)->contents != NULL)
1015 contents = coff_section_data (abfd, sec)->contents;
1016 else
1017 {
1018 contents = (bfd_byte *) bfd_malloc (sec->_raw_size);
1019 if (contents == NULL)
1020 goto error_return;
1021 free_contents = contents;
1022
1023 if (! bfd_get_section_contents (abfd, sec, contents,
1024 (file_ptr) 0, sec->_raw_size))
1025 goto error_return;
1026 }
1027 }
1028
1029 if (! sh_align_loads (abfd, sec, internal_relocs, contents, &swapped))
1030 goto error_return;
1031
1032 if (swapped)
1033 {
1034 if (coff_section_data (abfd, sec) == NULL)
1035 {
1036 bfd_size_type amt = sizeof (struct coff_section_tdata);
1037 sec->used_by_bfd = (PTR) bfd_zalloc (abfd, amt);
1038 if (sec->used_by_bfd == NULL)
1039 goto error_return;
1040 }
1041
1042 coff_section_data (abfd, sec)->relocs = internal_relocs;
1043 coff_section_data (abfd, sec)->keep_relocs = true;
1044 free_relocs = NULL;
1045
1046 coff_section_data (abfd, sec)->contents = contents;
1047 coff_section_data (abfd, sec)->keep_contents = true;
1048 free_contents = NULL;
1049
1050 obj_coff_keep_syms (abfd) = true;
1051 }
1052 }
1053
1054 if (free_relocs != NULL)
1055 {
1056 free (free_relocs);
1057 free_relocs = NULL;
1058 }
1059
1060 if (free_contents != NULL)
1061 {
1062 if (! link_info->keep_memory)
1063 free (free_contents);
1064 else
1065 {
1066 /* Cache the section contents for coff_link_input_bfd. */
1067 if (coff_section_data (abfd, sec) == NULL)
1068 {
1069 bfd_size_type amt = sizeof (struct coff_section_tdata);
1070 sec->used_by_bfd = (PTR) bfd_zalloc (abfd, amt);
1071 if (sec->used_by_bfd == NULL)
1072 goto error_return;
1073 coff_section_data (abfd, sec)->relocs = NULL;
1074 }
1075 coff_section_data (abfd, sec)->contents = contents;
1076 }
1077 }
1078
1079 return true;
1080
1081 error_return:
1082 if (free_relocs != NULL)
1083 free (free_relocs);
1084 if (free_contents != NULL)
1085 free (free_contents);
1086 return false;
1087 }
1088
1089 /* Delete some bytes from a section while relaxing. */
1090
1091 static boolean
1092 sh_relax_delete_bytes (abfd, sec, addr, count)
1093 bfd *abfd;
1094 asection *sec;
1095 bfd_vma addr;
1096 int count;
1097 {
1098 bfd_byte *contents;
1099 struct internal_reloc *irel, *irelend;
1100 struct internal_reloc *irelalign;
1101 bfd_vma toaddr;
1102 bfd_byte *esym, *esymend;
1103 bfd_size_type symesz;
1104 struct coff_link_hash_entry **sym_hash;
1105 asection *o;
1106
1107 contents = coff_section_data (abfd, sec)->contents;
1108
1109   /* The deletion must stop at the next ALIGN reloc for an alignment
1110 power larger than the number of bytes we are deleting. */
1111
1112 irelalign = NULL;
1113 toaddr = sec->_cooked_size;
1114
1115 irel = coff_section_data (abfd, sec)->relocs;
1116 irelend = irel + sec->reloc_count;
1117 for (; irel < irelend; irel++)
1118 {
1119 if (irel->r_type == R_SH_ALIGN
1120 && irel->r_vaddr - sec->vma > addr
1121 && count < (1 << irel->r_offset))
1122 {
1123 irelalign = irel;
1124 toaddr = irel->r_vaddr - sec->vma;
1125 break;
1126 }
1127 }
1128
1129 /* Actually delete the bytes. */
1130 memmove (contents + addr, contents + addr + count,
1131 (size_t) (toaddr - addr - count));
1132 if (irelalign == NULL)
1133 sec->_cooked_size -= count;
1134 else
1135 {
1136 int i;
1137
1138 #define NOP_OPCODE (0x0009)
1139
1140 BFD_ASSERT ((count & 1) == 0);
1141 for (i = 0; i < count; i += 2)
1142 bfd_put_16 (abfd, (bfd_vma) NOP_OPCODE, contents + toaddr - count + i);
1143 }
1144
1145 /* Adjust all the relocs. */
1146 for (irel = coff_section_data (abfd, sec)->relocs; irel < irelend; irel++)
1147 {
1148 bfd_vma nraddr, stop;
1149 bfd_vma start = 0;
1150 int insn = 0;
1151 struct internal_syment sym;
1152 int off, adjust, oinsn;
1153 bfd_signed_vma voff = 0;
1154 boolean overflow;
1155
1156 /* Get the new reloc address. */
1157 nraddr = irel->r_vaddr - sec->vma;
1158 if ((irel->r_vaddr - sec->vma > addr
1159 && irel->r_vaddr - sec->vma < toaddr)
1160 || (irel->r_type == R_SH_ALIGN
1161 && irel->r_vaddr - sec->vma == toaddr))
1162 nraddr -= count;
1163
1164 /* See if this reloc was for the bytes we have deleted, in which
1165 case we no longer care about it. Don't delete relocs which
1166 represent addresses, though. */
1167 if (irel->r_vaddr - sec->vma >= addr
1168 && irel->r_vaddr - sec->vma < addr + count
1169 && irel->r_type != R_SH_ALIGN
1170 && irel->r_type != R_SH_CODE
1171 && irel->r_type != R_SH_DATA
1172 && irel->r_type != R_SH_LABEL)
1173 irel->r_type = R_SH_UNUSED;
1174
1175 /* If this is a PC relative reloc, see if the range it covers
1176 includes the bytes we have deleted. */
1177 switch (irel->r_type)
1178 {
1179 default:
1180 break;
1181
1182 case R_SH_PCDISP8BY2:
1183 case R_SH_PCDISP:
1184 case R_SH_PCRELIMM8BY2:
1185 case R_SH_PCRELIMM8BY4:
1186 start = irel->r_vaddr - sec->vma;
1187 insn = bfd_get_16 (abfd, contents + nraddr);
1188 break;
1189 }
1190
1191 switch (irel->r_type)
1192 {
1193 default:
1194 start = stop = addr;
1195 break;
1196
1197 case R_SH_IMM32:
1198 #ifdef COFF_WITH_PE
1199 case R_SH_IMM32CE:
1200 case R_SH_IMAGEBASE:
1201 #endif
1202 /* If this reloc is against a symbol defined in this
1203 section, and the symbol will not be adjusted below, we
1204 	     must check the addend to see whether it puts the value into the
1205 	     range being adjusted; if so, the section contents must be changed. */
1206 bfd_coff_swap_sym_in (abfd,
1207 ((bfd_byte *) obj_coff_external_syms (abfd)
1208 + (irel->r_symndx
1209 * bfd_coff_symesz (abfd))),
1210 &sym);
1211 if (sym.n_sclass != C_EXT
1212 && sym.n_scnum == sec->target_index
1213 && ((bfd_vma) sym.n_value <= addr
1214 || (bfd_vma) sym.n_value >= toaddr))
1215 {
1216 bfd_vma val;
1217
1218 val = bfd_get_32 (abfd, contents + nraddr);
1219 val += sym.n_value;
1220 if (val > addr && val < toaddr)
1221 bfd_put_32 (abfd, val - count, contents + nraddr);
1222 }
1223 start = stop = addr;
1224 break;
1225
1226 case R_SH_PCDISP8BY2:
1227 off = insn & 0xff;
1228 if (off & 0x80)
1229 off -= 0x100;
1230 stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1231 break;
1232
1233 case R_SH_PCDISP:
1234 bfd_coff_swap_sym_in (abfd,
1235 ((bfd_byte *) obj_coff_external_syms (abfd)
1236 + (irel->r_symndx
1237 * bfd_coff_symesz (abfd))),
1238 &sym);
1239 if (sym.n_sclass == C_EXT)
1240 start = stop = addr;
1241 else
1242 {
1243 off = insn & 0xfff;
1244 if (off & 0x800)
1245 off -= 0x1000;
1246 stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1247 }
1248 break;
1249
1250 case R_SH_PCRELIMM8BY2:
1251 off = insn & 0xff;
1252 stop = start + 4 + off * 2;
1253 break;
1254
1255 case R_SH_PCRELIMM8BY4:
1256 off = insn & 0xff;
1257 stop = (start &~ (bfd_vma) 3) + 4 + off * 4;
1258 break;
1259
1260 case R_SH_SWITCH8:
1261 case R_SH_SWITCH16:
1262 case R_SH_SWITCH32:
1263 	  /* These reloc types represent
1264 .word L2-L1
1265 The r_offset field holds the difference between the reloc
1266 address and L1. That is the start of the reloc, and
1267 adding in the contents gives us the top. We must adjust
1268 both the r_offset field and the section contents. */
1269
1270 start = irel->r_vaddr - sec->vma;
1271 stop = (bfd_vma) ((bfd_signed_vma) start - (long) irel->r_offset);
1272
1273 if (start > addr
1274 && start < toaddr
1275 && (stop <= addr || stop >= toaddr))
1276 irel->r_offset += count;
1277 else if (stop > addr
1278 && stop < toaddr
1279 && (start <= addr || start >= toaddr))
1280 irel->r_offset -= count;
1281
1282 start = stop;
1283
1284 if (irel->r_type == R_SH_SWITCH16)
1285 voff = bfd_get_signed_16 (abfd, contents + nraddr);
1286 else if (irel->r_type == R_SH_SWITCH8)
1287 voff = bfd_get_8 (abfd, contents + nraddr);
1288 else
1289 voff = bfd_get_signed_32 (abfd, contents + nraddr);
1290 stop = (bfd_vma) ((bfd_signed_vma) start + voff);
1291
1292 break;
1293
1294 case R_SH_USES:
1295 start = irel->r_vaddr - sec->vma;
1296 stop = (bfd_vma) ((bfd_signed_vma) start
1297 + (long) irel->r_offset
1298 + 4);
1299 break;
1300 }
1301
1302 if (start > addr
1303 && start < toaddr
1304 && (stop <= addr || stop >= toaddr))
1305 adjust = count;
1306 else if (stop > addr
1307 && stop < toaddr
1308 && (start <= addr || start >= toaddr))
1309 adjust = - count;
1310 else
1311 adjust = 0;
1312
1313 if (adjust != 0)
1314 {
1315 oinsn = insn;
1316 overflow = false;
1317 switch (irel->r_type)
1318 {
1319 default:
1320 abort ();
1321 break;
1322
1323 case R_SH_PCDISP8BY2:
1324 case R_SH_PCRELIMM8BY2:
1325 insn += adjust / 2;
1326 if ((oinsn & 0xff00) != (insn & 0xff00))
1327 overflow = true;
1328 bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1329 break;
1330
1331 case R_SH_PCDISP:
1332 insn += adjust / 2;
1333 if ((oinsn & 0xf000) != (insn & 0xf000))
1334 overflow = true;
1335 bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1336 break;
1337
1338 case R_SH_PCRELIMM8BY4:
1339 BFD_ASSERT (adjust == count || count >= 4);
1340 if (count >= 4)
1341 insn += adjust / 4;
1342 else
1343 {
1344 if ((irel->r_vaddr & 3) == 0)
1345 ++insn;
1346 }
1347 if ((oinsn & 0xff00) != (insn & 0xff00))
1348 overflow = true;
1349 bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1350 break;
1351
1352 case R_SH_SWITCH8:
1353 voff += adjust;
1354 if (voff < 0 || voff >= 0xff)
1355 overflow = true;
1356 bfd_put_8 (abfd, (bfd_vma) voff, contents + nraddr);
1357 break;
1358
1359 case R_SH_SWITCH16:
1360 voff += adjust;
1361 if (voff < - 0x8000 || voff >= 0x8000)
1362 overflow = true;
1363 bfd_put_signed_16 (abfd, (bfd_vma) voff, contents + nraddr);
1364 break;
1365
1366 case R_SH_SWITCH32:
1367 voff += adjust;
1368 bfd_put_signed_32 (abfd, (bfd_vma) voff, contents + nraddr);
1369 break;
1370
1371 case R_SH_USES:
1372 irel->r_offset += adjust;
1373 break;
1374 }
1375
1376 if (overflow)
1377 {
1378 ((*_bfd_error_handler)
1379 ("%s: 0x%lx: fatal: reloc overflow while relaxing",
1380 bfd_archive_filename (abfd), (unsigned long) irel->r_vaddr));
1381 bfd_set_error (bfd_error_bad_value);
1382 return false;
1383 }
1384 }
1385
1386 irel->r_vaddr = nraddr + sec->vma;
1387 }
1388
1389   /* Look through all the other sections.  If they contain any IMM32
1390 relocs against internal symbols which we are not going to adjust
1391 below, we may need to adjust the addends. */
1392 for (o = abfd->sections; o != NULL; o = o->next)
1393 {
1394 struct internal_reloc *internal_relocs;
1395 struct internal_reloc *irelscan, *irelscanend;
1396 bfd_byte *ocontents;
1397
1398 if (o == sec
1399 || (o->flags & SEC_RELOC) == 0
1400 || o->reloc_count == 0)
1401 continue;
1402
1403 /* We always cache the relocs. Perhaps, if info->keep_memory is
1404 false, we should free them, if we are permitted to, when we
1405 	 leave sh_relax_section. */
1406 internal_relocs = (_bfd_coff_read_internal_relocs
1407 (abfd, o, true, (bfd_byte *) NULL, false,
1408 (struct internal_reloc *) NULL));
1409 if (internal_relocs == NULL)
1410 return false;
1411
1412 ocontents = NULL;
1413 irelscanend = internal_relocs + o->reloc_count;
1414 for (irelscan = internal_relocs; irelscan < irelscanend; irelscan++)
1415 {
1416 struct internal_syment sym;
1417
1418 #ifdef COFF_WITH_PE
1419 if (irelscan->r_type != R_SH_IMM32
1420 && irelscan->r_type != R_SH_IMAGEBASE
1421 && irelscan->r_type != R_SH_IMM32CE)
1422 #else
1423 if (irelscan->r_type != R_SH_IMM32)
1424 #endif
1425 continue;
1426
1427 bfd_coff_swap_sym_in (abfd,
1428 ((bfd_byte *) obj_coff_external_syms (abfd)
1429 + (irelscan->r_symndx
1430 * bfd_coff_symesz (abfd))),
1431 &sym);
1432 if (sym.n_sclass != C_EXT
1433 && sym.n_scnum == sec->target_index
1434 && ((bfd_vma) sym.n_value <= addr
1435 || (bfd_vma) sym.n_value >= toaddr))
1436 {
1437 bfd_vma val;
1438
1439 if (ocontents == NULL)
1440 {
1441 if (coff_section_data (abfd, o)->contents != NULL)
1442 ocontents = coff_section_data (abfd, o)->contents;
1443 else
1444 {
1445 /* We always cache the section contents.
1446 Perhaps, if info->keep_memory is false, we
1447 should free them, if we are permitted to,
1448 		     when we leave sh_relax_section. */
1449 ocontents = (bfd_byte *) bfd_malloc (o->_raw_size);
1450 if (ocontents == NULL)
1451 return false;
1452 if (! bfd_get_section_contents (abfd, o, ocontents,
1453 (file_ptr) 0,
1454 o->_raw_size))
1455 return false;
1456 coff_section_data (abfd, o)->contents = ocontents;
1457 }
1458 }
1459
1460 val = bfd_get_32 (abfd, ocontents + irelscan->r_vaddr - o->vma);
1461 val += sym.n_value;
1462 if (val > addr && val < toaddr)
1463 bfd_put_32 (abfd, val - count,
1464 ocontents + irelscan->r_vaddr - o->vma);
1465
1466 coff_section_data (abfd, o)->keep_contents = true;
1467 }
1468 }
1469 }
1470
1471 /* Adjusting the internal symbols will not work if something has
1472 already retrieved the generic symbols. It would be possible to
1473 make this work by adjusting the generic symbols at the same time.
1474 However, this case should not arise in normal usage. */
1475 if (obj_symbols (abfd) != NULL
1476 || obj_raw_syments (abfd) != NULL)
1477 {
1478 ((*_bfd_error_handler)
1479 ("%s: fatal: generic symbols retrieved before relaxing",
1480 bfd_archive_filename (abfd)));
1481 bfd_set_error (bfd_error_invalid_operation);
1482 return false;
1483 }
1484
1485 /* Adjust all the symbols. */
1486 sym_hash = obj_coff_sym_hashes (abfd);
1487 symesz = bfd_coff_symesz (abfd);
1488 esym = (bfd_byte *) obj_coff_external_syms (abfd);
1489 esymend = esym + obj_raw_syment_count (abfd) * symesz;
1490 while (esym < esymend)
1491 {
1492 struct internal_syment isym;
1493
1494 bfd_coff_swap_sym_in (abfd, (PTR) esym, (PTR) &isym);
1495
1496 if (isym.n_scnum == sec->target_index
1497 && (bfd_vma) isym.n_value > addr
1498 && (bfd_vma) isym.n_value < toaddr)
1499 {
1500 isym.n_value -= count;
1501
1502 bfd_coff_swap_sym_out (abfd, (PTR) &isym, (PTR) esym);
1503
1504 if (*sym_hash != NULL)
1505 {
1506 BFD_ASSERT ((*sym_hash)->root.type == bfd_link_hash_defined
1507 || (*sym_hash)->root.type == bfd_link_hash_defweak);
1508 BFD_ASSERT ((*sym_hash)->root.u.def.value >= addr
1509 && (*sym_hash)->root.u.def.value < toaddr);
1510 (*sym_hash)->root.u.def.value -= count;
1511 }
1512 }
1513
1514 esym += (isym.n_numaux + 1) * symesz;
1515 sym_hash += isym.n_numaux + 1;
1516 }
1517
1518 /* See if we can move the ALIGN reloc forward. We have adjusted
1519 r_vaddr for it already. */
1520 if (irelalign != NULL)
1521 {
1522 bfd_vma alignto, alignaddr;
1523
1524 alignto = BFD_ALIGN (toaddr, 1 << irelalign->r_offset);
1525 alignaddr = BFD_ALIGN (irelalign->r_vaddr - sec->vma,
1526 1 << irelalign->r_offset);
1527 if (alignto != alignaddr)
1528 {
1529 /* Tail recursion. */
1530 return sh_relax_delete_bytes (abfd, sec, alignaddr,
1531 (int) (alignto - alignaddr));
1532 }
1533 }
1534
1535 return true;
1536 }
1537 \f
1538 /* This is yet another version of the SH opcode table, used to rapidly
1539 get information about a particular instruction. */
1540
1541 /* The opcode map is represented by an array of these structures. The
1542 array is indexed by the high order four bits in the instruction. */
1543
1544 struct sh_major_opcode
1545 {
1546 /* A pointer to the instruction list. This is an array which
1547 contains all the instructions with this major opcode. */
1548 const struct sh_minor_opcode *minor_opcodes;
1549 /* The number of elements in minor_opcodes. */
1550 unsigned short count;
1551 };
1552
1553 /* This structure holds information for a set of SH opcodes. The
1554 instruction code is anded with the mask value, and the resulting
1555    value is used to search the sorted opcode list. */
1556
1557 struct sh_minor_opcode
1558 {
1559 /* The sorted opcode list. */
1560 const struct sh_opcode *opcodes;
1561 /* The number of elements in opcodes. */
1562 unsigned short count;
1563 /* The mask value to use when searching the opcode list. */
1564 unsigned short mask;
1565 };
1566
1567 /* This structure holds information for an SH instruction. An array
1568 of these structures is sorted in order by opcode. */
1569
1570 struct sh_opcode
1571 {
1572 /* The code for this instruction, after it has been anded with the
1573      mask value in the sh_minor_opcode structure. */
1574 unsigned short opcode;
1575 /* Flags for this instruction. */
1576 unsigned long flags;
1577 };
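
/* A sketch of how these tables are consulted (the actual lookup is
   sh_insn_info, declared above and defined later in this file): the
   major table is indexed by the top four bits of an instruction; for
   each sh_minor_opcode in that entry the instruction is anded with the
   mask and the sorted opcodes array is scanned for a matching code,
   whose flags then describe what the instruction loads, stores, uses
   and sets.  */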
1578
1579 /* Flags which appear in the sh_opcode structure. */
1580
1581 /* This instruction loads a value from memory. */
1582 #define LOAD (0x1)
1583
1584 /* This instruction stores a value to memory. */
1585 #define STORE (0x2)
1586
1587 /* This instruction is a branch. */
1588 #define BRANCH (0x4)
1589
1590 /* This instruction has a delay slot. */
1591 #define DELAY (0x8)
1592
1593 /* This instruction uses the value in the register in the field at
1594 mask 0x0f00 of the instruction. */
1595 #define USES1 (0x10)
1596 #define USES1_REG(x) ((x & 0x0f00) >> 8)
1597
1598 /* This instruction uses the value in the register in the field at
1599 mask 0x00f0 of the instruction. */
1600 #define USES2 (0x20)
1601 #define USES2_REG(x) ((x & 0x00f0) >> 4)
1602
1603 /* This instruction uses the value in register 0. */
1604 #define USESR0 (0x40)
1605
1606 /* This instruction sets the value in the register in the field at
1607 mask 0x0f00 of the instruction. */
1608 #define SETS1 (0x80)
1609 #define SETS1_REG(x) ((x & 0x0f00) >> 8)
1610
1611 /* This instruction sets the value in the register in the field at
1612 mask 0x00f0 of the instruction. */
1613 #define SETS2 (0x100)
1614 #define SETS2_REG(x) ((x & 0x00f0) >> 4)
1615
1616 /* This instruction sets register 0. */
1617 #define SETSR0 (0x200)
1618
1619 /* This instruction sets a special register. */
1620 #define SETSSP (0x400)
1621
1622 /* This instruction uses a special register. */
1623 #define USESSP (0x800)
1624
1625 /* This instruction uses the floating point register in the field at
1626 mask 0x0f00 of the instruction. */
1627 #define USESF1 (0x1000)
1628 #define USESF1_REG(x) ((x & 0x0f00) >> 8)
1629
1630 /* This instruction uses the floating point register in the field at
1631 mask 0x00f0 of the instruction. */
1632 #define USESF2 (0x2000)
1633 #define USESF2_REG(x) ((x & 0x00f0) >> 4)
1634
1635 /* This instruction uses floating point register 0. */
1636 #define USESF0 (0x4000)
1637
1638 /* This instruction sets the floating point register in the field at
1639 mask 0x0f00 of the instruction. */
1640 #define SETSF1 (0x8000)
1641 #define SETSF1_REG(x) ((x & 0x0f00) >> 8)
1642
1643 #define USESAS (0x10000)
1644 #define USESAS_REG(x) (((((x) >> 8) - 2) & 3) + 2)
1645 #define USESR8 (0x20000)
1646 #define SETSAS (0x40000)
1647 #define SETSAS_REG(x) USESAS_REG (x)
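
/* Worked example (illustrative): the sh_opcode20 entry below,
   { 0x2005, STORE | SETS1 | USES1 | USES2 }, describes mov.w rm,@-rn.
   It stores to memory, pre-decrements rn and so both uses and sets the
   register in the 0x0f00 field (USES1_REG/SETS1_REG yield rn), and
   uses the register in the 0x00f0 field (USES2_REG yields rm).  */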
1648
1649 #ifndef COFF_IMAGE_WITH_PE
1650 static boolean sh_insn_uses_reg
1651 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1652 static boolean sh_insn_sets_reg
1653 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1654 static boolean sh_insn_uses_or_sets_reg
1655 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1656 static boolean sh_insn_uses_freg
1657 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1658 static boolean sh_insn_sets_freg
1659 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1660 static boolean sh_insn_uses_or_sets_freg
1661 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1662 static boolean sh_insns_conflict
1663 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
1664 const struct sh_opcode *));
1665 static boolean sh_load_use
1666 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
1667 const struct sh_opcode *));
1668 #endif
1669 /* The opcode maps. */
1670
1671 #define MAP(a) a, sizeof a / sizeof a[0]
1672
1673 static const struct sh_opcode sh_opcode00[] =
1674 {
1675 { 0x0008, SETSSP }, /* clrt */
1676 { 0x0009, 0 }, /* nop */
1677 { 0x000b, BRANCH | DELAY | USESSP }, /* rts */
1678 { 0x0018, SETSSP }, /* sett */
1679 { 0x0019, SETSSP }, /* div0u */
1680 { 0x001b, 0 }, /* sleep */
1681 { 0x0028, SETSSP }, /* clrmac */
1682 { 0x002b, BRANCH | DELAY | SETSSP }, /* rte */
1683 { 0x0038, USESSP | SETSSP }, /* ldtlb */
1684 { 0x0048, SETSSP }, /* clrs */
1685 { 0x0058, SETSSP } /* sets */
1686 };
1687
1688 static const struct sh_opcode sh_opcode01[] =
1689 {
1690 { 0x0003, BRANCH | DELAY | USES1 | SETSSP }, /* bsrf rn */
1691 { 0x000a, SETS1 | USESSP }, /* sts mach,rn */
1692 { 0x001a, SETS1 | USESSP }, /* sts macl,rn */
1693 { 0x0023, BRANCH | DELAY | USES1 }, /* braf rn */
1694 { 0x0029, SETS1 | USESSP }, /* movt rn */
1695 { 0x002a, SETS1 | USESSP }, /* sts pr,rn */
1696 { 0x005a, SETS1 | USESSP }, /* sts fpul,rn */
1697 { 0x006a, SETS1 | USESSP }, /* sts fpscr,rn / sts dsr,rn */
1698 { 0x0083, LOAD | USES1 }, /* pref @rn */
1699 { 0x007a, SETS1 | USESSP }, /* sts a0,rn */
1700 { 0x008a, SETS1 | USESSP }, /* sts x0,rn */
1701 { 0x009a, SETS1 | USESSP }, /* sts x1,rn */
1702 { 0x00aa, SETS1 | USESSP }, /* sts y0,rn */
1703 { 0x00ba, SETS1 | USESSP } /* sts y1,rn */
1704 };
1705
1706 /* These sixteen instructions can be handled with one table entry below. */
1707 #if 0
1708 { 0x0002, SETS1 | USESSP }, /* stc sr,rn */
1709 { 0x0012, SETS1 | USESSP }, /* stc gbr,rn */
1710 { 0x0022, SETS1 | USESSP }, /* stc vbr,rn */
1711 { 0x0032, SETS1 | USESSP }, /* stc ssr,rn */
1712 { 0x0042, SETS1 | USESSP }, /* stc spc,rn */
1713 { 0x0052, SETS1 | USESSP }, /* stc mod,rn */
1714 { 0x0062, SETS1 | USESSP }, /* stc rs,rn */
1715 { 0x0072, SETS1 | USESSP }, /* stc re,rn */
1716 { 0x0082, SETS1 | USESSP }, /* stc r0_bank,rn */
1717 { 0x0092, SETS1 | USESSP }, /* stc r1_bank,rn */
1718 { 0x00a2, SETS1 | USESSP }, /* stc r2_bank,rn */
1719 { 0x00b2, SETS1 | USESSP }, /* stc r3_bank,rn */
1720 { 0x00c2, SETS1 | USESSP }, /* stc r4_bank,rn */
1721 { 0x00d2, SETS1 | USESSP }, /* stc r5_bank,rn */
1722 { 0x00e2, SETS1 | USESSP }, /* stc r6_bank,rn */
1723 { 0x00f2, SETS1 | USESSP } /* stc r7_bank,rn */
1724 #endif
1725
1726 static const struct sh_opcode sh_opcode02[] =
1727 {
1728 { 0x0002, SETS1 | USESSP }, /* stc <special_reg>,rn */
1729 { 0x0004, STORE | USES1 | USES2 | USESR0 }, /* mov.b rm,@(r0,rn) */
1730 { 0x0005, STORE | USES1 | USES2 | USESR0 }, /* mov.w rm,@(r0,rn) */
1731 { 0x0006, STORE | USES1 | USES2 | USESR0 }, /* mov.l rm,@(r0,rn) */
1732 { 0x0007, SETSSP | USES1 | USES2 }, /* mul.l rm,rn */
1733 { 0x000c, LOAD | SETS1 | USES2 | USESR0 }, /* mov.b @(r0,rm),rn */
1734 { 0x000d, LOAD | SETS1 | USES2 | USESR0 }, /* mov.w @(r0,rm),rn */
1735 { 0x000e, LOAD | SETS1 | USES2 | USESR0 }, /* mov.l @(r0,rm),rn */
1736 { 0x000f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.l @rm+,@rn+ */
1737 };
1738
1739 static const struct sh_minor_opcode sh_opcode0[] =
1740 {
1741 { MAP (sh_opcode00), 0xffff },
1742 { MAP (sh_opcode01), 0xf0ff },
1743 { MAP (sh_opcode02), 0xf00f }
1744 };
1745
1746 static const struct sh_opcode sh_opcode10[] =
1747 {
1748 { 0x1000, STORE | USES1 | USES2 } /* mov.l rm,@(disp,rn) */
1749 };
1750
1751 static const struct sh_minor_opcode sh_opcode1[] =
1752 {
1753 { MAP (sh_opcode10), 0xf000 }
1754 };
1755
1756 static const struct sh_opcode sh_opcode20[] =
1757 {
1758 { 0x2000, STORE | USES1 | USES2 }, /* mov.b rm,@rn */
1759 { 0x2001, STORE | USES1 | USES2 }, /* mov.w rm,@rn */
1760 { 0x2002, STORE | USES1 | USES2 }, /* mov.l rm,@rn */
1761 { 0x2004, STORE | SETS1 | USES1 | USES2 }, /* mov.b rm,@-rn */
1762 { 0x2005, STORE | SETS1 | USES1 | USES2 }, /* mov.w rm,@-rn */
1763 { 0x2006, STORE | SETS1 | USES1 | USES2 }, /* mov.l rm,@-rn */
1764 { 0x2007, SETSSP | USES1 | USES2 | USESSP }, /* div0s */
1765 { 0x2008, SETSSP | USES1 | USES2 }, /* tst rm,rn */
1766 { 0x2009, SETS1 | USES1 | USES2 }, /* and rm,rn */
1767 { 0x200a, SETS1 | USES1 | USES2 }, /* xor rm,rn */
1768 { 0x200b, SETS1 | USES1 | USES2 }, /* or rm,rn */
1769 { 0x200c, SETSSP | USES1 | USES2 }, /* cmp/str rm,rn */
1770 { 0x200d, SETS1 | USES1 | USES2 }, /* xtrct rm,rn */
1771 { 0x200e, SETSSP | USES1 | USES2 }, /* mulu.w rm,rn */
1772 { 0x200f, SETSSP | USES1 | USES2 } /* muls.w rm,rn */
1773 };
1774
1775 static const struct sh_minor_opcode sh_opcode2[] =
1776 {
1777 { MAP (sh_opcode20), 0xf00f }
1778 };
1779
1780 static const struct sh_opcode sh_opcode30[] =
1781 {
1782 { 0x3000, SETSSP | USES1 | USES2 }, /* cmp/eq rm,rn */
1783 { 0x3002, SETSSP | USES1 | USES2 }, /* cmp/hs rm,rn */
1784 { 0x3003, SETSSP | USES1 | USES2 }, /* cmp/ge rm,rn */
1785 { 0x3004, SETSSP | USESSP | USES1 | USES2 }, /* div1 rm,rn */
1786 { 0x3005, SETSSP | USES1 | USES2 }, /* dmulu.l rm,rn */
1787 { 0x3006, SETSSP | USES1 | USES2 }, /* cmp/hi rm,rn */
1788 { 0x3007, SETSSP | USES1 | USES2 }, /* cmp/gt rm,rn */
1789 { 0x3008, SETS1 | USES1 | USES2 }, /* sub rm,rn */
1790 { 0x300a, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* subc rm,rn */
1791 { 0x300b, SETS1 | SETSSP | USES1 | USES2 }, /* subv rm,rn */
1792 { 0x300c, SETS1 | USES1 | USES2 }, /* add rm,rn */
1793 { 0x300d, SETSSP | USES1 | USES2 }, /* dmuls.l rm,rn */
1794 { 0x300e, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* addc rm,rn */
1795 { 0x300f, SETS1 | SETSSP | USES1 | USES2 } /* addv rm,rn */
1796 };
1797
1798 static const struct sh_minor_opcode sh_opcode3[] =
1799 {
1800 { MAP (sh_opcode30), 0xf00f }
1801 };
1802
1803 static const struct sh_opcode sh_opcode40[] =
1804 {
1805 { 0x4000, SETS1 | SETSSP | USES1 }, /* shll rn */
1806 { 0x4001, SETS1 | SETSSP | USES1 }, /* shlr rn */
1807 { 0x4002, STORE | SETS1 | USES1 | USESSP }, /* sts.l mach,@-rn */
1808 { 0x4004, SETS1 | SETSSP | USES1 }, /* rotl rn */
1809 { 0x4005, SETS1 | SETSSP | USES1 }, /* rotr rn */
1810 { 0x4006, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,mach */
1811 { 0x4008, SETS1 | USES1 }, /* shll2 rn */
1812 { 0x4009, SETS1 | USES1 }, /* shlr2 rn */
1813 { 0x400a, SETSSP | USES1 }, /* lds rm,mach */
1814 { 0x400b, BRANCH | DELAY | USES1 }, /* jsr @rn */
1815 { 0x4010, SETS1 | SETSSP | USES1 }, /* dt rn */
1816 { 0x4011, SETSSP | USES1 }, /* cmp/pz rn */
1817 { 0x4012, STORE | SETS1 | USES1 | USESSP }, /* sts.l macl,@-rn */
1818 { 0x4014, SETSSP | USES1 }, /* setrc rm */
1819 { 0x4015, SETSSP | USES1 }, /* cmp/pl rn */
1820 { 0x4016, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,macl */
1821 { 0x4018, SETS1 | USES1 }, /* shll8 rn */
1822 { 0x4019, SETS1 | USES1 }, /* shlr8 rn */
1823 { 0x401a, SETSSP | USES1 }, /* lds rm,macl */
1824 { 0x401b, LOAD | SETSSP | USES1 }, /* tas.b @rn */
1825 { 0x4020, SETS1 | SETSSP | USES1 }, /* shal rn */
1826 { 0x4021, SETS1 | SETSSP | USES1 }, /* shar rn */
1827 { 0x4022, STORE | SETS1 | USES1 | USESSP }, /* sts.l pr,@-rn */
1828 { 0x4024, SETS1 | SETSSP | USES1 | USESSP }, /* rotcl rn */
1829 { 0x4025, SETS1 | SETSSP | USES1 | USESSP }, /* rotcr rn */
1830 { 0x4026, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,pr */
1831 { 0x4028, SETS1 | USES1 }, /* shll16 rn */
1832 { 0x4029, SETS1 | USES1 }, /* shlr16 rn */
1833 { 0x402a, SETSSP | USES1 }, /* lds rm,pr */
1834 { 0x402b, BRANCH | DELAY | USES1 }, /* jmp @rn */
1835 { 0x4052, STORE | SETS1 | USES1 | USESSP }, /* sts.l fpul,@-rn */
1836 { 0x4056, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,fpul */
1837 { 0x405a, SETSSP | USES1 }, /* lds rm,fpul */
1838 { 0x4062, STORE | SETS1 | USES1 | USESSP }, /* sts.l fpscr / dsr,@-rn */
1839 { 0x4066, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,fpscr / dsr */
1840 { 0x406a, SETSSP | USES1 }, /* lds rm,fpscr / lds rm,dsr */
1841 { 0x4072, STORE | SETS1 | USES1 | USESSP }, /* sts.l a0,@-rn */
1842 { 0x4076, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,a0 */
1843 { 0x407a, SETSSP | USES1 }, /* lds rm,a0 */
1844 { 0x4082, STORE | SETS1 | USES1 | USESSP }, /* sts.l x0,@-rn */
1845 { 0x4086, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,x0 */
1846 { 0x408a, SETSSP | USES1 }, /* lds rm,x0 */
1847 { 0x4092, STORE | SETS1 | USES1 | USESSP }, /* sts.l x1,@-rn */
1848 { 0x4096, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,x1 */
1849 { 0x409a, SETSSP | USES1 }, /* lds rm,x1 */
1850 { 0x40a2, STORE | SETS1 | USES1 | USESSP }, /* sts.l y0,@-rn */
1851 { 0x40a6, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,y0 */
1852 { 0x40aa, SETSSP | USES1 }, /* lds rm,y0 */
1853 { 0x40b2, STORE | SETS1 | USES1 | USESSP }, /* sts.l y1,@-rn */
1854 { 0x40b6, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,y1 */
1855 { 0x40ba, SETSSP | USES1 } /* lds rm,y1 */
1856 #if 0 /* These groups of sixteen insns can each be
1857 handled with one table entry below. */
1858 { 0x4003, STORE | SETS1 | USES1 | USESSP }, /* stc.l sr,@-rn */
1859 { 0x4013, STORE | SETS1 | USES1 | USESSP }, /* stc.l gbr,@-rn */
1860 { 0x4023, STORE | SETS1 | USES1 | USESSP }, /* stc.l vbr,@-rn */
1861 { 0x4033, STORE | SETS1 | USES1 | USESSP }, /* stc.l ssr,@-rn */
1862 { 0x4043, STORE | SETS1 | USES1 | USESSP }, /* stc.l spc,@-rn */
1863 { 0x4053, STORE | SETS1 | USES1 | USESSP }, /* stc.l mod,@-rn */
1864 { 0x4063, STORE | SETS1 | USES1 | USESSP }, /* stc.l rs,@-rn */
1865 { 0x4073, STORE | SETS1 | USES1 | USESSP }, /* stc.l re,@-rn */
1866 { 0x4083, STORE | SETS1 | USES1 | USESSP }, /* stc.l r0_bank,@-rn */
1867 ..
1868 { 0x40f3, STORE | SETS1 | USES1 | USESSP }, /* stc.l r7_bank,@-rn */
1869
1870 { 0x4007, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,sr */
1871 { 0x4017, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,gbr */
1872 { 0x4027, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,vbr */
1873 { 0x4037, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,ssr */
1874 { 0x4047, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,spc */
1875 { 0x4057, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,mod */
1876 { 0x4067, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,rs */
1877 { 0x4077, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,re */
1878 { 0x4087, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,r0_bank */
1879 ..
1880 { 0x40f7, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,r7_bank */
1881
1882 { 0x400e, SETSSP | USES1 }, /* ldc rm,sr */
1883 { 0x401e, SETSSP | USES1 }, /* ldc rm,gbr */
1884 { 0x402e, SETSSP | USES1 }, /* ldc rm,vbr */
1885 { 0x403e, SETSSP | USES1 }, /* ldc rm,ssr */
1886 { 0x404e, SETSSP | USES1 }, /* ldc rm,spc */
1887 { 0x405e, SETSSP | USES1 }, /* ldc rm,mod */
1888 { 0x406e, SETSSP | USES1 }, /* ldc rm,rs */
1889 { 0x407e, SETSSP | USES1 }, /* ldc rm,re */
1890 { 0x408e, SETSSP | USES1 }, /* ldc rm,r0_bank */
1891 ..
1892 { 0x40fe, SETSSP | USES1 } /* ldc rm,r7_bank */
1893 #endif
1894 };
1895
1896 static const struct sh_opcode sh_opcode41[] =
1897 {
1898 { 0x4003, STORE | SETS1 | USES1 | USESSP }, /* stc.l <special_reg>,@-rn */
1899 { 0x4007, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,<special_reg> */
1900 { 0x400c, SETS1 | USES1 | USES2 }, /* shad rm,rn */
1901 { 0x400d, SETS1 | USES1 | USES2 }, /* shld rm,rn */
1902 { 0x400e, SETSSP | USES1 }, /* ldc rm,<special_reg> */
1903 { 0x400f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.w @rm+,@rn+ */
1904 };
1905
1906 static const struct sh_minor_opcode sh_opcode4[] =
1907 {
1908 { MAP (sh_opcode40), 0xf0ff },
1909 { MAP (sh_opcode41), 0xf00f }
1910 };
1911
1912 static const struct sh_opcode sh_opcode50[] =
1913 {
1914 { 0x5000, LOAD | SETS1 | USES2 } /* mov.l @(disp,rm),rn */
1915 };
1916
1917 static const struct sh_minor_opcode sh_opcode5[] =
1918 {
1919 { MAP (sh_opcode50), 0xf000 }
1920 };
1921
1922 static const struct sh_opcode sh_opcode60[] =
1923 {
1924 { 0x6000, LOAD | SETS1 | USES2 }, /* mov.b @rm,rn */
1925 { 0x6001, LOAD | SETS1 | USES2 }, /* mov.w @rm,rn */
1926 { 0x6002, LOAD | SETS1 | USES2 }, /* mov.l @rm,rn */
1927 { 0x6003, SETS1 | USES2 }, /* mov rm,rn */
1928 { 0x6004, LOAD | SETS1 | SETS2 | USES2 }, /* mov.b @rm+,rn */
1929 { 0x6005, LOAD | SETS1 | SETS2 | USES2 }, /* mov.w @rm+,rn */
1930 { 0x6006, LOAD | SETS1 | SETS2 | USES2 }, /* mov.l @rm+,rn */
1931 { 0x6007, SETS1 | USES2 }, /* not rm,rn */
1932 { 0x6008, SETS1 | USES2 }, /* swap.b rm,rn */
1933 { 0x6009, SETS1 | USES2 }, /* swap.w rm,rn */
1934 { 0x600a, SETS1 | SETSSP | USES2 | USESSP }, /* negc rm,rn */
1935 { 0x600b, SETS1 | USES2 }, /* neg rm,rn */
1936 { 0x600c, SETS1 | USES2 }, /* extu.b rm,rn */
1937 { 0x600d, SETS1 | USES2 }, /* extu.w rm,rn */
1938 { 0x600e, SETS1 | USES2 }, /* exts.b rm,rn */
1939 { 0x600f, SETS1 | USES2 } /* exts.w rm,rn */
1940 };
1941
1942 static const struct sh_minor_opcode sh_opcode6[] =
1943 {
1944 { MAP (sh_opcode60), 0xf00f }
1945 };
1946
1947 static const struct sh_opcode sh_opcode70[] =
1948 {
1949 { 0x7000, SETS1 | USES1 } /* add #imm,rn */
1950 };
1951
1952 static const struct sh_minor_opcode sh_opcode7[] =
1953 {
1954 { MAP (sh_opcode70), 0xf000 }
1955 };
1956
1957 static const struct sh_opcode sh_opcode80[] =
1958 {
1959 { 0x8000, STORE | USES2 | USESR0 }, /* mov.b r0,@(disp,rn) */
1960 { 0x8100, STORE | USES2 | USESR0 }, /* mov.w r0,@(disp,rn) */
1961 { 0x8200, SETSSP }, /* setrc #imm */
1962 { 0x8400, LOAD | SETSR0 | USES2 }, /* mov.b @(disp,rm),r0 */
1963 { 0x8500, LOAD | SETSR0 | USES2 }, /* mov.w @(disp,rm),r0 */
1964 { 0x8800, SETSSP | USESR0 }, /* cmp/eq #imm,r0 */
1965 { 0x8900, BRANCH | USESSP }, /* bt label */
1966 { 0x8b00, BRANCH | USESSP }, /* bf label */
1967 { 0x8c00, SETSSP }, /* ldrs @(disp,pc) */
1968 { 0x8d00, BRANCH | DELAY | USESSP }, /* bt/s label */
1969 { 0x8e00, SETSSP }, /* ldre @(disp,pc) */
1970 { 0x8f00, BRANCH | DELAY | USESSP } /* bf/s label */
1971 };
1972
1973 static const struct sh_minor_opcode sh_opcode8[] =
1974 {
1975 { MAP (sh_opcode80), 0xff00 }
1976 };
1977
1978 static const struct sh_opcode sh_opcode90[] =
1979 {
1980 { 0x9000, LOAD | SETS1 } /* mov.w @(disp,pc),rn */
1981 };
1982
1983 static const struct sh_minor_opcode sh_opcode9[] =
1984 {
1985 { MAP (sh_opcode90), 0xf000 }
1986 };
1987
1988 static const struct sh_opcode sh_opcodea0[] =
1989 {
1990 { 0xa000, BRANCH | DELAY } /* bra label */
1991 };
1992
1993 static const struct sh_minor_opcode sh_opcodea[] =
1994 {
1995 { MAP (sh_opcodea0), 0xf000 }
1996 };
1997
1998 static const struct sh_opcode sh_opcodeb0[] =
1999 {
2000 { 0xb000, BRANCH | DELAY } /* bsr label */
2001 };
2002
2003 static const struct sh_minor_opcode sh_opcodeb[] =
2004 {
2005 { MAP (sh_opcodeb0), 0xf000 }
2006 };
2007
2008 static const struct sh_opcode sh_opcodec0[] =
2009 {
2010 { 0xc000, STORE | USESR0 | USESSP }, /* mov.b r0,@(disp,gbr) */
2011 { 0xc100, STORE | USESR0 | USESSP }, /* mov.w r0,@(disp,gbr) */
2012 { 0xc200, STORE | USESR0 | USESSP }, /* mov.l r0,@(disp,gbr) */
2013 { 0xc300, BRANCH | USESSP }, /* trapa #imm */
2014 { 0xc400, LOAD | SETSR0 | USESSP }, /* mov.b @(disp,gbr),r0 */
2015 { 0xc500, LOAD | SETSR0 | USESSP }, /* mov.w @(disp,gbr),r0 */
2016 { 0xc600, LOAD | SETSR0 | USESSP }, /* mov.l @(disp,gbr),r0 */
2017 { 0xc700, SETSR0 }, /* mova @(disp,pc),r0 */
2018 { 0xc800, SETSSP | USESR0 }, /* tst #imm,r0 */
2019 { 0xc900, SETSR0 | USESR0 }, /* and #imm,r0 */
2020 { 0xca00, SETSR0 | USESR0 }, /* xor #imm,r0 */
2021 { 0xcb00, SETSR0 | USESR0 }, /* or #imm,r0 */
2022 { 0xcc00, LOAD | SETSSP | USESR0 | USESSP }, /* tst.b #imm,@(r0,gbr) */
2023 { 0xcd00, LOAD | STORE | USESR0 | USESSP }, /* and.b #imm,@(r0,gbr) */
2024 { 0xce00, LOAD | STORE | USESR0 | USESSP }, /* xor.b #imm,@(r0,gbr) */
2025 { 0xcf00, LOAD | STORE | USESR0 | USESSP } /* or.b #imm,@(r0,gbr) */
2026 };
2027
2028 static const struct sh_minor_opcode sh_opcodec[] =
2029 {
2030 { MAP (sh_opcodec0), 0xff00 }
2031 };
2032
2033 static const struct sh_opcode sh_opcoded0[] =
2034 {
2035 { 0xd000, LOAD | SETS1 } /* mov.l @(disp,pc),rn */
2036 };
2037
2038 static const struct sh_minor_opcode sh_opcoded[] =
2039 {
2040 { MAP (sh_opcoded0), 0xf000 }
2041 };
2042
2043 static const struct sh_opcode sh_opcodee0[] =
2044 {
2045 { 0xe000, SETS1 } /* mov #imm,rn */
2046 };
2047
2048 static const struct sh_minor_opcode sh_opcodee[] =
2049 {
2050 { MAP (sh_opcodee0), 0xf000 }
2051 };
2052
2053 static const struct sh_opcode sh_opcodef0[] =
2054 {
2055 { 0xf000, SETSF1 | USESF1 | USESF2 }, /* fadd fm,fn */
2056 { 0xf001, SETSF1 | USESF1 | USESF2 }, /* fsub fm,fn */
2057 { 0xf002, SETSF1 | USESF1 | USESF2 }, /* fmul fm,fn */
2058 { 0xf003, SETSF1 | USESF1 | USESF2 }, /* fdiv fm,fn */
2059 { 0xf004, SETSSP | USESF1 | USESF2 }, /* fcmp/eq fm,fn */
2060 { 0xf005, SETSSP | USESF1 | USESF2 }, /* fcmp/gt fm,fn */
2061 { 0xf006, LOAD | SETSF1 | USES2 | USESR0 }, /* fmov.s @(r0,rm),fn */
2062 { 0xf007, STORE | USES1 | USESF2 | USESR0 }, /* fmov.s fm,@(r0,rn) */
2063 { 0xf008, LOAD | SETSF1 | USES2 }, /* fmov.s @rm,fn */
2064 { 0xf009, LOAD | SETS2 | SETSF1 | USES2 }, /* fmov.s @rm+,fn */
2065 { 0xf00a, STORE | USES1 | USESF2 }, /* fmov.s fm,@rn */
2066 { 0xf00b, STORE | SETS1 | USES1 | USESF2 }, /* fmov.s fm,@-rn */
2067 { 0xf00c, SETSF1 | USESF2 }, /* fmov fm,fn */
2068 { 0xf00e, SETSF1 | USESF1 | USESF2 | USESF0 } /* fmac f0,fm,fn */
2069 };
2070
2071 static const struct sh_opcode sh_opcodef1[] =
2072 {
2073 { 0xf00d, SETSF1 | USESSP }, /* fsts fpul,fn */
2074 { 0xf01d, SETSSP | USESF1 }, /* flds fn,fpul */
2075 { 0xf02d, SETSF1 | USESSP }, /* float fpul,fn */
2076 { 0xf03d, SETSSP | USESF1 }, /* ftrc fn,fpul */
2077 { 0xf04d, SETSF1 | USESF1 }, /* fneg fn */
2078 { 0xf05d, SETSF1 | USESF1 }, /* fabs fn */
2079 { 0xf06d, SETSF1 | USESF1 }, /* fsqrt fn */
2080 { 0xf07d, SETSSP | USESF1 }, /* ftst/nan fn */
2081 { 0xf08d, SETSF1 }, /* fldi0 fn */
2082 { 0xf09d, SETSF1 } /* fldi1 fn */
2083 };
2084
2085 static const struct sh_minor_opcode sh_opcodef[] =
2086 {
2087 { MAP (sh_opcodef0), 0xf00f },
2088 { MAP (sh_opcodef1), 0xf0ff }
2089 };
2090
2091 #ifndef COFF_IMAGE_WITH_PE
2092 static struct sh_major_opcode sh_opcodes[] =
2093 {
2094 { MAP (sh_opcode0) },
2095 { MAP (sh_opcode1) },
2096 { MAP (sh_opcode2) },
2097 { MAP (sh_opcode3) },
2098 { MAP (sh_opcode4) },
2099 { MAP (sh_opcode5) },
2100 { MAP (sh_opcode6) },
2101 { MAP (sh_opcode7) },
2102 { MAP (sh_opcode8) },
2103 { MAP (sh_opcode9) },
2104 { MAP (sh_opcodea) },
2105 { MAP (sh_opcodeb) },
2106 { MAP (sh_opcodec) },
2107 { MAP (sh_opcoded) },
2108 { MAP (sh_opcodee) },
2109 { MAP (sh_opcodef) }
2110 };
2111 #endif
2112
2113 /* The double data transfer / parallel processing insns are not
2114 described here. This will cause sh_align_load_span to leave them alone. */
2115
2116 static const struct sh_opcode sh_dsp_opcodef0[] =
2117 {
2118 { 0xf400, USESAS | SETSAS | LOAD | SETSSP }, /* movs.x @-as,ds */
2119 { 0xf401, USESAS | SETSAS | STORE | USESSP }, /* movs.x ds,@-as */
2120 { 0xf404, USESAS | LOAD | SETSSP }, /* movs.x @as,ds */
2121 { 0xf405, USESAS | STORE | USESSP }, /* movs.x ds,@as */
2122 { 0xf408, USESAS | SETSAS | LOAD | SETSSP }, /* movs.x @as+,ds */
2123 { 0xf409, USESAS | SETSAS | STORE | USESSP }, /* movs.x ds,@as+ */
2124 { 0xf40c, USESAS | SETSAS | LOAD | SETSSP | USESR8 }, /* movs.x @as+r8,ds */
2125 { 0xf40d, USESAS | SETSAS | STORE | USESSP | USESR8 } /* movs.x ds,@as+r8 */
2126 };
2127
2128 static const struct sh_minor_opcode sh_dsp_opcodef[] =
2129 {
2130 { MAP (sh_dsp_opcodef0), 0xfc0d }
2131 };
2132
2133 #ifndef COFF_IMAGE_WITH_PE
2134 /* Given an instruction, return a pointer to the corresponding
2135 sh_opcode structure. Return NULL if the instruction is not
2136 recognized. */
2137
2138 static const struct sh_opcode *
2139 sh_insn_info (insn)
2140 unsigned int insn;
2141 {
2142 const struct sh_major_opcode *maj;
2143 const struct sh_minor_opcode *min, *minend;
2144
2145 maj = &sh_opcodes[(insn & 0xf000) >> 12];
2146 min = maj->minor_opcodes;
2147 minend = min + maj->count;
2148 for (; min < minend; min++)
2149 {
2150 unsigned int l;
2151 const struct sh_opcode *op, *opend;
2152
2153 l = insn & min->mask;
2154 op = min->opcodes;
2155 opend = op + min->count;
2156
2157 /* Since the opcode tables are sorted, we could use a binary
2158 search here if the count were above some cutoff value. */
2159 for (; op < opend; op++)
2160 if (op->opcode == l)
2161 return op;
2162 }
2163
2164 return NULL;
2165 }
2166
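/* A minimal usage sketch added for illustration (editor's addition:
   the example function name is ours and the block is under "#if 0",
   so it is never compiled).  sh_insn_info maps a raw insn word to its
   table entry through the major/minor tables above.  For example,
   0x6103 is "mov r0,r1": it matches { 0x6003, SETS1 | USES2 } in
   sh_opcode60, so it sets the n field register (bits 8-11, here r1)
   and uses the m field register (bits 4-7, here r0).  */
#if 0
static void
sh_insn_info_example ()
{
  unsigned int insn = 0x6103;			/* mov r0,r1 */
  const struct sh_opcode *op = sh_insn_info (insn);

  /* The lookup succeeds and reports the expected registers.  */
  BFD_ASSERT (op != NULL && op->opcode == 0x6003);
  BFD_ASSERT ((op->flags & SETS1) != 0 && SETS1_REG (insn) == 1);
  BFD_ASSERT ((op->flags & USES2) != 0 && USES2_REG (insn) == 0);
}
#endif
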
2167 /* See whether an instruction uses or sets a general purpose register. */
2168
2169 static boolean
2170 sh_insn_uses_or_sets_reg (insn, op, reg)
2171 unsigned int insn;
2172 const struct sh_opcode *op;
2173 unsigned int reg;
2174 {
2175 if (sh_insn_uses_reg (insn, op, reg))
2176 return true;
2177
2178 return sh_insn_sets_reg (insn, op, reg);
2179 }
2180
2181 /* See whether an instruction uses a general purpose register. */
2182
2183 static boolean
2184 sh_insn_uses_reg (insn, op, reg)
2185 unsigned int insn;
2186 const struct sh_opcode *op;
2187 unsigned int reg;
2188 {
2189 unsigned int f;
2190
2191 f = op->flags;
2192
2193 if ((f & USES1) != 0
2194 && USES1_REG (insn) == reg)
2195 return true;
2196 if ((f & USES2) != 0
2197 && USES2_REG (insn) == reg)
2198 return true;
2199 if ((f & USESR0) != 0
2200 && reg == 0)
2201 return true;
2202 if ((f & USESAS) && reg == USESAS_REG (insn))
2203 return true;
2204 if ((f & USESR8) && reg == 8)
2205 return true;
2206
2207 return false;
2208 }
2209
2210 /* See whether an instruction sets a general purpose register. */
2211
2212 static boolean
2213 sh_insn_sets_reg (insn, op, reg)
2214 unsigned int insn;
2215 const struct sh_opcode *op;
2216 unsigned int reg;
2217 {
2218 unsigned int f;
2219
2220 f = op->flags;
2221
2222 if ((f & SETS1) != 0
2223 && SETS1_REG (insn) == reg)
2224 return true;
2225 if ((f & SETS2) != 0
2226 && SETS2_REG (insn) == reg)
2227 return true;
2228 if ((f & SETSR0) != 0
2229 && reg == 0)
2230 return true;
2231 if ((f & SETSAS) && reg == SETSAS_REG (insn))
2232 return true;
2233
2234 return false;
2235 }
2236
2237 /* See whether an instruction uses or sets a floating point register. */
2238
2239 static boolean
2240 sh_insn_uses_or_sets_freg (insn, op, reg)
2241 unsigned int insn;
2242 const struct sh_opcode *op;
2243 unsigned int reg;
2244 {
2245 if (sh_insn_uses_freg (insn, op, reg))
2246 return true;
2247
2248 return sh_insn_sets_freg (insn, op, reg);
2249 }
2250
2251 /* See whether an instruction uses a floating point register. */
2252
2253 static boolean
2254 sh_insn_uses_freg (insn, op, freg)
2255 unsigned int insn;
2256 const struct sh_opcode *op;
2257 unsigned int freg;
2258 {
2259 unsigned int f;
2260
2261 f = op->flags;
2262
2263 /* We can't tell whether this is a double-precision insn, so play it
2264 safe and assume that it might be. That means we must not only test
2265 FREG against itself, but also an even FREG against FREG+1 (in case
2266 the using insn uses only the low half of a double precision value)
2267 and an odd FREG against FREG-1 (in case the setting insn sets only
2268 the low half of a double precision value).
2269 What this all boils down to is that we have to ignore the lowest
2270 bit of the register number. */
2271
2272 if ((f & USESF1) != 0
2273 && (USESF1_REG (insn) & 0xe) == (freg & 0xe))
2274 return true;
2275 if ((f & USESF2) != 0
2276 && (USESF2_REG (insn) & 0xe) == (freg & 0xe))
2277 return true;
2278 if ((f & USESF0) != 0
2279 && freg == 0)
2280 return true;
2281
2282 return false;
2283 }
2284
2285 /* See whether an instruction sets a floating point register. */
2286
2287 static boolean
2288 sh_insn_sets_freg (insn, op, freg)
2289 unsigned int insn;
2290 const struct sh_opcode *op;
2291 unsigned int freg;
2292 {
2293 unsigned int f;
2294
2295 f = op->flags;
2296
2297 /* We can't tell whether this is a double-precision insn, so play it
2298 safe and assume that it might be. That means we must not only test
2299 FREG against itself, but also an even FREG against FREG+1 (in case
2300 the using insn uses only the low half of a double precision value)
2301 and an odd FREG against FREG-1 (in case the setting insn sets only
2302 the low half of a double precision value).
2303 What this all boils down to is that we have to ignore the lowest
2304 bit of the register number. */
2305
2306 if ((f & SETSF1) != 0
2307 && (SETSF1_REG (insn) & 0xe) == (freg & 0xe))
2308 return true;
2309
2310 return false;
2311 }
2312
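/* Editor's illustration of the pairing rule above (under "#if 0",
   never compiled; the function name is ours).  0xf37c is
   "fmov fr7,fr3", whose table entry carries SETSF1.  Because the
   lowest bit of the register number is ignored, a query about fr2
   matches as well, since fr2/fr3 may together hold dr2.  */
#if 0
static void
sh_freg_pair_example ()
{
  unsigned int insn = 0xf37c;			/* fmov fr7,fr3 */
  const struct sh_opcode *op = sh_insn_info (insn);

  BFD_ASSERT (op != NULL);
  BFD_ASSERT (sh_insn_sets_freg (insn, op, 3));	/* fr3 itself.  */
  BFD_ASSERT (sh_insn_sets_freg (insn, op, 2));	/* Its even partner.  */
}
#endif
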
2313 /* See whether instructions I1 and I2 conflict, assuming I1 comes
2314 before I2. OP1 and OP2 are the corresponding sh_opcode structures.
2315 This should return true if there is a conflict, or false if the
2316 instructions can be swapped safely. */
2317
2318 static boolean
2319 sh_insns_conflict (i1, op1, i2, op2)
2320 unsigned int i1;
2321 const struct sh_opcode *op1;
2322 unsigned int i2;
2323 const struct sh_opcode *op2;
2324 {
2325 unsigned int f1, f2;
2326
2327 f1 = op1->flags;
2328 f2 = op2->flags;
2329
2330 /* Load of fpscr conflicts with floating point operations.
2331 FIXME: shouldn't test raw opcodes here. */
2332 if (((i1 & 0xf0ff) == 0x4066 && (i2 & 0xf000) == 0xf000)
2333 || ((i2 & 0xf0ff) == 0x4066 && (i1 & 0xf000) == 0xf000))
2334 return true;
2335
2336 if ((f1 & (BRANCH | DELAY)) != 0
2337 || (f2 & (BRANCH | DELAY)) != 0)
2338 return true;
2339
2340 if (((f1 | f2) & SETSSP)
2341 && (f1 & (SETSSP | USESSP))
2342 && (f2 & (SETSSP | USESSP)))
2343 return true;
2344
2345 if ((f1 & SETS1) != 0
2346 && sh_insn_uses_or_sets_reg (i2, op2, SETS1_REG (i1)))
2347 return true;
2348 if ((f1 & SETS2) != 0
2349 && sh_insn_uses_or_sets_reg (i2, op2, SETS2_REG (i1)))
2350 return true;
2351 if ((f1 & SETSR0) != 0
2352 && sh_insn_uses_or_sets_reg (i2, op2, 0))
2353 return true;
2354 if ((f1 & SETSAS)
2355 && sh_insn_uses_or_sets_reg (i2, op2, SETSAS_REG (i1)))
2356 return true;
2357 if ((f1 & SETSF1) != 0
2358 && sh_insn_uses_or_sets_freg (i2, op2, SETSF1_REG (i1)))
2359 return true;
2360
2361 if ((f2 & SETS1) != 0
2362 && sh_insn_uses_or_sets_reg (i1, op1, SETS1_REG (i2)))
2363 return true;
2364 if ((f2 & SETS2) != 0
2365 && sh_insn_uses_or_sets_reg (i1, op1, SETS2_REG (i2)))
2366 return true;
2367 if ((f2 & SETSR0) != 0
2368 && sh_insn_uses_or_sets_reg (i1, op1, 0))
2369 return true;
2370 if ((f2 & SETSAS)
2371 && sh_insn_uses_or_sets_reg (i1, op1, SETSAS_REG (i2)))
2372 return true;
2373 if ((f2 & SETSF1) != 0
2374 && sh_insn_uses_or_sets_freg (i1, op1, SETSF1_REG (i2)))
2375 return true;
2376
2377 /* The instructions do not conflict. */
2378 return false;
2379 }
2380
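/* Editor's sketch of the conflict test (under "#if 0", never compiled;
   the function name is ours).  "mov r0,r1" followed by "mov r1,r2"
   conflicts because the second insn reads the register the first one
   writes; "mov r0,r1" followed by "mov r2,r4" touches disjoint
   registers and so may be swapped.  */
#if 0
static void
sh_insns_conflict_example ()
{
  unsigned int i1 = 0x6103;			/* mov r0,r1 */
  unsigned int i2 = 0x6213;			/* mov r1,r2 */
  unsigned int i3 = 0x6423;			/* mov r2,r4 */

  BFD_ASSERT (sh_insns_conflict (i1, sh_insn_info (i1),
				 i2, sh_insn_info (i2)));
  BFD_ASSERT (! sh_insns_conflict (i1, sh_insn_info (i1),
				   i3, sh_insn_info (i3)));
}
#endif
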
2381 /* I1 is a load instruction, and I2 is some other instruction. Return
2382 true if I1 loads a register which I2 uses. */
2383
2384 static boolean
2385 sh_load_use (i1, op1, i2, op2)
2386 unsigned int i1;
2387 const struct sh_opcode *op1;
2388 unsigned int i2;
2389 const struct sh_opcode *op2;
2390 {
2391 unsigned int f1;
2392
2393 f1 = op1->flags;
2394
2395 if ((f1 & LOAD) == 0)
2396 return false;
2397
2398 /* If both SETS1 and SETSSP are set, that means a load to a special
2399 register using postincrement addressing mode, which we don't care
2400 about here. */
2401 if ((f1 & SETS1) != 0
2402 && (f1 & SETSSP) == 0
2403 && sh_insn_uses_reg (i2, op2, (i1 & 0x0f00) >> 8))
2404 return true;
2405
2406 if ((f1 & SETSR0) != 0
2407 && sh_insn_uses_reg (i2, op2, 0))
2408 return true;
2409
2410 if ((f1 & SETSF1) != 0
2411 && sh_insn_uses_freg (i2, op2, (i1 & 0x0f00) >> 8))
2412 return true;
2413
2414 return false;
2415 }
2416
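/* Editor's sketch of the load-use test (under "#if 0", never compiled;
   the function name is ours).  "mov.l @r2,r1" loads r1; a following
   "add r1,r3" reads r1, so the pair is reported, while "add r5,r4"
   does not touch r1 and is not.  */
#if 0
static void
sh_load_use_example ()
{
  unsigned int ld = 0x6122;			/* mov.l @r2,r1 */
  unsigned int use = 0x331c;			/* add r1,r3 */
  unsigned int other = 0x345c;			/* add r5,r4 */

  BFD_ASSERT (sh_load_use (ld, sh_insn_info (ld),
			   use, sh_insn_info (use)));
  BFD_ASSERT (! sh_load_use (ld, sh_insn_info (ld),
			     other, sh_insn_info (other)));
}
#endif
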
2417 /* Try to align loads and stores within a span of memory. This is
2418 called by both the ELF and the COFF sh targets. ABFD and SEC are
2419 the BFD and section we are examining. CONTENTS is the contents of
2420 the section. SWAP is the routine to call to swap two instructions.
2421 RELOCS is a pointer to the internal relocation information, to be
2422 passed to SWAP. PLABEL is a pointer to the current label in a
2423 sorted list of labels; LABEL_END is the end of the list. START and
2424 STOP are the range of memory to examine. If a swap is made,
2425 *PSWAPPED is set to true. */
2426
2427 #ifdef COFF_WITH_PE
2428 static
2429 #endif
2430 boolean
2431 _bfd_sh_align_load_span (abfd, sec, contents, swap, relocs,
2432 plabel, label_end, start, stop, pswapped)
2433 bfd *abfd;
2434 asection *sec;
2435 bfd_byte *contents;
2436 boolean (*swap) PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
2437 PTR relocs;
2438 bfd_vma **plabel;
2439 bfd_vma *label_end;
2440 bfd_vma start;
2441 bfd_vma stop;
2442 boolean *pswapped;
2443 {
2444 int dsp = (abfd->arch_info->mach == bfd_mach_sh_dsp
2445 || abfd->arch_info->mach == bfd_mach_sh3_dsp);
2446 bfd_vma i;
2447
2448 /* The SH4 has a Harvard architecture, hence aligning loads is not
2449 desirable. In fact, it is counter-productive, since it interferes
2450 with the schedules generated by the compiler. */
2451 if (abfd->arch_info->mach == bfd_mach_sh4)
2452 return true;
2453
2454 /* If we are linking sh[3]-dsp code, swap the FPU instructions for DSP
2455 instructions. */
2456 if (dsp)
2457 {
2458 sh_opcodes[0xf].minor_opcodes = sh_dsp_opcodef;
2459 sh_opcodes[0xf].count = sizeof sh_dsp_opcodef / sizeof sh_dsp_opcodef [0];
2460 }
2461
2462 /* Instructions should be aligned on 2 byte boundaries. */
2463 if ((start & 1) == 1)
2464 ++start;
2465
2466 /* Now look through the unaligned addresses. */
2467 i = start;
2468 if ((i & 2) == 0)
2469 i += 2;
2470 for (; i < stop; i += 4)
2471 {
2472 unsigned int insn;
2473 const struct sh_opcode *op;
2474 unsigned int prev_insn = 0;
2475 const struct sh_opcode *prev_op = NULL;
2476
2477 insn = bfd_get_16 (abfd, contents + i);
2478 op = sh_insn_info (insn);
2479 if (op == NULL
2480 || (op->flags & (LOAD | STORE)) == 0)
2481 continue;
2482
2483 /* This is a load or store which is not on a four byte boundary. */
2484
2485 while (*plabel < label_end && **plabel < i)
2486 ++*plabel;
2487
2488 if (i > start)
2489 {
2490 prev_insn = bfd_get_16 (abfd, contents + i - 2);
2491 /* If INSN is the field b of a parallel processing insn, it is not
2492 a load / store after all. Note that the test here might mistake
2493 the field_b of a pcopy insn for the starting code of a parallel
2494 processing insn; this might miss a swapping opportunity, but at
2495 least we're on the safe side. */
2496 if (dsp && (prev_insn & 0xfc00) == 0xf800)
2497 continue;
2498
2499 /* Check if prev_insn is actually the field b of a parallel
2500 processing insn. Again, this can give a spurious match
2501 after a pcopy. */
2502 if (dsp && i - 2 > start)
2503 {
2504 unsigned pprev_insn = bfd_get_16 (abfd, contents + i - 4);
2505
2506 if ((pprev_insn & 0xfc00) == 0xf800)
2507 prev_op = NULL;
2508 else
2509 prev_op = sh_insn_info (prev_insn);
2510 }
2511 else
2512 prev_op = sh_insn_info (prev_insn);
2513
2514 /* If the load/store instruction is in a delay slot, we
2515 can't swap. */
2516 if (prev_op == NULL
2517 || (prev_op->flags & DELAY) != 0)
2518 continue;
2519 }
2520 if (i > start
2521 && (*plabel >= label_end || **plabel != i)
2522 && prev_op != NULL
2523 && (prev_op->flags & (LOAD | STORE)) == 0
2524 && ! sh_insns_conflict (prev_insn, prev_op, insn, op))
2525 {
2526 boolean ok;
2527
2528 /* The load/store instruction does not have a label, and
2529 there is a previous instruction; PREV_INSN is not
2530 itself a load/store instruction, and PREV_INSN and
2531 INSN do not conflict. */
2532
2533 ok = true;
2534
2535 if (i >= start + 4)
2536 {
2537 unsigned int prev2_insn;
2538 const struct sh_opcode *prev2_op;
2539
2540 prev2_insn = bfd_get_16 (abfd, contents + i - 4);
2541 prev2_op = sh_insn_info (prev2_insn);
2542
2543 /* If the instruction before PREV_INSN has a delay
2544 slot--that is, PREV_INSN is in a delay slot--we
2545 can not swap. */
2546 if (prev2_op == NULL
2547 || (prev2_op->flags & DELAY) != 0)
2548 ok = false;
2549
2550 /* If the instruction before PREV_INSN is a load,
2551 and it sets a register which INSN uses, then
2552 putting INSN immediately after PREV_INSN will
2553 cause a pipeline bubble, so there is no point to
2554 making the swap. */
2555 if (ok
2556 && (prev2_op->flags & LOAD) != 0
2557 && sh_load_use (prev2_insn, prev2_op, insn, op))
2558 ok = false;
2559 }
2560
2561 if (ok)
2562 {
2563 if (! (*swap) (abfd, sec, relocs, contents, i - 2))
2564 return false;
2565 *pswapped = true;
2566 continue;
2567 }
2568 }
2569
2570 while (*plabel < label_end && **plabel < i + 2)
2571 ++*plabel;
2572
2573 if (i + 2 < stop
2574 && (*plabel >= label_end || **plabel != i + 2))
2575 {
2576 unsigned int next_insn;
2577 const struct sh_opcode *next_op;
2578
2579 /* There is an instruction after the load/store
2580 instruction, and it does not have a label. */
2581 next_insn = bfd_get_16 (abfd, contents + i + 2);
2582 next_op = sh_insn_info (next_insn);
2583 if (next_op != NULL
2584 && (next_op->flags & (LOAD | STORE)) == 0
2585 && ! sh_insns_conflict (insn, op, next_insn, next_op))
2586 {
2587 boolean ok;
2588
2589 /* NEXT_INSN is not itself a load/store instruction,
2590 and it does not conflict with INSN. */
2591
2592 ok = true;
2593
2594 /* If PREV_INSN is a load, and it sets a register
2595 which NEXT_INSN uses, then putting NEXT_INSN
2596 immediately after PREV_INSN will cause a pipeline
2597 bubble, so there is no reason to make this swap. */
2598 if (prev_op != NULL
2599 && (prev_op->flags & LOAD) != 0
2600 && sh_load_use (prev_insn, prev_op, next_insn, next_op))
2601 ok = false;
2602
2603 /* If INSN is a load, and it sets a register which
2604 the insn after NEXT_INSN uses, then doing the
2605 swap will cause a pipeline bubble, so there is no
2606 reason to make the swap. However, if the insn
2607 after NEXT_INSN is itself a load or store
2608 instruction, then it is misaligned, so
2609 optimistically hope that it will be swapped
2610 itself, and just live with the pipeline bubble if
2611 it isn't. */
2612 if (ok
2613 && i + 4 < stop
2614 && (op->flags & LOAD) != 0)
2615 {
2616 unsigned int next2_insn;
2617 const struct sh_opcode *next2_op;
2618
2619 next2_insn = bfd_get_16 (abfd, contents + i + 4);
2620 next2_op = sh_insn_info (next2_insn);
2621 if (next2_op == NULL || ((next2_op->flags & (LOAD | STORE)) == 0
2622 && sh_load_use (insn, op, next2_insn, next2_op)))
2623 ok = false;
2624 }
2625
2626 if (ok)
2627 {
2628 if (! (*swap) (abfd, sec, relocs, contents, i))
2629 return false;
2630 *pswapped = true;
2631 continue;
2632 }
2633 }
2634 }
2635 }
2636
2637 return true;
2638 }
2639 #endif /* not COFF_IMAGE_WITH_PE */
2640
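/* A worked example of the transformation above (editor's note).
   Suppose a code span contains

	0x1000:	add	#1,r0
	0x1002:	mov.l	@r2,r1

   The load sits on a two byte but not a four byte boundary.  If it
   carries no label, is not in a delay slot, and does not conflict
   with the preceding insn, the swap callback is invoked at 0x1000
   and the span becomes

	0x1000:	mov.l	@r2,r1
	0x1002:	add	#1,r0

   placing the load on a four byte boundary, which is what the
   relaxation pass is after (see the comment above sh_relax_section).  */
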
2641 /* Look for loads and stores which we can align to four byte
2642 boundaries. See the longer comment above sh_relax_section for why
2643 this is desirable. This sets *PSWAPPED if some instruction was
2644 swapped. */
2645
2646 static boolean
2647 sh_align_loads (abfd, sec, internal_relocs, contents, pswapped)
2648 bfd *abfd;
2649 asection *sec;
2650 struct internal_reloc *internal_relocs;
2651 bfd_byte *contents;
2652 boolean *pswapped;
2653 {
2654 struct internal_reloc *irel, *irelend;
2655 bfd_vma *labels = NULL;
2656 bfd_vma *label, *label_end;
2657 bfd_size_type amt;
2658
2659 *pswapped = false;
2660
2661 irelend = internal_relocs + sec->reloc_count;
2662
2663 /* Get all the addresses with labels on them. */
2664 amt = (bfd_size_type) sec->reloc_count * sizeof (bfd_vma);
2665 labels = (bfd_vma *) bfd_malloc (amt);
2666 if (labels == NULL)
2667 goto error_return;
2668 label_end = labels;
2669 for (irel = internal_relocs; irel < irelend; irel++)
2670 {
2671 if (irel->r_type == R_SH_LABEL)
2672 {
2673 *label_end = irel->r_vaddr - sec->vma;
2674 ++label_end;
2675 }
2676 }
2677
2678 /* Note that the assembler currently always outputs relocs in
2679 address order. If that ever changes, this code will need to sort
2680 the label values and the relocs. */
2681
2682 label = labels;
2683
2684 for (irel = internal_relocs; irel < irelend; irel++)
2685 {
2686 bfd_vma start, stop;
2687
2688 if (irel->r_type != R_SH_CODE)
2689 continue;
2690
2691 start = irel->r_vaddr - sec->vma;
2692
2693 for (irel++; irel < irelend; irel++)
2694 if (irel->r_type == R_SH_DATA)
2695 break;
2696 if (irel < irelend)
2697 stop = irel->r_vaddr - sec->vma;
2698 else
2699 stop = sec->_cooked_size;
2700
2701 if (! _bfd_sh_align_load_span (abfd, sec, contents, sh_swap_insns,
2702 (PTR) internal_relocs, &label,
2703 label_end, start, stop, pswapped))
2704 goto error_return;
2705 }
2706
2707 free (labels);
2708
2709 return true;
2710
2711 error_return:
2712 if (labels != NULL)
2713 free (labels);
2714 return false;
2715 }
2716
2717 /* Swap two SH instructions. */
2718
2719 static boolean
2720 sh_swap_insns (abfd, sec, relocs, contents, addr)
2721 bfd *abfd;
2722 asection *sec;
2723 PTR relocs;
2724 bfd_byte *contents;
2725 bfd_vma addr;
2726 {
2727 struct internal_reloc *internal_relocs = (struct internal_reloc *) relocs;
2728 unsigned short i1, i2;
2729 struct internal_reloc *irel, *irelend;
2730
2731 /* Swap the instructions themselves. */
2732 i1 = bfd_get_16 (abfd, contents + addr);
2733 i2 = bfd_get_16 (abfd, contents + addr + 2);
2734 bfd_put_16 (abfd, (bfd_vma) i2, contents + addr);
2735 bfd_put_16 (abfd, (bfd_vma) i1, contents + addr + 2);
2736
2737 /* Adjust all reloc addresses. */
2738 irelend = internal_relocs + sec->reloc_count;
2739 for (irel = internal_relocs; irel < irelend; irel++)
2740 {
2741 int type, add;
2742
2743 /* There are a few special types of relocs that we don't want to
2744 adjust. These relocs do not apply to the instruction itself,
2745 but are only associated with the address. */
2746 type = irel->r_type;
2747 if (type == R_SH_ALIGN
2748 || type == R_SH_CODE
2749 || type == R_SH_DATA
2750 || type == R_SH_LABEL)
2751 continue;
2752
2753 /* If an R_SH_USES reloc points to one of the addresses being
2754 swapped, we must adjust it. It would be incorrect to do this
2755 for a jump, though, since we want to execute both
2756 instructions after the jump. (We have avoided swapping
2757 around a label, so the jump will not wind up executing an
2758 instruction it shouldn't). */
2759 if (type == R_SH_USES)
2760 {
2761 bfd_vma off;
2762
2763 off = irel->r_vaddr - sec->vma + 4 + irel->r_offset;
2764 if (off == addr)
2765 irel->r_offset += 2;
2766 else if (off == addr + 2)
2767 irel->r_offset -= 2;
2768 }
2769
2770 if (irel->r_vaddr - sec->vma == addr)
2771 {
2772 irel->r_vaddr += 2;
2773 add = -2;
2774 }
2775 else if (irel->r_vaddr - sec->vma == addr + 2)
2776 {
2777 irel->r_vaddr -= 2;
2778 add = 2;
2779 }
2780 else
2781 add = 0;
2782
2783 if (add != 0)
2784 {
2785 bfd_byte *loc;
2786 unsigned short insn, oinsn;
2787 boolean overflow;
2788
2789 loc = contents + irel->r_vaddr - sec->vma;
2790 overflow = false;
2791 switch (type)
2792 {
2793 default:
2794 break;
2795
2796 case R_SH_PCDISP8BY2:
2797 case R_SH_PCRELIMM8BY2:
2798 insn = bfd_get_16 (abfd, loc);
2799 oinsn = insn;
2800 insn += add / 2;
2801 if ((oinsn & 0xff00) != (insn & 0xff00))
2802 overflow = true;
2803 bfd_put_16 (abfd, (bfd_vma) insn, loc);
2804 break;
2805
2806 case R_SH_PCDISP:
2807 insn = bfd_get_16 (abfd, loc);
2808 oinsn = insn;
2809 insn += add / 2;
2810 if ((oinsn & 0xf000) != (insn & 0xf000))
2811 overflow = true;
2812 bfd_put_16 (abfd, (bfd_vma) insn, loc);
2813 break;
2814
2815 case R_SH_PCRELIMM8BY4:
2816 /* This reloc ignores the least significant 2 bits of
2817 the program counter before adding in the offset.
2818 This means that if ADDR is on a four byte boundary,
2819 the swap will not affect the offset. If ADDR is not,
2820 then the instruction crosses a four byte boundary
2821 and must be adjusted. */
2822 if ((addr & 3) != 0)
2823 {
2824 insn = bfd_get_16 (abfd, loc);
2825 oinsn = insn;
2826 insn += add / 2;
2827 if ((oinsn & 0xff00) != (insn & 0xff00))
2828 overflow = true;
2829 bfd_put_16 (abfd, (bfd_vma) insn, loc);
2830 }
2831
2832 break;
2833 }
2834
2835 if (overflow)
2836 {
2837 ((*_bfd_error_handler)
2838 ("%s: 0x%lx: fatal: reloc overflow while relaxing",
2839 bfd_archive_filename (abfd), (unsigned long) irel->r_vaddr));
2840 bfd_set_error (bfd_error_bad_value);
2841 return false;
2842 }
2843 }
2844 }
2845
2846 return true;
2847 }
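/* Editor's note on the reloc fixups above: SH PC relative displacement
   fields are counted in two byte units, so an insn that moves by two
   bytes has its field adjusted by add / 2, i.e. by one.  For example,
   when a "bra" is swapped with the insn that follows it, the branch
   lands two bytes later, so its R_SH_PCDISP field must shrink by one
   to keep the same target; the switch above does that and reports a
   fatal error if the adjustment would overflow the field.  */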
2848 \f
2849 /* This is a modification of _bfd_coff_generic_relocate_section, which
2850 will handle SH relaxing. */
2851
2852 static boolean
2853 sh_relocate_section (output_bfd, info, input_bfd, input_section, contents,
2854 relocs, syms, sections)
2855 bfd *output_bfd ATTRIBUTE_UNUSED;
2856 struct bfd_link_info *info;
2857 bfd *input_bfd;
2858 asection *input_section;
2859 bfd_byte *contents;
2860 struct internal_reloc *relocs;
2861 struct internal_syment *syms;
2862 asection **sections;
2863 {
2864 struct internal_reloc *rel;
2865 struct internal_reloc *relend;
2866
2867 rel = relocs;
2868 relend = rel + input_section->reloc_count;
2869 for (; rel < relend; rel++)
2870 {
2871 long symndx;
2872 struct coff_link_hash_entry *h;
2873 struct internal_syment *sym;
2874 bfd_vma addend;
2875 bfd_vma val;
2876 reloc_howto_type *howto;
2877 bfd_reloc_status_type rstat;
2878
2879 /* Almost all relocs have to do with relaxing. If any work must
2880 be done for them, it has been done in sh_relax_section. */
2881 if (rel->r_type != R_SH_IMM32
2882 #ifdef COFF_WITH_PE
2883 && rel->r_type != R_SH_IMM32CE
2884 && rel->r_type != R_SH_IMAGEBASE
2885 #endif
2886 && rel->r_type != R_SH_PCDISP)
2887 continue;
2888
2889 symndx = rel->r_symndx;
2890
2891 if (symndx == -1)
2892 {
2893 h = NULL;
2894 sym = NULL;
2895 }
2896 else
2897 {
2898 if (symndx < 0
2899 || (unsigned long) symndx >= obj_raw_syment_count (input_bfd))
2900 {
2901 (*_bfd_error_handler)
2902 ("%s: illegal symbol index %ld in relocs",
2903 bfd_archive_filename (input_bfd), symndx);
2904 bfd_set_error (bfd_error_bad_value);
2905 return false;
2906 }
2907 h = obj_coff_sym_hashes (input_bfd)[symndx];
2908 sym = syms + symndx;
2909 }
2910
2911 if (sym != NULL && sym->n_scnum != 0)
2912 addend = - sym->n_value;
2913 else
2914 addend = 0;
2915
2916 if (rel->r_type == R_SH_PCDISP)
2917 addend -= 4;
2918
2919 if (rel->r_type >= SH_COFF_HOWTO_COUNT)
2920 howto = NULL;
2921 else
2922 howto = &sh_coff_howtos[rel->r_type];
2923
2924 if (howto == NULL)
2925 {
2926 bfd_set_error (bfd_error_bad_value);
2927 return false;
2928 }
2929
2930 #ifdef COFF_WITH_PE
2931 if (rel->r_type == R_SH_IMAGEBASE)
2932 addend -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
2933 #endif
2934
2935 val = 0;
2936
2937 if (h == NULL)
2938 {
2939 asection *sec;
2940
2941 /* There is nothing to do for an internal PCDISP reloc. */
2942 if (rel->r_type == R_SH_PCDISP)
2943 continue;
2944
2945 if (symndx == -1)
2946 {
2947 sec = bfd_abs_section_ptr;
2948 val = 0;
2949 }
2950 else
2951 {
2952 sec = sections[symndx];
2953 val = (sec->output_section->vma
2954 + sec->output_offset
2955 + sym->n_value
2956 - sec->vma);
2957 }
2958 }
2959 else
2960 {
2961 if (h->root.type == bfd_link_hash_defined
2962 || h->root.type == bfd_link_hash_defweak)
2963 {
2964 asection *sec;
2965
2966 sec = h->root.u.def.section;
2967 val = (h->root.u.def.value
2968 + sec->output_section->vma
2969 + sec->output_offset);
2970 }
2971 else if (! info->relocateable)
2972 {
2973 if (! ((*info->callbacks->undefined_symbol)
2974 (info, h->root.root.string, input_bfd, input_section,
2975 rel->r_vaddr - input_section->vma, true)))
2976 return false;
2977 }
2978 }
2979
2980 rstat = _bfd_final_link_relocate (howto, input_bfd, input_section,
2981 contents,
2982 rel->r_vaddr - input_section->vma,
2983 val, addend);
2984
2985 switch (rstat)
2986 {
2987 default:
2988 abort ();
2989 case bfd_reloc_ok:
2990 break;
2991 case bfd_reloc_overflow:
2992 {
2993 const char *name;
2994 char buf[SYMNMLEN + 1];
2995
2996 if (symndx == -1)
2997 name = "*ABS*";
2998 else if (h != NULL)
2999 name = h->root.root.string;
3000 else if (sym->_n._n_n._n_zeroes == 0
3001 && sym->_n._n_n._n_offset != 0)
3002 name = obj_coff_strings (input_bfd) + sym->_n._n_n._n_offset;
3003 else
3004 {
3005 strncpy (buf, sym->_n._n_name, SYMNMLEN);
3006 buf[SYMNMLEN] = '\0';
3007 name = buf;
3008 }
3009
3010 if (! ((*info->callbacks->reloc_overflow)
3011 (info, name, howto->name, (bfd_vma) 0, input_bfd,
3012 input_section, rel->r_vaddr - input_section->vma)))
3013 return false;
3014 }
3015 }
3016 }
3017
3018 return true;
3019 }
3020
3021 /* This is a version of bfd_generic_get_relocated_section_contents
3022 which uses sh_relocate_section. */
3023
3024 static bfd_byte *
3025 sh_coff_get_relocated_section_contents (output_bfd, link_info, link_order,
3026 data, relocateable, symbols)
3027 bfd *output_bfd;
3028 struct bfd_link_info *link_info;
3029 struct bfd_link_order *link_order;
3030 bfd_byte *data;
3031 boolean relocateable;
3032 asymbol **symbols;
3033 {
3034 asection *input_section = link_order->u.indirect.section;
3035 bfd *input_bfd = input_section->owner;
3036 asection **sections = NULL;
3037 struct internal_reloc *internal_relocs = NULL;
3038 struct internal_syment *internal_syms = NULL;
3039
3040 /* We only need to handle the case of relaxing, or of having a
3041 particular set of section contents, specially. */
3042 if (relocateable
3043 || coff_section_data (input_bfd, input_section) == NULL
3044 || coff_section_data (input_bfd, input_section)->contents == NULL)
3045 return bfd_generic_get_relocated_section_contents (output_bfd, link_info,
3046 link_order, data,
3047 relocateable,
3048 symbols);
3049
3050 memcpy (data, coff_section_data (input_bfd, input_section)->contents,
3051 (size_t) input_section->_raw_size);
3052
3053 if ((input_section->flags & SEC_RELOC) != 0
3054 && input_section->reloc_count > 0)
3055 {
3056 bfd_size_type symesz = bfd_coff_symesz (input_bfd);
3057 bfd_byte *esym, *esymend;
3058 struct internal_syment *isymp;
3059 asection **secpp;
3060 bfd_size_type amt;
3061
3062 if (! _bfd_coff_get_external_symbols (input_bfd))
3063 goto error_return;
3064
3065 internal_relocs = (_bfd_coff_read_internal_relocs
3066 (input_bfd, input_section, false, (bfd_byte *) NULL,
3067 false, (struct internal_reloc *) NULL));
3068 if (internal_relocs == NULL)
3069 goto error_return;
3070
3071 amt = obj_raw_syment_count (input_bfd);
3072 amt *= sizeof (struct internal_syment);
3073 internal_syms = (struct internal_syment *) bfd_malloc (amt);
3074 if (internal_syms == NULL)
3075 goto error_return;
3076
3077 amt = obj_raw_syment_count (input_bfd);
3078 amt *= sizeof (asection *);
3079 sections = (asection **) bfd_malloc (amt);
3080 if (sections == NULL)
3081 goto error_return;
3082
3083 isymp = internal_syms;
3084 secpp = sections;
3085 esym = (bfd_byte *) obj_coff_external_syms (input_bfd);
3086 esymend = esym + obj_raw_syment_count (input_bfd) * symesz;
3087 while (esym < esymend)
3088 {
3089 bfd_coff_swap_sym_in (input_bfd, (PTR) esym, (PTR) isymp);
3090
3091 if (isymp->n_scnum != 0)
3092 *secpp = coff_section_from_bfd_index (input_bfd, isymp->n_scnum);
3093 else
3094 {
3095 if (isymp->n_value == 0)
3096 *secpp = bfd_und_section_ptr;
3097 else
3098 *secpp = bfd_com_section_ptr;
3099 }
3100
3101 esym += (isymp->n_numaux + 1) * symesz;
3102 secpp += isymp->n_numaux + 1;
3103 isymp += isymp->n_numaux + 1;
3104 }
3105
3106 if (! sh_relocate_section (output_bfd, link_info, input_bfd,
3107 input_section, data, internal_relocs,
3108 internal_syms, sections))
3109 goto error_return;
3110
3111 free (sections);
3112 sections = NULL;
3113 free (internal_syms);
3114 internal_syms = NULL;
3115 free (internal_relocs);
3116 internal_relocs = NULL;
3117 }
3118
3119 return data;
3120
3121 error_return:
3122 if (internal_relocs != NULL)
3123 free (internal_relocs);
3124 if (internal_syms != NULL)
3125 free (internal_syms);
3126 if (sections != NULL)
3127 free (sections);
3128 return NULL;
3129 }
3130
3131 /* The target vectors. */
3132
3133 #ifndef TARGET_SHL_SYM
3134 CREATE_BIG_COFF_TARGET_VEC (shcoff_vec, "coff-sh", BFD_IS_RELAXABLE, 0, '_', NULL)
3135 #endif
3136
3137 #ifdef TARGET_SHL_SYM
3138 #define TARGET_SYM TARGET_SHL_SYM
3139 #else
3140 #define TARGET_SYM shlcoff_vec
3141 #endif
3142
3143 #ifndef TARGET_SHL_NAME
3144 #define TARGET_SHL_NAME "coff-shl"
3145 #endif
3146
3147 #ifdef COFF_WITH_PE
3148 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
3149 SEC_CODE | SEC_DATA, '_', NULL);
3150 #else
3151 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
3152 0, '_', NULL)
3153 #endif
3154
3155 #ifndef TARGET_SHL_SYM
3156 static const bfd_target * coff_small_object_p PARAMS ((bfd *));
3157 static boolean coff_small_new_section_hook PARAMS ((bfd *, asection *));
3158 /* Some people want versions of the SH COFF target which do not align
3159 to 16 byte boundaries. We implement that by adding a couple of new
3160 target vectors. These are just like the ones above, but they
3161 change the default section alignment. To generate them in the
3162 assembler, use -small. To use them in the linker, use -b
3163 coff-sh{l}-small and -oformat coff-sh{l}-small.
3164
3165 Yes, this is a horrible hack. A general solution for setting
3166 section alignment in COFF is rather complex. ELF handles this
3167 correctly. */
3168
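/* For example (editor's illustration: the program names below depend
   on how the tools were configured, so treat these command lines as a
   sketch using the options named above rather than as gospel):

	sh-coff-as -small -o foo.o foo.s
	sh-coff-ld -b coff-sh-small -oformat coff-sh-small -o foo foo.o

   which selects the variants that default sections to 4 byte rather
   than 16 byte alignment all the way through.  */
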
3169 /* Only recognize the small versions if the target was not defaulted.
3170 Otherwise we won't recognize the non default endianness. */
3171
3172 static const bfd_target *
3173 coff_small_object_p (abfd)
3174 bfd *abfd;
3175 {
3176 if (abfd->target_defaulted)
3177 {
3178 bfd_set_error (bfd_error_wrong_format);
3179 return NULL;
3180 }
3181 return coff_object_p (abfd);
3182 }
3183
3184 /* Set the section alignment for the small versions. */
3185
3186 static boolean
3187 coff_small_new_section_hook (abfd, section)
3188 bfd *abfd;
3189 asection *section;
3190 {
3191 if (! coff_new_section_hook (abfd, section))
3192 return false;
3193
3194 /* We must align to at least a four byte boundary, because longword
3195 accesses must be on a four byte boundary. */
3196 if (section->alignment_power == COFF_DEFAULT_SECTION_ALIGNMENT_POWER)
3197 section->alignment_power = 2;
3198
3199 return true;
3200 }
3201
3202 /* This is copied from bfd_coff_std_swap_table so that we can change
3203 the default section alignment power. */
3204
3205 static const bfd_coff_backend_data bfd_coff_small_swap_table =
3206 {
3207 coff_swap_aux_in, coff_swap_sym_in, coff_swap_lineno_in,
3208 coff_swap_aux_out, coff_swap_sym_out,
3209 coff_swap_lineno_out, coff_swap_reloc_out,
3210 coff_swap_filehdr_out, coff_swap_aouthdr_out,
3211 coff_swap_scnhdr_out,
3212 FILHSZ, AOUTSZ, SCNHSZ, SYMESZ, AUXESZ, RELSZ, LINESZ, FILNMLEN,
3213 #ifdef COFF_LONG_FILENAMES
3214 true,
3215 #else
3216 false,
3217 #endif
3218 #ifdef COFF_LONG_SECTION_NAMES
3219 true,
3220 #else
3221 false,
3222 #endif
3223 2,
3224 #ifdef COFF_FORCE_SYMBOLS_IN_STRINGS
3225 true,
3226 #else
3227 false,
3228 #endif
3229 #ifdef COFF_DEBUG_STRING_WIDE_PREFIX
3230 4,
3231 #else
3232 2,
3233 #endif
3234 coff_swap_filehdr_in, coff_swap_aouthdr_in, coff_swap_scnhdr_in,
3235 coff_swap_reloc_in, coff_bad_format_hook, coff_set_arch_mach_hook,
3236 coff_mkobject_hook, styp_to_sec_flags, coff_set_alignment_hook,
3237 coff_slurp_symbol_table, symname_in_debug_hook, coff_pointerize_aux_hook,
3238 coff_print_aux, coff_reloc16_extra_cases, coff_reloc16_estimate,
3239 coff_classify_symbol, coff_compute_section_file_positions,
3240 coff_start_final_link, coff_relocate_section, coff_rtype_to_howto,
3241 coff_adjust_symndx, coff_link_add_one_symbol,
3242 coff_link_output_has_begun, coff_final_link_postscript
3243 };
3244
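/* Editor's note: the bare "2" above, in the slot that follows the
   long-section-names flag, is the default section alignment power.
   Sections created through this swap table therefore default to
   2**2 == 4 byte alignment instead of the 2**4 == 16 bytes used by
   the regular SH COFF vectors; changing that one value is the reason
   this copy of the standard swap table exists, as the comment before
   it says.  */
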
3245 #define coff_small_close_and_cleanup \
3246 coff_close_and_cleanup
3247 #define coff_small_bfd_free_cached_info \
3248 coff_bfd_free_cached_info
3249 #define coff_small_get_section_contents \
3250 coff_get_section_contents
3251 #define coff_small_get_section_contents_in_window \
3252 coff_get_section_contents_in_window
3253
3254 extern const bfd_target shlcoff_small_vec;
3255
3256 const bfd_target shcoff_small_vec =
3257 {
3258 "coff-sh-small", /* name */
3259 bfd_target_coff_flavour,
3260 BFD_ENDIAN_BIG, /* data byte order is big */
3261 BFD_ENDIAN_BIG, /* header byte order is big */
3262
3263 (HAS_RELOC | EXEC_P | /* object flags */
3264 HAS_LINENO | HAS_DEBUG |
3265 HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3266
3267 (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3268 '_', /* leading symbol underscore */
3269 '/', /* ar_pad_char */
3270 15, /* ar_max_namelen */
3271 bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3272 bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3273 bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* data */
3274 bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3275 bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3276 bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* hdrs */
3277
3278 {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
3279 bfd_generic_archive_p, _bfd_dummy_target},
3280 {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
3281 bfd_false},
3282 {bfd_false, coff_write_object_contents, /* bfd_write_contents */
3283 _bfd_write_archive_contents, bfd_false},
3284
3285 BFD_JUMP_TABLE_GENERIC (coff_small),
3286 BFD_JUMP_TABLE_COPY (coff),
3287 BFD_JUMP_TABLE_CORE (_bfd_nocore),
3288 BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3289 BFD_JUMP_TABLE_SYMBOLS (coff),
3290 BFD_JUMP_TABLE_RELOCS (coff),
3291 BFD_JUMP_TABLE_WRITE (coff),
3292 BFD_JUMP_TABLE_LINK (coff),
3293 BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3294
3295 & shlcoff_small_vec,
3296
3297 (PTR) &bfd_coff_small_swap_table
3298 };
3299
3300 const bfd_target shlcoff_small_vec =
3301 {
3302 "coff-shl-small", /* name */
3303 bfd_target_coff_flavour,
3304 BFD_ENDIAN_LITTLE, /* data byte order is little */
3305 BFD_ENDIAN_LITTLE, /* header byte order is little endian too */
3306
3307 (HAS_RELOC | EXEC_P | /* object flags */
3308 HAS_LINENO | HAS_DEBUG |
3309 HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3310
3311 (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3312 '_', /* leading symbol underscore */
3313 '/', /* ar_pad_char */
3314 15, /* ar_max_namelen */
3315 bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3316 bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3317 bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* data */
3318 bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3319 bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3320 bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* hdrs */
3321
3322 {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
3323 bfd_generic_archive_p, _bfd_dummy_target},
3324 {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
3325 bfd_false},
3326 {bfd_false, coff_write_object_contents, /* bfd_write_contents */
3327 _bfd_write_archive_contents, bfd_false},
3328
3329 BFD_JUMP_TABLE_GENERIC (coff_small),
3330 BFD_JUMP_TABLE_COPY (coff),
3331 BFD_JUMP_TABLE_CORE (_bfd_nocore),
3332 BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3333 BFD_JUMP_TABLE_SYMBOLS (coff),
3334 BFD_JUMP_TABLE_RELOCS (coff),
3335 BFD_JUMP_TABLE_WRITE (coff),
3336 BFD_JUMP_TABLE_LINK (coff),
3337 BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3338
3339 & shcoff_small_vec,
3340
3341 (PTR) &bfd_coff_small_swap_table
3342 };
3343 #endif