1 /* tc-ia64.c -- Assembler for the HP/Intel IA-64 architecture.
2 Copyright (C) 1998-2016 Free Software Foundation, Inc.
3 Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
4
5 This file is part of GAS, the GNU Assembler.
6
7 GAS is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GAS is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GAS; see the file COPYING. If not, write to
19 the Free Software Foundation, 51 Franklin Street - Fifth Floor,
20 Boston, MA 02110-1301, USA. */
21
22 /*
23 TODO:
24
25 - optional operands
26 - directives:
27 .eb
28 .estate
29 .lb
30 .popsection
31 .previous
32 .psr
33 .pushsection
34 - labels are wrong if automatic alignment is introduced
 35 	(e.g., check out the second real10 definition in test-data.s)
36 - DV-related stuff:
37 <reg>.safe_across_calls and any other DV-related directives I don't
38 have documentation for.
39 verify mod-sched-brs reads/writes are checked/marked (and other
40 notes)
41
42 */
43
44 #include "as.h"
45 #include "safe-ctype.h"
46 #include "dwarf2dbg.h"
47 #include "subsegs.h"
48
49 #include "opcode/ia64.h"
50
51 #include "elf/ia64.h"
52 #include "bfdver.h"
53 #include <time.h>
54
55 #ifdef HAVE_LIMITS_H
56 #include <limits.h>
57 #endif
58
59 #define NELEMS(a) ((int) (sizeof (a)/sizeof ((a)[0])))
60
61 /* Some systems define MIN in, e.g., param.h. */
62 #undef MIN
63 #define MIN(a,b) ((a) < (b) ? (a) : (b))
64
65 #define NUM_SLOTS 4
66 #define PREV_SLOT md.slot[(md.curr_slot + NUM_SLOTS - 1) % NUM_SLOTS]
67 #define CURR_SLOT md.slot[md.curr_slot]
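/* Example: with NUM_SLOTS == 4 and md.curr_slot == 0, PREV_SLOT selects
   md.slot[(0 + 4 - 1) % 4] == md.slot[3]; adding NUM_SLOTS before taking
   the modulo keeps the index non-negative when curr_slot wraps around.  */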
68
69 #define O_pseudo_fixup (O_max + 1)
70
71 enum special_section
72 {
73 /* IA-64 ABI section pseudo-ops. */
74 SPECIAL_SECTION_BSS = 0,
75 SPECIAL_SECTION_SBSS,
76 SPECIAL_SECTION_SDATA,
77 SPECIAL_SECTION_RODATA,
78 SPECIAL_SECTION_COMMENT,
79 SPECIAL_SECTION_UNWIND,
80 SPECIAL_SECTION_UNWIND_INFO,
81 /* HPUX specific section pseudo-ops. */
82 SPECIAL_SECTION_INIT_ARRAY,
83 SPECIAL_SECTION_FINI_ARRAY,
84 };
85
86 enum reloc_func
87 {
88 FUNC_DTP_MODULE,
89 FUNC_DTP_RELATIVE,
90 FUNC_FPTR_RELATIVE,
91 FUNC_GP_RELATIVE,
92 FUNC_LT_RELATIVE,
93 FUNC_LT_RELATIVE_X,
94 FUNC_PC_RELATIVE,
95 FUNC_PLT_RELATIVE,
96 FUNC_SEC_RELATIVE,
97 FUNC_SEG_RELATIVE,
98 FUNC_TP_RELATIVE,
99 FUNC_LTV_RELATIVE,
100 FUNC_LT_FPTR_RELATIVE,
101 FUNC_LT_DTP_MODULE,
102 FUNC_LT_DTP_RELATIVE,
103 FUNC_LT_TP_RELATIVE,
104 FUNC_IPLT_RELOC,
105 #ifdef TE_VMS
106 FUNC_SLOTCOUNT_RELOC,
107 #endif
108 };
109
110 enum reg_symbol
111 {
112 REG_GR = 0,
113 REG_FR = (REG_GR + 128),
114 REG_AR = (REG_FR + 128),
115 REG_CR = (REG_AR + 128),
116 REG_DAHR = (REG_CR + 128),
117 REG_P = (REG_DAHR + 8),
118 REG_BR = (REG_P + 64),
119 REG_IP = (REG_BR + 8),
120 REG_CFM,
121 REG_PR,
122 REG_PR_ROT,
123 REG_PSR,
124 REG_PSR_L,
125 REG_PSR_UM,
126 /* The following are pseudo-registers for use by gas only. */
127 IND_CPUID,
128 IND_DBR,
129 IND_DTR,
130 IND_ITR,
131 IND_IBR,
132 IND_MSR,
133 IND_PKR,
134 IND_PMC,
135 IND_PMD,
136 IND_DAHR,
137 IND_RR,
138 /* The following pseudo-registers are used for unwind directives only: */
139 REG_PSP,
140 REG_PRIUNAT,
141 REG_NUM
142 };
143
144 enum dynreg_type
145 {
146 DYNREG_GR = 0, /* dynamic general purpose register */
147 DYNREG_FR, /* dynamic floating point register */
148 DYNREG_PR, /* dynamic predicate register */
149 DYNREG_NUM_TYPES
150 };
151
152 enum operand_match_result
153 {
154 OPERAND_MATCH,
155 OPERAND_OUT_OF_RANGE,
156 OPERAND_MISMATCH
157 };
158
159 /* On the ia64, we can't know the address of a text label until the
160 instructions are packed into a bundle. To handle this, we keep
161 track of the list of labels that appear in front of each
162 instruction. */
163 struct label_fix
164 {
165 struct label_fix *next;
166 struct symbol *sym;
167 bfd_boolean dw2_mark_labels;
168 };
169
170 #ifdef TE_VMS
171 /* An internally used relocation. */
172 #define DUMMY_RELOC_IA64_SLOTCOUNT (BFD_RELOC_UNUSED + 1)
173 #endif
174
175 /* This is the endianness of the current section. */
176 extern int target_big_endian;
177
178 /* This is the default endianness. */
179 static int default_big_endian = TARGET_BYTES_BIG_ENDIAN;
180
181 void (*ia64_number_to_chars) (char *, valueT, int);
182
183 static void ia64_float_to_chars_bigendian (char *, LITTLENUM_TYPE *, int);
184 static void ia64_float_to_chars_littleendian (char *, LITTLENUM_TYPE *, int);
185
186 static void (*ia64_float_to_chars) (char *, LITTLENUM_TYPE *, int);
187
188 static struct hash_control *alias_hash;
189 static struct hash_control *alias_name_hash;
190 static struct hash_control *secalias_hash;
191 static struct hash_control *secalias_name_hash;
192
193 /* List of chars besides those in app.c:symbol_chars that can start an
194 operand. Used to prevent the scrubber eating vital white-space. */
195 const char ia64_symbol_chars[] = "@?";
196
197 /* Characters which always start a comment. */
198 const char comment_chars[] = "";
199
200 /* Characters which start a comment at the beginning of a line. */
201 const char line_comment_chars[] = "#";
202
203 /* Characters which may be used to separate multiple commands on a
204 single line. */
205 const char line_separator_chars[] = ";{}";
206
207 /* Characters which are used to indicate an exponent in a floating
208 point number. */
209 const char EXP_CHARS[] = "eE";
210
211 /* Characters which mean that a number is a floating point constant,
212 as in 0d1.0. */
213 const char FLT_CHARS[] = "rRsSfFdDxXpP";
214
215 /* ia64-specific option processing: */
216
217 const char *md_shortopts = "m:N:x::";
218
219 struct option md_longopts[] =
220 {
221 #define OPTION_MCONSTANT_GP (OPTION_MD_BASE + 1)
222 {"mconstant-gp", no_argument, NULL, OPTION_MCONSTANT_GP},
223 #define OPTION_MAUTO_PIC (OPTION_MD_BASE + 2)
224 {"mauto-pic", no_argument, NULL, OPTION_MAUTO_PIC}
225 };
226
227 size_t md_longopts_size = sizeof (md_longopts);
228
229 static struct
230 {
231 struct hash_control *pseudo_hash; /* pseudo opcode hash table */
232 struct hash_control *reg_hash; /* register name hash table */
233 struct hash_control *dynreg_hash; /* dynamic register hash table */
234 struct hash_control *const_hash; /* constant hash table */
235 struct hash_control *entry_hash; /* code entry hint hash table */
236
 237     /* If X_op is != O_absent, the register name for the instruction's
 238        qualifying predicate.  If NULL, p0 is assumed for instructions
 239        that can be predicated.  */
240 expressionS qp;
241
242 /* Optimize for which CPU. */
243 enum
244 {
245 itanium1,
246 itanium2
247 } tune;
248
249 /* What to do when hint.b is used. */
250 enum
251 {
252 hint_b_error,
253 hint_b_warning,
254 hint_b_ok
255 } hint_b;
256
257 unsigned int
258 manual_bundling : 1,
259 debug_dv: 1,
260 detect_dv: 1,
261 explicit_mode : 1, /* which mode we're in */
262 default_explicit_mode : 1, /* which mode is the default */
263 mode_explicitly_set : 1, /* was the current mode explicitly set? */
264 auto_align : 1,
265 keep_pending_output : 1;
266
267 /* What to do when something is wrong with unwind directives. */
268 enum
269 {
270 unwind_check_warning,
271 unwind_check_error
272 } unwind_check;
273
274 /* Each bundle consists of up to three instructions. We keep
 275      track of the four most recent instructions so we can correctly set
276 the end_of_insn_group for the last instruction in a bundle. */
277 int curr_slot;
278 int num_slots_in_use;
279 struct slot
280 {
281 unsigned int
282 end_of_insn_group : 1,
283 manual_bundling_on : 1,
284 manual_bundling_off : 1,
285 loc_directive_seen : 1;
286 signed char user_template; /* user-selected template, if any */
287 unsigned char qp_regno; /* qualifying predicate */
288 /* This duplicates a good fraction of "struct fix" but we
289 can't use a "struct fix" instead since we can't call
290 fix_new_exp() until we know the address of the instruction. */
291 int num_fixups;
292 struct insn_fix
293 {
294 bfd_reloc_code_real_type code;
295 enum ia64_opnd opnd; /* type of operand in need of fix */
296 unsigned int is_pcrel : 1; /* is operand pc-relative? */
297 expressionS expr; /* the value to be inserted */
298 }
299 fixup[2]; /* at most two fixups per insn */
300 struct ia64_opcode *idesc;
301 struct label_fix *label_fixups;
302 struct label_fix *tag_fixups;
303 struct unw_rec_list *unwind_record; /* Unwind directive. */
304 expressionS opnd[6];
305 const char *src_file;
306 unsigned int src_line;
307 struct dwarf2_line_info debug_line;
308 }
309 slot[NUM_SLOTS];
310
311 segT last_text_seg;
312
313 struct dynreg
314 {
315 struct dynreg *next; /* next dynamic register */
316 const char *name;
317 unsigned short base; /* the base register number */
318 unsigned short num_regs; /* # of registers in this set */
319 }
320 *dynreg[DYNREG_NUM_TYPES], in, loc, out, rot;
321
322 flagword flags; /* ELF-header flags */
323
324 struct mem_offset {
325 unsigned hint:1; /* is this hint currently valid? */
326 bfd_vma offset; /* mem.offset offset */
327 bfd_vma base; /* mem.offset base */
328 } mem_offset;
329
330 int path; /* number of alt. entry points seen */
331 const char **entry_labels; /* labels of all alternate paths in
332 the current DV-checking block. */
333 int maxpaths; /* size currently allocated for
334 entry_labels */
335
336 int pointer_size; /* size in bytes of a pointer */
337 int pointer_size_shift; /* shift size of a pointer for alignment */
338
339 symbolS *indregsym[IND_RR - IND_CPUID + 1];
340 }
341 md;
342
343 /* These are not const, because they are modified to MMI for non-itanium1
344 targets below. */
345 /* MFI bundle of nops. */
346 static unsigned char le_nop[16] =
347 {
348 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
349 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00
350 };
351 /* MFI bundle of nops with stop-bit. */
352 static unsigned char le_nop_stop[16] =
353 {
354 0x0d, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
355 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00
356 };
357
358 /* application registers: */
359
360 #define AR_K0 0
361 #define AR_K7 7
362 #define AR_RSC 16
363 #define AR_BSP 17
364 #define AR_BSPSTORE 18
365 #define AR_RNAT 19
366 #define AR_FCR 21
367 #define AR_EFLAG 24
368 #define AR_CSD 25
369 #define AR_SSD 26
370 #define AR_CFLG 27
371 #define AR_FSR 28
372 #define AR_FIR 29
373 #define AR_FDR 30
374 #define AR_CCV 32
375 #define AR_UNAT 36
376 #define AR_FPSR 40
377 #define AR_ITC 44
378 #define AR_RUC 45
379 #define AR_PFS 64
380 #define AR_LC 65
381 #define AR_EC 66
382
383 static const struct
384 {
385 const char *name;
386 unsigned int regnum;
387 }
388 ar[] =
389 {
390 {"ar.k0", AR_K0}, {"ar.k1", AR_K0 + 1},
391 {"ar.k2", AR_K0 + 2}, {"ar.k3", AR_K0 + 3},
392 {"ar.k4", AR_K0 + 4}, {"ar.k5", AR_K0 + 5},
393 {"ar.k6", AR_K0 + 6}, {"ar.k7", AR_K7},
394 {"ar.rsc", AR_RSC}, {"ar.bsp", AR_BSP},
395 {"ar.bspstore", AR_BSPSTORE}, {"ar.rnat", AR_RNAT},
396 {"ar.fcr", AR_FCR}, {"ar.eflag", AR_EFLAG},
397 {"ar.csd", AR_CSD}, {"ar.ssd", AR_SSD},
398 {"ar.cflg", AR_CFLG}, {"ar.fsr", AR_FSR},
399 {"ar.fir", AR_FIR}, {"ar.fdr", AR_FDR},
400 {"ar.ccv", AR_CCV}, {"ar.unat", AR_UNAT},
401 {"ar.fpsr", AR_FPSR}, {"ar.itc", AR_ITC},
402 {"ar.ruc", AR_RUC}, {"ar.pfs", AR_PFS},
403 {"ar.lc", AR_LC}, {"ar.ec", AR_EC},
404 };
405
406 /* control registers: */
407
408 #define CR_DCR 0
409 #define CR_ITM 1
410 #define CR_IVA 2
411 #define CR_PTA 8
412 #define CR_GPTA 9
413 #define CR_IPSR 16
414 #define CR_ISR 17
415 #define CR_IIP 19
416 #define CR_IFA 20
417 #define CR_ITIR 21
418 #define CR_IIPA 22
419 #define CR_IFS 23
420 #define CR_IIM 24
421 #define CR_IHA 25
422 #define CR_IIB0 26
423 #define CR_IIB1 27
424 #define CR_LID 64
425 #define CR_IVR 65
426 #define CR_TPR 66
427 #define CR_EOI 67
428 #define CR_IRR0 68
429 #define CR_IRR3 71
430 #define CR_ITV 72
431 #define CR_PMV 73
432 #define CR_CMCV 74
433 #define CR_LRR0 80
434 #define CR_LRR1 81
435
436 static const struct
437 {
438 const char *name;
439 unsigned int regnum;
440 }
441 cr[] =
442 {
443 {"cr.dcr", CR_DCR},
444 {"cr.itm", CR_ITM},
445 {"cr.iva", CR_IVA},
446 {"cr.pta", CR_PTA},
447 {"cr.gpta", CR_GPTA},
448 {"cr.ipsr", CR_IPSR},
449 {"cr.isr", CR_ISR},
450 {"cr.iip", CR_IIP},
451 {"cr.ifa", CR_IFA},
452 {"cr.itir", CR_ITIR},
453 {"cr.iipa", CR_IIPA},
454 {"cr.ifs", CR_IFS},
455 {"cr.iim", CR_IIM},
456 {"cr.iha", CR_IHA},
457 {"cr.iib0", CR_IIB0},
458 {"cr.iib1", CR_IIB1},
459 {"cr.lid", CR_LID},
460 {"cr.ivr", CR_IVR},
461 {"cr.tpr", CR_TPR},
462 {"cr.eoi", CR_EOI},
463 {"cr.irr0", CR_IRR0},
464 {"cr.irr1", CR_IRR0 + 1},
465 {"cr.irr2", CR_IRR0 + 2},
466 {"cr.irr3", CR_IRR3},
467 {"cr.itv", CR_ITV},
468 {"cr.pmv", CR_PMV},
469 {"cr.cmcv", CR_CMCV},
470 {"cr.lrr0", CR_LRR0},
471 {"cr.lrr1", CR_LRR1}
472 };
473
474 #define PSR_MFL 4
475 #define PSR_IC 13
476 #define PSR_DFL 18
477 #define PSR_CPL 32
478
479 static const struct const_desc
480 {
481 const char *name;
482 valueT value;
483 }
484 const_bits[] =
485 {
486 /* PSR constant masks: */
487
488 /* 0: reserved */
489 {"psr.be", ((valueT) 1) << 1},
490 {"psr.up", ((valueT) 1) << 2},
491 {"psr.ac", ((valueT) 1) << 3},
492 {"psr.mfl", ((valueT) 1) << 4},
493 {"psr.mfh", ((valueT) 1) << 5},
494 /* 6-12: reserved */
495 {"psr.ic", ((valueT) 1) << 13},
496 {"psr.i", ((valueT) 1) << 14},
497 {"psr.pk", ((valueT) 1) << 15},
498 /* 16: reserved */
499 {"psr.dt", ((valueT) 1) << 17},
500 {"psr.dfl", ((valueT) 1) << 18},
501 {"psr.dfh", ((valueT) 1) << 19},
502 {"psr.sp", ((valueT) 1) << 20},
503 {"psr.pp", ((valueT) 1) << 21},
504 {"psr.di", ((valueT) 1) << 22},
505 {"psr.si", ((valueT) 1) << 23},
506 {"psr.db", ((valueT) 1) << 24},
507 {"psr.lp", ((valueT) 1) << 25},
508 {"psr.tb", ((valueT) 1) << 26},
509 {"psr.rt", ((valueT) 1) << 27},
510 /* 28-31: reserved */
511 /* 32-33: cpl (current privilege level) */
512 {"psr.is", ((valueT) 1) << 34},
513 {"psr.mc", ((valueT) 1) << 35},
514 {"psr.it", ((valueT) 1) << 36},
515 {"psr.id", ((valueT) 1) << 37},
516 {"psr.da", ((valueT) 1) << 38},
517 {"psr.dd", ((valueT) 1) << 39},
518 {"psr.ss", ((valueT) 1) << 40},
519 /* 41-42: ri (restart instruction) */
520 {"psr.ed", ((valueT) 1) << 43},
521 {"psr.bn", ((valueT) 1) << 44},
522 };
523
524 /* indirect register-sets/memory: */
525
526 static const struct
527 {
528 const char *name;
529 unsigned int regnum;
530 }
531 indirect_reg[] =
532 {
533 { "CPUID", IND_CPUID },
534 { "cpuid", IND_CPUID },
535 { "dbr", IND_DBR },
536 { "dtr", IND_DTR },
537 { "itr", IND_ITR },
538 { "ibr", IND_IBR },
539 { "msr", IND_MSR },
540 { "pkr", IND_PKR },
541 { "pmc", IND_PMC },
542 { "pmd", IND_PMD },
543 { "dahr", IND_DAHR },
544 { "rr", IND_RR },
545 };
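/* Illustrative use: an operand such as pmc[r3] names the "pmc" indirect
   register file above, indexed by general register r3.  */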
546
547 /* Pseudo functions used to indicate relocation types (these functions
 548    start with an at sign (@)).  */
549 static struct
550 {
551 const char *name;
552 enum pseudo_type
553 {
554 PSEUDO_FUNC_NONE,
555 PSEUDO_FUNC_RELOC,
556 PSEUDO_FUNC_CONST,
557 PSEUDO_FUNC_REG,
558 PSEUDO_FUNC_FLOAT
559 }
560 type;
561 union
562 {
563 unsigned long ival;
564 symbolS *sym;
565 }
566 u;
567 }
568 pseudo_func[] =
569 {
570 /* reloc pseudo functions (these must come first!): */
571 { "dtpmod", PSEUDO_FUNC_RELOC, { 0 } },
572 { "dtprel", PSEUDO_FUNC_RELOC, { 0 } },
573 { "fptr", PSEUDO_FUNC_RELOC, { 0 } },
574 { "gprel", PSEUDO_FUNC_RELOC, { 0 } },
575 { "ltoff", PSEUDO_FUNC_RELOC, { 0 } },
576 { "ltoffx", PSEUDO_FUNC_RELOC, { 0 } },
577 { "pcrel", PSEUDO_FUNC_RELOC, { 0 } },
578 { "pltoff", PSEUDO_FUNC_RELOC, { 0 } },
579 { "secrel", PSEUDO_FUNC_RELOC, { 0 } },
580 { "segrel", PSEUDO_FUNC_RELOC, { 0 } },
581 { "tprel", PSEUDO_FUNC_RELOC, { 0 } },
582 { "ltv", PSEUDO_FUNC_RELOC, { 0 } },
583 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_FPTR_RELATIVE */
584 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_MODULE */
585 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_RELATIVE */
586 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_TP_RELATIVE */
587 { "iplt", PSEUDO_FUNC_RELOC, { 0 } },
588 #ifdef TE_VMS
589 { "slotcount", PSEUDO_FUNC_RELOC, { 0 } },
590 #endif
591
592 /* mbtype4 constants: */
593 { "alt", PSEUDO_FUNC_CONST, { 0xa } },
594 { "brcst", PSEUDO_FUNC_CONST, { 0x0 } },
595 { "mix", PSEUDO_FUNC_CONST, { 0x8 } },
596 { "rev", PSEUDO_FUNC_CONST, { 0xb } },
597 { "shuf", PSEUDO_FUNC_CONST, { 0x9 } },
598
599 /* fclass constants: */
600 { "nat", PSEUDO_FUNC_CONST, { 0x100 } },
601 { "qnan", PSEUDO_FUNC_CONST, { 0x080 } },
602 { "snan", PSEUDO_FUNC_CONST, { 0x040 } },
603 { "pos", PSEUDO_FUNC_CONST, { 0x001 } },
604 { "neg", PSEUDO_FUNC_CONST, { 0x002 } },
605 { "zero", PSEUDO_FUNC_CONST, { 0x004 } },
606 { "unorm", PSEUDO_FUNC_CONST, { 0x008 } },
607 { "norm", PSEUDO_FUNC_CONST, { 0x010 } },
608 { "inf", PSEUDO_FUNC_CONST, { 0x020 } },
609
610 { "natval", PSEUDO_FUNC_CONST, { 0x100 } }, /* old usage */
611
612 /* hint constants: */
613 { "pause", PSEUDO_FUNC_CONST, { 0x0 } },
614 { "priority", PSEUDO_FUNC_CONST, { 0x1 } },
615
616 /* tf constants: */
617 { "clz", PSEUDO_FUNC_CONST, { 32 } },
618 { "mpy", PSEUDO_FUNC_CONST, { 33 } },
619 { "datahints", PSEUDO_FUNC_CONST, { 34 } },
620
621 /* unwind-related constants: */
622 { "svr4", PSEUDO_FUNC_CONST, { ELFOSABI_NONE } },
623 { "hpux", PSEUDO_FUNC_CONST, { ELFOSABI_HPUX } },
624 { "nt", PSEUDO_FUNC_CONST, { 2 } }, /* conflicts w/ELFOSABI_NETBSD */
625 { "linux", PSEUDO_FUNC_CONST, { ELFOSABI_GNU } },
626 { "freebsd", PSEUDO_FUNC_CONST, { ELFOSABI_FREEBSD } },
627 { "openvms", PSEUDO_FUNC_CONST, { ELFOSABI_OPENVMS } },
628 { "nsk", PSEUDO_FUNC_CONST, { ELFOSABI_NSK } },
629
630 /* unwind-related registers: */
631 { "priunat",PSEUDO_FUNC_REG, { REG_PRIUNAT } }
632 };
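/* Illustrative use: an operand written as @gprel(foo) in the source selects
   the "gprel" entry above (matching FUNC_GP_RELATIVE) and eventually yields
   a gp-relative relocation; @ltoff(foo) likewise selects "ltoff".  */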
633
634 /* 41-bit nop opcodes (one per unit): */
635 static const bfd_vma nop[IA64_NUM_UNITS] =
636 {
637 0x0000000000LL, /* NIL => break 0 */
638 0x0008000000LL, /* I-unit nop */
639 0x0008000000LL, /* M-unit nop */
640 0x4000000000LL, /* B-unit nop */
641 0x0008000000LL, /* F-unit nop */
642 0x0000000000LL, /* L-"unit" nop immediate */
643 0x0008000000LL, /* X-unit nop */
644 };
645
646 /* Can't be `const' as it's passed to input routines (which have the
 647    habit of setting temporary sentinels).  */
648 static char special_section_name[][20] =
649 {
650 {".bss"}, {".sbss"}, {".sdata"}, {".rodata"}, {".comment"},
651 {".IA_64.unwind"}, {".IA_64.unwind_info"},
652 {".init_array"}, {".fini_array"}
653 };
654
655 /* The best template for a particular sequence of up to three
656 instructions: */
657 #define N IA64_NUM_TYPES
658 static unsigned char best_template[N][N][N];
659 #undef N
660
661 /* Resource dependencies currently in effect */
662 static struct rsrc {
663 int depind; /* dependency index */
664 const struct ia64_dependency *dependency; /* actual dependency */
665 unsigned specific:1, /* is this a specific bit/regno? */
666 link_to_qp_branch:1; /* will a branch on the same QP clear it?*/
667 int index; /* specific regno/bit within dependency */
668 int note; /* optional qualifying note (0 if none) */
669 #define STATE_NONE 0
670 #define STATE_STOP 1
671 #define STATE_SRLZ 2
672 int insn_srlz; /* current insn serialization state */
673 int data_srlz; /* current data serialization state */
674 int qp_regno; /* qualifying predicate for this usage */
675 const char *file; /* what file marked this dependency */
676 unsigned int line; /* what line marked this dependency */
677 struct mem_offset mem_offset; /* optional memory offset hint */
678 enum { CMP_NONE, CMP_OR, CMP_AND } cmp_type; /* OR or AND compare? */
679 int path; /* corresponding code entry index */
680 } *regdeps = NULL;
681 static int regdepslen = 0;
682 static int regdepstotlen = 0;
683 static const char *dv_mode[] = { "RAW", "WAW", "WAR" };
684 static const char *dv_sem[] = { "none", "implied", "impliedf",
685 "data", "instr", "specific", "stop", "other" };
686 static const char *dv_cmp_type[] = { "none", "OR", "AND" };
687
688 /* Current state of PR mutexation */
689 static struct qpmutex {
690 valueT prmask;
691 int path;
692 } *qp_mutexes = NULL; /* QP mutex bitmasks */
693 static int qp_mutexeslen = 0;
694 static int qp_mutexestotlen = 0;
695 static valueT qp_safe_across_calls = 0;
696
697 /* Current state of PR implications */
698 static struct qp_imply {
699 unsigned p1:6;
700 unsigned p2:6;
701 unsigned p2_branched:1;
702 int path;
703 } *qp_implies = NULL;
704 static int qp_implieslen = 0;
705 static int qp_impliestotlen = 0;
706
707 /* Keep track of static GR values so that indirect register usage can
708 sometimes be tracked. */
709 static struct gr {
710 unsigned known:1;
711 int path;
712 valueT value;
713 } gr_values[128] = {
714 {
715 1,
716 #ifdef INT_MAX
717 INT_MAX,
718 #else
719 (((1 << (8 * sizeof(gr_values->path) - 2)) - 1) << 1) + 1,
720 #endif
721 0
722 }
723 };
724
725 /* Remember the alignment frag. */
726 static fragS *align_frag;
727
728 /* These are the routines required to output the various types of
729 unwind records. */
730
731 /* A slot_number is a frag address plus the slot index (0-2). We use the
732 frag address here so that if there is a section switch in the middle of
733 a function, then instructions emitted to a different section are not
734 counted. Since there may be more than one frag for a function, this
735 means we also need to keep track of which frag this address belongs to
736 so we can compute inter-frag distances. This also nicely solves the
737 problem with nops emitted for align directives, which can't easily be
738 counted, but can easily be derived from frag sizes. */
739
740 typedef struct unw_rec_list {
741 unwind_record r;
742 unsigned long slot_number;
743 fragS *slot_frag;
744 struct unw_rec_list *next;
745 } unw_rec_list;
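/* Sketch (following the comment above): a record attached to the third slot
   of a bundle whose frag starts at address A gets slot_number == A + 2,
   while slot_frag records which frag A belongs to so that distances spanning
   several frags can still be computed.  */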
746
747 #define SLOT_NUM_NOT_SET (unsigned)-1
748
749 /* Linked list of saved prologue counts. A very poor
750 implementation of a map from label numbers to prologue counts. */
751 typedef struct label_prologue_count
752 {
753 struct label_prologue_count *next;
754 unsigned long label_number;
755 unsigned int prologue_count;
756 } label_prologue_count;
757
758 typedef struct proc_pending
759 {
760 symbolS *sym;
761 struct proc_pending *next;
762 } proc_pending;
763
764 static struct
765 {
766 /* Maintain a list of unwind entries for the current function. */
767 unw_rec_list *list;
768 unw_rec_list *tail;
769
770 /* Any unwind entries that should be attached to the current slot
771 that an insn is being constructed for. */
772 unw_rec_list *current_entry;
773
774 /* These are used to create the unwind table entry for this function. */
775 proc_pending proc_pending;
776 symbolS *info; /* pointer to unwind info */
777 symbolS *personality_routine;
778 segT saved_text_seg;
779 subsegT saved_text_subseg;
780 unsigned int force_unwind_entry : 1; /* force generation of unwind entry? */
781
782 /* TRUE if processing unwind directives in a prologue region. */
783 unsigned int prologue : 1;
784 unsigned int prologue_mask : 4;
785 unsigned int prologue_gr : 7;
786 unsigned int body : 1;
787 unsigned int insn : 1;
788 unsigned int prologue_count; /* number of .prologues seen so far */
789 /* Prologue counts at previous .label_state directives. */
790 struct label_prologue_count * saved_prologue_counts;
791
792 /* List of split up .save-s. */
793 unw_p_record *pending_saves;
794 } unwind;
795
 796 /* The input value is a negated offset from psp, and specifies an address
 797    psp - offset, which is encoded as a value E with psp - offset == psp + 16 - (4 * E).
 798    Thus we must add 16 and divide by 4 to get the encoded value.  */
799
800 #define ENCODED_PSP_OFFSET(OFFSET) (((OFFSET) + 16) / 4)
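/* Worked example: a value saved at psp - 32 is passed in as OFFSET == 32,
   so ENCODED_PSP_OFFSET (32) == (32 + 16) / 4 == 12.  */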
801
802 typedef void (*vbyte_func) (int, char *, char *);
803
804 /* Forward declarations: */
805 static void dot_alias (int);
806 static int parse_operand_and_eval (expressionS *, int);
807 static void emit_one_bundle (void);
808 static bfd_reloc_code_real_type ia64_gen_real_reloc_type (struct symbol *,
809 bfd_reloc_code_real_type);
810 static void insn_group_break (int, int, int);
811 static void add_qp_mutex (valueT);
812 static void add_qp_imply (int, int);
813 static void clear_qp_mutex (valueT);
814 static void clear_qp_implies (valueT, valueT);
815 static void print_dependency (const char *, int);
816 static void instruction_serialization (void);
817 static void data_serialization (void);
818 static void output_R3_format (vbyte_func, unw_record_type, unsigned long);
819 static void output_B3_format (vbyte_func, unsigned long, unsigned long);
820 static void output_B4_format (vbyte_func, unw_record_type, unsigned long);
821 static void free_saved_prologue_counts (void);
822
823 /* Determine if application register REGNUM resides only in the integer
824 unit (as opposed to the memory unit). */
825 static int
826 ar_is_only_in_integer_unit (int reg)
827 {
828 reg -= REG_AR;
829 return reg >= 64 && reg <= 111;
830 }
831
832 /* Determine if application register REGNUM resides only in the memory
833 unit (as opposed to the integer unit). */
834 static int
835 ar_is_only_in_memory_unit (int reg)
836 {
837 reg -= REG_AR;
838 return reg >= 0 && reg <= 47;
839 }
840
841 /* Switch to section NAME and create section if necessary. It's
842 rather ugly that we have to manipulate input_line_pointer but I
843 don't see any other way to accomplish the same thing without
844 changing obj-elf.c (which may be the Right Thing, in the end). */
845 static void
846 set_section (char *name)
847 {
848 char *saved_input_line_pointer;
849
850 saved_input_line_pointer = input_line_pointer;
851 input_line_pointer = name;
852 obj_elf_section (0);
853 input_line_pointer = saved_input_line_pointer;
854 }
855
856 /* Map 's' to SHF_IA_64_SHORT. */
857
858 bfd_vma
859 ia64_elf_section_letter (int letter, const char **ptr_msg)
860 {
861 if (letter == 's')
862 return SHF_IA_64_SHORT;
863 else if (letter == 'o')
864 return SHF_LINK_ORDER;
865 #ifdef TE_VMS
866 else if (letter == 'O')
867 return SHF_IA_64_VMS_OVERLAID;
868 else if (letter == 'g')
869 return SHF_IA_64_VMS_GLOBAL;
870 #endif
871
872 *ptr_msg = _("bad .section directive: want a,o,s,w,x,M,S,G,T in string");
873 return -1;
874 }
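/* Illustrative use: a directive such as
     .section .sdata1, "aws", @progbits
   passes the target-specific letter 's' through this function, yielding
   SHF_IA_64_SHORT, which ia64_elf_section_flags below maps to
   SEC_SMALL_DATA.  */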
875
876 /* Map SHF_IA_64_SHORT to SEC_SMALL_DATA. */
877
878 flagword
879 ia64_elf_section_flags (flagword flags,
880 bfd_vma attr,
881 int type ATTRIBUTE_UNUSED)
882 {
883 if (attr & SHF_IA_64_SHORT)
884 flags |= SEC_SMALL_DATA;
885 return flags;
886 }
887
888 int
889 ia64_elf_section_type (const char *str, size_t len)
890 {
891 #define STREQ(s) ((len == sizeof (s) - 1) && (strncmp (str, s, sizeof (s) - 1) == 0))
892
893 if (STREQ (ELF_STRING_ia64_unwind_info))
894 return SHT_PROGBITS;
895
896 if (STREQ (ELF_STRING_ia64_unwind_info_once))
897 return SHT_PROGBITS;
898
899 if (STREQ (ELF_STRING_ia64_unwind))
900 return SHT_IA_64_UNWIND;
901
902 if (STREQ (ELF_STRING_ia64_unwind_once))
903 return SHT_IA_64_UNWIND;
904
905 if (STREQ ("unwind"))
906 return SHT_IA_64_UNWIND;
907
908 return -1;
909 #undef STREQ
910 }
911
912 static unsigned int
913 set_regstack (unsigned int ins,
914 unsigned int locs,
915 unsigned int outs,
916 unsigned int rots)
917 {
918 /* Size of frame. */
919 unsigned int sof;
920
921 sof = ins + locs + outs;
922 if (sof > 96)
923 {
924 as_bad (_("Size of frame exceeds maximum of 96 registers"));
925 return 0;
926 }
927 if (rots > sof)
928 {
929 as_warn (_("Size of rotating registers exceeds frame size"));
930 return 0;
931 }
932 md.in.base = REG_GR + 32;
933 md.loc.base = md.in.base + ins;
934 md.out.base = md.loc.base + locs;
935
936 md.in.num_regs = ins;
937 md.loc.num_regs = locs;
938 md.out.num_regs = outs;
939 md.rot.num_regs = rots;
940 return sof;
941 }
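/* Illustrative call (values are hypothetical): set_regstack (2, 3, 4, 0)
   describes a frame of sof == 9 registers, with the input registers based
   at r32, locals at r34, outputs at r37, and no rotating registers.  */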
942
943 void
944 ia64_flush_insns (void)
945 {
946 struct label_fix *lfix;
947 segT saved_seg;
948 subsegT saved_subseg;
949 unw_rec_list *ptr;
950 bfd_boolean mark;
951
952 if (!md.last_text_seg)
953 return;
954
955 saved_seg = now_seg;
956 saved_subseg = now_subseg;
957
958 subseg_set (md.last_text_seg, 0);
959
960 while (md.num_slots_in_use > 0)
961 emit_one_bundle (); /* force out queued instructions */
962
963 /* In case there are labels following the last instruction, resolve
964 those now. */
965 mark = FALSE;
966 for (lfix = CURR_SLOT.label_fixups; lfix; lfix = lfix->next)
967 {
968 symbol_set_value_now (lfix->sym);
969 mark |= lfix->dw2_mark_labels;
970 }
971 if (mark)
972 {
973 dwarf2_where (&CURR_SLOT.debug_line);
974 CURR_SLOT.debug_line.flags |= DWARF2_FLAG_BASIC_BLOCK;
975 dwarf2_gen_line_info (frag_now_fix (), &CURR_SLOT.debug_line);
976 dwarf2_consume_line_info ();
977 }
978 CURR_SLOT.label_fixups = 0;
979
980 for (lfix = CURR_SLOT.tag_fixups; lfix; lfix = lfix->next)
981 symbol_set_value_now (lfix->sym);
982 CURR_SLOT.tag_fixups = 0;
983
984 /* In case there are unwind directives following the last instruction,
985 resolve those now. We only handle prologue, body, and endp directives
986 here. Give an error for others. */
987 for (ptr = unwind.current_entry; ptr; ptr = ptr->next)
988 {
989 switch (ptr->r.type)
990 {
991 case prologue:
992 case prologue_gr:
993 case body:
994 case endp:
995 ptr->slot_number = (unsigned long) frag_more (0);
996 ptr->slot_frag = frag_now;
997 break;
998
999 /* Allow any record which doesn't have a "t" field (i.e.,
1000 doesn't relate to a particular instruction). */
1001 case unwabi:
1002 case br_gr:
1003 case copy_state:
1004 case fr_mem:
1005 case frgr_mem:
1006 case gr_gr:
1007 case gr_mem:
1008 case label_state:
1009 case rp_br:
1010 case spill_base:
1011 case spill_mask:
1012 /* nothing */
1013 break;
1014
1015 default:
1016 as_bad (_("Unwind directive not followed by an instruction."));
1017 break;
1018 }
1019 }
1020 unwind.current_entry = NULL;
1021
1022 subseg_set (saved_seg, saved_subseg);
1023
1024 if (md.qp.X_op == O_register)
1025 as_bad (_("qualifying predicate not followed by instruction"));
1026 }
1027
1028 void
1029 ia64_cons_align (int nbytes)
1030 {
1031 if (md.auto_align)
1032 {
1033 int log;
1034 for (log = 0; (nbytes & 1) != 1; nbytes >>= 1)
1035 log++;
1036
1037 do_align (log, NULL, 0, 0);
1038 }
1039 }
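/* Example: ia64_cons_align (8) computes log == 3 (8 == 1 << 3) and, when
   automatic alignment is enabled, requests 8-byte alignment via
   do_align (3, NULL, 0, 0).  */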
1040
1041 #ifdef TE_VMS
1042
1043 /* .vms_common section, symbol, size, alignment */
1044
1045 static void
1046 obj_elf_vms_common (int ignore ATTRIBUTE_UNUSED)
1047 {
1048 const char *sec_name;
1049 char *sym_name;
1050 char c;
1051 offsetT size;
1052 offsetT cur_size;
1053 offsetT temp;
1054 symbolS *symbolP;
1055 segT current_seg = now_seg;
1056 subsegT current_subseg = now_subseg;
1057 offsetT log_align;
1058
1059 /* Section name. */
1060 sec_name = obj_elf_section_name ();
1061 if (sec_name == NULL)
1062 return;
1063
1064 /* Symbol name. */
1065 SKIP_WHITESPACE ();
1066 if (*input_line_pointer == ',')
1067 {
1068 input_line_pointer++;
1069 SKIP_WHITESPACE ();
1070 }
1071 else
1072 {
1073 as_bad (_("expected ',' after section name"));
1074 ignore_rest_of_line ();
1075 return;
1076 }
1077
1078 c = get_symbol_name (&sym_name);
1079
1080 if (input_line_pointer == sym_name)
1081 {
1082 (void) restore_line_pointer (c);
1083 as_bad (_("expected symbol name"));
1084 ignore_rest_of_line ();
1085 return;
1086 }
1087
1088 symbolP = symbol_find_or_make (sym_name);
1089 (void) restore_line_pointer (c);
1090
1091 if ((S_IS_DEFINED (symbolP) || symbol_equated_p (symbolP))
1092 && !S_IS_COMMON (symbolP))
1093 {
1094 as_bad (_("Ignoring attempt to re-define symbol"));
1095 ignore_rest_of_line ();
1096 return;
1097 }
1098
1099 /* Symbol size. */
1100 SKIP_WHITESPACE ();
1101 if (*input_line_pointer == ',')
1102 {
1103 input_line_pointer++;
1104 SKIP_WHITESPACE ();
1105 }
1106 else
1107 {
1108 as_bad (_("expected ',' after symbol name"));
1109 ignore_rest_of_line ();
1110 return;
1111 }
1112
1113 temp = get_absolute_expression ();
1114 size = temp;
1115 size &= ((offsetT) 2 << (stdoutput->arch_info->bits_per_address - 1)) - 1;
1116 if (temp != size)
1117 {
1118 as_warn (_("size (%ld) out of range, ignored"), (long) temp);
1119 ignore_rest_of_line ();
1120 return;
1121 }
1122
1123 /* Alignment. */
1124 SKIP_WHITESPACE ();
1125 if (*input_line_pointer == ',')
1126 {
1127 input_line_pointer++;
1128 SKIP_WHITESPACE ();
1129 }
1130 else
1131 {
1132 as_bad (_("expected ',' after symbol size"));
1133 ignore_rest_of_line ();
1134 return;
1135 }
1136
1137 log_align = get_absolute_expression ();
1138
1139 demand_empty_rest_of_line ();
1140
1141 obj_elf_change_section
1142 (sec_name, SHT_NOBITS,
1143 SHF_ALLOC | SHF_WRITE | SHF_IA_64_VMS_OVERLAID | SHF_IA_64_VMS_GLOBAL,
1144 0, NULL, 1, 0);
1145
1146 S_SET_VALUE (symbolP, 0);
1147 S_SET_SIZE (symbolP, size);
1148 S_SET_EXTERNAL (symbolP);
1149 S_SET_SEGMENT (symbolP, now_seg);
1150
1151 symbol_get_bfdsym (symbolP)->flags |= BSF_OBJECT;
1152
1153 record_alignment (now_seg, log_align);
1154
1155 cur_size = bfd_section_size (stdoutput, now_seg);
1156 if ((int) size > cur_size)
1157 {
1158 char *pfrag
1159 = frag_var (rs_fill, 1, 1, (relax_substateT)0, NULL,
1160 (valueT)size - (valueT)cur_size, NULL);
1161 *pfrag = 0;
1162 bfd_section_size (stdoutput, now_seg) = size;
1163 }
1164
1165 /* Switch back to current segment. */
1166 subseg_set (current_seg, current_subseg);
1167
1168 #ifdef md_elf_section_change_hook
1169 md_elf_section_change_hook ();
1170 #endif
1171 }
1172
1173 #endif /* TE_VMS */
1174
1175 /* Output COUNT bytes to a memory location. */
1176 static char *vbyte_mem_ptr = NULL;
1177
1178 static void
1179 output_vbyte_mem (int count, char *ptr, char *comment ATTRIBUTE_UNUSED)
1180 {
1181 int x;
1182 if (vbyte_mem_ptr == NULL)
1183 abort ();
1184
1185 if (count == 0)
1186 return;
1187 for (x = 0; x < count; x++)
1188 *(vbyte_mem_ptr++) = ptr[x];
1189 }
1190
1191 /* Count the number of bytes required for records. */
1192 static int vbyte_count = 0;
1193 static void
1194 count_output (int count,
1195 char *ptr ATTRIBUTE_UNUSED,
1196 char *comment ATTRIBUTE_UNUSED)
1197 {
1198 vbyte_count += count;
1199 }
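/* Both routines above match the vbyte_func typedef: count_output merely
   sizes the records (accumulating vbyte_count), while output_vbyte_mem
   emits the same bytes through vbyte_mem_ptr, which must have been set up
   beforehand (it aborts otherwise).  */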
1200
1201 static void
1202 output_R1_format (vbyte_func f, unw_record_type rtype, int rlen)
1203 {
1204 int r = 0;
1205 char byte;
1206 if (rlen > 0x1f)
1207 {
1208 output_R3_format (f, rtype, rlen);
1209 return;
1210 }
1211
1212 if (rtype == body)
1213 r = 1;
1214 else if (rtype != prologue)
1215 as_bad (_("record type is not valid"));
1216
1217 byte = UNW_R1 | (r << 5) | (rlen & 0x1f);
1218 (*f) (1, &byte, NULL);
1219 }
1220
1221 static void
1222 output_R2_format (vbyte_func f, int mask, int grsave, unsigned long rlen)
1223 {
1224 char bytes[20];
1225 int count = 2;
1226 mask = (mask & 0x0f);
1227 grsave = (grsave & 0x7f);
1228
1229 bytes[0] = (UNW_R2 | (mask >> 1));
1230 bytes[1] = (((mask & 0x01) << 7) | grsave);
1231 count += output_leb128 (bytes + 2, rlen, 0);
1232 (*f) (count, bytes, NULL);
1233 }
1234
1235 static void
1236 output_R3_format (vbyte_func f, unw_record_type rtype, unsigned long rlen)
1237 {
1238 int r = 0, count;
1239 char bytes[20];
1240 if (rlen <= 0x1f)
1241 {
1242 output_R1_format (f, rtype, rlen);
1243 return;
1244 }
1245
1246 if (rtype == body)
1247 r = 1;
1248 else if (rtype != prologue)
1249 as_bad (_("record type is not valid"));
1250 bytes[0] = (UNW_R3 | r);
1251 count = output_leb128 (bytes + 1, rlen, 0);
1252 (*f) (count + 1, bytes, NULL);
1253 }
1254
1255 static void
1256 output_P1_format (vbyte_func f, int brmask)
1257 {
1258 char byte;
1259 byte = UNW_P1 | (brmask & 0x1f);
1260 (*f) (1, &byte, NULL);
1261 }
1262
1263 static void
1264 output_P2_format (vbyte_func f, int brmask, int gr)
1265 {
1266 char bytes[2];
1267 brmask = (brmask & 0x1f);
1268 bytes[0] = UNW_P2 | (brmask >> 1);
1269 bytes[1] = (((brmask & 1) << 7) | gr);
1270 (*f) (2, bytes, NULL);
1271 }
1272
1273 static void
1274 output_P3_format (vbyte_func f, unw_record_type rtype, int reg)
1275 {
1276 char bytes[2];
1277 int r = 0;
1278 reg = (reg & 0x7f);
1279 switch (rtype)
1280 {
1281 case psp_gr:
1282 r = 0;
1283 break;
1284 case rp_gr:
1285 r = 1;
1286 break;
1287 case pfs_gr:
1288 r = 2;
1289 break;
1290 case preds_gr:
1291 r = 3;
1292 break;
1293 case unat_gr:
1294 r = 4;
1295 break;
1296 case lc_gr:
1297 r = 5;
1298 break;
1299 case rp_br:
1300 r = 6;
1301 break;
1302 case rnat_gr:
1303 r = 7;
1304 break;
1305 case bsp_gr:
1306 r = 8;
1307 break;
1308 case bspstore_gr:
1309 r = 9;
1310 break;
1311 case fpsr_gr:
1312 r = 10;
1313 break;
1314 case priunat_gr:
1315 r = 11;
1316 break;
1317 default:
1318 as_bad (_("Invalid record type for P3 format."));
1319 }
1320 bytes[0] = (UNW_P3 | (r >> 1));
1321 bytes[1] = (((r & 1) << 7) | reg);
1322 (*f) (2, bytes, NULL);
1323 }
1324
1325 static void
1326 output_P4_format (vbyte_func f, unsigned char *imask, unsigned long imask_size)
1327 {
1328 imask[0] = UNW_P4;
1329 (*f) (imask_size, (char *) imask, NULL);
1330 }
1331
1332 static void
1333 output_P5_format (vbyte_func f, int grmask, unsigned long frmask)
1334 {
1335 char bytes[4];
1336 grmask = (grmask & 0x0f);
1337
1338 bytes[0] = UNW_P5;
1339 bytes[1] = ((grmask << 4) | ((frmask & 0x000f0000) >> 16));
1340 bytes[2] = ((frmask & 0x0000ff00) >> 8);
1341 bytes[3] = (frmask & 0x000000ff);
1342 (*f) (4, bytes, NULL);
1343 }
1344
1345 static void
1346 output_P6_format (vbyte_func f, unw_record_type rtype, int rmask)
1347 {
1348 char byte;
1349 int r = 0;
1350
1351 if (rtype == gr_mem)
1352 r = 1;
1353 else if (rtype != fr_mem)
1354 as_bad (_("Invalid record type for format P6"));
1355 byte = (UNW_P6 | (r << 4) | (rmask & 0x0f));
1356 (*f) (1, &byte, NULL);
1357 }
1358
1359 static void
1360 output_P7_format (vbyte_func f,
1361 unw_record_type rtype,
1362 unsigned long w1,
1363 unsigned long w2)
1364 {
1365 char bytes[20];
1366 int count = 1;
1367 int r = 0;
1368 count += output_leb128 (bytes + 1, w1, 0);
1369 switch (rtype)
1370 {
1371 case mem_stack_f:
1372 r = 0;
1373 count += output_leb128 (bytes + count, w2 >> 4, 0);
1374 break;
1375 case mem_stack_v:
1376 r = 1;
1377 break;
1378 case spill_base:
1379 r = 2;
1380 break;
1381 case psp_sprel:
1382 r = 3;
1383 break;
1384 case rp_when:
1385 r = 4;
1386 break;
1387 case rp_psprel:
1388 r = 5;
1389 break;
1390 case pfs_when:
1391 r = 6;
1392 break;
1393 case pfs_psprel:
1394 r = 7;
1395 break;
1396 case preds_when:
1397 r = 8;
1398 break;
1399 case preds_psprel:
1400 r = 9;
1401 break;
1402 case lc_when:
1403 r = 10;
1404 break;
1405 case lc_psprel:
1406 r = 11;
1407 break;
1408 case unat_when:
1409 r = 12;
1410 break;
1411 case unat_psprel:
1412 r = 13;
1413 break;
1414 case fpsr_when:
1415 r = 14;
1416 break;
1417 case fpsr_psprel:
1418 r = 15;
1419 break;
1420 default:
1421 break;
1422 }
1423 bytes[0] = (UNW_P7 | r);
1424 (*f) (count, bytes, NULL);
1425 }
1426
1427 static void
1428 output_P8_format (vbyte_func f, unw_record_type rtype, unsigned long t)
1429 {
1430 char bytes[20];
1431 int r = 0;
1432 int count = 2;
1433 bytes[0] = UNW_P8;
1434 switch (rtype)
1435 {
1436 case rp_sprel:
1437 r = 1;
1438 break;
1439 case pfs_sprel:
1440 r = 2;
1441 break;
1442 case preds_sprel:
1443 r = 3;
1444 break;
1445 case lc_sprel:
1446 r = 4;
1447 break;
1448 case unat_sprel:
1449 r = 5;
1450 break;
1451 case fpsr_sprel:
1452 r = 6;
1453 break;
1454 case bsp_when:
1455 r = 7;
1456 break;
1457 case bsp_psprel:
1458 r = 8;
1459 break;
1460 case bsp_sprel:
1461 r = 9;
1462 break;
1463 case bspstore_when:
1464 r = 10;
1465 break;
1466 case bspstore_psprel:
1467 r = 11;
1468 break;
1469 case bspstore_sprel:
1470 r = 12;
1471 break;
1472 case rnat_when:
1473 r = 13;
1474 break;
1475 case rnat_psprel:
1476 r = 14;
1477 break;
1478 case rnat_sprel:
1479 r = 15;
1480 break;
1481 case priunat_when_gr:
1482 r = 16;
1483 break;
1484 case priunat_psprel:
1485 r = 17;
1486 break;
1487 case priunat_sprel:
1488 r = 18;
1489 break;
1490 case priunat_when_mem:
1491 r = 19;
1492 break;
1493 default:
1494 break;
1495 }
1496 bytes[1] = r;
1497 count += output_leb128 (bytes + 2, t, 0);
1498 (*f) (count, bytes, NULL);
1499 }
1500
1501 static void
1502 output_P9_format (vbyte_func f, int grmask, int gr)
1503 {
1504 char bytes[3];
1505 bytes[0] = UNW_P9;
1506 bytes[1] = (grmask & 0x0f);
1507 bytes[2] = (gr & 0x7f);
1508 (*f) (3, bytes, NULL);
1509 }
1510
1511 static void
1512 output_P10_format (vbyte_func f, int abi, int context)
1513 {
1514 char bytes[3];
1515 bytes[0] = UNW_P10;
1516 bytes[1] = (abi & 0xff);
1517 bytes[2] = (context & 0xff);
1518 (*f) (3, bytes, NULL);
1519 }
1520
1521 static void
1522 output_B1_format (vbyte_func f, unw_record_type rtype, unsigned long label)
1523 {
1524 char byte;
1525 int r = 0;
1526 if (label > 0x1f)
1527 {
1528 output_B4_format (f, rtype, label);
1529 return;
1530 }
1531 if (rtype == copy_state)
1532 r = 1;
1533 else if (rtype != label_state)
1534 as_bad (_("Invalid record type for format B1"));
1535
1536 byte = (UNW_B1 | (r << 5) | (label & 0x1f));
1537 (*f) (1, &byte, NULL);
1538 }
1539
1540 static void
1541 output_B2_format (vbyte_func f, unsigned long ecount, unsigned long t)
1542 {
1543 char bytes[20];
1544 int count = 1;
1545 if (ecount > 0x1f)
1546 {
1547 output_B3_format (f, ecount, t);
1548 return;
1549 }
1550 bytes[0] = (UNW_B2 | (ecount & 0x1f));
1551 count += output_leb128 (bytes + 1, t, 0);
1552 (*f) (count, bytes, NULL);
1553 }
1554
1555 static void
1556 output_B3_format (vbyte_func f, unsigned long ecount, unsigned long t)
1557 {
1558 char bytes[20];
1559 int count = 1;
1560 if (ecount <= 0x1f)
1561 {
1562 output_B2_format (f, ecount, t);
1563 return;
1564 }
1565 bytes[0] = UNW_B3;
1566 count += output_leb128 (bytes + 1, t, 0);
1567 count += output_leb128 (bytes + count, ecount, 0);
1568 (*f) (count, bytes, NULL);
1569 }
1570
1571 static void
1572 output_B4_format (vbyte_func f, unw_record_type rtype, unsigned long label)
1573 {
1574 char bytes[20];
1575 int r = 0;
1576 int count = 1;
1577 if (label <= 0x1f)
1578 {
1579 output_B1_format (f, rtype, label);
1580 return;
1581 }
1582
1583 if (rtype == copy_state)
1584 r = 1;
1585 else if (rtype != label_state)
 1586     as_bad (_("Invalid record type for format B4"));
1587
1588 bytes[0] = (UNW_B4 | (r << 3));
1589 count += output_leb128 (bytes + 1, label, 0);
1590 (*f) (count, bytes, NULL);
1591 }
1592
1593 static char
1594 format_ab_reg (int ab, int reg)
1595 {
1596 int ret;
1597 ab = (ab & 3);
1598 reg = (reg & 0x1f);
1599 ret = (ab << 5) | reg;
1600 return ret;
1601 }
1602
1603 static void
1604 output_X1_format (vbyte_func f,
1605 unw_record_type rtype,
1606 int ab,
1607 int reg,
1608 unsigned long t,
1609 unsigned long w1)
1610 {
1611 char bytes[20];
1612 int r = 0;
1613 int count = 2;
1614 bytes[0] = UNW_X1;
1615
1616 if (rtype == spill_sprel)
1617 r = 1;
1618 else if (rtype != spill_psprel)
1619 as_bad (_("Invalid record type for format X1"));
1620 bytes[1] = ((r << 7) | format_ab_reg (ab, reg));
1621 count += output_leb128 (bytes + 2, t, 0);
1622 count += output_leb128 (bytes + count, w1, 0);
1623 (*f) (count, bytes, NULL);
1624 }
1625
1626 static void
1627 output_X2_format (vbyte_func f,
1628 int ab,
1629 int reg,
1630 int x,
1631 int y,
1632 int treg,
1633 unsigned long t)
1634 {
1635 char bytes[20];
1636 int count = 3;
1637 bytes[0] = UNW_X2;
1638 bytes[1] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1639 bytes[2] = (((y & 1) << 7) | (treg & 0x7f));
1640 count += output_leb128 (bytes + 3, t, 0);
1641 (*f) (count, bytes, NULL);
1642 }
1643
1644 static void
1645 output_X3_format (vbyte_func f,
1646 unw_record_type rtype,
1647 int qp,
1648 int ab,
1649 int reg,
1650 unsigned long t,
1651 unsigned long w1)
1652 {
1653 char bytes[20];
1654 int r = 0;
1655 int count = 3;
1656 bytes[0] = UNW_X3;
1657
1658 if (rtype == spill_sprel_p)
1659 r = 1;
1660 else if (rtype != spill_psprel_p)
1661 as_bad (_("Invalid record type for format X3"));
1662 bytes[1] = ((r << 7) | (qp & 0x3f));
1663 bytes[2] = format_ab_reg (ab, reg);
1664 count += output_leb128 (bytes + 3, t, 0);
1665 count += output_leb128 (bytes + count, w1, 0);
1666 (*f) (count, bytes, NULL);
1667 }
1668
1669 static void
1670 output_X4_format (vbyte_func f,
1671 int qp,
1672 int ab,
1673 int reg,
1674 int x,
1675 int y,
1676 int treg,
1677 unsigned long t)
1678 {
1679 char bytes[20];
1680 int count = 4;
1681 bytes[0] = UNW_X4;
1682 bytes[1] = (qp & 0x3f);
1683 bytes[2] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1684 bytes[3] = (((y & 1) << 7) | (treg & 0x7f));
1685 count += output_leb128 (bytes + 4, t, 0);
1686 (*f) (count, bytes, NULL);
1687 }
1688
1689 /* This function checks whether there are any outstanding .save-s and
1690 discards them if so. */
1691
1692 static void
1693 check_pending_save (void)
1694 {
1695 if (unwind.pending_saves)
1696 {
1697 unw_rec_list *cur, *prev;
1698
1699 as_warn (_("Previous .save incomplete"));
1700 for (cur = unwind.list, prev = NULL; cur; )
1701 if (&cur->r.record.p == unwind.pending_saves)
1702 {
1703 if (prev)
1704 prev->next = cur->next;
1705 else
1706 unwind.list = cur->next;
1707 if (cur == unwind.tail)
1708 unwind.tail = prev;
1709 if (cur == unwind.current_entry)
1710 unwind.current_entry = cur->next;
 1711       /* Don't free the first discarded record: it's being used as the
 1712          terminator for (currently) br_gr and gr_gr processing, and keeping it
1713 also prevents leaving a dangling pointer to it in its
1714 predecessor. */
1715 cur->r.record.p.grmask = 0;
1716 cur->r.record.p.brmask = 0;
1717 cur->r.record.p.frmask = 0;
1718 prev = cur->r.record.p.next;
1719 cur->r.record.p.next = NULL;
1720 cur = prev;
1721 break;
1722 }
1723 else
1724 {
1725 prev = cur;
1726 cur = cur->next;
1727 }
1728 while (cur)
1729 {
1730 prev = cur;
1731 cur = cur->r.record.p.next;
1732 free (prev);
1733 }
1734 unwind.pending_saves = NULL;
1735 }
1736 }
1737
1738 /* This function allocates a record list structure, and initializes fields. */
1739
1740 static unw_rec_list *
1741 alloc_record (unw_record_type t)
1742 {
1743 unw_rec_list *ptr;
1744 ptr = XNEW (unw_rec_list);
1745 memset (ptr, 0, sizeof (*ptr));
1746 ptr->slot_number = SLOT_NUM_NOT_SET;
1747 ptr->r.type = t;
1748 return ptr;
1749 }
1750
1751 /* Dummy unwind record used for calculating the length of the last prologue or
1752 body region. */
1753
1754 static unw_rec_list *
1755 output_endp (void)
1756 {
1757 unw_rec_list *ptr = alloc_record (endp);
1758 return ptr;
1759 }
1760
1761 static unw_rec_list *
1762 output_prologue (void)
1763 {
1764 unw_rec_list *ptr = alloc_record (prologue);
1765 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1766 return ptr;
1767 }
1768
1769 static unw_rec_list *
1770 output_prologue_gr (unsigned int saved_mask, unsigned int reg)
1771 {
1772 unw_rec_list *ptr = alloc_record (prologue_gr);
1773 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1774 ptr->r.record.r.grmask = saved_mask;
1775 ptr->r.record.r.grsave = reg;
1776 return ptr;
1777 }
1778
1779 static unw_rec_list *
1780 output_body (void)
1781 {
1782 unw_rec_list *ptr = alloc_record (body);
1783 return ptr;
1784 }
1785
1786 static unw_rec_list *
1787 output_mem_stack_f (unsigned int size)
1788 {
1789 unw_rec_list *ptr = alloc_record (mem_stack_f);
1790 ptr->r.record.p.size = size;
1791 return ptr;
1792 }
1793
1794 static unw_rec_list *
1795 output_mem_stack_v (void)
1796 {
1797 unw_rec_list *ptr = alloc_record (mem_stack_v);
1798 return ptr;
1799 }
1800
1801 static unw_rec_list *
1802 output_psp_gr (unsigned int gr)
1803 {
1804 unw_rec_list *ptr = alloc_record (psp_gr);
1805 ptr->r.record.p.r.gr = gr;
1806 return ptr;
1807 }
1808
1809 static unw_rec_list *
1810 output_psp_sprel (unsigned int offset)
1811 {
1812 unw_rec_list *ptr = alloc_record (psp_sprel);
1813 ptr->r.record.p.off.sp = offset / 4;
1814 return ptr;
1815 }
1816
1817 static unw_rec_list *
1818 output_rp_when (void)
1819 {
1820 unw_rec_list *ptr = alloc_record (rp_when);
1821 return ptr;
1822 }
1823
1824 static unw_rec_list *
1825 output_rp_gr (unsigned int gr)
1826 {
1827 unw_rec_list *ptr = alloc_record (rp_gr);
1828 ptr->r.record.p.r.gr = gr;
1829 return ptr;
1830 }
1831
1832 static unw_rec_list *
1833 output_rp_br (unsigned int br)
1834 {
1835 unw_rec_list *ptr = alloc_record (rp_br);
1836 ptr->r.record.p.r.br = br;
1837 return ptr;
1838 }
1839
1840 static unw_rec_list *
1841 output_rp_psprel (unsigned int offset)
1842 {
1843 unw_rec_list *ptr = alloc_record (rp_psprel);
1844 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1845 return ptr;
1846 }
1847
1848 static unw_rec_list *
1849 output_rp_sprel (unsigned int offset)
1850 {
1851 unw_rec_list *ptr = alloc_record (rp_sprel);
1852 ptr->r.record.p.off.sp = offset / 4;
1853 return ptr;
1854 }
1855
1856 static unw_rec_list *
1857 output_pfs_when (void)
1858 {
1859 unw_rec_list *ptr = alloc_record (pfs_when);
1860 return ptr;
1861 }
1862
1863 static unw_rec_list *
1864 output_pfs_gr (unsigned int gr)
1865 {
1866 unw_rec_list *ptr = alloc_record (pfs_gr);
1867 ptr->r.record.p.r.gr = gr;
1868 return ptr;
1869 }
1870
1871 static unw_rec_list *
1872 output_pfs_psprel (unsigned int offset)
1873 {
1874 unw_rec_list *ptr = alloc_record (pfs_psprel);
1875 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1876 return ptr;
1877 }
1878
1879 static unw_rec_list *
1880 output_pfs_sprel (unsigned int offset)
1881 {
1882 unw_rec_list *ptr = alloc_record (pfs_sprel);
1883 ptr->r.record.p.off.sp = offset / 4;
1884 return ptr;
1885 }
1886
1887 static unw_rec_list *
1888 output_preds_when (void)
1889 {
1890 unw_rec_list *ptr = alloc_record (preds_when);
1891 return ptr;
1892 }
1893
1894 static unw_rec_list *
1895 output_preds_gr (unsigned int gr)
1896 {
1897 unw_rec_list *ptr = alloc_record (preds_gr);
1898 ptr->r.record.p.r.gr = gr;
1899 return ptr;
1900 }
1901
1902 static unw_rec_list *
1903 output_preds_psprel (unsigned int offset)
1904 {
1905 unw_rec_list *ptr = alloc_record (preds_psprel);
1906 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1907 return ptr;
1908 }
1909
1910 static unw_rec_list *
1911 output_preds_sprel (unsigned int offset)
1912 {
1913 unw_rec_list *ptr = alloc_record (preds_sprel);
1914 ptr->r.record.p.off.sp = offset / 4;
1915 return ptr;
1916 }
1917
1918 static unw_rec_list *
1919 output_fr_mem (unsigned int mask)
1920 {
1921 unw_rec_list *ptr = alloc_record (fr_mem);
1922 unw_rec_list *cur = ptr;
1923
1924 ptr->r.record.p.frmask = mask;
1925 unwind.pending_saves = &ptr->r.record.p;
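  /* Split the mask so that each set bit ends up in its own record, chained
     through the "next" field and tracked via unwind.pending_saves (the list
     of split up .save-s).  Example: mask == 0x6 yields one record with
     frmask == 0x2 followed by one with frmask == 0x4.  */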
1926 for (;;)
1927 {
1928 unw_rec_list *prev = cur;
1929
1930 /* Clear least significant set bit. */
1931 mask &= ~(mask & (~mask + 1));
1932 if (!mask)
1933 return ptr;
1934 cur = alloc_record (fr_mem);
1935 cur->r.record.p.frmask = mask;
1936 /* Retain only least significant bit. */
1937 prev->r.record.p.frmask ^= mask;
1938 prev->r.record.p.next = cur;
1939 }
1940 }
1941
1942 static unw_rec_list *
1943 output_frgr_mem (unsigned int gr_mask, unsigned int fr_mask)
1944 {
1945 unw_rec_list *ptr = alloc_record (frgr_mem);
1946 unw_rec_list *cur = ptr;
1947
1948 unwind.pending_saves = &cur->r.record.p;
1949 cur->r.record.p.frmask = fr_mask;
1950 while (fr_mask)
1951 {
1952 unw_rec_list *prev = cur;
1953
1954 /* Clear least significant set bit. */
1955 fr_mask &= ~(fr_mask & (~fr_mask + 1));
1956 if (!gr_mask && !fr_mask)
1957 return ptr;
1958 cur = alloc_record (frgr_mem);
1959 cur->r.record.p.frmask = fr_mask;
1960 /* Retain only least significant bit. */
1961 prev->r.record.p.frmask ^= fr_mask;
1962 prev->r.record.p.next = cur;
1963 }
1964 cur->r.record.p.grmask = gr_mask;
1965 for (;;)
1966 {
1967 unw_rec_list *prev = cur;
1968
1969 /* Clear least significant set bit. */
1970 gr_mask &= ~(gr_mask & (~gr_mask + 1));
1971 if (!gr_mask)
1972 return ptr;
1973 cur = alloc_record (frgr_mem);
1974 cur->r.record.p.grmask = gr_mask;
1975 /* Retain only least significant bit. */
1976 prev->r.record.p.grmask ^= gr_mask;
1977 prev->r.record.p.next = cur;
1978 }
1979 }
1980
1981 static unw_rec_list *
1982 output_gr_gr (unsigned int mask, unsigned int reg)
1983 {
1984 unw_rec_list *ptr = alloc_record (gr_gr);
1985 unw_rec_list *cur = ptr;
1986
1987 ptr->r.record.p.grmask = mask;
1988 ptr->r.record.p.r.gr = reg;
1989 unwind.pending_saves = &ptr->r.record.p;
1990 for (;;)
1991 {
1992 unw_rec_list *prev = cur;
1993
1994 /* Clear least significant set bit. */
1995 mask &= ~(mask & (~mask + 1));
1996 if (!mask)
1997 return ptr;
1998 cur = alloc_record (gr_gr);
1999 cur->r.record.p.grmask = mask;
2000 /* Indicate this record shouldn't be output. */
2001 cur->r.record.p.r.gr = REG_NUM;
2002 /* Retain only least significant bit. */
2003 prev->r.record.p.grmask ^= mask;
2004 prev->r.record.p.next = cur;
2005 }
2006 }
2007
2008 static unw_rec_list *
2009 output_gr_mem (unsigned int mask)
2010 {
2011 unw_rec_list *ptr = alloc_record (gr_mem);
2012 unw_rec_list *cur = ptr;
2013
2014 ptr->r.record.p.grmask = mask;
2015 unwind.pending_saves = &ptr->r.record.p;
2016 for (;;)
2017 {
2018 unw_rec_list *prev = cur;
2019
2020 /* Clear least significant set bit. */
2021 mask &= ~(mask & (~mask + 1));
2022 if (!mask)
2023 return ptr;
2024 cur = alloc_record (gr_mem);
2025 cur->r.record.p.grmask = mask;
2026 /* Retain only least significant bit. */
2027 prev->r.record.p.grmask ^= mask;
2028 prev->r.record.p.next = cur;
2029 }
2030 }
2031
2032 static unw_rec_list *
2033 output_br_mem (unsigned int mask)
2034 {
2035 unw_rec_list *ptr = alloc_record (br_mem);
2036 unw_rec_list *cur = ptr;
2037
2038 ptr->r.record.p.brmask = mask;
2039 unwind.pending_saves = &ptr->r.record.p;
2040 for (;;)
2041 {
2042 unw_rec_list *prev = cur;
2043
2044 /* Clear least significant set bit. */
2045 mask &= ~(mask & (~mask + 1));
2046 if (!mask)
2047 return ptr;
2048 cur = alloc_record (br_mem);
2049 cur->r.record.p.brmask = mask;
2050 /* Retain only least significant bit. */
2051 prev->r.record.p.brmask ^= mask;
2052 prev->r.record.p.next = cur;
2053 }
2054 }
2055
2056 static unw_rec_list *
2057 output_br_gr (unsigned int mask, unsigned int reg)
2058 {
2059 unw_rec_list *ptr = alloc_record (br_gr);
2060 unw_rec_list *cur = ptr;
2061
2062 ptr->r.record.p.brmask = mask;
2063 ptr->r.record.p.r.gr = reg;
2064 unwind.pending_saves = &ptr->r.record.p;
2065 for (;;)
2066 {
2067 unw_rec_list *prev = cur;
2068
2069 /* Clear least significant set bit. */
2070 mask &= ~(mask & (~mask + 1));
2071 if (!mask)
2072 return ptr;
2073 cur = alloc_record (br_gr);
2074 cur->r.record.p.brmask = mask;
2075 /* Indicate this record shouldn't be output. */
2076 cur->r.record.p.r.gr = REG_NUM;
2077 /* Retain only least significant bit. */
2078 prev->r.record.p.brmask ^= mask;
2079 prev->r.record.p.next = cur;
2080 }
2081 }
2082
2083 static unw_rec_list *
2084 output_spill_base (unsigned int offset)
2085 {
2086 unw_rec_list *ptr = alloc_record (spill_base);
2087 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2088 return ptr;
2089 }
2090
2091 static unw_rec_list *
2092 output_unat_when (void)
2093 {
2094 unw_rec_list *ptr = alloc_record (unat_when);
2095 return ptr;
2096 }
2097
2098 static unw_rec_list *
2099 output_unat_gr (unsigned int gr)
2100 {
2101 unw_rec_list *ptr = alloc_record (unat_gr);
2102 ptr->r.record.p.r.gr = gr;
2103 return ptr;
2104 }
2105
2106 static unw_rec_list *
2107 output_unat_psprel (unsigned int offset)
2108 {
2109 unw_rec_list *ptr = alloc_record (unat_psprel);
2110 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2111 return ptr;
2112 }
2113
2114 static unw_rec_list *
2115 output_unat_sprel (unsigned int offset)
2116 {
2117 unw_rec_list *ptr = alloc_record (unat_sprel);
2118 ptr->r.record.p.off.sp = offset / 4;
2119 return ptr;
2120 }
2121
2122 static unw_rec_list *
2123 output_lc_when (void)
2124 {
2125 unw_rec_list *ptr = alloc_record (lc_when);
2126 return ptr;
2127 }
2128
2129 static unw_rec_list *
2130 output_lc_gr (unsigned int gr)
2131 {
2132 unw_rec_list *ptr = alloc_record (lc_gr);
2133 ptr->r.record.p.r.gr = gr;
2134 return ptr;
2135 }
2136
2137 static unw_rec_list *
2138 output_lc_psprel (unsigned int offset)
2139 {
2140 unw_rec_list *ptr = alloc_record (lc_psprel);
2141 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2142 return ptr;
2143 }
2144
2145 static unw_rec_list *
2146 output_lc_sprel (unsigned int offset)
2147 {
2148 unw_rec_list *ptr = alloc_record (lc_sprel);
2149 ptr->r.record.p.off.sp = offset / 4;
2150 return ptr;
2151 }
2152
2153 static unw_rec_list *
2154 output_fpsr_when (void)
2155 {
2156 unw_rec_list *ptr = alloc_record (fpsr_when);
2157 return ptr;
2158 }
2159
2160 static unw_rec_list *
2161 output_fpsr_gr (unsigned int gr)
2162 {
2163 unw_rec_list *ptr = alloc_record (fpsr_gr);
2164 ptr->r.record.p.r.gr = gr;
2165 return ptr;
2166 }
2167
2168 static unw_rec_list *
2169 output_fpsr_psprel (unsigned int offset)
2170 {
2171 unw_rec_list *ptr = alloc_record (fpsr_psprel);
2172 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2173 return ptr;
2174 }
2175
2176 static unw_rec_list *
2177 output_fpsr_sprel (unsigned int offset)
2178 {
2179 unw_rec_list *ptr = alloc_record (fpsr_sprel);
2180 ptr->r.record.p.off.sp = offset / 4;
2181 return ptr;
2182 }
2183
2184 static unw_rec_list *
2185 output_priunat_when_gr (void)
2186 {
2187 unw_rec_list *ptr = alloc_record (priunat_when_gr);
2188 return ptr;
2189 }
2190
2191 static unw_rec_list *
2192 output_priunat_when_mem (void)
2193 {
2194 unw_rec_list *ptr = alloc_record (priunat_when_mem);
2195 return ptr;
2196 }
2197
2198 static unw_rec_list *
2199 output_priunat_gr (unsigned int gr)
2200 {
2201 unw_rec_list *ptr = alloc_record (priunat_gr);
2202 ptr->r.record.p.r.gr = gr;
2203 return ptr;
2204 }
2205
2206 static unw_rec_list *
2207 output_priunat_psprel (unsigned int offset)
2208 {
2209 unw_rec_list *ptr = alloc_record (priunat_psprel);
2210 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2211 return ptr;
2212 }
2213
2214 static unw_rec_list *
2215 output_priunat_sprel (unsigned int offset)
2216 {
2217 unw_rec_list *ptr = alloc_record (priunat_sprel);
2218 ptr->r.record.p.off.sp = offset / 4;
2219 return ptr;
2220 }
2221
2222 static unw_rec_list *
2223 output_bsp_when (void)
2224 {
2225 unw_rec_list *ptr = alloc_record (bsp_when);
2226 return ptr;
2227 }
2228
2229 static unw_rec_list *
2230 output_bsp_gr (unsigned int gr)
2231 {
2232 unw_rec_list *ptr = alloc_record (bsp_gr);
2233 ptr->r.record.p.r.gr = gr;
2234 return ptr;
2235 }
2236
2237 static unw_rec_list *
2238 output_bsp_psprel (unsigned int offset)
2239 {
2240 unw_rec_list *ptr = alloc_record (bsp_psprel);
2241 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2242 return ptr;
2243 }
2244
2245 static unw_rec_list *
2246 output_bsp_sprel (unsigned int offset)
2247 {
2248 unw_rec_list *ptr = alloc_record (bsp_sprel);
2249 ptr->r.record.p.off.sp = offset / 4;
2250 return ptr;
2251 }
2252
2253 static unw_rec_list *
2254 output_bspstore_when (void)
2255 {
2256 unw_rec_list *ptr = alloc_record (bspstore_when);
2257 return ptr;
2258 }
2259
2260 static unw_rec_list *
2261 output_bspstore_gr (unsigned int gr)
2262 {
2263 unw_rec_list *ptr = alloc_record (bspstore_gr);
2264 ptr->r.record.p.r.gr = gr;
2265 return ptr;
2266 }
2267
2268 static unw_rec_list *
2269 output_bspstore_psprel (unsigned int offset)
2270 {
2271 unw_rec_list *ptr = alloc_record (bspstore_psprel);
2272 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2273 return ptr;
2274 }
2275
2276 static unw_rec_list *
2277 output_bspstore_sprel (unsigned int offset)
2278 {
2279 unw_rec_list *ptr = alloc_record (bspstore_sprel);
2280 ptr->r.record.p.off.sp = offset / 4;
2281 return ptr;
2282 }
2283
2284 static unw_rec_list *
2285 output_rnat_when (void)
2286 {
2287 unw_rec_list *ptr = alloc_record (rnat_when);
2288 return ptr;
2289 }
2290
2291 static unw_rec_list *
2292 output_rnat_gr (unsigned int gr)
2293 {
2294 unw_rec_list *ptr = alloc_record (rnat_gr);
2295 ptr->r.record.p.r.gr = gr;
2296 return ptr;
2297 }
2298
2299 static unw_rec_list *
2300 output_rnat_psprel (unsigned int offset)
2301 {
2302 unw_rec_list *ptr = alloc_record (rnat_psprel);
2303 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2304 return ptr;
2305 }
2306
2307 static unw_rec_list *
2308 output_rnat_sprel (unsigned int offset)
2309 {
2310 unw_rec_list *ptr = alloc_record (rnat_sprel);
2311 ptr->r.record.p.off.sp = offset / 4;
2312 return ptr;
2313 }
2314
2315 static unw_rec_list *
2316 output_unwabi (unsigned long abi, unsigned long context)
2317 {
2318 unw_rec_list *ptr = alloc_record (unwabi);
2319 ptr->r.record.p.abi = abi;
2320 ptr->r.record.p.context = context;
2321 return ptr;
2322 }
2323
2324 static unw_rec_list *
2325 output_epilogue (unsigned long ecount)
2326 {
2327 unw_rec_list *ptr = alloc_record (epilogue);
2328 ptr->r.record.b.ecount = ecount;
2329 return ptr;
2330 }
2331
2332 static unw_rec_list *
2333 output_label_state (unsigned long label)
2334 {
2335 unw_rec_list *ptr = alloc_record (label_state);
2336 ptr->r.record.b.label = label;
2337 return ptr;
2338 }
2339
2340 static unw_rec_list *
2341 output_copy_state (unsigned long label)
2342 {
2343 unw_rec_list *ptr = alloc_record (copy_state);
2344 ptr->r.record.b.label = label;
2345 return ptr;
2346 }
2347
2348 static unw_rec_list *
2349 output_spill_psprel (unsigned int ab,
2350 unsigned int reg,
2351 unsigned int offset,
2352 unsigned int predicate)
2353 {
2354 unw_rec_list *ptr = alloc_record (predicate ? spill_psprel_p : spill_psprel);
2355 ptr->r.record.x.ab = ab;
2356 ptr->r.record.x.reg = reg;
2357 ptr->r.record.x.where.pspoff = ENCODED_PSP_OFFSET (offset);
2358 ptr->r.record.x.qp = predicate;
2359 return ptr;
2360 }
2361
2362 static unw_rec_list *
2363 output_spill_sprel (unsigned int ab,
2364 unsigned int reg,
2365 unsigned int offset,
2366 unsigned int predicate)
2367 {
2368 unw_rec_list *ptr = alloc_record (predicate ? spill_sprel_p : spill_sprel);
2369 ptr->r.record.x.ab = ab;
2370 ptr->r.record.x.reg = reg;
2371 ptr->r.record.x.where.spoff = offset / 4;
2372 ptr->r.record.x.qp = predicate;
2373 return ptr;
2374 }
2375
2376 static unw_rec_list *
2377 output_spill_reg (unsigned int ab,
2378 unsigned int reg,
2379 unsigned int targ_reg,
2380 unsigned int xy,
2381 unsigned int predicate)
2382 {
2383 unw_rec_list *ptr = alloc_record (predicate ? spill_reg_p : spill_reg);
2384 ptr->r.record.x.ab = ab;
2385 ptr->r.record.x.reg = reg;
2386 ptr->r.record.x.where.reg = targ_reg;
2387 ptr->r.record.x.xy = xy;
2388 ptr->r.record.x.qp = predicate;
2389 return ptr;
2390 }
2391
2392 /* Given a unw_rec_list record, output it in the correct descriptor
2393    format using the specified function. */
2394
2395 static void
2396 process_one_record (unw_rec_list *ptr, vbyte_func f)
2397 {
2398 unsigned int fr_mask, gr_mask;
2399
2400 switch (ptr->r.type)
2401 {
2402 /* This is a dummy record that takes up no space in the output. */
2403 case endp:
2404 break;
2405
2406 case gr_mem:
2407 case fr_mem:
2408 case br_mem:
2409 case frgr_mem:
2410 /* These are taken care of by prologue/prologue_gr. */
2411 break;
2412
2413 case prologue_gr:
2414 case prologue:
2415 if (ptr->r.type == prologue_gr)
2416 output_R2_format (f, ptr->r.record.r.grmask,
2417 ptr->r.record.r.grsave, ptr->r.record.r.rlen);
2418 else
2419 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2420
2421 /* Output descriptor(s) for union of register spills (if any). */
2422 gr_mask = ptr->r.record.r.mask.gr_mem;
2423 fr_mask = ptr->r.record.r.mask.fr_mem;
2424 if (fr_mask)
2425 {
2426 if ((fr_mask & ~0xfUL) == 0)
2427 output_P6_format (f, fr_mem, fr_mask);
2428 else
2429 {
2430 output_P5_format (f, gr_mask, fr_mask);
2431 gr_mask = 0;
2432 }
2433 }
2434 if (gr_mask)
2435 output_P6_format (f, gr_mem, gr_mask);
2436 if (ptr->r.record.r.mask.br_mem)
2437 output_P1_format (f, ptr->r.record.r.mask.br_mem);
2438
2439 /* output imask descriptor if necessary: */
2440 if (ptr->r.record.r.mask.i)
2441 output_P4_format (f, ptr->r.record.r.mask.i,
2442 ptr->r.record.r.imask_size);
2443 break;
2444
2445 case body:
2446 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2447 break;
2448 case mem_stack_f:
2449 case mem_stack_v:
2450 output_P7_format (f, ptr->r.type, ptr->r.record.p.t,
2451 ptr->r.record.p.size);
2452 break;
2453 case psp_gr:
2454 case rp_gr:
2455 case pfs_gr:
2456 case preds_gr:
2457 case unat_gr:
2458 case lc_gr:
2459 case fpsr_gr:
2460 case priunat_gr:
2461 case bsp_gr:
2462 case bspstore_gr:
2463 case rnat_gr:
2464 output_P3_format (f, ptr->r.type, ptr->r.record.p.r.gr);
2465 break;
2466 case rp_br:
2467 output_P3_format (f, rp_br, ptr->r.record.p.r.br);
2468 break;
2469 case psp_sprel:
2470 output_P7_format (f, psp_sprel, ptr->r.record.p.off.sp, 0);
2471 break;
2472 case rp_when:
2473 case pfs_when:
2474 case preds_when:
2475 case unat_when:
2476 case lc_when:
2477 case fpsr_when:
2478 output_P7_format (f, ptr->r.type, ptr->r.record.p.t, 0);
2479 break;
2480 case rp_psprel:
2481 case pfs_psprel:
2482 case preds_psprel:
2483 case unat_psprel:
2484 case lc_psprel:
2485 case fpsr_psprel:
2486 case spill_base:
2487 output_P7_format (f, ptr->r.type, ptr->r.record.p.off.psp, 0);
2488 break;
2489 case rp_sprel:
2490 case pfs_sprel:
2491 case preds_sprel:
2492 case unat_sprel:
2493 case lc_sprel:
2494 case fpsr_sprel:
2495 case priunat_sprel:
2496 case bsp_sprel:
2497 case bspstore_sprel:
2498 case rnat_sprel:
2499 output_P8_format (f, ptr->r.type, ptr->r.record.p.off.sp);
2500 break;
2501 case gr_gr:
2502 if (ptr->r.record.p.r.gr < REG_NUM)
2503 {
2504 const unw_rec_list *cur = ptr;
2505
2506 gr_mask = cur->r.record.p.grmask;
2507 while ((cur = cur->r.record.p.next) != NULL)
2508 gr_mask |= cur->r.record.p.grmask;
2509 output_P9_format (f, gr_mask, ptr->r.record.p.r.gr);
2510 }
2511 break;
2512 case br_gr:
2513 if (ptr->r.record.p.r.gr < REG_NUM)
2514 {
2515 const unw_rec_list *cur = ptr;
2516
2517 gr_mask = cur->r.record.p.brmask;
2518 while ((cur = cur->r.record.p.next) != NULL)
2519 gr_mask |= cur->r.record.p.brmask;
2520 output_P2_format (f, gr_mask, ptr->r.record.p.r.gr);
2521 }
2522 break;
2523 case spill_mask:
2524 as_bad (_("spill_mask record unimplemented."));
2525 break;
2526 case priunat_when_gr:
2527 case priunat_when_mem:
2528 case bsp_when:
2529 case bspstore_when:
2530 case rnat_when:
2531 output_P8_format (f, ptr->r.type, ptr->r.record.p.t);
2532 break;
2533 case priunat_psprel:
2534 case bsp_psprel:
2535 case bspstore_psprel:
2536 case rnat_psprel:
2537 output_P8_format (f, ptr->r.type, ptr->r.record.p.off.psp);
2538 break;
2539 case unwabi:
2540 output_P10_format (f, ptr->r.record.p.abi, ptr->r.record.p.context);
2541 break;
2542 case epilogue:
2543 output_B3_format (f, ptr->r.record.b.ecount, ptr->r.record.b.t);
2544 break;
2545 case label_state:
2546 case copy_state:
2547 output_B4_format (f, ptr->r.type, ptr->r.record.b.label);
2548 break;
2549 case spill_psprel:
2550 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2551 ptr->r.record.x.reg, ptr->r.record.x.t,
2552 ptr->r.record.x.where.pspoff);
2553 break;
2554 case spill_sprel:
2555 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2556 ptr->r.record.x.reg, ptr->r.record.x.t,
2557 ptr->r.record.x.where.spoff);
2558 break;
2559 case spill_reg:
2560 output_X2_format (f, ptr->r.record.x.ab, ptr->r.record.x.reg,
2561 ptr->r.record.x.xy >> 1, ptr->r.record.x.xy,
2562 ptr->r.record.x.where.reg, ptr->r.record.x.t);
2563 break;
2564 case spill_psprel_p:
2565 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2566 ptr->r.record.x.ab, ptr->r.record.x.reg,
2567 ptr->r.record.x.t, ptr->r.record.x.where.pspoff);
2568 break;
2569 case spill_sprel_p:
2570 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2571 ptr->r.record.x.ab, ptr->r.record.x.reg,
2572 ptr->r.record.x.t, ptr->r.record.x.where.spoff);
2573 break;
2574 case spill_reg_p:
2575 output_X4_format (f, ptr->r.record.x.qp, ptr->r.record.x.ab,
2576 ptr->r.record.x.reg, ptr->r.record.x.xy >> 1,
2577 ptr->r.record.x.xy, ptr->r.record.x.where.reg,
2578 ptr->r.record.x.t);
2579 break;
2580 default:
2581 as_bad (_("record_type_not_valid"));
2582 break;
2583 }
2584 }
2585
2586 /* Given a unw_rec_list list, process all the records with
2587 the specified function. */
2588 static void
2589 process_unw_records (unw_rec_list *list, vbyte_func f)
2590 {
2591 unw_rec_list *ptr;
2592 for (ptr = list; ptr; ptr = ptr->next)
2593 process_one_record (ptr, f);
2594 }
2595
2596 /* Determine the size of a record list in bytes. */
2597 static int
2598 calc_record_size (unw_rec_list *list)
2599 {
2600 vbyte_count = 0;
2601 process_unw_records (list, count_output);
2602 return vbyte_count;
2603 }
2604
2605 /* Return the number of bits set in the input value.
2606 Perhaps this has a better place... */
2607 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
2608 # define popcount __builtin_popcount
2609 #else
2610 static int
2611 popcount (unsigned x)
2612 {
2613 static const unsigned char popcnt[16] =
2614 {
2615 0, 1, 1, 2,
2616 1, 2, 2, 3,
2617 1, 2, 2, 3,
2618 2, 3, 3, 4
2619 };
2620
2621 if (x < NELEMS (popcnt))
2622 return popcnt[x];
2623 return popcnt[x % NELEMS (popcnt)] + popcount (x / NELEMS (popcnt));
2624 }
2625 #endif
2626
2627 /* Update IMASK bitmask to reflect the fact that one or more registers
2628 of type TYPE are saved starting at instruction with index T. If N
2629 bits are set in REGMASK, it is assumed that instructions T through
2630 T+N-1 save these registers.
2631
2632 TYPE values:
2633 0: no save
2634 1: instruction saves next fp reg
2635 2: instruction saves next general reg
2636 3: instruction saves next branch reg */
2637 static void
2638 set_imask (unw_rec_list *region,
2639 unsigned long regmask,
2640 unsigned long t,
2641 unsigned int type)
2642 {
2643 unsigned char *imask;
2644 unsigned long imask_size;
2645 unsigned int i;
2646 int pos;
2647
2648 imask = region->r.record.r.mask.i;
2649 imask_size = region->r.record.r.imask_size;
2650 if (!imask)
2651 {
2652 imask_size = (region->r.record.r.rlen * 2 + 7) / 8 + 1;
2653 imask = XCNEWVEC (unsigned char, imask_size);
2654
2655 region->r.record.r.imask_size = imask_size;
2656 region->r.record.r.mask.i = imask;
2657 }
2658
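  /* Each imask byte holds four 2-bit save-type entries, most significant
     pair first: the entry for slot T goes into byte T/4 + 1 at bit
     position 2 * (3 - T % 4). */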
2659 i = (t / 4) + 1;
2660 pos = 2 * (3 - t % 4);
2661 while (regmask)
2662 {
2663 if (i >= imask_size)
2664 {
2665 as_bad (_("Ignoring attempt to spill beyond end of region"));
2666 return;
2667 }
2668
2669 imask[i] |= (type & 0x3) << pos;
2670
2671 regmask &= (regmask - 1);
2672 pos -= 2;
2673 if (pos < 0)
2674 {
2675 pos = 6; /* Restart at the top 2-bit slot of the next byte. */
2676 ++i;
2677 }
2678 }
2679 }
2680
2681 /* Return the number of instruction slots from FIRST_ADDR to SLOT_ADDR.
2682 SLOT_FRAG is the frag containing SLOT_ADDR, and FIRST_FRAG is the frag
2683 containing FIRST_ADDR. If BEFORE_RELAX, then we use worst-case estimates
2684 for frag sizes. */
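/* These addresses are bundle addresses with the slot number (0-2) added
   in, so bundles are counted via ">> 4" differences, slots are extracted
   via "& 0x3", and each 16-byte bundle contributes 3 slots. */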
2685
2686 static unsigned long
2687 slot_index (unsigned long slot_addr,
2688 fragS *slot_frag,
2689 unsigned long first_addr,
2690 fragS *first_frag,
2691 int before_relax)
2692 {
2693 unsigned long s_index = 0;
2694
2695 /* First time we are called, the initial address and frag are invalid. */
2696 if (first_addr == 0)
2697 return 0;
2698
2699 /* If the two addresses are in different frags, then we need to add in
2700 the remaining size of this frag, and then the entire size of intermediate
2701 frags. */
2702 while (slot_frag != first_frag)
2703 {
2704 unsigned long start_addr = (unsigned long) &first_frag->fr_literal;
2705
2706 if (! before_relax)
2707 {
2708 /* We can get the final addresses only during and after
2709 relaxation. */
2710 if (first_frag->fr_next && first_frag->fr_next->fr_address)
2711 s_index += 3 * ((first_frag->fr_next->fr_address
2712 - first_frag->fr_address
2713 - first_frag->fr_fix) >> 4);
2714 }
2715 else
2716 /* We don't know what the final addresses will be. We try our
2717 best to estimate. */
2718 switch (first_frag->fr_type)
2719 {
2720 default:
2721 break;
2722
2723 case rs_space:
2724 as_fatal (_("Only constant space allocation is supported"));
2725 break;
2726
2727 case rs_align:
2728 case rs_align_code:
2729 case rs_align_test:
2730 /* Take alignment into account. Assume the worst case
2731 before relaxation. */
2732 s_index += 3 * ((1 << first_frag->fr_offset) >> 4);
2733 break;
2734
2735 case rs_org:
2736 if (first_frag->fr_symbol)
2737 {
2738 as_fatal (_("Only constant offsets are supported"));
2739 break;
2740 }
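	      /* Fall through. */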
2741 case rs_fill:
2742 s_index += 3 * (first_frag->fr_offset >> 4);
2743 break;
2744 }
2745
2746 /* Add in the full size of the frag converted to instruction slots. */
2747 s_index += 3 * (first_frag->fr_fix >> 4);
2748 /* Subtract away the initial part before first_addr. */
2749 s_index -= (3 * ((first_addr >> 4) - (start_addr >> 4))
2750 + ((first_addr & 0x3) - (start_addr & 0x3)));
2751
2752 /* Move to the beginning of the next frag. */
2753 first_frag = first_frag->fr_next;
2754 first_addr = (unsigned long) &first_frag->fr_literal;
2755
2756 /* This can happen if there is section switching in the middle of a
2757 function, causing the frag chain for the function to be broken.
2758 It is too difficult to recover safely from this problem, so we just
2759 exit with an error. */
2760 if (first_frag == NULL)
2761 as_fatal (_("Section switching in code is not supported."));
2762 }
2763
2764 /* Add in the used part of the last frag. */
2765 s_index += (3 * ((slot_addr >> 4) - (first_addr >> 4))
2766 + ((slot_addr & 0x3) - (first_addr & 0x3)));
2767 return s_index;
2768 }
2769
2770 /* Optimize unwind record directives. */
2771
2772 static unw_rec_list *
2773 optimize_unw_records (unw_rec_list *list)
2774 {
2775 if (!list)
2776 return NULL;
2777
2778 /* If the only unwind record is ".prologue" or ".prologue" followed
2779 by ".body", then we can optimize the unwind directives away. */
2780 if (list->r.type == prologue
2781 && (list->next->r.type == endp
2782 || (list->next->r.type == body && list->next->next->r.type == endp)))
2783 return NULL;
2784
2785 return list;
2786 }
2787
2788 /* Given a complete record list, process any records which have
2789 unresolved fields (e.g., length counts for a prologue). After
2790 this has been run, all necessary information should be available
2791 within each record to generate an image. */
2792
2793 static void
2794 fixup_unw_records (unw_rec_list *list, int before_relax)
2795 {
2796 unw_rec_list *ptr, *region = 0;
2797 unsigned long first_addr = 0, rlen = 0, t;
2798 fragS *first_frag = 0;
2799
2800 for (ptr = list; ptr; ptr = ptr->next)
2801 {
2802 if (ptr->slot_number == SLOT_NUM_NOT_SET)
2803 as_bad (_("Insn slot not set in unwind record."));
2804 t = slot_index (ptr->slot_number, ptr->slot_frag,
2805 first_addr, first_frag, before_relax);
2806 switch (ptr->r.type)
2807 {
2808 case prologue:
2809 case prologue_gr:
2810 case body:
2811 {
2812 unw_rec_list *last;
2813 int size;
2814 unsigned long last_addr = 0;
2815 fragS *last_frag = NULL;
2816
2817 first_addr = ptr->slot_number;
2818 first_frag = ptr->slot_frag;
2819 /* Find either the next body/prologue start, or the end of
2820 the function, and determine the size of the region. */
2821 for (last = ptr->next; last != NULL; last = last->next)
2822 if (last->r.type == prologue || last->r.type == prologue_gr
2823 || last->r.type == body || last->r.type == endp)
2824 {
2825 last_addr = last->slot_number;
2826 last_frag = last->slot_frag;
2827 break;
2828 }
2829 size = slot_index (last_addr, last_frag, first_addr, first_frag,
2830 before_relax);
2831 rlen = ptr->r.record.r.rlen = size;
2832 if (ptr->r.type == body)
2833 /* End of region. */
2834 region = 0;
2835 else
2836 region = ptr;
2837 break;
2838 }
2839 case epilogue:
2840 if (t < rlen)
2841 ptr->r.record.b.t = rlen - 1 - t;
2842 else
2843 /* This happens when a memory-stack-less procedure uses a
2844 ".restore sp" directive at the end of a region to pop
2845 the frame state. */
2846 ptr->r.record.b.t = 0;
2847 break;
2848
2849 case mem_stack_f:
2850 case mem_stack_v:
2851 case rp_when:
2852 case pfs_when:
2853 case preds_when:
2854 case unat_when:
2855 case lc_when:
2856 case fpsr_when:
2857 case priunat_when_gr:
2858 case priunat_when_mem:
2859 case bsp_when:
2860 case bspstore_when:
2861 case rnat_when:
2862 ptr->r.record.p.t = t;
2863 break;
2864
2865 case spill_reg:
2866 case spill_sprel:
2867 case spill_psprel:
2868 case spill_reg_p:
2869 case spill_sprel_p:
2870 case spill_psprel_p:
2871 ptr->r.record.x.t = t;
2872 break;
2873
2874 case frgr_mem:
2875 if (!region)
2876 {
2877 as_bad (_("frgr_mem record before region record!"));
2878 return;
2879 }
2880 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2881 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2882 set_imask (region, ptr->r.record.p.frmask, t, 1);
2883 set_imask (region, ptr->r.record.p.grmask, t, 2);
2884 break;
2885 case fr_mem:
2886 if (!region)
2887 {
2888 as_bad (_("fr_mem record before region record!"));
2889 return;
2890 }
2891 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2892 set_imask (region, ptr->r.record.p.frmask, t, 1);
2893 break;
2894 case gr_mem:
2895 if (!region)
2896 {
2897 as_bad (_("gr_mem record before region record!"));
2898 return;
2899 }
2900 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2901 set_imask (region, ptr->r.record.p.grmask, t, 2);
2902 break;
2903 case br_mem:
2904 if (!region)
2905 {
2906 as_bad (_("br_mem record before region record!"));
2907 return;
2908 }
2909 region->r.record.r.mask.br_mem |= ptr->r.record.p.brmask;
2910 set_imask (region, ptr->r.record.p.brmask, t, 3);
2911 break;
2912
2913 case gr_gr:
2914 if (!region)
2915 {
2916 as_bad (_("gr_gr record before region record!"));
2917 return;
2918 }
2919 set_imask (region, ptr->r.record.p.grmask, t, 2);
2920 break;
2921 case br_gr:
2922 if (!region)
2923 {
2924 as_bad (_("br_gr record before region record!"));
2925 return;
2926 }
2927 set_imask (region, ptr->r.record.p.brmask, t, 3);
2928 break;
2929
2930 default:
2931 break;
2932 }
2933 }
2934 }
2935
2936 /* Estimate the size of a frag before relaxing. We only have one type of frag
2937 to handle here, which is the unwind info frag. */
2938
2939 int
2940 ia64_estimate_size_before_relax (fragS *frag,
2941 asection *segtype ATTRIBUTE_UNUSED)
2942 {
2943 unw_rec_list *list;
2944 int len, size, pad;
2945
2946 /* ??? This code is identical to the first part of ia64_convert_frag. */
2947 list = (unw_rec_list *) frag->fr_opcode;
2948 fixup_unw_records (list, 0);
2949
2950 len = calc_record_size (list);
2951 /* pad to pointer-size boundary. */
2952 pad = len % md.pointer_size;
2953 if (pad != 0)
2954 len += md.pointer_size - pad;
2955 /* Add 8 for the header. */
2956 size = len + 8;
2957 /* Add a pointer for the personality offset. */
2958 if (frag->fr_offset)
2959 size += md.pointer_size;
2960
2961 /* fr_var carries the max_chars that we created the fragment with.
2962 We must, of course, have allocated enough memory earlier. */
2963 gas_assert (frag->fr_var >= size);
2964
2965 return frag->fr_fix + size;
2966 }
2967
2968 /* This function converts a rs_machine_dependent variant frag into a
2969 normal fill frag with the unwind image from the record list. */
2970 void
2971 ia64_convert_frag (fragS *frag)
2972 {
2973 unw_rec_list *list;
2974 int len, size, pad;
2975 valueT flag_value;
2976
2977 /* ??? This code is identical to ia64_estimate_size_before_relax. */
2978 list = (unw_rec_list *) frag->fr_opcode;
2979 fixup_unw_records (list, 0);
2980
2981 len = calc_record_size (list);
2982 /* pad to pointer-size boundary. */
2983 pad = len % md.pointer_size;
2984 if (pad != 0)
2985 len += md.pointer_size - pad;
2986 /* Add 8 for the header. */
2987 size = len + 8;
2988 /* Add a pointer for the personality offset. */
2989 if (frag->fr_offset)
2990 size += md.pointer_size;
2991
2992 /* fr_var carries the max_chars that we created the fragment with.
2993 We must, of course, have allocated enough memory earlier. */
2994 gas_assert (frag->fr_var >= size);
2995
2996 /* Initialize the header area. fr_offset is initialized with
2997 unwind.personality_routine. */
2998 if (frag->fr_offset)
2999 {
3000 if (md.flags & EF_IA_64_ABI64)
3001 flag_value = (bfd_vma) 3 << 32;
3002 else
3003 /* 32-bit unwind info block. */
3004 flag_value = (bfd_vma) 0x1003 << 32;
3005 }
3006 else
3007 flag_value = 0;
3008
3009 md_number_to_chars (frag->fr_literal,
3010 (((bfd_vma) 1 << 48) /* Version. */
3011 | flag_value /* U & E handler flags. */
3012 | (len / md.pointer_size)), /* Length. */
3013 8);
3014
3015 /* Skip the header. */
3016 vbyte_mem_ptr = frag->fr_literal + 8;
3017 process_unw_records (list, output_vbyte_mem);
3018
3019 /* Fill the padding bytes with zeros. */
3020 if (pad != 0)
3021 md_number_to_chars (frag->fr_literal + len + 8 - md.pointer_size + pad, 0,
3022 md.pointer_size - pad);
3023 /* Fill the unwind personality with zeros. */
3024 if (frag->fr_offset)
3025 md_number_to_chars (frag->fr_literal + size - md.pointer_size, 0,
3026 md.pointer_size);
3027
3028 frag->fr_fix += size;
3029 frag->fr_type = rs_fill;
3030 frag->fr_var = 0;
3031 frag->fr_offset = 0;
3032 }
3033
3034 static int
3035 parse_predicate_and_operand (expressionS *e, unsigned *qp, const char *po)
3036 {
3037 int sep = parse_operand_and_eval (e, ',');
3038
3039 *qp = e->X_add_number - REG_P;
3040 if (e->X_op != O_register || *qp > 63)
3041 {
3042 as_bad (_("First operand to .%s must be a predicate"), po);
3043 *qp = 0;
3044 }
3045 else if (*qp == 0)
3046 as_warn (_("Pointless use of p0 as first operand to .%s"), po);
3047 if (sep == ',')
3048 sep = parse_operand_and_eval (e, ',');
3049 else
3050 e->X_op = O_absent;
3051 return sep;
3052 }
3053
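/* Convert E into the "ab"/"reg" pair used by the X-format spill records:
   ab 0 covers the preserved GRs r4-r7, ab 1 the preserved FRs f2-f5 and
   f16-f31, ab 2 the preserved BRs b1-b5, and ab 3 the special registers
   enumerated in the switch below.  Anything else is diagnosed as operand
   N of directive PO. */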
3054 static void
3055 convert_expr_to_ab_reg (const expressionS *e,
3056 unsigned int *ab,
3057 unsigned int *regp,
3058 const char *po,
3059 int n)
3060 {
3061 unsigned int reg = e->X_add_number;
3062
3063 *ab = *regp = 0; /* Anything valid is good here. */
3064
3065 if (e->X_op != O_register)
3066 reg = REG_GR; /* Anything invalid is good here. */
3067
3068 if (reg >= (REG_GR + 4) && reg <= (REG_GR + 7))
3069 {
3070 *ab = 0;
3071 *regp = reg - REG_GR;
3072 }
3073 else if ((reg >= (REG_FR + 2) && reg <= (REG_FR + 5))
3074 || (reg >= (REG_FR + 16) && reg <= (REG_FR + 31)))
3075 {
3076 *ab = 1;
3077 *regp = reg - REG_FR;
3078 }
3079 else if (reg >= (REG_BR + 1) && reg <= (REG_BR + 5))
3080 {
3081 *ab = 2;
3082 *regp = reg - REG_BR;
3083 }
3084 else
3085 {
3086 *ab = 3;
3087 switch (reg)
3088 {
3089 case REG_PR: *regp = 0; break;
3090 case REG_PSP: *regp = 1; break;
3091 case REG_PRIUNAT: *regp = 2; break;
3092 case REG_BR + 0: *regp = 3; break;
3093 case REG_AR + AR_BSP: *regp = 4; break;
3094 case REG_AR + AR_BSPSTORE: *regp = 5; break;
3095 case REG_AR + AR_RNAT: *regp = 6; break;
3096 case REG_AR + AR_UNAT: *regp = 7; break;
3097 case REG_AR + AR_FPSR: *regp = 8; break;
3098 case REG_AR + AR_PFS: *regp = 9; break;
3099 case REG_AR + AR_LC: *regp = 10; break;
3100
3101 default:
3102 as_bad (_("Operand %d to .%s must be a preserved register"), n, po);
3103 break;
3104 }
3105 }
3106 }
3107
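/* Convert E into the "xy"/"reg" pair naming the target of a .spillreg:
   xy 0 selects a general register, xy 1 a floating-point register and
   xy 2 a branch register. */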
3108 static void
3109 convert_expr_to_xy_reg (const expressionS *e,
3110 unsigned int *xy,
3111 unsigned int *regp,
3112 const char *po,
3113 int n)
3114 {
3115 unsigned int reg = e->X_add_number;
3116
3117 *xy = *regp = 0; /* Anything valid is good here. */
3118
3119 if (e->X_op != O_register)
3120 reg = REG_GR; /* Anything invalid is good here. */
3121
3122 if (reg >= (REG_GR + 1) && reg <= (REG_GR + 127))
3123 {
3124 *xy = 0;
3125 *regp = reg - REG_GR;
3126 }
3127 else if (reg >= (REG_FR + 2) && reg <= (REG_FR + 127))
3128 {
3129 *xy = 1;
3130 *regp = reg - REG_FR;
3131 }
3132 else if (reg >= REG_BR && reg <= (REG_BR + 7))
3133 {
3134 *xy = 2;
3135 *regp = reg - REG_BR;
3136 }
3137 else
3138 as_bad (_("Operand %d to .%s must be a writable register"), n, po);
3139 }
3140
3141 static void
3142 dot_align (int arg)
3143 {
3144 /* The current frag is an alignment frag. */
3145 align_frag = frag_now;
3146 s_align_bytes (arg);
3147 }
3148
3149 static void
3150 dot_radix (int dummy ATTRIBUTE_UNUSED)
3151 {
3152 char *radix;
3153 int ch;
3154
3155 SKIP_WHITESPACE ();
3156
3157 if (is_it_end_of_statement ())
3158 return;
3159 ch = get_symbol_name (&radix);
3160 ia64_canonicalize_symbol_name (radix);
3161 if (strcasecmp (radix, "C"))
3162 as_bad (_("Radix `%s' unsupported or invalid"), radix);
3163 (void) restore_line_pointer (ch);
3164 demand_empty_rest_of_line ();
3165 }
3166
3167 /* Helper function for .loc directives. If the assembler is not generating
3168 line number info, then we need to remember which instructions have a .loc
3169 directive, and only call dwarf2_gen_line_info for those instructions. */
3170
3171 static void
3172 dot_loc (int x)
3173 {
3174 CURR_SLOT.loc_directive_seen = 1;
3175 dwarf2_directive_loc (x);
3176 }
3177
3178 /* .sbss, .bss etc. are macros that expand into ".section SECNAME". */
3179 static void
3180 dot_special_section (int which)
3181 {
3182 set_section ((char *) special_section_name[which]);
3183 }
3184
3185 /* Return -1 for warning and 0 for error. */
3186
3187 static int
3188 unwind_diagnostic (const char * region, const char *directive)
3189 {
3190 if (md.unwind_check == unwind_check_warning)
3191 {
3192 as_warn (_(".%s outside of %s"), directive, region);
3193 return -1;
3194 }
3195 else
3196 {
3197 as_bad (_(".%s outside of %s"), directive, region);
3198 ignore_rest_of_line ();
3199 return 0;
3200 }
3201 }
3202
3203 /* Return 1 if a directive is in a procedure, -1 if a directive isn't in
3204 a procedure but the unwind directive check is set to warning, 0 if
3205 a directive isn't in a procedure and the unwind directive check is set
3206 to error. */
3207
3208 static int
3209 in_procedure (const char *directive)
3210 {
3211 if (unwind.proc_pending.sym
3212 && (!unwind.saved_text_seg || strcmp (directive, "endp") == 0))
3213 return 1;
3214 return unwind_diagnostic ("procedure", directive);
3215 }
3216
3217 /* Return 1 if a directive is in a prologue, -1 if a directive isn't in
3218 a prologue but the unwind directive check is set to warning, 0 if
3219 a directive isn't in a prologue and the unwind directive check is set
3220 to error. */
3221
3222 static int
3223 in_prologue (const char *directive)
3224 {
3225 int in = in_procedure (directive);
3226
3227 if (in > 0 && !unwind.prologue)
3228 in = unwind_diagnostic ("prologue", directive);
3229 check_pending_save ();
3230 return in;
3231 }
3232
3233 /* Return 1 if a directive is in a body, -1 if a directive isn't in
3234 a body but the unwind directive check is set to warning, 0 if
3235 a directive isn't in a body and the unwind directive check is set
3236 to error. */
3237
3238 static int
3239 in_body (const char *directive)
3240 {
3241 int in = in_procedure (directive);
3242
3243 if (in > 0 && !unwind.body)
3244 in = unwind_diagnostic ("body region", directive);
3245 return in;
3246 }
3247
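/* Append PTR (when non-NULL) to the unwind record list of the current
   procedure.  SEP is the separator returned by the operand parser: a ','
   means a tag operand follows (parsed here but not yet supported), and
   any value other than NOT_A_CHAR means the rest of the line must be
   empty. */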
3248 static void
3249 add_unwind_entry (unw_rec_list *ptr, int sep)
3250 {
3251 if (ptr)
3252 {
3253 if (unwind.tail)
3254 unwind.tail->next = ptr;
3255 else
3256 unwind.list = ptr;
3257 unwind.tail = ptr;
3258
3259 /* The current entry can in fact be a chain of unwind entries. */
3260 if (unwind.current_entry == NULL)
3261 unwind.current_entry = ptr;
3262 }
3263
3264 /* The current entry can in fact be a chain of unwind entries. */
3265 if (unwind.current_entry == NULL)
3266 unwind.current_entry = ptr;
3267
3268 if (sep == ',')
3269 {
3270 char *name;
3271 /* Parse a tag permitted for the current directive. */
3272 int ch;
3273
3274 SKIP_WHITESPACE ();
3275 ch = get_symbol_name (&name);
3276 /* FIXME: For now, just issue a warning that this isn't implemented. */
3277 {
3278 static int warned;
3279
3280 if (!warned)
3281 {
3282 warned = 1;
3283 as_warn (_("Tags on unwind pseudo-ops aren't supported, yet"));
3284 }
3285 }
3286 (void) restore_line_pointer (ch);
3287 }
3288 if (sep != NOT_A_CHAR)
3289 demand_empty_rest_of_line ();
3290 }
3291
3292 static void
3293 dot_fframe (int dummy ATTRIBUTE_UNUSED)
3294 {
3295 expressionS e;
3296 int sep;
3297
3298 if (!in_prologue ("fframe"))
3299 return;
3300
3301 sep = parse_operand_and_eval (&e, ',');
3302
3303 if (e.X_op != O_constant)
3304 {
3305 as_bad (_("First operand to .fframe must be a constant"));
3306 e.X_add_number = 0;
3307 }
3308 add_unwind_entry (output_mem_stack_f (e.X_add_number), sep);
3309 }
3310
3311 static void
3312 dot_vframe (int dummy ATTRIBUTE_UNUSED)
3313 {
3314 expressionS e;
3315 unsigned reg;
3316 int sep;
3317
3318 if (!in_prologue ("vframe"))
3319 return;
3320
3321 sep = parse_operand_and_eval (&e, ',');
3322 reg = e.X_add_number - REG_GR;
3323 if (e.X_op != O_register || reg > 127)
3324 {
3325 as_bad (_("First operand to .vframe must be a general register"));
3326 reg = 0;
3327 }
3328 add_unwind_entry (output_mem_stack_v (), sep);
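  /* If psp is not covered by the .prologue mask, emit an explicit psp_gr
     record; otherwise the GR is implied by .prologue, so only check REG
     against prologue_gr plus the count of mask bits saved ahead of psp. */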
3329 if (! (unwind.prologue_mask & 2))
3330 add_unwind_entry (output_psp_gr (reg), NOT_A_CHAR);
3331 else if (reg != unwind.prologue_gr
3332 + (unsigned) popcount (unwind.prologue_mask & -(2 << 1)))
3333 as_warn (_("Operand of .vframe contradicts .prologue"));
3334 }
3335
3336 static void
3337 dot_vframesp (int psp)
3338 {
3339 expressionS e;
3340 int sep;
3341
3342 if (psp)
3343 as_warn (_(".vframepsp is meaningless, assuming .vframesp was meant"));
3344
3345 if (!in_prologue ("vframesp"))
3346 return;
3347
3348 sep = parse_operand_and_eval (&e, ',');
3349 if (e.X_op != O_constant)
3350 {
3351 as_bad (_("Operand to .vframesp must be a constant (sp-relative offset)"));
3352 e.X_add_number = 0;
3353 }
3354 add_unwind_entry (output_mem_stack_v (), sep);
3355 add_unwind_entry (output_psp_sprel (e.X_add_number), NOT_A_CHAR);
3356 }
3357
3358 static void
3359 dot_save (int dummy ATTRIBUTE_UNUSED)
3360 {
3361 expressionS e1, e2;
3362 unsigned reg1, reg2;
3363 int sep;
3364
3365 if (!in_prologue ("save"))
3366 return;
3367
3368 sep = parse_operand_and_eval (&e1, ',');
3369 if (sep == ',')
3370 sep = parse_operand_and_eval (&e2, ',');
3371 else
3372 e2.X_op = O_absent;
3373
3374 reg1 = e1.X_add_number;
3375 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
3376 if (e1.X_op != O_register)
3377 {
3378 as_bad (_("First operand to .save not a register"));
3379 reg1 = REG_PR; /* Anything valid is good here. */
3380 }
3381 reg2 = e2.X_add_number - REG_GR;
3382 if (e2.X_op != O_register || reg2 > 127)
3383 {
3384 as_bad (_("Second operand to .save not a valid register"));
3385 reg2 = 0;
3386 }
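  /* For rp, ar.pfs and pr the save GR may already be implied by a
     .prologue mask bit; in that case only the *_when record is emitted
     here and REG2 is merely checked against the register implied by
     .prologue. */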
3387 switch (reg1)
3388 {
3389 case REG_AR + AR_BSP:
3390 add_unwind_entry (output_bsp_when (), sep);
3391 add_unwind_entry (output_bsp_gr (reg2), NOT_A_CHAR);
3392 break;
3393 case REG_AR + AR_BSPSTORE:
3394 add_unwind_entry (output_bspstore_when (), sep);
3395 add_unwind_entry (output_bspstore_gr (reg2), NOT_A_CHAR);
3396 break;
3397 case REG_AR + AR_RNAT:
3398 add_unwind_entry (output_rnat_when (), sep);
3399 add_unwind_entry (output_rnat_gr (reg2), NOT_A_CHAR);
3400 break;
3401 case REG_AR + AR_UNAT:
3402 add_unwind_entry (output_unat_when (), sep);
3403 add_unwind_entry (output_unat_gr (reg2), NOT_A_CHAR);
3404 break;
3405 case REG_AR + AR_FPSR:
3406 add_unwind_entry (output_fpsr_when (), sep);
3407 add_unwind_entry (output_fpsr_gr (reg2), NOT_A_CHAR);
3408 break;
3409 case REG_AR + AR_PFS:
3410 add_unwind_entry (output_pfs_when (), sep);
3411 if (! (unwind.prologue_mask & 4))
3412 add_unwind_entry (output_pfs_gr (reg2), NOT_A_CHAR);
3413 else if (reg2 != unwind.prologue_gr
3414 + (unsigned) popcount (unwind.prologue_mask & -(4 << 1)))
3415 as_warn (_("Second operand of .save contradicts .prologue"));
3416 break;
3417 case REG_AR + AR_LC:
3418 add_unwind_entry (output_lc_when (), sep);
3419 add_unwind_entry (output_lc_gr (reg2), NOT_A_CHAR);
3420 break;
3421 case REG_BR:
3422 add_unwind_entry (output_rp_when (), sep);
3423 if (! (unwind.prologue_mask & 8))
3424 add_unwind_entry (output_rp_gr (reg2), NOT_A_CHAR);
3425 else if (reg2 != unwind.prologue_gr)
3426 as_warn (_("Second operand of .save contradicts .prologue"));
3427 break;
3428 case REG_PR:
3429 add_unwind_entry (output_preds_when (), sep);
3430 if (! (unwind.prologue_mask & 1))
3431 add_unwind_entry (output_preds_gr (reg2), NOT_A_CHAR);
3432 else if (reg2 != unwind.prologue_gr
3433 + (unsigned) popcount (unwind.prologue_mask & -(1 << 1)))
3434 as_warn (_("Second operand of .save contradicts .prologue"));
3435 break;
3436 case REG_PRIUNAT:
3437 add_unwind_entry (output_priunat_when_gr (), sep);
3438 add_unwind_entry (output_priunat_gr (reg2), NOT_A_CHAR);
3439 break;
3440 default:
3441 as_bad (_("First operand to .save not a valid register"));
3442 add_unwind_entry (NULL, sep);
3443 break;
3444 }
3445 }
3446
3447 static void
3448 dot_restore (int dummy ATTRIBUTE_UNUSED)
3449 {
3450 expressionS e1;
3451 unsigned long ecount; /* # of _additional_ regions to pop */
3452 int sep;
3453
3454 if (!in_body ("restore"))
3455 return;
3456
3457 sep = parse_operand_and_eval (&e1, ',');
3458 if (e1.X_op != O_register || e1.X_add_number != REG_GR + 12)
3459 as_bad (_("First operand to .restore must be stack pointer (sp)"));
3460
3461 if (sep == ',')
3462 {
3463 expressionS e2;
3464
3465 sep = parse_operand_and_eval (&e2, ',');
3466 if (e2.X_op != O_constant || e2.X_add_number < 0)
3467 {
3468 as_bad (_("Second operand to .restore must be a constant >= 0"));
3469 e2.X_add_number = 0;
3470 }
3471 ecount = e2.X_add_number;
3472 }
3473 else
3474 ecount = unwind.prologue_count - 1;
3475
3476 if (ecount >= unwind.prologue_count)
3477 {
3478 as_bad (_("Epilogue count of %lu exceeds number of nested prologues (%u)"),
3479 ecount + 1, unwind.prologue_count);
3480 ecount = 0;
3481 }
3482
3483 add_unwind_entry (output_epilogue (ecount), sep);
3484
3485 if (ecount < unwind.prologue_count)
3486 unwind.prologue_count -= ecount + 1;
3487 else
3488 unwind.prologue_count = 0;
3489 }
3490
3491 static void
3492 dot_restorereg (int pred)
3493 {
3494 unsigned int qp, ab, reg;
3495 expressionS e;
3496 int sep;
3497 const char * const po = pred ? "restorereg.p" : "restorereg";
3498
3499 if (!in_procedure (po))
3500 return;
3501
3502 if (pred)
3503 sep = parse_predicate_and_operand (&e, &qp, po);
3504 else
3505 {
3506 sep = parse_operand_and_eval (&e, ',');
3507 qp = 0;
3508 }
3509 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
3510
3511 add_unwind_entry (output_spill_reg (ab, reg, 0, 0, qp), sep);
3512 }
3513
3514 static const char *special_linkonce_name[] =
3515 {
3516 ".gnu.linkonce.ia64unw.", ".gnu.linkonce.ia64unwi."
3517 };
3518
3519 static void
3520 start_unwind_section (const segT text_seg, int sec_index)
3521 {
3522 /*
3523 Use a slightly ugly scheme to derive the unwind section names from
3524 the text section name:
3525
3526 text sect. unwind table sect.
3527 name: name: comments:
3528 ---------- ----------------- --------------------------------
3529 .text .IA_64.unwind
3530 .text.foo .IA_64.unwind.text.foo
3531 .foo .IA_64.unwind.foo
3532 .gnu.linkonce.t.foo
3533 .gnu.linkonce.ia64unw.foo
3534 _info .IA_64.unwind_info gas issues error message (ditto)
3535 _infoFOO .IA_64.unwind_infoFOO gas issues error message (ditto)
3536
3537 This mapping is done so that:
3538
3539 (a) An object file with unwind info only in .text will use
3540 unwind section names .IA_64.unwind and .IA_64.unwind_info.
3541 This follows the letter of the ABI and also ensures backwards
3542 compatibility with older toolchains.
3543
3544 (b) An object file with unwind info in multiple text sections
3545 will use separate unwind sections for each text section.
3546 This allows us to properly set the "sh_info" and "sh_link"
3547 fields in SHT_IA_64_UNWIND as required by the ABI and also
3548 lets GNU ld support programs with multiple segments
3549 containing unwind info (as might be the case for certain
3550 embedded applications).
3551
3552 (c) An error is issued if there would be a name clash.
3553 */
3554
3555 const char *text_name, *sec_text_name;
3556 char *sec_name;
3557 const char *prefix = special_section_name [sec_index];
3558 const char *suffix;
3559
3560 sec_text_name = segment_name (text_seg);
3561 text_name = sec_text_name;
3562 if (strncmp (text_name, "_info", 5) == 0)
3563 {
3564 as_bad (_("Illegal section name `%s' (causes unwind section name clash)"),
3565 text_name);
3566 ignore_rest_of_line ();
3567 return;
3568 }
3569 if (strcmp (text_name, ".text") == 0)
3570 text_name = "";
3571
3572 /* Build the unwind section name by appending the (possibly stripped)
3573 text section name to the unwind prefix. */
3574 suffix = text_name;
3575 if (strncmp (text_name, ".gnu.linkonce.t.",
3576 sizeof (".gnu.linkonce.t.") - 1) == 0)
3577 {
3578 prefix = special_linkonce_name [sec_index - SPECIAL_SECTION_UNWIND];
3579 suffix += sizeof (".gnu.linkonce.t.") - 1;
3580 }
3581
3582 sec_name = concat (prefix, suffix, NULL);
3583
3584 /* Handle COMDAT group. */
3585 if ((text_seg->flags & SEC_LINK_ONCE) != 0
3586 && (elf_section_flags (text_seg) & SHF_GROUP) != 0)
3587 {
3588 char *section;
3589 const char *group_name = elf_group_name (text_seg);
3590
3591 if (group_name == NULL)
3592 {
3593 as_bad (_("Group section `%s' has no group signature"),
3594 sec_text_name);
3595 ignore_rest_of_line ();
3596 free (sec_name);
3597 return;
3598 }
3599
3600 /* We have to construct a fake section directive. */
3601 section = concat (sec_name, ",\"aG\",@progbits,", group_name, ",comdat", NULL);
3602 set_section (section);
3603 free (section);
3604 }
3605 else
3606 {
3607 set_section (sec_name);
3608 bfd_set_section_flags (stdoutput, now_seg,
3609 SEC_LOAD | SEC_ALLOC | SEC_READONLY);
3610 }
3611
3612 elf_linked_to_section (now_seg) = text_seg;
3613 free (sec_name);
3614 }
3615
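/* Emit the unwind info block for the current procedure into the unwind
   info section associated with TEXT_SEG.  The descriptors are laid out
   in a machine-dependent frag which ia64_convert_frag later turns into
   the final bytes. */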
3616 static void
3617 generate_unwind_image (const segT text_seg)
3618 {
3619 int size, pad;
3620 unw_rec_list *list;
3621
3622 /* Mark the end of the unwind info, so that we can compute the size of the
3623 last unwind region. */
3624 add_unwind_entry (output_endp (), NOT_A_CHAR);
3625
3626 /* Force out pending instructions, to make sure all unwind records have
3627 a valid slot_number field. */
3628 ia64_flush_insns ();
3629
3630 /* Generate the unwind record. */
3631 list = optimize_unw_records (unwind.list);
3632 fixup_unw_records (list, 1);
3633 size = calc_record_size (list);
3634
3635 if (size > 0 || unwind.force_unwind_entry)
3636 {
3637 unwind.force_unwind_entry = 0;
3638 /* pad to pointer-size boundary. */
3639 pad = size % md.pointer_size;
3640 if (pad != 0)
3641 size += md.pointer_size - pad;
3642 /* Add 8 for the header. */
3643 size += 8;
3644 /* Add a pointer for the personality offset. */
3645 if (unwind.personality_routine)
3646 size += md.pointer_size;
3647 }
3648
3649 /* If there are unwind records, switch sections, and output the info. */
3650 if (size != 0)
3651 {
3652 expressionS exp;
3653 bfd_reloc_code_real_type reloc;
3654
3655 start_unwind_section (text_seg, SPECIAL_SECTION_UNWIND_INFO);
3656
3657 /* Make sure the section has 4 byte alignment for ILP32 and
3658 8 byte alignment for LP64. */
3659 frag_align (md.pointer_size_shift, 0, 0);
3660 record_alignment (now_seg, md.pointer_size_shift);
3661
3662 /* Set expression which points to start of unwind descriptor area. */
3663 unwind.info = expr_build_dot ();
3664
3665 frag_var (rs_machine_dependent, size, size, 0, 0,
3666 (offsetT) (long) unwind.personality_routine,
3667 (char *) list);
3668
3669 /* Add the personality address to the image. */
3670 if (unwind.personality_routine != 0)
3671 {
3672 exp.X_op = O_symbol;
3673 exp.X_add_symbol = unwind.personality_routine;
3674 exp.X_add_number = 0;
3675
3676 if (md.flags & EF_IA_64_BE)
3677 {
3678 if (md.flags & EF_IA_64_ABI64)
3679 reloc = BFD_RELOC_IA64_LTOFF_FPTR64MSB;
3680 else
3681 reloc = BFD_RELOC_IA64_LTOFF_FPTR32MSB;
3682 }
3683 else
3684 {
3685 if (md.flags & EF_IA_64_ABI64)
3686 reloc = BFD_RELOC_IA64_LTOFF_FPTR64LSB;
3687 else
3688 reloc = BFD_RELOC_IA64_LTOFF_FPTR32LSB;
3689 }
3690
3691 fix_new_exp (frag_now, frag_now_fix () - md.pointer_size,
3692 md.pointer_size, &exp, 0, reloc);
3693 unwind.personality_routine = 0;
3694 }
3695 }
3696
3697 free_saved_prologue_counts ();
3698 unwind.list = unwind.tail = unwind.current_entry = NULL;
3699 }
3700
3701 static void
3702 dot_handlerdata (int dummy ATTRIBUTE_UNUSED)
3703 {
3704 if (!in_procedure ("handlerdata"))
3705 return;
3706 unwind.force_unwind_entry = 1;
3707
3708 /* Remember which segment we're in so we can switch back after .endp */
3709 unwind.saved_text_seg = now_seg;
3710 unwind.saved_text_subseg = now_subseg;
3711
3712 /* Generate unwind info into unwind-info section and then leave that
3713 section as the currently active one so dataXX directives go into
3714 the language specific data area of the unwind info block. */
3715 generate_unwind_image (now_seg);
3716 demand_empty_rest_of_line ();
3717 }
3718
3719 static void
3720 dot_unwentry (int dummy ATTRIBUTE_UNUSED)
3721 {
3722 if (!in_procedure ("unwentry"))
3723 return;
3724 unwind.force_unwind_entry = 1;
3725 demand_empty_rest_of_line ();
3726 }
3727
3728 static void
3729 dot_altrp (int dummy ATTRIBUTE_UNUSED)
3730 {
3731 expressionS e;
3732 unsigned reg;
3733
3734 if (!in_prologue ("altrp"))
3735 return;
3736
3737 parse_operand_and_eval (&e, 0);
3738 reg = e.X_add_number - REG_BR;
3739 if (e.X_op != O_register || reg > 7)
3740 {
3741 as_bad (_("First operand to .altrp not a valid branch register"));
3742 reg = 0;
3743 }
3744 add_unwind_entry (output_rp_br (reg), 0);
3745 }
3746
3747 static void
3748 dot_savemem (int psprel)
3749 {
3750 expressionS e1, e2;
3751 int sep;
3752 int reg1, val;
3753 const char * const po = psprel ? "savepsp" : "savesp";
3754
3755 if (!in_prologue (po))
3756 return;
3757
3758 sep = parse_operand_and_eval (&e1, ',');
3759 if (sep == ',')
3760 sep = parse_operand_and_eval (&e2, ',');
3761 else
3762 e2.X_op = O_absent;
3763
3764 reg1 = e1.X_add_number;
3765 val = e2.X_add_number;
3766
3767 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
3768 if (e1.X_op != O_register)
3769 {
3770 as_bad (_("First operand to .%s not a register"), po);
3771 reg1 = REG_PR; /* Anything valid is good here. */
3772 }
3773 if (e2.X_op != O_constant)
3774 {
3775 as_bad (_("Second operand to .%s not a constant"), po);
3776 val = 0;
3777 }
3778
3779 switch (reg1)
3780 {
3781 case REG_AR + AR_BSP:
3782 add_unwind_entry (output_bsp_when (), sep);
3783 add_unwind_entry ((psprel
3784 ? output_bsp_psprel
3785 : output_bsp_sprel) (val), NOT_A_CHAR);
3786 break;
3787 case REG_AR + AR_BSPSTORE:
3788 add_unwind_entry (output_bspstore_when (), sep);
3789 add_unwind_entry ((psprel
3790 ? output_bspstore_psprel
3791 : output_bspstore_sprel) (val), NOT_A_CHAR);
3792 break;
3793 case REG_AR + AR_RNAT:
3794 add_unwind_entry (output_rnat_when (), sep);
3795 add_unwind_entry ((psprel
3796 ? output_rnat_psprel
3797 : output_rnat_sprel) (val), NOT_A_CHAR);
3798 break;
3799 case REG_AR + AR_UNAT:
3800 add_unwind_entry (output_unat_when (), sep);
3801 add_unwind_entry ((psprel
3802 ? output_unat_psprel
3803 : output_unat_sprel) (val), NOT_A_CHAR);
3804 break;
3805 case REG_AR + AR_FPSR:
3806 add_unwind_entry (output_fpsr_when (), sep);
3807 add_unwind_entry ((psprel
3808 ? output_fpsr_psprel
3809 : output_fpsr_sprel) (val), NOT_A_CHAR);
3810 break;
3811 case REG_AR + AR_PFS:
3812 add_unwind_entry (output_pfs_when (), sep);
3813 add_unwind_entry ((psprel
3814 ? output_pfs_psprel
3815 : output_pfs_sprel) (val), NOT_A_CHAR);
3816 break;
3817 case REG_AR + AR_LC:
3818 add_unwind_entry (output_lc_when (), sep);
3819 add_unwind_entry ((psprel
3820 ? output_lc_psprel
3821 : output_lc_sprel) (val), NOT_A_CHAR);
3822 break;
3823 case REG_BR:
3824 add_unwind_entry (output_rp_when (), sep);
3825 add_unwind_entry ((psprel
3826 ? output_rp_psprel
3827 : output_rp_sprel) (val), NOT_A_CHAR);
3828 break;
3829 case REG_PR:
3830 add_unwind_entry (output_preds_when (), sep);
3831 add_unwind_entry ((psprel
3832 ? output_preds_psprel
3833 : output_preds_sprel) (val), NOT_A_CHAR);
3834 break;
3835 case REG_PRIUNAT:
3836 add_unwind_entry (output_priunat_when_mem (), sep);
3837 add_unwind_entry ((psprel
3838 ? output_priunat_psprel
3839 : output_priunat_sprel) (val), NOT_A_CHAR);
3840 break;
3841 default:
3842 as_bad (_("First operand to .%s not a valid register"), po);
3843 add_unwind_entry (NULL, sep);
3844 break;
3845 }
3846 }
3847
3848 static void
3849 dot_saveg (int dummy ATTRIBUTE_UNUSED)
3850 {
3851 expressionS e;
3852 unsigned grmask;
3853 int sep;
3854
3855 if (!in_prologue ("save.g"))
3856 return;
3857
3858 sep = parse_operand_and_eval (&e, ',');
3859
3860 grmask = e.X_add_number;
3861 if (e.X_op != O_constant
3862 || e.X_add_number <= 0
3863 || e.X_add_number > 0xf)
3864 {
3865 as_bad (_("First operand to .save.g must be a positive 4-bit constant"));
3866 grmask = 0;
3867 }
3868
3869 if (sep == ',')
3870 {
3871 unsigned reg;
3872 int n = popcount (grmask);
3873
3874 parse_operand_and_eval (&e, 0);
3875 reg = e.X_add_number - REG_GR;
3876 if (e.X_op != O_register || reg > 127)
3877 {
3878 as_bad (_("Second operand to .save.g must be a general register"));
3879 reg = 0;
3880 }
3881 else if (reg > 128U - n)
3882 {
3883 as_bad (_("Second operand to .save.g must be the first of %d general registers"), n);
3884 reg = 0;
3885 }
3886 add_unwind_entry (output_gr_gr (grmask, reg), 0);
3887 }
3888 else
3889 add_unwind_entry (output_gr_mem (grmask), 0);
3890 }
3891
3892 static void
3893 dot_savef (int dummy ATTRIBUTE_UNUSED)
3894 {
3895 expressionS e;
3896
3897 if (!in_prologue ("save.f"))
3898 return;
3899
3900 parse_operand_and_eval (&e, 0);
3901
3902 if (e.X_op != O_constant
3903 || e.X_add_number <= 0
3904 || e.X_add_number > 0xfffff)
3905 {
3906 as_bad (_("Operand to .save.f must be a positive 20-bit constant"));
3907 e.X_add_number = 0;
3908 }
3909 add_unwind_entry (output_fr_mem (e.X_add_number), 0);
3910 }
3911
3912 static void
3913 dot_saveb (int dummy ATTRIBUTE_UNUSED)
3914 {
3915 expressionS e;
3916 unsigned brmask;
3917 int sep;
3918
3919 if (!in_prologue ("save.b"))
3920 return;
3921
3922 sep = parse_operand_and_eval (&e, ',');
3923
3924 brmask = e.X_add_number;
3925 if (e.X_op != O_constant
3926 || e.X_add_number <= 0
3927 || e.X_add_number > 0x1f)
3928 {
3929 as_bad (_("First operand to .save.b must be a positive 5-bit constant"));
3930 brmask = 0;
3931 }
3932
3933 if (sep == ',')
3934 {
3935 unsigned reg;
3936 int n = popcount (brmask);
3937
3938 parse_operand_and_eval (&e, 0);
3939 reg = e.X_add_number - REG_GR;
3940 if (e.X_op != O_register || reg > 127)
3941 {
3942 as_bad (_("Second operand to .save.b must be a general register"));
3943 reg = 0;
3944 }
3945 else if (reg > 128U - n)
3946 {
3947 as_bad (_("Second operand to .save.b must be the first of %d general registers"), n);
3948 reg = 0;
3949 }
3950 add_unwind_entry (output_br_gr (brmask, reg), 0);
3951 }
3952 else
3953 add_unwind_entry (output_br_mem (brmask), 0);
3954 }
3955
3956 static void
3957 dot_savegf (int dummy ATTRIBUTE_UNUSED)
3958 {
3959 expressionS e1, e2;
3960
3961 if (!in_prologue ("save.gf"))
3962 return;
3963
3964 if (parse_operand_and_eval (&e1, ',') == ',')
3965 parse_operand_and_eval (&e2, 0);
3966 else
3967 e2.X_op = O_absent;
3968
3969 if (e1.X_op != O_constant
3970 || e1.X_add_number < 0
3971 || e1.X_add_number > 0xf)
3972 {
3973 as_bad (_("First operand to .save.gf must be a non-negative 4-bit constant"));
3974 e1.X_op = O_absent;
3975 e1.X_add_number = 0;
3976 }
3977 if (e2.X_op != O_constant
3978 || e2.X_add_number < 0
3979 || e2.X_add_number > 0xfffff)
3980 {
3981 as_bad (_("Second operand to .save.gf must be a non-negative 20-bit constant"));
3982 e2.X_op = O_absent;
3983 e2.X_add_number = 0;
3984 }
3985 if (e1.X_op == O_constant
3986 && e2.X_op == O_constant
3987 && e1.X_add_number == 0
3988 && e2.X_add_number == 0)
3989 as_bad (_("Operands to .save.gf may not be both zero"));
3990
3991 add_unwind_entry (output_frgr_mem (e1.X_add_number, e2.X_add_number), 0);
3992 }
3993
3994 static void
3995 dot_spill (int dummy ATTRIBUTE_UNUSED)
3996 {
3997 expressionS e;
3998
3999 if (!in_prologue ("spill"))
4000 return;
4001
4002 parse_operand_and_eval (&e, 0);
4003
4004 if (e.X_op != O_constant)
4005 {
4006 as_bad (_("Operand to .spill must be a constant"));
4007 e.X_add_number = 0;
4008 }
4009 add_unwind_entry (output_spill_base (e.X_add_number), 0);
4010 }
4011
4012 static void
4013 dot_spillreg (int pred)
4014 {
4015 int sep;
4016 unsigned int qp, ab, xy, reg, treg;
4017 expressionS e;
4018 const char * const po = pred ? "spillreg.p" : "spillreg";
4019
4020 if (!in_procedure (po))
4021 return;
4022
4023 if (pred)
4024 sep = parse_predicate_and_operand (&e, &qp, po);
4025 else
4026 {
4027 sep = parse_operand_and_eval (&e, ',');
4028 qp = 0;
4029 }
4030 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
4031
4032 if (sep == ',')
4033 sep = parse_operand_and_eval (&e, ',');
4034 else
4035 e.X_op = O_absent;
4036 convert_expr_to_xy_reg (&e, &xy, &treg, po, 2 + pred);
4037
4038 add_unwind_entry (output_spill_reg (ab, reg, treg, xy, qp), sep);
4039 }
4040
4041 static void
4042 dot_spillmem (int psprel)
4043 {
4044 expressionS e;
4045 int pred = (psprel < 0), sep;
4046 unsigned int qp, ab, reg;
4047 const char * po;
4048
4049 if (pred)
4050 {
4051 psprel = ~psprel;
4052 po = psprel ? "spillpsp.p" : "spillsp.p";
4053 }
4054 else
4055 po = psprel ? "spillpsp" : "spillsp";
4056
4057 if (!in_procedure (po))
4058 return;
4059
4060 if (pred)
4061 sep = parse_predicate_and_operand (&e, &qp, po);
4062 else
4063 {
4064 sep = parse_operand_and_eval (&e, ',');
4065 qp = 0;
4066 }
4067 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
4068
4069 if (sep == ',')
4070 sep = parse_operand_and_eval (&e, ',');
4071 else
4072 e.X_op = O_absent;
4073 if (e.X_op != O_constant)
4074 {
4075 as_bad (_("Operand %d to .%s must be a constant"), 2 + pred, po);
4076 e.X_add_number = 0;
4077 }
4078
4079 if (psprel)
4080 add_unwind_entry (output_spill_psprel (ab, reg, e.X_add_number, qp), sep);
4081 else
4082 add_unwind_entry (output_spill_sprel (ab, reg, e.X_add_number, qp), sep);
4083 }
4084
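/* Look up the prologue nesting count that was recorded for .label_state
   label LBL; complain and return 1 if none was saved. */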
4085 static unsigned int
4086 get_saved_prologue_count (unsigned long lbl)
4087 {
4088 label_prologue_count *lpc = unwind.saved_prologue_counts;
4089
4090 while (lpc != NULL && lpc->label_number != lbl)
4091 lpc = lpc->next;
4092
4093 if (lpc != NULL)
4094 return lpc->prologue_count;
4095
4096 as_bad (_("Missing .label_state %ld"), lbl);
4097 return 1;
4098 }
4099
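/* Associate COUNT, the current prologue nesting depth, with .label_state
   label LBL, creating a new entry if necessary. */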
4100 static void
4101 save_prologue_count (unsigned long lbl, unsigned int count)
4102 {
4103 label_prologue_count *lpc = unwind.saved_prologue_counts;
4104
4105 while (lpc != NULL && lpc->label_number != lbl)
4106 lpc = lpc->next;
4107
4108 if (lpc != NULL)
4109 lpc->prologue_count = count;
4110 else
4111 {
4112 label_prologue_count *new_lpc = XNEW (label_prologue_count);
4113
4114 new_lpc->next = unwind.saved_prologue_counts;
4115 new_lpc->label_number = lbl;
4116 new_lpc->prologue_count = count;
4117 unwind.saved_prologue_counts = new_lpc;
4118 }
4119 }
4120
4121 static void
4122 free_saved_prologue_counts (void)
4123 {
4124 label_prologue_count *lpc = unwind.saved_prologue_counts;
4125 label_prologue_count *next;
4126
4127 while (lpc != NULL)
4128 {
4129 next = lpc->next;
4130 free (lpc);
4131 lpc = next;
4132 }
4133
4134 unwind.saved_prologue_counts = NULL;
4135 }
4136
4137 static void
4138 dot_label_state (int dummy ATTRIBUTE_UNUSED)
4139 {
4140 expressionS e;
4141
4142 if (!in_body ("label_state"))
4143 return;
4144
4145 parse_operand_and_eval (&e, 0);
4146 if (e.X_op == O_constant)
4147 save_prologue_count (e.X_add_number, unwind.prologue_count);
4148 else
4149 {
4150 as_bad (_("Operand to .label_state must be a constant"));
4151 e.X_add_number = 0;
4152 }
4153 add_unwind_entry (output_label_state (e.X_add_number), 0);
4154 }
4155
4156 static void
4157 dot_copy_state (int dummy ATTRIBUTE_UNUSED)
4158 {
4159 expressionS e;
4160
4161 if (!in_body ("copy_state"))
4162 return;
4163
4164 parse_operand_and_eval (&e, 0);
4165 if (e.X_op == O_constant)
4166 unwind.prologue_count = get_saved_prologue_count (e.X_add_number);
4167 else
4168 {
4169 as_bad (_("Operand to .copy_state must be a constant"));
4170 e.X_add_number = 0;
4171 }
4172 add_unwind_entry (output_copy_state (e.X_add_number), 0);
4173 }
4174
4175 static void
4176 dot_unwabi (int dummy ATTRIBUTE_UNUSED)
4177 {
4178 expressionS e1, e2;
4179 unsigned char sep;
4180
4181 if (!in_prologue ("unwabi"))
4182 return;
4183
4184 sep = parse_operand_and_eval (&e1, ',');
4185 if (sep == ',')
4186 parse_operand_and_eval (&e2, 0);
4187 else
4188 e2.X_op = O_absent;
4189
4190 if (e1.X_op != O_constant)
4191 {
4192 as_bad (_("First operand to .unwabi must be a constant"));
4193 e1.X_add_number = 0;
4194 }
4195
4196 if (e2.X_op != O_constant)
4197 {
4198 as_bad (_("Second operand to .unwabi must be a constant"));
4199 e2.X_add_number = 0;
4200 }
4201
4202 add_unwind_entry (output_unwabi (e1.X_add_number, e2.X_add_number), 0);
4203 }
4204
4205 static void
4206 dot_personality (int dummy ATTRIBUTE_UNUSED)
4207 {
4208 char *name, *p, c;
4209
4210 if (!in_procedure ("personality"))
4211 return;
4212 SKIP_WHITESPACE ();
4213 c = get_symbol_name (&name);
4214 p = input_line_pointer;
4215 unwind.personality_routine = symbol_find_or_make (name);
4216 unwind.force_unwind_entry = 1;
4217 *p = c;
4218 SKIP_WHITESPACE_AFTER_NAME ();
4219 demand_empty_rest_of_line ();
4220 }
4221
4222 static void
4223 dot_proc (int dummy ATTRIBUTE_UNUSED)
4224 {
4225 char *name, *p, c;
4226 symbolS *sym;
4227 proc_pending *pending, *last_pending;
4228
4229 if (unwind.proc_pending.sym)
4230 {
4231 (md.unwind_check == unwind_check_warning
4232 ? as_warn
4233 : as_bad) (_("Missing .endp after previous .proc"));
4234 while (unwind.proc_pending.next)
4235 {
4236 pending = unwind.proc_pending.next;
4237 unwind.proc_pending.next = pending->next;
4238 free (pending);
4239 }
4240 }
4241 last_pending = NULL;
4242
4243 /* Parse names of main and alternate entry points and mark them as
4244 function symbols: */
4245 while (1)
4246 {
4247 SKIP_WHITESPACE ();
4248 c = get_symbol_name (&name);
4249 p = input_line_pointer;
4250 if (!*name)
4251 as_bad (_("Empty argument of .proc"));
4252 else
4253 {
4254 sym = symbol_find_or_make (name);
4255 if (S_IS_DEFINED (sym))
4256 as_bad (_("`%s' was already defined"), name);
4257 else if (!last_pending)
4258 {
4259 unwind.proc_pending.sym = sym;
4260 last_pending = &unwind.proc_pending;
4261 }
4262 else
4263 {
4264 pending = XNEW (proc_pending);
4265 pending->sym = sym;
4266 last_pending = last_pending->next = pending;
4267 }
4268 symbol_get_bfdsym (sym)->flags |= BSF_FUNCTION;
4269 }
4270 *p = c;
4271 SKIP_WHITESPACE_AFTER_NAME ();
4272 if (*input_line_pointer != ',')
4273 break;
4274 ++input_line_pointer;
4275 }
4276 if (!last_pending)
4277 {
4278 unwind.proc_pending.sym = expr_build_dot ();
4279 last_pending = &unwind.proc_pending;
4280 }
4281 last_pending->next = NULL;
4282 demand_empty_rest_of_line ();
4283 do_align (4, NULL, 0, 0);
4284
4285 unwind.prologue = 0;
4286 unwind.prologue_count = 0;
4287 unwind.body = 0;
4288 unwind.insn = 0;
4289 unwind.list = unwind.tail = unwind.current_entry = NULL;
4290 unwind.personality_routine = 0;
4291 }
4292
4293 static void
4294 dot_body (int dummy ATTRIBUTE_UNUSED)
4295 {
4296 if (!in_procedure ("body"))
4297 return;
4298 if (!unwind.prologue && !unwind.body && unwind.insn)
4299 as_warn (_("Initial .body should precede any instructions"));
4300 check_pending_save ();
4301
4302 unwind.prologue = 0;
4303 unwind.prologue_mask = 0;
4304 unwind.body = 1;
4305
4306 add_unwind_entry (output_body (), 0);
4307 }
4308
4309 static void
4310 dot_prologue (int dummy ATTRIBUTE_UNUSED)
4311 {
4312 unsigned mask = 0, grsave = 0;
4313
4314 if (!in_procedure ("prologue"))
4315 return;
4316 if (unwind.prologue)
4317 {
4318 as_bad (_(".prologue within prologue"));
4319 ignore_rest_of_line ();
4320 return;
4321 }
4322 if (!unwind.body && unwind.insn)
4323 as_warn (_("Initial .prologue should precede any instructions"));
4324
4325 if (!is_it_end_of_statement ())
4326 {
4327 expressionS e;
4328 int n, sep = parse_operand_and_eval (&e, ',');
4329
4330 if (e.X_op != O_constant
4331 || e.X_add_number < 0
4332 || e.X_add_number > 0xf)
4333 as_bad (_("First operand to .prologue must be a positive 4-bit constant"));
4334 else if (e.X_add_number == 0)
4335 as_warn (_("Pointless use of zero first operand to .prologue"));
4336 else
4337 mask = e.X_add_number;
4338
4339 n = popcount (mask);
4340
4341 if (sep == ',')
4342 parse_operand_and_eval (&e, 0);
4343 else
4344 e.X_op = O_absent;
4345
4346 if (e.X_op == O_constant
4347 && e.X_add_number >= 0
4348 && e.X_add_number < 128)
4349 {
4350 if (md.unwind_check == unwind_check_error)
4351 as_warn (_("Using a constant as second operand to .prologue is deprecated"));
4352 grsave = e.X_add_number;
4353 }
4354 else if (e.X_op != O_register
4355 || (grsave = e.X_add_number - REG_GR) > 127)
4356 {
4357 as_bad (_("Second operand to .prologue must be a general register"));
4358 grsave = 0;
4359 }
4360 else if (grsave > 128U - n)
4361 {
4362 as_bad (_("Second operand to .prologue must be the first of %d general registers"), n);
4363 grsave = 0;
4364 }
4365 }
4366
4367 if (mask)
4368 add_unwind_entry (output_prologue_gr (mask, grsave), 0);
4369 else
4370 add_unwind_entry (output_prologue (), 0);
4371
4372 unwind.prologue = 1;
4373 unwind.prologue_mask = mask;
4374 unwind.prologue_gr = grsave;
4375 unwind.body = 0;
4376 ++unwind.prologue_count;
4377 }
4378
4379 static void
4380 dot_endp (int dummy ATTRIBUTE_UNUSED)
4381 {
4382 expressionS e;
4383 int bytes_per_address;
4384 long where;
4385 segT saved_seg;
4386 subsegT saved_subseg;
4387 proc_pending *pending;
4388 int unwind_check = md.unwind_check;
4389
4390 md.unwind_check = unwind_check_error;
4391 if (!in_procedure ("endp"))
4392 return;
4393 md.unwind_check = unwind_check;
4394
4395 if (unwind.saved_text_seg)
4396 {
4397 saved_seg = unwind.saved_text_seg;
4398 saved_subseg = unwind.saved_text_subseg;
4399 unwind.saved_text_seg = NULL;
4400 }
4401 else
4402 {
4403 saved_seg = now_seg;
4404 saved_subseg = now_subseg;
4405 }
4406
4407 insn_group_break (1, 0, 0);
4408
4409 /* If there wasn't a .handlerdata, we haven't generated an image yet. */
4410 if (!unwind.info)
4411 generate_unwind_image (saved_seg);
4412
4413 if (unwind.info || unwind.force_unwind_entry)
4414 {
4415 symbolS *proc_end;
4416
4417 subseg_set (md.last_text_seg, 0);
4418 proc_end = expr_build_dot ();
4419
4420 start_unwind_section (saved_seg, SPECIAL_SECTION_UNWIND);
4421
4422 /* Make sure that the section has 4-byte alignment for ILP32 and
4423 8-byte alignment for LP64. */

4424 record_alignment (now_seg, md.pointer_size_shift);
4425
4426 /* Need space for 3 pointers for procedure start, procedure end,
4427 and unwind info. */
4428 memset (frag_more (3 * md.pointer_size), 0, 3 * md.pointer_size);
4429 where = frag_now_fix () - (3 * md.pointer_size);
4430 bytes_per_address = bfd_arch_bits_per_address (stdoutput) / 8;
4431
4432 /* Issue the values of a) Proc Begin, b) Proc End, c) Unwind Record. */
4433 e.X_op = O_pseudo_fixup;
4434 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4435 e.X_add_number = 0;
4436 if (!S_IS_LOCAL (unwind.proc_pending.sym)
4437 && S_IS_DEFINED (unwind.proc_pending.sym))
4438 e.X_add_symbol = symbol_temp_new (S_GET_SEGMENT (unwind.proc_pending.sym),
4439 S_GET_VALUE (unwind.proc_pending.sym),
4440 symbol_get_frag (unwind.proc_pending.sym));
4441 else
4442 e.X_add_symbol = unwind.proc_pending.sym;
4443 ia64_cons_fix_new (frag_now, where, bytes_per_address, &e,
4444 BFD_RELOC_NONE);
4445
4446 e.X_op = O_pseudo_fixup;
4447 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4448 e.X_add_number = 0;
4449 e.X_add_symbol = proc_end;
4450 ia64_cons_fix_new (frag_now, where + bytes_per_address,
4451 bytes_per_address, &e, BFD_RELOC_NONE);
4452
4453 if (unwind.info)
4454 {
4455 e.X_op = O_pseudo_fixup;
4456 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4457 e.X_add_number = 0;
4458 e.X_add_symbol = unwind.info;
4459 ia64_cons_fix_new (frag_now, where + (bytes_per_address * 2),
4460 bytes_per_address, &e, BFD_RELOC_NONE);
4461 }
4462 }
4463 subseg_set (saved_seg, saved_subseg);
4464
4465 /* Set symbol sizes. */
4466 pending = &unwind.proc_pending;
4467 if (S_GET_NAME (pending->sym))
4468 {
4469 do
4470 {
4471 symbolS *sym = pending->sym;
4472
4473 if (!S_IS_DEFINED (sym))
4474 as_bad (_("`%s' was not defined within procedure"), S_GET_NAME (sym));
4475 else if (S_GET_SIZE (sym) == 0
4476 && symbol_get_obj (sym)->size == NULL)
4477 {
4478 fragS *frag = symbol_get_frag (sym);
4479
4480 if (frag)
4481 {
4482 if (frag == frag_now && SEG_NORMAL (now_seg))
4483 S_SET_SIZE (sym, frag_now_fix () - S_GET_VALUE (sym));
4484 else
4485 {
4486 symbol_get_obj (sym)->size = XNEW (expressionS);
4487 symbol_get_obj (sym)->size->X_op = O_subtract;
4488 symbol_get_obj (sym)->size->X_add_symbol
4489 = symbol_new (FAKE_LABEL_NAME, now_seg,
4490 frag_now_fix (), frag_now);
4491 symbol_get_obj (sym)->size->X_op_symbol = sym;
4492 symbol_get_obj (sym)->size->X_add_number = 0;
4493 }
4494 }
4495 }
4496 } while ((pending = pending->next) != NULL);
4497 }
4498
4499 /* Parse names of main and alternate entry points. */
4500 while (1)
4501 {
4502 char *name, *p, c;
4503
4504 SKIP_WHITESPACE ();
4505 c = get_symbol_name (&name);
4506 p = input_line_pointer;
4507 if (!*name)
4508 (md.unwind_check == unwind_check_warning
4509 ? as_warn
4510 : as_bad) (_("Empty argument of .endp"));
4511 else
4512 {
4513 symbolS *sym = symbol_find (name);
4514
4515 for (pending = &unwind.proc_pending; pending; pending = pending->next)
4516 {
4517 if (sym == pending->sym)
4518 {
4519 pending->sym = NULL;
4520 break;
4521 }
4522 }
4523 if (!sym || !pending)
4524 as_warn (_("`%s' was not specified with previous .proc"), name);
4525 }
4526 *p = c;
4527 SKIP_WHITESPACE_AFTER_NAME ();
4528 if (*input_line_pointer != ',')
4529 break;
4530 ++input_line_pointer;
4531 }
4532 demand_empty_rest_of_line ();
4533
4534 /* Deliberately only checking for the main entry point here; the
4535 language spec even says all arguments to .endp are ignored. */
4536 if (unwind.proc_pending.sym
4537 && S_GET_NAME (unwind.proc_pending.sym)
4538 && strcmp (S_GET_NAME (unwind.proc_pending.sym), FAKE_LABEL_NAME))
4539 as_warn (_("`%s' should be an operand to this .endp"),
4540 S_GET_NAME (unwind.proc_pending.sym));
4541 while (unwind.proc_pending.next)
4542 {
4543 pending = unwind.proc_pending.next;
4544 unwind.proc_pending.next = pending->next;
4545 free (pending);
4546 }
4547 unwind.proc_pending.sym = unwind.info = NULL;
4548 }
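/* Rough usage sketch for the .proc/.prologue/.body/.endp bracketing handled
   above; register choices and the body are illustrative only:

	.proc	my_func
my_func:
	.prologue
	.save	ar.pfs, r33
	alloc	r33 = ar.pfs, 0, 2, 0, 0
	.body
	// ... procedure body ...
	.endp	my_func  */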
4549
4550 static void
4551 dot_template (int template_val)
4552 {
4553 CURR_SLOT.user_template = template_val;
4554 }
4555
4556 static void
4557 dot_regstk (int dummy ATTRIBUTE_UNUSED)
4558 {
4559 int ins, locs, outs, rots;
4560
4561 if (is_it_end_of_statement ())
4562 ins = locs = outs = rots = 0;
4563 else
4564 {
4565 ins = get_absolute_expression ();
4566 if (*input_line_pointer++ != ',')
4567 goto err;
4568 locs = get_absolute_expression ();
4569 if (*input_line_pointer++ != ',')
4570 goto err;
4571 outs = get_absolute_expression ();
4572 if (*input_line_pointer++ != ',')
4573 goto err;
4574 rots = get_absolute_expression ();
4575 }
4576 set_regstack (ins, locs, outs, rots);
4577 return;
4578
4579 err:
4580 as_bad (_("Comma expected"));
4581 ignore_rest_of_line ();
4582 }
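/* Illustrative only: .regstk mirrors the four counts of an "alloc"
   instruction so that in/loc/out register names resolve without one, e.g.

	.regstk	2, 3, 1, 0	// 2 inputs, 3 locals, 1 output, 0 rotating  */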
4583
4584 static void
4585 dot_rot (int type)
4586 {
4587 offsetT num_regs;
4588 valueT num_alloced = 0;
4589 struct dynreg **drpp, *dr;
4590 int ch, base_reg = 0;
4591 char *name, *start;
4592 size_t len;
4593
4594 switch (type)
4595 {
4596 case DYNREG_GR: base_reg = REG_GR + 32; break;
4597 case DYNREG_FR: base_reg = REG_FR + 32; break;
4598 case DYNREG_PR: base_reg = REG_P + 16; break;
4599 default: break;
4600 }
4601
4602 /* First, remove any existing names from the hash table. */
4603 for (dr = md.dynreg[type]; dr && dr->num_regs; dr = dr->next)
4604 {
4605 hash_delete (md.dynreg_hash, dr->name, FALSE);
4606 /* FIXME: Free dr->name. */
4607 dr->num_regs = 0;
4608 }
4609
4610 drpp = &md.dynreg[type];
4611 while (1)
4612 {
4613 ch = get_symbol_name (&start);
4614 len = strlen (ia64_canonicalize_symbol_name (start));
4615 *input_line_pointer = ch;
4616
4617 SKIP_WHITESPACE_AFTER_NAME ();
4618 if (*input_line_pointer != '[')
4619 {
4620 as_bad (_("Expected '['"));
4621 goto err;
4622 }
4623 ++input_line_pointer; /* skip '[' */
4624
4625 num_regs = get_absolute_expression ();
4626
4627 if (*input_line_pointer++ != ']')
4628 {
4629 as_bad (_("Expected ']'"));
4630 goto err;
4631 }
4632 if (num_regs <= 0)
4633 {
4634 as_bad (_("Number of elements must be positive"));
4635 goto err;
4636 }
4637 SKIP_WHITESPACE ();
4638
4639 num_alloced += num_regs;
4640 switch (type)
4641 {
4642 case DYNREG_GR:
4643 if (num_alloced > md.rot.num_regs)
4644 {
4645 as_bad (_("Used more than the declared %d rotating registers"),
4646 md.rot.num_regs);
4647 goto err;
4648 }
4649 break;
4650 case DYNREG_FR:
4651 if (num_alloced > 96)
4652 {
4653 as_bad (_("Used more than the available 96 rotating registers"));
4654 goto err;
4655 }
4656 break;
4657 case DYNREG_PR:
4658 if (num_alloced > 48)
4659 {
4660 as_bad (_("Used more than the available 48 rotating registers"));
4661 goto err;
4662 }
4663 break;
4664
4665 default:
4666 break;
4667 }
4668
4669 if (!*drpp)
4670 {
4671 *drpp = XOBNEW (&notes, struct dynreg);
4672 memset (*drpp, 0, sizeof (*dr));
4673 }
4674
4675 name = XOBNEWVEC (&notes, char, len + 1);
4676 memcpy (name, start, len);
4677 name[len] = '\0';
4678
4679 dr = *drpp;
4680 dr->name = name;
4681 dr->num_regs = num_regs;
4682 dr->base = base_reg;
4683 drpp = &dr->next;
4684 base_reg += num_regs;
4685
4686 if (hash_insert (md.dynreg_hash, name, dr))
4687 {
4688 as_bad (_("Attempt to redefine register set `%s'"), name);
4689 obstack_free (&notes, name);
4690 goto err;
4691 }
4692
4693 if (*input_line_pointer != ',')
4694 break;
4695 ++input_line_pointer; /* skip comma */
4696 SKIP_WHITESPACE ();
4697 }
4698 demand_empty_rest_of_line ();
4699 return;
4700
4701 err:
4702 ignore_rest_of_line ();
4703 }
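/* Illustrative only; the named sets must fit within the rotating region
   declared via alloc (or .regstk):

	alloc	r32 = ar.pfs, 0, 8, 0, 8
	.rotr	x[4], y[4]	// x[0]..x[3], y[0]..y[3] name rotating GRs
	.rotp	p[2]		// rotating predicate names  */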
4704
4705 static void
4706 dot_byteorder (int byteorder)
4707 {
4708 segment_info_type *seginfo = seg_info (now_seg);
4709
4710 if (byteorder == -1)
4711 {
4712 if (seginfo->tc_segment_info_data.endian == 0)
4713 seginfo->tc_segment_info_data.endian = default_big_endian ? 1 : 2;
4714 byteorder = seginfo->tc_segment_info_data.endian == 1;
4715 }
4716 else
4717 seginfo->tc_segment_info_data.endian = byteorder ? 1 : 2;
4718
4719 if (target_big_endian != byteorder)
4720 {
4721 target_big_endian = byteorder;
4722 if (target_big_endian)
4723 {
4724 ia64_number_to_chars = number_to_chars_bigendian;
4725 ia64_float_to_chars = ia64_float_to_chars_bigendian;
4726 }
4727 else
4728 {
4729 ia64_number_to_chars = number_to_chars_littleendian;
4730 ia64_float_to_chars = ia64_float_to_chars_littleendian;
4731 }
4732 }
4733 }
4734
4735 static void
4736 dot_psr (int dummy ATTRIBUTE_UNUSED)
4737 {
4738 char *option;
4739 int ch;
4740
4741 while (1)
4742 {
4743 ch = get_symbol_name (&option);
4744 if (strcmp (option, "lsb") == 0)
4745 md.flags &= ~EF_IA_64_BE;
4746 else if (strcmp (option, "msb") == 0)
4747 md.flags |= EF_IA_64_BE;
4748 else if (strcmp (option, "abi32") == 0)
4749 md.flags &= ~EF_IA_64_ABI64;
4750 else if (strcmp (option, "abi64") == 0)
4751 md.flags |= EF_IA_64_ABI64;
4752 else
4753 as_bad (_("Unknown psr option `%s'"), option);
4754 *input_line_pointer = ch;
4755
4756 SKIP_WHITESPACE_AFTER_NAME ();
4757 if (*input_line_pointer != ',')
4758 break;
4759
4760 ++input_line_pointer;
4761 SKIP_WHITESPACE ();
4762 }
4763 demand_empty_rest_of_line ();
4764 }
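/* Illustrative only; the recognized options are exactly the ones checked
   above:

	.psr	abi64, lsb	// request 64-bit ABI, little-endian ELF flags  */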
4765
4766 static void
4767 dot_ln (int dummy ATTRIBUTE_UNUSED)
4768 {
4769 new_logical_line (0, get_absolute_expression ());
4770 demand_empty_rest_of_line ();
4771 }
4772
4773 static void
4774 cross_section (int ref, void (*builder) (int), int ua)
4775 {
4776 char *start, *end;
4777 int saved_auto_align;
4778 unsigned int section_count;
4779 char *name;
4780 char c;
4781
4782 SKIP_WHITESPACE ();
4783 start = input_line_pointer;
4784 c = get_symbol_name (&name);
4785 if (input_line_pointer == start)
4786 {
4787 as_bad (_("Missing section name"));
4788 ignore_rest_of_line ();
4789 return;
4790 }
4791 * input_line_pointer = c;
4792 SKIP_WHITESPACE_AFTER_NAME ();
4793 end = input_line_pointer;
4794 if (*input_line_pointer != ',')
4795 {
4796 as_bad (_("Comma expected after section name"));
4797 ignore_rest_of_line ();
4798 return;
4799 }
4800 *end = '\0';
4801 end = input_line_pointer + 1; /* skip comma */
4802 input_line_pointer = start;
4803 md.keep_pending_output = 1;
4804 section_count = bfd_count_sections (stdoutput);
4805 obj_elf_section (0);
4806 if (section_count != bfd_count_sections (stdoutput))
4807 as_warn (_("Creating sections with .xdataN/.xrealN/.xstringZ is deprecated."));
4808 input_line_pointer = end;
4809 saved_auto_align = md.auto_align;
4810 if (ua)
4811 md.auto_align = 0;
4812 (*builder) (ref);
4813 if (ua)
4814 md.auto_align = saved_auto_align;
4815 obj_elf_previous (0);
4816 md.keep_pending_output = 0;
4817 }
4818
4819 static void
4820 dot_xdata (int size)
4821 {
4822 cross_section (size, cons, 0);
4823 }
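/* Illustrative only (section name and value are arbitrary): .xdataN emits
   data into the named section without visibly switching to it, e.g.

	.xdata4	.rodata, 0x12345678  */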
4824
4825 /* Why doesn't float_cons() call md_cons_align() the way cons() does? */
4826
4827 static void
4828 stmt_float_cons (int kind)
4829 {
4830 size_t alignment;
4831
4832 switch (kind)
4833 {
4834 case 'd':
4835 alignment = 3;
4836 break;
4837
4838 case 'x':
4839 case 'X':
4840 alignment = 4;
4841 break;
4842
4843 case 'f':
4844 default:
4845 alignment = 2;
4846 break;
4847 }
4848 do_align (alignment, NULL, 0, 0);
4849 float_cons (kind);
4850 }
4851
4852 static void
4853 stmt_cons_ua (int size)
4854 {
4855 int saved_auto_align = md.auto_align;
4856
4857 md.auto_align = 0;
4858 cons (size);
4859 md.auto_align = saved_auto_align;
4860 }
4861
4862 static void
4863 dot_xfloat_cons (int kind)
4864 {
4865 cross_section (kind, stmt_float_cons, 0);
4866 }
4867
4868 static void
4869 dot_xstringer (int zero)
4870 {
4871 cross_section (zero, stringer, 0);
4872 }
4873
4874 static void
4875 dot_xdata_ua (int size)
4876 {
4877 cross_section (size, cons, 1);
4878 }
4879
4880 static void
4881 dot_xfloat_cons_ua (int kind)
4882 {
4883 cross_section (kind, float_cons, 1);
4884 }
4885
4886 /* .reg.val <regname>,value */
4887
4888 static void
4889 dot_reg_val (int dummy ATTRIBUTE_UNUSED)
4890 {
4891 expressionS reg;
4892
4893 expression_and_evaluate (&reg);
4894 if (reg.X_op != O_register)
4895 {
4896 as_bad (_("Register name expected"));
4897 ignore_rest_of_line ();
4898 }
4899 else if (*input_line_pointer++ != ',')
4900 {
4901 as_bad (_("Comma expected"));
4902 ignore_rest_of_line ();
4903 }
4904 else
4905 {
4906 valueT value = get_absolute_expression ();
4907 int regno = reg.X_add_number;
4908 if (regno <= REG_GR || regno > REG_GR + 127)
4909 as_warn (_("Register value annotation ignored"));
4910 else
4911 {
4912 gr_values[regno - REG_GR].known = 1;
4913 gr_values[regno - REG_GR].value = value;
4914 gr_values[regno - REG_GR].path = md.path;
4915 }
4916 }
4917 demand_empty_rest_of_line ();
4918 }
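/* Illustrative only: annotates a general register as holding a known value
   for the DV checker, e.g.

	.reg.val r14, 0x10  */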
4919
4920 /*
4921 .serialize.data
4922 .serialize.instruction
4923 */
4924 static void
4925 dot_serialize (int type)
4926 {
4927 insn_group_break (0, 0, 0);
4928 if (type)
4929 instruction_serialization ();
4930 else
4931 data_serialization ();
4932 insn_group_break (0, 0, 0);
4933 demand_empty_rest_of_line ();
4934 }
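/* Illustrative only: these directives are normally used alongside the
   srlz.d / srlz.i instruction that actually performs the serialization,
   so the DV checker can account for it, e.g.

	srlz.i
	;;
	.serialize.instruction  */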
4935
4936 /* Select the DV (dependency violation) checking mode:
4937 .auto
4938 .explicit
4939 .default
4940
4941 A stop is inserted when changing modes.
4942 */
4943
4944 static void
4945 dot_dv_mode (int type)
4946 {
4947 if (md.manual_bundling)
4948 as_warn (_("Directive invalid within a bundle"));
4949
4950 if (type == 'E' || type == 'A')
4951 md.mode_explicitly_set = 0;
4952 else
4953 md.mode_explicitly_set = 1;
4954
4955 md.detect_dv = 1;
4956 switch (type)
4957 {
4958 case 'A':
4959 case 'a':
4960 if (md.explicit_mode)
4961 insn_group_break (1, 0, 0);
4962 md.explicit_mode = 0;
4963 break;
4964 case 'E':
4965 case 'e':
4966 if (!md.explicit_mode)
4967 insn_group_break (1, 0, 0);
4968 md.explicit_mode = 1;
4969 break;
4970 default:
4971 case 'd':
4972 if (md.explicit_mode != md.default_explicit_mode)
4973 insn_group_break (1, 0, 0);
4974 md.explicit_mode = md.default_explicit_mode;
4975 md.mode_explicitly_set = 0;
4976 break;
4977 }
4978 }
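/* Illustrative only; as noted above, a stop is inserted when the mode
   changes:

	.explicit	// stop bits must now be written by hand
	// ... explicitly scheduled code ...
	.default	// return to the command-line default mode  */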
4979
4980 static void
4981 print_prmask (valueT mask)
4982 {
4983 int regno;
4984 const char *comma = "";
4985 for (regno = 0; regno < 64; regno++)
4986 {
4987 if (mask & ((valueT) 1 << regno))
4988 {
4989 fprintf (stderr, "%s p%d", comma, regno);
4990 comma = ",";
4991 }
4992 }
4993 }
4994
4995 /*
4996 .pred.rel.clear [p1 [,p2 [,...]]] (also .pred.rel "clear" or @clear)
4997 .pred.rel.imply p1, p2 (also .pred.rel "imply" or @imply)
4998 .pred.rel.mutex p1, p2 [,...] (also .pred.rel "mutex" or @mutex)
4999 .pred.safe_across_calls p1 [, p2 [,...]]
5000 */
5001
5002 static void
5003 dot_pred_rel (int type)
5004 {
5005 valueT mask = 0;
5006 int count = 0;
5007 int p1 = -1, p2 = -1;
5008
5009 if (type == 0)
5010 {
5011 if (*input_line_pointer == '"')
5012 {
5013 int len;
5014 char *form = demand_copy_C_string (&len);
5015
5016 if (strcmp (form, "mutex") == 0)
5017 type = 'm';
5018 else if (strcmp (form, "clear") == 0)
5019 type = 'c';
5020 else if (strcmp (form, "imply") == 0)
5021 type = 'i';
5022 obstack_free (&notes, form);
5023 }
5024 else if (*input_line_pointer == '@')
5025 {
5026 char *form;
5027 char c;
5028
5029 ++input_line_pointer;
5030 c = get_symbol_name (&form);
5031
5032 if (strcmp (form, "mutex") == 0)
5033 type = 'm';
5034 else if (strcmp (form, "clear") == 0)
5035 type = 'c';
5036 else if (strcmp (form, "imply") == 0)
5037 type = 'i';
5038 (void) restore_line_pointer (c);
5039 }
5040 else
5041 {
5042 as_bad (_("Missing predicate relation type"));
5043 ignore_rest_of_line ();
5044 return;
5045 }
5046 if (type == 0)
5047 {
5048 as_bad (_("Unrecognized predicate relation type"));
5049 ignore_rest_of_line ();
5050 return;
5051 }
5052 if (*input_line_pointer == ',')
5053 ++input_line_pointer;
5054 SKIP_WHITESPACE ();
5055 }
5056
5057 while (1)
5058 {
5059 valueT bits = 1;
5060 int sep, regno;
5061 expressionS pr, *pr1, *pr2;
5062
5063 sep = parse_operand_and_eval (&pr, ',');
5064 if (pr.X_op == O_register
5065 && pr.X_add_number >= REG_P
5066 && pr.X_add_number <= REG_P + 63)
5067 {
5068 regno = pr.X_add_number - REG_P;
5069 bits <<= regno;
5070 count++;
5071 if (p1 == -1)
5072 p1 = regno;
5073 else if (p2 == -1)
5074 p2 = regno;
5075 }
5076 else if (type != 'i'
5077 && pr.X_op == O_subtract
5078 && (pr1 = symbol_get_value_expression (pr.X_add_symbol))
5079 && pr1->X_op == O_register
5080 && pr1->X_add_number >= REG_P
5081 && pr1->X_add_number <= REG_P + 63
5082 && (pr2 = symbol_get_value_expression (pr.X_op_symbol))
5083 && pr2->X_op == O_register
5084 && pr2->X_add_number >= REG_P
5085 && pr2->X_add_number <= REG_P + 63)
5086 {
5087 /* It's a range. */
5088 int stop;
5089
5090 regno = pr1->X_add_number - REG_P;
5091 stop = pr2->X_add_number - REG_P;
5092 if (regno >= stop)
5093 {
5094 as_bad (_("Bad register range"));
5095 ignore_rest_of_line ();
5096 return;
5097 }
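/* bits is still 1 here, so this sets bits regno..stop inclusive:
   (1 << (stop + 1)) - (1 << regno).  */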
5098 bits = ((bits << stop) << 1) - (bits << regno);
5099 count += stop - regno + 1;
5100 }
5101 else
5102 {
5103 as_bad (_("Predicate register expected"));
5104 ignore_rest_of_line ();
5105 return;
5106 }
5107 if (mask & bits)
5108 as_warn (_("Duplicate predicate register ignored"));
5109 mask |= bits;
5110 if (sep != ',')
5111 break;
5112 }
5113
5114 switch (type)
5115 {
5116 case 'c':
5117 if (count == 0)
5118 mask = ~(valueT) 0;
5119 clear_qp_mutex (mask);
5120 clear_qp_implies (mask, (valueT) 0);
5121 break;
5122 case 'i':
5123 if (count != 2 || p1 == -1 || p2 == -1)
5124 as_bad (_("Predicate source and target required"));
5125 else if (p1 == 0 || p2 == 0)
5126 as_bad (_("Use of p0 is not valid in this context"));
5127 else
5128 add_qp_imply (p1, p2);
5129 break;
5130 case 'm':
5131 if (count < 2)
5132 {
5133 as_bad (_("At least two PR arguments expected"));
5134 break;
5135 }
5136 else if (mask & 1)
5137 {
5138 as_bad (_("Use of p0 is not valid in this context"));
5139 break;
5140 }
5141 add_qp_mutex (mask);
5142 break;
5143 case 's':
5144 /* note that we don't override any existing relations */
5145 if (count == 0)
5146 {
5147 as_bad (_("At least one PR argument expected"));
5148 break;
5149 }
5150 if (md.debug_dv)
5151 {
5152 fprintf (stderr, "Safe across calls: ");
5153 print_prmask (mask);
5154 fprintf (stderr, "\n");
5155 }
5156 qp_safe_across_calls = mask;
5157 break;
5158 }
5159 demand_empty_rest_of_line ();
5160 }
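/* Illustrative only: .pred.rel.mutex asserts that at most one of the listed
   predicates can be set.  A compare often lets the checker deduce this by
   itself; the directive is for cases it cannot see, e.g.

	cmp.eq	p6, p7 = r8, r0
	.pred.rel.mutex p6, p7  */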
5161
5162 /* .entry label [, label [, ...]]
5163 Hint to DV code that the given labels are to be considered entry points.
5164 Otherwise, only global labels are considered entry points. */
5165
5166 static void
5167 dot_entry (int dummy ATTRIBUTE_UNUSED)
5168 {
5169 const char *err;
5170 char *name;
5171 int c;
5172 symbolS *symbolP;
5173
5174 do
5175 {
5176 c = get_symbol_name (&name);
5177 symbolP = symbol_find_or_make (name);
5178
5179 err = hash_insert (md.entry_hash, S_GET_NAME (symbolP), (void *) symbolP);
5180 if (err)
5181 as_fatal (_("Inserting \"%s\" into entry hint table failed: %s"),
5182 name, err);
5183
5184 *input_line_pointer = c;
5185 SKIP_WHITESPACE_AFTER_NAME ();
5186 c = *input_line_pointer;
5187 if (c == ',')
5188 {
5189 input_line_pointer++;
5190 SKIP_WHITESPACE ();
5191 if (*input_line_pointer == '\n')
5192 c = '\n';
5193 }
5194 }
5195 while (c == ',');
5196
5197 demand_empty_rest_of_line ();
5198 }
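/* Illustrative only (the label name is arbitrary):

	.entry	alt_entry	// treat alt_entry as an entry point for DV checks
alt_entry:
	// ... code reachable from outside the containing procedure ...  */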
5199
5200 /* .mem.offset offset, base
5201 "base" is used to distinguish offsets that are relative to different base addresses. */
5202
5203 static void
5204 dot_mem_offset (int dummy ATTRIBUTE_UNUSED)
5205 {
5206 md.mem_offset.hint = 1;
5207 md.mem_offset.offset = get_absolute_expression ();
5208 if (*input_line_pointer != ',')
5209 {
5210 as_bad (_("Comma expected"));
5211 ignore_rest_of_line ();
5212 return;
5213 }
5214 ++input_line_pointer;
5215 md.mem_offset.base = get_absolute_expression ();
5216 demand_empty_rest_of_line ();
5217 }
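/* Illustrative only: the classic use is to tell the DV checker that two
   st8.spill stores target distinct spill slots, e.g.

	.mem.offset 0, 0
	st8.spill [r2] = r16, 16
	.mem.offset 8, 0
	st8.spill [r3] = r17, 16  */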
5218
5219 /* ia64-specific pseudo-ops: */
5220 const pseudo_typeS md_pseudo_table[] =
5221 {
5222 { "radix", dot_radix, 0 },
5223 { "lcomm", s_lcomm_bytes, 1 },
5224 { "loc", dot_loc, 0 },
5225 { "bss", dot_special_section, SPECIAL_SECTION_BSS },
5226 { "sbss", dot_special_section, SPECIAL_SECTION_SBSS },
5227 { "sdata", dot_special_section, SPECIAL_SECTION_SDATA },
5228 { "rodata", dot_special_section, SPECIAL_SECTION_RODATA },
5229 { "comment", dot_special_section, SPECIAL_SECTION_COMMENT },
5230 { "ia_64.unwind", dot_special_section, SPECIAL_SECTION_UNWIND },
5231 { "ia_64.unwind_info", dot_special_section, SPECIAL_SECTION_UNWIND_INFO },
5232 { "init_array", dot_special_section, SPECIAL_SECTION_INIT_ARRAY },
5233 { "fini_array", dot_special_section, SPECIAL_SECTION_FINI_ARRAY },
5234 { "proc", dot_proc, 0 },
5235 { "body", dot_body, 0 },
5236 { "prologue", dot_prologue, 0 },
5237 { "endp", dot_endp, 0 },
5238
5239 { "fframe", dot_fframe, 0 },
5240 { "vframe", dot_vframe, 0 },
5241 { "vframesp", dot_vframesp, 0 },
5242 { "vframepsp", dot_vframesp, 1 },
5243 { "save", dot_save, 0 },
5244 { "restore", dot_restore, 0 },
5245 { "restorereg", dot_restorereg, 0 },
5246 { "restorereg.p", dot_restorereg, 1 },
5247 { "handlerdata", dot_handlerdata, 0 },
5248 { "unwentry", dot_unwentry, 0 },
5249 { "altrp", dot_altrp, 0 },
5250 { "savesp", dot_savemem, 0 },
5251 { "savepsp", dot_savemem, 1 },
5252 { "save.g", dot_saveg, 0 },
5253 { "save.f", dot_savef, 0 },
5254 { "save.b", dot_saveb, 0 },
5255 { "save.gf", dot_savegf, 0 },
5256 { "spill", dot_spill, 0 },
5257 { "spillreg", dot_spillreg, 0 },
5258 { "spillsp", dot_spillmem, 0 },
5259 { "spillpsp", dot_spillmem, 1 },
5260 { "spillreg.p", dot_spillreg, 1 },
5261 { "spillsp.p", dot_spillmem, ~0 },
5262 { "spillpsp.p", dot_spillmem, ~1 },
5263 { "label_state", dot_label_state, 0 },
5264 { "copy_state", dot_copy_state, 0 },
5265 { "unwabi", dot_unwabi, 0 },
5266 { "personality", dot_personality, 0 },
5267 { "mii", dot_template, 0x0 },
5268 { "mli", dot_template, 0x2 }, /* old format, for compatibility */
5269 { "mlx", dot_template, 0x2 },
5270 { "mmi", dot_template, 0x4 },
5271 { "mfi", dot_template, 0x6 },
5272 { "mmf", dot_template, 0x7 },
5273 { "mib", dot_template, 0x8 },
5274 { "mbb", dot_template, 0x9 },
5275 { "bbb", dot_template, 0xb },
5276 { "mmb", dot_template, 0xc },
5277 { "mfb", dot_template, 0xe },
5278 { "align", dot_align, 0 },
5279 { "regstk", dot_regstk, 0 },
5280 { "rotr", dot_rot, DYNREG_GR },
5281 { "rotf", dot_rot, DYNREG_FR },
5282 { "rotp", dot_rot, DYNREG_PR },
5283 { "lsb", dot_byteorder, 0 },
5284 { "msb", dot_byteorder, 1 },
5285 { "psr", dot_psr, 0 },
5286 { "alias", dot_alias, 0 },
5287 { "secalias", dot_alias, 1 },
5288 { "ln", dot_ln, 0 }, /* source line info (for debugging) */
5289
5290 { "xdata1", dot_xdata, 1 },
5291 { "xdata2", dot_xdata, 2 },
5292 { "xdata4", dot_xdata, 4 },
5293 { "xdata8", dot_xdata, 8 },
5294 { "xdata16", dot_xdata, 16 },
5295 { "xreal4", dot_xfloat_cons, 'f' },
5296 { "xreal8", dot_xfloat_cons, 'd' },
5297 { "xreal10", dot_xfloat_cons, 'x' },
5298 { "xreal16", dot_xfloat_cons, 'X' },
5299 { "xstring", dot_xstringer, 8 + 0 },
5300 { "xstringz", dot_xstringer, 8 + 1 },
5301
5302 /* unaligned versions: */
5303 { "xdata2.ua", dot_xdata_ua, 2 },
5304 { "xdata4.ua", dot_xdata_ua, 4 },
5305 { "xdata8.ua", dot_xdata_ua, 8 },
5306 { "xdata16.ua", dot_xdata_ua, 16 },
5307 { "xreal4.ua", dot_xfloat_cons_ua, 'f' },
5308 { "xreal8.ua", dot_xfloat_cons_ua, 'd' },
5309 { "xreal10.ua", dot_xfloat_cons_ua, 'x' },
5310 { "xreal16.ua", dot_xfloat_cons_ua, 'X' },
5311
5312 /* annotations/DV checking support */
5313 { "entry", dot_entry, 0 },
5314 { "mem.offset", dot_mem_offset, 0 },
5315 { "pred.rel", dot_pred_rel, 0 },
5316 { "pred.rel.clear", dot_pred_rel, 'c' },
5317 { "pred.rel.imply", dot_pred_rel, 'i' },
5318 { "pred.rel.mutex", dot_pred_rel, 'm' },
5319 { "pred.safe_across_calls", dot_pred_rel, 's' },
5320 { "reg.val", dot_reg_val, 0 },
5321 { "serialize.data", dot_serialize, 0 },
5322 { "serialize.instruction", dot_serialize, 1 },
5323 { "auto", dot_dv_mode, 'a' },
5324 { "explicit", dot_dv_mode, 'e' },
5325 { "default", dot_dv_mode, 'd' },
5326
5327 /* ??? These are needed to make gas/testsuite/gas/elf/ehopt.s work.
5328 IA-64 aligns data allocation pseudo-ops by default, so we have to
5329 tell it that these ones are supposed to be unaligned. Long term,
5330 should rewrite so that only IA-64 specific data allocation pseudo-ops
5331 are aligned by default. */
5332 {"2byte", stmt_cons_ua, 2},
5333 {"4byte", stmt_cons_ua, 4},
5334 {"8byte", stmt_cons_ua, 8},
5335
5336 #ifdef TE_VMS
5337 {"vms_common", obj_elf_vms_common, 0},
5338 #endif
5339
5340 { NULL, 0, 0 }
5341 };
5342
5343 static const struct pseudo_opcode
5344 {
5345 const char *name;
5346 void (*handler) (int);
5347 int arg;
5348 }
5349 pseudo_opcode[] =
5350 {
5351 /* these are more like pseudo-ops, but don't start with a dot */
5352 { "data1", cons, 1 },
5353 { "data2", cons, 2 },
5354 { "data4", cons, 4 },
5355 { "data8", cons, 8 },
5356 { "data16", cons, 16 },
5357 { "real4", stmt_float_cons, 'f' },
5358 { "real8", stmt_float_cons, 'd' },
5359 { "real10", stmt_float_cons, 'x' },
5360 { "real16", stmt_float_cons, 'X' },
5361 { "string", stringer, 8 + 0 },
5362 { "stringz", stringer, 8 + 1 },
5363
5364 /* unaligned versions: */
5365 { "data2.ua", stmt_cons_ua, 2 },
5366 { "data4.ua", stmt_cons_ua, 4 },
5367 { "data8.ua", stmt_cons_ua, 8 },
5368 { "data16.ua", stmt_cons_ua, 16 },
5369 { "real4.ua", float_cons, 'f' },
5370 { "real8.ua", float_cons, 'd' },
5371 { "real10.ua", float_cons, 'x' },
5372 { "real16.ua", float_cons, 'X' },
5373 };
5374
5375 /* Declare a register by creating a symbol for it and entering it in
5376 the symbol table. */
5377
5378 static symbolS *
5379 declare_register (const char *name, unsigned int regnum)
5380 {
5381 const char *err;
5382 symbolS *sym;
5383
5384 sym = symbol_create (name, reg_section, regnum, &zero_address_frag);
5385
5386 err = hash_insert (md.reg_hash, S_GET_NAME (sym), (void *) sym);
5387 if (err)
5388 as_fatal ("Inserting \"%s\" into register table failed: %s",
5389 name, err);
5390
5391 return sym;
5392 }
5393
5394 static void
5395 declare_register_set (const char *prefix,
5396 unsigned int num_regs,
5397 unsigned int base_regnum)
5398 {
5399 char name[8];
5400 unsigned int i;
5401
5402 for (i = 0; i < num_regs; ++i)
5403 {
5404 snprintf (name, sizeof (name), "%s%u", prefix, i);
5405 declare_register (name, base_regnum + i);
5406 }
5407 }
5408
5409 static unsigned int
5410 operand_width (enum ia64_opnd opnd)
5411 {
5412 const struct ia64_operand *odesc = &elf64_ia64_operands[opnd];
5413 unsigned int bits = 0;
5414 int i;
5415
5416 bits = 0;
5417 for (i = 0; i < NELEMS (odesc->field) && odesc->field[i].bits; ++i)
5418 bits += odesc->field[i].bits;
5419
5420 return bits;
5421 }
5422
5423 static enum operand_match_result
5424 operand_match (const struct ia64_opcode *idesc, int res_index, expressionS *e)
5425 {
5426 enum ia64_opnd opnd = idesc->operands[res_index];
5427 int bits, relocatable = 0;
5428 struct insn_fix *fix;
5429 bfd_signed_vma val;
5430
5431 switch (opnd)
5432 {
5433 /* constants: */
5434
5435 case IA64_OPND_AR_CCV:
5436 if (e->X_op == O_register && e->X_add_number == REG_AR + 32)
5437 return OPERAND_MATCH;
5438 break;
5439
5440 case IA64_OPND_AR_CSD:
5441 if (e->X_op == O_register && e->X_add_number == REG_AR + 25)
5442 return OPERAND_MATCH;
5443 break;
5444
5445 case IA64_OPND_AR_PFS:
5446 if (e->X_op == O_register && e->X_add_number == REG_AR + 64)
5447 return OPERAND_MATCH;
5448 break;
5449
5450 case IA64_OPND_GR0:
5451 if (e->X_op == O_register && e->X_add_number == REG_GR + 0)
5452 return OPERAND_MATCH;
5453 break;
5454
5455 case IA64_OPND_IP:
5456 if (e->X_op == O_register && e->X_add_number == REG_IP)
5457 return OPERAND_MATCH;
5458 break;
5459
5460 case IA64_OPND_PR:
5461 if (e->X_op == O_register && e->X_add_number == REG_PR)
5462 return OPERAND_MATCH;
5463 break;
5464
5465 case IA64_OPND_PR_ROT:
5466 if (e->X_op == O_register && e->X_add_number == REG_PR_ROT)
5467 return OPERAND_MATCH;
5468 break;
5469
5470 case IA64_OPND_PSR:
5471 if (e->X_op == O_register && e->X_add_number == REG_PSR)
5472 return OPERAND_MATCH;
5473 break;
5474
5475 case IA64_OPND_PSR_L:
5476 if (e->X_op == O_register && e->X_add_number == REG_PSR_L)
5477 return OPERAND_MATCH;
5478 break;
5479
5480 case IA64_OPND_PSR_UM:
5481 if (e->X_op == O_register && e->X_add_number == REG_PSR_UM)
5482 return OPERAND_MATCH;
5483 break;
5484
5485 case IA64_OPND_C1:
5486 if (e->X_op == O_constant)
5487 {
5488 if (e->X_add_number == 1)
5489 return OPERAND_MATCH;
5490 else
5491 return OPERAND_OUT_OF_RANGE;
5492 }
5493 break;
5494
5495 case IA64_OPND_C8:
5496 if (e->X_op == O_constant)
5497 {
5498 if (e->X_add_number == 8)
5499 return OPERAND_MATCH;
5500 else
5501 return OPERAND_OUT_OF_RANGE;
5502 }
5503 break;
5504
5505 case IA64_OPND_C16:
5506 if (e->X_op == O_constant)
5507 {
5508 if (e->X_add_number == 16)
5509 return OPERAND_MATCH;
5510 else
5511 return OPERAND_OUT_OF_RANGE;
5512 }
5513 break;
5514
5515 /* register operands: */
5516
5517 case IA64_OPND_AR3:
5518 if (e->X_op == O_register && e->X_add_number >= REG_AR
5519 && e->X_add_number < REG_AR + 128)
5520 return OPERAND_MATCH;
5521 break;
5522
5523 case IA64_OPND_B1:
5524 case IA64_OPND_B2:
5525 if (e->X_op == O_register && e->X_add_number >= REG_BR
5526 && e->X_add_number < REG_BR + 8)
5527 return OPERAND_MATCH;
5528 break;
5529
5530 case IA64_OPND_CR3:
5531 if (e->X_op == O_register && e->X_add_number >= REG_CR
5532 && e->X_add_number < REG_CR + 128)
5533 return OPERAND_MATCH;
5534 break;
5535
5536 case IA64_OPND_DAHR3:
5537 if (e->X_op == O_register && e->X_add_number >= REG_DAHR
5538 && e->X_add_number < REG_DAHR + 8)
5539 return OPERAND_MATCH;
5540 break;
5541
5542 case IA64_OPND_F1:
5543 case IA64_OPND_F2:
5544 case IA64_OPND_F3:
5545 case IA64_OPND_F4:
5546 if (e->X_op == O_register && e->X_add_number >= REG_FR
5547 && e->X_add_number < REG_FR + 128)
5548 return OPERAND_MATCH;
5549 break;
5550
5551 case IA64_OPND_P1:
5552 case IA64_OPND_P2:
5553 if (e->X_op == O_register && e->X_add_number >= REG_P
5554 && e->X_add_number < REG_P + 64)
5555 return OPERAND_MATCH;
5556 break;
5557
5558 case IA64_OPND_R1:
5559 case IA64_OPND_R2:
5560 case IA64_OPND_R3:
5561 if (e->X_op == O_register && e->X_add_number >= REG_GR
5562 && e->X_add_number < REG_GR + 128)
5563 return OPERAND_MATCH;
5564 break;
5565
5566 case IA64_OPND_R3_2:
5567 if (e->X_op == O_register && e->X_add_number >= REG_GR)
5568 {
5569 if (e->X_add_number < REG_GR + 4)
5570 return OPERAND_MATCH;
5571 else if (e->X_add_number < REG_GR + 128)
5572 return OPERAND_OUT_OF_RANGE;
5573 }
5574 break;
5575
5576 /* indirect operands: */
5577 case IA64_OPND_CPUID_R3:
5578 case IA64_OPND_DBR_R3:
5579 case IA64_OPND_DTR_R3:
5580 case IA64_OPND_ITR_R3:
5581 case IA64_OPND_IBR_R3:
5582 case IA64_OPND_MSR_R3:
5583 case IA64_OPND_PKR_R3:
5584 case IA64_OPND_PMC_R3:
5585 case IA64_OPND_PMD_R3:
5586 case IA64_OPND_DAHR_R3:
5587 case IA64_OPND_RR_R3:
5588 if (e->X_op == O_index && e->X_op_symbol
5589 && (S_GET_VALUE (e->X_op_symbol) - IND_CPUID
5590 == opnd - IA64_OPND_CPUID_R3))
5591 return OPERAND_MATCH;
5592 break;
5593
5594 case IA64_OPND_MR3:
5595 if (e->X_op == O_index && !e->X_op_symbol)
5596 return OPERAND_MATCH;
5597 break;
5598
5599 /* immediate operands: */
5600 case IA64_OPND_CNT2a:
5601 case IA64_OPND_LEN4:
5602 case IA64_OPND_LEN6:
5603 bits = operand_width (idesc->operands[res_index]);
5604 if (e->X_op == O_constant)
5605 {
5606 if ((bfd_vma) (e->X_add_number - 1) < ((bfd_vma) 1 << bits))
5607 return OPERAND_MATCH;
5608 else
5609 return OPERAND_OUT_OF_RANGE;
5610 }
5611 break;
5612
5613 case IA64_OPND_CNT2b:
5614 if (e->X_op == O_constant)
5615 {
5616 if ((bfd_vma) (e->X_add_number - 1) < 3)
5617 return OPERAND_MATCH;
5618 else
5619 return OPERAND_OUT_OF_RANGE;
5620 }
5621 break;
5622
5623 case IA64_OPND_CNT2c:
5624 val = e->X_add_number;
5625 if (e->X_op == O_constant)
5626 {
5627 if ((val == 0 || val == 7 || val == 15 || val == 16))
5628 return OPERAND_MATCH;
5629 else
5630 return OPERAND_OUT_OF_RANGE;
5631 }
5632 break;
5633
5634 case IA64_OPND_SOR:
5635 /* SOR must be an integer multiple of 8 */
5636 if (e->X_op == O_constant && e->X_add_number & 0x7)
5637 return OPERAND_OUT_OF_RANGE;
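/* Fall through.  */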
5638 case IA64_OPND_SOF:
5639 case IA64_OPND_SOL:
5640 if (e->X_op == O_constant)
5641 {
5642 if ((bfd_vma) e->X_add_number <= 96)
5643 return OPERAND_MATCH;
5644 else
5645 return OPERAND_OUT_OF_RANGE;
5646 }
5647 break;
5648
5649 case IA64_OPND_IMMU62:
5650 if (e->X_op == O_constant)
5651 {
5652 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << 62))
5653 return OPERAND_MATCH;
5654 else
5655 return OPERAND_OUT_OF_RANGE;
5656 }
5657 else
5658 {
5659 /* FIXME -- need 62-bit relocation type */
5660 as_bad (_("62-bit relocation not yet implemented"));
5661 }
5662 break;
5663
5664 case IA64_OPND_IMMU64:
5665 if (e->X_op == O_symbol || e->X_op == O_pseudo_fixup
5666 || e->X_op == O_subtract)
5667 {
5668 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5669 fix->code = BFD_RELOC_IA64_IMM64;
5670 if (e->X_op != O_subtract)
5671 {
5672 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5673 if (e->X_op == O_pseudo_fixup)
5674 e->X_op = O_symbol;
5675 }
5676
5677 fix->opnd = idesc->operands[res_index];
5678 fix->expr = *e;
5679 fix->is_pcrel = 0;
5680 ++CURR_SLOT.num_fixups;
5681 return OPERAND_MATCH;
5682 }
5683 else if (e->X_op == O_constant)
5684 return OPERAND_MATCH;
5685 break;
5686
5687 case IA64_OPND_IMMU5b:
5688 if (e->X_op == O_constant)
5689 {
5690 val = e->X_add_number;
5691 if (val >= 32 && val <= 63)
5692 return OPERAND_MATCH;
5693 else
5694 return OPERAND_OUT_OF_RANGE;
5695 }
5696 break;
5697
5698 case IA64_OPND_CCNT5:
5699 case IA64_OPND_CNT5:
5700 case IA64_OPND_CNT6:
5701 case IA64_OPND_CPOS6a:
5702 case IA64_OPND_CPOS6b:
5703 case IA64_OPND_CPOS6c:
5704 case IA64_OPND_IMMU2:
5705 case IA64_OPND_IMMU7a:
5706 case IA64_OPND_IMMU7b:
5707 case IA64_OPND_IMMU16:
5708 case IA64_OPND_IMMU19:
5709 case IA64_OPND_IMMU21:
5710 case IA64_OPND_IMMU24:
5711 case IA64_OPND_MBTYPE4:
5712 case IA64_OPND_MHTYPE8:
5713 case IA64_OPND_POS6:
5714 bits = operand_width (idesc->operands[res_index]);
5715 if (e->X_op == O_constant)
5716 {
5717 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5718 return OPERAND_MATCH;
5719 else
5720 return OPERAND_OUT_OF_RANGE;
5721 }
5722 break;
5723
5724 case IA64_OPND_IMMU9:
5725 bits = operand_width (idesc->operands[res_index]);
5726 if (e->X_op == O_constant)
5727 {
5728 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5729 {
5730 int lobits = e->X_add_number & 0x3;
5731 if (((bfd_vma) e->X_add_number & 0x3C) != 0 && lobits == 0)
5732 e->X_add_number |= (bfd_vma) 0x3;
5733 return OPERAND_MATCH;
5734 }
5735 else
5736 return OPERAND_OUT_OF_RANGE;
5737 }
5738 break;
5739
5740 case IA64_OPND_IMM44:
5741 /* The least significant 16 bits must be zero. */
5742 if ((e->X_add_number & 0xffff) != 0)
5743 /* XXX technically, this is wrong: we should not be issuing warning
5744 messages until we're sure this instruction pattern is going to
5745 be used! */
5746 as_warn (_("lower 16 bits of mask ignored"));
5747
5748 if (e->X_op == O_constant)
5749 {
5750 if (((e->X_add_number >= 0
5751 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 44))
5752 || (e->X_add_number < 0
5753 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 44))))
5754 {
5755 /* sign-extend */
5756 if (e->X_add_number >= 0
5757 && (e->X_add_number & ((bfd_vma) 1 << 43)) != 0)
5758 {
5759 e->X_add_number |= ~(((bfd_vma) 1 << 44) - 1);
5760 }
5761 return OPERAND_MATCH;
5762 }
5763 else
5764 return OPERAND_OUT_OF_RANGE;
5765 }
5766 break;
5767
5768 case IA64_OPND_IMM17:
5769 /* bit 0 is a don't care (pr0 is hardwired to 1) */
5770 if (e->X_op == O_constant)
5771 {
5772 if (((e->X_add_number >= 0
5773 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 17))
5774 || (e->X_add_number < 0
5775 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 17))))
5776 {
5777 /* sign-extend */
5778 if (e->X_add_number >= 0
5779 && (e->X_add_number & ((bfd_vma) 1 << 16)) != 0)
5780 {
5781 e->X_add_number |= ~(((bfd_vma) 1 << 17) - 1);
5782 }
5783 return OPERAND_MATCH;
5784 }
5785 else
5786 return OPERAND_OUT_OF_RANGE;
5787 }
5788 break;
5789
5790 case IA64_OPND_IMM14:
5791 case IA64_OPND_IMM22:
5792 relocatable = 1;
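/* Fall through.  */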
5793 case IA64_OPND_IMM1:
5794 case IA64_OPND_IMM8:
5795 case IA64_OPND_IMM8U4:
5796 case IA64_OPND_IMM8M1:
5797 case IA64_OPND_IMM8M1U4:
5798 case IA64_OPND_IMM8M1U8:
5799 case IA64_OPND_IMM9a:
5800 case IA64_OPND_IMM9b:
5801 bits = operand_width (idesc->operands[res_index]);
5802 if (relocatable && (e->X_op == O_symbol
5803 || e->X_op == O_subtract
5804 || e->X_op == O_pseudo_fixup))
5805 {
5806 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5807
5808 if (idesc->operands[res_index] == IA64_OPND_IMM14)
5809 fix->code = BFD_RELOC_IA64_IMM14;
5810 else
5811 fix->code = BFD_RELOC_IA64_IMM22;
5812
5813 if (e->X_op != O_subtract)
5814 {
5815 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5816 if (e->X_op == O_pseudo_fixup)
5817 e->X_op = O_symbol;
5818 }
5819
5820 fix->opnd = idesc->operands[res_index];
5821 fix->expr = *e;
5822 fix->is_pcrel = 0;
5823 ++CURR_SLOT.num_fixups;
5824 return OPERAND_MATCH;
5825 }
5826 else if (e->X_op != O_constant
5827 && ! (e->X_op == O_big && opnd == IA64_OPND_IMM8M1U8))
5828 return OPERAND_MISMATCH;
5829
5830 if (opnd == IA64_OPND_IMM8M1U4)
5831 {
5832 /* Zero is not valid for unsigned compares that take an adjusted
5833 constant immediate range. */
5834 if (e->X_add_number == 0)
5835 return OPERAND_OUT_OF_RANGE;
5836
5837 /* Sign-extend 32-bit unsigned numbers, so that the following range
5838 checks will work. */
5839 val = e->X_add_number;
5840 if (((val & (~(bfd_vma) 0 << 32)) == 0)
5841 && ((val & ((bfd_vma) 1 << 31)) != 0))
5842 val = ((val << 32) >> 32);
5843
5844 /* Check for 0x100000000. This is valid because
5845 0x100000000-1 is the same as ((uint32_t) -1). */
5846 if (val == ((bfd_signed_vma) 1 << 32))
5847 return OPERAND_MATCH;
5848
5849 val = val - 1;
5850 }
5851 else if (opnd == IA64_OPND_IMM8M1U8)
5852 {
5853 /* Zero is not valid for unsigned compares that take an adjusted
5854 constant immediate range. */
5855 if (e->X_add_number == 0)
5856 return OPERAND_OUT_OF_RANGE;
5857
5858 /* Check for 0x10000000000000000. */
5859 if (e->X_op == O_big)
5860 {
5861 if (generic_bignum[0] == 0
5862 && generic_bignum[1] == 0
5863 && generic_bignum[2] == 0
5864 && generic_bignum[3] == 0
5865 && generic_bignum[4] == 1)
5866 return OPERAND_MATCH;
5867 else
5868 return OPERAND_OUT_OF_RANGE;
5869 }
5870 else
5871 val = e->X_add_number - 1;
5872 }
5873 else if (opnd == IA64_OPND_IMM8M1)
5874 val = e->X_add_number - 1;
5875 else if (opnd == IA64_OPND_IMM8U4)
5876 {
5877 /* Sign-extend 32-bit unsigned numbers, so that the following range
5878 checks will work. */
5879 val = e->X_add_number;
5880 if (((val & (~(bfd_vma) 0 << 32)) == 0)
5881 && ((val & ((bfd_vma) 1 << 31)) != 0))
5882 val = ((val << 32) >> 32);
5883 }
5884 else
5885 val = e->X_add_number;
5886
5887 if ((val >= 0 && (bfd_vma) val < ((bfd_vma) 1 << (bits - 1)))
5888 || (val < 0 && (bfd_vma) -val <= ((bfd_vma) 1 << (bits - 1))))
5889 return OPERAND_MATCH;
5890 else
5891 return OPERAND_OUT_OF_RANGE;
5892
5893 case IA64_OPND_INC3:
5894 /* +/- 1, 4, 8, 16 */
5895 val = e->X_add_number;
5896 if (val < 0)
5897 val = -val;
5898 if (e->X_op == O_constant)
5899 {
5900 if ((val == 1 || val == 4 || val == 8 || val == 16))
5901 return OPERAND_MATCH;
5902 else
5903 return OPERAND_OUT_OF_RANGE;
5904 }
5905 break;
5906
5907 case IA64_OPND_TGT25:
5908 case IA64_OPND_TGT25b:
5909 case IA64_OPND_TGT25c:
5910 case IA64_OPND_TGT64:
5911 if (e->X_op == O_symbol)
5912 {
5913 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5914 if (opnd == IA64_OPND_TGT25)
5915 fix->code = BFD_RELOC_IA64_PCREL21F;
5916 else if (opnd == IA64_OPND_TGT25b)
5917 fix->code = BFD_RELOC_IA64_PCREL21M;
5918 else if (opnd == IA64_OPND_TGT25c)
5919 fix->code = BFD_RELOC_IA64_PCREL21B;
5920 else if (opnd == IA64_OPND_TGT64)
5921 fix->code = BFD_RELOC_IA64_PCREL60B;
5922 else
5923 abort ();
5924
5925 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5926 fix->opnd = idesc->operands[res_index];
5927 fix->expr = *e;
5928 fix->is_pcrel = 1;
5929 ++CURR_SLOT.num_fixups;
5930 return OPERAND_MATCH;
5931 }
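/* Fall through.  */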
5932 case IA64_OPND_TAG13:
5933 case IA64_OPND_TAG13b:
5934 switch (e->X_op)
5935 {
5936 case O_constant:
5937 return OPERAND_MATCH;
5938
5939 case O_symbol:
5940 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5941 /* There are no external relocs for TAG13/TAG13b fields, so we
5942 create a dummy reloc. This will not live past md_apply_fix. */
5943 fix->code = BFD_RELOC_UNUSED;
5944 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5945 fix->opnd = idesc->operands[res_index];
5946 fix->expr = *e;
5947 fix->is_pcrel = 1;
5948 ++CURR_SLOT.num_fixups;
5949 return OPERAND_MATCH;
5950
5951 default:
5952 break;
5953 }
5954 break;
5955
5956 case IA64_OPND_LDXMOV:
5957 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5958 fix->code = BFD_RELOC_IA64_LDXMOV;
5959 fix->opnd = idesc->operands[res_index];
5960 fix->expr = *e;
5961 fix->is_pcrel = 0;
5962 ++CURR_SLOT.num_fixups;
5963 return OPERAND_MATCH;
5964
5965 case IA64_OPND_STRD5b:
5966 if (e->X_op == O_constant)
5967 {
5968 /* 5-bit signed scaled by 64 */
5969 if ((e->X_add_number <= ( 0xf << 6 ))
5970 && (e->X_add_number >= -( 0x10 << 6 )))
5971 {
5972
5973 /* Must be a multiple of 64 */
5974 if ((e->X_add_number & 0x3f) != 0)
5975 as_warn (_("stride must be a multiple of 64; lower 6 bits ignored"));
5976
5977 e->X_add_number &= ~ 0x3f;
5978 return OPERAND_MATCH;
5979 }
5980 else
5981 return OPERAND_OUT_OF_RANGE;
5982 }
5983 break;
5984 case IA64_OPND_CNT6a:
5985 if (e->X_op == O_constant)
5986 {
5987 /* 6-bit unsigned biased by 1 -- count 0 is meaningless */
5988 if ((e->X_add_number <= 64)
5989 && (e->X_add_number > 0) )
5990 {
5991 return OPERAND_MATCH;
5992 }
5993 else
5994 return OPERAND_OUT_OF_RANGE;
5995 }
5996 break;
5997
5998 default:
5999 break;
6000 }
6001 return OPERAND_MISMATCH;
6002 }
6003
6004 static int
6005 parse_operand (expressionS *e, int more)
6006 {
6007 int sep = '\0';
6008
6009 memset (e, 0, sizeof (*e));
6010 e->X_op = O_absent;
6011 SKIP_WHITESPACE ();
6012 expression (e);
6013 sep = *input_line_pointer;
6014 if (more && (sep == ',' || sep == more))
6015 ++input_line_pointer;
6016 return sep;
6017 }
6018
6019 static int
6020 parse_operand_and_eval (expressionS *e, int more)
6021 {
6022 int sep = parse_operand (e, more);
6023 resolve_expression (e);
6024 return sep;
6025 }
6026
6027 static int
6028 parse_operand_maybe_eval (expressionS *e, int more, enum ia64_opnd op)
6029 {
6030 int sep = parse_operand (e, more);
6031 switch (op)
6032 {
6033 case IA64_OPND_IMM14:
6034 case IA64_OPND_IMM22:
6035 case IA64_OPND_IMMU64:
6036 case IA64_OPND_TGT25:
6037 case IA64_OPND_TGT25b:
6038 case IA64_OPND_TGT25c:
6039 case IA64_OPND_TGT64:
6040 case IA64_OPND_TAG13:
6041 case IA64_OPND_TAG13b:
6042 case IA64_OPND_LDXMOV:
6043 break;
6044 default:
6045 resolve_expression (e);
6046 break;
6047 }
6048 return sep;
6049 }
6050
6051 /* Returns the next entry in the opcode table that matches the one in
6052 IDESC, and frees the entry in IDESC. If no matching entry is
6053 found, NULL is returned instead. */
6054
6055 static struct ia64_opcode *
6056 get_next_opcode (struct ia64_opcode *idesc)
6057 {
6058 struct ia64_opcode *next = ia64_find_next_opcode (idesc);
6059 ia64_free_opcode (idesc);
6060 return next;
6061 }
6062
6063 /* Parse the operands for the opcode and find the opcode variant that
6064 matches the specified operands, or NULL if no match is possible. */
6065
6066 static struct ia64_opcode *
6067 parse_operands (struct ia64_opcode *idesc)
6068 {
6069 int i = 0, highest_unmatched_operand, num_operands = 0, num_outputs = 0;
6070 int error_pos, out_of_range_pos, curr_out_of_range_pos, sep = 0;
6071 int reg1, reg2;
6072 char reg_class;
6073 enum ia64_opnd expected_operand = IA64_OPND_NIL;
6074 enum operand_match_result result;
6075 char mnemonic[129];
6076 char *first_arg = 0, *end, *saved_input_pointer;
6077 unsigned int sof;
6078
6079 gas_assert (strlen (idesc->name) <= 128);
6080
6081 strcpy (mnemonic, idesc->name);
6082 if (idesc->operands[2] == IA64_OPND_SOF
6083 || idesc->operands[1] == IA64_OPND_SOF)
6084 {
6085 /* To make the common idiom "alloc loc?=ar.pfs,0,1,0,0" work, we
6086 can't parse the first operand until we have parsed the
6087 remaining operands of the "alloc" instruction. */
6088 SKIP_WHITESPACE ();
6089 first_arg = input_line_pointer;
6090 end = strchr (input_line_pointer, '=');
6091 if (!end)
6092 {
6093 as_bad (_("Expected separator `='"));
6094 return 0;
6095 }
6096 input_line_pointer = end + 1;
6097 ++i;
6098 ++num_outputs;
6099 }
6100
6101 for (; ; ++i)
6102 {
6103 if (i < NELEMS (CURR_SLOT.opnd))
6104 {
6105 sep = parse_operand_maybe_eval (CURR_SLOT.opnd + i, '=',
6106 idesc->operands[i]);
6107 if (CURR_SLOT.opnd[i].X_op == O_absent)
6108 break;
6109 }
6110 else
6111 {
6112 expressionS dummy;
6113
6114 sep = parse_operand (&dummy, '=');
6115 if (dummy.X_op == O_absent)
6116 break;
6117 }
6118
6119 ++num_operands;
6120
6121 if (sep != '=' && sep != ',')
6122 break;
6123
6124 if (sep == '=')
6125 {
6126 if (num_outputs > 0)
6127 as_bad (_("Duplicate equal sign (=) in instruction"));
6128 else
6129 num_outputs = i + 1;
6130 }
6131 }
6132 if (sep != '\0')
6133 {
6134 as_bad (_("Illegal operand separator `%c'"), sep);
6135 return 0;
6136 }
6137
6138 if (idesc->operands[2] == IA64_OPND_SOF
6139 || idesc->operands[1] == IA64_OPND_SOF)
6140 {
6141 /* Map alloc r1=ar.pfs,i,l,o,r to alloc r1=ar.pfs,(i+l+o),(i+l),r.
6142 Note, however, that due to that mapping operand numbers in error
6143 messages for any of the constant operands will not be correct. */
6144 know (strcmp (idesc->name, "alloc") == 0);
6145 /* The first operand hasn't been parsed/initialized, yet (but
6146 num_operands intentionally doesn't account for that). */
6147 i = num_operands > 4 ? 2 : 1;
6148 #define FORCE_CONST(n) (CURR_SLOT.opnd[n].X_op == O_constant \
6149 ? CURR_SLOT.opnd[n].X_add_number \
6150 : 0)
6151 sof = set_regstack (FORCE_CONST(i),
6152 FORCE_CONST(i + 1),
6153 FORCE_CONST(i + 2),
6154 FORCE_CONST(i + 3));
6155 #undef FORCE_CONST
6156
6157 /* now we can parse the first arg: */
6158 saved_input_pointer = input_line_pointer;
6159 input_line_pointer = first_arg;
6160 sep = parse_operand_maybe_eval (CURR_SLOT.opnd + 0, '=',
6161 idesc->operands[0]);
6162 if (sep != '=')
6163 --num_outputs; /* force error */
6164 input_line_pointer = saved_input_pointer;
6165
6166 CURR_SLOT.opnd[i].X_add_number = sof;
6167 if (CURR_SLOT.opnd[i + 1].X_op == O_constant
6168 && CURR_SLOT.opnd[i + 2].X_op == O_constant)
6169 CURR_SLOT.opnd[i + 1].X_add_number
6170 = sof - CURR_SLOT.opnd[i + 2].X_add_number;
6171 else
6172 CURR_SLOT.opnd[i + 1].X_op = O_illegal;
6173 CURR_SLOT.opnd[i + 2] = CURR_SLOT.opnd[i + 3];
6174 }
6175
6176 highest_unmatched_operand = -4;
6177 curr_out_of_range_pos = -1;
6178 error_pos = 0;
6179 for (; idesc; idesc = get_next_opcode (idesc))
6180 {
6181 if (num_outputs != idesc->num_outputs)
6182 continue; /* mismatch in # of outputs */
6183 if (highest_unmatched_operand < 0)
6184 highest_unmatched_operand |= 1;
6185 if (num_operands > NELEMS (idesc->operands)
6186 || (num_operands < NELEMS (idesc->operands)
6187 && idesc->operands[num_operands])
6188 || (num_operands > 0 && !idesc->operands[num_operands - 1]))
6189 continue; /* mismatch in number of arguments */
6190 if (highest_unmatched_operand < 0)
6191 highest_unmatched_operand |= 2;
6192
6193 CURR_SLOT.num_fixups = 0;
6194
6195 /* Try to match all operands. If we see an out-of-range operand,
6196 then continue trying to match the rest of the operands, since if
6197 the rest match, then this idesc will give the best error message. */
6198
6199 out_of_range_pos = -1;
6200 for (i = 0; i < num_operands && idesc->operands[i]; ++i)
6201 {
6202 result = operand_match (idesc, i, CURR_SLOT.opnd + i);
6203 if (result != OPERAND_MATCH)
6204 {
6205 if (result != OPERAND_OUT_OF_RANGE)
6206 break;
6207 if (out_of_range_pos < 0)
6208 /* remember position of the first out-of-range operand: */
6209 out_of_range_pos = i;
6210 }
6211 }
6212
6213 /* If we did not match all operands, or if at least one operand was
6214 out-of-range, then this idesc does not match. Keep track of which
6215 idesc matched the most operands before failing. If we have two
6216 idescs that failed at the same position, and one had an out-of-range
6217 operand, then prefer the out-of-range operand. Thus if we have
6218 "add r0=0x1000000,r1" we get an error saying the constant is out
6219 of range instead of an error saying that the constant should have been
6220 a register. */
6221
6222 if (i != num_operands || out_of_range_pos >= 0)
6223 {
6224 if (i > highest_unmatched_operand
6225 || (i == highest_unmatched_operand
6226 && out_of_range_pos > curr_out_of_range_pos))
6227 {
6228 highest_unmatched_operand = i;
6229 if (out_of_range_pos >= 0)
6230 {
6231 expected_operand = idesc->operands[out_of_range_pos];
6232 error_pos = out_of_range_pos;
6233 }
6234 else
6235 {
6236 expected_operand = idesc->operands[i];
6237 error_pos = i;
6238 }
6239 curr_out_of_range_pos = out_of_range_pos;
6240 }
6241 continue;
6242 }
6243
6244 break;
6245 }
6246 if (!idesc)
6247 {
6248 if (expected_operand)
6249 as_bad (_("Operand %u of `%s' should be %s"),
6250 error_pos + 1, mnemonic,
6251 elf64_ia64_operands[expected_operand].desc);
6252 else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 1))
6253 as_bad (_("Wrong number of output operands"));
6254 else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 2))
6255 as_bad (_("Wrong number of input operands"));
6256 else
6257 as_bad (_("Operand mismatch"));
6258 return 0;
6259 }
6260
6261 /* Check that the instruction doesn't use
6262 - r0, f0, or f1 as output operands
6263 - the same predicate twice as output operands
6264 - r0 as address of a base update load or store
6265 - the same GR as output and address of a base update load
6266 - two even- or two odd-numbered FRs as output operands of a floating
6267 point parallel load.
6268 At most two (conflicting) output (or output-like) operands can exist,
6269 (floating point parallel loads have three outputs, but the base register,
6270 if updated, cannot conflict with the actual outputs). */
6271 reg2 = reg1 = -1;
6272 for (i = 0; i < num_operands; ++i)
6273 {
6274 int regno = 0;
6275
6276 reg_class = 0;
6277 switch (idesc->operands[i])
6278 {
6279 case IA64_OPND_R1:
6280 case IA64_OPND_R2:
6281 case IA64_OPND_R3:
6282 if (i < num_outputs)
6283 {
6284 if (CURR_SLOT.opnd[i].X_add_number == REG_GR)
6285 reg_class = 'r';
6286 else if (reg1 < 0)
6287 reg1 = CURR_SLOT.opnd[i].X_add_number;
6288 else if (reg2 < 0)
6289 reg2 = CURR_SLOT.opnd[i].X_add_number;
6290 }
6291 break;
6292 case IA64_OPND_P1:
6293 case IA64_OPND_P2:
6294 if (i < num_outputs)
6295 {
6296 if (reg1 < 0)
6297 reg1 = CURR_SLOT.opnd[i].X_add_number;
6298 else if (reg2 < 0)
6299 reg2 = CURR_SLOT.opnd[i].X_add_number;
6300 }
6301 break;
6302 case IA64_OPND_F1:
6303 case IA64_OPND_F2:
6304 case IA64_OPND_F3:
6305 case IA64_OPND_F4:
6306 if (i < num_outputs)
6307 {
6308 if (CURR_SLOT.opnd[i].X_add_number >= REG_FR
6309 && CURR_SLOT.opnd[i].X_add_number <= REG_FR + 1)
6310 {
6311 reg_class = 'f';
6312 regno = CURR_SLOT.opnd[i].X_add_number - REG_FR;
6313 }
6314 else if (reg1 < 0)
6315 reg1 = CURR_SLOT.opnd[i].X_add_number;
6316 else if (reg2 < 0)
6317 reg2 = CURR_SLOT.opnd[i].X_add_number;
6318 }
6319 break;
6320 case IA64_OPND_MR3:
6321 if (idesc->flags & IA64_OPCODE_POSTINC)
6322 {
6323 if (CURR_SLOT.opnd[i].X_add_number == REG_GR)
6324 reg_class = 'm';
6325 else if (reg1 < 0)
6326 reg1 = CURR_SLOT.opnd[i].X_add_number;
6327 else if (reg2 < 0)
6328 reg2 = CURR_SLOT.opnd[i].X_add_number;
6329 }
6330 break;
6331 default:
6332 break;
6333 }
6334 switch (reg_class)
6335 {
6336 case 0:
6337 break;
6338 default:
6339 as_warn (_("Invalid use of `%c%d' as output operand"), reg_class, regno);
6340 break;
6341 case 'm':
6342 as_warn (_("Invalid use of `r%d' as base update address operand"), regno);
6343 break;
6344 }
6345 }
6346 if (reg1 == reg2)
6347 {
6348 if (reg1 >= REG_GR && reg1 <= REG_GR + 127)
6349 {
6350 reg1 -= REG_GR;
6351 reg_class = 'r';
6352 }
6353 else if (reg1 >= REG_P && reg1 <= REG_P + 63)
6354 {
6355 reg1 -= REG_P;
6356 reg_class = 'p';
6357 }
6358 else if (reg1 >= REG_FR && reg1 <= REG_FR + 127)
6359 {
6360 reg1 -= REG_FR;
6361 reg_class = 'f';
6362 }
6363 else
6364 reg_class = 0;
6365 if (reg_class)
6366 as_warn (_("Invalid duplicate use of `%c%d'"), reg_class, reg1);
6367 }
6368 else if (((reg1 >= REG_FR && reg1 <= REG_FR + 31
6369 && reg2 >= REG_FR && reg2 <= REG_FR + 31)
6370 || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127
6371 && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127))
6372 && ! ((reg1 ^ reg2) & 1))
6373 as_warn (_("Invalid simultaneous use of `f%d' and `f%d'"),
6374 reg1 - REG_FR, reg2 - REG_FR);
6375 else if ((reg1 >= REG_FR && reg1 <= REG_FR + 31
6376 && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127)
6377 || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127
6378 && reg2 >= REG_FR && reg2 <= REG_FR + 31))
6379 as_warn (_("Dangerous simultaneous use of `f%d' and `f%d'"),
6380 reg1 - REG_FR, reg2 - REG_FR);
6381 return idesc;
6382 }
6383
6384 static void
6385 build_insn (struct slot *slot, bfd_vma *insnp)
6386 {
6387 const struct ia64_operand *odesc, *o2desc;
6388 struct ia64_opcode *idesc = slot->idesc;
6389 bfd_vma insn;
6390 bfd_signed_vma val;
6391 const char *err;
6392 int i;
6393
6394 insn = idesc->opcode | slot->qp_regno;
6395
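  /* Each operand below is folded into the 41-bit slot image by its operand
     descriptor's insert routine.  The extended forms (IMMU64, IMMU62, TGT64)
     additionally spill their high bits into the companion L slot via
     *insnp++, and register operands are first rebased to zero by subtracting
     REG_GR/REG_FR/etc.  */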
6396 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; ++i)
6397 {
6398 if (slot->opnd[i].X_op == O_register
6399 || slot->opnd[i].X_op == O_constant
6400 || slot->opnd[i].X_op == O_index)
6401 val = slot->opnd[i].X_add_number;
6402 else if (slot->opnd[i].X_op == O_big)
6403 {
6404 /* This must be the value 0x10000000000000000. */
6405 gas_assert (idesc->operands[i] == IA64_OPND_IMM8M1U8);
6406 val = 0;
6407 }
6408 else
6409 val = 0;
6410
6411 switch (idesc->operands[i])
6412 {
6413 case IA64_OPND_IMMU64:
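	  /* movl: bits 22..62 of the immediate go into the preceding L slot;
	     the X slot keeps bits 0..21 scattered across its immediate fields
	     plus the sign bit (bit 63).  */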
6414 *insnp++ = (val >> 22) & 0x1ffffffffffLL;
6415 insn |= (((val & 0x7f) << 13) | (((val >> 7) & 0x1ff) << 27)
6416 | (((val >> 16) & 0x1f) << 22) | (((val >> 21) & 0x1) << 21)
6417 | (((val >> 63) & 0x1) << 36));
6418 continue;
6419
6420 case IA64_OPND_IMMU62:
6421 val &= 0x3fffffffffffffffULL;
6422 if (val != slot->opnd[i].X_add_number)
6423 as_warn (_("Value truncated to 62 bits"));
6424 *insnp++ = (val >> 21) & 0x1ffffffffffLL;
6425 insn |= (((val & 0xfffff) << 6) | (((val >> 20) & 0x1) << 36));
6426 continue;
6427
6428 case IA64_OPND_TGT64:
6429 val >>= 4;
6430 *insnp++ = ((val >> 20) & 0x7fffffffffLL) << 2;
6431 insn |= ((((val >> 59) & 0x1) << 36)
6432 | (((val >> 0) & 0xfffff) << 13));
6433 continue;
6434
6435 case IA64_OPND_AR3:
6436 val -= REG_AR;
6437 break;
6438
6439 case IA64_OPND_B1:
6440 case IA64_OPND_B2:
6441 val -= REG_BR;
6442 break;
6443
6444 case IA64_OPND_CR3:
6445 val -= REG_CR;
6446 break;
6447
6448 case IA64_OPND_DAHR3:
6449 val -= REG_DAHR;
6450 break;
6451
6452 case IA64_OPND_F1:
6453 case IA64_OPND_F2:
6454 case IA64_OPND_F3:
6455 case IA64_OPND_F4:
6456 val -= REG_FR;
6457 break;
6458
6459 case IA64_OPND_P1:
6460 case IA64_OPND_P2:
6461 val -= REG_P;
6462 break;
6463
6464 case IA64_OPND_R1:
6465 case IA64_OPND_R2:
6466 case IA64_OPND_R3:
6467 case IA64_OPND_R3_2:
6468 case IA64_OPND_CPUID_R3:
6469 case IA64_OPND_DBR_R3:
6470 case IA64_OPND_DTR_R3:
6471 case IA64_OPND_ITR_R3:
6472 case IA64_OPND_IBR_R3:
6473 case IA64_OPND_MR3:
6474 case IA64_OPND_MSR_R3:
6475 case IA64_OPND_PKR_R3:
6476 case IA64_OPND_PMC_R3:
6477 case IA64_OPND_PMD_R3:
6478 case IA64_OPND_DAHR_R3:
6479 case IA64_OPND_RR_R3:
6480 val -= REG_GR;
6481 break;
6482
6483 default:
6484 break;
6485 }
6486
6487 odesc = elf64_ia64_operands + idesc->operands[i];
6488 err = (*odesc->insert) (odesc, val, &insn);
6489 if (err)
6490 as_bad_where (slot->src_file, slot->src_line,
6491 _("Bad operand value: %s"), err);
6492 if (idesc->flags & IA64_OPCODE_PSEUDO)
6493 {
6494 if ((idesc->flags & IA64_OPCODE_F2_EQ_F3)
6495 && odesc == elf64_ia64_operands + IA64_OPND_F3)
6496 {
6497 o2desc = elf64_ia64_operands + IA64_OPND_F2;
6498 (*o2desc->insert) (o2desc, val, &insn);
6499 }
6500 if ((idesc->flags & IA64_OPCODE_LEN_EQ_64MCNT)
6501 && (odesc == elf64_ia64_operands + IA64_OPND_CPOS6a
6502 || odesc == elf64_ia64_operands + IA64_OPND_POS6))
6503 {
6504 o2desc = elf64_ia64_operands + IA64_OPND_LEN6;
6505 (*o2desc->insert) (o2desc, 64 - val, &insn);
6506 }
6507 }
6508 }
6509 *insnp = insn;
6510 }
6511
6512 static void
6513 emit_one_bundle (void)
6514 {
6515 int manual_bundling_off = 0, manual_bundling = 0;
6516 enum ia64_unit required_unit, insn_unit = 0;
6517 enum ia64_insn_type type[3], insn_type;
6518 unsigned int template_val, orig_template;
6519 bfd_vma insn[3] = { -1, -1, -1 };
6520 struct ia64_opcode *idesc;
6521 int end_of_insn_group = 0, user_template = -1;
6522 int n, i, j, first, curr, last_slot;
6523 bfd_vma t0 = 0, t1 = 0;
6524 struct label_fix *lfix;
6525 bfd_boolean mark_label;
6526 struct insn_fix *ifix;
6527 char mnemonic[16];
6528 fixS *fix;
6529 char *f;
6530 int addr_mod;
6531
6532 first = (md.curr_slot + NUM_SLOTS - md.num_slots_in_use) % NUM_SLOTS;
6533 know (first >= 0 && first < NUM_SLOTS);
6534 n = MIN (3, md.num_slots_in_use);
6535
6536 /* Determine template: use user_template if specified, best match
6537 otherwise: */
6538
6539 if (md.slot[first].user_template >= 0)
6540 user_template = template_val = md.slot[first].user_template;
6541 else
6542 {
6543 /* Auto select appropriate template. */
6544 memset (type, 0, sizeof (type));
6545 curr = first;
6546 for (i = 0; i < n; ++i)
6547 {
6548 if (md.slot[curr].label_fixups && i != 0)
6549 break;
6550 type[i] = md.slot[curr].idesc->type;
6551 curr = (curr + 1) % NUM_SLOTS;
6552 }
6553 template_val = best_template[type[0]][type[1]][type[2]];
6554 }
6555
6556 /* initialize instructions with appropriate nops: */
6557 for (i = 0; i < 3; ++i)
6558 insn[i] = nop[ia64_templ_desc[template_val].exec_unit[i]];
6559
6560 f = frag_more (16);
6561
6562 /* Check to see if this bundle is at an offset that is a multiple of 16 bytes
6563 from the start of the frag. */
6564 addr_mod = frag_now_fix () & 15;
6565 if (frag_now->has_code && frag_now->insn_addr != addr_mod)
6566 as_bad (_("instruction address is not a multiple of 16"));
6567 frag_now->insn_addr = addr_mod;
6568 frag_now->has_code = 1;
6569
6570 /* now fill in slots with as many insns as possible: */
6571 curr = first;
6572 idesc = md.slot[curr].idesc;
6573 end_of_insn_group = 0;
6574 last_slot = -1;
6575 for (i = 0; i < 3 && md.num_slots_in_use > 0; ++i)
6576 {
6577 /* If we have unwind records, we may need to update some now. */
6578 unw_rec_list *ptr = md.slot[curr].unwind_record;
6579 unw_rec_list *end_ptr = NULL;
6580
6581 if (ptr)
6582 {
6583 /* Find the last prologue/body record in the list for the current
6584 insn, and set the slot number for all records up to that point.
6585 This needs to be done now, because prologue/body records refer to
6586 the current point, not the point after the instruction has been
6587 issued. This matters because there may have been nops emitted
6588 meanwhile. Any non-prologue non-body record followed by a
6589 prologue/body record must also refer to the current point. */
6590 unw_rec_list *last_ptr;
6591
6592 for (j = 1; end_ptr == NULL && j < md.num_slots_in_use; ++j)
6593 end_ptr = md.slot[(curr + j) % NUM_SLOTS].unwind_record;
6594 for (last_ptr = NULL; ptr != end_ptr; ptr = ptr->next)
6595 if (ptr->r.type == prologue || ptr->r.type == prologue_gr
6596 || ptr->r.type == body)
6597 last_ptr = ptr;
6598 if (last_ptr)
6599 {
6600 /* Make last_ptr point one after the last prologue/body
6601 record. */
6602 last_ptr = last_ptr->next;
6603 for (ptr = md.slot[curr].unwind_record; ptr != last_ptr;
6604 ptr = ptr->next)
6605 {
6606 ptr->slot_number = (unsigned long) f + i;
6607 ptr->slot_frag = frag_now;
6608 }
6609 /* Remove the initialized records, so that we won't accidentally
6610 update them again if we insert a nop and continue. */
6611 md.slot[curr].unwind_record = last_ptr;
6612 }
6613 }
6614
6615 manual_bundling_off = md.slot[curr].manual_bundling_off;
6616 if (md.slot[curr].manual_bundling_on)
6617 {
6618 if (curr == first)
6619 manual_bundling = 1;
6620 else
6621 break; /* Need to start a new bundle. */
6622 }
6623
6624 /* If this instruction specifies a template, then it must be the first
6625 instruction of a bundle. */
6626 if (curr != first && md.slot[curr].user_template >= 0)
6627 break;
6628
6629 if (idesc->flags & IA64_OPCODE_SLOT2)
6630 {
6631 if (manual_bundling && !manual_bundling_off)
6632 {
6633 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6634 _("`%s' must be last in bundle"), idesc->name);
6635 if (i < 2)
6636 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6637 }
6638 i = 2;
6639 }
6640 if (idesc->flags & IA64_OPCODE_LAST)
6641 {
6642 int required_slot;
6643 unsigned int required_template;
6644
6645 /* If we need a stop bit after an M slot, our only choice is
6646 template 5 (M;;MI). If we need a stop bit after a B
6647 slot, our only choice is to place it at the end of the
6648 bundle, because the only available templates are MIB,
6649 MBB, BBB, MMB, and MFB. We don't handle anything other
6650 than M and B slots because these are the only kinds of
6651 instructions that can have the IA64_OPCODE_LAST bit set. */
6652 required_template = template_val;
6653 switch (idesc->type)
6654 {
6655 case IA64_TYPE_M:
6656 required_slot = 0;
6657 required_template = 5;
6658 break;
6659
6660 case IA64_TYPE_B:
6661 required_slot = 2;
6662 break;
6663
6664 default:
6665 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6666 _("Internal error: don't know how to force %s to end of instruction group"),
6667 idesc->name);
6668 required_slot = i;
6669 break;
6670 }
6671 if (manual_bundling
6672 && (i > required_slot
6673 || (required_slot == 2 && !manual_bundling_off)
6674 || (user_template >= 0
6675 /* Changing from MMI to M;MI is OK. */
6676 && (template_val ^ required_template) > 1)))
6677 {
6678 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6679 _("`%s' must be last in instruction group"),
6680 idesc->name);
6681 if (i < 2 && required_slot == 2 && !manual_bundling_off)
6682 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6683 }
6684 if (required_slot < i)
6685 /* Can't fit this instruction. */
6686 break;
6687
6688 i = required_slot;
6689 if (required_template != template_val)
6690 {
6691 /* If we switch the template, we need to reset the NOPs
6692 after slot i. The slot-types of the instructions ahead
6693 of i never change, so we don't need to worry about
6694 changing NOPs in front of this slot. */
6695 for (j = i; j < 3; ++j)
6696 insn[j] = nop[ia64_templ_desc[required_template].exec_unit[j]];
6697
6698 /* We just picked a template that includes the stop bit in the
6699 middle, so we don't need another one emitted later. */
6700 md.slot[curr].end_of_insn_group = 0;
6701 }
6702 template_val = required_template;
6703 }
6704 if (curr != first && md.slot[curr].label_fixups)
6705 {
6706 if (manual_bundling)
6707 {
6708 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6709 _("Label must be first in a bundle"));
6710 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6711 }
6712 /* This insn must go into the first slot of a bundle. */
6713 break;
6714 }
6715
6716 if (end_of_insn_group && md.num_slots_in_use >= 1)
6717 {
6718 /* We need an instruction group boundary in the middle of a
6719 bundle. See if we can switch to another template with
6720 an appropriate boundary. */
6721
6722 orig_template = template_val;
6723 if (i == 1 && (user_template == 4
6724 || (user_template < 0
6725 && (ia64_templ_desc[template_val].exec_unit[0]
6726 == IA64_UNIT_M))))
6727 {
6728 template_val = 5;
6729 end_of_insn_group = 0;
6730 }
6731 else if (i == 2 && (user_template == 0
6732 || (user_template < 0
6733 && (ia64_templ_desc[template_val].exec_unit[1]
6734 == IA64_UNIT_I)))
6735 /* This test makes sure we don't switch the template if
6736 the next instruction is one that needs to be first in
6737 an instruction group. Since all those instructions are
6738 in the M group, there is no way such an instruction can
6739 fit in this bundle even if we switch the template. The
6740 reason we have to check for this is that otherwise we
6741 may end up generating "MI;;I M.." which has the deadly
6742 effect that the second M instruction is no longer the
6743 first in the group! --davidm 99/12/16 */
6744 && (idesc->flags & IA64_OPCODE_FIRST) == 0)
6745 {
6746 template_val = 1;
6747 end_of_insn_group = 0;
6748 }
6749 else if (i == 1
6750 && user_template == 0
6751 && !(idesc->flags & IA64_OPCODE_FIRST))
6752 /* Use the next slot. */
6753 continue;
6754 else if (curr != first)
6755 /* can't fit this insn */
6756 break;
6757
6758 if (template_val != orig_template)
6759 /* if we switch the template, we need to reset the NOPs
6760 after slot i. The slot-types of the instructions ahead
6761 of i never change, so we don't need to worry about
6762 changing NOPs in front of this slot. */
6763 for (j = i; j < 3; ++j)
6764 insn[j] = nop[ia64_templ_desc[template_val].exec_unit[j]];
6765 }
6766 required_unit = ia64_templ_desc[template_val].exec_unit[i];
6767
6768 /* resolve dynamic opcodes such as "break", "hint", and "nop": */
6769 if (idesc->type == IA64_TYPE_DYN)
6770 {
6771 enum ia64_opnd opnd1, opnd2;
6772
6773 if ((strcmp (idesc->name, "nop") == 0)
6774 || (strcmp (idesc->name, "break") == 0))
6775 insn_unit = required_unit;
6776 else if (strcmp (idesc->name, "hint") == 0)
6777 {
6778 insn_unit = required_unit;
6779 if (required_unit == IA64_UNIT_B)
6780 {
6781 switch (md.hint_b)
6782 {
6783 case hint_b_ok:
6784 break;
6785 case hint_b_warning:
6786 as_warn (_("hint in B unit may be treated as nop"));
6787 break;
6788 case hint_b_error:
6789 /* When manual bundling is off and there is no
6790 user template, we choose a different unit so
6791 that hint won't go into the current slot. We
6792 will fill the current bundle with nops and
6793 try to put hint into the next bundle. */
6794 if (!manual_bundling && user_template < 0)
6795 insn_unit = IA64_UNIT_I;
6796 else
6797 as_bad (_("hint in B unit can't be used"));
6798 break;
6799 }
6800 }
6801 }
6802 else if (strcmp (idesc->name, "chk.s") == 0
6803 || strcmp (idesc->name, "mov") == 0)
6804 {
6805 insn_unit = IA64_UNIT_M;
6806 if (required_unit == IA64_UNIT_I
6807 || (required_unit == IA64_UNIT_F && template_val == 6))
6808 insn_unit = IA64_UNIT_I;
6809 }
6810 else
6811 as_fatal (_("emit_one_bundle: unexpected dynamic op"));
6812
6813 snprintf (mnemonic, sizeof (mnemonic), "%s.%c",
6814 idesc->name, "?imbfxx"[insn_unit]);
6815 opnd1 = idesc->operands[0];
6816 opnd2 = idesc->operands[1];
6817 ia64_free_opcode (idesc);
6818 idesc = ia64_find_opcode (mnemonic);
6819 /* moves to/from ARs have collisions */
6820 if (opnd1 == IA64_OPND_AR3 || opnd2 == IA64_OPND_AR3)
6821 {
6822 while (idesc != NULL
6823 && (idesc->operands[0] != opnd1
6824 || idesc->operands[1] != opnd2))
6825 idesc = get_next_opcode (idesc);
6826 }
6827 md.slot[curr].idesc = idesc;
6828 }
6829 else
6830 {
6831 insn_type = idesc->type;
6832 insn_unit = IA64_UNIT_NIL;
6833 switch (insn_type)
6834 {
6835 case IA64_TYPE_A:
6836 if (required_unit == IA64_UNIT_I || required_unit == IA64_UNIT_M)
6837 insn_unit = required_unit;
6838 break;
6839 case IA64_TYPE_X: insn_unit = IA64_UNIT_L; break;
6840 case IA64_TYPE_I: insn_unit = IA64_UNIT_I; break;
6841 case IA64_TYPE_M: insn_unit = IA64_UNIT_M; break;
6842 case IA64_TYPE_B: insn_unit = IA64_UNIT_B; break;
6843 case IA64_TYPE_F: insn_unit = IA64_UNIT_F; break;
6844 default: break;
6845 }
6846 }
6847
6848 if (insn_unit != required_unit)
6849 continue; /* Try next slot. */
6850
6851 /* Now is a good time to fix up the labels for this insn. */
6852 mark_label = FALSE;
6853 for (lfix = md.slot[curr].label_fixups; lfix; lfix = lfix->next)
6854 {
6855 S_SET_VALUE (lfix->sym, frag_now_fix () - 16);
6856 symbol_set_frag (lfix->sym, frag_now);
6857 mark_label |= lfix->dw2_mark_labels;
6858 }
6859 for (lfix = md.slot[curr].tag_fixups; lfix; lfix = lfix->next)
6860 {
6861 S_SET_VALUE (lfix->sym, frag_now_fix () - 16 + i);
6862 symbol_set_frag (lfix->sym, frag_now);
6863 }
6864
6865 if (debug_type == DEBUG_DWARF2
6866 || md.slot[curr].loc_directive_seen
6867 || mark_label)
6868 {
6869 bfd_vma addr = frag_now->fr_address + frag_now_fix () - 16 + i;
6870
6871 md.slot[curr].loc_directive_seen = 0;
6872 if (mark_label)
6873 md.slot[curr].debug_line.flags |= DWARF2_FLAG_BASIC_BLOCK;
6874
6875 dwarf2_gen_line_info (addr, &md.slot[curr].debug_line);
6876 }
6877
6878 build_insn (md.slot + curr, insn + i);
6879
6880 ptr = md.slot[curr].unwind_record;
6881 if (ptr)
6882 {
6883 /* Set slot numbers for all remaining unwind records belonging to the
6884 current insn. There can not be any prologue/body unwind records
6885 here. */
6886 for (; ptr != end_ptr; ptr = ptr->next)
6887 {
6888 ptr->slot_number = (unsigned long) f + i;
6889 ptr->slot_frag = frag_now;
6890 }
6891 md.slot[curr].unwind_record = NULL;
6892 }
6893
6894 for (j = 0; j < md.slot[curr].num_fixups; ++j)
6895 {
6896 ifix = md.slot[curr].fixup + j;
6897 fix = fix_new_exp (frag_now, frag_now_fix () - 16 + i, 8,
6898 &ifix->expr, ifix->is_pcrel, ifix->code);
6899 fix->tc_fix_data.opnd = ifix->opnd;
6900 fix->fx_file = md.slot[curr].src_file;
6901 fix->fx_line = md.slot[curr].src_line;
6902 }
6903
6904 end_of_insn_group = md.slot[curr].end_of_insn_group;
6905
6906 /* This adjustment to "i" must occur after the fix, otherwise the fix
6907 is assigned to the wrong slot, and the VMS linker complains. */
6908 if (required_unit == IA64_UNIT_L)
6909 {
6910 know (i == 1);
6911 /* skip one slot for long/X-unit instructions */
6912 ++i;
6913 }
6914 --md.num_slots_in_use;
6915 last_slot = i;
6916
6917 /* clear slot: */
6918 ia64_free_opcode (md.slot[curr].idesc);
6919 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6920 md.slot[curr].user_template = -1;
6921
6922 if (manual_bundling_off)
6923 {
6924 manual_bundling = 0;
6925 break;
6926 }
6927 curr = (curr + 1) % NUM_SLOTS;
6928 idesc = md.slot[curr].idesc;
6929 }
6930
6931 /* A user template was specified, but the first following instruction did
6932 not fit. This can happen with or without manual bundling. */
6933 if (md.num_slots_in_use > 0 && last_slot < 0)
6934 {
6935 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6936 _("`%s' does not fit into %s template"),
6937 idesc->name, ia64_templ_desc[template_val].name);
6938 /* Drop first insn so we don't livelock. */
6939 --md.num_slots_in_use;
6940 know (curr == first);
6941 ia64_free_opcode (md.slot[curr].idesc);
6942 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6943 md.slot[curr].user_template = -1;
6944 }
6945 else if (manual_bundling > 0)
6946 {
6947 if (md.num_slots_in_use > 0)
6948 {
6949 if (last_slot >= 2)
6950 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6951 _("`%s' does not fit into bundle"), idesc->name);
6952 else
6953 {
6954 const char *where;
6955
6956 if (template_val == 2)
6957 where = "X slot";
6958 else if (last_slot == 0)
6959 where = "slots 2 or 3";
6960 else
6961 where = "slot 3";
6962 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6963 _("`%s' can't go in %s of %s template"),
6964 idesc->name, where, ia64_templ_desc[template_val].name);
6965 }
6966 }
6967 else
6968 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6969 _("Missing '}' at end of file"));
6970 }
6971
6972 know (md.num_slots_in_use < NUM_SLOTS);
6973
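  /* Pack the 128-bit bundle: bits 0..4 hold the template (the low bit
     selecting the variant with a trailing stop), bits 5..45 slot 0,
     bits 46..86 slot 1, and bits 87..127 slot 2.  The two 64-bit halves
     are then written out little-endian.  */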
6974 t0 = end_of_insn_group | (template_val << 1) | (insn[0] << 5) | (insn[1] << 46);
6975 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
6976
6977 number_to_chars_littleendian (f + 0, t0, 8);
6978 number_to_chars_littleendian (f + 8, t1, 8);
6979 }
6980
6981 int
6982 md_parse_option (int c, const char *arg)
6983 {
6984
6985 switch (c)
6986 {
6987 /* Switches from the Intel assembler. */
6988 case 'm':
6989 if (strcmp (arg, "ilp64") == 0
6990 || strcmp (arg, "lp64") == 0
6991 || strcmp (arg, "p64") == 0)
6992 {
6993 md.flags |= EF_IA_64_ABI64;
6994 }
6995 else if (strcmp (arg, "ilp32") == 0)
6996 {
6997 md.flags &= ~EF_IA_64_ABI64;
6998 }
6999 else if (strcmp (arg, "le") == 0)
7000 {
7001 md.flags &= ~EF_IA_64_BE;
7002 default_big_endian = 0;
7003 }
7004 else if (strcmp (arg, "be") == 0)
7005 {
7006 md.flags |= EF_IA_64_BE;
7007 default_big_endian = 1;
7008 }
7009 else if (strncmp (arg, "unwind-check=", 13) == 0)
7010 {
7011 arg += 13;
7012 if (strcmp (arg, "warning") == 0)
7013 md.unwind_check = unwind_check_warning;
7014 else if (strcmp (arg, "error") == 0)
7015 md.unwind_check = unwind_check_error;
7016 else
7017 return 0;
7018 }
7019 else if (strncmp (arg, "hint.b=", 7) == 0)
7020 {
7021 arg += 7;
7022 if (strcmp (arg, "ok") == 0)
7023 md.hint_b = hint_b_ok;
7024 else if (strcmp (arg, "warning") == 0)
7025 md.hint_b = hint_b_warning;
7026 else if (strcmp (arg, "error") == 0)
7027 md.hint_b = hint_b_error;
7028 else
7029 return 0;
7030 }
7031 else if (strncmp (arg, "tune=", 5) == 0)
7032 {
7033 arg += 5;
7034 if (strcmp (arg, "itanium1") == 0)
7035 md.tune = itanium1;
7036 else if (strcmp (arg, "itanium2") == 0)
7037 md.tune = itanium2;
7038 else
7039 return 0;
7040 }
7041 else
7042 return 0;
7043 break;
7044
7045 case 'N':
7046 if (strcmp (arg, "so") == 0)
7047 {
7048 /* Suppress signon message. */
7049 }
7050 else if (strcmp (arg, "pi") == 0)
7051 {
7052 /* Reject privileged instructions. FIXME */
7053 }
7054 else if (strcmp (arg, "us") == 0)
7055 {
7056 /* Allow union of signed and unsigned range. FIXME */
7057 }
7058 else if (strcmp (arg, "close_fcalls") == 0)
7059 {
7060 /* Do not resolve global function calls. */
7061 }
7062 else
7063 return 0;
7064 break;
7065
7066 case 'C':
7067 /* temp[="prefix"] Insert temporary labels into the object file
7068 symbol table prefixed by "prefix".
7069 Default prefix is ":temp:".
7070 */
7071 break;
7072
7073 case 'a':
7074 /* indirect=<tgt> Assume unannotated indirect branches behave
7075 according to <tgt> --
7076 exit: branch out from the current context (default)
7077 labels: all labels in context may be branch targets
7078 */
7079 if (strncmp (arg, "indirect=", 9) != 0)
7080 return 0;
7081 break;
7082
7083 case 'x':
7084 /* -X conflicts with an ignored option, use -x instead */
7085 md.detect_dv = 1;
7086 if (!arg || strcmp (arg, "explicit") == 0)
7087 {
7088 /* set default mode to explicit */
7089 md.default_explicit_mode = 1;
7090 break;
7091 }
7092 else if (strcmp (arg, "auto") == 0)
7093 {
7094 md.default_explicit_mode = 0;
7095 }
7096 else if (strcmp (arg, "none") == 0)
7097 {
7098 md.detect_dv = 0;
7099 }
7100 else if (strcmp (arg, "debug") == 0)
7101 {
7102 md.debug_dv = 1;
7103 }
7104 else if (strcmp (arg, "debugx") == 0)
7105 {
7106 md.default_explicit_mode = 1;
7107 md.debug_dv = 1;
7108 }
7109 else if (strcmp (arg, "debugn") == 0)
7110 {
7111 md.debug_dv = 1;
7112 md.detect_dv = 0;
7113 }
7114 else
7115 {
7116 as_bad (_("Unrecognized option '-x%s'"), arg);
7117 }
7118 break;
7119
7120 case 'S':
7121 /* nops Print nops statistics. */
7122 break;
7123
7124 /* GNU specific switches for gcc. */
7125 case OPTION_MCONSTANT_GP:
7126 md.flags |= EF_IA_64_CONS_GP;
7127 break;
7128
7129 case OPTION_MAUTO_PIC:
7130 md.flags |= EF_IA_64_NOFUNCDESC_CONS_GP;
7131 break;
7132
7133 default:
7134 return 0;
7135 }
7136
7137 return 1;
7138 }
7139
7140 void
7141 md_show_usage (FILE *stream)
7142 {
7143 fputs (_("\
7144 IA-64 options:\n\
7145 --mconstant-gp mark output file as using the constant-GP model\n\
7146 (sets ELF header flag EF_IA_64_CONS_GP)\n\
7147 --mauto-pic mark output file as using the constant-GP model\n\
7148 without function descriptors (sets ELF header flag\n\
7149 EF_IA_64_NOFUNCDESC_CONS_GP)\n\
7150 -milp32|-milp64|-mlp64|-mp64 select data model (default -mlp64)\n\
7151 -mle | -mbe select little- or big-endian byte order (default -mle)\n\
7152 -mtune=[itanium1|itanium2]\n\
7153 tune for a specific CPU (default -mtune=itanium2)\n\
7154 -munwind-check=[warning|error]\n\
7155 unwind directive check (default -munwind-check=warning)\n\
7156 -mhint.b=[ok|warning|error]\n\
7157 hint.b check (default -mhint.b=error)\n\
7158 -x | -xexplicit turn on dependency violation checking\n"), stream);
7159 /* Note for translators: "automagically" can be translated as "automatically" here. */
7160 fputs (_("\
7161 -xauto automagically remove dependency violations (default)\n\
7162 -xnone turn off dependency violation checking\n\
7163 -xdebug debug dependency violation checker\n\
7164 -xdebugn debug dependency violation checker but turn off\n\
7165 dependency violation checking\n\
7166 -xdebugx debug dependency violation checker and turn on\n\
7167 dependency violation checking\n"),
7168 stream);
7169 }
7170
7171 void
7172 ia64_after_parse_args (void)
7173 {
7174 if (debug_type == DEBUG_STABS)
7175 as_fatal (_("--gstabs is not supported for ia64"));
7176 }
7177
7178 /* Return true if TYPE fits in TEMPL at SLOT. */
7179
7180 static int
7181 match (int templ, int type, int slot)
7182 {
7183 enum ia64_unit unit;
7184 int result;
7185
7186 unit = ia64_templ_desc[templ].exec_unit[slot];
7187 switch (type)
7188 {
7189 case IA64_TYPE_DYN: result = 1; break; /* for nop and break */
7190 case IA64_TYPE_A:
7191 result = (unit == IA64_UNIT_I || unit == IA64_UNIT_M);
7192 break;
7193 case IA64_TYPE_X: result = (unit == IA64_UNIT_L); break;
7194 case IA64_TYPE_I: result = (unit == IA64_UNIT_I); break;
7195 case IA64_TYPE_M: result = (unit == IA64_UNIT_M); break;
7196 case IA64_TYPE_B: result = (unit == IA64_UNIT_B); break;
7197 case IA64_TYPE_F: result = (unit == IA64_UNIT_F); break;
7198 default: result = 0; break;
7199 }
7200 return result;
7201 }
7202
7203 /* For Itanium 1, add a bit of extra goodness if a nop of type F or B would fit
7204 in TEMPL at SLOT. For Itanium 2, add a bit of extra goodness if a nop of
7205 type M or I would fit in TEMPL at SLOT. */
7206
7207 static inline int
7208 extra_goodness (int templ, int slot)
7209 {
7210 switch (md.tune)
7211 {
7212 case itanium1:
7213 if (slot == 1 && match (templ, IA64_TYPE_F, slot))
7214 return 2;
7215 else if (slot == 2 && match (templ, IA64_TYPE_B, slot))
7216 return 1;
7217 else
7218 return 0;
7219 break;
7220 case itanium2:
7221 if (match (templ, IA64_TYPE_M, slot)
7222 || match (templ, IA64_TYPE_I, slot))
7223 /* Favor M- and I-unit NOPs. We definitely want to avoid
7224 F-unit and B-unit NOPs, which may cause split-issue or
7225 less-than-optimal branch prediction. */
7226 return 2;
7227 else
7228 return 0;
7229 break;
7230 default:
7231 abort ();
7232 return 0;
7233 }
7234 }
7235
7236 /* This function is called once, at assembler startup time. It sets
7237 up all the tables, etc. that the MD part of the assembler will need
7238 that can be determined before arguments are parsed. */
7239 void
7240 md_begin (void)
7241 {
7242 int i, j, k, t, goodness, best, ok;
7243 const char *err;
7244 char name[8];
7245
7246 md.auto_align = 1;
7247 md.explicit_mode = md.default_explicit_mode;
7248
7249 bfd_set_section_alignment (stdoutput, text_section, 4);
7250
7251 /* Make sure function pointers get initialized. */
7252 target_big_endian = -1;
7253 dot_byteorder (default_big_endian);
7254
7255 alias_hash = hash_new ();
7256 alias_name_hash = hash_new ();
7257 secalias_hash = hash_new ();
7258 secalias_name_hash = hash_new ();
7259
7260 pseudo_func[FUNC_DTP_MODULE].u.sym =
7261 symbol_new (".<dtpmod>", undefined_section, FUNC_DTP_MODULE,
7262 &zero_address_frag);
7263
7264 pseudo_func[FUNC_DTP_RELATIVE].u.sym =
7265 symbol_new (".<dtprel>", undefined_section, FUNC_DTP_RELATIVE,
7266 &zero_address_frag);
7267
7268 pseudo_func[FUNC_FPTR_RELATIVE].u.sym =
7269 symbol_new (".<fptr>", undefined_section, FUNC_FPTR_RELATIVE,
7270 &zero_address_frag);
7271
7272 pseudo_func[FUNC_GP_RELATIVE].u.sym =
7273 symbol_new (".<gprel>", undefined_section, FUNC_GP_RELATIVE,
7274 &zero_address_frag);
7275
7276 pseudo_func[FUNC_LT_RELATIVE].u.sym =
7277 symbol_new (".<ltoff>", undefined_section, FUNC_LT_RELATIVE,
7278 &zero_address_frag);
7279
7280 pseudo_func[FUNC_LT_RELATIVE_X].u.sym =
7281 symbol_new (".<ltoffx>", undefined_section, FUNC_LT_RELATIVE_X,
7282 &zero_address_frag);
7283
7284 pseudo_func[FUNC_PC_RELATIVE].u.sym =
7285 symbol_new (".<pcrel>", undefined_section, FUNC_PC_RELATIVE,
7286 &zero_address_frag);
7287
7288 pseudo_func[FUNC_PLT_RELATIVE].u.sym =
7289 symbol_new (".<pltoff>", undefined_section, FUNC_PLT_RELATIVE,
7290 &zero_address_frag);
7291
7292 pseudo_func[FUNC_SEC_RELATIVE].u.sym =
7293 symbol_new (".<secrel>", undefined_section, FUNC_SEC_RELATIVE,
7294 &zero_address_frag);
7295
7296 pseudo_func[FUNC_SEG_RELATIVE].u.sym =
7297 symbol_new (".<segrel>", undefined_section, FUNC_SEG_RELATIVE,
7298 &zero_address_frag);
7299
7300 pseudo_func[FUNC_TP_RELATIVE].u.sym =
7301 symbol_new (".<tprel>", undefined_section, FUNC_TP_RELATIVE,
7302 &zero_address_frag);
7303
7304 pseudo_func[FUNC_LTV_RELATIVE].u.sym =
7305 symbol_new (".<ltv>", undefined_section, FUNC_LTV_RELATIVE,
7306 &zero_address_frag);
7307
7308 pseudo_func[FUNC_LT_FPTR_RELATIVE].u.sym =
7309 symbol_new (".<ltoff.fptr>", undefined_section, FUNC_LT_FPTR_RELATIVE,
7310 &zero_address_frag);
7311
7312 pseudo_func[FUNC_LT_DTP_MODULE].u.sym =
7313 symbol_new (".<ltoff.dtpmod>", undefined_section, FUNC_LT_DTP_MODULE,
7314 &zero_address_frag);
7315
7316 pseudo_func[FUNC_LT_DTP_RELATIVE].u.sym =
7317 symbol_new (".<ltoff.dtprel>", undefined_section, FUNC_LT_DTP_RELATIVE,
7318 &zero_address_frag);
7319
7320 pseudo_func[FUNC_LT_TP_RELATIVE].u.sym =
7321 symbol_new (".<ltoff.tprel>", undefined_section, FUNC_LT_TP_RELATIVE,
7322 &zero_address_frag);
7323
7324 pseudo_func[FUNC_IPLT_RELOC].u.sym =
7325 symbol_new (".<iplt>", undefined_section, FUNC_IPLT_RELOC,
7326 &zero_address_frag);
7327
7328 #ifdef TE_VMS
7329 pseudo_func[FUNC_SLOTCOUNT_RELOC].u.sym =
7330 symbol_new (".<slotcount>", undefined_section, FUNC_SLOTCOUNT_RELOC,
7331 &zero_address_frag);
7332 #endif
7333
7334 if (md.tune != itanium1)
7335 {
7336 /* Convert MFI NOP bundles into MMI NOP bundles. */
7337 le_nop[0] = 0x8;
7338 le_nop_stop[0] = 0x9;
7339 }
7340
7341 /* Compute the table of best templates. We compute goodness as a
7342 base 4 value, in which each match counts for 3. Match-failures
7343 result in NOPs and we use extra_goodness() to pick the execution
7344 units that are best suited for issuing the NOP. */
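  /* For instance, a template matching all three instruction types scores
     3 + 3 + 3 = 9, while one matching only the first two scores at most
     3 + 3 + 2 = 8 (the trailing 2 coming from extra_goodness () for the
     NOP slot), so complete matches always win.  */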
7345 for (i = 0; i < IA64_NUM_TYPES; ++i)
7346 for (j = 0; j < IA64_NUM_TYPES; ++j)
7347 for (k = 0; k < IA64_NUM_TYPES; ++k)
7348 {
7349 best = 0;
7350 for (t = 0; t < NELEMS (ia64_templ_desc); ++t)
7351 {
7352 goodness = 0;
7353 if (match (t, i, 0))
7354 {
7355 if (match (t, j, 1))
7356 {
7357 if ((t == 2 && j == IA64_TYPE_X) || match (t, k, 2))
7358 goodness = 3 + 3 + 3;
7359 else
7360 goodness = 3 + 3 + extra_goodness (t, 2);
7361 }
7362 else if (match (t, j, 2))
7363 goodness = 3 + 3 + extra_goodness (t, 1);
7364 else
7365 {
7366 goodness = 3;
7367 goodness += extra_goodness (t, 1);
7368 goodness += extra_goodness (t, 2);
7369 }
7370 }
7371 else if (match (t, i, 1))
7372 {
7373 if ((t == 2 && i == IA64_TYPE_X) || match (t, j, 2))
7374 goodness = 3 + 3;
7375 else
7376 goodness = 3 + extra_goodness (t, 2);
7377 }
7378 else if (match (t, i, 2))
7379 goodness = 3 + extra_goodness (t, 1);
7380
7381 if (goodness > best)
7382 {
7383 best = goodness;
7384 best_template[i][j][k] = t;
7385 }
7386 }
7387 }
7388
7389 #ifdef DEBUG_TEMPLATES
7390 /* For debugging changes to the best_template calculations. We don't care
7391 about combinations with invalid instructions, so start the loops at 1. */
7392 for (i = 1; i < IA64_NUM_TYPES; ++i)
7393 for (j = 1; j < IA64_NUM_TYPES; ++j)
7394 for (k = 1; k < IA64_NUM_TYPES; ++k)
7395 {
7396 char type_letter[IA64_NUM_TYPES] = { 'n', 'a', 'i', 'm', 'b', 'f',
7397 'x', 'd' };
7398 fprintf (stderr, "%c%c%c %s\n", type_letter[i], type_letter[j],
7399 type_letter[k],
7400 ia64_templ_desc[best_template[i][j][k]].name);
7401 }
7402 #endif
7403
7404 for (i = 0; i < NUM_SLOTS; ++i)
7405 md.slot[i].user_template = -1;
7406
7407 md.pseudo_hash = hash_new ();
7408 for (i = 0; i < NELEMS (pseudo_opcode); ++i)
7409 {
7410 err = hash_insert (md.pseudo_hash, pseudo_opcode[i].name,
7411 (void *) (pseudo_opcode + i));
7412 if (err)
7413 as_fatal (_("ia64.md_begin: can't hash `%s': %s"),
7414 pseudo_opcode[i].name, err);
7415 }
7416
7417 md.reg_hash = hash_new ();
7418 md.dynreg_hash = hash_new ();
7419 md.const_hash = hash_new ();
7420 md.entry_hash = hash_new ();
7421
7422 /* general registers: */
7423 declare_register_set ("r", 128, REG_GR);
7424 declare_register ("gp", REG_GR + 1);
7425 declare_register ("sp", REG_GR + 12);
7426 declare_register ("tp", REG_GR + 13);
7427 declare_register_set ("ret", 4, REG_GR + 8);
7428
7429 /* floating point registers: */
7430 declare_register_set ("f", 128, REG_FR);
7431 declare_register_set ("farg", 8, REG_FR + 8);
7432 declare_register_set ("fret", 8, REG_FR + 8);
7433
7434 /* branch registers: */
7435 declare_register_set ("b", 8, REG_BR);
7436 declare_register ("rp", REG_BR + 0);
7437
7438 /* predicate registers: */
7439 declare_register_set ("p", 64, REG_P);
7440 declare_register ("pr", REG_PR);
7441 declare_register ("pr.rot", REG_PR_ROT);
7442
7443 /* application registers: */
7444 declare_register_set ("ar", 128, REG_AR);
7445 for (i = 0; i < NELEMS (ar); ++i)
7446 declare_register (ar[i].name, REG_AR + ar[i].regnum);
7447
7448 /* control registers: */
7449 declare_register_set ("cr", 128, REG_CR);
7450 for (i = 0; i < NELEMS (cr); ++i)
7451 declare_register (cr[i].name, REG_CR + cr[i].regnum);
7452
7453 /* dahr registers: */
7454 declare_register_set ("dahr", 8, REG_DAHR);
7455
7456 declare_register ("ip", REG_IP);
7457 declare_register ("cfm", REG_CFM);
7458 declare_register ("psr", REG_PSR);
7459 declare_register ("psr.l", REG_PSR_L);
7460 declare_register ("psr.um", REG_PSR_UM);
7461
7462 for (i = 0; i < NELEMS (indirect_reg); ++i)
7463 {
7464 unsigned int regnum = indirect_reg[i].regnum;
7465
7466 md.indregsym[regnum - IND_CPUID] = declare_register (indirect_reg[i].name, regnum);
7467 }
7468
7469 /* pseudo-registers used to specify unwind info: */
7470 declare_register ("psp", REG_PSP);
7471
7472 for (i = 0; i < NELEMS (const_bits); ++i)
7473 {
7474 err = hash_insert (md.const_hash, const_bits[i].name,
7475 (void *) (const_bits + i));
7476 if (err)
7477 as_fatal (_("Inserting \"%s\" into constant hash table failed: %s"),
7478 const_bits[i].name, err);
7479 }
7480
7481 /* Set the architecture and machine depending on defaults and command line
7482 options. */
7483 if (md.flags & EF_IA_64_ABI64)
7484 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf64);
7485 else
7486 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf32);
7487
7488 if (! ok)
7489 as_warn (_("Could not set architecture and machine"));
7490
7491 /* Set the pointer size and pointer shift size depending on md.flags */
7492
7493 if (md.flags & EF_IA_64_ABI64)
7494 {
7495 md.pointer_size = 8; /* pointers are 8 bytes */
7496 md.pointer_size_shift = 3; /* alignment is 8 bytes = 2^3 */
7497 }
7498 else
7499 {
7500 md.pointer_size = 4; /* pointers are 4 bytes */
7501 md.pointer_size_shift = 2; /* alignment is 4 bytes = 2^2 */
7502 }
7503
7504 md.mem_offset.hint = 0;
7505 md.path = 0;
7506 md.maxpaths = 0;
7507 md.entry_labels = NULL;
7508 }
7509
7510 /* Set the default options in md. Cannot do this in md_begin because
7511 that is called after md_parse_option which is where we set the
7512 options in md based on command line options. */
7513
7514 void
7515 ia64_init (int argc ATTRIBUTE_UNUSED, char **argv ATTRIBUTE_UNUSED)
7516 {
7517 md.flags = MD_FLAGS_DEFAULT;
7518 #ifndef TE_VMS
7519 /* Don't turn on dependency checking for VMS; it doesn't work there. */
7520 md.detect_dv = 1;
7521 #endif
7522 /* FIXME: We should change it to unwind_check_error someday. */
7523 md.unwind_check = unwind_check_warning;
7524 md.hint_b = hint_b_error;
7525 md.tune = itanium2;
7526 }
7527
7528 /* Return a string for the target object file format. */
7529
7530 const char *
7531 ia64_target_format (void)
7532 {
7533 if (OUTPUT_FLAVOR == bfd_target_elf_flavour)
7534 {
7535 if (md.flags & EF_IA_64_BE)
7536 {
7537 if (md.flags & EF_IA_64_ABI64)
7538 #if defined(TE_AIX50)
7539 return "elf64-ia64-aix-big";
7540 #elif defined(TE_HPUX)
7541 return "elf64-ia64-hpux-big";
7542 #else
7543 return "elf64-ia64-big";
7544 #endif
7545 else
7546 #if defined(TE_AIX50)
7547 return "elf32-ia64-aix-big";
7548 #elif defined(TE_HPUX)
7549 return "elf32-ia64-hpux-big";
7550 #else
7551 return "elf32-ia64-big";
7552 #endif
7553 }
7554 else
7555 {
7556 if (md.flags & EF_IA_64_ABI64)
7557 #if defined (TE_AIX50)
7558 return "elf64-ia64-aix-little";
7559 #elif defined (TE_VMS)
7560 {
7561 md.flags |= EF_IA_64_ARCHVER_1;
7562 return "elf64-ia64-vms";
7563 }
7564 #else
7565 return "elf64-ia64-little";
7566 #endif
7567 else
7568 #ifdef TE_AIX50
7569 return "elf32-ia64-aix-little";
7570 #else
7571 return "elf32-ia64-little";
7572 #endif
7573 }
7574 }
7575 else
7576 return "unknown-format";
7577 }
7578
7579 void
7580 ia64_end_of_source (void)
7581 {
7582 /* terminate insn group upon reaching end of file: */
7583 insn_group_break (1, 0, 0);
7584
7585 /* emits slots we haven't written yet: */
7586 ia64_flush_insns ();
7587
7588 bfd_set_private_flags (stdoutput, md.flags);
7589
7590 md.mem_offset.hint = 0;
7591 }
7592
7593 void
7594 ia64_start_line (void)
7595 {
7596 static int first;
7597
7598 if (!first) {
7599 /* Make sure we don't reference input_line_pointer[-1] when that's
7600 not valid. */
7601 first = 1;
7602 return;
7603 }
7604
7605 if (md.qp.X_op == O_register)
7606 as_bad (_("qualifying predicate not followed by instruction"));
7607 md.qp.X_op = O_absent;
7608
7609 if (ignore_input ())
7610 return;
7611
7612 if (input_line_pointer[0] == ';' && input_line_pointer[-1] == ';')
7613 {
7614 if (md.detect_dv && !md.explicit_mode)
7615 {
7616 static int warned;
7617
7618 if (!warned)
7619 {
7620 warned = 1;
7621 as_warn (_("Explicit stops are ignored in auto mode"));
7622 }
7623 }
7624 else
7625 insn_group_break (1, 0, 0);
7626 }
7627 else if (input_line_pointer[-1] == '{')
7628 {
7629 if (md.manual_bundling)
7630 as_warn (_("Found '{' when manual bundling is already turned on"));
7631 else
7632 CURR_SLOT.manual_bundling_on = 1;
7633 md.manual_bundling = 1;
7634
7635 /* Bundling is only acceptable in explicit mode
7636 or when in default automatic mode. */
7637 if (md.detect_dv && !md.explicit_mode)
7638 {
7639 if (!md.mode_explicitly_set
7640 && !md.default_explicit_mode)
7641 dot_dv_mode ('E');
7642 else
7643 as_warn (_("Found '{' after explicit switch to automatic mode"));
7644 }
7645 }
7646 else if (input_line_pointer[-1] == '}')
7647 {
7648 if (!md.manual_bundling)
7649 as_warn (_("Found '}' when manual bundling is off"));
7650 else
7651 PREV_SLOT.manual_bundling_off = 1;
7652 md.manual_bundling = 0;
7653
7654 /* switch back to automatic mode, if applicable */
7655 if (md.detect_dv
7656 && md.explicit_mode
7657 && !md.mode_explicitly_set
7658 && !md.default_explicit_mode)
7659 dot_dv_mode ('A');
7660 }
7661 }
7662
7663 /* This is a hook for ia64_frob_label, so that it can distinguish tags from
7664 labels. */
7665 static int defining_tag = 0;
7666
7667 int
7668 ia64_unrecognized_line (int ch)
7669 {
7670 switch (ch)
7671 {
7672 case '(':
7673 expression_and_evaluate (&md.qp);
7674 if (*input_line_pointer++ != ')')
7675 {
7676 as_bad (_("Expected ')'"));
7677 return 0;
7678 }
7679 if (md.qp.X_op != O_register)
7680 {
7681 as_bad (_("Qualifying predicate expected"));
7682 return 0;
7683 }
7684 if (md.qp.X_add_number < REG_P || md.qp.X_add_number >= REG_P + 64)
7685 {
7686 as_bad (_("Predicate register expected"));
7687 return 0;
7688 }
7689 return 1;
7690
7691 case '[':
7692 {
7693 char *s;
7694 char c;
7695 symbolS *tag;
7696 int temp;
7697
7698 if (md.qp.X_op == O_register)
7699 {
7700 as_bad (_("Tag must come before qualifying predicate."));
7701 return 0;
7702 }
7703
7704 /* This implements just enough of read_a_source_file in read.c to
7705 recognize labels. */
7706 if (is_name_beginner (*input_line_pointer))
7707 {
7708 c = get_symbol_name (&s);
7709 }
7710 else if (LOCAL_LABELS_FB
7711 && ISDIGIT (*input_line_pointer))
7712 {
7713 temp = 0;
7714 while (ISDIGIT (*input_line_pointer))
7715 temp = (temp * 10) + *input_line_pointer++ - '0';
7716 fb_label_instance_inc (temp);
7717 s = fb_label_name (temp, 0);
7718 c = *input_line_pointer;
7719 }
7720 else
7721 {
7722 s = NULL;
7723 c = '\0';
7724 }
7725 if (c != ':')
7726 {
7727 /* Put ':' back for error messages' sake. */
7728 *input_line_pointer++ = ':';
7729 as_bad (_("Expected ':'"));
7730 return 0;
7731 }
7732
7733 defining_tag = 1;
7734 tag = colon (s);
7735 defining_tag = 0;
7736 /* Put ':' back for error messages' sake. */
7737 *input_line_pointer++ = ':';
7738 if (*input_line_pointer++ != ']')
7739 {
7740 as_bad (_("Expected ']'"));
7741 return 0;
7742 }
7743 if (! tag)
7744 {
7745 as_bad (_("Tag name expected"));
7746 return 0;
7747 }
7748 return 1;
7749 }
7750
7751 default:
7752 break;
7753 }
7754
7755 /* Not a valid line. */
7756 return 0;
7757 }
7758
7759 void
7760 ia64_frob_label (struct symbol *sym)
7761 {
7762 struct label_fix *fix;
7763
7764 /* Tags need special handling since they are not bundle breaks like
7765 labels. */
7766 if (defining_tag)
7767 {
7768 fix = XOBNEW (&notes, struct label_fix);
7769 fix->sym = sym;
7770 fix->next = CURR_SLOT.tag_fixups;
7771 fix->dw2_mark_labels = FALSE;
7772 CURR_SLOT.tag_fixups = fix;
7773
7774 return;
7775 }
7776
7777 if (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
7778 {
7779 md.last_text_seg = now_seg;
7780 fix = XOBNEW (&notes, struct label_fix);
7781 fix->sym = sym;
7782 fix->next = CURR_SLOT.label_fixups;
7783 fix->dw2_mark_labels = dwarf2_loc_mark_labels;
7784 CURR_SLOT.label_fixups = fix;
7785
7786 /* Keep track of how many code entry points we've seen. */
7787 if (md.path == md.maxpaths)
7788 {
7789 md.maxpaths += 20;
7790 md.entry_labels = XRESIZEVEC (const char *, md.entry_labels,
7791 md.maxpaths);
7792 }
7793 md.entry_labels[md.path++] = S_GET_NAME (sym);
7794 }
7795 }
7796
7797 #ifdef TE_HPUX
7798 /* The HP-UX linker will give unresolved symbol errors for symbols
7799 that are declared but unused. This routine removes declared,
7800 unused symbols from an object. */
7801 int
7802 ia64_frob_symbol (struct symbol *sym)
7803 {
7804 if ((S_GET_SEGMENT (sym) == bfd_und_section_ptr && ! symbol_used_p (sym) &&
7805 ELF_ST_VISIBILITY (S_GET_OTHER (sym)) == STV_DEFAULT)
7806 || (S_GET_SEGMENT (sym) == bfd_abs_section_ptr
7807 && ! S_IS_EXTERNAL (sym)))
7808 return 1;
7809 return 0;
7810 }
7811 #endif
7812
7813 void
7814 ia64_flush_pending_output (void)
7815 {
7816 if (!md.keep_pending_output
7817 && bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
7818 {
7819 /* ??? This causes many unnecessary stop bits to be emitted.
7820 Unfortunately, it isn't clear if it is safe to remove this. */
7821 insn_group_break (1, 0, 0);
7822 ia64_flush_insns ();
7823 }
7824 }
7825
7826 /* Do ia64-specific expression optimization. All that's done here is
7827 to transform index expressions that are either due to the indexing
7828 of rotating registers or due to the indexing of indirect register
7829 sets. */
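/* For example, `foo[2]' after a `.rotr foo[4]' declaration resolves to the
   third register of the rotating set, while an indirect access such as
   `dbr[r3]' becomes an O_index expression on the dbr pseudo-register
   (illustrative syntax; the actual transformations are below).  */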
7830 int
7831 ia64_optimize_expr (expressionS *l, operatorT op, expressionS *r)
7832 {
7833 if (op != O_index)
7834 return 0;
7835 resolve_expression (l);
7836 if (l->X_op == O_register)
7837 {
7838 unsigned num_regs = l->X_add_number >> 16;
7839
7840 resolve_expression (r);
7841 if (num_regs)
7842 {
7843 /* Left side is a .rotX-allocated register. */
7844 if (r->X_op != O_constant)
7845 {
7846 as_bad (_("Rotating register index must be a non-negative constant"));
7847 r->X_add_number = 0;
7848 }
7849 else if ((valueT) r->X_add_number >= num_regs)
7850 {
7851 as_bad (_("Index out of range 0..%u"), num_regs - 1);
7852 r->X_add_number = 0;
7853 }
7854 l->X_add_number = (l->X_add_number & 0xffff) + r->X_add_number;
7855 return 1;
7856 }
7857 else if (l->X_add_number >= IND_CPUID && l->X_add_number <= IND_RR)
7858 {
7859 if (r->X_op != O_register
7860 || r->X_add_number < REG_GR
7861 || r->X_add_number > REG_GR + 127)
7862 {
7863 as_bad (_("Indirect register index must be a general register"));
7864 r->X_add_number = REG_GR;
7865 }
7866 l->X_op = O_index;
7867 l->X_op_symbol = md.indregsym[l->X_add_number - IND_CPUID];
7868 l->X_add_number = r->X_add_number;
7869 return 1;
7870 }
7871 }
7872 as_bad (_("Index can only be applied to rotating or indirect registers"));
7873 /* Fall back to a register whose use has as few side effects as
7874 possible, to minimize subsequent error messages. */
7875 l->X_op = O_register;
7876 l->X_add_number = REG_GR + 3;
7877 return 1;
7878 }
7879
7880 int
7881 ia64_parse_name (char *name, expressionS *e, char *nextcharP)
7882 {
7883 struct const_desc *cdesc;
7884 struct dynreg *dr = 0;
7885 unsigned int idx;
7886 struct symbol *sym;
7887 char *end;
7888
7889 if (*name == '@')
7890 {
7891 enum pseudo_type pseudo_type = PSEUDO_FUNC_NONE;
7892
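      /* e.g. `@gprel(foo)', `@pltoff(bar)', or a nested form such as
	 `@ltoff(@fptr(foo))', which is folded into a single combined
	 pseudo-function further below.  */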
7893 /* Find what relocation pseudo-function we're dealing with. */
7894 for (idx = 0; idx < NELEMS (pseudo_func); ++idx)
7895 if (pseudo_func[idx].name
7896 && pseudo_func[idx].name[0] == name[1]
7897 && strcmp (pseudo_func[idx].name + 1, name + 2) == 0)
7898 {
7899 pseudo_type = pseudo_func[idx].type;
7900 break;
7901 }
7902 switch (pseudo_type)
7903 {
7904 case PSEUDO_FUNC_RELOC:
7905 end = input_line_pointer;
7906 if (*nextcharP != '(')
7907 {
7908 as_bad (_("Expected '('"));
7909 break;
7910 }
7911 /* Skip '('. */
7912 ++input_line_pointer;
7913 expression (e);
7914 if (*input_line_pointer != ')')
7915 {
7916 as_bad (_("Missing ')'"));
7917 goto done;
7918 }
7919 /* Skip ')'. */
7920 ++input_line_pointer;
7921 #ifdef TE_VMS
7922 if (idx == FUNC_SLOTCOUNT_RELOC)
7923 {
7924 /* @slotcount can accept any expression. Canonicalize. */
7925 e->X_add_symbol = make_expr_symbol (e);
7926 e->X_op = O_symbol;
7927 e->X_add_number = 0;
7928 }
7929 #endif
7930 if (e->X_op != O_symbol)
7931 {
7932 if (e->X_op != O_pseudo_fixup)
7933 {
7934 as_bad (_("Not a symbolic expression"));
7935 goto done;
7936 }
7937 if (idx != FUNC_LT_RELATIVE)
7938 {
7939 as_bad (_("Illegal combination of relocation functions"));
7940 goto done;
7941 }
7942 switch (S_GET_VALUE (e->X_op_symbol))
7943 {
7944 case FUNC_FPTR_RELATIVE:
7945 idx = FUNC_LT_FPTR_RELATIVE; break;
7946 case FUNC_DTP_MODULE:
7947 idx = FUNC_LT_DTP_MODULE; break;
7948 case FUNC_DTP_RELATIVE:
7949 idx = FUNC_LT_DTP_RELATIVE; break;
7950 case FUNC_TP_RELATIVE:
7951 idx = FUNC_LT_TP_RELATIVE; break;
7952 default:
7953 as_bad (_("Illegal combination of relocation functions"));
7954 goto done;
7955 }
7956 }
7957 /* Make sure gas doesn't get rid of local symbols that are used
7958 in relocs. */
7959 e->X_op = O_pseudo_fixup;
7960 e->X_op_symbol = pseudo_func[idx].u.sym;
7961 done:
7962 *nextcharP = *input_line_pointer;
7963 break;
7964
7965 case PSEUDO_FUNC_CONST:
7966 e->X_op = O_constant;
7967 e->X_add_number = pseudo_func[idx].u.ival;
7968 break;
7969
7970 case PSEUDO_FUNC_REG:
7971 e->X_op = O_register;
7972 e->X_add_number = pseudo_func[idx].u.ival;
7973 break;
7974
7975 default:
7976 return 0;
7977 }
7978 return 1;
7979 }
7980
7981 /* first see if NAME is a known register name: */
7982 sym = hash_find (md.reg_hash, name);
7983 if (sym)
7984 {
7985 e->X_op = O_register;
7986 e->X_add_number = S_GET_VALUE (sym);
7987 return 1;
7988 }
7989
7990 cdesc = hash_find (md.const_hash, name);
7991 if (cdesc)
7992 {
7993 e->X_op = O_constant;
7994 e->X_add_number = cdesc->value;
7995 return 1;
7996 }
7997
7998 /* check for inN, locN, or outN: */
7999 idx = 0;
8000 switch (name[0])
8001 {
8002 case 'i':
8003 if (name[1] == 'n' && ISDIGIT (name[2]))
8004 {
8005 dr = &md.in;
8006 idx = 2;
8007 }
8008 break;
8009
8010 case 'l':
8011 if (name[1] == 'o' && name[2] == 'c' && ISDIGIT (name[3]))
8012 {
8013 dr = &md.loc;
8014 idx = 3;
8015 }
8016 break;
8017
8018 case 'o':
8019 if (name[1] == 'u' && name[2] == 't' && ISDIGIT (name[3]))
8020 {
8021 dr = &md.out;
8022 idx = 3;
8023 }
8024 break;
8025
8026 default:
8027 break;
8028 }
8029
8030 /* Ignore register numbers with leading zeroes, except zero itself. */
8031 if (dr && (name[idx] != '0' || name[idx + 1] == '\0'))
8032 {
8033 unsigned long regnum;
8034
8035 /* The name is inN, locN, or outN; parse the register number. */
8036 regnum = strtoul (name + idx, &end, 10);
8037 if (end > name + idx && *end == '\0' && regnum < 96)
8038 {
8039 if (regnum >= dr->num_regs)
8040 {
8041 if (!dr->num_regs)
8042 as_bad (_("No current frame"));
8043 else
8044 as_bad (_("Register number out of range 0..%u"),
8045 dr->num_regs - 1);
8046 regnum = 0;
8047 }
8048 e->X_op = O_register;
8049 e->X_add_number = dr->base + regnum;
8050 return 1;
8051 }
8052 }
8053
8054 end = xstrdup (name);
8055 name = ia64_canonicalize_symbol_name (end);
8056 if ((dr = hash_find (md.dynreg_hash, name)))
8057 {
8058 /* We've got ourselves the name of a rotating register set.
8059 Store the base register number in the low 16 bits of
8060 X_add_number and the size of the register set in the top 16
8061 bits. */
8062 e->X_op = O_register;
8063 e->X_add_number = dr->base | (dr->num_regs << 16);
8064 free (end);
8065 return 1;
8066 }
8067 free (end);
8068 return 0;
8069 }
8070
8071 /* Remove the '#' suffix that indicates a symbol as opposed to a register. */
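/* For example, `r4#' refers to a symbol named "r4" rather than to general
   register 4; redundant trailing `#' characters only draw a warning.  */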
8072
8073 char *
8074 ia64_canonicalize_symbol_name (char *name)
8075 {
8076 size_t len = strlen (name), full = len;
8077
8078 while (len > 0 && name[len - 1] == '#')
8079 --len;
8080 if (len <= 0)
8081 {
8082 if (full > 0)
8083 as_bad (_("Standalone `#' is illegal"));
8084 }
8085 else if (len < full - 1)
8086 as_warn (_("Redundant `#' suffix operators"));
8087 name[len] = '\0';
8088 return name;
8089 }
8090
8091 /* Return true if idesc is a conditional branch instruction. This excludes
8092 the modulo scheduled branches, and br.ia. Mod-sched branches are excluded
8093 because they always read/write resources regardless of the value of the
8094 qualifying predicate. br.ia must always use p0, and hence is always
8095 taken. Thus this function returns true for branches which can fall
8096 through, and which use no resources if they do fall through. */
8097
8098 static int
8099 is_conditional_branch (struct ia64_opcode *idesc)
8100 {
8101 /* br is a conditional branch. Everything that starts with br. except
8102 br.ia, br.c{loop,top,exit}, and br.w{top,exit} is a conditional branch.
8103 Everything that starts with brl is a conditional branch. */
8104 return (idesc->name[0] == 'b' && idesc->name[1] == 'r'
8105 && (idesc->name[2] == '\0'
8106 || (idesc->name[2] == '.' && idesc->name[3] != 'i'
8107 && idesc->name[3] != 'c' && idesc->name[3] != 'w')
8108 || idesc->name[2] == 'l'
8109 /* br.cond, br.call, br.clr */
8110 || (idesc->name[2] == '.' && idesc->name[3] == 'c'
8111 && (idesc->name[4] == 'a' || idesc->name[4] == 'o'
8112 || (idesc->name[4] == 'l' && idesc->name[5] == 'r')))));
8113 }
8114
8115 /* Return whether the given opcode is a taken branch. If there's any doubt,
8116 returns zero. */
8117
8118 static int
8119 is_taken_branch (struct ia64_opcode *idesc)
8120 {
8121 return ((is_conditional_branch (idesc) && CURR_SLOT.qp_regno == 0)
8122 || strncmp (idesc->name, "br.ia", 5) == 0);
8123 }
8124
8125 /* Return whether the given opcode is an interruption or rfi. If there's any
8126 doubt, returns zero. */
8127
8128 static int
8129 is_interruption_or_rfi (struct ia64_opcode *idesc)
8130 {
8131 if (strcmp (idesc->name, "rfi") == 0)
8132 return 1;
8133 return 0;
8134 }
8135
8136 /* Returns the index of the given dependency in the opcode's list of chks, or
8137 -1 if there is no dependency. */
8138
8139 static int
8140 depends_on (int depind, struct ia64_opcode *idesc)
8141 {
8142 int i;
8143 const struct ia64_opcode_dependency *dep = idesc->dependencies;
8144 for (i = 0; i < dep->nchks; i++)
8145 {
8146 if (depind == DEP (dep->chks[i]))
8147 return i;
8148 }
8149 return -1;
8150 }
8151
8152 /* Determine a set of specific resources used for a particular resource
8153 class. Returns the number of specific resources identified. For those
8154 cases which are not determinable statically, the resource returned is
8155 marked nonspecific.
8156
8157 Meanings of value in 'NOTE':
8158 1) only read/write when the register number is explicitly encoded in the
8159 insn.
8160 2) only read CFM when accessing a rotating GR, FR, or PR. mov pr only
8161 accesses CFM when qualifying predicate is in the rotating region.
8162 3) general register value is used to specify an indirect register; not
8163 determinable statically.
8164 4) only read the given resource when bits 7:0 of the indirect index
8165 register value does not match the register number of the resource; not
8166 determinable statically.
8167 5) all rules are implementation specific.
8168 6) only when both the index specified by the reader and the index specified
8169 by the writer have the same value in bits 63:61; not determinable
8170 statically.
8171 7) only access the specified resource when the corresponding mask bit is
8172 set
8173 8) PSR.dfh is only read when these insns reference FR32-127. PSR.dfl is
8174 only read when these insns reference FR2-31
8175 9) PSR.mfl is only written when these insns write FR2-31. PSR.mfh is only
8176 written when these insns write FR32-127
8177 10) The PSR.bn bit is only accessed when one of GR16-31 is specified in the
8178 instruction
8179 11) The target predicates are written independently of PR[qp], but source
8180 registers are only read if PR[qp] is true. Since the state of PR[qp]
8181 cannot statically be determined, all source registers are marked used.
8182 12) This insn only reads the specified predicate register when that
8183 register is the PR[qp].
8184 13) This reference to ld-c only applies to the GR whose value is loaded
8185 with data returned from memory, not the post-incremented address register.
8186 14) The RSE resource includes the implementation-specific RSE internal
8187 state resources. At least one (and possibly more) of these resources are
8188 read by each instruction listed in IC:rse-readers. At least one (and
8189 possibly more) of these resources are written by each insn listed in
8190 IC:rse-writers.
8191 15+16) Represents reserved instructions, which the assembler does not
8192 generate.
8193 17) CR[TPR] has a RAW dependency only between mov-to-CR-TPR and
8194 mov-to-PSR-l or ssm instructions that set PSR.i, PSR.pp or PSR.up.
8195
8196 Memory resources (i.e. locations in memory) are *not* marked or tracked by
8197 this code; there are no dependency violations based on memory access.
8198 */
8199
8200 #define MAX_SPECS 256
8201 #define DV_CHK 1
8202 #define DV_REG 0
8203
8204 static int
8205 specify_resource (const struct ia64_dependency *dep,
8206 struct ia64_opcode *idesc,
8207 /* is this a DV chk or a DV reg? */
8208 int type,
8209 /* returned specific resources */
8210 struct rsrc specs[MAX_SPECS],
8211 /* resource note for this insn's usage */
8212 int note,
8213 /* which execution path to examine */
8214 int path)
8215 {
8216 int count = 0;
8217 int i;
8218 int rsrc_write = 0;
8219 struct rsrc tmpl;
8220
8221 if (dep->mode == IA64_DV_WAW
8222 || (dep->mode == IA64_DV_RAW && type == DV_REG)
8223 || (dep->mode == IA64_DV_WAR && type == DV_CHK))
8224 rsrc_write = 1;
8225
8226 /* template for any resources we identify */
8227 tmpl.dependency = dep;
8228 tmpl.note = note;
8229 tmpl.insn_srlz = tmpl.data_srlz = 0;
8230 tmpl.qp_regno = CURR_SLOT.qp_regno;
8231 tmpl.link_to_qp_branch = 1;
8232 tmpl.mem_offset.hint = 0;
8233 tmpl.mem_offset.offset = 0;
8234 tmpl.mem_offset.base = 0;
8235 tmpl.specific = 1;
8236 tmpl.index = -1;
8237 tmpl.cmp_type = CMP_NONE;
8238 tmpl.depind = 0;
8239 tmpl.file = NULL;
8240 tmpl.line = 0;
8241 tmpl.path = 0;
8242
8243 #define UNHANDLED \
8244 as_warn (_("Unhandled dependency %s for %s (%s), note %d"), \
8245 dep->name, idesc->name, (rsrc_write?"write":"read"), note)
8246 #define KNOWN(REG) (gr_values[REG].known && gr_values[REG].path >= path)
8247
8248 /* we don't need to track these */
8249 if (dep->semantics == IA64_DVS_NONE)
8250 return 0;
8251
8252 switch (dep->specifier)
8253 {
8254 case IA64_RS_AR_K:
8255 if (note == 1)
8256 {
8257 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8258 {
8259 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8260 if (regno >= 0 && regno <= 7)
8261 {
8262 specs[count] = tmpl;
8263 specs[count++].index = regno;
8264 }
8265 }
8266 }
8267 else if (note == 0)
8268 {
8269 for (i = 0; i < 8; i++)
8270 {
8271 specs[count] = tmpl;
8272 specs[count++].index = i;
8273 }
8274 }
8275 else
8276 {
8277 UNHANDLED;
8278 }
8279 break;
8280
8281 case IA64_RS_AR_UNAT:
8282 /* This is a mov =AR or mov AR= instruction. */
8283 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8284 {
8285 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8286 if (regno == AR_UNAT)
8287 {
8288 specs[count++] = tmpl;
8289 }
8290 }
8291 else
8292 {
8293 /* This is a spill/fill, or other instruction that modifies the
8294 unat register. */
8295
8296 /* Unless we can determine the specific bits used, mark the whole
8297 thing; bits 8:3 of the memory address indicate the bit used in
8298 UNAT. The .mem.offset hint may be used to eliminate a small
8299 subset of conflicts. */
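	  /* Illustration (hypothetical values): a spill whose address has
	     bits 8:3 equal to 5 -- e.g. one declared with ".mem.offset 40, 0"
	     -- touches only UNAT bit 5, so only that index is marked below.  */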
8300 specs[count] = tmpl;
8301 if (md.mem_offset.hint)
8302 {
8303 if (md.debug_dv)
8304 fprintf (stderr, " Using hint for spill/fill\n");
8305 /* The index isn't actually used, just set it to something
8306 approximating the bit index. */
8307 specs[count].index = (md.mem_offset.offset >> 3) & 0x3F;
8308 specs[count].mem_offset.hint = 1;
8309 specs[count].mem_offset.offset = md.mem_offset.offset;
8310 specs[count++].mem_offset.base = md.mem_offset.base;
8311 }
8312 else
8313 {
8314 specs[count++].specific = 0;
8315 }
8316 }
8317 break;
8318
8319 case IA64_RS_AR:
8320 if (note == 1)
8321 {
8322 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8323 {
8324 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8325 if ((regno >= 8 && regno <= 15)
8326 || (regno >= 20 && regno <= 23)
8327 || (regno >= 31 && regno <= 39)
8328 || (regno >= 41 && regno <= 47)
8329 || (regno >= 67 && regno <= 111))
8330 {
8331 specs[count] = tmpl;
8332 specs[count++].index = regno;
8333 }
8334 }
8335 }
8336 else
8337 {
8338 UNHANDLED;
8339 }
8340 break;
8341
8342 case IA64_RS_ARb:
8343 if (note == 1)
8344 {
8345 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8346 {
8347 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8348 if ((regno >= 48 && regno <= 63)
8349 || (regno >= 112 && regno <= 127))
8350 {
8351 specs[count] = tmpl;
8352 specs[count++].index = regno;
8353 }
8354 }
8355 }
8356 else if (note == 0)
8357 {
8358 for (i = 48; i < 64; i++)
8359 {
8360 specs[count] = tmpl;
8361 specs[count++].index = i;
8362 }
8363 for (i = 112; i < 128; i++)
8364 {
8365 specs[count] = tmpl;
8366 specs[count++].index = i;
8367 }
8368 }
8369 else
8370 {
8371 UNHANDLED;
8372 }
8373 break;
8374
8375 case IA64_RS_BR:
8376 if (note != 1)
8377 {
8378 UNHANDLED;
8379 }
8380 else
8381 {
8382 if (rsrc_write)
8383 {
8384 for (i = 0; i < idesc->num_outputs; i++)
8385 if (idesc->operands[i] == IA64_OPND_B1
8386 || idesc->operands[i] == IA64_OPND_B2)
8387 {
8388 specs[count] = tmpl;
8389 specs[count++].index =
8390 CURR_SLOT.opnd[i].X_add_number - REG_BR;
8391 }
8392 }
8393 else
8394 {
8395 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8396 if (idesc->operands[i] == IA64_OPND_B1
8397 || idesc->operands[i] == IA64_OPND_B2)
8398 {
8399 specs[count] = tmpl;
8400 specs[count++].index =
8401 CURR_SLOT.opnd[i].X_add_number - REG_BR;
8402 }
8403 }
8404 }
8405 break;
8406
8407 case IA64_RS_CPUID: /* four or more registers */
8408 if (note == 3)
8409 {
8410 if (idesc->operands[!rsrc_write] == IA64_OPND_CPUID_R3)
8411 {
8412 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8413 if (regno >= 0 && regno < NELEMS (gr_values)
8414 && KNOWN (regno))
8415 {
8416 specs[count] = tmpl;
8417 specs[count++].index = gr_values[regno].value & 0xFF;
8418 }
8419 else
8420 {
8421 specs[count] = tmpl;
8422 specs[count++].specific = 0;
8423 }
8424 }
8425 }
8426 else
8427 {
8428 UNHANDLED;
8429 }
8430 break;
8431
8432 case IA64_RS_DBR: /* four or more registers */
8433 if (note == 3)
8434 {
8435 if (idesc->operands[!rsrc_write] == IA64_OPND_DBR_R3)
8436 {
8437 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8438 if (regno >= 0 && regno < NELEMS (gr_values)
8439 && KNOWN (regno))
8440 {
8441 specs[count] = tmpl;
8442 specs[count++].index = gr_values[regno].value & 0xFF;
8443 }
8444 else
8445 {
8446 specs[count] = tmpl;
8447 specs[count++].specific = 0;
8448 }
8449 }
8450 }
8451 else if (note == 0 && !rsrc_write)
8452 {
8453 specs[count] = tmpl;
8454 specs[count++].specific = 0;
8455 }
8456 else
8457 {
8458 UNHANDLED;
8459 }
8460 break;
8461
8462 case IA64_RS_IBR: /* four or more registers */
8463 if (note == 3)
8464 {
8465 if (idesc->operands[!rsrc_write] == IA64_OPND_IBR_R3)
8466 {
8467 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8468 if (regno >= 0 && regno < NELEMS (gr_values)
8469 && KNOWN (regno))
8470 {
8471 specs[count] = tmpl;
8472 specs[count++].index = gr_values[regno].value & 0xFF;
8473 }
8474 else
8475 {
8476 specs[count] = tmpl;
8477 specs[count++].specific = 0;
8478 }
8479 }
8480 }
8481 else
8482 {
8483 UNHANDLED;
8484 }
8485 break;
8486
8487 case IA64_RS_MSR:
8488 if (note == 5)
8489 {
8490 /* These are implementation specific. Force all references to
8491 conflict with all other references. */
8492 specs[count] = tmpl;
8493 specs[count++].specific = 0;
8494 }
8495 else
8496 {
8497 UNHANDLED;
8498 }
8499 break;
8500
8501 case IA64_RS_PKR: /* 16 or more registers */
8502 if (note == 3 || note == 4)
8503 {
8504 if (idesc->operands[!rsrc_write] == IA64_OPND_PKR_R3)
8505 {
8506 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8507 if (regno >= 0 && regno < NELEMS (gr_values)
8508 && KNOWN (regno))
8509 {
8510 if (note == 3)
8511 {
8512 specs[count] = tmpl;
8513 specs[count++].index = gr_values[regno].value & 0xFF;
8514 }
8515 else
8516 for (i = 0; i < NELEMS (gr_values); i++)
8517 {
8518 /* Uses all registers *except* the one in R3. */
8519 if ((unsigned)i != (gr_values[regno].value & 0xFF))
8520 {
8521 specs[count] = tmpl;
8522 specs[count++].index = i;
8523 }
8524 }
8525 }
8526 else
8527 {
8528 specs[count] = tmpl;
8529 specs[count++].specific = 0;
8530 }
8531 }
8532 }
8533 else if (note == 0)
8534 {
8535 /* probe et al. */
8536 specs[count] = tmpl;
8537 specs[count++].specific = 0;
8538 }
8539 break;
8540
8541 case IA64_RS_PMC: /* four or more registers */
8542 if (note == 3)
8543 {
8544 if (idesc->operands[!rsrc_write] == IA64_OPND_PMC_R3
8545 || (!rsrc_write && idesc->operands[1] == IA64_OPND_PMD_R3))
8546
8547 {
8548 int reg_index = ((idesc->operands[1] == IA64_OPND_R3 && !rsrc_write)
8549 ? 1 : !rsrc_write);
8550 int regno = CURR_SLOT.opnd[reg_index].X_add_number - REG_GR;
8551 if (regno >= 0 && regno < NELEMS (gr_values)
8552 && KNOWN (regno))
8553 {
8554 specs[count] = tmpl;
8555 specs[count++].index = gr_values[regno].value & 0xFF;
8556 }
8557 else
8558 {
8559 specs[count] = tmpl;
8560 specs[count++].specific = 0;
8561 }
8562 }
8563 }
8564 else
8565 {
8566 UNHANDLED;
8567 }
8568 break;
8569
8570 case IA64_RS_PMD: /* four or more registers */
8571 if (note == 3)
8572 {
8573 if (idesc->operands[!rsrc_write] == IA64_OPND_PMD_R3)
8574 {
8575 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8576 if (regno >= 0 && regno < NELEMS (gr_values)
8577 && KNOWN (regno))
8578 {
8579 specs[count] = tmpl;
8580 specs[count++].index = gr_values[regno].value & 0xFF;
8581 }
8582 else
8583 {
8584 specs[count] = tmpl;
8585 specs[count++].specific = 0;
8586 }
8587 }
8588 }
8589 else
8590 {
8591 UNHANDLED;
8592 }
8593 break;
8594
8595 case IA64_RS_RR: /* eight registers */
8596 if (note == 6)
8597 {
8598 if (idesc->operands[!rsrc_write] == IA64_OPND_RR_R3)
8599 {
8600 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8601 if (regno >= 0 && regno < NELEMS (gr_values)
8602 && KNOWN (regno))
8603 {
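		  /* The region register number is carried in the top three
		     bits (63:61) of the address held in the GR, hence the
		     shift by 61.  */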
8604 specs[count] = tmpl;
8605 specs[count++].index = (gr_values[regno].value >> 61) & 0x7;
8606 }
8607 else
8608 {
8609 specs[count] = tmpl;
8610 specs[count++].specific = 0;
8611 }
8612 }
8613 }
8614 else if (note == 0 && !rsrc_write)
8615 {
8616 specs[count] = tmpl;
8617 specs[count++].specific = 0;
8618 }
8619 else
8620 {
8621 UNHANDLED;
8622 }
8623 break;
8624
8625 case IA64_RS_CR_IRR:
8626 if (note == 0)
8627 {
8628 /* handle mov-from-CR-IVR; it's a read that writes CR[IRR] */
8629 int regno = CURR_SLOT.opnd[1].X_add_number - REG_CR;
8630 if (rsrc_write
8631 && idesc->operands[1] == IA64_OPND_CR3
8632 && regno == CR_IVR)
8633 {
8634 for (i = 0; i < 4; i++)
8635 {
8636 specs[count] = tmpl;
8637 specs[count++].index = CR_IRR0 + i;
8638 }
8639 }
8640 }
8641 else if (note == 1)
8642 {
8643 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8644 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8645 && regno >= CR_IRR0
8646 && regno <= CR_IRR3)
8647 {
8648 specs[count] = tmpl;
8649 specs[count++].index = regno;
8650 }
8651 }
8652 else
8653 {
8654 UNHANDLED;
8655 }
8656 break;
8657
8658 case IA64_RS_CR_IIB:
8659 if (note != 0)
8660 {
8661 UNHANDLED;
8662 }
8663 else
8664 {
8665 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8666 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8667 && (regno == CR_IIB0 || regno == CR_IIB1))
8668 {
8669 specs[count] = tmpl;
8670 specs[count++].index = regno;
8671 }
8672 }
8673 break;
8674
8675 case IA64_RS_CR_LRR:
8676 if (note != 1)
8677 {
8678 UNHANDLED;
8679 }
8680 else
8681 {
8682 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8683 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8684 && (regno == CR_LRR0 || regno == CR_LRR1))
8685 {
8686 specs[count] = tmpl;
8687 specs[count++].index = regno;
8688 }
8689 }
8690 break;
8691
8692 case IA64_RS_CR:
8693 if (note == 1)
8694 {
8695 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
8696 {
8697 specs[count] = tmpl;
8698 specs[count++].index =
8699 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8700 }
8701 }
8702 else
8703 {
8704 UNHANDLED;
8705 }
8706 break;
8707
8708 case IA64_RS_DAHR:
8709 if (note == 0)
8710 {
8711 if (idesc->operands[!rsrc_write] == IA64_OPND_DAHR3)
8712 {
8713 specs[count] = tmpl;
8714 specs[count++].index =
8715 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_DAHR;
8716 }
8717 }
8718 else
8719 {
8720 UNHANDLED;
8721 }
8722 break;
8723
8724 case IA64_RS_FR:
8725 case IA64_RS_FRb:
8726 if (note != 1)
8727 {
8728 UNHANDLED;
8729 }
8730 else if (rsrc_write)
8731 {
8732 if (dep->specifier == IA64_RS_FRb
8733 && idesc->operands[0] == IA64_OPND_F1)
8734 {
8735 specs[count] = tmpl;
8736 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_FR;
8737 }
8738 }
8739 else
8740 {
8741 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8742 {
8743 if (idesc->operands[i] == IA64_OPND_F2
8744 || idesc->operands[i] == IA64_OPND_F3
8745 || idesc->operands[i] == IA64_OPND_F4)
8746 {
8747 specs[count] = tmpl;
8748 specs[count++].index =
8749 CURR_SLOT.opnd[i].X_add_number - REG_FR;
8750 }
8751 }
8752 }
8753 break;
8754
8755 case IA64_RS_GR:
8756 if (note == 13)
8757 {
8758 /* This reference applies only to the GR whose value is loaded with
8759 data returned from memory. */
8760 specs[count] = tmpl;
8761 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_GR;
8762 }
8763 else if (note == 1)
8764 {
8765 if (rsrc_write)
8766 {
8767 for (i = 0; i < idesc->num_outputs; i++)
8768 if (idesc->operands[i] == IA64_OPND_R1
8769 || idesc->operands[i] == IA64_OPND_R2
8770 || idesc->operands[i] == IA64_OPND_R3)
8771 {
8772 specs[count] = tmpl;
8773 specs[count++].index =
8774 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8775 }
8776 if (idesc->flags & IA64_OPCODE_POSTINC)
8777 for (i = 0; i < NELEMS (idesc->operands); i++)
8778 if (idesc->operands[i] == IA64_OPND_MR3)
8779 {
8780 specs[count] = tmpl;
8781 specs[count++].index =
8782 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8783 }
8784 }
8785 else
8786 {
8787 /* Look for anything that reads a GR. */
8788 for (i = 0; i < NELEMS (idesc->operands); i++)
8789 {
8790 if (idesc->operands[i] == IA64_OPND_MR3
8791 || idesc->operands[i] == IA64_OPND_CPUID_R3
8792 || idesc->operands[i] == IA64_OPND_DBR_R3
8793 || idesc->operands[i] == IA64_OPND_IBR_R3
8794 || idesc->operands[i] == IA64_OPND_MSR_R3
8795 || idesc->operands[i] == IA64_OPND_PKR_R3
8796 || idesc->operands[i] == IA64_OPND_PMC_R3
8797 || idesc->operands[i] == IA64_OPND_PMD_R3
8798 || idesc->operands[i] == IA64_OPND_DAHR_R3
8799 || idesc->operands[i] == IA64_OPND_RR_R3
8800 || ((i >= idesc->num_outputs)
8801 && (idesc->operands[i] == IA64_OPND_R1
8802 || idesc->operands[i] == IA64_OPND_R2
8803 || idesc->operands[i] == IA64_OPND_R3
8804 /* addl source register. */
8805 || idesc->operands[i] == IA64_OPND_R3_2)))
8806 {
8807 specs[count] = tmpl;
8808 specs[count++].index =
8809 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8810 }
8811 }
8812 }
8813 }
8814 else
8815 {
8816 UNHANDLED;
8817 }
8818 break;
8819
8820 /* This is the same as IA64_RS_PRr, except that the register range is
8821 from 1 - 15, and there are no rotating register reads/writes here. */
8822 case IA64_RS_PR:
8823 if (note == 0)
8824 {
8825 for (i = 1; i < 16; i++)
8826 {
8827 specs[count] = tmpl;
8828 specs[count++].index = i;
8829 }
8830 }
8831 else if (note == 7)
8832 {
8833 valueT mask = 0;
8834 /* Mark only those registers indicated by the mask. */
8835 if (rsrc_write)
8836 {
8837 mask = CURR_SLOT.opnd[2].X_add_number;
8838 for (i = 1; i < 16; i++)
8839 if (mask & ((valueT) 1 << i))
8840 {
8841 specs[count] = tmpl;
8842 specs[count++].index = i;
8843 }
8844 }
8845 else
8846 {
8847 UNHANDLED;
8848 }
8849 }
8850 else if (note == 11) /* note 11 implies note 1 as well */
8851 {
8852 if (rsrc_write)
8853 {
8854 for (i = 0; i < idesc->num_outputs; i++)
8855 {
8856 if (idesc->operands[i] == IA64_OPND_P1
8857 || idesc->operands[i] == IA64_OPND_P2)
8858 {
8859 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8860 if (regno >= 1 && regno < 16)
8861 {
8862 specs[count] = tmpl;
8863 specs[count++].index = regno;
8864 }
8865 }
8866 }
8867 }
8868 else
8869 {
8870 UNHANDLED;
8871 }
8872 }
8873 else if (note == 12)
8874 {
8875 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8876 {
8877 specs[count] = tmpl;
8878 specs[count++].index = CURR_SLOT.qp_regno;
8879 }
8880 }
8881 else if (note == 1)
8882 {
8883 if (rsrc_write)
8884 {
8885 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8886 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8887 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8888 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8889
8890 if ((idesc->operands[0] == IA64_OPND_P1
8891 || idesc->operands[0] == IA64_OPND_P2)
8892 && p1 >= 1 && p1 < 16)
8893 {
8894 specs[count] = tmpl;
8895 specs[count].cmp_type =
8896 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8897 specs[count++].index = p1;
8898 }
8899 if ((idesc->operands[1] == IA64_OPND_P1
8900 || idesc->operands[1] == IA64_OPND_P2)
8901 && p2 >= 1 && p2 < 16)
8902 {
8903 specs[count] = tmpl;
8904 specs[count].cmp_type =
8905 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8906 specs[count++].index = p2;
8907 }
8908 }
8909 else
8910 {
8911 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8912 {
8913 specs[count] = tmpl;
8914 specs[count++].index = CURR_SLOT.qp_regno;
8915 }
8916 if (idesc->operands[1] == IA64_OPND_PR)
8917 {
8918 for (i = 1; i < 16; i++)
8919 {
8920 specs[count] = tmpl;
8921 specs[count++].index = i;
8922 }
8923 }
8924 }
8925 }
8926 else
8927 {
8928 UNHANDLED;
8929 }
8930 break;
8931
8932 /* This is the general case for PRs. IA64_RS_PR and IA64_RS_PR63 are
8933 simplified cases of this. */
8934 case IA64_RS_PRr:
8935 if (note == 0)
8936 {
8937 for (i = 16; i < 63; i++)
8938 {
8939 specs[count] = tmpl;
8940 specs[count++].index = i;
8941 }
8942 }
8943 else if (note == 7)
8944 {
8945 valueT mask = 0;
8946 /* Mark only those registers indicated by the mask. */
8947 if (rsrc_write
8948 && idesc->operands[0] == IA64_OPND_PR)
8949 {
8950 mask = CURR_SLOT.opnd[2].X_add_number;
8951 if (mask & ((valueT) 1 << 16))
8952 for (i = 16; i < 63; i++)
8953 {
8954 specs[count] = tmpl;
8955 specs[count++].index = i;
8956 }
8957 }
8958 else if (rsrc_write
8959 && idesc->operands[0] == IA64_OPND_PR_ROT)
8960 {
8961 for (i = 16; i < 63; i++)
8962 {
8963 specs[count] = tmpl;
8964 specs[count++].index = i;
8965 }
8966 }
8967 else
8968 {
8969 UNHANDLED;
8970 }
8971 }
8972 else if (note == 11) /* note 11 implies note 1 as well */
8973 {
8974 if (rsrc_write)
8975 {
8976 for (i = 0; i < idesc->num_outputs; i++)
8977 {
8978 if (idesc->operands[i] == IA64_OPND_P1
8979 || idesc->operands[i] == IA64_OPND_P2)
8980 {
8981 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8982 if (regno >= 16 && regno < 63)
8983 {
8984 specs[count] = tmpl;
8985 specs[count++].index = regno;
8986 }
8987 }
8988 }
8989 }
8990 else
8991 {
8992 UNHANDLED;
8993 }
8994 }
8995 else if (note == 12)
8996 {
8997 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
8998 {
8999 specs[count] = tmpl;
9000 specs[count++].index = CURR_SLOT.qp_regno;
9001 }
9002 }
9003 else if (note == 1)
9004 {
9005 if (rsrc_write)
9006 {
9007 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9008 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9009 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
9010 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
9011
9012 if ((idesc->operands[0] == IA64_OPND_P1
9013 || idesc->operands[0] == IA64_OPND_P2)
9014 && p1 >= 16 && p1 < 63)
9015 {
9016 specs[count] = tmpl;
9017 specs[count].cmp_type =
9018 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
9019 specs[count++].index = p1;
9020 }
9021 if ((idesc->operands[1] == IA64_OPND_P1
9022 || idesc->operands[1] == IA64_OPND_P2)
9023 && p2 >= 16 && p2 < 63)
9024 {
9025 specs[count] = tmpl;
9026 specs[count].cmp_type =
9027 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
9028 specs[count++].index = p2;
9029 }
9030 }
9031 else
9032 {
9033 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
9034 {
9035 specs[count] = tmpl;
9036 specs[count++].index = CURR_SLOT.qp_regno;
9037 }
9038 if (idesc->operands[1] == IA64_OPND_PR)
9039 {
9040 for (i = 16; i < 63; i++)
9041 {
9042 specs[count] = tmpl;
9043 specs[count++].index = i;
9044 }
9045 }
9046 }
9047 }
9048 else
9049 {
9050 UNHANDLED;
9051 }
9052 break;
9053
9054 case IA64_RS_PSR:
9055 /* Verify that the instruction is using the PSR bit indicated in
9056 dep->regindex. */
9057 if (note == 0)
9058 {
9059 if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_UM)
9060 {
9061 if (dep->regindex < 6)
9062 {
9063 specs[count++] = tmpl;
9064 }
9065 }
9066 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR)
9067 {
9068 if (dep->regindex < 32
9069 || dep->regindex == 35
9070 || dep->regindex == 36
9071 || (!rsrc_write && dep->regindex == PSR_CPL))
9072 {
9073 specs[count++] = tmpl;
9074 }
9075 }
9076 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_L)
9077 {
9078 if (dep->regindex < 32
9079 || dep->regindex == 35
9080 || dep->regindex == 36
9081 || (rsrc_write && dep->regindex == PSR_CPL))
9082 {
9083 specs[count++] = tmpl;
9084 }
9085 }
9086 else
9087 {
9088 /* Several PSR bits have very specific dependencies. */
9089 switch (dep->regindex)
9090 {
9091 default:
9092 specs[count++] = tmpl;
9093 break;
9094 case PSR_IC:
9095 if (rsrc_write)
9096 {
9097 specs[count++] = tmpl;
9098 }
9099 else
9100 {
9101 /* Only certain CR accesses use PSR.ic */
9102 if (idesc->operands[0] == IA64_OPND_CR3
9103 || idesc->operands[1] == IA64_OPND_CR3)
9104 {
9105 int reg_index =
9106 ((idesc->operands[0] == IA64_OPND_CR3)
9107 ? 0 : 1);
9108 int regno =
9109 CURR_SLOT.opnd[reg_index].X_add_number - REG_CR;
9110
9111 switch (regno)
9112 {
9113 default:
9114 break;
9115 case CR_ITIR:
9116 case CR_IFS:
9117 case CR_IIM:
9118 case CR_IIP:
9119 case CR_IPSR:
9120 case CR_ISR:
9121 case CR_IFA:
9122 case CR_IHA:
9123 case CR_IIB0:
9124 case CR_IIB1:
9125 case CR_IIPA:
9126 specs[count++] = tmpl;
9127 break;
9128 }
9129 }
9130 }
9131 break;
9132 case PSR_CPL:
9133 if (rsrc_write)
9134 {
9135 specs[count++] = tmpl;
9136 }
9137 else
9138 {
9139 /* Only some AR accesses use cpl */
9140 if (idesc->operands[0] == IA64_OPND_AR3
9141 || idesc->operands[1] == IA64_OPND_AR3)
9142 {
9143 int reg_index =
9144 ((idesc->operands[0] == IA64_OPND_AR3)
9145 ? 0 : 1);
9146 int regno =
9147 CURR_SLOT.opnd[reg_index].X_add_number - REG_AR;
9148
9149 if (regno == AR_ITC
9150 || regno == AR_RUC
9151 || (reg_index == 0
9152 && (regno == AR_RSC
9153 || (regno >= AR_K0
9154 && regno <= AR_K7))))
9155 {
9156 specs[count++] = tmpl;
9157 }
9158 }
9159 else
9160 {
9161 specs[count++] = tmpl;
9162 }
9163 break;
9164 }
9165 }
9166 }
9167 }
9168 else if (note == 7)
9169 {
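	      /* Note 7: the insn carries an explicit PSR bit mask as a
		 24-bit immediate (as in ssm/rsm); only the bit named by
		 dep->regindex is of interest here.  */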
9170 valueT mask = 0;
9171 if (idesc->operands[0] == IA64_OPND_IMMU24)
9172 {
9173 mask = CURR_SLOT.opnd[0].X_add_number;
9174 }
9175 else
9176 {
9177 UNHANDLED;
9178 }
9179 if (mask & ((valueT) 1 << dep->regindex))
9180 {
9181 specs[count++] = tmpl;
9182 }
9183 }
9184 else if (note == 8)
9185 {
9186 int min = dep->regindex == PSR_DFL ? 2 : 32;
9187 int max = dep->regindex == PSR_DFL ? 31 : 127;
9188 /* dfh is read on FR32-127; dfl is read on FR2-31 */
9189 for (i = 0; i < NELEMS (idesc->operands); i++)
9190 {
9191 if (idesc->operands[i] == IA64_OPND_F1
9192 || idesc->operands[i] == IA64_OPND_F2
9193 || idesc->operands[i] == IA64_OPND_F3
9194 || idesc->operands[i] == IA64_OPND_F4)
9195 {
9196 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9197 if (reg >= min && reg <= max)
9198 {
9199 specs[count++] = tmpl;
9200 }
9201 }
9202 }
9203 }
9204 else if (note == 9)
9205 {
9206 int min = dep->regindex == PSR_MFL ? 2 : 32;
9207 int max = dep->regindex == PSR_MFL ? 31 : 127;
9208 /* mfh is read on writes to FR32-127; mfl is read on writes to
9209 FR2-31 */
9210 for (i = 0; i < idesc->num_outputs; i++)
9211 {
9212 if (idesc->operands[i] == IA64_OPND_F1)
9213 {
9214 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9215 if (reg >= min && reg <= max)
9216 {
9217 specs[count++] = tmpl;
9218 }
9219 }
9220 }
9221 }
9222 else if (note == 10)
9223 {
9224 for (i = 0; i < NELEMS (idesc->operands); i++)
9225 {
9226 if (idesc->operands[i] == IA64_OPND_R1
9227 || idesc->operands[i] == IA64_OPND_R2
9228 || idesc->operands[i] == IA64_OPND_R3)
9229 {
9230 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9231 if (regno >= 16 && regno <= 31)
9232 {
9233 specs[count++] = tmpl;
9234 }
9235 }
9236 }
9237 }
9238 else
9239 {
9240 UNHANDLED;
9241 }
9242 break;
9243
9244 case IA64_RS_AR_FPSR:
9245 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
9246 {
9247 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
9248 if (regno == AR_FPSR)
9249 {
9250 specs[count++] = tmpl;
9251 }
9252 }
9253 else
9254 {
9255 specs[count++] = tmpl;
9256 }
9257 break;
9258
9259 case IA64_RS_ARX:
9260 /* Handle all AR[REG] resources */
9261 if (note == 0 || note == 1)
9262 {
9263 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
9264 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3
9265 && regno == dep->regindex)
9266 {
9267 specs[count++] = tmpl;
9268 }
9269 /* other AR[REG] resources may be affected by AR accesses */
9270 else if (idesc->operands[0] == IA64_OPND_AR3)
9271 {
9272 /* AR[] writes */
9273 regno = CURR_SLOT.opnd[0].X_add_number - REG_AR;
9274 switch (dep->regindex)
9275 {
9276 default:
9277 break;
9278 case AR_BSP:
9279 case AR_RNAT:
9280 if (regno == AR_BSPSTORE)
9281 {
9282 specs[count++] = tmpl;
9283 }
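		/* Falls through to the AR_RSC handling below.  */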
9284 case AR_RSC:
9285 		if (!rsrc_write
9286 		    && (regno == AR_BSPSTORE
9287 || regno == AR_RNAT))
9288 {
9289 specs[count++] = tmpl;
9290 }
9291 break;
9292 }
9293 }
9294 else if (idesc->operands[1] == IA64_OPND_AR3)
9295 {
9296 /* AR[] reads */
9297 regno = CURR_SLOT.opnd[1].X_add_number - REG_AR;
9298 switch (dep->regindex)
9299 {
9300 default:
9301 break;
9302 case AR_RSC:
9303 if (regno == AR_BSPSTORE || regno == AR_RNAT)
9304 {
9305 specs[count++] = tmpl;
9306 }
9307 break;
9308 }
9309 }
9310 else
9311 {
9312 specs[count++] = tmpl;
9313 }
9314 }
9315 else
9316 {
9317 UNHANDLED;
9318 }
9319 break;
9320
9321 case IA64_RS_CRX:
9322 /* Handle all CR[REG] resources.
9323      ??? FIXME: Rule 17 isn't really handled correctly. */
9324 if (note == 0 || note == 1 || note == 17)
9325 {
9326 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
9327 {
9328 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
9329 if (regno == dep->regindex)
9330 {
9331 specs[count++] = tmpl;
9332 }
9333 else if (!rsrc_write)
9334 {
9335 /* Reads from CR[IVR] affect other resources. */
9336 if (regno == CR_IVR)
9337 {
9338 if ((dep->regindex >= CR_IRR0
9339 && dep->regindex <= CR_IRR3)
9340 || dep->regindex == CR_TPR)
9341 {
9342 specs[count++] = tmpl;
9343 }
9344 }
9345 }
9346 }
9347 else
9348 {
9349 specs[count++] = tmpl;
9350 }
9351 }
9352 else
9353 {
9354 UNHANDLED;
9355 }
9356 break;
9357
9358 case IA64_RS_INSERVICE:
9359 /* look for write of EOI (67) or read of IVR (65) */
9360 if ((idesc->operands[0] == IA64_OPND_CR3
9361 && CURR_SLOT.opnd[0].X_add_number - REG_CR == CR_EOI)
9362 || (idesc->operands[1] == IA64_OPND_CR3
9363 && CURR_SLOT.opnd[1].X_add_number - REG_CR == CR_IVR))
9364 {
9365 specs[count++] = tmpl;
9366 }
9367 break;
9368
9369 case IA64_RS_GR0:
9370 if (note == 1)
9371 {
9372 specs[count++] = tmpl;
9373 }
9374 else
9375 {
9376 UNHANDLED;
9377 }
9378 break;
9379
9380 case IA64_RS_CFM:
9381 if (note != 2)
9382 {
9383 specs[count++] = tmpl;
9384 }
9385 else
9386 {
9387 /* Check if any of the registers accessed are in the rotating region.
9388 mov to/from pr accesses CFM only when qp_regno is in the rotating
9389 region */
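	  /* The rotating register regions start at GR32, FR32 and PR16,
	     which is why the checks below compare against 31 and 15.  */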
9390 for (i = 0; i < NELEMS (idesc->operands); i++)
9391 {
9392 if (idesc->operands[i] == IA64_OPND_R1
9393 || idesc->operands[i] == IA64_OPND_R2
9394 || idesc->operands[i] == IA64_OPND_R3)
9395 {
9396 int num = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9397 /* Assumes that md.rot.num_regs is always valid */
9398 if (md.rot.num_regs > 0
9399 && num > 31
9400 && num < 31 + md.rot.num_regs)
9401 {
9402 specs[count] = tmpl;
9403 specs[count++].specific = 0;
9404 }
9405 }
9406 else if (idesc->operands[i] == IA64_OPND_F1
9407 || idesc->operands[i] == IA64_OPND_F2
9408 || idesc->operands[i] == IA64_OPND_F3
9409 || idesc->operands[i] == IA64_OPND_F4)
9410 {
9411 int num = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9412 if (num > 31)
9413 {
9414 specs[count] = tmpl;
9415 specs[count++].specific = 0;
9416 }
9417 }
9418 else if (idesc->operands[i] == IA64_OPND_P1
9419 || idesc->operands[i] == IA64_OPND_P2)
9420 {
9421 int num = CURR_SLOT.opnd[i].X_add_number - REG_P;
9422 if (num > 15)
9423 {
9424 specs[count] = tmpl;
9425 specs[count++].specific = 0;
9426 }
9427 }
9428 }
9429 if (CURR_SLOT.qp_regno > 15)
9430 {
9431 specs[count] = tmpl;
9432 specs[count++].specific = 0;
9433 }
9434 }
9435 break;
9436
9437 /* This is the same as IA64_RS_PRr, except simplified to account for
9438 the fact that there is only one register. */
9439 case IA64_RS_PR63:
9440 if (note == 0)
9441 {
9442 specs[count++] = tmpl;
9443 }
9444 else if (note == 7)
9445 {
9446 valueT mask = 0;
9447 if (idesc->operands[2] == IA64_OPND_IMM17)
9448 mask = CURR_SLOT.opnd[2].X_add_number;
9449 if (mask & ((valueT) 1 << 63))
9450 specs[count++] = tmpl;
9451 }
9452 else if (note == 11)
9453 {
9454 if ((idesc->operands[0] == IA64_OPND_P1
9455 && CURR_SLOT.opnd[0].X_add_number - REG_P == 63)
9456 || (idesc->operands[1] == IA64_OPND_P2
9457 && CURR_SLOT.opnd[1].X_add_number - REG_P == 63))
9458 {
9459 specs[count++] = tmpl;
9460 }
9461 }
9462 else if (note == 12)
9463 {
9464 if (CURR_SLOT.qp_regno == 63)
9465 {
9466 specs[count++] = tmpl;
9467 }
9468 }
9469 else if (note == 1)
9470 {
9471 if (rsrc_write)
9472 {
9473 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9474 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9475 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
9476 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
9477
9478 if (p1 == 63
9479 && (idesc->operands[0] == IA64_OPND_P1
9480 || idesc->operands[0] == IA64_OPND_P2))
9481 {
9482 specs[count] = tmpl;
9483 specs[count++].cmp_type =
9484 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
9485 }
9486 if (p2 == 63
9487 && (idesc->operands[1] == IA64_OPND_P1
9488 || idesc->operands[1] == IA64_OPND_P2))
9489 {
9490 specs[count] = tmpl;
9491 specs[count++].cmp_type =
9492 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
9493 }
9494 }
9495 else
9496 {
9497 if (CURR_SLOT.qp_regno == 63)
9498 {
9499 specs[count++] = tmpl;
9500 }
9501 }
9502 }
9503 else
9504 {
9505 UNHANDLED;
9506 }
9507 break;
9508
9509 case IA64_RS_RSE:
9510 /* FIXME we can identify some individual RSE written resources, but RSE
9511 read resources have not yet been completely identified, so for now
9512 treat RSE as a single resource */
9513 if (strncmp (idesc->name, "mov", 3) == 0)
9514 {
9515 if (rsrc_write)
9516 {
9517 if (idesc->operands[0] == IA64_OPND_AR3
9518 && CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE)
9519 {
9520 specs[count++] = tmpl;
9521 }
9522 }
9523 else
9524 {
9525 if (idesc->operands[0] == IA64_OPND_AR3)
9526 {
9527 if (CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE
9528 || CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_RNAT)
9529 {
9530 specs[count++] = tmpl;
9531 }
9532 }
9533 else if (idesc->operands[1] == IA64_OPND_AR3)
9534 {
9535 if (CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSP
9536 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSPSTORE
9537 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_RNAT)
9538 {
9539 specs[count++] = tmpl;
9540 }
9541 }
9542 }
9543 }
9544 else
9545 {
9546 specs[count++] = tmpl;
9547 }
9548 break;
9549
9550 case IA64_RS_ANY:
9551 /* FIXME -- do any of these need to be non-specific? */
9552 specs[count++] = tmpl;
9553 break;
9554
9555 default:
9556 as_bad (_("Unrecognized dependency specifier %d\n"), dep->specifier);
9557 break;
9558 }
9559
9560 return count;
9561 }
9562
9563 /* Clear branch flags on marked resources. This breaks the link between the
9564 QP of the marking instruction and a subsequent branch on the same QP. */
9565
9566 static void
9567 clear_qp_branch_flag (valueT mask)
9568 {
9569 int i;
9570 for (i = 0; i < regdepslen; i++)
9571 {
9572 valueT bit = ((valueT) 1 << regdeps[i].qp_regno);
9573 if ((bit & mask) != 0)
9574 {
9575 regdeps[i].link_to_qp_branch = 0;
9576 }
9577 }
9578 }
9579
9580 /* MASK contains 2 and only 2 PRs which are mutually exclusive. Remove
9581 any mutexes which contain one of the PRs and create new ones when
9582 needed. */
9583
9584 static int
9585 update_qp_mutex (valueT mask)
9586 {
9587 int i;
9588 int add = 0;
9589
9590 i = 0;
9591 while (i < qp_mutexeslen)
9592 {
9593 if ((qp_mutexes[i].prmask & mask) != 0)
9594 {
9595 /* If it destroys and creates the same mutex, do nothing. */
9596 if (qp_mutexes[i].prmask == mask
9597 && qp_mutexes[i].path == md.path)
9598 {
9599 i++;
9600 add = -1;
9601 }
9602 else
9603 {
9604 int keep = 0;
9605
9606 if (md.debug_dv)
9607 {
9608 fprintf (stderr, " Clearing mutex relation");
9609 print_prmask (qp_mutexes[i].prmask);
9610 fprintf (stderr, "\n");
9611 }
9612
9613 		  /* Deal with an old mutex containing three or more PRs only
9614 		     if the new mutex is on the same execution path as it.
9615
9616 FIXME: The 3+ mutex support is incomplete.
9617 dot_pred_rel () may be a better place to fix it. */
9618 if (qp_mutexes[i].path == md.path)
9619 {
9620 /* If it is a proper subset of the mutex, create a
9621 new mutex. */
9622 if (add == 0
9623 && (qp_mutexes[i].prmask & mask) == mask)
9624 add = 1;
9625
9626 qp_mutexes[i].prmask &= ~mask;
9627 if (qp_mutexes[i].prmask & (qp_mutexes[i].prmask - 1))
9628 {
9629 /* Modify the mutex if there are more than one
9630 PR left. */
9631 keep = 1;
9632 i++;
9633 }
9634 }
9635
9636 if (keep == 0)
9637 /* Remove the mutex. */
9638 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9639 }
9640 }
9641 else
9642 ++i;
9643 }
9644
9645 if (add == 1)
9646 add_qp_mutex (mask);
9647
9648 return add;
9649 }
9650
9651 /* Remove any mutexes which contain any of the PRs indicated in the mask.
9652
9653    Any change to a PR clears the mutex relations which include that PR. */
9654
9655 static void
9656 clear_qp_mutex (valueT mask)
9657 {
9658 int i;
9659
9660 i = 0;
9661 while (i < qp_mutexeslen)
9662 {
9663 if ((qp_mutexes[i].prmask & mask) != 0)
9664 {
9665 if (md.debug_dv)
9666 {
9667 fprintf (stderr, " Clearing mutex relation");
9668 print_prmask (qp_mutexes[i].prmask);
9669 fprintf (stderr, "\n");
9670 }
9671 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9672 }
9673 else
9674 ++i;
9675 }
9676 }
9677
9678 /* Clear implies relations which contain PRs in the given masks.
9679 P1_MASK indicates the source of the implies relation, while P2_MASK
9680 indicates the implied PR. */
9681
9682 static void
9683 clear_qp_implies (valueT p1_mask, valueT p2_mask)
9684 {
9685 int i;
9686
9687 i = 0;
9688 while (i < qp_implieslen)
9689 {
9690 if ((((valueT) 1 << qp_implies[i].p1) & p1_mask) != 0
9691 || (((valueT) 1 << qp_implies[i].p2) & p2_mask) != 0)
9692 {
9693 if (md.debug_dv)
9694 fprintf (stderr, "Clearing implied relation PR%d->PR%d\n",
9695 qp_implies[i].p1, qp_implies[i].p2);
9696 qp_implies[i] = qp_implies[--qp_implieslen];
9697 }
9698 else
9699 ++i;
9700 }
9701 }
9702
9703 /* Add the PRs specified to the list of implied relations. */
9704
9705 static void
9706 add_qp_imply (int p1, int p2)
9707 {
9708 valueT mask;
9709 valueT bit;
9710 int i;
9711
9712 /* p0 is not meaningful here. */
9713 if (p1 == 0 || p2 == 0)
9714 abort ();
9715
9716 if (p1 == p2)
9717 return;
9718
9719 /* If it exists already, ignore it. */
9720 for (i = 0; i < qp_implieslen; i++)
9721 {
9722 if (qp_implies[i].p1 == p1
9723 && qp_implies[i].p2 == p2
9724 && qp_implies[i].path == md.path
9725 && !qp_implies[i].p2_branched)
9726 return;
9727 }
9728
9729 if (qp_implieslen == qp_impliestotlen)
9730 {
9731 qp_impliestotlen += 20;
9732 qp_implies = XRESIZEVEC (struct qp_imply, qp_implies, qp_impliestotlen);
9733 }
9734 if (md.debug_dv)
9735 fprintf (stderr, " Registering PR%d implies PR%d\n", p1, p2);
9736 qp_implies[qp_implieslen].p1 = p1;
9737 qp_implies[qp_implieslen].p2 = p2;
9738 qp_implies[qp_implieslen].path = md.path;
9739 qp_implies[qp_implieslen++].p2_branched = 0;
9740
9741 /* Add in the implied transitive relations; for everything that p2 implies,
9742 make p1 imply that, too; for everything that implies p1, make it imply p2
9743 as well. */
9744 for (i = 0; i < qp_implieslen; i++)
9745 {
9746 if (qp_implies[i].p1 == p2)
9747 add_qp_imply (p1, qp_implies[i].p2);
9748 if (qp_implies[i].p2 == p1)
9749 add_qp_imply (qp_implies[i].p1, p2);
9750 }
9751 /* Add in mutex relations implied by this implies relation; for each mutex
9752 relation containing p2, duplicate it and replace p2 with p1. */
9753 bit = (valueT) 1 << p1;
9754 mask = (valueT) 1 << p2;
9755 for (i = 0; i < qp_mutexeslen; i++)
9756 {
9757 if (qp_mutexes[i].prmask & mask)
9758 add_qp_mutex ((qp_mutexes[i].prmask & ~mask) | bit);
9759 }
9760 }
9761
9762 /* Add the PRs specified in the mask to the mutex list; this means that only
9763 one of the PRs can be true at any time. PR0 should never be included in
9764 the mask. */
9765
9766 static void
9767 add_qp_mutex (valueT mask)
9768 {
9769 if (mask & 0x1)
9770 abort ();
9771
9772 if (qp_mutexeslen == qp_mutexestotlen)
9773 {
9774 qp_mutexestotlen += 20;
9775 qp_mutexes = XRESIZEVEC (struct qpmutex, qp_mutexes, qp_mutexestotlen);
9776 }
9777 if (md.debug_dv)
9778 {
9779 fprintf (stderr, " Registering mutex on");
9780 print_prmask (mask);
9781 fprintf (stderr, "\n");
9782 }
9783 qp_mutexes[qp_mutexeslen].path = md.path;
9784 qp_mutexes[qp_mutexeslen++].prmask = mask;
9785 }
9786
9787 static int
9788 has_suffix_p (const char *name, const char *suffix)
9789 {
9790 size_t namelen = strlen (name);
9791 size_t sufflen = strlen (suffix);
9792
9793 if (namelen <= sufflen)
9794 return 0;
9795 return strcmp (name + namelen - sufflen, suffix) == 0;
9796 }
9797
9798 static void
9799 clear_register_values (void)
9800 {
9801 int i;
9802 if (md.debug_dv)
9803 fprintf (stderr, " Clearing register values\n");
9804 for (i = 1; i < NELEMS (gr_values); i++)
9805 gr_values[i].known = 0;
9806 }
9807
9808 /* Keep track of register values/changes which affect DV tracking.
9809
9810 optimization note: should add a flag to classes of insns where otherwise we
9811 have to examine a group of strings to identify them. */
9812
9813 static void
9814 note_register_values (struct ia64_opcode *idesc)
9815 {
9816 valueT qp_changemask = 0;
9817 int i;
9818
9819 /* Invalidate values for registers being written to. */
9820 for (i = 0; i < idesc->num_outputs; i++)
9821 {
9822 if (idesc->operands[i] == IA64_OPND_R1
9823 || idesc->operands[i] == IA64_OPND_R2
9824 || idesc->operands[i] == IA64_OPND_R3)
9825 {
9826 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9827 if (regno > 0 && regno < NELEMS (gr_values))
9828 gr_values[regno].known = 0;
9829 }
9830 else if (idesc->operands[i] == IA64_OPND_R3_2)
9831 {
9832 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9833 if (regno > 0 && regno < 4)
9834 gr_values[regno].known = 0;
9835 }
9836 else if (idesc->operands[i] == IA64_OPND_P1
9837 || idesc->operands[i] == IA64_OPND_P2)
9838 {
9839 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
9840 qp_changemask |= (valueT) 1 << regno;
9841 }
9842 else if (idesc->operands[i] == IA64_OPND_PR)
9843 {
9844 if (idesc->operands[2] & (valueT) 0x10000)
9845 qp_changemask = ~(valueT) 0x1FFFF | idesc->operands[2];
9846 else
9847 qp_changemask = idesc->operands[2];
9848 break;
9849 }
9850 else if (idesc->operands[i] == IA64_OPND_PR_ROT)
9851 {
9852 if (idesc->operands[1] & ((valueT) 1 << 43))
9853 qp_changemask = -((valueT) 1 << 44) | idesc->operands[1];
9854 else
9855 qp_changemask = idesc->operands[1];
9856 qp_changemask &= ~(valueT) 0xFFFF;
9857 break;
9858 }
9859 }
9860
9861 /* Always clear qp branch flags on any PR change. */
9862 /* FIXME there may be exceptions for certain compares. */
9863 clear_qp_branch_flag (qp_changemask);
9864
9865 /* Invalidate rotating registers on insns which affect RRBs in CFM. */
9866 if (idesc->flags & IA64_OPCODE_MOD_RRBS)
9867 {
9868 qp_changemask |= ~(valueT) 0xFFFF;
9869 if (strcmp (idesc->name, "clrrrb.pr") != 0)
9870 {
9871 for (i = 32; i < 32 + md.rot.num_regs; i++)
9872 gr_values[i].known = 0;
9873 }
9874 clear_qp_mutex (qp_changemask);
9875 clear_qp_implies (qp_changemask, qp_changemask);
9876 }
9877 /* After a call, all register values are undefined, except those marked
9878 as "safe". */
9879 else if (strncmp (idesc->name, "br.call", 6) == 0
9880 || strncmp (idesc->name, "brl.call", 7) == 0)
9881 {
9882 /* FIXME keep GR values which are marked as "safe_across_calls" */
9883 clear_register_values ();
9884 clear_qp_mutex (~qp_safe_across_calls);
9885 clear_qp_implies (~qp_safe_across_calls, ~qp_safe_across_calls);
9886 clear_qp_branch_flag (~qp_safe_across_calls);
9887 }
9888 else if (is_interruption_or_rfi (idesc)
9889 || is_taken_branch (idesc))
9890 {
9891 clear_register_values ();
9892 clear_qp_mutex (~(valueT) 0);
9893 clear_qp_implies (~(valueT) 0, ~(valueT) 0);
9894 }
9895 /* Look for mutex and implies relations. */
9896 else if ((idesc->operands[0] == IA64_OPND_P1
9897 || idesc->operands[0] == IA64_OPND_P2)
9898 && (idesc->operands[1] == IA64_OPND_P1
9899 || idesc->operands[1] == IA64_OPND_P2))
9900 {
9901 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9902 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9903 valueT p1mask = (p1 != 0) ? (valueT) 1 << p1 : 0;
9904 valueT p2mask = (p2 != 0) ? (valueT) 1 << p2 : 0;
9905
9906 /* If both PRs are PR0, we can't really do anything. */
9907 if (p1 == 0 && p2 == 0)
9908 {
9909 if (md.debug_dv)
9910 fprintf (stderr, " Ignoring PRs due to inclusion of p0\n");
9911 }
9912 /* In general, clear mutexes and implies which include P1 or P2,
9913 with the following exceptions. */
9914 else if (has_suffix_p (idesc->name, ".or.andcm")
9915 || has_suffix_p (idesc->name, ".and.orcm"))
9916 {
9917 clear_qp_implies (p2mask, p1mask);
9918 }
9919 else if (has_suffix_p (idesc->name, ".andcm")
9920 || has_suffix_p (idesc->name, ".and"))
9921 {
9922 clear_qp_implies (0, p1mask | p2mask);
9923 }
9924 else if (has_suffix_p (idesc->name, ".orcm")
9925 || has_suffix_p (idesc->name, ".or"))
9926 {
9927 clear_qp_mutex (p1mask | p2mask);
9928 clear_qp_implies (p1mask | p2mask, 0);
9929 }
9930 else
9931 {
9932 int added = 0;
9933
9934 clear_qp_implies (p1mask | p2mask, p1mask | p2mask);
9935
9936 /* If one of the PRs is PR0, we call clear_qp_mutex. */
9937 if (p1 == 0 || p2 == 0)
9938 clear_qp_mutex (p1mask | p2mask);
9939 else
9940 added = update_qp_mutex (p1mask | p2mask);
9941
9942 if (CURR_SLOT.qp_regno == 0
9943 || has_suffix_p (idesc->name, ".unc"))
9944 {
9945 if (added == 0 && p1 && p2)
9946 add_qp_mutex (p1mask | p2mask);
9947 if (CURR_SLOT.qp_regno != 0)
9948 {
9949 if (p1)
9950 add_qp_imply (p1, CURR_SLOT.qp_regno);
9951 if (p2)
9952 add_qp_imply (p2, CURR_SLOT.qp_regno);
9953 }
9954 }
9955 }
9956 }
9957 /* Look for mov imm insns into GRs. */
9958 else if (idesc->operands[0] == IA64_OPND_R1
9959 && (idesc->operands[1] == IA64_OPND_IMM22
9960 || idesc->operands[1] == IA64_OPND_IMMU64)
9961 && CURR_SLOT.opnd[1].X_op == O_constant
9962 && (strcmp (idesc->name, "mov") == 0
9963 || strcmp (idesc->name, "movl") == 0))
9964 {
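	      /* Recording the value here lets specify_resource () later
		 resolve (via KNOWN ()) which indirect register file entry
		 (CPUID, DBR, PMC, etc.) an insn actually touches.  */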
9965 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
9966 if (regno > 0 && regno < NELEMS (gr_values))
9967 {
9968 gr_values[regno].known = 1;
9969 gr_values[regno].value = CURR_SLOT.opnd[1].X_add_number;
9970 gr_values[regno].path = md.path;
9971 if (md.debug_dv)
9972 {
9973 fprintf (stderr, " Know gr%d = ", regno);
9974 fprintf_vma (stderr, gr_values[regno].value);
9975 fputs ("\n", stderr);
9976 }
9977 }
9978 }
9979 /* Look for dep.z imm insns. */
9980 else if (idesc->operands[0] == IA64_OPND_R1
9981 && idesc->operands[1] == IA64_OPND_IMM8
9982 && strcmp (idesc->name, "dep.z") == 0)
9983 {
9984 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
9985 if (regno > 0 && regno < NELEMS (gr_values))
9986 {
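		  /* dep.z r1 = imm8, pos6, len6 deposits the low LEN bits of
		     the immediate at bit position POS and zeroes the rest;
		     mirror that so the GR value stays known.  */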
9987 valueT value = CURR_SLOT.opnd[1].X_add_number;
9988
9989 if (CURR_SLOT.opnd[3].X_add_number < 64)
9990 value &= ((valueT)1 << CURR_SLOT.opnd[3].X_add_number) - 1;
9991 value <<= CURR_SLOT.opnd[2].X_add_number;
9992 gr_values[regno].known = 1;
9993 gr_values[regno].value = value;
9994 gr_values[regno].path = md.path;
9995 if (md.debug_dv)
9996 {
9997 fprintf (stderr, " Know gr%d = ", regno);
9998 fprintf_vma (stderr, gr_values[regno].value);
9999 fputs ("\n", stderr);
10000 }
10001 }
10002 }
10003 else
10004 {
10005 clear_qp_mutex (qp_changemask);
10006 clear_qp_implies (qp_changemask, qp_changemask);
10007 }
10008 }
10009
10010 /* Return whether the given predicate registers are currently mutex. */
10011
10012 static int
10013 qp_mutex (int p1, int p2, int path)
10014 {
10015 int i;
10016 valueT mask;
10017
10018 if (p1 != p2)
10019 {
10020 mask = ((valueT) 1 << p1) | (valueT) 1 << p2;
10021 for (i = 0; i < qp_mutexeslen; i++)
10022 {
10023 if (qp_mutexes[i].path >= path
10024 && (qp_mutexes[i].prmask & mask) == mask)
10025 return 1;
10026 }
10027 }
10028 return 0;
10029 }
10030
10031 /* Return whether the given resource is in the given insn's list of chks.
10032 Return 1 if the conflict is absolutely determined, 2 if it's a potential
10033 conflict. */
10034
10035 static int
10036 resources_match (struct rsrc *rs,
10037 struct ia64_opcode *idesc,
10038 int note,
10039 int qp_regno,
10040 int path)
10041 {
10042 struct rsrc specs[MAX_SPECS];
10043 int count;
10044
10045 /* If the marked resource's qp_regno and the given qp_regno are mutex,
10046 we don't need to check. One exception is note 11, which indicates that
10047 target predicates are written regardless of PR[qp]. */
10048 if (qp_mutex (rs->qp_regno, qp_regno, path)
10049 && note != 11)
10050 return 0;
10051
10052 count = specify_resource (rs->dependency, idesc, DV_CHK, specs, note, path);
10053 while (count-- > 0)
10054 {
10055 	  /* UNAT checking is a bit more specific than for other resources.  */
10056 if (rs->dependency->specifier == IA64_RS_AR_UNAT
10057 && specs[count].mem_offset.hint
10058 && rs->mem_offset.hint)
10059 {
10060 if (rs->mem_offset.base == specs[count].mem_offset.base)
10061 {
10062 if (((rs->mem_offset.offset >> 3) & 0x3F) ==
10063 ((specs[count].mem_offset.offset >> 3) & 0x3F))
10064 return 1;
10065 else
10066 continue;
10067 }
10068 }
10069
10070 /* Skip apparent PR write conflicts where both writes are an AND or both
10071 writes are an OR. */
10072 if (rs->dependency->specifier == IA64_RS_PR
10073 || rs->dependency->specifier == IA64_RS_PRr
10074 || rs->dependency->specifier == IA64_RS_PR63)
10075 {
10076 if (specs[count].cmp_type != CMP_NONE
10077 && specs[count].cmp_type == rs->cmp_type)
10078 {
10079 if (md.debug_dv)
10080 fprintf (stderr, " %s on parallel compare allowed (PR%d)\n",
10081 dv_mode[rs->dependency->mode],
10082 rs->dependency->specifier != IA64_RS_PR63 ?
10083 specs[count].index : 63);
10084 continue;
10085 }
10086 if (md.debug_dv)
10087 fprintf (stderr,
10088 " %s on parallel compare conflict %s vs %s on PR%d\n",
10089 dv_mode[rs->dependency->mode],
10090 dv_cmp_type[rs->cmp_type],
10091 dv_cmp_type[specs[count].cmp_type],
10092 rs->dependency->specifier != IA64_RS_PR63 ?
10093 specs[count].index : 63);
10094
10095 }
10096
10097 /* If either resource is not specific, conservatively assume a conflict
10098 */
10099 if (!specs[count].specific || !rs->specific)
10100 return 2;
10101 else if (specs[count].index == rs->index)
10102 return 1;
10103 }
10104
10105 return 0;
10106 }
10107
10108 /* Indicate an instruction group break; if INSERT_STOP is non-zero, then
10109 insert a stop to create the break. Update all resource dependencies
10110 appropriately. If QP_REGNO is non-zero, only apply the break to resources
10111 which use the same QP_REGNO and have the link_to_qp_branch flag set.
10112 If SAVE_CURRENT is non-zero, don't affect resources marked by the current
10113 instruction. */
10114
10115 static void
10116 insn_group_break (int insert_stop, int qp_regno, int save_current)
10117 {
10118 int i;
10119
10120 if (insert_stop && md.num_slots_in_use > 0)
10121 PREV_SLOT.end_of_insn_group = 1;
10122
10123 if (md.debug_dv)
10124 {
10125 fprintf (stderr, " Insn group break%s",
10126 (insert_stop ? " (w/stop)" : ""));
10127 if (qp_regno != 0)
10128 fprintf (stderr, " effective for QP=%d", qp_regno);
10129 fprintf (stderr, "\n");
10130 }
10131
10132 i = 0;
10133 while (i < regdepslen)
10134 {
10135 const struct ia64_dependency *dep = regdeps[i].dependency;
10136
10137 if (qp_regno != 0
10138 && regdeps[i].qp_regno != qp_regno)
10139 {
10140 ++i;
10141 continue;
10142 }
10143
10144 if (save_current
10145 && CURR_SLOT.src_file == regdeps[i].file
10146 && CURR_SLOT.src_line == regdeps[i].line)
10147 {
10148 ++i;
10149 continue;
10150 }
10151
10152 /* clear dependencies which are automatically cleared by a stop, or
10153 those that have reached the appropriate state of insn serialization */
10154 if (dep->semantics == IA64_DVS_IMPLIED
10155 || dep->semantics == IA64_DVS_IMPLIEDF
10156 || regdeps[i].insn_srlz == STATE_SRLZ)
10157 {
10158 print_dependency ("Removing", i);
10159 regdeps[i] = regdeps[--regdepslen];
10160 }
10161 else
10162 {
10163 if (dep->semantics == IA64_DVS_DATA
10164 || dep->semantics == IA64_DVS_INSTR
10165 || dep->semantics == IA64_DVS_SPECIFIC)
10166 {
10167 if (regdeps[i].insn_srlz == STATE_NONE)
10168 regdeps[i].insn_srlz = STATE_STOP;
10169 if (regdeps[i].data_srlz == STATE_NONE)
10170 regdeps[i].data_srlz = STATE_STOP;
10171 }
10172 ++i;
10173 }
10174 }
10175 }
10176
10177 /* Add the given resource usage spec to the list of active dependencies. */
10178
10179 static void
10180 mark_resource (struct ia64_opcode *idesc ATTRIBUTE_UNUSED,
10181 const struct ia64_dependency *dep ATTRIBUTE_UNUSED,
10182 struct rsrc *spec,
10183 int depind,
10184 int path)
10185 {
10186 if (regdepslen == regdepstotlen)
10187 {
10188 regdepstotlen += 20;
10189 regdeps = XRESIZEVEC (struct rsrc, regdeps, regdepstotlen);
10190 }
10191
10192 regdeps[regdepslen] = *spec;
10193 regdeps[regdepslen].depind = depind;
10194 regdeps[regdepslen].path = path;
10195 regdeps[regdepslen].file = CURR_SLOT.src_file;
10196 regdeps[regdepslen].line = CURR_SLOT.src_line;
10197
10198 print_dependency ("Adding", regdepslen);
10199
10200 ++regdepslen;
10201 }
10202
10203 static void
10204 print_dependency (const char *action, int depind)
10205 {
10206 if (md.debug_dv)
10207 {
10208 fprintf (stderr, " %s %s '%s'",
10209 action, dv_mode[(regdeps[depind].dependency)->mode],
10210 (regdeps[depind].dependency)->name);
10211 if (regdeps[depind].specific && regdeps[depind].index >= 0)
10212 fprintf (stderr, " (%d)", regdeps[depind].index);
10213 if (regdeps[depind].mem_offset.hint)
10214 {
10215 fputs (" ", stderr);
10216 fprintf_vma (stderr, regdeps[depind].mem_offset.base);
10217 fputs ("+", stderr);
10218 fprintf_vma (stderr, regdeps[depind].mem_offset.offset);
10219 }
10220 fprintf (stderr, "\n");
10221 }
10222 }
10223
10224 static void
10225 instruction_serialization (void)
10226 {
10227 int i;
10228 if (md.debug_dv)
10229 fprintf (stderr, " Instruction serialization\n");
10230 for (i = 0; i < regdepslen; i++)
10231 if (regdeps[i].insn_srlz == STATE_STOP)
10232 regdeps[i].insn_srlz = STATE_SRLZ;
10233 }
10234
10235 static void
10236 data_serialization (void)
10237 {
10238 int i = 0;
10239 if (md.debug_dv)
10240 fprintf (stderr, " Data serialization\n");
10241 while (i < regdepslen)
10242 {
10243 if (regdeps[i].data_srlz == STATE_STOP
10244 /* Note: as of 991210, all "other" dependencies are cleared by a
10245 data serialization. This might change with new tables */
10246 || (regdeps[i].dependency)->semantics == IA64_DVS_OTHER)
10247 {
10248 print_dependency ("Removing", i);
10249 regdeps[i] = regdeps[--regdepslen];
10250 }
10251 else
10252 ++i;
10253 }
10254 }
10255
10256 /* Insert stops and serializations as needed to avoid DVs. */
10257
10258 static void
10259 remove_marked_resource (struct rsrc *rs)
10260 {
10261 switch (rs->dependency->semantics)
10262 {
10263 case IA64_DVS_SPECIFIC:
10264 if (md.debug_dv)
10265 fprintf (stderr, "Implementation-specific, assume worst case...\n");
10266 /* ...fall through... */
10267 case IA64_DVS_INSTR:
10268 if (md.debug_dv)
10269 fprintf (stderr, "Inserting instr serialization\n");
10270 if (rs->insn_srlz < STATE_STOP)
10271 insn_group_break (1, 0, 0);
10272 if (rs->insn_srlz < STATE_SRLZ)
10273 {
10274 struct slot oldslot = CURR_SLOT;
10275 /* Manually jam a srlz.i insn into the stream */
10276 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
10277 CURR_SLOT.user_template = -1;
10278 CURR_SLOT.idesc = ia64_find_opcode ("srlz.i");
10279 instruction_serialization ();
10280 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10281 if (++md.num_slots_in_use >= NUM_SLOTS)
10282 emit_one_bundle ();
10283 CURR_SLOT = oldslot;
10284 }
10285 insn_group_break (1, 0, 0);
10286 break;
10287 case IA64_DVS_OTHER: /* as of rev2 (991220) of the DV tables, all
10288 "other" types of DV are eliminated
10289 by a data serialization */
10290 case IA64_DVS_DATA:
10291 if (md.debug_dv)
10292 fprintf (stderr, "Inserting data serialization\n");
10293 if (rs->data_srlz < STATE_STOP)
10294 insn_group_break (1, 0, 0);
10295 {
10296 struct slot oldslot = CURR_SLOT;
10297 /* Manually jam a srlz.d insn into the stream */
10298 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
10299 CURR_SLOT.user_template = -1;
10300 CURR_SLOT.idesc = ia64_find_opcode ("srlz.d");
10301 data_serialization ();
10302 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10303 if (++md.num_slots_in_use >= NUM_SLOTS)
10304 emit_one_bundle ();
10305 CURR_SLOT = oldslot;
10306 }
10307 break;
10308 case IA64_DVS_IMPLIED:
10309 case IA64_DVS_IMPLIEDF:
10310 if (md.debug_dv)
10311 fprintf (stderr, "Inserting stop\n");
10312 insn_group_break (1, 0, 0);
10313 break;
10314 default:
10315 break;
10316 }
10317 }
10318
10319 /* Check the resources used by the given opcode against the current dependency
10320 list.
10321
10322 The check is run once for each execution path encountered. In this case,
10323 a unique execution path is the sequence of instructions following a code
10324 entry point, e.g. the following has three execution paths, one starting
10325 at L0, one at L1, and one at L2.
10326
10327 L0: nop
10328 L1: add
10329 L2: add
10330 br.ret
10331 */
10332
10333 static void
10334 check_dependencies (struct ia64_opcode *idesc)
10335 {
10336 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
10337 int path;
10338 int i;
10339
10340 /* Note that the number of marked resources may change within the
10341 loop if in auto mode. */
10342 i = 0;
10343 while (i < regdepslen)
10344 {
10345 struct rsrc *rs = &regdeps[i];
10346 const struct ia64_dependency *dep = rs->dependency;
10347 int chkind;
10348 int note;
10349 int start_over = 0;
10350
10351 if (dep->semantics == IA64_DVS_NONE
10352 || (chkind = depends_on (rs->depind, idesc)) == -1)
10353 {
10354 ++i;
10355 continue;
10356 }
10357
10358 note = NOTE (opdeps->chks[chkind]);
10359
10360 /* Check this resource against each execution path seen thus far. */
10361 for (path = 0; path <= md.path; path++)
10362 {
10363 int matchtype;
10364
10365 /* If the dependency wasn't on the path being checked, ignore it. */
10366 if (rs->path < path)
10367 continue;
10368
10369 /* If the QP for this insn implies a QP which has branched, don't
10370 bother checking. Ed. NOTE: I don't think this check is terribly
10371 useful; what's the point of generating code which will only be
10372 reached if its QP is zero?
10373 This code was specifically inserted to handle the following code,
10374 based on notes from Intel's DV checking code, where p1 implies p2.
10375
10376 mov r4 = 2
10377 (p2) br.cond L
10378 (p1) mov r4 = 7
10379 */
10380 if (CURR_SLOT.qp_regno != 0)
10381 {
10382 int skip = 0;
10383 int implies;
10384 for (implies = 0; implies < qp_implieslen; implies++)
10385 {
10386 if (qp_implies[implies].path >= path
10387 && qp_implies[implies].p1 == CURR_SLOT.qp_regno
10388 && qp_implies[implies].p2_branched)
10389 {
10390 skip = 1;
10391 break;
10392 }
10393 }
10394 if (skip)
10395 continue;
10396 }
10397
10398 if ((matchtype = resources_match (rs, idesc, note,
10399 CURR_SLOT.qp_regno, path)) != 0)
10400 {
10401 char msg[1024];
10402 char pathmsg[256] = "";
10403 char indexmsg[256] = "";
10404 int certain = (matchtype == 1 && CURR_SLOT.qp_regno == 0);
10405
10406 if (path != 0)
10407 snprintf (pathmsg, sizeof (pathmsg),
10408 " when entry is at label '%s'",
10409 md.entry_labels[path - 1]);
10410 if (matchtype == 1 && rs->index >= 0)
10411 snprintf (indexmsg, sizeof (indexmsg),
10412 ", specific resource number is %d",
10413 rs->index);
10414 snprintf (msg, sizeof (msg),
10415 "Use of '%s' %s %s dependency '%s' (%s)%s%s",
10416 idesc->name,
10417 (certain ? "violates" : "may violate"),
10418 dv_mode[dep->mode], dep->name,
10419 dv_sem[dep->semantics],
10420 pathmsg, indexmsg);
10421
10422 if (md.explicit_mode)
10423 {
10424 as_warn ("%s", msg);
10425 if (path < md.path)
10426 as_warn (_("Only the first path encountering the conflict is reported"));
10427 as_warn_where (rs->file, rs->line,
10428 _("This is the location of the conflicting usage"));
10429 /* Don't bother checking other paths, to avoid duplicating
10430 the same warning */
10431 break;
10432 }
10433 else
10434 {
10435 if (md.debug_dv)
10436 fprintf (stderr, "%s @ %s:%d\n", msg, rs->file, rs->line);
10437
10438 remove_marked_resource (rs);
10439
10440 /* since the set of dependencies has changed, start over */
10441 /* FIXME -- since we're removing dvs as we go, we
10442 probably don't really need to start over... */
10443 start_over = 1;
10444 break;
10445 }
10446 }
10447 }
10448 if (start_over)
10449 i = 0;
10450 else
10451 ++i;
10452 }
10453 }
10454
10455 /* Register new dependencies based on the given opcode. */
10456
10457 static void
10458 mark_resources (struct ia64_opcode *idesc)
10459 {
10460 int i;
10461 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
10462 int add_only_qp_reads = 0;
10463
10464   /* A conditional branch only uses its resources if it is taken; but since
10465      we stop following a path once the branch is taken, on the not-taken
10466      path we register only its QP reads.  The other branch types effectively
10467      *always* write their resources.  */
10468 if (is_conditional_branch (idesc) || is_interruption_or_rfi (idesc))
10469 {
10470 add_only_qp_reads = 1;
10471 }
10472
10473 if (md.debug_dv)
10474 fprintf (stderr, "Registering '%s' resource usage\n", idesc->name);
10475
10476 for (i = 0; i < opdeps->nregs; i++)
10477 {
10478 const struct ia64_dependency *dep;
10479 struct rsrc specs[MAX_SPECS];
10480 int note;
10481 int path;
10482 int count;
10483
10484 dep = ia64_find_dependency (opdeps->regs[i]);
10485 note = NOTE (opdeps->regs[i]);
10486
10487 if (add_only_qp_reads
10488 && !(dep->mode == IA64_DV_WAR
10489 && (dep->specifier == IA64_RS_PR
10490 || dep->specifier == IA64_RS_PRr
10491 || dep->specifier == IA64_RS_PR63)))
10492 continue;
10493
10494 count = specify_resource (dep, idesc, DV_REG, specs, note, md.path);
10495
10496 while (count-- > 0)
10497 {
10498 mark_resource (idesc, dep, &specs[count],
10499 DEP (opdeps->regs[i]), md.path);
10500 }
10501
10502 /* The execution path may affect register values, which may in turn
10503 affect which indirect-access resources are accessed. */
10504 switch (dep->specifier)
10505 {
10506 default:
10507 break;
10508 case IA64_RS_CPUID:
10509 case IA64_RS_DBR:
10510 case IA64_RS_IBR:
10511 case IA64_RS_MSR:
10512 case IA64_RS_PKR:
10513 case IA64_RS_PMC:
10514 case IA64_RS_PMD:
10515 case IA64_RS_RR:
10516 for (path = 0; path < md.path; path++)
10517 {
10518 count = specify_resource (dep, idesc, DV_REG, specs, note, path);
10519 while (count-- > 0)
10520 mark_resource (idesc, dep, &specs[count],
10521 DEP (opdeps->regs[i]), path);
10522 }
10523 break;
10524 }
10525 }
10526 }
10527
10528 /* Remove dependencies when they no longer apply. */
10529
10530 static void
10531 update_dependencies (struct ia64_opcode *idesc)
10532 {
10533 int i;
10534
10535 if (strcmp (idesc->name, "srlz.i") == 0)
10536 {
10537 instruction_serialization ();
10538 }
10539 else if (strcmp (idesc->name, "srlz.d") == 0)
10540 {
10541 data_serialization ();
10542 }
10543 else if (is_interruption_or_rfi (idesc)
10544 || is_taken_branch (idesc))
10545 {
10546 /* Although technically the taken branch doesn't clear dependencies
10547 which require a srlz.[id], we don't follow the branch; the next
10548 instruction is assumed to start with a clean slate. */
10549 regdepslen = 0;
10550 md.path = 0;
10551 }
10552 else if (is_conditional_branch (idesc)
10553 && CURR_SLOT.qp_regno != 0)
10554 {
10555 int is_call = strstr (idesc->name, ".call") != NULL;
10556
10557 for (i = 0; i < qp_implieslen; i++)
10558 {
10559 /* If the conditional branch's predicate is implied by the predicate
10560 in an existing dependency, remove that dependency. */
10561 if (qp_implies[i].p2 == CURR_SLOT.qp_regno)
10562 {
10563 int depind = 0;
10564 /* Note that this implied predicate takes a branch so that if
10565 a later insn generates a DV but its predicate implies this
10566 one, we can avoid the false DV warning. */
10567 qp_implies[i].p2_branched = 1;
10568 while (depind < regdepslen)
10569 {
10570 if (regdeps[depind].qp_regno == qp_implies[i].p1)
10571 {
10572 print_dependency ("Removing", depind);
10573 regdeps[depind] = regdeps[--regdepslen];
10574 }
10575 else
10576 ++depind;
10577 }
10578 }
10579 }
10580 /* Any marked resources which have this same predicate should be
10581 cleared, provided that the QP hasn't been modified between the
10582 marking instruction and the branch. */
10583 if (is_call)
10584 {
10585 insn_group_break (0, CURR_SLOT.qp_regno, 1);
10586 }
10587 else
10588 {
10589 i = 0;
10590 while (i < regdepslen)
10591 {
10592 if (regdeps[i].qp_regno == CURR_SLOT.qp_regno
10593 && regdeps[i].link_to_qp_branch
10594 && (regdeps[i].file != CURR_SLOT.src_file
10595 || regdeps[i].line != CURR_SLOT.src_line))
10596 {
10597 /* Treat like a taken branch */
10598 print_dependency ("Removing", i);
10599 regdeps[i] = regdeps[--regdepslen];
10600 }
10601 else
10602 ++i;
10603 }
10604 }
10605 }
10606 }
10607
10608 /* Examine the current instruction for dependency violations. */
10609
10610 static int
10611 check_dv (struct ia64_opcode *idesc)
10612 {
10613 if (md.debug_dv)
10614 {
10615 fprintf (stderr, "Checking %s for violations (line %d, %d/%d)\n",
10616 idesc->name, CURR_SLOT.src_line,
10617 idesc->dependencies->nchks,
10618 idesc->dependencies->nregs);
10619 }
10620
10621 /* Look through the list of currently marked resources; if the current
10622 instruction has the dependency in its chks list which uses that resource,
10623 check against the specific resources used. */
10624 check_dependencies (idesc);
10625
10626 /* Look up the instruction's regdeps (RAW writes, WAW writes, and WAR reads),
10627 then add them to the list of marked resources. */
10628 mark_resources (idesc);
10629
10630 /* There are several types of dependency semantics, and each has its own
10631 requirements for being cleared:
10632
10633 Instruction serialization (insns separated by interruption, rfi, or
10634 writer + srlz.i + reader, all in separate groups) clears DVS_INSTR.
10635
10636 Data serialization (instruction serialization, or writer + srlz.d +
10637 reader, where writer and srlz.d are in separate groups) clears
10638 DVS_DATA. (This also clears DVS_OTHER, but that is not guaranteed to
10639 always be the case).
10640
10641 Instruction group break (groups separated by stop, taken branch,
10642 interruption or rfi) clears DVS_IMPLIED and DVS_IMPLIEDF.
10643 */
10644 update_dependencies (idesc);
10645
10646 /* Sometimes, knowing a register value allows us to avoid giving a false DV
10647 warning. Keep track of as many as possible that are useful. */
10648 note_register_values (idesc);
10649
10650 /* We don't need or want this anymore. */
10651 md.mem_offset.hint = 0;
10652
10653 return 0;
10654 }
10655
10656 /* Translate one line of assembly.  Pseudo ops and labels do not appear
10657 here. */
10658 void
10659 md_assemble (char *str)
10660 {
10661 char *saved_input_line_pointer, *temp;
10662 const char *mnemonic;
10663 const struct pseudo_opcode *pdesc;
10664 struct ia64_opcode *idesc;
10665 unsigned char qp_regno;
10666 unsigned int flags;
10667 int ch;
10668
10669 saved_input_line_pointer = input_line_pointer;
10670 input_line_pointer = str;
10671
10672 /* extract the opcode (mnemonic): */
10673
10674 ch = get_symbol_name (&temp);
10675 mnemonic = temp;
10676 pdesc = (struct pseudo_opcode *) hash_find (md.pseudo_hash, mnemonic);
10677 if (pdesc)
10678 {
10679 (void) restore_line_pointer (ch);
10680 (*pdesc->handler) (pdesc->arg);
10681 goto done;
10682 }
10683
10684 /* Find the instruction descriptor matching the arguments. */
10685
10686 idesc = ia64_find_opcode (mnemonic);
10687 (void) restore_line_pointer (ch);
10688 if (!idesc)
10689 {
10690 as_bad (_("Unknown opcode `%s'"), mnemonic);
10691 goto done;
10692 }
10693
10694 idesc = parse_operands (idesc);
10695 if (!idesc)
10696 goto done;
10697
10698 /* Handle the dynamic ops we can handle now: */
10699 if (idesc->type == IA64_TYPE_DYN)
10700 {
10701 if (strcmp (idesc->name, "add") == 0)
10702 {
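/* The generic "add" pseudo selects between "addl", which takes a
   22-bit immediate but only r0-r3 as its register source, and
   "adds", which accepts any GR but only a 14-bit immediate. */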
10703 if (CURR_SLOT.opnd[2].X_op == O_register
10704 && CURR_SLOT.opnd[2].X_add_number < 4)
10705 mnemonic = "addl";
10706 else
10707 mnemonic = "adds";
10708 ia64_free_opcode (idesc);
10709 idesc = ia64_find_opcode (mnemonic);
10710 }
10711 else if (strcmp (idesc->name, "mov") == 0)
10712 {
10713 enum ia64_opnd opnd1, opnd2;
10714 int rop;
10715
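/* Application registers are reachable from the I-unit, the M-unit, or
   both; resolve the generic "mov" to "mov.i" or "mov.m" when the AR
   operand is only accessible from one of the two units, and keep the
   generic form otherwise. */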
10716 opnd1 = idesc->operands[0];
10717 opnd2 = idesc->operands[1];
10718 if (opnd1 == IA64_OPND_AR3)
10719 rop = 0;
10720 else if (opnd2 == IA64_OPND_AR3)
10721 rop = 1;
10722 else
10723 abort ();
10724 if (CURR_SLOT.opnd[rop].X_op == O_register)
10725 {
10726 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10727 mnemonic = "mov.i";
10728 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10729 mnemonic = "mov.m";
10730 else
10731 rop = -1;
10732 }
10733 else
10734 abort ();
10735 if (rop >= 0)
10736 {
10737 ia64_free_opcode (idesc);
10738 idesc = ia64_find_opcode (mnemonic);
10739 while (idesc != NULL
10740 && (idesc->operands[0] != opnd1
10741 || idesc->operands[1] != opnd2))
10742 idesc = get_next_opcode (idesc);
10743 }
10744 }
10745 }
10746 else if (strcmp (idesc->name, "mov.i") == 0
10747 || strcmp (idesc->name, "mov.m") == 0)
10748 {
10749 enum ia64_opnd opnd1, opnd2;
10750 int rop;
10751
10752 opnd1 = idesc->operands[0];
10753 opnd2 = idesc->operands[1];
10754 if (opnd1 == IA64_OPND_AR3)
10755 rop = 0;
10756 else if (opnd2 == IA64_OPND_AR3)
10757 rop = 1;
10758 else
10759 abort ();
10760 if (CURR_SLOT.opnd[rop].X_op == O_register)
10761 {
10762 char unit = 'a';
10763 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10764 unit = 'i';
10765 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10766 unit = 'm';
10767 if (unit != 'a' && unit != idesc->name [4])
10768 as_bad (_("AR %d can only be accessed by %c-unit"),
10769 (int) (CURR_SLOT.opnd[rop].X_add_number - REG_AR),
10770 TOUPPER (unit));
10771 }
10772 }
10773 else if (strcmp (idesc->name, "hint.b") == 0)
10774 {
10775 switch (md.hint_b)
10776 {
10777 case hint_b_ok:
10778 break;
10779 case hint_b_warning:
10780 as_warn (_("hint.b may be treated as nop"));
10781 break;
10782 case hint_b_error:
10783 as_bad (_("hint.b shouldn't be used"));
10784 break;
10785 }
10786 }
10787
10788 qp_regno = 0;
10789 if (md.qp.X_op == O_register)
10790 {
10791 qp_regno = md.qp.X_add_number - REG_P;
10792 md.qp.X_op = O_absent;
10793 }
10794
10795 flags = idesc->flags;
10796
10797 if ((flags & IA64_OPCODE_FIRST) != 0)
10798 {
10799 /* The alignment frag has to end with a stop bit only if the
10800 next instruction after the alignment directive has to be
10801 the first instruction in an instruction group. */
10802 if (align_frag)
10803 {
10804 while (align_frag->fr_type != rs_align_code)
10805 {
10806 align_frag = align_frag->fr_next;
10807 if (!align_frag)
10808 break;
10809 }
10810 /* align_frag can be NULL if there are directives in
10811 between. */
10812 if (align_frag && align_frag->fr_next == frag_now)
10813 align_frag->tc_frag_data = 1;
10814 }
10815
10816 insn_group_break (1, 0, 0);
10817 }
10818 align_frag = NULL;
10819
10820 if ((flags & IA64_OPCODE_NO_PRED) != 0 && qp_regno != 0)
10821 {
10822 as_bad (_("`%s' cannot be predicated"), idesc->name);
10823 goto done;
10824 }
10825
10826 /* Build the instruction. */
10827 CURR_SLOT.qp_regno = qp_regno;
10828 CURR_SLOT.idesc = idesc;
10829 CURR_SLOT.src_file = as_where (&CURR_SLOT.src_line);
10830 dwarf2_where (&CURR_SLOT.debug_line);
10831 dwarf2_consume_line_info ();
10832
10833 /* Add unwind entries, if there are any. */
10834 if (unwind.current_entry)
10835 {
10836 CURR_SLOT.unwind_record = unwind.current_entry;
10837 unwind.current_entry = NULL;
10838 }
10839 if (unwind.pending_saves)
10840 {
10841 if (unwind.pending_saves->next)
10842 {
10843 /* Attach the next pending save to the next slot so that its
10844 slot number will get set correctly. */
10845 add_unwind_entry (unwind.pending_saves->next, NOT_A_CHAR);
10846 unwind.pending_saves = &unwind.pending_saves->next->r.record.p;
10847 }
10848 else
10849 unwind.pending_saves = NULL;
10850 }
10851 if (unwind.proc_pending.sym && S_IS_DEFINED (unwind.proc_pending.sym))
10852 unwind.insn = 1;
10853
10854 /* Check for dependency violations. */
10855 if (md.detect_dv)
10856 check_dv (idesc);
10857
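/* Advance to the next slot; once enough slots have been buffered,
   emit a complete bundle. */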
10858 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10859 if (++md.num_slots_in_use >= NUM_SLOTS)
10860 emit_one_bundle ();
10861
10862 if ((flags & IA64_OPCODE_LAST) != 0)
10863 insn_group_break (1, 0, 0);
10864
10865 md.last_text_seg = now_seg;
10866
10867 done:
10868 input_line_pointer = saved_input_line_pointer;
10869 }
10870
10871 /* Called when symbol NAME cannot be found in the symbol table.
10872 Should be used for dynamic valued symbols only. */
10873
10874 symbolS *
10875 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
10876 {
10877 return 0;
10878 }
10879
10880 /* Called for any expression that cannot be recognized.  When the
10881 function is called, `input_line_pointer' will point to the start of
10882 the expression. */
10883
10884 void
10885 md_operand (expressionS *e)
10886 {
10887 switch (*input_line_pointer)
10888 {
10889 case '[':
10890 ++input_line_pointer;
10891 expression_and_evaluate (e);
10892 if (*input_line_pointer != ']')
10893 {
10894 as_bad (_("Closing bracket missing"));
10895 goto err;
10896 }
10897 else
10898 {
10899 if (e->X_op != O_register
10900 || e->X_add_number < REG_GR
10901 || e->X_add_number > REG_GR + 127)
10902 {
10903 as_bad (_("Index must be a general register"));
10904 e->X_add_number = REG_GR;
10905 }
10906
10907 ++input_line_pointer;
10908 e->X_op = O_index;
10909 }
10910 break;
10911
10912 default:
10913 break;
10914 }
10915 return;
10916
10917 err:
10918 ignore_rest_of_line ();
10919 }
10920
10921 /* Return 1 if it's OK to adjust a reloc by replacing the symbol with
10922 a section symbol plus some offset.  For relocs involving @fptr()
10923 directives, we don't want such adjustments since we need to have the
10924 original symbol's name in the reloc. */
10925 int
10926 ia64_fix_adjustable (fixS *fix)
10927 {
10928 /* Prevent all adjustments to global symbols */
10929 if (S_IS_EXTERNAL (fix->fx_addsy) || S_IS_WEAK (fix->fx_addsy))
10930 return 0;
10931
10932 switch (fix->fx_r_type)
10933 {
10934 case BFD_RELOC_IA64_FPTR64I:
10935 case BFD_RELOC_IA64_FPTR32MSB:
10936 case BFD_RELOC_IA64_FPTR32LSB:
10937 case BFD_RELOC_IA64_FPTR64MSB:
10938 case BFD_RELOC_IA64_FPTR64LSB:
10939 case BFD_RELOC_IA64_LTOFF_FPTR22:
10940 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10941 return 0;
10942 default:
10943 break;
10944 }
10945
10946 return 1;
10947 }
10948
10949 int
10950 ia64_force_relocation (fixS *fix)
10951 {
10952 switch (fix->fx_r_type)
10953 {
10954 case BFD_RELOC_IA64_FPTR64I:
10955 case BFD_RELOC_IA64_FPTR32MSB:
10956 case BFD_RELOC_IA64_FPTR32LSB:
10957 case BFD_RELOC_IA64_FPTR64MSB:
10958 case BFD_RELOC_IA64_FPTR64LSB:
10959
10960 case BFD_RELOC_IA64_LTOFF22:
10961 case BFD_RELOC_IA64_LTOFF64I:
10962 case BFD_RELOC_IA64_LTOFF_FPTR22:
10963 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10964 case BFD_RELOC_IA64_PLTOFF22:
10965 case BFD_RELOC_IA64_PLTOFF64I:
10966 case BFD_RELOC_IA64_PLTOFF64MSB:
10967 case BFD_RELOC_IA64_PLTOFF64LSB:
10968
10969 case BFD_RELOC_IA64_LTOFF22X:
10970 case BFD_RELOC_IA64_LDXMOV:
10971 return 1;
10972
10973 default:
10974 break;
10975 }
10976
10977 return generic_force_reloc (fix);
10978 }
10979
10980 /* Decide what point a pc-relative relocation is relative to,
10981 relative to the pc-relative fixup.  Er, relatively speaking. */
10982 long
10983 ia64_pcrel_from_section (fixS *fix, segT sec)
10984 {
10985 unsigned long off = fix->fx_frag->fr_address + fix->fx_where;
10986
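/* In code sections the fixup applies to an instruction slot, but
   pc-relative displacements are computed from the start of the
   containing 16-byte bundle, so mask off the slot number. */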
10987 if (bfd_get_section_flags (stdoutput, sec) & SEC_CODE)
10988 off &= ~0xfUL;
10989
10990 return off;
10991 }
10992
10993
10994 /* Used to emit section-relative relocs for the dwarf2 debug data. */
10995 void
10996 ia64_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
10997 {
10998 expressionS exp;
10999
11000 exp.X_op = O_pseudo_fixup;
11001 exp.X_op_symbol = pseudo_func[FUNC_SEC_RELATIVE].u.sym;
11002 exp.X_add_number = 0;
11003 exp.X_add_symbol = symbol;
11004 emit_expr (&exp, size);
11005 }
11006
11007 /* This is called whenever some data item (not an instruction) needs a
11008 fixup. We pick the right reloc code depending on the byteorder
11009 currently in effect. */
11010 void
11011 ia64_cons_fix_new (fragS *f, int where, int nbytes, expressionS *exp,
11012 bfd_reloc_code_real_type code)
11013 {
11014 fixS *fix;
11015
11016 switch (nbytes)
11017 {
11018 /* There are no relocs for 8- and 16-bit quantities, but we allow
11019 them here since they will work fine as long as the expression
11020 is fully defined at the end of the pass over the source file. */
11021 case 1: code = BFD_RELOC_8; break;
11022 case 2: code = BFD_RELOC_16; break;
11023 case 4:
11024 if (target_big_endian)
11025 code = BFD_RELOC_IA64_DIR32MSB;
11026 else
11027 code = BFD_RELOC_IA64_DIR32LSB;
11028 break;
11029
11030 case 8:
11031 /* In 32-bit mode, data8 could mean function descriptors too. */
11032 if (exp->X_op == O_pseudo_fixup
11033 && exp->X_op_symbol
11034 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC
11035 && !(md.flags & EF_IA_64_ABI64))
11036 {
11037 if (target_big_endian)
11038 code = BFD_RELOC_IA64_IPLTMSB;
11039 else
11040 code = BFD_RELOC_IA64_IPLTLSB;
11041 exp->X_op = O_symbol;
11042 break;
11043 }
11044 else
11045 {
11046 if (target_big_endian)
11047 code = BFD_RELOC_IA64_DIR64MSB;
11048 else
11049 code = BFD_RELOC_IA64_DIR64LSB;
11050 break;
11051 }
11052
11053 case 16:
11054 if (exp->X_op == O_pseudo_fixup
11055 && exp->X_op_symbol
11056 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC)
11057 {
11058 if (target_big_endian)
11059 code = BFD_RELOC_IA64_IPLTMSB;
11060 else
11061 code = BFD_RELOC_IA64_IPLTLSB;
11062 exp->X_op = O_symbol;
11063 break;
11064 }
11065 /* FALLTHRU */
11066
11067 default:
11068 as_bad (_("Unsupported fixup size %d"), nbytes);
11069 ignore_rest_of_line ();
11070 return;
11071 }
11072
11073 if (exp->X_op == O_pseudo_fixup)
11074 {
11075 exp->X_op = O_symbol;
11076 code = ia64_gen_real_reloc_type (exp->X_op_symbol, code);
11077 /* ??? If code unchanged, unsupported. */
11078 }
11079
11080 fix = fix_new_exp (f, where, nbytes, exp, 0, code);
11081 /* We need to store the byte order in effect in case we're going
11082 to fix an 8- or 16-bit relocation (for which there are no real
11083 relocs available). See md_apply_fix(). */
11084 fix->tc_fix_data.bigendian = target_big_endian;
11085 }
11086
11087 /* Return the actual relocation we wish to associate with the pseudo
11088 reloc described by SYM and R_TYPE. SYM should be one of the
11089 symbols in the pseudo_func array, or NULL. */
11090
11091 static bfd_reloc_code_real_type
11092 ia64_gen_real_reloc_type (struct symbol *sym, bfd_reloc_code_real_type r_type)
11093 {
11094 bfd_reloc_code_real_type newr = 0;
11095 const char *type = NULL, *suffix = "";
11096
11097 if (sym == NULL)
11098 {
11099 return r_type;
11100 }
11101
11102 switch (S_GET_VALUE (sym))
11103 {
11104 case FUNC_FPTR_RELATIVE:
11105 switch (r_type)
11106 {
11107 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_FPTR64I; break;
11108 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_FPTR32MSB; break;
11109 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_FPTR32LSB; break;
11110 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_FPTR64MSB; break;
11111 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_FPTR64LSB; break;
11112 default: type = "FPTR"; break;
11113 }
11114 break;
11115
11116 case FUNC_GP_RELATIVE:
11117 switch (r_type)
11118 {
11119 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_GPREL22; break;
11120 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_GPREL64I; break;
11121 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_GPREL32MSB; break;
11122 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_GPREL32LSB; break;
11123 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_GPREL64MSB; break;
11124 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_GPREL64LSB; break;
11125 default: type = "GPREL"; break;
11126 }
11127 break;
11128
11129 case FUNC_LT_RELATIVE:
11130 switch (r_type)
11131 {
11132 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_LTOFF22; break;
11133 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_LTOFF64I; break;
11134 default: type = "LTOFF"; break;
11135 }
11136 break;
11137
11138 case FUNC_LT_RELATIVE_X:
11139 switch (r_type)
11140 {
11141 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_LTOFF22X; break;
11142 default: type = "LTOFF"; suffix = "X"; break;
11143 }
11144 break;
11145
11146 case FUNC_PC_RELATIVE:
11147 switch (r_type)
11148 {
11149 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_PCREL22; break;
11150 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_PCREL64I; break;
11151 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_PCREL32MSB; break;
11152 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_PCREL32LSB; break;
11153 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_PCREL64MSB; break;
11154 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_PCREL64LSB; break;
11155 default: type = "PCREL"; break;
11156 }
11157 break;
11158
11159 case FUNC_PLT_RELATIVE:
11160 switch (r_type)
11161 {
11162 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_PLTOFF22; break;
11163 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_PLTOFF64I; break;
11164 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_PLTOFF64MSB;break;
11165 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_PLTOFF64LSB;break;
11166 default: type = "PLTOFF"; break;
11167 }
11168 break;
11169
11170 case FUNC_SEC_RELATIVE:
11171 switch (r_type)
11172 {
11173 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_SECREL32MSB;break;
11174 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_SECREL32LSB;break;
11175 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_SECREL64MSB;break;
11176 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_SECREL64LSB;break;
11177 default: type = "SECREL"; break;
11178 }
11179 break;
11180
11181 case FUNC_SEG_RELATIVE:
11182 switch (r_type)
11183 {
11184 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_SEGREL32MSB;break;
11185 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_SEGREL32LSB;break;
11186 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_SEGREL64MSB;break;
11187 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_SEGREL64LSB;break;
11188 default: type = "SEGREL"; break;
11189 }
11190 break;
11191
11192 case FUNC_LTV_RELATIVE:
11193 switch (r_type)
11194 {
11195 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_LTV32MSB; break;
11196 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_LTV32LSB; break;
11197 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_LTV64MSB; break;
11198 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_LTV64LSB; break;
11199 default: type = "LTV"; break;
11200 }
11201 break;
11202
11203 case FUNC_LT_FPTR_RELATIVE:
11204 switch (r_type)
11205 {
11206 case BFD_RELOC_IA64_IMM22:
11207 newr = BFD_RELOC_IA64_LTOFF_FPTR22; break;
11208 case BFD_RELOC_IA64_IMM64:
11209 newr = BFD_RELOC_IA64_LTOFF_FPTR64I; break;
11210 case BFD_RELOC_IA64_DIR32MSB:
11211 newr = BFD_RELOC_IA64_LTOFF_FPTR32MSB; break;
11212 case BFD_RELOC_IA64_DIR32LSB:
11213 newr = BFD_RELOC_IA64_LTOFF_FPTR32LSB; break;
11214 case BFD_RELOC_IA64_DIR64MSB:
11215 newr = BFD_RELOC_IA64_LTOFF_FPTR64MSB; break;
11216 case BFD_RELOC_IA64_DIR64LSB:
11217 newr = BFD_RELOC_IA64_LTOFF_FPTR64LSB; break;
11218 default:
11219 type = "LTOFF_FPTR"; break;
11220 }
11221 break;
11222
11223 case FUNC_TP_RELATIVE:
11224 switch (r_type)
11225 {
11226 case BFD_RELOC_IA64_IMM14: newr = BFD_RELOC_IA64_TPREL14; break;
11227 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_TPREL22; break;
11228 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_TPREL64I; break;
11229 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_TPREL64MSB; break;
11230 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_TPREL64LSB; break;
11231 default: type = "TPREL"; break;
11232 }
11233 break;
11234
11235 case FUNC_LT_TP_RELATIVE:
11236 switch (r_type)
11237 {
11238 case BFD_RELOC_IA64_IMM22:
11239 newr = BFD_RELOC_IA64_LTOFF_TPREL22; break;
11240 default:
11241 type = "LTOFF_TPREL"; break;
11242 }
11243 break;
11244
11245 case FUNC_DTP_MODULE:
11246 switch (r_type)
11247 {
11248 case BFD_RELOC_IA64_DIR64MSB:
11249 newr = BFD_RELOC_IA64_DTPMOD64MSB; break;
11250 case BFD_RELOC_IA64_DIR64LSB:
11251 newr = BFD_RELOC_IA64_DTPMOD64LSB; break;
11252 default:
11253 type = "DTPMOD"; break;
11254 }
11255 break;
11256
11257 case FUNC_LT_DTP_MODULE:
11258 switch (r_type)
11259 {
11260 case BFD_RELOC_IA64_IMM22:
11261 newr = BFD_RELOC_IA64_LTOFF_DTPMOD22; break;
11262 default:
11263 type = "LTOFF_DTPMOD"; break;
11264 }
11265 break;
11266
11267 case FUNC_DTP_RELATIVE:
11268 switch (r_type)
11269 {
11270 case BFD_RELOC_IA64_DIR32MSB:
11271 newr = BFD_RELOC_IA64_DTPREL32MSB; break;
11272 case BFD_RELOC_IA64_DIR32LSB:
11273 newr = BFD_RELOC_IA64_DTPREL32LSB; break;
11274 case BFD_RELOC_IA64_DIR64MSB:
11275 newr = BFD_RELOC_IA64_DTPREL64MSB; break;
11276 case BFD_RELOC_IA64_DIR64LSB:
11277 newr = BFD_RELOC_IA64_DTPREL64LSB; break;
11278 case BFD_RELOC_IA64_IMM14:
11279 newr = BFD_RELOC_IA64_DTPREL14; break;
11280 case BFD_RELOC_IA64_IMM22:
11281 newr = BFD_RELOC_IA64_DTPREL22; break;
11282 case BFD_RELOC_IA64_IMM64:
11283 newr = BFD_RELOC_IA64_DTPREL64I; break;
11284 default:
11285 type = "DTPREL"; break;
11286 }
11287 break;
11288
11289 case FUNC_LT_DTP_RELATIVE:
11290 switch (r_type)
11291 {
11292 case BFD_RELOC_IA64_IMM22:
11293 newr = BFD_RELOC_IA64_LTOFF_DTPREL22; break;
11294 default:
11295 type = "LTOFF_DTPREL"; break;
11296 }
11297 break;
11298
11299 case FUNC_IPLT_RELOC:
11300 switch (r_type)
11301 {
11302 case BFD_RELOC_IA64_IPLTMSB: return r_type;
11303 case BFD_RELOC_IA64_IPLTLSB: return r_type;
11304 default: type = "IPLT"; break;
11305 }
11306 break;
11307
11308 #ifdef TE_VMS
11309 case FUNC_SLOTCOUNT_RELOC:
11310 return DUMMY_RELOC_IA64_SLOTCOUNT;
11311 #endif
11312
11313 default:
11314 abort ();
11315 }
11316
11317 if (newr)
11318 return newr;
11319 else
11320 {
11321 int width;
11322
11323 if (!type)
11324 abort ();
11325 switch (r_type)
11326 {
11327 case BFD_RELOC_IA64_DIR32MSB: width = 32; suffix = "MSB"; break;
11328 case BFD_RELOC_IA64_DIR32LSB: width = 32; suffix = "LSB"; break;
11329 case BFD_RELOC_IA64_DIR64MSB: width = 64; suffix = "MSB"; break;
11330 case BFD_RELOC_IA64_DIR64LSB: width = 64; suffix = "LSB"; break;
11331 case BFD_RELOC_UNUSED: width = 13; break;
11332 case BFD_RELOC_IA64_IMM14: width = 14; break;
11333 case BFD_RELOC_IA64_IMM22: width = 22; break;
11334 case BFD_RELOC_IA64_IMM64: width = 64; suffix = "I"; break;
11335 default: abort ();
11336 }
11337
11338 /* This should be an error, but since previously there wasn't any
11339 diagnostic here, don't make it fail because of this for now. */
11340 as_warn (_("Cannot express %s%d%s relocation"), type, width, suffix);
11341 return r_type;
11342 }
11343 }
11344
11345 /* Here is where we generate the appropriate reloc for pseudo relocation
11346 functions. */
11347 void
11348 ia64_validate_fix (fixS *fix)
11349 {
11350 switch (fix->fx_r_type)
11351 {
11352 case BFD_RELOC_IA64_FPTR64I:
11353 case BFD_RELOC_IA64_FPTR32MSB:
11354 case BFD_RELOC_IA64_FPTR64LSB:
11355 case BFD_RELOC_IA64_LTOFF_FPTR22:
11356 case BFD_RELOC_IA64_LTOFF_FPTR64I:
11357 if (fix->fx_offset != 0)
11358 as_bad_where (fix->fx_file, fix->fx_line,
11359 _("No addend allowed in @fptr() relocation"));
11360 break;
11361 default:
11362 break;
11363 }
11364 }
11365
11366 static void
11367 fix_insn (fixS *fix, const struct ia64_operand *odesc, valueT value)
11368 {
11369 bfd_vma insn[3], t0, t1, control_bits;
11370 const char *err;
11371 char *fixpos;
11372 long slot;
11373
11374 slot = fix->fx_where & 0x3;
11375 fixpos = fix->fx_frag->fr_literal + (fix->fx_where - slot);
11376
11377 /* Bundles are always in little-endian byte order */
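/* A bundle is 128 bits: a 5-bit template followed by three 41-bit
   instruction slots.  Split the two 64-bit halves into the template
   (control_bits) and the three slots so that a single slot can be
   patched below and the bundle reassembled afterwards. */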
11378 t0 = bfd_getl64 (fixpos);
11379 t1 = bfd_getl64 (fixpos + 8);
11380 control_bits = t0 & 0x1f;
11381 insn[0] = (t0 >> 5) & 0x1ffffffffffLL;
11382 insn[1] = ((t0 >> 46) & 0x3ffff) | ((t1 & 0x7fffff) << 18);
11383 insn[2] = (t1 >> 23) & 0x1ffffffffffLL;
11384
11385 err = NULL;
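/* 64-bit immediates (movl), 62-bit immediates and 64-bit branch
   targets span two instruction slots (the L and X slots), so they
   cannot be patched through the generic single-slot insert routine. */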
11386 if (odesc - elf64_ia64_operands == IA64_OPND_IMMU64)
11387 {
11388 insn[1] = (value >> 22) & 0x1ffffffffffLL;
11389 insn[2] |= (((value & 0x7f) << 13)
11390 | (((value >> 7) & 0x1ff) << 27)
11391 | (((value >> 16) & 0x1f) << 22)
11392 | (((value >> 21) & 0x1) << 21)
11393 | (((value >> 63) & 0x1) << 36));
11394 }
11395 else if (odesc - elf64_ia64_operands == IA64_OPND_IMMU62)
11396 {
11397 if (value & ~0x3fffffffffffffffULL)
11398 err = _("integer operand out of range");
11399 insn[1] = (value >> 21) & 0x1ffffffffffLL;
11400 insn[2] |= (((value & 0xfffff) << 6) | (((value >> 20) & 0x1) << 36));
11401 }
11402 else if (odesc - elf64_ia64_operands == IA64_OPND_TGT64)
11403 {
11404 value >>= 4;
11405 insn[1] = ((value >> 20) & 0x7fffffffffLL) << 2;
11406 insn[2] |= ((((value >> 59) & 0x1) << 36)
11407 | (((value >> 0) & 0xfffff) << 13));
11408 }
11409 else
11410 err = (*odesc->insert) (odesc, value, insn + slot);
11411
11412 if (err)
11413 as_bad_where (fix->fx_file, fix->fx_line, "%s", err);
11414
11415 t0 = control_bits | (insn[0] << 5) | (insn[1] << 46);
11416 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
11417 number_to_chars_littleendian (fixpos + 0, t0, 8);
11418 number_to_chars_littleendian (fixpos + 8, t1, 8);
11419 }
11420
11421 /* Attempt to simplify or even eliminate a fixup. The return value is
11422 ignored; perhaps it was once meaningful, but now it is historical.
11423 To indicate that a fixup has been eliminated, set FIXP->FX_DONE.
11424
11425 If fixp->fx_addsy is non-NULL, we'll have to generate a reloc entry
11426 (if possible). */
11427
11428 void
11429 md_apply_fix (fixS *fix, valueT *valP, segT seg ATTRIBUTE_UNUSED)
11430 {
11431 char *fixpos;
11432 valueT value = *valP;
11433
11434 fixpos = fix->fx_frag->fr_literal + fix->fx_where;
11435
11436 if (fix->fx_pcrel)
11437 {
11438 switch (fix->fx_r_type)
11439 {
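/* These pc-relative relocations are already in their final form;
   anything else is mapped to the matching PCREL type via the
   FUNC_PC_RELATIVE pseudo-function entry in the default case. */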
11440 case BFD_RELOC_IA64_PCREL21B: break;
11441 case BFD_RELOC_IA64_PCREL21BI: break;
11442 case BFD_RELOC_IA64_PCREL21F: break;
11443 case BFD_RELOC_IA64_PCREL21M: break;
11444 case BFD_RELOC_IA64_PCREL60B: break;
11445 case BFD_RELOC_IA64_PCREL22: break;
11446 case BFD_RELOC_IA64_PCREL64I: break;
11447 case BFD_RELOC_IA64_PCREL32MSB: break;
11448 case BFD_RELOC_IA64_PCREL32LSB: break;
11449 case BFD_RELOC_IA64_PCREL64MSB: break;
11450 case BFD_RELOC_IA64_PCREL64LSB: break;
11451 default:
11452 fix->fx_r_type = ia64_gen_real_reloc_type (pseudo_func[FUNC_PC_RELATIVE].u.sym,
11453 fix->fx_r_type);
11454 break;
11455 }
11456 }
11457 if (fix->fx_addsy)
11458 {
11459 switch ((unsigned) fix->fx_r_type)
11460 {
11461 case BFD_RELOC_UNUSED:
11462 /* This must be a TAG13 or TAG13b operand. There are no external
11463 relocs defined for them, so we must give an error. */
11464 as_bad_where (fix->fx_file, fix->fx_line,
11465 _("%s must have a constant value"),
11466 elf64_ia64_operands[fix->tc_fix_data.opnd].desc);
11467 fix->fx_done = 1;
11468 return;
11469
11470 case BFD_RELOC_IA64_TPREL14:
11471 case BFD_RELOC_IA64_TPREL22:
11472 case BFD_RELOC_IA64_TPREL64I:
11473 case BFD_RELOC_IA64_LTOFF_TPREL22:
11474 case BFD_RELOC_IA64_LTOFF_DTPMOD22:
11475 case BFD_RELOC_IA64_DTPREL14:
11476 case BFD_RELOC_IA64_DTPREL22:
11477 case BFD_RELOC_IA64_DTPREL64I:
11478 case BFD_RELOC_IA64_LTOFF_DTPREL22:
11479 S_SET_THREAD_LOCAL (fix->fx_addsy);
11480 break;
11481
11482 #ifdef TE_VMS
11483 case DUMMY_RELOC_IA64_SLOTCOUNT:
11484 as_bad_where (fix->fx_file, fix->fx_line,
11485 _("cannot resolve @slotcount parameter"));
11486 fix->fx_done = 1;
11487 return;
11488 #endif
11489
11490 default:
11491 break;
11492 }
11493 }
11494 else if (fix->tc_fix_data.opnd == IA64_OPND_NIL)
11495 {
11496 #ifdef TE_VMS
11497 if (fix->fx_r_type == DUMMY_RELOC_IA64_SLOTCOUNT)
11498 {
11499 /* For @slotcount, convert an address difference to a slot-count
11500 difference. */
11501 valueT v;
11502
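/* A bundle is 16 bytes and holds three instruction slots, so each
   whole bundle in the byte difference contributes three slots; the
   low four bits are then mapped to the remaining slot count below. */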
11503 v = (value >> 4) * 3;
11504 switch (value & 0x0f)
11505 {
11506 case 0:
11507 case 1:
11508 case 2:
11509 v += value & 0x0f;
11510 break;
11511 case 0x0f:
11512 v += 2;
11513 break;
11514 case 0x0e:
11515 v += 1;
11516 break;
11517 default:
11518 as_bad (_("invalid @slotcount value"));
11519 }
11520 value = v;
11521 }
11522 #endif
11523
11524 if (fix->tc_fix_data.bigendian)
11525 number_to_chars_bigendian (fixpos, value, fix->fx_size);
11526 else
11527 number_to_chars_littleendian (fixpos, value, fix->fx_size);
11528 fix->fx_done = 1;
11529 }
11530 else
11531 {
11532 fix_insn (fix, elf64_ia64_operands + fix->tc_fix_data.opnd, value);
11533 fix->fx_done = 1;
11534 }
11535 }
11536
11537 /* Generate the BFD reloc to be stuck in the object file from the
11538 fixup used internally in the assembler. */
11539
11540 arelent *
11541 tc_gen_reloc (asection *sec ATTRIBUTE_UNUSED, fixS *fixp)
11542 {
11543 arelent *reloc;
11544
11545 reloc = XNEW (arelent);
11546 reloc->sym_ptr_ptr = XNEW (asymbol *);
11547 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
11548 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
11549 reloc->addend = fixp->fx_offset;
11550 reloc->howto = bfd_reloc_type_lookup (stdoutput, fixp->fx_r_type);
11551
11552 if (!reloc->howto)
11553 {
11554 as_bad_where (fixp->fx_file, fixp->fx_line,
11555 _("Cannot represent %s relocation in object file"),
11556 bfd_get_reloc_code_name (fixp->fx_r_type));
11557 free (reloc);
11558 return NULL;
11559 }
11560 return reloc;
11561 }
11562
11563 /* Turn a string in input_line_pointer into a floating point constant
11564 of type TYPE, and store the appropriate bytes in *LIT. The number
11565 of LITTLENUMS emitted is stored in *SIZE. An error message is
11566 returned, or NULL on OK. */
11567
11568 #define MAX_LITTLENUMS 5
11569
11570 const char *
11571 md_atof (int type, char *lit, int *size)
11572 {
11573 LITTLENUM_TYPE words[MAX_LITTLENUMS];
11574 char *t;
11575 int prec;
11576
11577 switch (type)
11578 {
11579 /* IEEE floats */
11580 case 'f':
11581 case 'F':
11582 case 's':
11583 case 'S':
11584 prec = 2;
11585 break;
11586
11587 case 'd':
11588 case 'D':
11589 case 'r':
11590 case 'R':
11591 prec = 4;
11592 break;
11593
11594 case 'x':
11595 case 'X':
11596 case 'p':
11597 case 'P':
11598 prec = 5;
11599 break;
11600
11601 default:
11602 *size = 0;
11603 return _("Unrecognized or unsupported floating point constant");
11604 }
11605 t = atof_ieee (input_line_pointer, type, words);
11606 if (t)
11607 input_line_pointer = t;
11608
11609 (*ia64_float_to_chars) (lit, words, prec);
11610
11611 if (type == 'X')
11612 {
11613 /* It is a 10-byte floating point value with 6 bytes of padding. */
11614 memset (&lit [10], 0, 6);
11615 *size = 8 * sizeof (LITTLENUM_TYPE);
11616 }
11617 else
11618 *size = prec * sizeof (LITTLENUM_TYPE);
11619
11620 return NULL;
11621 }
11622
11623 /* Handle ia64 specific semantics of the align directive. */
11624
11625 void
11626 ia64_md_do_align (int n ATTRIBUTE_UNUSED,
11627 const char *fill ATTRIBUTE_UNUSED,
11628 int len ATTRIBUTE_UNUSED,
11629 int max ATTRIBUTE_UNUSED)
11630 {
11631 if (subseg_text_p (now_seg))
11632 ia64_flush_insns ();
11633 }
11634
11635 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
11636 of an rs_align_code fragment. */
11637
11638 void
11639 ia64_handle_align (fragS *fragp)
11640 {
11641 int bytes;
11642 char *p;
11643 const unsigned char *nop_type;
11644
11645 if (fragp->fr_type != rs_align_code)
11646 return;
11647
11648 /* Check if this frag has to end with a stop bit. */
11649 nop_type = fragp->tc_frag_data ? le_nop_stop : le_nop;
11650
11651 bytes = fragp->fr_next->fr_address - fragp->fr_address - fragp->fr_fix;
11652 p = fragp->fr_literal + fragp->fr_fix;
11653
11654 /* If no padding is needed, check whether we need a stop bit. */
11655 if (!bytes && fragp->tc_frag_data)
11656 {
11657 if (fragp->fr_fix < 16)
11658 #if 1
11659 /* FIXME: It won't work with
11660 .align 16
11661 alloc r32=ar.pfs,1,2,4,0
11662 */
11663 ;
11664 #else
11665 as_bad_where (fragp->fr_file, fragp->fr_line,
11666 _("Can't add stop bit to mark end of instruction group"));
11667 #endif
11668 else
11669 /* Bundles are always in little-endian byte order. Make sure
11670 the previous bundle has the stop bit. */
11671 *(p - 16) |= 1;
11672 }
11673
11674 /* Make sure we are on a 16-byte boundary, in case someone has been
11675 putting data into a text section. */
11676 if (bytes & 15)
11677 {
11678 int fix = bytes & 15;
11679 memset (p, 0, fix);
11680 p += fix;
11681 bytes -= fix;
11682 fragp->fr_fix += fix;
11683 }
11684
11685 /* Instruction bundles are always little-endian. */
11686 memcpy (p, nop_type, 16);
11687 fragp->fr_var = 16;
11688 }
11689
11690 static void
11691 ia64_float_to_chars_bigendian (char *lit, LITTLENUM_TYPE *words,
11692 int prec)
11693 {
11694 while (prec--)
11695 {
11696 number_to_chars_bigendian (lit, (long) (*words++),
11697 sizeof (LITTLENUM_TYPE));
11698 lit += sizeof (LITTLENUM_TYPE);
11699 }
11700 }
11701
11702 static void
11703 ia64_float_to_chars_littleendian (char *lit, LITTLENUM_TYPE *words,
11704 int prec)
11705 {
11706 while (prec--)
11707 {
11708 number_to_chars_littleendian (lit, (long) (words[prec]),
11709 sizeof (LITTLENUM_TYPE));
11710 lit += sizeof (LITTLENUM_TYPE);
11711 }
11712 }
11713
11714 void
11715 ia64_elf_section_change_hook (void)
11716 {
11717 if (elf_section_type (now_seg) == SHT_IA_64_UNWIND
11718 && elf_linked_to_section (now_seg) == NULL)
11719 elf_linked_to_section (now_seg) = text_section;
11720 dot_byteorder (-1);
11721 }
11722
11723 /* Check if a label should be made global. */
11724 void
11725 ia64_check_label (symbolS *label)
11726 {
11727 if (*input_line_pointer == ':')
11728 {
11729 S_SET_EXTERNAL (label);
11730 input_line_pointer++;
11731 }
11732 }
11733
11734 /* Used to remember where .alias and .secalias directives are seen. We
11735 will rename symbol and section names when we are about to output
11736 the relocatable file. */
11737 struct alias
11738 {
11739 const char *file; /* The file where the directive is seen. */
11740 unsigned int line; /* The line number the directive is at. */
11741 const char *name; /* The original name of the symbol. */
11742 };
11743
11744 /* Called for .alias and .secalias directives. If SECTION is 1, it is
11745 .secalias. Otherwise, it is .alias. */
11746 static void
11747 dot_alias (int section)
11748 {
11749 char *name, *alias;
11750 char delim;
11751 char *end_name;
11752 int len;
11753 const char *error_string;
11754 struct alias *h;
11755 const char *a;
11756 struct hash_control *ahash, *nhash;
11757 const char *kind;
11758
11759 delim = get_symbol_name (&name);
11760 end_name = input_line_pointer;
11761 *end_name = delim;
11762
11763 if (name == end_name)
11764 {
11765 as_bad (_("expected symbol name"));
11766 ignore_rest_of_line ();
11767 return;
11768 }
11769
11770 SKIP_WHITESPACE_AFTER_NAME ();
11771
11772 if (*input_line_pointer != ',')
11773 {
11774 *end_name = 0;
11775 as_bad (_("expected comma after \"%s\""), name);
11776 *end_name = delim;
11777 ignore_rest_of_line ();
11778 return;
11779 }
11780
11781 input_line_pointer++;
11782 *end_name = 0;
11783 ia64_canonicalize_symbol_name (name);
11784
11785 /* We call demand_copy_C_string to check if the alias string is valid.
11786 There should be a closing `"' and no `\0' in the string. */
11787 alias = demand_copy_C_string (&len);
11788 if (alias == NULL)
11789 {
11790 ignore_rest_of_line ();
11791 return;
11792 }
11793
11794 /* Make a copy of name string. */
11795 len = strlen (name) + 1;
11796 obstack_grow (&notes, name, len);
11797 name = obstack_finish (&notes);
11798
11799 if (section)
11800 {
11801 kind = "section";
11802 ahash = secalias_hash;
11803 nhash = secalias_name_hash;
11804 }
11805 else
11806 {
11807 kind = "symbol";
11808 ahash = alias_hash;
11809 nhash = alias_name_hash;
11810 }
11811
11812 /* Check if alias has been used before. */
11813 h = (struct alias *) hash_find (ahash, alias);
11814 if (h)
11815 {
11816 if (strcmp (h->name, name))
11817 as_bad (_("`%s' is already the alias of %s `%s'"),
11818 alias, kind, h->name);
11819 goto out;
11820 }
11821
11822 /* Check if name already has an alias. */
11823 a = (const char *) hash_find (nhash, name);
11824 if (a)
11825 {
11826 if (strcmp (a, alias))
11827 as_bad (_("%s `%s' already has an alias `%s'"), kind, name, a);
11828 goto out;
11829 }
11830
11831 h = XNEW (struct alias);
11832 h->file = as_where (&h->line);
11833 h->name = name;
11834
11835 error_string = hash_jam (ahash, alias, (void *) h);
11836 if (error_string)
11837 {
11838 as_fatal (_("inserting \"%s\" into %s alias hash table failed: %s"),
11839 alias, kind, error_string);
11840 goto out;
11841 }
11842
11843 error_string = hash_jam (nhash, name, (void *) alias);
11844 if (error_string)
11845 {
11846 as_fatal (_("inserting \"%s\" into %s name hash table failed: %s"),
11847 alias, kind, error_string);
11848 out:
11849 obstack_free (&notes, name);
11850 obstack_free (&notes, alias);
11851 }
11852
11853 demand_empty_rest_of_line ();
11854 }
11855
11856 /* Rename the original symbol name to its alias. */
11857 static void
11858 do_alias (const char *alias, void *value)
11859 {
11860 struct alias *h = (struct alias *) value;
11861 symbolS *sym = symbol_find (h->name);
11862
11863 if (sym == NULL)
11864 {
11865 #ifdef TE_VMS
11866 /* VMS uses .alias extensively to alias CRTL functions to the same
11867 name with a decc$ prefix.  Sometimes the function gets optimized
11868 away and a warning results, which should be suppressed. */
11869 if (strncmp (alias, "decc$", 5) != 0)
11870 #endif
11871 as_warn_where (h->file, h->line,
11872 _("symbol `%s' aliased to `%s' is not used"),
11873 h->name, alias);
11874 }
11875 else
11876 S_SET_NAME (sym, (char *) alias);
11877 }
11878
11879 /* Called from write_object_file. */
11880 void
11881 ia64_adjust_symtab (void)
11882 {
11883 hash_traverse (alias_hash, do_alias);
11884 }
11885
11886 /* Rename the original section name to its alias. */
11887 static void
11888 do_secalias (const char *alias, void *value)
11889 {
11890 struct alias *h = (struct alias *) value;
11891 segT sec = bfd_get_section_by_name (stdoutput, h->name);
11892
11893 if (sec == NULL)
11894 as_warn_where (h->file, h->line,
11895 _("section `%s' aliased to `%s' is not used"),
11896 h->name, alias);
11897 else
11898 sec->name = alias;
11899 }
11900
11901 /* Called from write_object_file. */
11902 void
11903 ia64_frob_file (void)
11904 {
11905 hash_traverse (secalias_hash, do_secalias);
11906 }
11907
11908 #ifdef TE_VMS
11909 #define NT_VMS_MHD 1
11910 #define NT_VMS_LNM 2
11911
11912 /* Integrity VMS 8.x identifies its ELF modules with a standard ELF
11913 .note section. */
11914
11915 /* Manufacture a VMS-like time string. */
11916 static void
11917 get_vms_time (char *Now)
11918 {
11919 char *pnt;
11920 time_t timeb;
11921
11922 time (&timeb);
11923 pnt = ctime (&timeb);
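/* ctime yields "Www Mmm dd hh:mm:ss yyyy\n"; NUL-terminate the
   individual fields in place so they can be recombined below as
   "dd-Mmm-yyyy hh:mm". */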
11924 pnt[3] = 0;
11925 pnt[7] = 0;
11926 pnt[10] = 0;
11927 pnt[16] = 0;
11928 pnt[24] = 0;
11929 sprintf (Now, "%2s-%3s-%s %s", pnt + 8, pnt + 4, pnt + 20, pnt + 11);
11930 }
11931
11932 void
11933 ia64_vms_note (void)
11934 {
11935 char *p;
11936 asection *seg = now_seg;
11937 subsegT subseg = now_subseg;
11938 asection *secp = NULL;
11939 char *bname;
11940 char buf [256];
11941 symbolS *sym;
11942
11943 /* Create the .note section. */
11944
11945 secp = subseg_new (".note", 0);
11946 bfd_set_section_flags (stdoutput,
11947 secp,
11948 SEC_HAS_CONTENTS | SEC_READONLY);
11949
11950 /* Module header note (MHD). */
11951 bname = xstrdup (lbasename (out_file_name));
11952 if ((p = strrchr (bname, '.')))
11953 *p = '\0';
11954
11955 /* VMS note header is 24 bytes long. */
11956 p = frag_more (8 + 8 + 8);
11957 number_to_chars_littleendian (p + 0, 8, 8);
11958 number_to_chars_littleendian (p + 8, 40 + strlen (bname), 8);
11959 number_to_chars_littleendian (p + 16, NT_VMS_MHD, 8);
11960
11961 p = frag_more (8);
11962 strcpy (p, "IPF/VMS");
11963
11964 p = frag_more (17 + 17 + strlen (bname) + 1 + 5);
11965 get_vms_time (p);
11966 strcpy (p + 17, "24-FEB-2005 15:00");
11967 p += 17 + 17;
11968 strcpy (p, bname);
11969 p += strlen (bname) + 1;
11970 free (bname);
11971 strcpy (p, "V1.0");
11972
11973 frag_align (3, 0, 0);
11974
11975 /* Language processor name note. */
11976 sprintf (buf, "GNU assembler version %s (%s) using BFD version %s",
11977 VERSION, TARGET_ALIAS, BFD_VERSION_STRING);
11978
11979 p = frag_more (8 + 8 + 8);
11980 number_to_chars_littleendian (p + 0, 8, 8);
11981 number_to_chars_littleendian (p + 8, strlen (buf) + 1, 8);
11982 number_to_chars_littleendian (p + 16, NT_VMS_LNM, 8);
11983
11984 p = frag_more (8);
11985 strcpy (p, "IPF/VMS");
11986
11987 p = frag_more (strlen (buf) + 1);
11988 strcpy (p, buf);
11989
11990 frag_align (3, 0, 0);
11991
11992 secp = subseg_new (".vms_display_name_info", 0);
11993 bfd_set_section_flags (stdoutput,
11994 secp,
11995 SEC_HAS_CONTENTS | SEC_READONLY);
11996
11997 /* This symbol should be passed on the command line and vary
11998 according to the source language. */
11999 sym = symbol_new ("__gnat_vms_display_name@gnat_demangler_rtl",
12000 absolute_section, 0, &zero_address_frag);
12001 symbol_table_insert (sym);
12002 symbol_get_bfdsym (sym)->flags |= BSF_DEBUGGING | BSF_DYNAMIC;
12003
12004 p = frag_more (4);
12005 /* Format 3 of VMS demangler Spec. */
12006 number_to_chars_littleendian (p, 3, 4);
12007
12008 p = frag_more (4);
12009 /* Placeholder for the symbol table index of the above symbol. */
12010 number_to_chars_littleendian (p, -1, 4);
12011
12012 frag_align (3, 0, 0);
12013
12014 /* We probably can't restore the current segment, for there likely
12015 isn't one yet... */
12016 if (seg && subseg)
12017 subseg_set (seg, subseg);
12018 }
12019
12020 #endif /* TE_VMS */