1 /* tc-ia64.c -- Assembler for the HP/Intel IA-64 architecture.
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
3 Free Software Foundation, Inc.
4 Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
5
6 This file is part of GAS, the GNU Assembler.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GAS; see the file COPYING. If not, write to
20 the Free Software Foundation, 51 Franklin Street - Fifth Floor,
21 Boston, MA 02110-1301, USA. */
22
23 /*
24 TODO:
25
26 - optional operands
27 - directives:
28 .eb
29 .estate
30 .lb
31 .popsection
32 .previous
33 .psr
34 .pushsection
35 - labels are wrong if automatic alignment is introduced
36 (e.g., check out the second real10 definition in test-data.s)
37 - DV-related stuff:
38 <reg>.safe_across_calls and any other DV-related directives I don't
39 have documentation for.
40 verify mod-sched-brs reads/writes are checked/marked (and other
41 notes)
42
43 */
44
45 #include "as.h"
46 #include "safe-ctype.h"
47 #include "dwarf2dbg.h"
48 #include "subsegs.h"
49
50 #include "opcode/ia64.h"
51
52 #include "elf/ia64.h"
53
54 #ifdef HAVE_LIMITS_H
55 #include <limits.h>
56 #endif
57
58 #define NELEMS(a) ((int) (sizeof (a)/sizeof ((a)[0])))
59
60 /* Some systems define MIN in, e.g., param.h. */
61 #undef MIN
62 #define MIN(a,b) ((a) < (b) ? (a) : (b))
63
64 #define NUM_SLOTS 4
65 #define PREV_SLOT md.slot[(md.curr_slot + NUM_SLOTS - 1) % NUM_SLOTS]
66 #define CURR_SLOT md.slot[md.curr_slot]
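/* For illustration: with md.curr_slot == 0, CURR_SLOT is md.slot[0] and
   PREV_SLOT wraps around to md.slot[3], since (0 + 4 - 1) % 4 == 3; the
   slot array is treated as a small ring buffer.  */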
67
68 #define O_pseudo_fixup (O_max + 1)
69
70 enum special_section
71 {
72 /* IA-64 ABI section pseudo-ops. */
73 SPECIAL_SECTION_BSS = 0,
74 SPECIAL_SECTION_SBSS,
75 SPECIAL_SECTION_SDATA,
76 SPECIAL_SECTION_RODATA,
77 SPECIAL_SECTION_COMMENT,
78 SPECIAL_SECTION_UNWIND,
79 SPECIAL_SECTION_UNWIND_INFO,
80 /* HPUX specific section pseudo-ops. */
81 SPECIAL_SECTION_INIT_ARRAY,
82 SPECIAL_SECTION_FINI_ARRAY,
83 };
84
85 enum reloc_func
86 {
87 FUNC_DTP_MODULE,
88 FUNC_DTP_RELATIVE,
89 FUNC_FPTR_RELATIVE,
90 FUNC_GP_RELATIVE,
91 FUNC_LT_RELATIVE,
92 FUNC_LT_RELATIVE_X,
93 FUNC_PC_RELATIVE,
94 FUNC_PLT_RELATIVE,
95 FUNC_SEC_RELATIVE,
96 FUNC_SEG_RELATIVE,
97 FUNC_TP_RELATIVE,
98 FUNC_LTV_RELATIVE,
99 FUNC_LT_FPTR_RELATIVE,
100 FUNC_LT_DTP_MODULE,
101 FUNC_LT_DTP_RELATIVE,
102 FUNC_LT_TP_RELATIVE,
103 FUNC_IPLT_RELOC,
104 };
105
106 enum reg_symbol
107 {
108 REG_GR = 0,
109 REG_FR = (REG_GR + 128),
110 REG_AR = (REG_FR + 128),
111 REG_CR = (REG_AR + 128),
112 REG_P = (REG_CR + 128),
113 REG_BR = (REG_P + 64),
114 REG_IP = (REG_BR + 8),
115 REG_CFM,
116 REG_PR,
117 REG_PR_ROT,
118 REG_PSR,
119 REG_PSR_L,
120 REG_PSR_UM,
121 /* The following are pseudo-registers for use by gas only. */
122 IND_CPUID,
123 IND_DBR,
124 IND_DTR,
125 IND_ITR,
126 IND_IBR,
127 IND_MSR,
128 IND_PKR,
129 IND_PMC,
130 IND_PMD,
131 IND_RR,
132 /* The following pseudo-registers are used for unwind directives only: */
133 REG_PSP,
134 REG_PRIUNAT,
135 REG_NUM
136 };
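/* For illustration: the enum above places all register names in one flat
   number space, so general register r5 is REG_GR + 5, floating-point
   register f5 is REG_FR + 5 (= 133), and predicate p3 is REG_P + 3.  */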
137
138 enum dynreg_type
139 {
140 DYNREG_GR = 0, /* dynamic general purpose register */
141 DYNREG_FR, /* dynamic floating point register */
142 DYNREG_PR, /* dynamic predicate register */
143 DYNREG_NUM_TYPES
144 };
145
146 enum operand_match_result
147 {
148 OPERAND_MATCH,
149 OPERAND_OUT_OF_RANGE,
150 OPERAND_MISMATCH
151 };
152
153 /* On the ia64, we can't know the address of a text label until the
154 instructions are packed into a bundle. To handle this, we keep
155 track of the list of labels that appear in front of each
156 instruction. */
157 struct label_fix
158 {
159 struct label_fix *next;
160 struct symbol *sym;
161 bfd_boolean dw2_mark_labels;
162 };
163
164 /* This is the endianness of the current section. */
165 extern int target_big_endian;
166
167 /* This is the default endianness. */
168 static int default_big_endian = TARGET_BYTES_BIG_ENDIAN;
169
170 void (*ia64_number_to_chars) (char *, valueT, int);
171
172 static void ia64_float_to_chars_bigendian (char *, LITTLENUM_TYPE *, int);
173 static void ia64_float_to_chars_littleendian (char *, LITTLENUM_TYPE *, int);
174
175 static void (*ia64_float_to_chars) (char *, LITTLENUM_TYPE *, int);
176
177 static struct hash_control *alias_hash;
178 static struct hash_control *alias_name_hash;
179 static struct hash_control *secalias_hash;
180 static struct hash_control *secalias_name_hash;
181
182 /* List of chars besides those in app.c:symbol_chars that can start an
183 operand. Used to prevent the scrubber eating vital white-space. */
184 const char ia64_symbol_chars[] = "@?";
185
186 /* Characters which always start a comment. */
187 const char comment_chars[] = "";
188
189 /* Characters which start a comment at the beginning of a line. */
190 const char line_comment_chars[] = "#";
191
192 /* Characters which may be used to separate multiple commands on a
193 single line. */
194 const char line_separator_chars[] = ";{}";
195
196 /* Characters which are used to indicate an exponent in a floating
197 point number. */
198 const char EXP_CHARS[] = "eE";
199
200 /* Characters which mean that a number is a floating point constant,
201 as in 0d1.0. */
202 const char FLT_CHARS[] = "rRsSfFdDxXpP";
203
204 /* ia64-specific option processing: */
205
206 const char *md_shortopts = "m:N:x::";
207
208 struct option md_longopts[] =
209 {
210 #define OPTION_MCONSTANT_GP (OPTION_MD_BASE + 1)
211 {"mconstant-gp", no_argument, NULL, OPTION_MCONSTANT_GP},
212 #define OPTION_MAUTO_PIC (OPTION_MD_BASE + 2)
213 {"mauto-pic", no_argument, NULL, OPTION_MAUTO_PIC}
214 };
215
216 size_t md_longopts_size = sizeof (md_longopts);
217
218 static struct
219 {
220 struct hash_control *pseudo_hash; /* pseudo opcode hash table */
221 struct hash_control *reg_hash; /* register name hash table */
222 struct hash_control *dynreg_hash; /* dynamic register hash table */
223 struct hash_control *const_hash; /* constant hash table */
224 struct hash_control *entry_hash; /* code entry hint hash table */
225
226 /* If X_op is != O_absent, the register name for the instruction's
227 qualifying predicate. If NULL, p0 is assumed for instructions
228 that are predicatable. */
229 expressionS qp;
230
231 /* Optimize for which CPU. */
232 enum
233 {
234 itanium1,
235 itanium2
236 } tune;
237
238 /* What to do when hint.b is used. */
239 enum
240 {
241 hint_b_error,
242 hint_b_warning,
243 hint_b_ok
244 } hint_b;
245
246 unsigned int
247 manual_bundling : 1,
248 debug_dv: 1,
249 detect_dv: 1,
250 explicit_mode : 1, /* which mode we're in */
251 default_explicit_mode : 1, /* which mode is the default */
252 mode_explicitly_set : 1, /* was the current mode explicitly set? */
253 auto_align : 1,
254 keep_pending_output : 1;
255
256 /* What to do when something is wrong with unwind directives. */
257 enum
258 {
259 unwind_check_warning,
260 unwind_check_error
261 } unwind_check;
262
263 /* Each bundle consists of up to three instructions. We keep
264 track of the four most recent instructions so we can correctly set
265 the end_of_insn_group for the last instruction in a bundle. */
266 int curr_slot;
267 int num_slots_in_use;
268 struct slot
269 {
270 unsigned int
271 end_of_insn_group : 1,
272 manual_bundling_on : 1,
273 manual_bundling_off : 1,
274 loc_directive_seen : 1;
275 signed char user_template; /* user-selected template, if any */
276 unsigned char qp_regno; /* qualifying predicate */
277 /* This duplicates a good fraction of "struct fix" but we
278 can't use a "struct fix" instead since we can't call
279 fix_new_exp() until we know the address of the instruction. */
280 int num_fixups;
281 struct insn_fix
282 {
283 bfd_reloc_code_real_type code;
284 enum ia64_opnd opnd; /* type of operand in need of fix */
285 unsigned int is_pcrel : 1; /* is operand pc-relative? */
286 expressionS expr; /* the value to be inserted */
287 }
288 fixup[2]; /* at most two fixups per insn */
289 struct ia64_opcode *idesc;
290 struct label_fix *label_fixups;
291 struct label_fix *tag_fixups;
292 struct unw_rec_list *unwind_record; /* Unwind directive. */
293 expressionS opnd[6];
294 char *src_file;
295 unsigned int src_line;
296 struct dwarf2_line_info debug_line;
297 }
298 slot[NUM_SLOTS];
299
300 segT last_text_seg;
301
302 struct dynreg
303 {
304 struct dynreg *next; /* next dynamic register */
305 const char *name;
306 unsigned short base; /* the base register number */
307 unsigned short num_regs; /* # of registers in this set */
308 }
309 *dynreg[DYNREG_NUM_TYPES], in, loc, out, rot;
310
311 flagword flags; /* ELF-header flags */
312
313 struct mem_offset {
314 unsigned hint:1; /* is this hint currently valid? */
315 bfd_vma offset; /* mem.offset offset */
316 bfd_vma base; /* mem.offset base */
317 } mem_offset;
318
319 int path; /* number of alt. entry points seen */
320 const char **entry_labels; /* labels of all alternate paths in
321 the current DV-checking block. */
322 int maxpaths; /* size currently allocated for
323 entry_labels */
324
325 int pointer_size; /* size in bytes of a pointer */
326 int pointer_size_shift; /* shift size of a pointer for alignment */
327
328 symbolS *indregsym[IND_RR - IND_CPUID + 1];
329 }
330 md;
331
332 /* These are not const, because they are modified to MMI for non-itanium1
333 targets below. */
334 /* MFI bundle of nops. */
335 static unsigned char le_nop[16] =
336 {
337 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
338 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00
339 };
340 /* MFI bundle of nops with stop-bit. */
341 static unsigned char le_nop_stop[16] =
342 {
343 0x0d, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
344 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00
345 };
346
347 /* application registers: */
348
349 #define AR_K0 0
350 #define AR_K7 7
351 #define AR_RSC 16
352 #define AR_BSP 17
353 #define AR_BSPSTORE 18
354 #define AR_RNAT 19
355 #define AR_FCR 21
356 #define AR_EFLAG 24
357 #define AR_CSD 25
358 #define AR_SSD 26
359 #define AR_CFLG 27
360 #define AR_FSR 28
361 #define AR_FIR 29
362 #define AR_FDR 30
363 #define AR_CCV 32
364 #define AR_UNAT 36
365 #define AR_FPSR 40
366 #define AR_ITC 44
367 #define AR_RUC 45
368 #define AR_PFS 64
369 #define AR_LC 65
370 #define AR_EC 66
371
372 static const struct
373 {
374 const char *name;
375 unsigned int regnum;
376 }
377 ar[] =
378 {
379 {"ar.k0", AR_K0}, {"ar.k1", AR_K0 + 1},
380 {"ar.k2", AR_K0 + 2}, {"ar.k3", AR_K0 + 3},
381 {"ar.k4", AR_K0 + 4}, {"ar.k5", AR_K0 + 5},
382 {"ar.k6", AR_K0 + 6}, {"ar.k7", AR_K7},
383 {"ar.rsc", AR_RSC}, {"ar.bsp", AR_BSP},
384 {"ar.bspstore", AR_BSPSTORE}, {"ar.rnat", AR_RNAT},
385 {"ar.fcr", AR_FCR}, {"ar.eflag", AR_EFLAG},
386 {"ar.csd", AR_CSD}, {"ar.ssd", AR_SSD},
387 {"ar.cflg", AR_CFLG}, {"ar.fsr", AR_FSR},
388 {"ar.fir", AR_FIR}, {"ar.fdr", AR_FDR},
389 {"ar.ccv", AR_CCV}, {"ar.unat", AR_UNAT},
390 {"ar.fpsr", AR_FPSR}, {"ar.itc", AR_ITC},
391 {"ar.ruc", AR_RUC}, {"ar.pfs", AR_PFS},
392 {"ar.lc", AR_LC}, {"ar.ec", AR_EC},
393 };
394
395 /* control registers: */
396
397 #define CR_DCR 0
398 #define CR_ITM 1
399 #define CR_IVA 2
400 #define CR_PTA 8
401 #define CR_GPTA 9
402 #define CR_IPSR 16
403 #define CR_ISR 17
404 #define CR_IIP 19
405 #define CR_IFA 20
406 #define CR_ITIR 21
407 #define CR_IIPA 22
408 #define CR_IFS 23
409 #define CR_IIM 24
410 #define CR_IHA 25
411 #define CR_LID 64
412 #define CR_IVR 65
413 #define CR_TPR 66
414 #define CR_EOI 67
415 #define CR_IRR0 68
416 #define CR_IRR3 71
417 #define CR_ITV 72
418 #define CR_PMV 73
419 #define CR_CMCV 74
420 #define CR_LRR0 80
421 #define CR_LRR1 81
422
423 static const struct
424 {
425 const char *name;
426 unsigned int regnum;
427 }
428 cr[] =
429 {
430 {"cr.dcr", CR_DCR},
431 {"cr.itm", CR_ITM},
432 {"cr.iva", CR_IVA},
433 {"cr.pta", CR_PTA},
434 {"cr.gpta", CR_GPTA},
435 {"cr.ipsr", CR_IPSR},
436 {"cr.isr", CR_ISR},
437 {"cr.iip", CR_IIP},
438 {"cr.ifa", CR_IFA},
439 {"cr.itir", CR_ITIR},
440 {"cr.iipa", CR_IIPA},
441 {"cr.ifs", CR_IFS},
442 {"cr.iim", CR_IIM},
443 {"cr.iha", CR_IHA},
444 {"cr.lid", CR_LID},
445 {"cr.ivr", CR_IVR},
446 {"cr.tpr", CR_TPR},
447 {"cr.eoi", CR_EOI},
448 {"cr.irr0", CR_IRR0},
449 {"cr.irr1", CR_IRR0 + 1},
450 {"cr.irr2", CR_IRR0 + 2},
451 {"cr.irr3", CR_IRR3},
452 {"cr.itv", CR_ITV},
453 {"cr.pmv", CR_PMV},
454 {"cr.cmcv", CR_CMCV},
455 {"cr.lrr0", CR_LRR0},
456 {"cr.lrr1", CR_LRR1}
457 };
458
459 #define PSR_MFL 4
460 #define PSR_IC 13
461 #define PSR_DFL 18
462 #define PSR_CPL 32
463
464 static const struct const_desc
465 {
466 const char *name;
467 valueT value;
468 }
469 const_bits[] =
470 {
471 /* PSR constant masks: */
472
473 /* 0: reserved */
474 {"psr.be", ((valueT) 1) << 1},
475 {"psr.up", ((valueT) 1) << 2},
476 {"psr.ac", ((valueT) 1) << 3},
477 {"psr.mfl", ((valueT) 1) << 4},
478 {"psr.mfh", ((valueT) 1) << 5},
479 /* 6-12: reserved */
480 {"psr.ic", ((valueT) 1) << 13},
481 {"psr.i", ((valueT) 1) << 14},
482 {"psr.pk", ((valueT) 1) << 15},
483 /* 16: reserved */
484 {"psr.dt", ((valueT) 1) << 17},
485 {"psr.dfl", ((valueT) 1) << 18},
486 {"psr.dfh", ((valueT) 1) << 19},
487 {"psr.sp", ((valueT) 1) << 20},
488 {"psr.pp", ((valueT) 1) << 21},
489 {"psr.di", ((valueT) 1) << 22},
490 {"psr.si", ((valueT) 1) << 23},
491 {"psr.db", ((valueT) 1) << 24},
492 {"psr.lp", ((valueT) 1) << 25},
493 {"psr.tb", ((valueT) 1) << 26},
494 {"psr.rt", ((valueT) 1) << 27},
495 /* 28-31: reserved */
496 /* 32-33: cpl (current privilege level) */
497 {"psr.is", ((valueT) 1) << 34},
498 {"psr.mc", ((valueT) 1) << 35},
499 {"psr.it", ((valueT) 1) << 36},
500 {"psr.id", ((valueT) 1) << 37},
501 {"psr.da", ((valueT) 1) << 38},
502 {"psr.dd", ((valueT) 1) << 39},
503 {"psr.ss", ((valueT) 1) << 40},
504 /* 41-42: ri (restart instruction) */
505 {"psr.ed", ((valueT) 1) << 43},
506 {"psr.bn", ((valueT) 1) << 44},
507 };
508
509 /* indirect register-sets/memory: */
510
511 static const struct
512 {
513 const char *name;
514 unsigned int regnum;
515 }
516 indirect_reg[] =
517 {
518 { "CPUID", IND_CPUID },
519 { "cpuid", IND_CPUID },
520 { "dbr", IND_DBR },
521 { "dtr", IND_DTR },
522 { "itr", IND_ITR },
523 { "ibr", IND_IBR },
524 { "msr", IND_MSR },
525 { "pkr", IND_PKR },
526 { "pmc", IND_PMC },
527 { "pmd", IND_PMD },
528 { "rr", IND_RR },
529 };
530
531 /* Pseudo functions used to indicate relocation types (these functions
532 start with an at sign (@)). */
533 static struct
534 {
535 const char *name;
536 enum pseudo_type
537 {
538 PSEUDO_FUNC_NONE,
539 PSEUDO_FUNC_RELOC,
540 PSEUDO_FUNC_CONST,
541 PSEUDO_FUNC_REG,
542 PSEUDO_FUNC_FLOAT
543 }
544 type;
545 union
546 {
547 unsigned long ival;
548 symbolS *sym;
549 }
550 u;
551 }
552 pseudo_func[] =
553 {
554 /* reloc pseudo functions (these must come first!): */
555 { "dtpmod", PSEUDO_FUNC_RELOC, { 0 } },
556 { "dtprel", PSEUDO_FUNC_RELOC, { 0 } },
557 { "fptr", PSEUDO_FUNC_RELOC, { 0 } },
558 { "gprel", PSEUDO_FUNC_RELOC, { 0 } },
559 { "ltoff", PSEUDO_FUNC_RELOC, { 0 } },
560 { "ltoffx", PSEUDO_FUNC_RELOC, { 0 } },
561 { "pcrel", PSEUDO_FUNC_RELOC, { 0 } },
562 { "pltoff", PSEUDO_FUNC_RELOC, { 0 } },
563 { "secrel", PSEUDO_FUNC_RELOC, { 0 } },
564 { "segrel", PSEUDO_FUNC_RELOC, { 0 } },
565 { "tprel", PSEUDO_FUNC_RELOC, { 0 } },
566 { "ltv", PSEUDO_FUNC_RELOC, { 0 } },
567 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_FPTR_RELATIVE */
568 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_MODULE */
569 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_RELATIVE */
570 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_TP_RELATIVE */
571 { "iplt", PSEUDO_FUNC_RELOC, { 0 } },
572
573 /* mbtype4 constants: */
574 { "alt", PSEUDO_FUNC_CONST, { 0xa } },
575 { "brcst", PSEUDO_FUNC_CONST, { 0x0 } },
576 { "mix", PSEUDO_FUNC_CONST, { 0x8 } },
577 { "rev", PSEUDO_FUNC_CONST, { 0xb } },
578 { "shuf", PSEUDO_FUNC_CONST, { 0x9 } },
579
580 /* fclass constants: */
581 { "nat", PSEUDO_FUNC_CONST, { 0x100 } },
582 { "qnan", PSEUDO_FUNC_CONST, { 0x080 } },
583 { "snan", PSEUDO_FUNC_CONST, { 0x040 } },
584 { "pos", PSEUDO_FUNC_CONST, { 0x001 } },
585 { "neg", PSEUDO_FUNC_CONST, { 0x002 } },
586 { "zero", PSEUDO_FUNC_CONST, { 0x004 } },
587 { "unorm", PSEUDO_FUNC_CONST, { 0x008 } },
588 { "norm", PSEUDO_FUNC_CONST, { 0x010 } },
589 { "inf", PSEUDO_FUNC_CONST, { 0x020 } },
590
591 { "natval", PSEUDO_FUNC_CONST, { 0x100 } }, /* old usage */
592
593 /* hint constants: */
594 { "pause", PSEUDO_FUNC_CONST, { 0x0 } },
595
596 /* unwind-related constants: */
597 { "svr4", PSEUDO_FUNC_CONST, { ELFOSABI_NONE } },
598 { "hpux", PSEUDO_FUNC_CONST, { ELFOSABI_HPUX } },
599 { "nt", PSEUDO_FUNC_CONST, { 2 } }, /* conflicts w/ELFOSABI_NETBSD */
600 { "linux", PSEUDO_FUNC_CONST, { ELFOSABI_LINUX } },
601 { "freebsd", PSEUDO_FUNC_CONST, { ELFOSABI_FREEBSD } },
602 { "openvms", PSEUDO_FUNC_CONST, { ELFOSABI_OPENVMS } },
603 { "nsk", PSEUDO_FUNC_CONST, { ELFOSABI_NSK } },
604
605 /* unwind-related registers: */
606 { "priunat",PSEUDO_FUNC_REG, { REG_PRIUNAT } }
607 };
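/* For illustration, assuming the usual gas syntax for these pseudo
   functions: they are written as @name(...) in operands, e.g.

       addl r2 = @gprel(sym), gp
       addl r3 = @ltoff(sym), gp

   which select the "gprel" and "ltoff" relocation entries above.  */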
608
609 /* 41-bit nop opcodes (one per unit): */
610 static const bfd_vma nop[IA64_NUM_UNITS] =
611 {
612 0x0000000000LL, /* NIL => break 0 */
613 0x0008000000LL, /* I-unit nop */
614 0x0008000000LL, /* M-unit nop */
615 0x4000000000LL, /* B-unit nop */
616 0x0008000000LL, /* F-unit nop */
617 0x0000000000LL, /* L-"unit" nop immediate */
618 0x0008000000LL, /* X-unit nop */
619 };
620
621 /* Can't be `const' as it's passed to input routines (which have the
622 habit of setting temporary sentinels). */
623 static char special_section_name[][20] =
624 {
625 {".bss"}, {".sbss"}, {".sdata"}, {".rodata"}, {".comment"},
626 {".IA_64.unwind"}, {".IA_64.unwind_info"},
627 {".init_array"}, {".fini_array"}
628 };
629
630 /* The best template for a particular sequence of up to three
631 instructions: */
632 #define N IA64_NUM_TYPES
633 static unsigned char best_template[N][N][N];
634 #undef N
635
636 /* Resource dependencies currently in effect */
637 static struct rsrc {
638 int depind; /* dependency index */
639 const struct ia64_dependency *dependency; /* actual dependency */
640 unsigned specific:1, /* is this a specific bit/regno? */
641 link_to_qp_branch:1; /* will a branch on the same QP clear it?*/
642 int index; /* specific regno/bit within dependency */
643 int note; /* optional qualifying note (0 if none) */
644 #define STATE_NONE 0
645 #define STATE_STOP 1
646 #define STATE_SRLZ 2
647 int insn_srlz; /* current insn serialization state */
648 int data_srlz; /* current data serialization state */
649 int qp_regno; /* qualifying predicate for this usage */
650 char *file; /* what file marked this dependency */
651 unsigned int line; /* what line marked this dependency */
652 struct mem_offset mem_offset; /* optional memory offset hint */
653 enum { CMP_NONE, CMP_OR, CMP_AND } cmp_type; /* OR or AND compare? */
654 int path; /* corresponding code entry index */
655 } *regdeps = NULL;
656 static int regdepslen = 0;
657 static int regdepstotlen = 0;
658 static const char *dv_mode[] = { "RAW", "WAW", "WAR" };
659 static const char *dv_sem[] = { "none", "implied", "impliedf",
660 "data", "instr", "specific", "stop", "other" };
661 static const char *dv_cmp_type[] = { "none", "OR", "AND" };
662
663 /* Current state of PR mutexation */
664 static struct qpmutex {
665 valueT prmask;
666 int path;
667 } *qp_mutexes = NULL; /* QP mutex bitmasks */
668 static int qp_mutexeslen = 0;
669 static int qp_mutexestotlen = 0;
670 static valueT qp_safe_across_calls = 0;
671
672 /* Current state of PR implications */
673 static struct qp_imply {
674 unsigned p1:6;
675 unsigned p2:6;
676 unsigned p2_branched:1;
677 int path;
678 } *qp_implies = NULL;
679 static int qp_implieslen = 0;
680 static int qp_impliestotlen = 0;
681
682 /* Keep track of static GR values so that indirect register usage can
683 sometimes be tracked. */
684 static struct gr {
685 unsigned known:1;
686 int path;
687 valueT value;
688 } gr_values[128] = {
689 {
690 1,
691 #ifdef INT_MAX
692 INT_MAX,
693 #else
694 (((1 << (8 * sizeof(gr_values->path) - 2)) - 1) << 1) + 1,
695 #endif
696 0
697 }
698 };
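/* For illustration: the initializer above marks gr_values[0], i.e. r0,
   which architecturally always reads as zero, as known to hold the value
   0, with path set to INT_MAX (presumably so the entry applies whatever
   the current path is).  */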
699
700 /* Remember the alignment frag. */
701 static fragS *align_frag;
702
703 /* These are the routines required to output the various types of
704 unwind records. */
705
706 /* A slot_number is a frag address plus the slot index (0-2). We use the
707 frag address here so that if there is a section switch in the middle of
708 a function, then instructions emitted to a different section are not
709 counted. Since there may be more than one frag for a function, this
710 means we also need to keep track of which frag this address belongs to
711 so we can compute inter-frag distances. This also nicely solves the
712 problem with nops emitted for align directives, which can't easily be
713 counted, but can easily be derived from frag sizes. */
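/* For illustration: if a bundle starts at frag address 0x1000, its three
   instructions get slot_numbers 0x1000, 0x1001 and 0x1002 (frag address
   plus slot index 0-2), and slot_frag records which frag that address
   belongs to.  */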
714
715 typedef struct unw_rec_list {
716 unwind_record r;
717 unsigned long slot_number;
718 fragS *slot_frag;
719 struct unw_rec_list *next;
720 } unw_rec_list;
721
722 #define SLOT_NUM_NOT_SET (unsigned)-1
723
724 /* Linked list of saved prologue counts. A very poor
725 implementation of a map from label numbers to prologue counts. */
726 typedef struct label_prologue_count
727 {
728 struct label_prologue_count *next;
729 unsigned long label_number;
730 unsigned int prologue_count;
731 } label_prologue_count;
732
733 typedef struct proc_pending
734 {
735 symbolS *sym;
736 struct proc_pending *next;
737 } proc_pending;
738
739 static struct
740 {
741 /* Maintain a list of unwind entries for the current function. */
742 unw_rec_list *list;
743 unw_rec_list *tail;
744
745 /* Any unwind entries that should be attached to the current slot
746 that an insn is being constructed for. */
747 unw_rec_list *current_entry;
748
749 /* These are used to create the unwind table entry for this function. */
750 proc_pending proc_pending;
751 symbolS *info; /* pointer to unwind info */
752 symbolS *personality_routine;
753 segT saved_text_seg;
754 subsegT saved_text_subseg;
755 unsigned int force_unwind_entry : 1; /* force generation of unwind entry? */
756
757 /* TRUE if processing unwind directives in a prologue region. */
758 unsigned int prologue : 1;
759 unsigned int prologue_mask : 4;
760 unsigned int prologue_gr : 7;
761 unsigned int body : 1;
762 unsigned int insn : 1;
763 unsigned int prologue_count; /* number of .prologues seen so far */
764 /* Prologue counts at previous .label_state directives. */
765 struct label_prologue_count * saved_prologue_counts;
766
767 /* List of split up .save-s. */
768 unw_p_record *pending_saves;
769 } unwind;
770
771 /* The input value is a negated offset from psp, and specifies an address
772 psp - offset. An encoded value E stands for the address psp + 16 - (4 * E).
773 Thus we must add 16 and divide by 4 to get the encoded value. */
774
775 #define ENCODED_PSP_OFFSET(OFFSET) (((OFFSET) + 16) / 4)
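/* For illustration: a save location of psp - 32 is passed in as OFFSET == 32
   and encodes as (32 + 16) / 4 == 12; decoding 12 gives psp + 16 - 4 * 12,
   i.e. psp - 32 again.  */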
776
777 typedef void (*vbyte_func) (int, char *, char *);
778
779 /* Forward declarations: */
780 static void dot_alias (int);
781 static int parse_operand (expressionS *, int);
782 static void emit_one_bundle (void);
783 static bfd_reloc_code_real_type ia64_gen_real_reloc_type (struct symbol *,
784 bfd_reloc_code_real_type);
785 static void insn_group_break (int, int, int);
786 static void add_qp_mutex (valueT);
787 static void add_qp_imply (int, int);
788 static void clear_qp_mutex (valueT);
789 static void clear_qp_implies (valueT, valueT);
790 static void print_dependency (const char *, int);
791 static void instruction_serialization (void);
792 static void data_serialization (void);
793 static void output_R3_format (vbyte_func, unw_record_type, unsigned long);
794 static void output_B3_format (vbyte_func, unsigned long, unsigned long);
795 static void output_B4_format (vbyte_func, unw_record_type, unsigned long);
796 static void free_saved_prologue_counts (void);
797
798 /* Determine if application register REGNUM resides only in the integer
799 unit (as opposed to the memory unit). */
800 static int
801 ar_is_only_in_integer_unit (int reg)
802 {
803 reg -= REG_AR;
804 return reg >= 64 && reg <= 111;
805 }
806
807 /* Determine if application register REGNUM resides only in the memory
808 unit (as opposed to the integer unit). */
809 static int
810 ar_is_only_in_memory_unit (int reg)
811 {
812 reg -= REG_AR;
813 return reg >= 0 && reg <= 47;
814 }
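/* For illustration: ar.lc (application register 65) falls in the 64..111
   range and is therefore integer-unit only, while ar.bsp (register 17)
   falls in 0..47 and is memory-unit only.  */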
815
816 /* Switch to section NAME and create section if necessary. It's
817 rather ugly that we have to manipulate input_line_pointer but I
818 don't see any other way to accomplish the same thing without
819 changing obj-elf.c (which may be the Right Thing, in the end). */
820 static void
821 set_section (char *name)
822 {
823 char *saved_input_line_pointer;
824
825 saved_input_line_pointer = input_line_pointer;
826 input_line_pointer = name;
827 obj_elf_section (0);
828 input_line_pointer = saved_input_line_pointer;
829 }
830
831 /* Map 's' to SHF_IA_64_SHORT. */
832
833 int
834 ia64_elf_section_letter (int letter, char **ptr_msg)
835 {
836 if (letter == 's')
837 return SHF_IA_64_SHORT;
838 else if (letter == 'o')
839 return SHF_LINK_ORDER;
840
841 *ptr_msg = _("Bad .section directive: want a,o,s,w,x,M,S,G,T in string");
842 return -1;
843 }
844
845 /* Map SHF_IA_64_SHORT to SEC_SMALL_DATA. */
846
847 flagword
848 ia64_elf_section_flags (flagword flags,
849 int attr,
850 int type ATTRIBUTE_UNUSED)
851 {
852 if (attr & SHF_IA_64_SHORT)
853 flags |= SEC_SMALL_DATA;
854 return flags;
855 }
856
857 int
858 ia64_elf_section_type (const char *str, size_t len)
859 {
860 #define STREQ(s) ((len == sizeof (s) - 1) && (strncmp (str, s, sizeof (s) - 1) == 0))
861
862 if (STREQ (ELF_STRING_ia64_unwind_info))
863 return SHT_PROGBITS;
864
865 if (STREQ (ELF_STRING_ia64_unwind_info_once))
866 return SHT_PROGBITS;
867
868 if (STREQ (ELF_STRING_ia64_unwind))
869 return SHT_IA_64_UNWIND;
870
871 if (STREQ (ELF_STRING_ia64_unwind_once))
872 return SHT_IA_64_UNWIND;
873
874 if (STREQ ("unwind"))
875 return SHT_IA_64_UNWIND;
876
877 return -1;
878 #undef STREQ
879 }
880
881 static unsigned int
882 set_regstack (unsigned int ins,
883 unsigned int locs,
884 unsigned int outs,
885 unsigned int rots)
886 {
887 /* Size of frame. */
888 unsigned int sof;
889
890 sof = ins + locs + outs;
891 if (sof > 96)
892 {
893 as_bad (_("Size of frame exceeds maximum of 96 registers"));
894 return 0;
895 }
896 if (rots > sof)
897 {
898 as_warn (_("Size of rotating registers exceeds frame size"));
899 return 0;
900 }
901 md.in.base = REG_GR + 32;
902 md.loc.base = md.in.base + ins;
903 md.out.base = md.loc.base + locs;
904
905 md.in.num_regs = ins;
906 md.loc.num_regs = locs;
907 md.out.num_regs = outs;
908 md.rot.num_regs = rots;
909 return sof;
910 }
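/* For illustration, assuming the usual ".regstk ins, locs, outs, rots"
   directive feeds this routine: set_regstack (2, 3, 4, 0) yields a frame
   of sof == 9, with the "in" registers starting at r32, "loc" at r34 and
   "out" at r37.  */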
911
912 void
913 ia64_flush_insns (void)
914 {
915 struct label_fix *lfix;
916 segT saved_seg;
917 subsegT saved_subseg;
918 unw_rec_list *ptr;
919 bfd_boolean mark;
920
921 if (!md.last_text_seg)
922 return;
923
924 saved_seg = now_seg;
925 saved_subseg = now_subseg;
926
927 subseg_set (md.last_text_seg, 0);
928
929 while (md.num_slots_in_use > 0)
930 emit_one_bundle (); /* force out queued instructions */
931
932 /* In case there are labels following the last instruction, resolve
933 those now. */
934 mark = FALSE;
935 for (lfix = CURR_SLOT.label_fixups; lfix; lfix = lfix->next)
936 {
937 symbol_set_value_now (lfix->sym);
938 mark |= lfix->dw2_mark_labels;
939 }
940 if (mark)
941 {
942 dwarf2_where (&CURR_SLOT.debug_line);
943 CURR_SLOT.debug_line.flags |= DWARF2_FLAG_BASIC_BLOCK;
944 dwarf2_gen_line_info (frag_now_fix (), &CURR_SLOT.debug_line);
945 dwarf2_consume_line_info ();
946 }
947 CURR_SLOT.label_fixups = 0;
948
949 for (lfix = CURR_SLOT.tag_fixups; lfix; lfix = lfix->next)
950 symbol_set_value_now (lfix->sym);
951 CURR_SLOT.tag_fixups = 0;
952
953 /* In case there are unwind directives following the last instruction,
954 resolve those now. We only handle prologue, body, and endp directives
955 here. Give an error for others. */
956 for (ptr = unwind.current_entry; ptr; ptr = ptr->next)
957 {
958 switch (ptr->r.type)
959 {
960 case prologue:
961 case prologue_gr:
962 case body:
963 case endp:
964 ptr->slot_number = (unsigned long) frag_more (0);
965 ptr->slot_frag = frag_now;
966 break;
967
968 /* Allow any record which doesn't have a "t" field (i.e.,
969 doesn't relate to a particular instruction). */
970 case unwabi:
971 case br_gr:
972 case copy_state:
973 case fr_mem:
974 case frgr_mem:
975 case gr_gr:
976 case gr_mem:
977 case label_state:
978 case rp_br:
979 case spill_base:
980 case spill_mask:
981 /* nothing */
982 break;
983
984 default:
985 as_bad (_("Unwind directive not followed by an instruction."));
986 break;
987 }
988 }
989 unwind.current_entry = NULL;
990
991 subseg_set (saved_seg, saved_subseg);
992
993 if (md.qp.X_op == O_register)
994 as_bad (_("qualifying predicate not followed by instruction"));
995 }
996
997 static void
998 ia64_do_align (int nbytes)
999 {
1000 char *saved_input_line_pointer = input_line_pointer;
1001
1002 input_line_pointer = "";
1003 s_align_bytes (nbytes);
1004 input_line_pointer = saved_input_line_pointer;
1005 }
1006
1007 void
1008 ia64_cons_align (int nbytes)
1009 {
1010 if (md.auto_align)
1011 {
1012 char *saved_input_line_pointer = input_line_pointer;
1013 input_line_pointer = "";
1014 s_align_bytes (nbytes);
1015 input_line_pointer = saved_input_line_pointer;
1016 }
1017 }
1018
1019 /* Output COUNT bytes to a memory location. */
1020 static char *vbyte_mem_ptr = NULL;
1021
1022 static void
1023 output_vbyte_mem (int count, char *ptr, char *comment ATTRIBUTE_UNUSED)
1024 {
1025 int x;
1026 if (vbyte_mem_ptr == NULL)
1027 abort ();
1028
1029 if (count == 0)
1030 return;
1031 for (x = 0; x < count; x++)
1032 *(vbyte_mem_ptr++) = ptr[x];
1033 }
1034
1035 /* Count the number of bytes required for records. */
1036 static int vbyte_count = 0;
1037 static void
1038 count_output (int count,
1039 char *ptr ATTRIBUTE_UNUSED,
1040 char *comment ATTRIBUTE_UNUSED)
1041 {
1042 vbyte_count += count;
1043 }
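/* For illustration: these two vbyte_func callbacks are presumably used as
   a pair, a first pass over the unwind records with count_output to size
   the output area, then a second pass with output_vbyte_mem once
   vbyte_mem_ptr points at a buffer of vbyte_count bytes.  */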
1044
1045 static void
1046 output_R1_format (vbyte_func f, unw_record_type rtype, int rlen)
1047 {
1048 int r = 0;
1049 char byte;
1050 if (rlen > 0x1f)
1051 {
1052 output_R3_format (f, rtype, rlen);
1053 return;
1054 }
1055
1056 if (rtype == body)
1057 r = 1;
1058 else if (rtype != prologue)
1059 as_bad (_("record type is not valid"));
1060
1061 byte = UNW_R1 | (r << 5) | (rlen & 0x1f);
1062 (*f) (1, &byte, NULL);
1063 }
1064
1065 static void
1066 output_R2_format (vbyte_func f, int mask, int grsave, unsigned long rlen)
1067 {
1068 char bytes[20];
1069 int count = 2;
1070 mask = (mask & 0x0f);
1071 grsave = (grsave & 0x7f);
1072
1073 bytes[0] = (UNW_R2 | (mask >> 1));
1074 bytes[1] = (((mask & 0x01) << 7) | grsave);
1075 count += output_leb128 (bytes + 2, rlen, 0);
1076 (*f) (count, bytes, NULL);
1077 }
1078
1079 static void
1080 output_R3_format (vbyte_func f, unw_record_type rtype, unsigned long rlen)
1081 {
1082 int r = 0, count;
1083 char bytes[20];
1084 if (rlen <= 0x1f)
1085 {
1086 output_R1_format (f, rtype, rlen);
1087 return;
1088 }
1089
1090 if (rtype == body)
1091 r = 1;
1092 else if (rtype != prologue)
1093 as_bad (_("record type is not valid"));
1094 bytes[0] = (UNW_R3 | r);
1095 count = output_leb128 (bytes + 1, rlen, 0);
1096 (*f) (count + 1, bytes, NULL);
1097 }
1098
1099 static void
1100 output_P1_format (vbyte_func f, int brmask)
1101 {
1102 char byte;
1103 byte = UNW_P1 | (brmask & 0x1f);
1104 (*f) (1, &byte, NULL);
1105 }
1106
1107 static void
1108 output_P2_format (vbyte_func f, int brmask, int gr)
1109 {
1110 char bytes[2];
1111 brmask = (brmask & 0x1f);
1112 bytes[0] = UNW_P2 | (brmask >> 1);
1113 bytes[1] = (((brmask & 1) << 7) | gr);
1114 (*f) (2, bytes, NULL);
1115 }
1116
1117 static void
1118 output_P3_format (vbyte_func f, unw_record_type rtype, int reg)
1119 {
1120 char bytes[2];
1121 int r = 0;
1122 reg = (reg & 0x7f);
1123 switch (rtype)
1124 {
1125 case psp_gr:
1126 r = 0;
1127 break;
1128 case rp_gr:
1129 r = 1;
1130 break;
1131 case pfs_gr:
1132 r = 2;
1133 break;
1134 case preds_gr:
1135 r = 3;
1136 break;
1137 case unat_gr:
1138 r = 4;
1139 break;
1140 case lc_gr:
1141 r = 5;
1142 break;
1143 case rp_br:
1144 r = 6;
1145 break;
1146 case rnat_gr:
1147 r = 7;
1148 break;
1149 case bsp_gr:
1150 r = 8;
1151 break;
1152 case bspstore_gr:
1153 r = 9;
1154 break;
1155 case fpsr_gr:
1156 r = 10;
1157 break;
1158 case priunat_gr:
1159 r = 11;
1160 break;
1161 default:
1162 as_bad (_("Invalid record type for P3 format."));
1163 }
1164 bytes[0] = (UNW_P3 | (r >> 1));
1165 bytes[1] = (((r & 1) << 7) | reg);
1166 (*f) (2, bytes, NULL);
1167 }
1168
1169 static void
1170 output_P4_format (vbyte_func f, unsigned char *imask, unsigned long imask_size)
1171 {
1172 imask[0] = UNW_P4;
1173 (*f) (imask_size, (char *) imask, NULL);
1174 }
1175
1176 static void
1177 output_P5_format (vbyte_func f, int grmask, unsigned long frmask)
1178 {
1179 char bytes[4];
1180 grmask = (grmask & 0x0f);
1181
1182 bytes[0] = UNW_P5;
1183 bytes[1] = ((grmask << 4) | ((frmask & 0x000f0000) >> 16));
1184 bytes[2] = ((frmask & 0x0000ff00) >> 8);
1185 bytes[3] = (frmask & 0x000000ff);
1186 (*f) (4, bytes, NULL);
1187 }
1188
1189 static void
1190 output_P6_format (vbyte_func f, unw_record_type rtype, int rmask)
1191 {
1192 char byte;
1193 int r = 0;
1194
1195 if (rtype == gr_mem)
1196 r = 1;
1197 else if (rtype != fr_mem)
1198 as_bad (_("Invalid record type for format P6"));
1199 byte = (UNW_P6 | (r << 4) | (rmask & 0x0f));
1200 (*f) (1, &byte, NULL);
1201 }
1202
1203 static void
1204 output_P7_format (vbyte_func f,
1205 unw_record_type rtype,
1206 unsigned long w1,
1207 unsigned long w2)
1208 {
1209 char bytes[20];
1210 int count = 1;
1211 int r = 0;
1212 count += output_leb128 (bytes + 1, w1, 0);
1213 switch (rtype)
1214 {
1215 case mem_stack_f:
1216 r = 0;
1217 count += output_leb128 (bytes + count, w2 >> 4, 0);
1218 break;
1219 case mem_stack_v:
1220 r = 1;
1221 break;
1222 case spill_base:
1223 r = 2;
1224 break;
1225 case psp_sprel:
1226 r = 3;
1227 break;
1228 case rp_when:
1229 r = 4;
1230 break;
1231 case rp_psprel:
1232 r = 5;
1233 break;
1234 case pfs_when:
1235 r = 6;
1236 break;
1237 case pfs_psprel:
1238 r = 7;
1239 break;
1240 case preds_when:
1241 r = 8;
1242 break;
1243 case preds_psprel:
1244 r = 9;
1245 break;
1246 case lc_when:
1247 r = 10;
1248 break;
1249 case lc_psprel:
1250 r = 11;
1251 break;
1252 case unat_when:
1253 r = 12;
1254 break;
1255 case unat_psprel:
1256 r = 13;
1257 break;
1258 case fpsr_when:
1259 r = 14;
1260 break;
1261 case fpsr_psprel:
1262 r = 15;
1263 break;
1264 default:
1265 break;
1266 }
1267 bytes[0] = (UNW_P7 | r);
1268 (*f) (count, bytes, NULL);
1269 }
1270
1271 static void
1272 output_P8_format (vbyte_func f, unw_record_type rtype, unsigned long t)
1273 {
1274 char bytes[20];
1275 int r = 0;
1276 int count = 2;
1277 bytes[0] = UNW_P8;
1278 switch (rtype)
1279 {
1280 case rp_sprel:
1281 r = 1;
1282 break;
1283 case pfs_sprel:
1284 r = 2;
1285 break;
1286 case preds_sprel:
1287 r = 3;
1288 break;
1289 case lc_sprel:
1290 r = 4;
1291 break;
1292 case unat_sprel:
1293 r = 5;
1294 break;
1295 case fpsr_sprel:
1296 r = 6;
1297 break;
1298 case bsp_when:
1299 r = 7;
1300 break;
1301 case bsp_psprel:
1302 r = 8;
1303 break;
1304 case bsp_sprel:
1305 r = 9;
1306 break;
1307 case bspstore_when:
1308 r = 10;
1309 break;
1310 case bspstore_psprel:
1311 r = 11;
1312 break;
1313 case bspstore_sprel:
1314 r = 12;
1315 break;
1316 case rnat_when:
1317 r = 13;
1318 break;
1319 case rnat_psprel:
1320 r = 14;
1321 break;
1322 case rnat_sprel:
1323 r = 15;
1324 break;
1325 case priunat_when_gr:
1326 r = 16;
1327 break;
1328 case priunat_psprel:
1329 r = 17;
1330 break;
1331 case priunat_sprel:
1332 r = 18;
1333 break;
1334 case priunat_when_mem:
1335 r = 19;
1336 break;
1337 default:
1338 break;
1339 }
1340 bytes[1] = r;
1341 count += output_leb128 (bytes + 2, t, 0);
1342 (*f) (count, bytes, NULL);
1343 }
1344
1345 static void
1346 output_P9_format (vbyte_func f, int grmask, int gr)
1347 {
1348 char bytes[3];
1349 bytes[0] = UNW_P9;
1350 bytes[1] = (grmask & 0x0f);
1351 bytes[2] = (gr & 0x7f);
1352 (*f) (3, bytes, NULL);
1353 }
1354
1355 static void
1356 output_P10_format (vbyte_func f, int abi, int context)
1357 {
1358 char bytes[3];
1359 bytes[0] = UNW_P10;
1360 bytes[1] = (abi & 0xff);
1361 bytes[2] = (context & 0xff);
1362 (*f) (3, bytes, NULL);
1363 }
1364
1365 static void
1366 output_B1_format (vbyte_func f, unw_record_type rtype, unsigned long label)
1367 {
1368 char byte;
1369 int r = 0;
1370 if (label > 0x1f)
1371 {
1372 output_B4_format (f, rtype, label);
1373 return;
1374 }
1375 if (rtype == copy_state)
1376 r = 1;
1377 else if (rtype != label_state)
1378 as_bad (_("Invalid record type for format B1"));
1379
1380 byte = (UNW_B1 | (r << 5) | (label & 0x1f));
1381 (*f) (1, &byte, NULL);
1382 }
1383
1384 static void
1385 output_B2_format (vbyte_func f, unsigned long ecount, unsigned long t)
1386 {
1387 char bytes[20];
1388 int count = 1;
1389 if (ecount > 0x1f)
1390 {
1391 output_B3_format (f, ecount, t);
1392 return;
1393 }
1394 bytes[0] = (UNW_B2 | (ecount & 0x1f));
1395 count += output_leb128 (bytes + 1, t, 0);
1396 (*f) (count, bytes, NULL);
1397 }
1398
1399 static void
1400 output_B3_format (vbyte_func f, unsigned long ecount, unsigned long t)
1401 {
1402 char bytes[20];
1403 int count = 1;
1404 if (ecount <= 0x1f)
1405 {
1406 output_B2_format (f, ecount, t);
1407 return;
1408 }
1409 bytes[0] = UNW_B3;
1410 count += output_leb128 (bytes + 1, t, 0);
1411 count += output_leb128 (bytes + count, ecount, 0);
1412 (*f) (count, bytes, NULL);
1413 }
1414
1415 static void
1416 output_B4_format (vbyte_func f, unw_record_type rtype, unsigned long label)
1417 {
1418 char bytes[20];
1419 int r = 0;
1420 int count = 1;
1421 if (label <= 0x1f)
1422 {
1423 output_B1_format (f, rtype, label);
1424 return;
1425 }
1426
1427 if (rtype == copy_state)
1428 r = 1;
1429 else if (rtype != label_state)
1430 as_bad (_("Invalid record type for format B4"));
1431
1432 bytes[0] = (UNW_B4 | (r << 3));
1433 count += output_leb128 (bytes + 1, label, 0);
1434 (*f) (count, bytes, NULL);
1435 }
1436
1437 static char
1438 format_ab_reg (int ab, int reg)
1439 {
1440 int ret;
1441 ab = (ab & 3);
1442 reg = (reg & 0x1f);
1443 ret = (ab << 5) | reg;
1444 return ret;
1445 }
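/* For illustration: format_ab_reg (2, 7) packs the two ab bits above the
   five-bit register number, giving (2 << 5) | 7 == 0x47.  */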
1446
1447 static void
1448 output_X1_format (vbyte_func f,
1449 unw_record_type rtype,
1450 int ab,
1451 int reg,
1452 unsigned long t,
1453 unsigned long w1)
1454 {
1455 char bytes[20];
1456 int r = 0;
1457 int count = 2;
1458 bytes[0] = UNW_X1;
1459
1460 if (rtype == spill_sprel)
1461 r = 1;
1462 else if (rtype != spill_psprel)
1463 as_bad (_("Invalid record type for format X1"));
1464 bytes[1] = ((r << 7) | format_ab_reg (ab, reg));
1465 count += output_leb128 (bytes + 2, t, 0);
1466 count += output_leb128 (bytes + count, w1, 0);
1467 (*f) (count, bytes, NULL);
1468 }
1469
1470 static void
1471 output_X2_format (vbyte_func f,
1472 int ab,
1473 int reg,
1474 int x,
1475 int y,
1476 int treg,
1477 unsigned long t)
1478 {
1479 char bytes[20];
1480 int count = 3;
1481 bytes[0] = UNW_X2;
1482 bytes[1] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1483 bytes[2] = (((y & 1) << 7) | (treg & 0x7f));
1484 count += output_leb128 (bytes + 3, t, 0);
1485 (*f) (count, bytes, NULL);
1486 }
1487
1488 static void
1489 output_X3_format (vbyte_func f,
1490 unw_record_type rtype,
1491 int qp,
1492 int ab,
1493 int reg,
1494 unsigned long t,
1495 unsigned long w1)
1496 {
1497 char bytes[20];
1498 int r = 0;
1499 int count = 3;
1500 bytes[0] = UNW_X3;
1501
1502 if (rtype == spill_sprel_p)
1503 r = 1;
1504 else if (rtype != spill_psprel_p)
1505 as_bad (_("Invalid record type for format X3"));
1506 bytes[1] = ((r << 7) | (qp & 0x3f));
1507 bytes[2] = format_ab_reg (ab, reg);
1508 count += output_leb128 (bytes + 3, t, 0);
1509 count += output_leb128 (bytes + count, w1, 0);
1510 (*f) (count, bytes, NULL);
1511 }
1512
1513 static void
1514 output_X4_format (vbyte_func f,
1515 int qp,
1516 int ab,
1517 int reg,
1518 int x,
1519 int y,
1520 int treg,
1521 unsigned long t)
1522 {
1523 char bytes[20];
1524 int count = 4;
1525 bytes[0] = UNW_X4;
1526 bytes[1] = (qp & 0x3f);
1527 bytes[2] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1528 bytes[3] = (((y & 1) << 7) | (treg & 0x7f));
1529 count += output_leb128 (bytes + 4, t, 0);
1530 (*f) (count, bytes, NULL);
1531 }
1532
1533 /* This function checks whether there are any outstanding .save-s and
1534 discards them if so. */
1535
1536 static void
1537 check_pending_save (void)
1538 {
1539 if (unwind.pending_saves)
1540 {
1541 unw_rec_list *cur, *prev;
1542
1543 as_warn (_("Previous .save incomplete"));
1544 for (cur = unwind.list, prev = NULL; cur; )
1545 if (&cur->r.record.p == unwind.pending_saves)
1546 {
1547 if (prev)
1548 prev->next = cur->next;
1549 else
1550 unwind.list = cur->next;
1551 if (cur == unwind.tail)
1552 unwind.tail = prev;
1553 if (cur == unwind.current_entry)
1554 unwind.current_entry = cur->next;
1555 /* Don't free the first discarded record, it's being used as
1556 terminator for (currently) br_gr and gr_gr processing, and
1557 also prevents leaving a dangling pointer to it in its
1558 predecessor. */
1559 cur->r.record.p.grmask = 0;
1560 cur->r.record.p.brmask = 0;
1561 cur->r.record.p.frmask = 0;
1562 prev = cur->r.record.p.next;
1563 cur->r.record.p.next = NULL;
1564 cur = prev;
1565 break;
1566 }
1567 else
1568 {
1569 prev = cur;
1570 cur = cur->next;
1571 }
1572 while (cur)
1573 {
1574 prev = cur;
1575 cur = cur->r.record.p.next;
1576 free (prev);
1577 }
1578 unwind.pending_saves = NULL;
1579 }
1580 }
1581
1582 /* This function allocates a record list structure, and initializes fields. */
1583
1584 static unw_rec_list *
1585 alloc_record (unw_record_type t)
1586 {
1587 unw_rec_list *ptr;
1588 ptr = xmalloc (sizeof (*ptr));
1589 memset (ptr, 0, sizeof (*ptr));
1590 ptr->slot_number = SLOT_NUM_NOT_SET;
1591 ptr->r.type = t;
1592 return ptr;
1593 }
1594
1595 /* Dummy unwind record used for calculating the length of the last prologue or
1596 body region. */
1597
1598 static unw_rec_list *
1599 output_endp (void)
1600 {
1601 unw_rec_list *ptr = alloc_record (endp);
1602 return ptr;
1603 }
1604
1605 static unw_rec_list *
1606 output_prologue (void)
1607 {
1608 unw_rec_list *ptr = alloc_record (prologue);
1609 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1610 return ptr;
1611 }
1612
1613 static unw_rec_list *
1614 output_prologue_gr (unsigned int saved_mask, unsigned int reg)
1615 {
1616 unw_rec_list *ptr = alloc_record (prologue_gr);
1617 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1618 ptr->r.record.r.grmask = saved_mask;
1619 ptr->r.record.r.grsave = reg;
1620 return ptr;
1621 }
1622
1623 static unw_rec_list *
1624 output_body (void)
1625 {
1626 unw_rec_list *ptr = alloc_record (body);
1627 return ptr;
1628 }
1629
1630 static unw_rec_list *
1631 output_mem_stack_f (unsigned int size)
1632 {
1633 unw_rec_list *ptr = alloc_record (mem_stack_f);
1634 ptr->r.record.p.size = size;
1635 return ptr;
1636 }
1637
1638 static unw_rec_list *
1639 output_mem_stack_v (void)
1640 {
1641 unw_rec_list *ptr = alloc_record (mem_stack_v);
1642 return ptr;
1643 }
1644
1645 static unw_rec_list *
1646 output_psp_gr (unsigned int gr)
1647 {
1648 unw_rec_list *ptr = alloc_record (psp_gr);
1649 ptr->r.record.p.r.gr = gr;
1650 return ptr;
1651 }
1652
1653 static unw_rec_list *
1654 output_psp_sprel (unsigned int offset)
1655 {
1656 unw_rec_list *ptr = alloc_record (psp_sprel);
1657 ptr->r.record.p.off.sp = offset / 4;
1658 return ptr;
1659 }
1660
1661 static unw_rec_list *
1662 output_rp_when (void)
1663 {
1664 unw_rec_list *ptr = alloc_record (rp_when);
1665 return ptr;
1666 }
1667
1668 static unw_rec_list *
1669 output_rp_gr (unsigned int gr)
1670 {
1671 unw_rec_list *ptr = alloc_record (rp_gr);
1672 ptr->r.record.p.r.gr = gr;
1673 return ptr;
1674 }
1675
1676 static unw_rec_list *
1677 output_rp_br (unsigned int br)
1678 {
1679 unw_rec_list *ptr = alloc_record (rp_br);
1680 ptr->r.record.p.r.br = br;
1681 return ptr;
1682 }
1683
1684 static unw_rec_list *
1685 output_rp_psprel (unsigned int offset)
1686 {
1687 unw_rec_list *ptr = alloc_record (rp_psprel);
1688 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1689 return ptr;
1690 }
1691
1692 static unw_rec_list *
1693 output_rp_sprel (unsigned int offset)
1694 {
1695 unw_rec_list *ptr = alloc_record (rp_sprel);
1696 ptr->r.record.p.off.sp = offset / 4;
1697 return ptr;
1698 }
1699
1700 static unw_rec_list *
1701 output_pfs_when (void)
1702 {
1703 unw_rec_list *ptr = alloc_record (pfs_when);
1704 return ptr;
1705 }
1706
1707 static unw_rec_list *
1708 output_pfs_gr (unsigned int gr)
1709 {
1710 unw_rec_list *ptr = alloc_record (pfs_gr);
1711 ptr->r.record.p.r.gr = gr;
1712 return ptr;
1713 }
1714
1715 static unw_rec_list *
1716 output_pfs_psprel (unsigned int offset)
1717 {
1718 unw_rec_list *ptr = alloc_record (pfs_psprel);
1719 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1720 return ptr;
1721 }
1722
1723 static unw_rec_list *
1724 output_pfs_sprel (unsigned int offset)
1725 {
1726 unw_rec_list *ptr = alloc_record (pfs_sprel);
1727 ptr->r.record.p.off.sp = offset / 4;
1728 return ptr;
1729 }
1730
1731 static unw_rec_list *
1732 output_preds_when (void)
1733 {
1734 unw_rec_list *ptr = alloc_record (preds_when);
1735 return ptr;
1736 }
1737
1738 static unw_rec_list *
1739 output_preds_gr (unsigned int gr)
1740 {
1741 unw_rec_list *ptr = alloc_record (preds_gr);
1742 ptr->r.record.p.r.gr = gr;
1743 return ptr;
1744 }
1745
1746 static unw_rec_list *
1747 output_preds_psprel (unsigned int offset)
1748 {
1749 unw_rec_list *ptr = alloc_record (preds_psprel);
1750 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1751 return ptr;
1752 }
1753
1754 static unw_rec_list *
1755 output_preds_sprel (unsigned int offset)
1756 {
1757 unw_rec_list *ptr = alloc_record (preds_sprel);
1758 ptr->r.record.p.off.sp = offset / 4;
1759 return ptr;
1760 }
1761
1762 static unw_rec_list *
1763 output_fr_mem (unsigned int mask)
1764 {
1765 unw_rec_list *ptr = alloc_record (fr_mem);
1766 unw_rec_list *cur = ptr;
1767
1768 ptr->r.record.p.frmask = mask;
1769 unwind.pending_saves = &ptr->r.record.p;
1770 for (;;)
1771 {
1772 unw_rec_list *prev = cur;
1773
1774 /* Clear least significant set bit. */
1775 mask &= ~(mask & (~mask + 1));
1776 if (!mask)
1777 return ptr;
1778 cur = alloc_record (fr_mem);
1779 cur->r.record.p.frmask = mask;
1780 /* Retain only least significant bit. */
1781 prev->r.record.p.frmask ^= mask;
1782 prev->r.record.p.next = cur;
1783 }
1784 }
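/* For illustration: output_fr_mem (0x0a) builds two chained fr_mem
   records, the head with frmask 0x02 and its successor with frmask 0x08,
   since the loop peels off one set bit per record (mask & (~mask + 1)
   isolates the lowest set bit).  unwind.pending_saves is left pointing at
   the head so a later directive can complete or discard the chain (see
   check_pending_save).  */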
1785
1786 static unw_rec_list *
1787 output_frgr_mem (unsigned int gr_mask, unsigned int fr_mask)
1788 {
1789 unw_rec_list *ptr = alloc_record (frgr_mem);
1790 unw_rec_list *cur = ptr;
1791
1792 unwind.pending_saves = &cur->r.record.p;
1793 cur->r.record.p.frmask = fr_mask;
1794 while (fr_mask)
1795 {
1796 unw_rec_list *prev = cur;
1797
1798 /* Clear least significant set bit. */
1799 fr_mask &= ~(fr_mask & (~fr_mask + 1));
1800 if (!gr_mask && !fr_mask)
1801 return ptr;
1802 cur = alloc_record (frgr_mem);
1803 cur->r.record.p.frmask = fr_mask;
1804 /* Retain only least significant bit. */
1805 prev->r.record.p.frmask ^= fr_mask;
1806 prev->r.record.p.next = cur;
1807 }
1808 cur->r.record.p.grmask = gr_mask;
1809 for (;;)
1810 {
1811 unw_rec_list *prev = cur;
1812
1813 /* Clear least significant set bit. */
1814 gr_mask &= ~(gr_mask & (~gr_mask + 1));
1815 if (!gr_mask)
1816 return ptr;
1817 cur = alloc_record (frgr_mem);
1818 cur->r.record.p.grmask = gr_mask;
1819 /* Retain only least significant bit. */
1820 prev->r.record.p.grmask ^= gr_mask;
1821 prev->r.record.p.next = cur;
1822 }
1823 }
1824
1825 static unw_rec_list *
1826 output_gr_gr (unsigned int mask, unsigned int reg)
1827 {
1828 unw_rec_list *ptr = alloc_record (gr_gr);
1829 unw_rec_list *cur = ptr;
1830
1831 ptr->r.record.p.grmask = mask;
1832 ptr->r.record.p.r.gr = reg;
1833 unwind.pending_saves = &ptr->r.record.p;
1834 for (;;)
1835 {
1836 unw_rec_list *prev = cur;
1837
1838 /* Clear least significant set bit. */
1839 mask &= ~(mask & (~mask + 1));
1840 if (!mask)
1841 return ptr;
1842 cur = alloc_record (gr_gr);
1843 cur->r.record.p.grmask = mask;
1844 /* Indicate this record shouldn't be output. */
1845 cur->r.record.p.r.gr = REG_NUM;
1846 /* Retain only least significant bit. */
1847 prev->r.record.p.grmask ^= mask;
1848 prev->r.record.p.next = cur;
1849 }
1850 }
1851
1852 static unw_rec_list *
1853 output_gr_mem (unsigned int mask)
1854 {
1855 unw_rec_list *ptr = alloc_record (gr_mem);
1856 unw_rec_list *cur = ptr;
1857
1858 ptr->r.record.p.grmask = mask;
1859 unwind.pending_saves = &ptr->r.record.p;
1860 for (;;)
1861 {
1862 unw_rec_list *prev = cur;
1863
1864 /* Clear least significant set bit. */
1865 mask &= ~(mask & (~mask + 1));
1866 if (!mask)
1867 return ptr;
1868 cur = alloc_record (gr_mem);
1869 cur->r.record.p.grmask = mask;
1870 /* Retain only least significant bit. */
1871 prev->r.record.p.grmask ^= mask;
1872 prev->r.record.p.next = cur;
1873 }
1874 }
1875
1876 static unw_rec_list *
1877 output_br_mem (unsigned int mask)
1878 {
1879 unw_rec_list *ptr = alloc_record (br_mem);
1880 unw_rec_list *cur = ptr;
1881
1882 ptr->r.record.p.brmask = mask;
1883 unwind.pending_saves = &ptr->r.record.p;
1884 for (;;)
1885 {
1886 unw_rec_list *prev = cur;
1887
1888 /* Clear least significant set bit. */
1889 mask &= ~(mask & (~mask + 1));
1890 if (!mask)
1891 return ptr;
1892 cur = alloc_record (br_mem);
1893 cur->r.record.p.brmask = mask;
1894 /* Retain only least significant bit. */
1895 prev->r.record.p.brmask ^= mask;
1896 prev->r.record.p.next = cur;
1897 }
1898 }
1899
1900 static unw_rec_list *
1901 output_br_gr (unsigned int mask, unsigned int reg)
1902 {
1903 unw_rec_list *ptr = alloc_record (br_gr);
1904 unw_rec_list *cur = ptr;
1905
1906 ptr->r.record.p.brmask = mask;
1907 ptr->r.record.p.r.gr = reg;
1908 unwind.pending_saves = &ptr->r.record.p;
1909 for (;;)
1910 {
1911 unw_rec_list *prev = cur;
1912
1913 /* Clear least significant set bit. */
1914 mask &= ~(mask & (~mask + 1));
1915 if (!mask)
1916 return ptr;
1917 cur = alloc_record (br_gr);
1918 cur->r.record.p.brmask = mask;
1919 /* Indicate this record shouldn't be output. */
1920 cur->r.record.p.r.gr = REG_NUM;
1921 /* Retain only least significant bit. */
1922 prev->r.record.p.brmask ^= mask;
1923 prev->r.record.p.next = cur;
1924 }
1925 }
1926
1927 static unw_rec_list *
1928 output_spill_base (unsigned int offset)
1929 {
1930 unw_rec_list *ptr = alloc_record (spill_base);
1931 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1932 return ptr;
1933 }
1934
1935 static unw_rec_list *
1936 output_unat_when (void)
1937 {
1938 unw_rec_list *ptr = alloc_record (unat_when);
1939 return ptr;
1940 }
1941
1942 static unw_rec_list *
1943 output_unat_gr (unsigned int gr)
1944 {
1945 unw_rec_list *ptr = alloc_record (unat_gr);
1946 ptr->r.record.p.r.gr = gr;
1947 return ptr;
1948 }
1949
1950 static unw_rec_list *
1951 output_unat_psprel (unsigned int offset)
1952 {
1953 unw_rec_list *ptr = alloc_record (unat_psprel);
1954 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1955 return ptr;
1956 }
1957
1958 static unw_rec_list *
1959 output_unat_sprel (unsigned int offset)
1960 {
1961 unw_rec_list *ptr = alloc_record (unat_sprel);
1962 ptr->r.record.p.off.sp = offset / 4;
1963 return ptr;
1964 }
1965
1966 static unw_rec_list *
1967 output_lc_when (void)
1968 {
1969 unw_rec_list *ptr = alloc_record (lc_when);
1970 return ptr;
1971 }
1972
1973 static unw_rec_list *
1974 output_lc_gr (unsigned int gr)
1975 {
1976 unw_rec_list *ptr = alloc_record (lc_gr);
1977 ptr->r.record.p.r.gr = gr;
1978 return ptr;
1979 }
1980
1981 static unw_rec_list *
1982 output_lc_psprel (unsigned int offset)
1983 {
1984 unw_rec_list *ptr = alloc_record (lc_psprel);
1985 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1986 return ptr;
1987 }
1988
1989 static unw_rec_list *
1990 output_lc_sprel (unsigned int offset)
1991 {
1992 unw_rec_list *ptr = alloc_record (lc_sprel);
1993 ptr->r.record.p.off.sp = offset / 4;
1994 return ptr;
1995 }
1996
1997 static unw_rec_list *
1998 output_fpsr_when (void)
1999 {
2000 unw_rec_list *ptr = alloc_record (fpsr_when);
2001 return ptr;
2002 }
2003
2004 static unw_rec_list *
2005 output_fpsr_gr (unsigned int gr)
2006 {
2007 unw_rec_list *ptr = alloc_record (fpsr_gr);
2008 ptr->r.record.p.r.gr = gr;
2009 return ptr;
2010 }
2011
2012 static unw_rec_list *
2013 output_fpsr_psprel (unsigned int offset)
2014 {
2015 unw_rec_list *ptr = alloc_record (fpsr_psprel);
2016 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2017 return ptr;
2018 }
2019
2020 static unw_rec_list *
2021 output_fpsr_sprel (unsigned int offset)
2022 {
2023 unw_rec_list *ptr = alloc_record (fpsr_sprel);
2024 ptr->r.record.p.off.sp = offset / 4;
2025 return ptr;
2026 }
2027
2028 static unw_rec_list *
2029 output_priunat_when_gr (void)
2030 {
2031 unw_rec_list *ptr = alloc_record (priunat_when_gr);
2032 return ptr;
2033 }
2034
2035 static unw_rec_list *
2036 output_priunat_when_mem (void)
2037 {
2038 unw_rec_list *ptr = alloc_record (priunat_when_mem);
2039 return ptr;
2040 }
2041
2042 static unw_rec_list *
2043 output_priunat_gr (unsigned int gr)
2044 {
2045 unw_rec_list *ptr = alloc_record (priunat_gr);
2046 ptr->r.record.p.r.gr = gr;
2047 return ptr;
2048 }
2049
2050 static unw_rec_list *
2051 output_priunat_psprel (unsigned int offset)
2052 {
2053 unw_rec_list *ptr = alloc_record (priunat_psprel);
2054 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2055 return ptr;
2056 }
2057
2058 static unw_rec_list *
2059 output_priunat_sprel (unsigned int offset)
2060 {
2061 unw_rec_list *ptr = alloc_record (priunat_sprel);
2062 ptr->r.record.p.off.sp = offset / 4;
2063 return ptr;
2064 }
2065
2066 static unw_rec_list *
2067 output_bsp_when (void)
2068 {
2069 unw_rec_list *ptr = alloc_record (bsp_when);
2070 return ptr;
2071 }
2072
2073 static unw_rec_list *
2074 output_bsp_gr (unsigned int gr)
2075 {
2076 unw_rec_list *ptr = alloc_record (bsp_gr);
2077 ptr->r.record.p.r.gr = gr;
2078 return ptr;
2079 }
2080
2081 static unw_rec_list *
2082 output_bsp_psprel (unsigned int offset)
2083 {
2084 unw_rec_list *ptr = alloc_record (bsp_psprel);
2085 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2086 return ptr;
2087 }
2088
2089 static unw_rec_list *
2090 output_bsp_sprel (unsigned int offset)
2091 {
2092 unw_rec_list *ptr = alloc_record (bsp_sprel);
2093 ptr->r.record.p.off.sp = offset / 4;
2094 return ptr;
2095 }
2096
2097 static unw_rec_list *
2098 output_bspstore_when (void)
2099 {
2100 unw_rec_list *ptr = alloc_record (bspstore_when);
2101 return ptr;
2102 }
2103
2104 static unw_rec_list *
2105 output_bspstore_gr (unsigned int gr)
2106 {
2107 unw_rec_list *ptr = alloc_record (bspstore_gr);
2108 ptr->r.record.p.r.gr = gr;
2109 return ptr;
2110 }
2111
2112 static unw_rec_list *
2113 output_bspstore_psprel (unsigned int offset)
2114 {
2115 unw_rec_list *ptr = alloc_record (bspstore_psprel);
2116 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2117 return ptr;
2118 }
2119
2120 static unw_rec_list *
2121 output_bspstore_sprel (unsigned int offset)
2122 {
2123 unw_rec_list *ptr = alloc_record (bspstore_sprel);
2124 ptr->r.record.p.off.sp = offset / 4;
2125 return ptr;
2126 }
2127
2128 static unw_rec_list *
2129 output_rnat_when (void)
2130 {
2131 unw_rec_list *ptr = alloc_record (rnat_when);
2132 return ptr;
2133 }
2134
2135 static unw_rec_list *
2136 output_rnat_gr (unsigned int gr)
2137 {
2138 unw_rec_list *ptr = alloc_record (rnat_gr);
2139 ptr->r.record.p.r.gr = gr;
2140 return ptr;
2141 }
2142
2143 static unw_rec_list *
2144 output_rnat_psprel (unsigned int offset)
2145 {
2146 unw_rec_list *ptr = alloc_record (rnat_psprel);
2147 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2148 return ptr;
2149 }
2150
2151 static unw_rec_list *
2152 output_rnat_sprel (unsigned int offset)
2153 {
2154 unw_rec_list *ptr = alloc_record (rnat_sprel);
2155 ptr->r.record.p.off.sp = offset / 4;
2156 return ptr;
2157 }
2158
2159 static unw_rec_list *
2160 output_unwabi (unsigned long abi, unsigned long context)
2161 {
2162 unw_rec_list *ptr = alloc_record (unwabi);
2163 ptr->r.record.p.abi = abi;
2164 ptr->r.record.p.context = context;
2165 return ptr;
2166 }
2167
2168 static unw_rec_list *
2169 output_epilogue (unsigned long ecount)
2170 {
2171 unw_rec_list *ptr = alloc_record (epilogue);
2172 ptr->r.record.b.ecount = ecount;
2173 return ptr;
2174 }
2175
2176 static unw_rec_list *
2177 output_label_state (unsigned long label)
2178 {
2179 unw_rec_list *ptr = alloc_record (label_state);
2180 ptr->r.record.b.label = label;
2181 return ptr;
2182 }
2183
2184 static unw_rec_list *
2185 output_copy_state (unsigned long label)
2186 {
2187 unw_rec_list *ptr = alloc_record (copy_state);
2188 ptr->r.record.b.label = label;
2189 return ptr;
2190 }
2191
2192 static unw_rec_list *
2193 output_spill_psprel (unsigned int ab,
2194 unsigned int reg,
2195 unsigned int offset,
2196 unsigned int predicate)
2197 {
2198 unw_rec_list *ptr = alloc_record (predicate ? spill_psprel_p : spill_psprel);
2199 ptr->r.record.x.ab = ab;
2200 ptr->r.record.x.reg = reg;
2201 ptr->r.record.x.where.pspoff = ENCODED_PSP_OFFSET (offset);
2202 ptr->r.record.x.qp = predicate;
2203 return ptr;
2204 }
2205
2206 static unw_rec_list *
2207 output_spill_sprel (unsigned int ab,
2208 unsigned int reg,
2209 unsigned int offset,
2210 unsigned int predicate)
2211 {
2212 unw_rec_list *ptr = alloc_record (predicate ? spill_sprel_p : spill_sprel);
2213 ptr->r.record.x.ab = ab;
2214 ptr->r.record.x.reg = reg;
2215 ptr->r.record.x.where.spoff = offset / 4;
2216 ptr->r.record.x.qp = predicate;
2217 return ptr;
2218 }
2219
2220 static unw_rec_list *
2221 output_spill_reg (unsigned int ab,
2222 unsigned int reg,
2223 unsigned int targ_reg,
2224 unsigned int xy,
2225 unsigned int predicate)
2226 {
2227 unw_rec_list *ptr = alloc_record (predicate ? spill_reg_p : spill_reg);
2228 ptr->r.record.x.ab = ab;
2229 ptr->r.record.x.reg = reg;
2230 ptr->r.record.x.where.reg = targ_reg;
2231 ptr->r.record.x.xy = xy;
2232 ptr->r.record.x.qp = predicate;
2233 return ptr;
2234 }
2235
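/* A note on the offset encodings used by the builders above
   (illustrative, not taken from the original sources): sp-relative
   spill locations are stored in 4-byte word units, hence the
   "offset / 4" in the *_sprel helpers, while psp-relative locations
   are converted by the ENCODED_PSP_OFFSET macro defined earlier in
   this file.  For example, a hypothetical

       .savesp ar.rnat, 16

   is routed (by dot_savemem below) to output_rnat_sprel (16), which
   records off.sp = 4.  */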
2236 /* Given a unw_rec_list, emit it in the correct descriptor format
2237 using the specified vbyte output function. */
2238
2239 static void
2240 process_one_record (unw_rec_list *ptr, vbyte_func f)
2241 {
2242 unsigned int fr_mask, gr_mask;
2243
2244 switch (ptr->r.type)
2245 {
2246 /* This is a dummy record that takes up no space in the output. */
2247 case endp:
2248 break;
2249
2250 case gr_mem:
2251 case fr_mem:
2252 case br_mem:
2253 case frgr_mem:
2254 /* These are taken care of by prologue/prologue_gr. */
2255 break;
2256
2257 case prologue_gr:
2258 case prologue:
2259 if (ptr->r.type == prologue_gr)
2260 output_R2_format (f, ptr->r.record.r.grmask,
2261 ptr->r.record.r.grsave, ptr->r.record.r.rlen);
2262 else
2263 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2264
2265 /* Output descriptor(s) for union of register spills (if any). */
2266 gr_mask = ptr->r.record.r.mask.gr_mem;
2267 fr_mask = ptr->r.record.r.mask.fr_mem;
2268 if (fr_mask)
2269 {
2270 if ((fr_mask & ~0xfUL) == 0)
2271 output_P6_format (f, fr_mem, fr_mask);
2272 else
2273 {
2274 output_P5_format (f, gr_mask, fr_mask);
2275 gr_mask = 0;
2276 }
2277 }
2278 if (gr_mask)
2279 output_P6_format (f, gr_mem, gr_mask);
2280 if (ptr->r.record.r.mask.br_mem)
2281 output_P1_format (f, ptr->r.record.r.mask.br_mem);
2282
2283 /* output imask descriptor if necessary: */
2284 if (ptr->r.record.r.mask.i)
2285 output_P4_format (f, ptr->r.record.r.mask.i,
2286 ptr->r.record.r.imask_size);
2287 break;
2288
2289 case body:
2290 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2291 break;
2292 case mem_stack_f:
2293 case mem_stack_v:
2294 output_P7_format (f, ptr->r.type, ptr->r.record.p.t,
2295 ptr->r.record.p.size);
2296 break;
2297 case psp_gr:
2298 case rp_gr:
2299 case pfs_gr:
2300 case preds_gr:
2301 case unat_gr:
2302 case lc_gr:
2303 case fpsr_gr:
2304 case priunat_gr:
2305 case bsp_gr:
2306 case bspstore_gr:
2307 case rnat_gr:
2308 output_P3_format (f, ptr->r.type, ptr->r.record.p.r.gr);
2309 break;
2310 case rp_br:
2311 output_P3_format (f, rp_br, ptr->r.record.p.r.br);
2312 break;
2313 case psp_sprel:
2314 output_P7_format (f, psp_sprel, ptr->r.record.p.off.sp, 0);
2315 break;
2316 case rp_when:
2317 case pfs_when:
2318 case preds_when:
2319 case unat_when:
2320 case lc_when:
2321 case fpsr_when:
2322 output_P7_format (f, ptr->r.type, ptr->r.record.p.t, 0);
2323 break;
2324 case rp_psprel:
2325 case pfs_psprel:
2326 case preds_psprel:
2327 case unat_psprel:
2328 case lc_psprel:
2329 case fpsr_psprel:
2330 case spill_base:
2331 output_P7_format (f, ptr->r.type, ptr->r.record.p.off.psp, 0);
2332 break;
2333 case rp_sprel:
2334 case pfs_sprel:
2335 case preds_sprel:
2336 case unat_sprel:
2337 case lc_sprel:
2338 case fpsr_sprel:
2339 case priunat_sprel:
2340 case bsp_sprel:
2341 case bspstore_sprel:
2342 case rnat_sprel:
2343 output_P8_format (f, ptr->r.type, ptr->r.record.p.off.sp);
2344 break;
2345 case gr_gr:
2346 if (ptr->r.record.p.r.gr < REG_NUM)
2347 {
2348 const unw_rec_list *cur = ptr;
2349
2350 gr_mask = cur->r.record.p.grmask;
2351 while ((cur = cur->r.record.p.next) != NULL)
2352 gr_mask |= cur->r.record.p.grmask;
2353 output_P9_format (f, gr_mask, ptr->r.record.p.r.gr);
2354 }
2355 break;
2356 case br_gr:
2357 if (ptr->r.record.p.r.gr < REG_NUM)
2358 {
2359 const unw_rec_list *cur = ptr;
2360
2361 gr_mask = cur->r.record.p.brmask;
2362 while ((cur = cur->r.record.p.next) != NULL)
2363 gr_mask |= cur->r.record.p.brmask;
2364 output_P2_format (f, gr_mask, ptr->r.record.p.r.gr);
2365 }
2366 break;
2367 case spill_mask:
2368 as_bad (_("spill_mask record unimplemented."));
2369 break;
2370 case priunat_when_gr:
2371 case priunat_when_mem:
2372 case bsp_when:
2373 case bspstore_when:
2374 case rnat_when:
2375 output_P8_format (f, ptr->r.type, ptr->r.record.p.t);
2376 break;
2377 case priunat_psprel:
2378 case bsp_psprel:
2379 case bspstore_psprel:
2380 case rnat_psprel:
2381 output_P8_format (f, ptr->r.type, ptr->r.record.p.off.psp);
2382 break;
2383 case unwabi:
2384 output_P10_format (f, ptr->r.record.p.abi, ptr->r.record.p.context);
2385 break;
2386 case epilogue:
2387 output_B3_format (f, ptr->r.record.b.ecount, ptr->r.record.b.t);
2388 break;
2389 case label_state:
2390 case copy_state:
2391 output_B4_format (f, ptr->r.type, ptr->r.record.b.label);
2392 break;
2393 case spill_psprel:
2394 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2395 ptr->r.record.x.reg, ptr->r.record.x.t,
2396 ptr->r.record.x.where.pspoff);
2397 break;
2398 case spill_sprel:
2399 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2400 ptr->r.record.x.reg, ptr->r.record.x.t,
2401 ptr->r.record.x.where.spoff);
2402 break;
2403 case spill_reg:
2404 output_X2_format (f, ptr->r.record.x.ab, ptr->r.record.x.reg,
2405 ptr->r.record.x.xy >> 1, ptr->r.record.x.xy,
2406 ptr->r.record.x.where.reg, ptr->r.record.x.t);
2407 break;
2408 case spill_psprel_p:
2409 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2410 ptr->r.record.x.ab, ptr->r.record.x.reg,
2411 ptr->r.record.x.t, ptr->r.record.x.where.pspoff);
2412 break;
2413 case spill_sprel_p:
2414 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2415 ptr->r.record.x.ab, ptr->r.record.x.reg,
2416 ptr->r.record.x.t, ptr->r.record.x.where.spoff);
2417 break;
2418 case spill_reg_p:
2419 output_X4_format (f, ptr->r.record.x.qp, ptr->r.record.x.ab,
2420 ptr->r.record.x.reg, ptr->r.record.x.xy >> 1,
2421 ptr->r.record.x.xy, ptr->r.record.x.where.reg,
2422 ptr->r.record.x.t);
2423 break;
2424 default:
2425 as_bad (_("record_type_not_valid"));
2426 break;
2427 }
2428 }
2429
2430 /* Given a unw_rec_list list, process all the records with
2431 the specified function. */
2432 static void
2433 process_unw_records (unw_rec_list *list, vbyte_func f)
2434 {
2435 unw_rec_list *ptr;
2436 for (ptr = list; ptr; ptr = ptr->next)
2437 process_one_record (ptr, f);
2438 }
2439
2440 /* Determine the size of a record list in bytes. */
2441 static int
2442 calc_record_size (unw_rec_list *list)
2443 {
2444 vbyte_count = 0;
2445 process_unw_records (list, count_output);
2446 return vbyte_count;
2447 }
2448
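/* The same record list is normally walked twice with different vbyte
   callbacks: first with count_output (via calc_record_size above) to
   learn how many bytes the descriptors need, and later with
   output_vbyte_mem to emit them, as ia64_convert_frag does further
   down.  A minimal sketch of that pattern (destination is a
   hypothetical buffer):

       len = calc_record_size (list);
       vbyte_mem_ptr = destination;
       process_unw_records (list, output_vbyte_mem);  */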
2449 /* Return the number of bits set in the input value.
2450 Perhaps this has a better place... */
2451 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
2452 # define popcount __builtin_popcount
2453 #else
2454 static int
2455 popcount (unsigned x)
2456 {
2457 static const unsigned char popcnt[16] =
2458 {
2459 0, 1, 1, 2,
2460 1, 2, 2, 3,
2461 1, 2, 2, 3,
2462 2, 3, 3, 4
2463 };
2464
2465 if (x < NELEMS (popcnt))
2466 return popcnt[x];
2467 return popcnt[x % NELEMS (popcnt)] + popcount (x / NELEMS (popcnt));
2468 }
2469 #endif
2470
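/* Worked example for the table-based fallback above: for x = 0x2b
   (binary 101011) the recursion evaluates

       popcnt[0x2b % 16] + popcount (0x2b / 16)
     = popcnt[0xb] + popcnt[0x2]
     = 3 + 1 = 4,

   matching the four bits set in 0x2b.  */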
2471 /* Update IMASK bitmask to reflect the fact that one or more registers
2472 of type TYPE are saved starting at instruction with index T. If N
2473 bits are set in REGMASK, it is assumed that instructions T through
2474 T+N-1 save these registers.
2475
2476 TYPE values:
2477 0: no save
2478 1: instruction saves next fp reg
2479 2: instruction saves next general reg
2480 3: instruction saves next branch reg */
2481 static void
2482 set_imask (unw_rec_list *region,
2483 unsigned long regmask,
2484 unsigned long t,
2485 unsigned int type)
2486 {
2487 unsigned char *imask;
2488 unsigned long imask_size;
2489 unsigned int i;
2490 int pos;
2491
2492 imask = region->r.record.r.mask.i;
2493 imask_size = region->r.record.r.imask_size;
2494 if (!imask)
2495 {
2496 imask_size = (region->r.record.r.rlen * 2 + 7) / 8 + 1;
2497 imask = xmalloc (imask_size);
2498 memset (imask, 0, imask_size);
2499
2500 region->r.record.r.imask_size = imask_size;
2501 region->r.record.r.mask.i = imask;
2502 }
2503
2504 i = (t / 4) + 1;
2505 pos = 2 * (3 - t % 4);
2506 while (regmask)
2507 {
2508 if (i >= imask_size)
2509 {
2510 as_bad (_("Ignoring attempt to spill beyond end of region"));
2511 return;
2512 }
2513
2514 imask[i] |= (type & 0x3) << pos;
2515
2516 regmask &= (regmask - 1);
2517 pos -= 2;
2518 if (pos < 0)
2519 {
2520 pos = 6;
2521 ++i;
2522 }
2523 }
2524 }
2525
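/* Worked example for set_imask (hypothetical values): each instruction
   slot gets a 2-bit type field, four slots per byte, most significant
   pair first.  For a save in slot t = 5 the code computes
   i = 5/4 + 1 = 2 and pos = 2 * (3 - 5%4) = 4, so the type bits land
   in bits 5:4 of imask[2].  */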
2526 /* Return the number of instruction slots from FIRST_ADDR to SLOT_ADDR.
2527 SLOT_FRAG is the frag containing SLOT_ADDR, and FIRST_FRAG is the frag
2528 containing FIRST_ADDR. If BEFORE_RELAX, then we use worst-case estimates
2529 for frag sizes. */
2530
2531 static unsigned long
2532 slot_index (unsigned long slot_addr,
2533 fragS *slot_frag,
2534 unsigned long first_addr,
2535 fragS *first_frag,
2536 int before_relax)
2537 {
2538 unsigned long index = 0;
2539
2540 /* First time we are called, the initial address and frag are invalid. */
2541 if (first_addr == 0)
2542 return 0;
2543
2544 /* If the two addresses are in different frags, then we need to add in
2545 the remaining size of this frag, and then the entire size of intermediate
2546 frags. */
2547 while (slot_frag != first_frag)
2548 {
2549 unsigned long start_addr = (unsigned long) &first_frag->fr_literal;
2550
2551 if (! before_relax)
2552 {
2553 /* We can get the final addresses only during and after
2554 relaxation. */
2555 if (first_frag->fr_next && first_frag->fr_next->fr_address)
2556 index += 3 * ((first_frag->fr_next->fr_address
2557 - first_frag->fr_address
2558 - first_frag->fr_fix) >> 4);
2559 }
2560 else
2561 /* We don't know what the final addresses will be. We try our
2562 best to estimate. */
2563 switch (first_frag->fr_type)
2564 {
2565 default:
2566 break;
2567
2568 case rs_space:
2569 as_fatal (_("Only constant space allocation is supported"));
2570 break;
2571
2572 case rs_align:
2573 case rs_align_code:
2574 case rs_align_test:
2575 /* Take alignment into account. Assume the worst case
2576 before relaxation. */
2577 index += 3 * ((1 << first_frag->fr_offset) >> 4);
2578 break;
2579
2580 case rs_org:
2581 if (first_frag->fr_symbol)
2582 {
2583 as_fatal (_("Only constant offsets are supported"));
2584 break;
2585 }
2586 case rs_fill:
2587 index += 3 * (first_frag->fr_offset >> 4);
2588 break;
2589 }
2590
2591 /* Add in the full size of the frag converted to instruction slots. */
2592 index += 3 * (first_frag->fr_fix >> 4);
2593 /* Subtract away the initial part before first_addr. */
2594 index -= (3 * ((first_addr >> 4) - (start_addr >> 4))
2595 + ((first_addr & 0x3) - (start_addr & 0x3)));
2596
2597 /* Move to the beginning of the next frag. */
2598 first_frag = first_frag->fr_next;
2599 first_addr = (unsigned long) &first_frag->fr_literal;
2600
2601 /* This can happen if there is section switching in the middle of a
2602 function, causing the frag chain for the function to be broken.
2603 It is too difficult to recover safely from this problem, so we just
2604 exit with an error. */
2605 if (first_frag == NULL)
2606 as_fatal (_("Section switching in code is not supported."));
2607 }
2608
2609 /* Add in the used part of the last frag. */
2610 index += (3 * ((slot_addr >> 4) - (first_addr >> 4))
2611 + ((slot_addr & 0x3) - (first_addr & 0x3)));
2612 return index;
2613 }
2614
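/* Illustration with hypothetical addresses: a slot "address" here is
   the 16-byte bundle address with the slot number (0-2) in its low
   bits, and each bundle holds three instruction slots.  Going from
   bundle B, slot 1 to bundle B+32, slot 0 within a single frag gives

       3 * (32 >> 4) + (0 - 1) = 5

   instruction slots, which is what the final computation above
   returns.  */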
2615 /* Optimize unwind record directives. */
2616
2617 static unw_rec_list *
2618 optimize_unw_records (unw_rec_list *list)
2619 {
2620 if (!list)
2621 return NULL;
2622
2623 /* If the only unwind record is ".prologue" or ".prologue" followed
2624 by ".body", then we can optimize the unwind directives away. */
2625 if (list->r.type == prologue
2626 && (list->next->r.type == endp
2627 || (list->next->r.type == body && list->next->next->r.type == endp)))
2628 return NULL;
2629
2630 return list;
2631 }
2632
2633 /* Given a complete record list, process any records which have
2634 unresolved fields (e.g., length counts for a prologue). After
2635 this has been run, all necessary information should be available
2636 within each record to generate an image. */
2637
2638 static void
2639 fixup_unw_records (unw_rec_list *list, int before_relax)
2640 {
2641 unw_rec_list *ptr, *region = 0;
2642 unsigned long first_addr = 0, rlen = 0, t;
2643 fragS *first_frag = 0;
2644
2645 for (ptr = list; ptr; ptr = ptr->next)
2646 {
2647 if (ptr->slot_number == SLOT_NUM_NOT_SET)
2648 as_bad (_(" Insn slot not set in unwind record."));
2649 t = slot_index (ptr->slot_number, ptr->slot_frag,
2650 first_addr, first_frag, before_relax);
2651 switch (ptr->r.type)
2652 {
2653 case prologue:
2654 case prologue_gr:
2655 case body:
2656 {
2657 unw_rec_list *last;
2658 int size;
2659 unsigned long last_addr = 0;
2660 fragS *last_frag = NULL;
2661
2662 first_addr = ptr->slot_number;
2663 first_frag = ptr->slot_frag;
2664 /* Find either the next body/prologue start, or the end of
2665 the function, and determine the size of the region. */
2666 for (last = ptr->next; last != NULL; last = last->next)
2667 if (last->r.type == prologue || last->r.type == prologue_gr
2668 || last->r.type == body || last->r.type == endp)
2669 {
2670 last_addr = last->slot_number;
2671 last_frag = last->slot_frag;
2672 break;
2673 }
2674 size = slot_index (last_addr, last_frag, first_addr, first_frag,
2675 before_relax);
2676 rlen = ptr->r.record.r.rlen = size;
2677 if (ptr->r.type == body)
2678 /* End of region. */
2679 region = 0;
2680 else
2681 region = ptr;
2682 break;
2683 }
2684 case epilogue:
2685 if (t < rlen)
2686 ptr->r.record.b.t = rlen - 1 - t;
2687 else
2688 /* This happens when a memory-stack-less procedure uses a
2689 ".restore sp" directive at the end of a region to pop
2690 the frame state. */
2691 ptr->r.record.b.t = 0;
2692 break;
2693
2694 case mem_stack_f:
2695 case mem_stack_v:
2696 case rp_when:
2697 case pfs_when:
2698 case preds_when:
2699 case unat_when:
2700 case lc_when:
2701 case fpsr_when:
2702 case priunat_when_gr:
2703 case priunat_when_mem:
2704 case bsp_when:
2705 case bspstore_when:
2706 case rnat_when:
2707 ptr->r.record.p.t = t;
2708 break;
2709
2710 case spill_reg:
2711 case spill_sprel:
2712 case spill_psprel:
2713 case spill_reg_p:
2714 case spill_sprel_p:
2715 case spill_psprel_p:
2716 ptr->r.record.x.t = t;
2717 break;
2718
2719 case frgr_mem:
2720 if (!region)
2721 {
2722 as_bad (_("frgr_mem record before region record!"));
2723 return;
2724 }
2725 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2726 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2727 set_imask (region, ptr->r.record.p.frmask, t, 1);
2728 set_imask (region, ptr->r.record.p.grmask, t, 2);
2729 break;
2730 case fr_mem:
2731 if (!region)
2732 {
2733 as_bad (_("fr_mem record before region record!"));
2734 return;
2735 }
2736 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2737 set_imask (region, ptr->r.record.p.frmask, t, 1);
2738 break;
2739 case gr_mem:
2740 if (!region)
2741 {
2742 as_bad (_("gr_mem record before region record!"));
2743 return;
2744 }
2745 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2746 set_imask (region, ptr->r.record.p.grmask, t, 2);
2747 break;
2748 case br_mem:
2749 if (!region)
2750 {
2751 as_bad (_("br_mem record before region record!"));
2752 return;
2753 }
2754 region->r.record.r.mask.br_mem |= ptr->r.record.p.brmask;
2755 set_imask (region, ptr->r.record.p.brmask, t, 3);
2756 break;
2757
2758 case gr_gr:
2759 if (!region)
2760 {
2761 as_bad (_("gr_gr record before region record!"));
2762 return;
2763 }
2764 set_imask (region, ptr->r.record.p.grmask, t, 2);
2765 break;
2766 case br_gr:
2767 if (!region)
2768 {
2769 as_bad (_("br_gr record before region record!"));
2770 return;
2771 }
2772 set_imask (region, ptr->r.record.p.brmask, t, 3);
2773 break;
2774
2775 default:
2776 break;
2777 }
2778 }
2779 }
2780
2781 /* Estimate the size of a frag before relaxing. We only have one type of frag
2782 to handle here, which is the unwind info frag. */
2783
2784 int
2785 ia64_estimate_size_before_relax (fragS *frag,
2786 asection *segtype ATTRIBUTE_UNUSED)
2787 {
2788 unw_rec_list *list;
2789 int len, size, pad;
2790
2791 /* ??? This code is identical to the first part of ia64_convert_frag. */
2792 list = (unw_rec_list *) frag->fr_opcode;
2793 fixup_unw_records (list, 0);
2794
2795 len = calc_record_size (list);
2796 /* pad to pointer-size boundary. */
2797 pad = len % md.pointer_size;
2798 if (pad != 0)
2799 len += md.pointer_size - pad;
2800 /* Add 8 for the header. */
2801 size = len + 8;
2802 /* Add a pointer for the personality offset. */
2803 if (frag->fr_offset)
2804 size += md.pointer_size;
2805
2806 /* fr_var carries the max_chars that we created the fragment with.
2807 We must, of course, have allocated enough memory earlier. */
2808 assert (frag->fr_var >= size);
2809
2810 return frag->fr_fix + size;
2811 }
2812
2813 /* This function converts an rs_machine_dependent variant frag into a
2814 normal fill frag with the unwind image from the record list. */
2815 void
2816 ia64_convert_frag (fragS *frag)
2817 {
2818 unw_rec_list *list;
2819 int len, size, pad;
2820 valueT flag_value;
2821
2822 /* ??? This code is identical to ia64_estimate_size_before_relax. */
2823 list = (unw_rec_list *) frag->fr_opcode;
2824 fixup_unw_records (list, 0);
2825
2826 len = calc_record_size (list);
2827 /* pad to pointer-size boundary. */
2828 pad = len % md.pointer_size;
2829 if (pad != 0)
2830 len += md.pointer_size - pad;
2831 /* Add 8 for the header. */
2832 size = len + 8;
2833 /* Add a pointer for the personality offset. */
2834 if (frag->fr_offset)
2835 size += md.pointer_size;
2836
2837 /* fr_var carries the max_chars that we created the fragment with.
2838 We must, of course, have allocated enough memory earlier. */
2839 assert (frag->fr_var >= size);
2840
2841 /* Initialize the header area. fr_offset is initialized with
2842 unwind.personality_routine. */
2843 if (frag->fr_offset)
2844 {
2845 if (md.flags & EF_IA_64_ABI64)
2846 flag_value = (bfd_vma) 3 << 32;
2847 else
2848 /* 32-bit unwind info block. */
2849 flag_value = (bfd_vma) 0x1003 << 32;
2850 }
2851 else
2852 flag_value = 0;
2853
2854 md_number_to_chars (frag->fr_literal,
2855 (((bfd_vma) 1 << 48) /* Version. */
2856 | flag_value /* U & E handler flags. */
2857 | (len / md.pointer_size)), /* Length. */
2858 8);
2859
2860 /* Skip the header. */
2861 vbyte_mem_ptr = frag->fr_literal + 8;
2862 process_unw_records (list, output_vbyte_mem);
2863
2864 /* Fill the padding bytes with zeros. */
2865 if (pad != 0)
2866 md_number_to_chars (frag->fr_literal + len + 8 - md.pointer_size + pad, 0,
2867 md.pointer_size - pad);
2868 /* Fill the unwind personality with zeros. */
2869 if (frag->fr_offset)
2870 md_number_to_chars (frag->fr_literal + size - md.pointer_size, 0,
2871 md.pointer_size);
2872
2873 frag->fr_fix += size;
2874 frag->fr_type = rs_fill;
2875 frag->fr_var = 0;
2876 frag->fr_offset = 0;
2877 }
2878
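/* Header layout illustration (hypothetical numbers): for an LP64
   object with len = 24 descriptor bytes and no personality routine,
   the 8-byte header written above is

       ((bfd_vma) 1 << 48) | 0 | (24 / 8) == 0x0001000000000003

   i.e. version 1, no handler flags, and a length of 3 eight-byte
   words.  */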
2879 static int
2880 parse_predicate_and_operand (expressionS *e, unsigned *qp, const char *po)
2881 {
2882 int sep = parse_operand (e, ',');
2883
2884 *qp = e->X_add_number - REG_P;
2885 if (e->X_op != O_register || *qp > 63)
2886 {
2887 as_bad (_("First operand to .%s must be a predicate"), po);
2888 *qp = 0;
2889 }
2890 else if (*qp == 0)
2891 as_warn (_("Pointless use of p0 as first operand to .%s"), po);
2892 if (sep == ',')
2893 sep = parse_operand (e, ',');
2894 else
2895 e->X_op = O_absent;
2896 return sep;
2897 }
2898
2899 static void
2900 convert_expr_to_ab_reg (const expressionS *e,
2901 unsigned int *ab,
2902 unsigned int *regp,
2903 const char *po,
2904 int n)
2905 {
2906 unsigned int reg = e->X_add_number;
2907
2908 *ab = *regp = 0; /* Anything valid is good here. */
2909
2910 if (e->X_op != O_register)
2911 reg = REG_GR; /* Anything invalid is good here. */
2912
2913 if (reg >= (REG_GR + 4) && reg <= (REG_GR + 7))
2914 {
2915 *ab = 0;
2916 *regp = reg - REG_GR;
2917 }
2918 else if ((reg >= (REG_FR + 2) && reg <= (REG_FR + 5))
2919 || (reg >= (REG_FR + 16) && reg <= (REG_FR + 31)))
2920 {
2921 *ab = 1;
2922 *regp = reg - REG_FR;
2923 }
2924 else if (reg >= (REG_BR + 1) && reg <= (REG_BR + 5))
2925 {
2926 *ab = 2;
2927 *regp = reg - REG_BR;
2928 }
2929 else
2930 {
2931 *ab = 3;
2932 switch (reg)
2933 {
2934 case REG_PR: *regp = 0; break;
2935 case REG_PSP: *regp = 1; break;
2936 case REG_PRIUNAT: *regp = 2; break;
2937 case REG_BR + 0: *regp = 3; break;
2938 case REG_AR + AR_BSP: *regp = 4; break;
2939 case REG_AR + AR_BSPSTORE: *regp = 5; break;
2940 case REG_AR + AR_RNAT: *regp = 6; break;
2941 case REG_AR + AR_UNAT: *regp = 7; break;
2942 case REG_AR + AR_FPSR: *regp = 8; break;
2943 case REG_AR + AR_PFS: *regp = 9; break;
2944 case REG_AR + AR_LC: *regp = 10; break;
2945
2946 default:
2947 as_bad (_("Operand %d to .%s must be a preserved register"), n, po);
2948 break;
2949 }
2950 }
2951 }
2952
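/* Examples of the ab/reg encoding produced above (derived from the
   cases in this function): r5 -> ab 0, reg 5; f16 -> ab 1, reg 16;
   b1 -> ab 2, reg 1; ar.unat -> ab 3, reg 7; rp (b0) -> ab 3, reg 3.
   Anything else is rejected as not being a preserved register.  */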
2953 static void
2954 convert_expr_to_xy_reg (const expressionS *e,
2955 unsigned int *xy,
2956 unsigned int *regp,
2957 const char *po,
2958 int n)
2959 {
2960 unsigned int reg = e->X_add_number;
2961
2962 *xy = *regp = 0; /* Anything valid is good here. */
2963
2964 if (e->X_op != O_register)
2965 reg = REG_GR; /* Anything invalid is good here. */
2966
2967 if (reg >= (REG_GR + 1) && reg <= (REG_GR + 127))
2968 {
2969 *xy = 0;
2970 *regp = reg - REG_GR;
2971 }
2972 else if (reg >= (REG_FR + 2) && reg <= (REG_FR + 127))
2973 {
2974 *xy = 1;
2975 *regp = reg - REG_FR;
2976 }
2977 else if (reg >= REG_BR && reg <= (REG_BR + 7))
2978 {
2979 *xy = 2;
2980 *regp = reg - REG_BR;
2981 }
2982 else
2983 as_bad (_("Operand %d to .%s must be a writable register"), n, po);
2984 }
2985
2986 static void
2987 dot_align (int arg)
2988 {
2989 /* The current frag is an alignment frag. */
2990 align_frag = frag_now;
2991 s_align_bytes (arg);
2992 }
2993
2994 static void
2995 dot_radix (int dummy ATTRIBUTE_UNUSED)
2996 {
2997 char *radix;
2998 int ch;
2999
3000 SKIP_WHITESPACE ();
3001
3002 if (is_it_end_of_statement ())
3003 return;
3004 radix = input_line_pointer;
3005 ch = get_symbol_end ();
3006 ia64_canonicalize_symbol_name (radix);
3007 if (strcasecmp (radix, "C"))
3008 as_bad (_("Radix `%s' unsupported or invalid"), radix);
3009 *input_line_pointer = ch;
3010 demand_empty_rest_of_line ();
3011 }
3012
3013 /* Helper function for .loc directives. If the assembler is not generating
3014 line number info, then we need to remember which instructions have a .loc
3015 directive, and only call dwarf2_gen_line_info for those instructions. */
3016
3017 static void
3018 dot_loc (int x)
3019 {
3020 CURR_SLOT.loc_directive_seen = 1;
3021 dwarf2_directive_loc (x);
3022 }
3023
3024 /* .sbss, .bss etc. are macros that expand into ".section SECNAME". */
3025 static void
3026 dot_special_section (int which)
3027 {
3028 set_section ((char *) special_section_name[which]);
3029 }
3030
3031 /* Return -1 for warning and 0 for error. */
3032
3033 static int
3034 unwind_diagnostic (const char * region, const char *directive)
3035 {
3036 if (md.unwind_check == unwind_check_warning)
3037 {
3038 as_warn (_(".%s outside of %s"), directive, region);
3039 return -1;
3040 }
3041 else
3042 {
3043 as_bad (_(".%s outside of %s"), directive, region);
3044 ignore_rest_of_line ();
3045 return 0;
3046 }
3047 }
3048
3049 /* Return 1 if a directive is in a procedure, -1 if a directive isn't in
3050 a procedure but the unwind directive check is set to warning, 0 if
3051 a directive isn't in a procedure and the unwind directive check is set
3052 to error. */
3053
3054 static int
3055 in_procedure (const char *directive)
3056 {
3057 if (unwind.proc_pending.sym
3058 && (!unwind.saved_text_seg || strcmp (directive, "endp") == 0))
3059 return 1;
3060 return unwind_diagnostic ("procedure", directive);
3061 }
3062
3063 /* Return 1 if a directive is in a prologue, -1 if a directive isn't in
3064 a prologue but the unwind directive check is set to warning, 0 if
3065 a directive isn't in a prologue and the unwind directive check is set
3066 to error. */
3067
3068 static int
3069 in_prologue (const char *directive)
3070 {
3071 int in = in_procedure (directive);
3072
3073 if (in > 0 && !unwind.prologue)
3074 in = unwind_diagnostic ("prologue", directive);
3075 check_pending_save ();
3076 return in;
3077 }
3078
3079 /* Return 1 if a directive is in a body, -1 if a directive isn't in
3080 a body but the unwind directive check is set to warning, 0 if
3081 a directive isn't in a body and the unwind directive check is set
3082 to error. */
3083
3084 static int
3085 in_body (const char *directive)
3086 {
3087 int in = in_procedure (directive);
3088
3089 if (in > 0 && !unwind.body)
3090 in = unwind_diagnostic ("body region", directive);
3091 return in;
3092 }
3093
3094 static void
3095 add_unwind_entry (unw_rec_list *ptr, int sep)
3096 {
3097 if (ptr)
3098 {
3099 if (unwind.tail)
3100 unwind.tail->next = ptr;
3101 else
3102 unwind.list = ptr;
3103 unwind.tail = ptr;
3104
3105 /* The current entry can in fact be a chain of unwind entries. */
3106 if (unwind.current_entry == NULL)
3107 unwind.current_entry = ptr;
3108 }
3109
3114 if (sep == ',')
3115 {
3116 /* Parse a tag permitted for the current directive. */
3117 int ch;
3118
3119 SKIP_WHITESPACE ();
3120 ch = get_symbol_end ();
3121 /* FIXME: For now, just issue a warning that this isn't implemented. */
3122 {
3123 static int warned;
3124
3125 if (!warned)
3126 {
3127 warned = 1;
3128 as_warn (_("Tags on unwind pseudo-ops aren't supported, yet"));
3129 }
3130 }
3131 *input_line_pointer = ch;
3132 }
3133 if (sep != NOT_A_CHAR)
3134 demand_empty_rest_of_line ();
3135 }
3136
3137 static void
3138 dot_fframe (int dummy ATTRIBUTE_UNUSED)
3139 {
3140 expressionS e;
3141 int sep;
3142
3143 if (!in_prologue ("fframe"))
3144 return;
3145
3146 sep = parse_operand (&e, ',');
3147
3148 if (e.X_op != O_constant)
3149 {
3150 as_bad (_("First operand to .fframe must be a constant"));
3151 e.X_add_number = 0;
3152 }
3153 add_unwind_entry (output_mem_stack_f (e.X_add_number), sep);
3154 }
3155
3156 static void
3157 dot_vframe (int dummy ATTRIBUTE_UNUSED)
3158 {
3159 expressionS e;
3160 unsigned reg;
3161 int sep;
3162
3163 if (!in_prologue ("vframe"))
3164 return;
3165
3166 sep = parse_operand (&e, ',');
3167 reg = e.X_add_number - REG_GR;
3168 if (e.X_op != O_register || reg > 127)
3169 {
3170 as_bad (_("First operand to .vframe must be a general register"));
3171 reg = 0;
3172 }
3173 add_unwind_entry (output_mem_stack_v (), sep);
3174 if (! (unwind.prologue_mask & 2))
3175 add_unwind_entry (output_psp_gr (reg), NOT_A_CHAR);
3176 else if (reg != unwind.prologue_gr
3177 + (unsigned) popcount (unwind.prologue_mask & (-2 << 1)))
3178 as_warn (_("Operand of .vframe contradicts .prologue"));
3179 }
3180
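/* Illustrative use of .vframe (hypothetical register choice):

       .proc foo
       .prologue
       .vframe r34        // r34 holds a copy of psp for this frame

   This emits a mem_stack_v record plus a psp_gr record naming r34,
   unless .prologue already claimed psp via its mask operand.  */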
3181 static void
3182 dot_vframesp (int psp)
3183 {
3184 expressionS e;
3185 int sep;
3186
3187 if (psp)
3188 as_warn (_(".vframepsp is meaningless, assuming .vframesp was meant"));
3189
3190 if (!in_prologue ("vframesp"))
3191 return;
3192
3193 sep = parse_operand (&e, ',');
3194 if (e.X_op != O_constant)
3195 {
3196 as_bad (_("Operand to .vframesp must be a constant (sp-relative offset)"));
3197 e.X_add_number = 0;
3198 }
3199 add_unwind_entry (output_mem_stack_v (), sep);
3200 add_unwind_entry (output_psp_sprel (e.X_add_number), NOT_A_CHAR);
3201 }
3202
3203 static void
3204 dot_save (int dummy ATTRIBUTE_UNUSED)
3205 {
3206 expressionS e1, e2;
3207 unsigned reg1, reg2;
3208 int sep;
3209
3210 if (!in_prologue ("save"))
3211 return;
3212
3213 sep = parse_operand (&e1, ',');
3214 if (sep == ',')
3215 sep = parse_operand (&e2, ',');
3216 else
3217 e2.X_op = O_absent;
3218
3219 reg1 = e1.X_add_number;
3220 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
3221 if (e1.X_op != O_register)
3222 {
3223 as_bad (_("First operand to .save not a register"));
3224 reg1 = REG_PR; /* Anything valid is good here. */
3225 }
3226 reg2 = e2.X_add_number - REG_GR;
3227 if (e2.X_op != O_register || reg2 > 127)
3228 {
3229 as_bad (_("Second operand to .save not a valid register"));
3230 reg2 = 0;
3231 }
3232 switch (reg1)
3233 {
3234 case REG_AR + AR_BSP:
3235 add_unwind_entry (output_bsp_when (), sep);
3236 add_unwind_entry (output_bsp_gr (reg2), NOT_A_CHAR);
3237 break;
3238 case REG_AR + AR_BSPSTORE:
3239 add_unwind_entry (output_bspstore_when (), sep);
3240 add_unwind_entry (output_bspstore_gr (reg2), NOT_A_CHAR);
3241 break;
3242 case REG_AR + AR_RNAT:
3243 add_unwind_entry (output_rnat_when (), sep);
3244 add_unwind_entry (output_rnat_gr (reg2), NOT_A_CHAR);
3245 break;
3246 case REG_AR + AR_UNAT:
3247 add_unwind_entry (output_unat_when (), sep);
3248 add_unwind_entry (output_unat_gr (reg2), NOT_A_CHAR);
3249 break;
3250 case REG_AR + AR_FPSR:
3251 add_unwind_entry (output_fpsr_when (), sep);
3252 add_unwind_entry (output_fpsr_gr (reg2), NOT_A_CHAR);
3253 break;
3254 case REG_AR + AR_PFS:
3255 add_unwind_entry (output_pfs_when (), sep);
3256 if (! (unwind.prologue_mask & 4))
3257 add_unwind_entry (output_pfs_gr (reg2), NOT_A_CHAR);
3258 else if (reg2 != unwind.prologue_gr
3259 + (unsigned) popcount (unwind.prologue_mask & (-4 << 1)))
3260 as_warn (_("Second operand of .save contradicts .prologue"));
3261 break;
3262 case REG_AR + AR_LC:
3263 add_unwind_entry (output_lc_when (), sep);
3264 add_unwind_entry (output_lc_gr (reg2), NOT_A_CHAR);
3265 break;
3266 case REG_BR:
3267 add_unwind_entry (output_rp_when (), sep);
3268 if (! (unwind.prologue_mask & 8))
3269 add_unwind_entry (output_rp_gr (reg2), NOT_A_CHAR);
3270 else if (reg2 != unwind.prologue_gr)
3271 as_warn (_("Second operand of .save contradicts .prologue"));
3272 break;
3273 case REG_PR:
3274 add_unwind_entry (output_preds_when (), sep);
3275 if (! (unwind.prologue_mask & 1))
3276 add_unwind_entry (output_preds_gr (reg2), NOT_A_CHAR);
3277 else if (reg2 != unwind.prologue_gr
3278 + (unsigned) popcount (unwind.prologue_mask & (-1 << 1)))
3279 as_warn (_("Second operand of .save contradicts .prologue"));
3280 break;
3281 case REG_PRIUNAT:
3282 add_unwind_entry (output_priunat_when_gr (), sep);
3283 add_unwind_entry (output_priunat_gr (reg2), NOT_A_CHAR);
3284 break;
3285 default:
3286 as_bad (_("First operand to .save not a valid register"));
3287 add_unwind_entry (NULL, sep);
3288 break;
3289 }
3290 }
3291
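/* Illustrative uses of .save (hypothetical register choices):

       .save ar.pfs, r33    // pfs_when + pfs_gr (33)
       .save rp, r32        // rp_when + rp_gr (32)
       .save pr, r35        // preds_when + preds_gr (35)

   When the corresponding bit was already given to .prologue, only the
   *_when record is emitted and the register operand is merely checked
   against the .prologue operands.  */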
3292 static void
3293 dot_restore (int dummy ATTRIBUTE_UNUSED)
3294 {
3295 expressionS e1;
3296 unsigned long ecount; /* # of _additional_ regions to pop */
3297 int sep;
3298
3299 if (!in_body ("restore"))
3300 return;
3301
3302 sep = parse_operand (&e1, ',');
3303 if (e1.X_op != O_register || e1.X_add_number != REG_GR + 12)
3304 as_bad (_("First operand to .restore must be stack pointer (sp)"));
3305
3306 if (sep == ',')
3307 {
3308 expressionS e2;
3309
3310 sep = parse_operand (&e2, ',');
3311 if (e2.X_op != O_constant || e2.X_add_number < 0)
3312 {
3313 as_bad (_("Second operand to .restore must be a constant >= 0"));
3314 e2.X_add_number = 0;
3315 }
3316 ecount = e2.X_add_number;
3317 }
3318 else
3319 ecount = unwind.prologue_count - 1;
3320
3321 if (ecount >= unwind.prologue_count)
3322 {
3323 as_bad (_("Epilogue count of %lu exceeds number of nested prologues (%u)"),
3324 ecount + 1, unwind.prologue_count);
3325 ecount = 0;
3326 }
3327
3328 add_unwind_entry (output_epilogue (ecount), sep);
3329
3330 if (ecount < unwind.prologue_count)
3331 unwind.prologue_count -= ecount + 1;
3332 else
3333 unwind.prologue_count = 0;
3334 }
3335
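/* Illustrative uses of .restore (hypothetical):

       .restore sp, 0    // pop just the innermost prologue region
       .restore sp       // default: pop all currently open regions

   Either form emits an epilogue record; the count is validated against
   the number of prologue regions currently open.  */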
3336 static void
3337 dot_restorereg (int pred)
3338 {
3339 unsigned int qp, ab, reg;
3340 expressionS e;
3341 int sep;
3342 const char * const po = pred ? "restorereg.p" : "restorereg";
3343
3344 if (!in_procedure (po))
3345 return;
3346
3347 if (pred)
3348 sep = parse_predicate_and_operand (&e, &qp, po);
3349 else
3350 {
3351 sep = parse_operand (&e, ',');
3352 qp = 0;
3353 }
3354 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
3355
3356 add_unwind_entry (output_spill_reg (ab, reg, 0, 0, qp), sep);
3357 }
3358
3359 static char *special_linkonce_name[] =
3360 {
3361 ".gnu.linkonce.ia64unw.", ".gnu.linkonce.ia64unwi."
3362 };
3363
3364 static void
3365 start_unwind_section (const segT text_seg, int sec_index)
3366 {
3367 /*
3368 Use a slightly ugly scheme to derive the unwind section names from
3369 the text section name:
3370
3371 text sect.            unwind table sect.
3372 name:                 name:                        comments:
3373 ----------            -----------------            --------------------------------
3374 .text                 .IA_64.unwind
3375 .text.foo             .IA_64.unwind.text.foo
3376 .foo                  .IA_64.unwind.foo
3377 .gnu.linkonce.t.foo
3378                       .gnu.linkonce.ia64unw.foo
3379 _info                 .IA_64.unwind_info           gas issues error message (ditto)
3380 _infoFOO              .IA_64.unwind_infoFOO        gas issues error message (ditto)
3381
3382 This mapping is done so that:
3383
3384 (a) An object file with unwind info only in .text will use
3385 unwind section names .IA_64.unwind and .IA_64.unwind_info.
3386 This follows the letter of the ABI and also ensures backwards
3387 compatibility with older toolchains.
3388
3389 (b) An object file with unwind info in multiple text sections
3390 will use separate unwind sections for each text section.
3391 This allows us to properly set the "sh_info" and "sh_link"
3392 fields in SHT_IA_64_UNWIND as required by the ABI and also
3393 lets GNU ld support programs with multiple segments
3394 containing unwind info (as might be the case for certain
3395 embedded applications).
3396
3397 (c) An error is issued if there would be a name clash.
3398 */
3399
3400 const char *text_name, *sec_text_name;
3401 char *sec_name;
3402 const char *prefix = special_section_name [sec_index];
3403 const char *suffix;
3404 size_t prefix_len, suffix_len, sec_name_len;
3405
3406 sec_text_name = segment_name (text_seg);
3407 text_name = sec_text_name;
3408 if (strncmp (text_name, "_info", 5) == 0)
3409 {
3410 as_bad (_("Illegal section name `%s' (causes unwind section name clash)"),
3411 text_name);
3412 ignore_rest_of_line ();
3413 return;
3414 }
3415 if (strcmp (text_name, ".text") == 0)
3416 text_name = "";
3417
3418 /* Build the unwind section name by appending the (possibly stripped)
3419 text section name to the unwind prefix. */
3420 suffix = text_name;
3421 if (strncmp (text_name, ".gnu.linkonce.t.",
3422 sizeof (".gnu.linkonce.t.") - 1) == 0)
3423 {
3424 prefix = special_linkonce_name [sec_index - SPECIAL_SECTION_UNWIND];
3425 suffix += sizeof (".gnu.linkonce.t.") - 1;
3426 }
3427
3428 prefix_len = strlen (prefix);
3429 suffix_len = strlen (suffix);
3430 sec_name_len = prefix_len + suffix_len;
3431 sec_name = alloca (sec_name_len + 1);
3432 memcpy (sec_name, prefix, prefix_len);
3433 memcpy (sec_name + prefix_len, suffix, suffix_len);
3434 sec_name [sec_name_len] = '\0';
3435
3436 /* Handle COMDAT group. */
3437 if ((text_seg->flags & SEC_LINK_ONCE) != 0
3438 && (elf_section_flags (text_seg) & SHF_GROUP) != 0)
3439 {
3440 char *section;
3441 size_t len, group_name_len;
3442 const char *group_name = elf_group_name (text_seg);
3443
3444 if (group_name == NULL)
3445 {
3446 as_bad (_("Group section `%s' has no group signature"),
3447 sec_text_name);
3448 ignore_rest_of_line ();
3449 return;
3450 }
3451 /* We have to construct a fake section directive. */
3452 group_name_len = strlen (group_name);
3453 len = (sec_name_len
3454 + 16 /* ,"aG",@progbits, */
3455 + group_name_len /* ,group_name */
3456 + 7); /* ,comdat */
3457
3458 section = alloca (len + 1);
3459 memcpy (section, sec_name, sec_name_len);
3460 memcpy (section + sec_name_len, ",\"aG\",@progbits,", 16);
3461 memcpy (section + sec_name_len + 16, group_name, group_name_len);
3462 memcpy (section + len - 7, ",comdat", 7);
3463 section [len] = '\0';
3464 set_section (section);
3465 }
3466 else
3467 {
3468 set_section (sec_name);
3469 bfd_set_section_flags (stdoutput, now_seg,
3470 SEC_LOAD | SEC_ALLOC | SEC_READONLY);
3471 }
3472
3473 elf_linked_to_section (now_seg) = text_seg;
3474 }
3475
3476 static void
3477 generate_unwind_image (const segT text_seg)
3478 {
3479 int size, pad;
3480 unw_rec_list *list;
3481
3482 /* Mark the end of the unwind info, so that we can compute the size of the
3483 last unwind region. */
3484 add_unwind_entry (output_endp (), NOT_A_CHAR);
3485
3486 /* Force out pending instructions, to make sure all unwind records have
3487 a valid slot_number field. */
3488 ia64_flush_insns ();
3489
3490 /* Generate the unwind record. */
3491 list = optimize_unw_records (unwind.list);
3492 fixup_unw_records (list, 1);
3493 size = calc_record_size (list);
3494
3495 if (size > 0 || unwind.force_unwind_entry)
3496 {
3497 unwind.force_unwind_entry = 0;
3498 /* pad to pointer-size boundary. */
3499 pad = size % md.pointer_size;
3500 if (pad != 0)
3501 size += md.pointer_size - pad;
3502 /* Add 8 for the header. */
3503 size += 8;
3504 /* Add a pointer for the personality offset. */
3505 if (unwind.personality_routine)
3506 size += md.pointer_size;
3507 }
3508
3509 /* If there are unwind records, switch sections, and output the info. */
3510 if (size != 0)
3511 {
3512 expressionS exp;
3513 bfd_reloc_code_real_type reloc;
3514
3515 start_unwind_section (text_seg, SPECIAL_SECTION_UNWIND_INFO);
3516
3517 /* Make sure the section has 4 byte alignment for ILP32 and
3518 8 byte alignment for LP64. */
3519 frag_align (md.pointer_size_shift, 0, 0);
3520 record_alignment (now_seg, md.pointer_size_shift);
3521
3522 /* Set expression which points to start of unwind descriptor area. */
3523 unwind.info = expr_build_dot ();
3524
3525 frag_var (rs_machine_dependent, size, size, 0, 0,
3526 (offsetT) (long) unwind.personality_routine,
3527 (char *) list);
3528
3529 /* Add the personality address to the image. */
3530 if (unwind.personality_routine != 0)
3531 {
3532 exp.X_op = O_symbol;
3533 exp.X_add_symbol = unwind.personality_routine;
3534 exp.X_add_number = 0;
3535
3536 if (md.flags & EF_IA_64_BE)
3537 {
3538 if (md.flags & EF_IA_64_ABI64)
3539 reloc = BFD_RELOC_IA64_LTOFF_FPTR64MSB;
3540 else
3541 reloc = BFD_RELOC_IA64_LTOFF_FPTR32MSB;
3542 }
3543 else
3544 {
3545 if (md.flags & EF_IA_64_ABI64)
3546 reloc = BFD_RELOC_IA64_LTOFF_FPTR64LSB;
3547 else
3548 reloc = BFD_RELOC_IA64_LTOFF_FPTR32LSB;
3549 }
3550
3551 fix_new_exp (frag_now, frag_now_fix () - md.pointer_size,
3552 md.pointer_size, &exp, 0, reloc);
3553 unwind.personality_routine = 0;
3554 }
3555 }
3556
3557 free_saved_prologue_counts ();
3558 unwind.list = unwind.tail = unwind.current_entry = NULL;
3559 }
3560
3561 static void
3562 dot_handlerdata (int dummy ATTRIBUTE_UNUSED)
3563 {
3564 if (!in_procedure ("handlerdata"))
3565 return;
3566 unwind.force_unwind_entry = 1;
3567
3568 /* Remember which segment we're in so we can switch back after .endp */
3569 unwind.saved_text_seg = now_seg;
3570 unwind.saved_text_subseg = now_subseg;
3571
3572 /* Generate unwind info into unwind-info section and then leave that
3573 section as the currently active one so dataXX directives go into
3574 the language specific data area of the unwind info block. */
3575 generate_unwind_image (now_seg);
3576 demand_empty_rest_of_line ();
3577 }
3578
3579 static void
3580 dot_unwentry (int dummy ATTRIBUTE_UNUSED)
3581 {
3582 if (!in_procedure ("unwentry"))
3583 return;
3584 unwind.force_unwind_entry = 1;
3585 demand_empty_rest_of_line ();
3586 }
3587
3588 static void
3589 dot_altrp (int dummy ATTRIBUTE_UNUSED)
3590 {
3591 expressionS e;
3592 unsigned reg;
3593
3594 if (!in_prologue ("altrp"))
3595 return;
3596
3597 parse_operand (&e, 0);
3598 reg = e.X_add_number - REG_BR;
3599 if (e.X_op != O_register || reg > 7)
3600 {
3601 as_bad (_("First operand to .altrp not a valid branch register"));
3602 reg = 0;
3603 }
3604 add_unwind_entry (output_rp_br (reg), 0);
3605 }
3606
3607 static void
3608 dot_savemem (int psprel)
3609 {
3610 expressionS e1, e2;
3611 int sep;
3612 int reg1, val;
3613 const char * const po = psprel ? "savepsp" : "savesp";
3614
3615 if (!in_prologue (po))
3616 return;
3617
3618 sep = parse_operand (&e1, ',');
3619 if (sep == ',')
3620 sep = parse_operand (&e2, ',');
3621 else
3622 e2.X_op = O_absent;
3623
3624 reg1 = e1.X_add_number;
3625 val = e2.X_add_number;
3626
3627 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
3628 if (e1.X_op != O_register)
3629 {
3630 as_bad (_("First operand to .%s not a register"), po);
3631 reg1 = REG_PR; /* Anything valid is good here. */
3632 }
3633 if (e2.X_op != O_constant)
3634 {
3635 as_bad (_("Second operand to .%s not a constant"), po);
3636 val = 0;
3637 }
3638
3639 switch (reg1)
3640 {
3641 case REG_AR + AR_BSP:
3642 add_unwind_entry (output_bsp_when (), sep);
3643 add_unwind_entry ((psprel
3644 ? output_bsp_psprel
3645 : output_bsp_sprel) (val), NOT_A_CHAR);
3646 break;
3647 case REG_AR + AR_BSPSTORE:
3648 add_unwind_entry (output_bspstore_when (), sep);
3649 add_unwind_entry ((psprel
3650 ? output_bspstore_psprel
3651 : output_bspstore_sprel) (val), NOT_A_CHAR);
3652 break;
3653 case REG_AR + AR_RNAT:
3654 add_unwind_entry (output_rnat_when (), sep);
3655 add_unwind_entry ((psprel
3656 ? output_rnat_psprel
3657 : output_rnat_sprel) (val), NOT_A_CHAR);
3658 break;
3659 case REG_AR + AR_UNAT:
3660 add_unwind_entry (output_unat_when (), sep);
3661 add_unwind_entry ((psprel
3662 ? output_unat_psprel
3663 : output_unat_sprel) (val), NOT_A_CHAR);
3664 break;
3665 case REG_AR + AR_FPSR:
3666 add_unwind_entry (output_fpsr_when (), sep);
3667 add_unwind_entry ((psprel
3668 ? output_fpsr_psprel
3669 : output_fpsr_sprel) (val), NOT_A_CHAR);
3670 break;
3671 case REG_AR + AR_PFS:
3672 add_unwind_entry (output_pfs_when (), sep);
3673 add_unwind_entry ((psprel
3674 ? output_pfs_psprel
3675 : output_pfs_sprel) (val), NOT_A_CHAR);
3676 break;
3677 case REG_AR + AR_LC:
3678 add_unwind_entry (output_lc_when (), sep);
3679 add_unwind_entry ((psprel
3680 ? output_lc_psprel
3681 : output_lc_sprel) (val), NOT_A_CHAR);
3682 break;
3683 case REG_BR:
3684 add_unwind_entry (output_rp_when (), sep);
3685 add_unwind_entry ((psprel
3686 ? output_rp_psprel
3687 : output_rp_sprel) (val), NOT_A_CHAR);
3688 break;
3689 case REG_PR:
3690 add_unwind_entry (output_preds_when (), sep);
3691 add_unwind_entry ((psprel
3692 ? output_preds_psprel
3693 : output_preds_sprel) (val), NOT_A_CHAR);
3694 break;
3695 case REG_PRIUNAT:
3696 add_unwind_entry (output_priunat_when_mem (), sep);
3697 add_unwind_entry ((psprel
3698 ? output_priunat_psprel
3699 : output_priunat_sprel) (val), NOT_A_CHAR);
3700 break;
3701 default:
3702 as_bad (_("First operand to .%s not a valid register"), po);
3703 add_unwind_entry (NULL, sep);
3704 break;
3705 }
3706 }
3707
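/* Illustrative uses of .savesp/.savepsp (hypothetical offsets):

       .savesp  ar.unat, 16    // unat_when + unat_sprel (16)
       .savepsp rp, 8          // rp_when + rp_psprel (8)

   The first operand selects which *_when and *_sprel (or *_psprel)
   builder pair is used; the constant is the spill location's byte
   offset from sp (or psp).  */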
3708 static void
3709 dot_saveg (int dummy ATTRIBUTE_UNUSED)
3710 {
3711 expressionS e;
3712 unsigned grmask;
3713 int sep;
3714
3715 if (!in_prologue ("save.g"))
3716 return;
3717
3718 sep = parse_operand (&e, ',');
3719
3720 grmask = e.X_add_number;
3721 if (e.X_op != O_constant
3722 || e.X_add_number <= 0
3723 || e.X_add_number > 0xf)
3724 {
3725 as_bad (_("First operand to .save.g must be a positive 4-bit constant"));
3726 grmask = 0;
3727 }
3728
3729 if (sep == ',')
3730 {
3731 unsigned reg;
3732 int n = popcount (grmask);
3733
3734 parse_operand (&e, 0);
3735 reg = e.X_add_number - REG_GR;
3736 if (e.X_op != O_register || reg > 127)
3737 {
3738 as_bad (_("Second operand to .save.g must be a general register"));
3739 reg = 0;
3740 }
3741 else if (reg > 128U - n)
3742 {
3743 as_bad (_("Second operand to .save.g must be the first of %d general registers"), n);
3744 reg = 0;
3745 }
3746 add_unwind_entry (output_gr_gr (grmask, reg), 0);
3747 }
3748 else
3749 add_unwind_entry (output_gr_mem (grmask), 0);
3750 }
3751
3752 static void
3753 dot_savef (int dummy ATTRIBUTE_UNUSED)
3754 {
3755 expressionS e;
3756
3757 if (!in_prologue ("save.f"))
3758 return;
3759
3760 parse_operand (&e, 0);
3761
3762 if (e.X_op != O_constant
3763 || e.X_add_number <= 0
3764 || e.X_add_number > 0xfffff)
3765 {
3766 as_bad (_("Operand to .save.f must be a positive 20-bit constant"));
3767 e.X_add_number = 0;
3768 }
3769 add_unwind_entry (output_fr_mem (e.X_add_number), 0);
3770 }
3771
3772 static void
3773 dot_saveb (int dummy ATTRIBUTE_UNUSED)
3774 {
3775 expressionS e;
3776 unsigned brmask;
3777 int sep;
3778
3779 if (!in_prologue ("save.b"))
3780 return;
3781
3782 sep = parse_operand (&e, ',');
3783
3784 brmask = e.X_add_number;
3785 if (e.X_op != O_constant
3786 || e.X_add_number <= 0
3787 || e.X_add_number > 0x1f)
3788 {
3789 as_bad (_("First operand to .save.b must be a positive 5-bit constant"));
3790 brmask = 0;
3791 }
3792
3793 if (sep == ',')
3794 {
3795 unsigned reg;
3796 int n = popcount (brmask);
3797
3798 parse_operand (&e, 0);
3799 reg = e.X_add_number - REG_GR;
3800 if (e.X_op != O_register || reg > 127)
3801 {
3802 as_bad (_("Second operand to .save.b must be a general register"));
3803 reg = 0;
3804 }
3805 else if (reg > 128U - n)
3806 {
3807 as_bad (_("Second operand to .save.b must be the first of %d general registers"), n);
3808 reg = 0;
3809 }
3810 add_unwind_entry (output_br_gr (brmask, reg), 0);
3811 }
3812 else
3813 add_unwind_entry (output_br_mem (brmask), 0);
3814 }
3815
3816 static void
3817 dot_savegf (int dummy ATTRIBUTE_UNUSED)
3818 {
3819 expressionS e1, e2;
3820
3821 if (!in_prologue ("save.gf"))
3822 return;
3823
3824 if (parse_operand (&e1, ',') == ',')
3825 parse_operand (&e2, 0);
3826 else
3827 e2.X_op = O_absent;
3828
3829 if (e1.X_op != O_constant
3830 || e1.X_add_number < 0
3831 || e1.X_add_number > 0xf)
3832 {
3833 as_bad (_("First operand to .save.gf must be a non-negative 4-bit constant"));
3834 e1.X_op = O_absent;
3835 e1.X_add_number = 0;
3836 }
3837 if (e2.X_op != O_constant
3838 || e2.X_add_number < 0
3839 || e2.X_add_number > 0xfffff)
3840 {
3841 as_bad (_("Second operand to .save.gf must be a non-negative 20-bit constant"));
3842 e2.X_op = O_absent;
3843 e2.X_add_number = 0;
3844 }
3845 if (e1.X_op == O_constant
3846 && e2.X_op == O_constant
3847 && e1.X_add_number == 0
3848 && e2.X_add_number == 0)
3849 as_bad (_("Operands to .save.gf may not be both zero"));
3850
3851 add_unwind_entry (output_frgr_mem (e1.X_add_number, e2.X_add_number), 0);
3852 }
3853
3854 static void
3855 dot_spill (int dummy ATTRIBUTE_UNUSED)
3856 {
3857 expressionS e;
3858
3859 if (!in_prologue ("spill"))
3860 return;
3861
3862 parse_operand (&e, 0);
3863
3864 if (e.X_op != O_constant)
3865 {
3866 as_bad (_("Operand to .spill must be a constant"));
3867 e.X_add_number = 0;
3868 }
3869 add_unwind_entry (output_spill_base (e.X_add_number), 0);
3870 }
3871
3872 static void
3873 dot_spillreg (int pred)
3874 {
3875 int sep;
3876 unsigned int qp, ab, xy, reg, treg;
3877 expressionS e;
3878 const char * const po = pred ? "spillreg.p" : "spillreg";
3879
3880 if (!in_procedure (po))
3881 return;
3882
3883 if (pred)
3884 sep = parse_predicate_and_operand (&e, &qp, po);
3885 else
3886 {
3887 sep = parse_operand (&e, ',');
3888 qp = 0;
3889 }
3890 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
3891
3892 if (sep == ',')
3893 sep = parse_operand (&e, ',');
3894 else
3895 e.X_op = O_absent;
3896 convert_expr_to_xy_reg (&e, &xy, &treg, po, 2 + pred);
3897
3898 add_unwind_entry (output_spill_reg (ab, reg, treg, xy, qp), sep);
3899 }
3900
3901 static void
3902 dot_spillmem (int psprel)
3903 {
3904 expressionS e;
3905 int pred = (psprel < 0), sep;
3906 unsigned int qp, ab, reg;
3907 const char * po;
3908
3909 if (pred)
3910 {
3911 psprel = ~psprel;
3912 po = psprel ? "spillpsp.p" : "spillsp.p";
3913 }
3914 else
3915 po = psprel ? "spillpsp" : "spillsp";
3916
3917 if (!in_procedure (po))
3918 return;
3919
3920 if (pred)
3921 sep = parse_predicate_and_operand (&e, &qp, po);
3922 else
3923 {
3924 sep = parse_operand (&e, ',');
3925 qp = 0;
3926 }
3927 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
3928
3929 if (sep == ',')
3930 sep = parse_operand (&e, ',');
3931 else
3932 e.X_op = O_absent;
3933 if (e.X_op != O_constant)
3934 {
3935 as_bad (_("Operand %d to .%s must be a constant"), 2 + pred, po);
3936 e.X_add_number = 0;
3937 }
3938
3939 if (psprel)
3940 add_unwind_entry (output_spill_psprel (ab, reg, e.X_add_number, qp), sep);
3941 else
3942 add_unwind_entry (output_spill_sprel (ab, reg, e.X_add_number, qp), sep);
3943 }
3944
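/* Illustrative uses of the spill directives (hypothetical operands):

       .spillreg ar.unat, r40     // ar.unat's spill target is r40
       .spillsp.p p6, b1, 32      // b1 spilled to sp+32 under p6

   The preserved register is encoded by convert_expr_to_ab_reg; the
   target is either a writable register (convert_expr_to_xy_reg) or an
   sp/psp byte offset.  */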
3945 static unsigned int
3946 get_saved_prologue_count (unsigned long lbl)
3947 {
3948 label_prologue_count *lpc = unwind.saved_prologue_counts;
3949
3950 while (lpc != NULL && lpc->label_number != lbl)
3951 lpc = lpc->next;
3952
3953 if (lpc != NULL)
3954 return lpc->prologue_count;
3955
3956 as_bad (_("Missing .label_state %ld"), lbl);
3957 return 1;
3958 }
3959
3960 static void
3961 save_prologue_count (unsigned long lbl, unsigned int count)
3962 {
3963 label_prologue_count *lpc = unwind.saved_prologue_counts;
3964
3965 while (lpc != NULL && lpc->label_number != lbl)
3966 lpc = lpc->next;
3967
3968 if (lpc != NULL)
3969 lpc->prologue_count = count;
3970 else
3971 {
3972 label_prologue_count *new_lpc = xmalloc (sizeof (* new_lpc));
3973
3974 new_lpc->next = unwind.saved_prologue_counts;
3975 new_lpc->label_number = lbl;
3976 new_lpc->prologue_count = count;
3977 unwind.saved_prologue_counts = new_lpc;
3978 }
3979 }
3980
3981 static void
3982 free_saved_prologue_counts (void)
3983 {
3984 label_prologue_count *lpc = unwind.saved_prologue_counts;
3985 label_prologue_count *next;
3986
3987 while (lpc != NULL)
3988 {
3989 next = lpc->next;
3990 free (lpc);
3991 lpc = next;
3992 }
3993
3994 unwind.saved_prologue_counts = NULL;
3995 }
3996
3997 static void
3998 dot_label_state (int dummy ATTRIBUTE_UNUSED)
3999 {
4000 expressionS e;
4001
4002 if (!in_body ("label_state"))
4003 return;
4004
4005 parse_operand (&e, 0);
4006 if (e.X_op == O_constant)
4007 save_prologue_count (e.X_add_number, unwind.prologue_count);
4008 else
4009 {
4010 as_bad (_("Operand to .label_state must be a constant"));
4011 e.X_add_number = 0;
4012 }
4013 add_unwind_entry (output_label_state (e.X_add_number), 0);
4014 }
4015
4016 static void
4017 dot_copy_state (int dummy ATTRIBUTE_UNUSED)
4018 {
4019 expressionS e;
4020
4021 if (!in_body ("copy_state"))
4022 return;
4023
4024 parse_operand (&e, 0);
4025 if (e.X_op == O_constant)
4026 unwind.prologue_count = get_saved_prologue_count (e.X_add_number);
4027 else
4028 {
4029 as_bad (_("Operand to .copy_state must be a constant"));
4030 e.X_add_number = 0;
4031 }
4032 add_unwind_entry (output_copy_state (e.X_add_number), 0);
4033 }
4034
4035 static void
4036 dot_unwabi (int dummy ATTRIBUTE_UNUSED)
4037 {
4038 expressionS e1, e2;
4039 unsigned char sep;
4040
4041 if (!in_prologue ("unwabi"))
4042 return;
4043
4044 sep = parse_operand (&e1, ',');
4045 if (sep == ',')
4046 parse_operand (&e2, 0);
4047 else
4048 e2.X_op = O_absent;
4049
4050 if (e1.X_op != O_constant)
4051 {
4052 as_bad (_("First operand to .unwabi must be a constant"));
4053 e1.X_add_number = 0;
4054 }
4055
4056 if (e2.X_op != O_constant)
4057 {
4058 as_bad (_("Second operand to .unwabi must be a constant"));
4059 e2.X_add_number = 0;
4060 }
4061
4062 add_unwind_entry (output_unwabi (e1.X_add_number, e2.X_add_number), 0);
4063 }
4064
4065 static void
4066 dot_personality (int dummy ATTRIBUTE_UNUSED)
4067 {
4068 char *name, *p, c;
4069 if (!in_procedure ("personality"))
4070 return;
4071 SKIP_WHITESPACE ();
4072 name = input_line_pointer;
4073 c = get_symbol_end ();
4074 p = input_line_pointer;
4075 unwind.personality_routine = symbol_find_or_make (name);
4076 unwind.force_unwind_entry = 1;
4077 *p = c;
4078 SKIP_WHITESPACE ();
4079 demand_empty_rest_of_line ();
4080 }
4081
4082 static void
4083 dot_proc (int dummy ATTRIBUTE_UNUSED)
4084 {
4085 char *name, *p, c;
4086 symbolS *sym;
4087 proc_pending *pending, *last_pending;
4088
4089 if (unwind.proc_pending.sym)
4090 {
4091 (md.unwind_check == unwind_check_warning
4092 ? as_warn
4093 : as_bad) (_("Missing .endp after previous .proc"));
4094 while (unwind.proc_pending.next)
4095 {
4096 pending = unwind.proc_pending.next;
4097 unwind.proc_pending.next = pending->next;
4098 free (pending);
4099 }
4100 }
4101 last_pending = NULL;
4102
4103 /* Parse names of main and alternate entry points and mark them as
4104 function symbols: */
4105 while (1)
4106 {
4107 SKIP_WHITESPACE ();
4108 name = input_line_pointer;
4109 c = get_symbol_end ();
4110 p = input_line_pointer;
4111 if (!*name)
4112 as_bad (_("Empty argument of .proc"));
4113 else
4114 {
4115 sym = symbol_find_or_make (name);
4116 if (S_IS_DEFINED (sym))
4117 as_bad (_("`%s' was already defined"), name);
4118 else if (!last_pending)
4119 {
4120 unwind.proc_pending.sym = sym;
4121 last_pending = &unwind.proc_pending;
4122 }
4123 else
4124 {
4125 pending = xmalloc (sizeof (*pending));
4126 pending->sym = sym;
4127 last_pending = last_pending->next = pending;
4128 }
4129 symbol_get_bfdsym (sym)->flags |= BSF_FUNCTION;
4130 }
4131 *p = c;
4132 SKIP_WHITESPACE ();
4133 if (*input_line_pointer != ',')
4134 break;
4135 ++input_line_pointer;
4136 }
4137 if (!last_pending)
4138 {
4139 unwind.proc_pending.sym = expr_build_dot ();
4140 last_pending = &unwind.proc_pending;
4141 }
4142 last_pending->next = NULL;
4143 demand_empty_rest_of_line ();
4144 ia64_do_align (16);
4145
4146 unwind.prologue = 0;
4147 unwind.prologue_count = 0;
4148 unwind.body = 0;
4149 unwind.insn = 0;
4150 unwind.list = unwind.tail = unwind.current_entry = NULL;
4151 unwind.personality_routine = 0;
4152 }
4153
4154 static void
4155 dot_body (int dummy ATTRIBUTE_UNUSED)
4156 {
4157 if (!in_procedure ("body"))
4158 return;
4159 if (!unwind.prologue && !unwind.body && unwind.insn)
4160 as_warn (_("Initial .body should precede any instructions"));
4161 check_pending_save ();
4162
4163 unwind.prologue = 0;
4164 unwind.prologue_mask = 0;
4165 unwind.body = 1;
4166
4167 add_unwind_entry (output_body (), 0);
4168 }
4169
4170 static void
4171 dot_prologue (int dummy ATTRIBUTE_UNUSED)
4172 {
4173 unsigned mask = 0, grsave = 0;
4174
4175 if (!in_procedure ("prologue"))
4176 return;
4177 if (unwind.prologue)
4178 {
4179 as_bad (_(".prologue within prologue"));
4180 ignore_rest_of_line ();
4181 return;
4182 }
4183 if (!unwind.body && unwind.insn)
4184 as_warn (_("Initial .prologue should precede any instructions"));
4185
4186 if (!is_it_end_of_statement ())
4187 {
4188 expressionS e;
4189 int n, sep = parse_operand (&e, ',');
4190
4191 if (e.X_op != O_constant
4192 || e.X_add_number < 0
4193 || e.X_add_number > 0xf)
4194 as_bad (_("First operand to .prologue must be a positive 4-bit constant"));
4195 else if (e.X_add_number == 0)
4196 as_warn (_("Pointless use of zero first operand to .prologue"));
4197 else
4198 mask = e.X_add_number;
4199 n = popcount (mask);
4200
4201 if (sep == ',')
4202 parse_operand (&e, 0);
4203 else
4204 e.X_op = O_absent;
4205 if (e.X_op == O_constant
4206 && e.X_add_number >= 0
4207 && e.X_add_number < 128)
4208 {
4209 if (md.unwind_check == unwind_check_error)
4210 as_warn (_("Using a constant as second operand to .prologue is deprecated"));
4211 grsave = e.X_add_number;
4212 }
4213 else if (e.X_op != O_register
4214 || (grsave = e.X_add_number - REG_GR) > 127)
4215 {
4216 as_bad (_("Second operand to .prologue must be a general register"));
4217 grsave = 0;
4218 }
4219 else if (grsave > 128U - n)
4220 {
4221 as_bad (_("Second operand to .prologue must be the first of %d general registers"), n);
4222 grsave = 0;
4223 }
4224
4225 }
4226
4227 if (mask)
4228 add_unwind_entry (output_prologue_gr (mask, grsave), 0);
4229 else
4230 add_unwind_entry (output_prologue (), 0);
4231
4232 unwind.prologue = 1;
4233 unwind.prologue_mask = mask;
4234 unwind.prologue_gr = grsave;
4235 unwind.body = 0;
4236 ++unwind.prologue_count;
4237 }
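
/* Usage sketch (illustrative; the mask and register are made up): .prologue
   takes either no operands or a mask/GR pair, matching the two code paths
   above:

	.prologue			// plain prologue region
	.prologue 0xc, r34		// prologue_gr: mask bits mark which of
					// rp/ar.pfs/psp/pr were saved to
					// consecutive GRs starting at r34

   The meaning of the individual mask bits follows the IA-64 unwind
   convention; using a constant as the second operand is deprecated, as the
   warning above notes.  */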
4238
4239 static void
4240 dot_endp (int dummy ATTRIBUTE_UNUSED)
4241 {
4242 expressionS e;
4243 int bytes_per_address;
4244 long where;
4245 segT saved_seg;
4246 subsegT saved_subseg;
4247 proc_pending *pending;
4248 int unwind_check = md.unwind_check;
4249
4250 md.unwind_check = unwind_check_error;
4251 if (!in_procedure ("endp"))
4252 return;
4253 md.unwind_check = unwind_check;
4254
4255 if (unwind.saved_text_seg)
4256 {
4257 saved_seg = unwind.saved_text_seg;
4258 saved_subseg = unwind.saved_text_subseg;
4259 unwind.saved_text_seg = NULL;
4260 }
4261 else
4262 {
4263 saved_seg = now_seg;
4264 saved_subseg = now_subseg;
4265 }
4266
4267 insn_group_break (1, 0, 0);
4268
4269 /* If there wasn't a .handlerdata, we haven't generated an image yet. */
4270 if (!unwind.info)
4271 generate_unwind_image (saved_seg);
4272
4273 if (unwind.info || unwind.force_unwind_entry)
4274 {
4275 symbolS *proc_end;
4276
4277 subseg_set (md.last_text_seg, 0);
4278 proc_end = expr_build_dot ();
4279
4280 start_unwind_section (saved_seg, SPECIAL_SECTION_UNWIND);
4281
4282 /* Make sure that section has 4 byte alignment for ILP32 and
4283 8 byte alignment for LP64. */
4284 record_alignment (now_seg, md.pointer_size_shift);
4285
4286 /* Need space for 3 pointers for procedure start, procedure end,
4287 and unwind info. */
4288 memset (frag_more (3 * md.pointer_size), 0, 3 * md.pointer_size);
4289 where = frag_now_fix () - (3 * md.pointer_size);
4290 bytes_per_address = bfd_arch_bits_per_address (stdoutput) / 8;
4291
4292 /* Issue the values of a) Proc Begin, b) Proc End, c) Unwind Record. */
4293 e.X_op = O_pseudo_fixup;
4294 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4295 e.X_add_number = 0;
4296 if (!S_IS_LOCAL (unwind.proc_pending.sym)
4297 && S_IS_DEFINED (unwind.proc_pending.sym))
4298 e.X_add_symbol = symbol_temp_new (S_GET_SEGMENT (unwind.proc_pending.sym),
4299 S_GET_VALUE (unwind.proc_pending.sym),
4300 symbol_get_frag (unwind.proc_pending.sym));
4301 else
4302 e.X_add_symbol = unwind.proc_pending.sym;
4303 ia64_cons_fix_new (frag_now, where, bytes_per_address, &e);
4304
4305 e.X_op = O_pseudo_fixup;
4306 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4307 e.X_add_number = 0;
4308 e.X_add_symbol = proc_end;
4309 ia64_cons_fix_new (frag_now, where + bytes_per_address,
4310 bytes_per_address, &e);
4311
4312 if (unwind.info)
4313 {
4314 e.X_op = O_pseudo_fixup;
4315 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4316 e.X_add_number = 0;
4317 e.X_add_symbol = unwind.info;
4318 ia64_cons_fix_new (frag_now, where + (bytes_per_address * 2),
4319 bytes_per_address, &e);
4320 }
4321 }
4322 subseg_set (saved_seg, saved_subseg);
4323
4324 /* Set symbol sizes. */
4325 pending = &unwind.proc_pending;
4326 if (S_GET_NAME (pending->sym))
4327 {
4328 do
4329 {
4330 symbolS *sym = pending->sym;
4331
4332 if (!S_IS_DEFINED (sym))
4333 as_bad (_("`%s' was not defined within procedure"), S_GET_NAME (sym));
4334 else if (S_GET_SIZE (sym) == 0
4335 && symbol_get_obj (sym)->size == NULL)
4336 {
4337 fragS *frag = symbol_get_frag (sym);
4338
4339 if (frag)
4340 {
4341 if (frag == frag_now && SEG_NORMAL (now_seg))
4342 S_SET_SIZE (sym, frag_now_fix () - S_GET_VALUE (sym));
4343 else
4344 {
4345 symbol_get_obj (sym)->size =
4346 (expressionS *) xmalloc (sizeof (expressionS));
4347 symbol_get_obj (sym)->size->X_op = O_subtract;
4348 symbol_get_obj (sym)->size->X_add_symbol
4349 = symbol_new (FAKE_LABEL_NAME, now_seg,
4350 frag_now_fix (), frag_now);
4351 symbol_get_obj (sym)->size->X_op_symbol = sym;
4352 symbol_get_obj (sym)->size->X_add_number = 0;
4353 }
4354 }
4355 }
4356 } while ((pending = pending->next) != NULL);
4357 }
4358
4359 /* Parse names of main and alternate entry points. */
4360 while (1)
4361 {
4362 char *name, *p, c;
4363
4364 SKIP_WHITESPACE ();
4365 name = input_line_pointer;
4366 c = get_symbol_end ();
4367 p = input_line_pointer;
4368 if (!*name)
4369 (md.unwind_check == unwind_check_warning
4370 ? as_warn
4371 : as_bad) (_("Empty argument of .endp"));
4372 else
4373 {
4374 symbolS *sym = symbol_find (name);
4375
4376 for (pending = &unwind.proc_pending; pending; pending = pending->next)
4377 {
4378 if (sym == pending->sym)
4379 {
4380 pending->sym = NULL;
4381 break;
4382 }
4383 }
4384 if (!sym || !pending)
4385 as_warn (_("`%s' was not specified with previous .proc"), name);
4386 }
4387 *p = c;
4388 SKIP_WHITESPACE ();
4389 if (*input_line_pointer != ',')
4390 break;
4391 ++input_line_pointer;
4392 }
4393 demand_empty_rest_of_line ();
4394
4395 /* Deliberately only checking for the main entry point here; the
4396 language spec even says all arguments to .endp are ignored. */
4397 if (unwind.proc_pending.sym
4398 && S_GET_NAME (unwind.proc_pending.sym)
4399 && strcmp (S_GET_NAME (unwind.proc_pending.sym), FAKE_LABEL_NAME))
4400 as_warn (_("`%s' should be an operand to this .endp"),
4401 S_GET_NAME (unwind.proc_pending.sym));
4402 while (unwind.proc_pending.next)
4403 {
4404 pending = unwind.proc_pending.next;
4405 unwind.proc_pending.next = pending->next;
4406 free (pending);
4407 }
4408 unwind.proc_pending.sym = unwind.info = NULL;
4409 }
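
/* Putting the pieces together, a minimal procedure using the unwind
   directives handled above might look like the following (an illustrative
   sketch, not the output of any particular compiler):

	.proc	foo
   foo:
	.prologue
	.save	ar.pfs, r34
	alloc	r34 = ar.pfs, 0, 4, 0, 0
	.save	rp, r35
	mov	r35 = b0
	.body
	// ... procedure body ...
	mov	ar.pfs = r34
	mov	b0 = r35
	br.ret.sptk.many b0
	.endp	foo

   .endp closes the region opened by .proc, emits the unwind table entry
   (see the fixups above) and sets the size of the function symbol(s).  */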
4410
4411 static void
4412 dot_template (int template)
4413 {
4414 CURR_SLOT.user_template = template;
4415 }
4416
4417 static void
4418 dot_regstk (int dummy ATTRIBUTE_UNUSED)
4419 {
4420 int ins, locs, outs, rots;
4421
4422 if (is_it_end_of_statement ())
4423 ins = locs = outs = rots = 0;
4424 else
4425 {
4426 ins = get_absolute_expression ();
4427 if (*input_line_pointer++ != ',')
4428 goto err;
4429 locs = get_absolute_expression ();
4430 if (*input_line_pointer++ != ',')
4431 goto err;
4432 outs = get_absolute_expression ();
4433 if (*input_line_pointer++ != ',')
4434 goto err;
4435 rots = get_absolute_expression ();
4436 }
4437 set_regstack (ins, locs, outs, rots);
4438 return;
4439
4440 err:
4441 as_bad (_("Comma expected"));
4442 ignore_rest_of_line ();
4443 }
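
/* Usage sketch (values are arbitrary): .regstk records the register stack
   layout -- ins, locals, outs, rotating -- without emitting an alloc:

	.regstk	2, 6, 1, 0

   With no operands, the four counts are reset to zero, as handled above.  */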
4444
4445 static void
4446 dot_rot (int type)
4447 {
4448 offsetT num_regs;
4449 valueT num_alloced = 0;
4450 struct dynreg **drpp, *dr;
4451 int ch, base_reg = 0;
4452 char *name, *start;
4453 size_t len;
4454
4455 switch (type)
4456 {
4457 case DYNREG_GR: base_reg = REG_GR + 32; break;
4458 case DYNREG_FR: base_reg = REG_FR + 32; break;
4459 case DYNREG_PR: base_reg = REG_P + 16; break;
4460 default: break;
4461 }
4462
4463 /* First, remove existing names from hash table. */
4464 for (dr = md.dynreg[type]; dr && dr->num_regs; dr = dr->next)
4465 {
4466 hash_delete (md.dynreg_hash, dr->name, FALSE);
4467 /* FIXME: Free dr->name. */
4468 dr->num_regs = 0;
4469 }
4470
4471 drpp = &md.dynreg[type];
4472 while (1)
4473 {
4474 start = input_line_pointer;
4475 ch = get_symbol_end ();
4476 len = strlen (ia64_canonicalize_symbol_name (start));
4477 *input_line_pointer = ch;
4478
4479 SKIP_WHITESPACE ();
4480 if (*input_line_pointer != '[')
4481 {
4482 as_bad (_("Expected '['"));
4483 goto err;
4484 }
4485 ++input_line_pointer; /* skip '[' */
4486
4487 num_regs = get_absolute_expression ();
4488
4489 if (*input_line_pointer++ != ']')
4490 {
4491 as_bad (_("Expected ']'"));
4492 goto err;
4493 }
4494 if (num_regs <= 0)
4495 {
4496 as_bad (_("Number of elements must be positive"));
4497 goto err;
4498 }
4499 SKIP_WHITESPACE ();
4500
4501 num_alloced += num_regs;
4502 switch (type)
4503 {
4504 case DYNREG_GR:
4505 if (num_alloced > md.rot.num_regs)
4506 {
4507 as_bad (_("Used more than the declared %d rotating registers"),
4508 md.rot.num_regs);
4509 goto err;
4510 }
4511 break;
4512 case DYNREG_FR:
4513 if (num_alloced > 96)
4514 {
4515 as_bad (_("Used more than the available 96 rotating registers"));
4516 goto err;
4517 }
4518 break;
4519 case DYNREG_PR:
4520 if (num_alloced > 48)
4521 {
4522 as_bad (_("Used more than the available 48 rotating registers"));
4523 goto err;
4524 }
4525 break;
4526
4527 default:
4528 break;
4529 }
4530
4531 if (!*drpp)
4532 {
4533 *drpp = obstack_alloc (&notes, sizeof (*dr));
4534 memset (*drpp, 0, sizeof (*dr));
4535 }
4536
4537 name = obstack_alloc (&notes, len + 1);
4538 memcpy (name, start, len);
4539 name[len] = '\0';
4540
4541 dr = *drpp;
4542 dr->name = name;
4543 dr->num_regs = num_regs;
4544 dr->base = base_reg;
4545 drpp = &dr->next;
4546 base_reg += num_regs;
4547
4548 if (hash_insert (md.dynreg_hash, name, dr))
4549 {
4550 as_bad (_("Attempt to redefine register set `%s'"), name);
4551 obstack_free (&notes, name);
4552 goto err;
4553 }
4554
4555 if (*input_line_pointer != ',')
4556 break;
4557 ++input_line_pointer; /* skip comma */
4558 SKIP_WHITESPACE ();
4559 }
4560 demand_empty_rest_of_line ();
4561 return;
4562
4563 err:
4564 ignore_rest_of_line ();
4565 }
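
/* Usage sketch (register names and counts are made up): each name[n] group
   is assigned n consecutive registers starting at r32/f32/p16 respectively,
   and the names are entered into md.dynreg_hash so later operands can refer
   to them:

	.rotr	sum[4], idx[2]		// general registers, r32..r37
	.rotf	acc[8]			// floating-point registers, f32..f39
	.rotp	stage[3]		// predicates, p16..p18

   For .rotr the total must not exceed the rotating region declared via
   alloc/.regstk, as checked above.  */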
4566
4567 static void
4568 dot_byteorder (int byteorder)
4569 {
4570 segment_info_type *seginfo = seg_info (now_seg);
4571
4572 if (byteorder == -1)
4573 {
4574 if (seginfo->tc_segment_info_data.endian == 0)
4575 seginfo->tc_segment_info_data.endian = default_big_endian ? 1 : 2;
4576 byteorder = seginfo->tc_segment_info_data.endian == 1;
4577 }
4578 else
4579 seginfo->tc_segment_info_data.endian = byteorder ? 1 : 2;
4580
4581 if (target_big_endian != byteorder)
4582 {
4583 target_big_endian = byteorder;
4584 if (target_big_endian)
4585 {
4586 ia64_number_to_chars = number_to_chars_bigendian;
4587 ia64_float_to_chars = ia64_float_to_chars_bigendian;
4588 }
4589 else
4590 {
4591 ia64_number_to_chars = number_to_chars_littleendian;
4592 ia64_float_to_chars = ia64_float_to_chars_littleendian;
4593 }
4594 }
4595 }
4596
4597 static void
4598 dot_psr (int dummy ATTRIBUTE_UNUSED)
4599 {
4600 char *option;
4601 int ch;
4602
4603 while (1)
4604 {
4605 option = input_line_pointer;
4606 ch = get_symbol_end ();
4607 if (strcmp (option, "lsb") == 0)
4608 md.flags &= ~EF_IA_64_BE;
4609 else if (strcmp (option, "msb") == 0)
4610 md.flags |= EF_IA_64_BE;
4611 else if (strcmp (option, "abi32") == 0)
4612 md.flags &= ~EF_IA_64_ABI64;
4613 else if (strcmp (option, "abi64") == 0)
4614 md.flags |= EF_IA_64_ABI64;
4615 else
4616 as_bad (_("Unknown psr option `%s'"), option);
4617 *input_line_pointer = ch;
4618
4619 SKIP_WHITESPACE ();
4620 if (*input_line_pointer != ',')
4621 break;
4622
4623 ++input_line_pointer;
4624 SKIP_WHITESPACE ();
4625 }
4626 demand_empty_rest_of_line ();
4627 }
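
/* Usage sketch (illustrative): .psr options may be combined with commas and
   simply set or clear the corresponding ELF header flags, e.g.

	.psr	abi64, lsb
	.psr	msb

   Unknown options are diagnosed by the loop above.  */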
4628
4629 static void
4630 dot_ln (int dummy ATTRIBUTE_UNUSED)
4631 {
4632 new_logical_line (0, get_absolute_expression ());
4633 demand_empty_rest_of_line ();
4634 }
4635
4636 static void
4637 cross_section (int ref, void (*cons) (int), int ua)
4638 {
4639 char *start, *end;
4640 int saved_auto_align;
4641 unsigned int section_count;
4642
4643 SKIP_WHITESPACE ();
4644 start = input_line_pointer;
4645 if (*start == '"')
4646 {
4647 int len;
4648 char *name;
4649
4650 name = demand_copy_C_string (&len);
4651 obstack_free(&notes, name);
4652 if (!name)
4653 {
4654 ignore_rest_of_line ();
4655 return;
4656 }
4657 }
4658 else
4659 {
4660 char c = get_symbol_end ();
4661
4662 if (input_line_pointer == start)
4663 {
4664 as_bad (_("Missing section name"));
4665 ignore_rest_of_line ();
4666 return;
4667 }
4668 *input_line_pointer = c;
4669 }
4670 end = input_line_pointer;
4671 SKIP_WHITESPACE ();
4672 if (*input_line_pointer != ',')
4673 {
4674 as_bad (_("Comma expected after section name"));
4675 ignore_rest_of_line ();
4676 return;
4677 }
4678 *end = '\0';
4679 end = input_line_pointer + 1; /* skip comma */
4680 input_line_pointer = start;
4681 md.keep_pending_output = 1;
4682 section_count = bfd_count_sections(stdoutput);
4683 obj_elf_section (0);
4684 if (section_count != bfd_count_sections(stdoutput))
4685 as_warn (_("Creating sections with .xdataN/.xrealN/.xstringZ is deprecated."));
4686 input_line_pointer = end;
4687 saved_auto_align = md.auto_align;
4688 if (ua)
4689 md.auto_align = 0;
4690 (*cons) (ref);
4691 if (ua)
4692 md.auto_align = saved_auto_align;
4693 obj_elf_previous (0);
4694 md.keep_pending_output = 0;
4695 }
4696
4697 static void
4698 dot_xdata (int size)
4699 {
4700 cross_section (size, cons, 0);
4701 }
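
/* Usage sketch (illustrative; section names and values are made up): the
   .xdataN family emits data into a named section without leaving the
   current one; cross_section switches there via obj_elf_section and returns
   via obj_elf_previous:

	.xdata4	.rodata, 1, 2, 3
	.xdata8	".my_notes", some_symbol	// creating a new section this
						// way draws the deprecation
						// warning above

   The .xdataN.ua variants additionally suppress the automatic alignment.  */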
4702
4703 /* Why doesn't float_cons() call md_cons_align() the way cons() does? */
4704
4705 static void
4706 stmt_float_cons (int kind)
4707 {
4708 size_t alignment;
4709
4710 switch (kind)
4711 {
4712 case 'd':
4713 alignment = 8;
4714 break;
4715
4716 case 'x':
4717 case 'X':
4718 alignment = 16;
4719 break;
4720
4721 case 'f':
4722 default:
4723 alignment = 4;
4724 break;
4725 }
4726 ia64_do_align (alignment);
4727 float_cons (kind);
4728 }
4729
4730 static void
4731 stmt_cons_ua (int size)
4732 {
4733 int saved_auto_align = md.auto_align;
4734
4735 md.auto_align = 0;
4736 cons (size);
4737 md.auto_align = saved_auto_align;
4738 }
4739
4740 static void
4741 dot_xfloat_cons (int kind)
4742 {
4743 cross_section (kind, stmt_float_cons, 0);
4744 }
4745
4746 static void
4747 dot_xstringer (int zero)
4748 {
4749 cross_section (zero, stringer, 0);
4750 }
4751
4752 static void
4753 dot_xdata_ua (int size)
4754 {
4755 cross_section (size, cons, 1);
4756 }
4757
4758 static void
4759 dot_xfloat_cons_ua (int kind)
4760 {
4761 cross_section (kind, float_cons, 1);
4762 }
4763
4764 /* .reg.val <regname>,value */
4765
4766 static void
4767 dot_reg_val (int dummy ATTRIBUTE_UNUSED)
4768 {
4769 expressionS reg;
4770
4771 expression_and_evaluate (&reg);
4772 if (reg.X_op != O_register)
4773 {
4774 as_bad (_("Register name expected"));
4775 ignore_rest_of_line ();
4776 }
4777 else if (*input_line_pointer++ != ',')
4778 {
4779 as_bad (_("Comma expected"));
4780 ignore_rest_of_line ();
4781 }
4782 else
4783 {
4784 valueT value = get_absolute_expression ();
4785 int regno = reg.X_add_number;
4786 if (regno <= REG_GR || regno > REG_GR + 127)
4787 as_warn (_("Register value annotation ignored"));
4788 else
4789 {
4790 gr_values[regno - REG_GR].known = 1;
4791 gr_values[regno - REG_GR].value = value;
4792 gr_values[regno - REG_GR].path = md.path;
4793 }
4794 }
4795 demand_empty_rest_of_line ();
4796 }
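
/* Usage sketch (register and value are arbitrary): .reg.val tells the DV
   machinery that a general register is known to hold a particular value on
   the current path:

	.reg.val r14, 0x1000

   Only r1..r127 are tracked; anything else just draws the warning above.  */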
4797
4798 /*
4799 .serialize.data
4800 .serialize.instruction
4801 */
4802 static void
4803 dot_serialize (int type)
4804 {
4805 insn_group_break (0, 0, 0);
4806 if (type)
4807 instruction_serialization ();
4808 else
4809 data_serialization ();
4810 insn_group_break (0, 0, 0);
4811 demand_empty_rest_of_line ();
4812 }
4813
4814 /* Select the DV checking mode:
4815 .auto
4816 .explicit
4817 .default
4818
4819    A stop (instruction group break) is inserted when the mode changes.
4820 */
4821
4822 static void
4823 dot_dv_mode (int type)
4824 {
4825 if (md.manual_bundling)
4826 as_warn (_("Directive invalid within a bundle"));
4827
4828 if (type == 'E' || type == 'A')
4829 md.mode_explicitly_set = 0;
4830 else
4831 md.mode_explicitly_set = 1;
4832
4833 md.detect_dv = 1;
4834 switch (type)
4835 {
4836 case 'A':
4837 case 'a':
4838 if (md.explicit_mode)
4839 insn_group_break (1, 0, 0);
4840 md.explicit_mode = 0;
4841 break;
4842 case 'E':
4843 case 'e':
4844 if (!md.explicit_mode)
4845 insn_group_break (1, 0, 0);
4846 md.explicit_mode = 1;
4847 break;
4848 default:
4849 case 'd':
4850 if (md.explicit_mode != md.default_explicit_mode)
4851 insn_group_break (1, 0, 0);
4852 md.explicit_mode = md.default_explicit_mode;
4853 md.mode_explicitly_set = 0;
4854 break;
4855 }
4856 }
4857
4858 static void
4859 print_prmask (valueT mask)
4860 {
4861 int regno;
4862 char *comma = "";
4863 for (regno = 0; regno < 64; regno++)
4864 {
4865 if (mask & ((valueT) 1 << regno))
4866 {
4867 fprintf (stderr, "%s p%d", comma, regno);
4868 comma = ",";
4869 }
4870 }
4871 }
4872
4873 /*
4874 .pred.rel.clear [p1 [,p2 [,...]]] (also .pred.rel "clear" or @clear)
4875 .pred.rel.imply p1, p2 (also .pred.rel "imply" or @imply)
4876 .pred.rel.mutex p1, p2 [,...] (also .pred.rel "mutex" or @mutex)
4877 .pred.safe_across_calls p1 [, p2 [,...]]
4878 */
4879
4880 static void
4881 dot_pred_rel (int type)
4882 {
4883 valueT mask = 0;
4884 int count = 0;
4885 int p1 = -1, p2 = -1;
4886
4887 if (type == 0)
4888 {
4889 if (*input_line_pointer == '"')
4890 {
4891 int len;
4892 char *form = demand_copy_C_string (&len);
4893
4894 if (strcmp (form, "mutex") == 0)
4895 type = 'm';
4896 else if (strcmp (form, "clear") == 0)
4897 type = 'c';
4898 else if (strcmp (form, "imply") == 0)
4899 type = 'i';
4900 obstack_free (&notes, form);
4901 }
4902 else if (*input_line_pointer == '@')
4903 {
4904 char *form = ++input_line_pointer;
4905 char c = get_symbol_end();
4906
4907 if (strcmp (form, "mutex") == 0)
4908 type = 'm';
4909 else if (strcmp (form, "clear") == 0)
4910 type = 'c';
4911 else if (strcmp (form, "imply") == 0)
4912 type = 'i';
4913 *input_line_pointer = c;
4914 }
4915 else
4916 {
4917 as_bad (_("Missing predicate relation type"));
4918 ignore_rest_of_line ();
4919 return;
4920 }
4921 if (type == 0)
4922 {
4923 as_bad (_("Unrecognized predicate relation type"));
4924 ignore_rest_of_line ();
4925 return;
4926 }
4927 if (*input_line_pointer == ',')
4928 ++input_line_pointer;
4929 SKIP_WHITESPACE ();
4930 }
4931
4932 while (1)
4933 {
4934 valueT bits = 1;
4935 int sep, regno;
4936 expressionS pr, *pr1, *pr2;
4937
4938 sep = parse_operand (&pr, ',');
4939 if (pr.X_op == O_register
4940 && pr.X_add_number >= REG_P
4941 && pr.X_add_number <= REG_P + 63)
4942 {
4943 regno = pr.X_add_number - REG_P;
4944 bits <<= regno;
4945 count++;
4946 if (p1 == -1)
4947 p1 = regno;
4948 else if (p2 == -1)
4949 p2 = regno;
4950 }
4951 else if (type != 'i'
4952 && pr.X_op == O_subtract
4953 && (pr1 = symbol_get_value_expression (pr.X_add_symbol))
4954 && pr1->X_op == O_register
4955 && pr1->X_add_number >= REG_P
4956 && pr1->X_add_number <= REG_P + 63
4957 && (pr2 = symbol_get_value_expression (pr.X_op_symbol))
4958 && pr2->X_op == O_register
4959 && pr2->X_add_number >= REG_P
4960 && pr2->X_add_number <= REG_P + 63)
4961 {
4962 /* It's a range. */
4963 int stop;
4964
4965 regno = pr1->X_add_number - REG_P;
4966 stop = pr2->X_add_number - REG_P;
4967 if (regno >= stop)
4968 {
4969 as_bad (_("Bad register range"));
4970 ignore_rest_of_line ();
4971 return;
4972 }
4973 bits = ((bits << stop) << 1) - (bits << regno);
4974 count += stop - regno + 1;
4975 }
4976 else
4977 {
4978 as_bad (_("Predicate register expected"));
4979 ignore_rest_of_line ();
4980 return;
4981 }
4982 if (mask & bits)
4983 as_warn (_("Duplicate predicate register ignored"));
4984 mask |= bits;
4985 if (sep != ',')
4986 break;
4987 }
4988
4989 switch (type)
4990 {
4991 case 'c':
4992 if (count == 0)
4993 mask = ~(valueT) 0;
4994 clear_qp_mutex (mask);
4995 clear_qp_implies (mask, (valueT) 0);
4996 break;
4997 case 'i':
4998 if (count != 2 || p1 == -1 || p2 == -1)
4999 as_bad (_("Predicate source and target required"));
5000 else if (p1 == 0 || p2 == 0)
5001 as_bad (_("Use of p0 is not valid in this context"));
5002 else
5003 add_qp_imply (p1, p2);
5004 break;
5005 case 'm':
5006 if (count < 2)
5007 {
5008 as_bad (_("At least two PR arguments expected"));
5009 break;
5010 }
5011 else if (mask & 1)
5012 {
5013 as_bad (_("Use of p0 is not valid in this context"));
5014 break;
5015 }
5016 add_qp_mutex (mask);
5017 break;
5018 case 's':
5019 /* note that we don't override any existing relations */
5020 if (count == 0)
5021 {
5022 as_bad (_("At least one PR argument expected"));
5023 break;
5024 }
5025 if (md.debug_dv)
5026 {
5027 fprintf (stderr, "Safe across calls: ");
5028 print_prmask (mask);
5029 fprintf (stderr, "\n");
5030 }
5031 qp_safe_across_calls = mask;
5032 break;
5033 }
5034 demand_empty_rest_of_line ();
5035 }
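
/* Usage sketch (predicate numbers are made up): both the .pred.rel
   "name" / @name spellings and the dotted forms reach the code above, and
   pN-pM register ranges are accepted where noted:

	.pred.rel	"mutex", p6, p7
	.pred.rel.imply	p8, p9
	.pred.safe_across_calls	p1-p5, p16-p63
*/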
5036
5037 /* .entry label [, label [, ...]]
5038 Hint to DV code that the given labels are to be considered entry points.
5039 Otherwise, only global labels are considered entry points. */
5040
5041 static void
5042 dot_entry (int dummy ATTRIBUTE_UNUSED)
5043 {
5044 const char *err;
5045 char *name;
5046 int c;
5047 symbolS *symbolP;
5048
5049 do
5050 {
5051 name = input_line_pointer;
5052 c = get_symbol_end ();
5053 symbolP = symbol_find_or_make (name);
5054
5055 err = hash_insert (md.entry_hash, S_GET_NAME (symbolP), (void *) symbolP);
5056 if (err)
5057 as_fatal (_("Inserting \"%s\" into entry hint table failed: %s"),
5058 name, err);
5059
5060 *input_line_pointer = c;
5061 SKIP_WHITESPACE ();
5062 c = *input_line_pointer;
5063 if (c == ',')
5064 {
5065 input_line_pointer++;
5066 SKIP_WHITESPACE ();
5067 if (*input_line_pointer == '\n')
5068 c = '\n';
5069 }
5070 }
5071 while (c == ',');
5072
5073 demand_empty_rest_of_line ();
5074 }
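
/* Usage sketch (the label name is made up): mark a local label as an entry
   point so the DV analysis treats it like a global one:

	.entry	inner_loop_restart
*/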
5075
5076 /* .mem.offset offset, base
5077 "base" is used to distinguish between offsets from a different base. */
5078
5079 static void
5080 dot_mem_offset (int dummy ATTRIBUTE_UNUSED)
5081 {
5082 md.mem_offset.hint = 1;
5083 md.mem_offset.offset = get_absolute_expression ();
5084 if (*input_line_pointer != ',')
5085 {
5086 as_bad (_("Comma expected"));
5087 ignore_rest_of_line ();
5088 return;
5089 }
5090 ++input_line_pointer;
5091 md.mem_offset.base = get_absolute_expression ();
5092 demand_empty_rest_of_line ();
5093 }
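
/* Usage sketch (offsets are arbitrary): annotate the following memory
   operation with its offset and base so the DV checker can tell that the
   two spills below cannot overlap:

	.mem.offset 0, 0
	st8.spill [r2] = r3, 8
	.mem.offset 8, 0
	st8.spill [r2] = r4, 8
*/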
5094
5095 /* ia64-specific pseudo-ops: */
5096 const pseudo_typeS md_pseudo_table[] =
5097 {
5098 { "radix", dot_radix, 0 },
5099 { "lcomm", s_lcomm_bytes, 1 },
5100 { "loc", dot_loc, 0 },
5101 { "bss", dot_special_section, SPECIAL_SECTION_BSS },
5102 { "sbss", dot_special_section, SPECIAL_SECTION_SBSS },
5103 { "sdata", dot_special_section, SPECIAL_SECTION_SDATA },
5104 { "rodata", dot_special_section, SPECIAL_SECTION_RODATA },
5105 { "comment", dot_special_section, SPECIAL_SECTION_COMMENT },
5106 { "ia_64.unwind", dot_special_section, SPECIAL_SECTION_UNWIND },
5107 { "ia_64.unwind_info", dot_special_section, SPECIAL_SECTION_UNWIND_INFO },
5108 { "init_array", dot_special_section, SPECIAL_SECTION_INIT_ARRAY },
5109 { "fini_array", dot_special_section, SPECIAL_SECTION_FINI_ARRAY },
5110 { "proc", dot_proc, 0 },
5111 { "body", dot_body, 0 },
5112 { "prologue", dot_prologue, 0 },
5113 { "endp", dot_endp, 0 },
5114
5115 { "fframe", dot_fframe, 0 },
5116 { "vframe", dot_vframe, 0 },
5117 { "vframesp", dot_vframesp, 0 },
5118 { "vframepsp", dot_vframesp, 1 },
5119 { "save", dot_save, 0 },
5120 { "restore", dot_restore, 0 },
5121 { "restorereg", dot_restorereg, 0 },
5122 { "restorereg.p", dot_restorereg, 1 },
5123 { "handlerdata", dot_handlerdata, 0 },
5124 { "unwentry", dot_unwentry, 0 },
5125 { "altrp", dot_altrp, 0 },
5126 { "savesp", dot_savemem, 0 },
5127 { "savepsp", dot_savemem, 1 },
5128 { "save.g", dot_saveg, 0 },
5129 { "save.f", dot_savef, 0 },
5130 { "save.b", dot_saveb, 0 },
5131 { "save.gf", dot_savegf, 0 },
5132 { "spill", dot_spill, 0 },
5133 { "spillreg", dot_spillreg, 0 },
5134 { "spillsp", dot_spillmem, 0 },
5135 { "spillpsp", dot_spillmem, 1 },
5136 { "spillreg.p", dot_spillreg, 1 },
5137 { "spillsp.p", dot_spillmem, ~0 },
5138 { "spillpsp.p", dot_spillmem, ~1 },
5139 { "label_state", dot_label_state, 0 },
5140 { "copy_state", dot_copy_state, 0 },
5141 { "unwabi", dot_unwabi, 0 },
5142 { "personality", dot_personality, 0 },
5143 { "mii", dot_template, 0x0 },
5144 { "mli", dot_template, 0x2 }, /* old format, for compatibility */
5145 { "mlx", dot_template, 0x2 },
5146 { "mmi", dot_template, 0x4 },
5147 { "mfi", dot_template, 0x6 },
5148 { "mmf", dot_template, 0x7 },
5149 { "mib", dot_template, 0x8 },
5150 { "mbb", dot_template, 0x9 },
5151 { "bbb", dot_template, 0xb },
5152 { "mmb", dot_template, 0xc },
5153 { "mfb", dot_template, 0xe },
5154 { "align", dot_align, 0 },
5155 { "regstk", dot_regstk, 0 },
5156 { "rotr", dot_rot, DYNREG_GR },
5157 { "rotf", dot_rot, DYNREG_FR },
5158 { "rotp", dot_rot, DYNREG_PR },
5159 { "lsb", dot_byteorder, 0 },
5160 { "msb", dot_byteorder, 1 },
5161 { "psr", dot_psr, 0 },
5162 { "alias", dot_alias, 0 },
5163 { "secalias", dot_alias, 1 },
5164 { "ln", dot_ln, 0 }, /* source line info (for debugging) */
5165
5166 { "xdata1", dot_xdata, 1 },
5167 { "xdata2", dot_xdata, 2 },
5168 { "xdata4", dot_xdata, 4 },
5169 { "xdata8", dot_xdata, 8 },
5170 { "xdata16", dot_xdata, 16 },
5171 { "xreal4", dot_xfloat_cons, 'f' },
5172 { "xreal8", dot_xfloat_cons, 'd' },
5173 { "xreal10", dot_xfloat_cons, 'x' },
5174 { "xreal16", dot_xfloat_cons, 'X' },
5175 { "xstring", dot_xstringer, 8 + 0 },
5176 { "xstringz", dot_xstringer, 8 + 1 },
5177
5178 /* unaligned versions: */
5179 { "xdata2.ua", dot_xdata_ua, 2 },
5180 { "xdata4.ua", dot_xdata_ua, 4 },
5181 { "xdata8.ua", dot_xdata_ua, 8 },
5182 { "xdata16.ua", dot_xdata_ua, 16 },
5183 { "xreal4.ua", dot_xfloat_cons_ua, 'f' },
5184 { "xreal8.ua", dot_xfloat_cons_ua, 'd' },
5185 { "xreal10.ua", dot_xfloat_cons_ua, 'x' },
5186 { "xreal16.ua", dot_xfloat_cons_ua, 'X' },
5187
5188 /* annotations/DV checking support */
5189 { "entry", dot_entry, 0 },
5190 { "mem.offset", dot_mem_offset, 0 },
5191 { "pred.rel", dot_pred_rel, 0 },
5192 { "pred.rel.clear", dot_pred_rel, 'c' },
5193 { "pred.rel.imply", dot_pred_rel, 'i' },
5194 { "pred.rel.mutex", dot_pred_rel, 'm' },
5195 { "pred.safe_across_calls", dot_pred_rel, 's' },
5196 { "reg.val", dot_reg_val, 0 },
5197 { "serialize.data", dot_serialize, 0 },
5198 { "serialize.instruction", dot_serialize, 1 },
5199 { "auto", dot_dv_mode, 'a' },
5200 { "explicit", dot_dv_mode, 'e' },
5201 { "default", dot_dv_mode, 'd' },
5202
5203 /* ??? These are needed to make gas/testsuite/gas/elf/ehopt.s work.
5204 IA-64 aligns data allocation pseudo-ops by default, so we have to
5205 tell it that these ones are supposed to be unaligned. Long term,
5206 should rewrite so that only IA-64 specific data allocation pseudo-ops
5207 are aligned by default. */
5208 {"2byte", stmt_cons_ua, 2},
5209 {"4byte", stmt_cons_ua, 4},
5210 {"8byte", stmt_cons_ua, 8},
5211
5212 { NULL, 0, 0 }
5213 };
5214
5215 static const struct pseudo_opcode
5216 {
5217 const char *name;
5218 void (*handler) (int);
5219 int arg;
5220 }
5221 pseudo_opcode[] =
5222 {
5223 /* these are more like pseudo-ops, but don't start with a dot */
5224 { "data1", cons, 1 },
5225 { "data2", cons, 2 },
5226 { "data4", cons, 4 },
5227 { "data8", cons, 8 },
5228 { "data16", cons, 16 },
5229 { "real4", stmt_float_cons, 'f' },
5230 { "real8", stmt_float_cons, 'd' },
5231 { "real10", stmt_float_cons, 'x' },
5232 { "real16", stmt_float_cons, 'X' },
5233 { "string", stringer, 8 + 0 },
5234 { "stringz", stringer, 8 + 1 },
5235
5236 /* unaligned versions: */
5237 { "data2.ua", stmt_cons_ua, 2 },
5238 { "data4.ua", stmt_cons_ua, 4 },
5239 { "data8.ua", stmt_cons_ua, 8 },
5240 { "data16.ua", stmt_cons_ua, 16 },
5241 { "real4.ua", float_cons, 'f' },
5242 { "real8.ua", float_cons, 'd' },
5243 { "real10.ua", float_cons, 'x' },
5244 { "real16.ua", float_cons, 'X' },
5245 };
5246
5247 /* Declare a register by creating a symbol for it and entering it in
5248 the symbol table. */
5249
5250 static symbolS *
5251 declare_register (const char *name, unsigned int regnum)
5252 {
5253 const char *err;
5254 symbolS *sym;
5255
5256 sym = symbol_create (name, reg_section, regnum, &zero_address_frag);
5257
5258 err = hash_insert (md.reg_hash, S_GET_NAME (sym), (void *) sym);
5259 if (err)
5260 as_fatal ("Inserting \"%s\" into register table failed: %s",
5261 name, err);
5262
5263 return sym;
5264 }
5265
5266 static void
5267 declare_register_set (const char *prefix,
5268 unsigned int num_regs,
5269 unsigned int base_regnum)
5270 {
5271 char name[8];
5272 unsigned int i;
5273
5274 for (i = 0; i < num_regs; ++i)
5275 {
5276 snprintf (name, sizeof (name), "%s%u", prefix, i);
5277 declare_register (name, base_regnum + i);
5278 }
5279 }
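
/* For example (call shown for illustration only; the real registrations are
   done during assembler startup), the 128 static general registers would be
   created with:

	declare_register_set ("r", 128, REG_GR);

   which enters symbols r0..r127 whose values encode REG_GR + n.  */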
5280
5281 static unsigned int
5282 operand_width (enum ia64_opnd opnd)
5283 {
5284 const struct ia64_operand *odesc = &elf64_ia64_operands[opnd];
5285 unsigned int bits = 0;
5286 int i;
5287
5288 bits = 0;
5289 for (i = 0; i < NELEMS (odesc->field) && odesc->field[i].bits; ++i)
5290 bits += odesc->field[i].bits;
5291
5292 return bits;
5293 }
5294
5295 static enum operand_match_result
5296 operand_match (const struct ia64_opcode *idesc, int index, expressionS *e)
5297 {
5298 enum ia64_opnd opnd = idesc->operands[index];
5299 int bits, relocatable = 0;
5300 struct insn_fix *fix;
5301 bfd_signed_vma val;
5302
5303 switch (opnd)
5304 {
5305 /* constants: */
5306
5307 case IA64_OPND_AR_CCV:
5308 if (e->X_op == O_register && e->X_add_number == REG_AR + 32)
5309 return OPERAND_MATCH;
5310 break;
5311
5312 case IA64_OPND_AR_CSD:
5313 if (e->X_op == O_register && e->X_add_number == REG_AR + 25)
5314 return OPERAND_MATCH;
5315 break;
5316
5317 case IA64_OPND_AR_PFS:
5318 if (e->X_op == O_register && e->X_add_number == REG_AR + 64)
5319 return OPERAND_MATCH;
5320 break;
5321
5322 case IA64_OPND_GR0:
5323 if (e->X_op == O_register && e->X_add_number == REG_GR + 0)
5324 return OPERAND_MATCH;
5325 break;
5326
5327 case IA64_OPND_IP:
5328 if (e->X_op == O_register && e->X_add_number == REG_IP)
5329 return OPERAND_MATCH;
5330 break;
5331
5332 case IA64_OPND_PR:
5333 if (e->X_op == O_register && e->X_add_number == REG_PR)
5334 return OPERAND_MATCH;
5335 break;
5336
5337 case IA64_OPND_PR_ROT:
5338 if (e->X_op == O_register && e->X_add_number == REG_PR_ROT)
5339 return OPERAND_MATCH;
5340 break;
5341
5342 case IA64_OPND_PSR:
5343 if (e->X_op == O_register && e->X_add_number == REG_PSR)
5344 return OPERAND_MATCH;
5345 break;
5346
5347 case IA64_OPND_PSR_L:
5348 if (e->X_op == O_register && e->X_add_number == REG_PSR_L)
5349 return OPERAND_MATCH;
5350 break;
5351
5352 case IA64_OPND_PSR_UM:
5353 if (e->X_op == O_register && e->X_add_number == REG_PSR_UM)
5354 return OPERAND_MATCH;
5355 break;
5356
5357 case IA64_OPND_C1:
5358 if (e->X_op == O_constant)
5359 {
5360 if (e->X_add_number == 1)
5361 return OPERAND_MATCH;
5362 else
5363 return OPERAND_OUT_OF_RANGE;
5364 }
5365 break;
5366
5367 case IA64_OPND_C8:
5368 if (e->X_op == O_constant)
5369 {
5370 if (e->X_add_number == 8)
5371 return OPERAND_MATCH;
5372 else
5373 return OPERAND_OUT_OF_RANGE;
5374 }
5375 break;
5376
5377 case IA64_OPND_C16:
5378 if (e->X_op == O_constant)
5379 {
5380 if (e->X_add_number == 16)
5381 return OPERAND_MATCH;
5382 else
5383 return OPERAND_OUT_OF_RANGE;
5384 }
5385 break;
5386
5387 /* register operands: */
5388
5389 case IA64_OPND_AR3:
5390 if (e->X_op == O_register && e->X_add_number >= REG_AR
5391 && e->X_add_number < REG_AR + 128)
5392 return OPERAND_MATCH;
5393 break;
5394
5395 case IA64_OPND_B1:
5396 case IA64_OPND_B2:
5397 if (e->X_op == O_register && e->X_add_number >= REG_BR
5398 && e->X_add_number < REG_BR + 8)
5399 return OPERAND_MATCH;
5400 break;
5401
5402 case IA64_OPND_CR3:
5403 if (e->X_op == O_register && e->X_add_number >= REG_CR
5404 && e->X_add_number < REG_CR + 128)
5405 return OPERAND_MATCH;
5406 break;
5407
5408 case IA64_OPND_F1:
5409 case IA64_OPND_F2:
5410 case IA64_OPND_F3:
5411 case IA64_OPND_F4:
5412 if (e->X_op == O_register && e->X_add_number >= REG_FR
5413 && e->X_add_number < REG_FR + 128)
5414 return OPERAND_MATCH;
5415 break;
5416
5417 case IA64_OPND_P1:
5418 case IA64_OPND_P2:
5419 if (e->X_op == O_register && e->X_add_number >= REG_P
5420 && e->X_add_number < REG_P + 64)
5421 return OPERAND_MATCH;
5422 break;
5423
5424 case IA64_OPND_R1:
5425 case IA64_OPND_R2:
5426 case IA64_OPND_R3:
5427 if (e->X_op == O_register && e->X_add_number >= REG_GR
5428 && e->X_add_number < REG_GR + 128)
5429 return OPERAND_MATCH;
5430 break;
5431
5432 case IA64_OPND_R3_2:
5433 if (e->X_op == O_register && e->X_add_number >= REG_GR)
5434 {
5435 if (e->X_add_number < REG_GR + 4)
5436 return OPERAND_MATCH;
5437 else if (e->X_add_number < REG_GR + 128)
5438 return OPERAND_OUT_OF_RANGE;
5439 }
5440 break;
5441
5442 /* indirect operands: */
5443 case IA64_OPND_CPUID_R3:
5444 case IA64_OPND_DBR_R3:
5445 case IA64_OPND_DTR_R3:
5446 case IA64_OPND_ITR_R3:
5447 case IA64_OPND_IBR_R3:
5448 case IA64_OPND_MSR_R3:
5449 case IA64_OPND_PKR_R3:
5450 case IA64_OPND_PMC_R3:
5451 case IA64_OPND_PMD_R3:
5452 case IA64_OPND_RR_R3:
5453 if (e->X_op == O_index && e->X_op_symbol
5454 && (S_GET_VALUE (e->X_op_symbol) - IND_CPUID
5455 == opnd - IA64_OPND_CPUID_R3))
5456 return OPERAND_MATCH;
5457 break;
5458
5459 case IA64_OPND_MR3:
5460 if (e->X_op == O_index && !e->X_op_symbol)
5461 return OPERAND_MATCH;
5462 break;
5463
5464 /* immediate operands: */
5465 case IA64_OPND_CNT2a:
5466 case IA64_OPND_LEN4:
5467 case IA64_OPND_LEN6:
5468 bits = operand_width (idesc->operands[index]);
5469 if (e->X_op == O_constant)
5470 {
5471 if ((bfd_vma) (e->X_add_number - 1) < ((bfd_vma) 1 << bits))
5472 return OPERAND_MATCH;
5473 else
5474 return OPERAND_OUT_OF_RANGE;
5475 }
5476 break;
5477
5478 case IA64_OPND_CNT2b:
5479 if (e->X_op == O_constant)
5480 {
5481 if ((bfd_vma) (e->X_add_number - 1) < 3)
5482 return OPERAND_MATCH;
5483 else
5484 return OPERAND_OUT_OF_RANGE;
5485 }
5486 break;
5487
5488 case IA64_OPND_CNT2c:
5489 val = e->X_add_number;
5490 if (e->X_op == O_constant)
5491 {
5492 if ((val == 0 || val == 7 || val == 15 || val == 16))
5493 return OPERAND_MATCH;
5494 else
5495 return OPERAND_OUT_OF_RANGE;
5496 }
5497 break;
5498
5499 case IA64_OPND_SOR:
5500 /* SOR must be an integer multiple of 8 */
5501 if (e->X_op == O_constant && e->X_add_number & 0x7)
5502 return OPERAND_OUT_OF_RANGE;
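	  /* Fall through: SOF/SOL/SOR share the 0..96 range check below.  */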
5503 case IA64_OPND_SOF:
5504 case IA64_OPND_SOL:
5505 if (e->X_op == O_constant)
5506 {
5507 if ((bfd_vma) e->X_add_number <= 96)
5508 return OPERAND_MATCH;
5509 else
5510 return OPERAND_OUT_OF_RANGE;
5511 }
5512 break;
5513
5514 case IA64_OPND_IMMU62:
5515 if (e->X_op == O_constant)
5516 {
5517 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << 62))
5518 return OPERAND_MATCH;
5519 else
5520 return OPERAND_OUT_OF_RANGE;
5521 }
5522 else
5523 {
5524 /* FIXME -- need 62-bit relocation type */
5525 as_bad (_("62-bit relocation not yet implemented"));
5526 }
5527 break;
5528
5529 case IA64_OPND_IMMU64:
5530 if (e->X_op == O_symbol || e->X_op == O_pseudo_fixup
5531 || e->X_op == O_subtract)
5532 {
5533 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5534 fix->code = BFD_RELOC_IA64_IMM64;
5535 if (e->X_op != O_subtract)
5536 {
5537 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5538 if (e->X_op == O_pseudo_fixup)
5539 e->X_op = O_symbol;
5540 }
5541
5542 fix->opnd = idesc->operands[index];
5543 fix->expr = *e;
5544 fix->is_pcrel = 0;
5545 ++CURR_SLOT.num_fixups;
5546 return OPERAND_MATCH;
5547 }
5548 else if (e->X_op == O_constant)
5549 return OPERAND_MATCH;
5550 break;
5551
5552 case IA64_OPND_IMMU5b:
5553 if (e->X_op == O_constant)
5554 {
5555 val = e->X_add_number;
5556 if (val >= 32 && val <= 63)
5557 return OPERAND_MATCH;
5558 else
5559 return OPERAND_OUT_OF_RANGE;
5560 }
5561 break;
5562
5563 case IA64_OPND_CCNT5:
5564 case IA64_OPND_CNT5:
5565 case IA64_OPND_CNT6:
5566 case IA64_OPND_CPOS6a:
5567 case IA64_OPND_CPOS6b:
5568 case IA64_OPND_CPOS6c:
5569 case IA64_OPND_IMMU2:
5570 case IA64_OPND_IMMU7a:
5571 case IA64_OPND_IMMU7b:
5572 case IA64_OPND_IMMU21:
5573 case IA64_OPND_IMMU24:
5574 case IA64_OPND_MBTYPE4:
5575 case IA64_OPND_MHTYPE8:
5576 case IA64_OPND_POS6:
5577 bits = operand_width (idesc->operands[index]);
5578 if (e->X_op == O_constant)
5579 {
5580 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5581 return OPERAND_MATCH;
5582 else
5583 return OPERAND_OUT_OF_RANGE;
5584 }
5585 break;
5586
5587 case IA64_OPND_IMMU9:
5588 bits = operand_width (idesc->operands[index]);
5589 if (e->X_op == O_constant)
5590 {
5591 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5592 {
5593 int lobits = e->X_add_number & 0x3;
5594 if (((bfd_vma) e->X_add_number & 0x3C) != 0 && lobits == 0)
5595 e->X_add_number |= (bfd_vma) 0x3;
5596 return OPERAND_MATCH;
5597 }
5598 else
5599 return OPERAND_OUT_OF_RANGE;
5600 }
5601 break;
5602
5603 case IA64_OPND_IMM44:
5604       /* least significant 16 bits must be zero */
5605 if ((e->X_add_number & 0xffff) != 0)
5606 /* XXX technically, this is wrong: we should not be issuing warning
5607 messages until we're sure this instruction pattern is going to
5608 be used! */
5609 as_warn (_("lower 16 bits of mask ignored"));
5610
5611 if (e->X_op == O_constant)
5612 {
5613 if (((e->X_add_number >= 0
5614 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 44))
5615 || (e->X_add_number < 0
5616 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 44))))
5617 {
5618 /* sign-extend */
5619 if (e->X_add_number >= 0
5620 && (e->X_add_number & ((bfd_vma) 1 << 43)) != 0)
5621 {
5622 e->X_add_number |= ~(((bfd_vma) 1 << 44) - 1);
5623 }
5624 return OPERAND_MATCH;
5625 }
5626 else
5627 return OPERAND_OUT_OF_RANGE;
5628 }
5629 break;
5630
5631 case IA64_OPND_IMM17:
5632 /* bit 0 is a don't care (pr0 is hardwired to 1) */
5633 if (e->X_op == O_constant)
5634 {
5635 if (((e->X_add_number >= 0
5636 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 17))
5637 || (e->X_add_number < 0
5638 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 17))))
5639 {
5640 /* sign-extend */
5641 if (e->X_add_number >= 0
5642 && (e->X_add_number & ((bfd_vma) 1 << 16)) != 0)
5643 {
5644 e->X_add_number |= ~(((bfd_vma) 1 << 17) - 1);
5645 }
5646 return OPERAND_MATCH;
5647 }
5648 else
5649 return OPERAND_OUT_OF_RANGE;
5650 }
5651 break;
5652
5653 case IA64_OPND_IMM14:
5654 case IA64_OPND_IMM22:
5655 relocatable = 1;
5656 case IA64_OPND_IMM1:
5657 case IA64_OPND_IMM8:
5658 case IA64_OPND_IMM8U4:
5659 case IA64_OPND_IMM8M1:
5660 case IA64_OPND_IMM8M1U4:
5661 case IA64_OPND_IMM8M1U8:
5662 case IA64_OPND_IMM9a:
5663 case IA64_OPND_IMM9b:
5664 bits = operand_width (idesc->operands[index]);
5665 if (relocatable && (e->X_op == O_symbol
5666 || e->X_op == O_subtract
5667 || e->X_op == O_pseudo_fixup))
5668 {
5669 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5670
5671 if (idesc->operands[index] == IA64_OPND_IMM14)
5672 fix->code = BFD_RELOC_IA64_IMM14;
5673 else
5674 fix->code = BFD_RELOC_IA64_IMM22;
5675
5676 if (e->X_op != O_subtract)
5677 {
5678 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5679 if (e->X_op == O_pseudo_fixup)
5680 e->X_op = O_symbol;
5681 }
5682
5683 fix->opnd = idesc->operands[index];
5684 fix->expr = *e;
5685 fix->is_pcrel = 0;
5686 ++CURR_SLOT.num_fixups;
5687 return OPERAND_MATCH;
5688 }
5689 else if (e->X_op != O_constant
5690 && ! (e->X_op == O_big && opnd == IA64_OPND_IMM8M1U8))
5691 return OPERAND_MISMATCH;
5692
5693 if (opnd == IA64_OPND_IMM8M1U4)
5694 {
5695 /* Zero is not valid for unsigned compares that take an adjusted
5696 constant immediate range. */
5697 if (e->X_add_number == 0)
5698 return OPERAND_OUT_OF_RANGE;
5699
5700 /* Sign-extend 32-bit unsigned numbers, so that the following range
5701 checks will work. */
5702 val = e->X_add_number;
5703 if (((val & (~(bfd_vma) 0 << 32)) == 0)
5704 && ((val & ((bfd_vma) 1 << 31)) != 0))
5705 val = ((val << 32) >> 32);
5706
5707 /* Check for 0x100000000. This is valid because
5708 0x100000000-1 is the same as ((uint32_t) -1). */
5709 if (val == ((bfd_signed_vma) 1 << 32))
5710 return OPERAND_MATCH;
5711
5712 val = val - 1;
5713 }
5714 else if (opnd == IA64_OPND_IMM8M1U8)
5715 {
5716 /* Zero is not valid for unsigned compares that take an adjusted
5717 constant immediate range. */
5718 if (e->X_add_number == 0)
5719 return OPERAND_OUT_OF_RANGE;
5720
5721 /* Check for 0x10000000000000000. */
5722 if (e->X_op == O_big)
5723 {
5724 if (generic_bignum[0] == 0
5725 && generic_bignum[1] == 0
5726 && generic_bignum[2] == 0
5727 && generic_bignum[3] == 0
5728 && generic_bignum[4] == 1)
5729 return OPERAND_MATCH;
5730 else
5731 return OPERAND_OUT_OF_RANGE;
5732 }
5733 else
5734 val = e->X_add_number - 1;
5735 }
5736 else if (opnd == IA64_OPND_IMM8M1)
5737 val = e->X_add_number - 1;
5738 else if (opnd == IA64_OPND_IMM8U4)
5739 {
5740 /* Sign-extend 32-bit unsigned numbers, so that the following range
5741 checks will work. */
5742 val = e->X_add_number;
5743 if (((val & (~(bfd_vma) 0 << 32)) == 0)
5744 && ((val & ((bfd_vma) 1 << 31)) != 0))
5745 val = ((val << 32) >> 32);
5746 }
5747 else
5748 val = e->X_add_number;
5749
5750 if ((val >= 0 && (bfd_vma) val < ((bfd_vma) 1 << (bits - 1)))
5751 || (val < 0 && (bfd_vma) -val <= ((bfd_vma) 1 << (bits - 1))))
5752 return OPERAND_MATCH;
5753 else
5754 return OPERAND_OUT_OF_RANGE;
5755
5756 case IA64_OPND_INC3:
5757 /* +/- 1, 4, 8, 16 */
5758 val = e->X_add_number;
5759 if (val < 0)
5760 val = -val;
5761 if (e->X_op == O_constant)
5762 {
5763 if ((val == 1 || val == 4 || val == 8 || val == 16))
5764 return OPERAND_MATCH;
5765 else
5766 return OPERAND_OUT_OF_RANGE;
5767 }
5768 break;
5769
5770 case IA64_OPND_TGT25:
5771 case IA64_OPND_TGT25b:
5772 case IA64_OPND_TGT25c:
5773 case IA64_OPND_TGT64:
5774 if (e->X_op == O_symbol)
5775 {
5776 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5777 if (opnd == IA64_OPND_TGT25)
5778 fix->code = BFD_RELOC_IA64_PCREL21F;
5779 else if (opnd == IA64_OPND_TGT25b)
5780 fix->code = BFD_RELOC_IA64_PCREL21M;
5781 else if (opnd == IA64_OPND_TGT25c)
5782 fix->code = BFD_RELOC_IA64_PCREL21B;
5783 else if (opnd == IA64_OPND_TGT64)
5784 fix->code = BFD_RELOC_IA64_PCREL60B;
5785 else
5786 abort ();
5787
5788 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5789 fix->opnd = idesc->operands[index];
5790 fix->expr = *e;
5791 fix->is_pcrel = 1;
5792 ++CURR_SLOT.num_fixups;
5793 return OPERAND_MATCH;
5794 }
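      /* Fall through: a non-symbol target is handled like a TAG13 operand
	 below.  */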
5795 case IA64_OPND_TAG13:
5796 case IA64_OPND_TAG13b:
5797 switch (e->X_op)
5798 {
5799 case O_constant:
5800 return OPERAND_MATCH;
5801
5802 case O_symbol:
5803 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5804 /* There are no external relocs for TAG13/TAG13b fields, so we
5805 create a dummy reloc. This will not live past md_apply_fix. */
5806 fix->code = BFD_RELOC_UNUSED;
5807 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5808 fix->opnd = idesc->operands[index];
5809 fix->expr = *e;
5810 fix->is_pcrel = 1;
5811 ++CURR_SLOT.num_fixups;
5812 return OPERAND_MATCH;
5813
5814 default:
5815 break;
5816 }
5817 break;
5818
5819 case IA64_OPND_LDXMOV:
5820 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5821 fix->code = BFD_RELOC_IA64_LDXMOV;
5822 fix->opnd = idesc->operands[index];
5823 fix->expr = *e;
5824 fix->is_pcrel = 0;
5825 ++CURR_SLOT.num_fixups;
5826 return OPERAND_MATCH;
5827
5828 default:
5829 break;
5830 }
5831 return OPERAND_MISMATCH;
5832 }
5833
5834 static int
5835 parse_operand (expressionS *e, int more)
5836 {
5837 int sep = '\0';
5838
5839 memset (e, 0, sizeof (*e));
5840 e->X_op = O_absent;
5841 SKIP_WHITESPACE ();
5842 expression_and_evaluate (e);
5843 sep = *input_line_pointer;
5844 if (more && (sep == ',' || sep == more))
5845 ++input_line_pointer;
5846 return sep;
5847 }
5848
5849 /* Returns the next entry in the opcode table that matches the one in
5850 IDESC, and frees the entry in IDESC. If no matching entry is
5851 found, NULL is returned instead. */
5852
5853 static struct ia64_opcode *
5854 get_next_opcode (struct ia64_opcode *idesc)
5855 {
5856 struct ia64_opcode *next = ia64_find_next_opcode (idesc);
5857 ia64_free_opcode (idesc);
5858 return next;
5859 }
5860
5861 /* Parse the operands for the opcode and find the opcode variant that
5862 matches the specified operands, or NULL if no match is possible. */
5863
5864 static struct ia64_opcode *
5865 parse_operands (struct ia64_opcode *idesc)
5866 {
5867 int i = 0, highest_unmatched_operand, num_operands = 0, num_outputs = 0;
5868 int error_pos, out_of_range_pos, curr_out_of_range_pos, sep = 0;
5869 int reg1, reg2;
5870 char reg_class;
5871 enum ia64_opnd expected_operand = IA64_OPND_NIL;
5872 enum operand_match_result result;
5873 char mnemonic[129];
5874 char *first_arg = 0, *end, *saved_input_pointer;
5875 unsigned int sof;
5876
5877 assert (strlen (idesc->name) <= 128);
5878
5879 strcpy (mnemonic, idesc->name);
5880 if (idesc->operands[2] == IA64_OPND_SOF
5881 || idesc->operands[1] == IA64_OPND_SOF)
5882 {
5883 /* To make the common idiom "alloc loc?=ar.pfs,0,1,0,0" work, we
5884 can't parse the first operand until we have parsed the
5885 remaining operands of the "alloc" instruction. */
5886 SKIP_WHITESPACE ();
5887 first_arg = input_line_pointer;
5888 end = strchr (input_line_pointer, '=');
5889 if (!end)
5890 {
5891 as_bad (_("Expected separator `='"));
5892 return 0;
5893 }
5894 input_line_pointer = end + 1;
5895 ++i;
5896 ++num_outputs;
5897 }
5898
5899 for (; ; ++i)
5900 {
5901 if (i < NELEMS (CURR_SLOT.opnd))
5902 {
5903 sep = parse_operand (CURR_SLOT.opnd + i, '=');
5904 if (CURR_SLOT.opnd[i].X_op == O_absent)
5905 break;
5906 }
5907 else
5908 {
5909 expressionS dummy;
5910
5911 sep = parse_operand (&dummy, '=');
5912 if (dummy.X_op == O_absent)
5913 break;
5914 }
5915
5916 ++num_operands;
5917
5918 if (sep != '=' && sep != ',')
5919 break;
5920
5921 if (sep == '=')
5922 {
5923 if (num_outputs > 0)
5924 as_bad (_("Duplicate equal sign (=) in instruction"));
5925 else
5926 num_outputs = i + 1;
5927 }
5928 }
5929 if (sep != '\0')
5930 {
5931 as_bad (_("Illegal operand separator `%c'"), sep);
5932 return 0;
5933 }
5934
5935 if (idesc->operands[2] == IA64_OPND_SOF
5936 || idesc->operands[1] == IA64_OPND_SOF)
5937 {
5938 /* Map alloc r1=ar.pfs,i,l,o,r to alloc r1=ar.pfs,(i+l+o),(i+l),r.
5939 Note, however, that due to that mapping operand numbers in error
5940 messages for any of the constant operands will not be correct. */
5941 know (strcmp (idesc->name, "alloc") == 0);
5942       /* The first operand hasn't been parsed/initialized yet (but
5943 num_operands intentionally doesn't account for that). */
5944 i = num_operands > 4 ? 2 : 1;
5945 #define FORCE_CONST(n) (CURR_SLOT.opnd[n].X_op == O_constant \
5946 ? CURR_SLOT.opnd[n].X_add_number \
5947 : 0)
5948 sof = set_regstack (FORCE_CONST(i),
5949 FORCE_CONST(i + 1),
5950 FORCE_CONST(i + 2),
5951 FORCE_CONST(i + 3));
5952 #undef FORCE_CONST
5953
5954 /* now we can parse the first arg: */
5955 saved_input_pointer = input_line_pointer;
5956 input_line_pointer = first_arg;
5957 sep = parse_operand (CURR_SLOT.opnd + 0, '=');
5958 if (sep != '=')
5959 --num_outputs; /* force error */
5960 input_line_pointer = saved_input_pointer;
5961
5962 CURR_SLOT.opnd[i].X_add_number = sof;
5963 if (CURR_SLOT.opnd[i + 1].X_op == O_constant
5964 && CURR_SLOT.opnd[i + 2].X_op == O_constant)
5965 CURR_SLOT.opnd[i + 1].X_add_number
5966 = sof - CURR_SLOT.opnd[i + 2].X_add_number;
5967 else
5968 CURR_SLOT.opnd[i + 1].X_op = O_illegal;
5969 CURR_SLOT.opnd[i + 2] = CURR_SLOT.opnd[i + 3];
5970 }
5971
5972 highest_unmatched_operand = -4;
5973 curr_out_of_range_pos = -1;
5974 error_pos = 0;
5975 for (; idesc; idesc = get_next_opcode (idesc))
5976 {
5977 if (num_outputs != idesc->num_outputs)
5978 continue; /* mismatch in # of outputs */
5979 if (highest_unmatched_operand < 0)
5980 highest_unmatched_operand |= 1;
5981 if (num_operands > NELEMS (idesc->operands)
5982 || (num_operands < NELEMS (idesc->operands)
5983 && idesc->operands[num_operands])
5984 || (num_operands > 0 && !idesc->operands[num_operands - 1]))
5985 continue; /* mismatch in number of arguments */
5986 if (highest_unmatched_operand < 0)
5987 highest_unmatched_operand |= 2;
5988
5989 CURR_SLOT.num_fixups = 0;
5990
5991 /* Try to match all operands. If we see an out-of-range operand,
5992 then continue trying to match the rest of the operands, since if
5993 the rest match, then this idesc will give the best error message. */
5994
5995 out_of_range_pos = -1;
5996 for (i = 0; i < num_operands && idesc->operands[i]; ++i)
5997 {
5998 result = operand_match (idesc, i, CURR_SLOT.opnd + i);
5999 if (result != OPERAND_MATCH)
6000 {
6001 if (result != OPERAND_OUT_OF_RANGE)
6002 break;
6003 if (out_of_range_pos < 0)
6004 /* remember position of the first out-of-range operand: */
6005 out_of_range_pos = i;
6006 }
6007 }
6008
6009 /* If we did not match all operands, or if at least one operand was
6010 out-of-range, then this idesc does not match. Keep track of which
6011 idesc matched the most operands before failing. If we have two
6012 idescs that failed at the same position, and one had an out-of-range
6013 operand, then prefer the out-of-range operand. Thus if we have
6014 "add r0=0x1000000,r1" we get an error saying the constant is out
6015 of range instead of an error saying that the constant should have been
6016 a register. */
6017
6018 if (i != num_operands || out_of_range_pos >= 0)
6019 {
6020 if (i > highest_unmatched_operand
6021 || (i == highest_unmatched_operand
6022 && out_of_range_pos > curr_out_of_range_pos))
6023 {
6024 highest_unmatched_operand = i;
6025 if (out_of_range_pos >= 0)
6026 {
6027 expected_operand = idesc->operands[out_of_range_pos];
6028 error_pos = out_of_range_pos;
6029 }
6030 else
6031 {
6032 expected_operand = idesc->operands[i];
6033 error_pos = i;
6034 }
6035 curr_out_of_range_pos = out_of_range_pos;
6036 }
6037 continue;
6038 }
6039
6040 break;
6041 }
6042 if (!idesc)
6043 {
6044 if (expected_operand)
6045 as_bad (_("Operand %u of `%s' should be %s"),
6046 error_pos + 1, mnemonic,
6047 elf64_ia64_operands[expected_operand].desc);
6048 else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 1))
6049 as_bad (_("Wrong number of output operands"));
6050 else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 2))
6051 as_bad (_("Wrong number of input operands"));
6052 else
6053 as_bad (_("Operand mismatch"));
6054 return 0;
6055 }
6056
6057 /* Check that the instruction doesn't use
6058 - r0, f0, or f1 as output operands
6059 - the same predicate twice as output operands
6060 - r0 as address of a base update load or store
6061 - the same GR as output and address of a base update load
6062 - two even- or two odd-numbered FRs as output operands of a floating
6063 point parallel load.
6064      At most two (conflicting) output (or output-like) operands can exist
6065 (floating point parallel loads have three outputs, but the base register,
6066 if updated, cannot conflict with the actual outputs). */
6067 reg2 = reg1 = -1;
6068 for (i = 0; i < num_operands; ++i)
6069 {
6070 int regno = 0;
6071
6072 reg_class = 0;
6073 switch (idesc->operands[i])
6074 {
6075 case IA64_OPND_R1:
6076 case IA64_OPND_R2:
6077 case IA64_OPND_R3:
6078 if (i < num_outputs)
6079 {
6080 if (CURR_SLOT.opnd[i].X_add_number == REG_GR)
6081 reg_class = 'r';
6082 else if (reg1 < 0)
6083 reg1 = CURR_SLOT.opnd[i].X_add_number;
6084 else if (reg2 < 0)
6085 reg2 = CURR_SLOT.opnd[i].X_add_number;
6086 }
6087 break;
6088 case IA64_OPND_P1:
6089 case IA64_OPND_P2:
6090 if (i < num_outputs)
6091 {
6092 if (reg1 < 0)
6093 reg1 = CURR_SLOT.opnd[i].X_add_number;
6094 else if (reg2 < 0)
6095 reg2 = CURR_SLOT.opnd[i].X_add_number;
6096 }
6097 break;
6098 case IA64_OPND_F1:
6099 case IA64_OPND_F2:
6100 case IA64_OPND_F3:
6101 case IA64_OPND_F4:
6102 if (i < num_outputs)
6103 {
6104 if (CURR_SLOT.opnd[i].X_add_number >= REG_FR
6105 && CURR_SLOT.opnd[i].X_add_number <= REG_FR + 1)
6106 {
6107 reg_class = 'f';
6108 regno = CURR_SLOT.opnd[i].X_add_number - REG_FR;
6109 }
6110 else if (reg1 < 0)
6111 reg1 = CURR_SLOT.opnd[i].X_add_number;
6112 else if (reg2 < 0)
6113 reg2 = CURR_SLOT.opnd[i].X_add_number;
6114 }
6115 break;
6116 case IA64_OPND_MR3:
6117 if (idesc->flags & IA64_OPCODE_POSTINC)
6118 {
6119 if (CURR_SLOT.opnd[i].X_add_number == REG_GR)
6120 reg_class = 'm';
6121 else if (reg1 < 0)
6122 reg1 = CURR_SLOT.opnd[i].X_add_number;
6123 else if (reg2 < 0)
6124 reg2 = CURR_SLOT.opnd[i].X_add_number;
6125 }
6126 break;
6127 default:
6128 break;
6129 }
6130 switch (reg_class)
6131 {
6132 case 0:
6133 break;
6134 default:
6135 as_warn (_("Invalid use of `%c%d' as output operand"), reg_class, regno);
6136 break;
6137 case 'm':
6138 as_warn (_("Invalid use of `r%d' as base update address operand"), regno);
6139 break;
6140 }
6141 }
6142 if (reg1 == reg2)
6143 {
6144 if (reg1 >= REG_GR && reg1 <= REG_GR + 127)
6145 {
6146 reg1 -= REG_GR;
6147 reg_class = 'r';
6148 }
6149 else if (reg1 >= REG_P && reg1 <= REG_P + 63)
6150 {
6151 reg1 -= REG_P;
6152 reg_class = 'p';
6153 }
6154 else if (reg1 >= REG_FR && reg1 <= REG_FR + 127)
6155 {
6156 reg1 -= REG_FR;
6157 reg_class = 'f';
6158 }
6159 else
6160 reg_class = 0;
6161 if (reg_class)
6162 as_warn (_("Invalid duplicate use of `%c%d'"), reg_class, reg1);
6163 }
6164 else if (((reg1 >= REG_FR && reg1 <= REG_FR + 31
6165 && reg2 >= REG_FR && reg2 <= REG_FR + 31)
6166 || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127
6167 && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127))
6168 && ! ((reg1 ^ reg2) & 1))
6169 as_warn (_("Invalid simultaneous use of `f%d' and `f%d'"),
6170 reg1 - REG_FR, reg2 - REG_FR);
6171 else if ((reg1 >= REG_FR && reg1 <= REG_FR + 31
6172 && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127)
6173 || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127
6174 && reg2 >= REG_FR && reg2 <= REG_FR + 31))
6175 as_warn (_("Dangerous simultaneous use of `f%d' and `f%d'"),
6176 reg1 - REG_FR, reg2 - REG_FR);
6177 return idesc;
6178 }
6179
6180 static void
6181 build_insn (struct slot *slot, bfd_vma *insnp)
6182 {
6183 const struct ia64_operand *odesc, *o2desc;
6184 struct ia64_opcode *idesc = slot->idesc;
6185 bfd_vma insn;
6186 bfd_signed_vma val;
6187 const char *err;
6188 int i;
6189
6190 insn = idesc->opcode | slot->qp_regno;
6191
6192 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; ++i)
6193 {
6194 if (slot->opnd[i].X_op == O_register
6195 || slot->opnd[i].X_op == O_constant
6196 || slot->opnd[i].X_op == O_index)
6197 val = slot->opnd[i].X_add_number;
6198 else if (slot->opnd[i].X_op == O_big)
6199 {
6200 /* This must be the value 0x10000000000000000. */
6201 assert (idesc->operands[i] == IA64_OPND_IMM8M1U8);
6202 val = 0;
6203 }
6204 else
6205 val = 0;
6206
6207 switch (idesc->operands[i])
6208 {
6209 case IA64_OPND_IMMU64:
6210 *insnp++ = (val >> 22) & 0x1ffffffffffLL;
6211 insn |= (((val & 0x7f) << 13) | (((val >> 7) & 0x1ff) << 27)
6212 | (((val >> 16) & 0x1f) << 22) | (((val >> 21) & 0x1) << 21)
6213 | (((val >> 63) & 0x1) << 36));
6214 continue;
6215
6216 case IA64_OPND_IMMU62:
6217 val &= 0x3fffffffffffffffULL;
6218 if (val != slot->opnd[i].X_add_number)
6219 as_warn (_("Value truncated to 62 bits"));
6220 *insnp++ = (val >> 21) & 0x1ffffffffffLL;
6221 insn |= (((val & 0xfffff) << 6) | (((val >> 20) & 0x1) << 36));
6222 continue;
6223
6224 case IA64_OPND_TGT64:
6225 val >>= 4;
6226 *insnp++ = ((val >> 20) & 0x7fffffffffLL) << 2;
6227 insn |= ((((val >> 59) & 0x1) << 36)
6228 | (((val >> 0) & 0xfffff) << 13));
6229 continue;
6230
6231 case IA64_OPND_AR3:
6232 val -= REG_AR;
6233 break;
6234
6235 case IA64_OPND_B1:
6236 case IA64_OPND_B2:
6237 val -= REG_BR;
6238 break;
6239
6240 case IA64_OPND_CR3:
6241 val -= REG_CR;
6242 break;
6243
6244 case IA64_OPND_F1:
6245 case IA64_OPND_F2:
6246 case IA64_OPND_F3:
6247 case IA64_OPND_F4:
6248 val -= REG_FR;
6249 break;
6250
6251 case IA64_OPND_P1:
6252 case IA64_OPND_P2:
6253 val -= REG_P;
6254 break;
6255
6256 case IA64_OPND_R1:
6257 case IA64_OPND_R2:
6258 case IA64_OPND_R3:
6259 case IA64_OPND_R3_2:
6260 case IA64_OPND_CPUID_R3:
6261 case IA64_OPND_DBR_R3:
6262 case IA64_OPND_DTR_R3:
6263 case IA64_OPND_ITR_R3:
6264 case IA64_OPND_IBR_R3:
6265 case IA64_OPND_MR3:
6266 case IA64_OPND_MSR_R3:
6267 case IA64_OPND_PKR_R3:
6268 case IA64_OPND_PMC_R3:
6269 case IA64_OPND_PMD_R3:
6270 case IA64_OPND_RR_R3:
6271 val -= REG_GR;
6272 break;
6273
6274 default:
6275 break;
6276 }
6277
6278 odesc = elf64_ia64_operands + idesc->operands[i];
6279 err = (*odesc->insert) (odesc, val, &insn);
6280 if (err)
6281 as_bad_where (slot->src_file, slot->src_line,
6282 _("Bad operand value: %s"), err);
6283 if (idesc->flags & IA64_OPCODE_PSEUDO)
6284 {
6285 if ((idesc->flags & IA64_OPCODE_F2_EQ_F3)
6286 && odesc == elf64_ia64_operands + IA64_OPND_F3)
6287 {
6288 o2desc = elf64_ia64_operands + IA64_OPND_F2;
6289 (*o2desc->insert) (o2desc, val, &insn);
6290 }
6291 if ((idesc->flags & IA64_OPCODE_LEN_EQ_64MCNT)
6292 && (odesc == elf64_ia64_operands + IA64_OPND_CPOS6a
6293 || odesc == elf64_ia64_operands + IA64_OPND_POS6))
6294 {
6295 o2desc = elf64_ia64_operands + IA64_OPND_LEN6;
6296 (*o2desc->insert) (o2desc, 64 - val, &insn);
6297 }
6298 }
6299 }
6300 *insnp = insn;
6301 }
6302
6303 static void
6304 emit_one_bundle (void)
6305 {
6306 int manual_bundling_off = 0, manual_bundling = 0;
6307 enum ia64_unit required_unit, insn_unit = 0;
6308 enum ia64_insn_type type[3], insn_type;
6309 unsigned int template, orig_template;
6310 bfd_vma insn[3] = { -1, -1, -1 };
6311 struct ia64_opcode *idesc;
6312 int end_of_insn_group = 0, user_template = -1;
6313 int n, i, j, first, curr, last_slot;
6314 bfd_vma t0 = 0, t1 = 0;
6315 struct label_fix *lfix;
6316 bfd_boolean mark_label;
6317 struct insn_fix *ifix;
6318 char mnemonic[16];
6319 fixS *fix;
6320 char *f;
6321 int addr_mod;
6322
6323 first = (md.curr_slot + NUM_SLOTS - md.num_slots_in_use) % NUM_SLOTS;
6324 know (first >= 0 && first < NUM_SLOTS);
6325 n = MIN (3, md.num_slots_in_use);
6326
6327 /* Determine template: user user_template if specified, best match
6328 otherwise: */
6329
6330 if (md.slot[first].user_template >= 0)
6331 user_template = template = md.slot[first].user_template;
6332 else
6333 {
6334 /* Auto select appropriate template. */
6335 memset (type, 0, sizeof (type));
6336 curr = first;
6337 for (i = 0; i < n; ++i)
6338 {
6339 if (md.slot[curr].label_fixups && i != 0)
6340 break;
6341 type[i] = md.slot[curr].idesc->type;
6342 curr = (curr + 1) % NUM_SLOTS;
6343 }
6344 template = best_template[type[0]][type[1]][type[2]];
6345 }
6346
6347 /* initialize instructions with appropriate nops: */
6348 for (i = 0; i < 3; ++i)
6349 insn[i] = nop[ia64_templ_desc[template].exec_unit[i]];
6350
6351 f = frag_more (16);
6352
6353 /* Check to see if this bundle is at an offset that is a multiple of 16 bytes
6354 from the start of the frag. */
6355 addr_mod = frag_now_fix () & 15;
6356 if (frag_now->has_code && frag_now->insn_addr != addr_mod)
6357 as_bad (_("instruction address is not a multiple of 16"));
6358 frag_now->insn_addr = addr_mod;
6359 frag_now->has_code = 1;
6360
6361 /* now fill in slots with as many insns as possible: */
6362 curr = first;
6363 idesc = md.slot[curr].idesc;
6364 end_of_insn_group = 0;
6365 last_slot = -1;
6366 for (i = 0; i < 3 && md.num_slots_in_use > 0; ++i)
6367 {
6368 /* If we have unwind records, we may need to update some now. */
6369 unw_rec_list *ptr = md.slot[curr].unwind_record;
6370 unw_rec_list *end_ptr = NULL;
6371
6372 if (ptr)
6373 {
6374 /* Find the last prologue/body record in the list for the current
6375 insn, and set the slot number for all records up to that point.
6376 This needs to be done now, because prologue/body records refer to
6377 the current point, not the point after the instruction has been
6378 issued. This matters because there may have been nops emitted
6379 meanwhile. Any non-prologue non-body record followed by a
6380 prologue/body record must also refer to the current point. */
6381 unw_rec_list *last_ptr;
6382
6383 for (j = 1; end_ptr == NULL && j < md.num_slots_in_use; ++j)
6384 end_ptr = md.slot[(curr + j) % NUM_SLOTS].unwind_record;
6385 for (last_ptr = NULL; ptr != end_ptr; ptr = ptr->next)
6386 if (ptr->r.type == prologue || ptr->r.type == prologue_gr
6387 || ptr->r.type == body)
6388 last_ptr = ptr;
6389 if (last_ptr)
6390 {
6391 /* Make last_ptr point one after the last prologue/body
6392 record. */
6393 last_ptr = last_ptr->next;
6394 for (ptr = md.slot[curr].unwind_record; ptr != last_ptr;
6395 ptr = ptr->next)
6396 {
6397 ptr->slot_number = (unsigned long) f + i;
6398 ptr->slot_frag = frag_now;
6399 }
6400 /* Remove the initialized records, so that we won't accidentally
6401 update them again if we insert a nop and continue. */
6402 md.slot[curr].unwind_record = last_ptr;
6403 }
6404 }
6405
6406 manual_bundling_off = md.slot[curr].manual_bundling_off;
6407 if (md.slot[curr].manual_bundling_on)
6408 {
6409 if (curr == first)
6410 manual_bundling = 1;
6411 else
6412 break; /* Need to start a new bundle. */
6413 }
6414
6415 /* If this instruction specifies a template, then it must be the first
6416 instruction of a bundle. */
6417 if (curr != first && md.slot[curr].user_template >= 0)
6418 break;
6419
6420 if (idesc->flags & IA64_OPCODE_SLOT2)
6421 {
6422 if (manual_bundling && !manual_bundling_off)
6423 {
6424 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6425 _("`%s' must be last in bundle"), idesc->name);
6426 if (i < 2)
6427 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6428 }
6429 i = 2;
6430 }
6431 if (idesc->flags & IA64_OPCODE_LAST)
6432 {
6433 int required_slot;
6434 unsigned int required_template;
6435
6436 /* If we need a stop bit after an M slot, our only choice is
6437 template 5 (M;;MI). If we need a stop bit after a B
6438 slot, our only choice is to place it at the end of the
6439 bundle, because the only available templates are MIB,
6440 MBB, BBB, MMB, and MFB. We don't handle anything other
6441 than M and B slots because these are the only kind of
6442 instructions that can have the IA64_OPCODE_LAST bit set. */
6443 required_template = template;
6444 switch (idesc->type)
6445 {
6446 case IA64_TYPE_M:
6447 required_slot = 0;
6448 required_template = 5;
6449 break;
6450
6451 case IA64_TYPE_B:
6452 required_slot = 2;
6453 break;
6454
6455 default:
6456 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6457 _("Internal error: don't know how to force %s to end of instruction group"),
6458 idesc->name);
6459 required_slot = i;
6460 break;
6461 }
6462 if (manual_bundling
6463 && (i > required_slot
6464 || (required_slot == 2 && !manual_bundling_off)
6465 || (user_template >= 0
6466 /* Changing from MMI to M;MI is OK. */
6467 && (template ^ required_template) > 1)))
6468 {
6469 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6470 _("`%s' must be last in instruction group"),
6471 idesc->name);
6472 if (i < 2 && required_slot == 2 && !manual_bundling_off)
6473 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6474 }
6475 if (required_slot < i)
6476 /* Can't fit this instruction. */
6477 break;
6478
6479 i = required_slot;
6480 if (required_template != template)
6481 {
6482 /* If we switch the template, we need to reset the NOPs
6483 after slot i. The slot-types of the instructions ahead
6484 of i never change, so we don't need to worry about
6485 changing NOPs in front of this slot. */
6486 for (j = i; j < 3; ++j)
6487 insn[j] = nop[ia64_templ_desc[required_template].exec_unit[j]];
6488
6489 /* We just picked a template that includes the stop bit in the
6490 middle, so we don't need another one emitted later. */
6491 md.slot[curr].end_of_insn_group = 0;
6492 }
6493 template = required_template;
6494 }
6495 if (curr != first && md.slot[curr].label_fixups)
6496 {
6497 if (manual_bundling)
6498 {
6499 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6500 _("Label must be first in a bundle"));
6501 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6502 }
6503 /* This insn must go into the first slot of a bundle. */
6504 break;
6505 }
6506
6507 if (end_of_insn_group && md.num_slots_in_use >= 1)
6508 {
6509 /* We need an instruction group boundary in the middle of a
6510 bundle. See if we can switch to another template with
6511 an appropriate boundary. */
6512
6513 orig_template = template;
6514 if (i == 1 && (user_template == 4
6515 || (user_template < 0
6516 && (ia64_templ_desc[template].exec_unit[0]
6517 == IA64_UNIT_M))))
6518 {
6519 template = 5;
6520 end_of_insn_group = 0;
6521 }
6522 else if (i == 2 && (user_template == 0
6523 || (user_template < 0
6524 && (ia64_templ_desc[template].exec_unit[1]
6525 == IA64_UNIT_I)))
6526 /* This test makes sure we don't switch the template if
6527 the next instruction is one that needs to be first in
6528 an instruction group. Since all those instructions are
6529 in the M group, there is no way such an instruction can
6530 fit in this bundle even if we switch the template. The
6531 reason we have to check for this is that otherwise we
6532 may end up generating "MI;;I M.." which has the deadly
6533 effect that the second M instruction is no longer the
6534 first in the group! --davidm 99/12/16 */
6535 && (idesc->flags & IA64_OPCODE_FIRST) == 0)
6536 {
6537 template = 1;
6538 end_of_insn_group = 0;
6539 }
6540 else if (i == 1
6541 && user_template == 0
6542 && !(idesc->flags & IA64_OPCODE_FIRST))
6543 /* Use the next slot. */
6544 continue;
6545 else if (curr != first)
6546 /* can't fit this insn */
6547 break;
6548
6549 if (template != orig_template)
6550 /* if we switch the template, we need to reset the NOPs
6551 after slot i. The slot-types of the instructions ahead
6552 of i never change, so we don't need to worry about
6553 changing NOPs in front of this slot. */
6554 for (j = i; j < 3; ++j)
6555 insn[j] = nop[ia64_templ_desc[template].exec_unit[j]];
6556 }
6557 required_unit = ia64_templ_desc[template].exec_unit[i];
6558
6559 /* resolve dynamic opcodes such as "break", "hint", and "nop": */
6560 if (idesc->type == IA64_TYPE_DYN)
6561 {
6562 enum ia64_opnd opnd1, opnd2;
6563
6564 if ((strcmp (idesc->name, "nop") == 0)
6565 || (strcmp (idesc->name, "break") == 0))
6566 insn_unit = required_unit;
6567 else if (strcmp (idesc->name, "hint") == 0)
6568 {
6569 insn_unit = required_unit;
6570 if (required_unit == IA64_UNIT_B)
6571 {
6572 switch (md.hint_b)
6573 {
6574 case hint_b_ok:
6575 break;
6576 case hint_b_warning:
6577 as_warn (_("hint in B unit may be treated as nop"));
6578 break;
6579 case hint_b_error:
6580 /* When manual bundling is off and there is no
6581 user template, we choose a different unit so
6582 that hint won't go into the current slot. We
6583 will fill the current bundle with nops and
6584 try to put hint into the next bundle. */
6585 if (!manual_bundling && user_template < 0)
6586 insn_unit = IA64_UNIT_I;
6587 else
6588 as_bad (_("hint in B unit can't be used"));
6589 break;
6590 }
6591 }
6592 }
6593 else if (strcmp (idesc->name, "chk.s") == 0
6594 || strcmp (idesc->name, "mov") == 0)
6595 {
6596 insn_unit = IA64_UNIT_M;
6597 if (required_unit == IA64_UNIT_I
6598 || (required_unit == IA64_UNIT_F && template == 6))
6599 insn_unit = IA64_UNIT_I;
6600 }
6601 else
6602 as_fatal (_("emit_one_bundle: unexpected dynamic op"));
6603
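/* Illustrative note (added, not from the original sources): the snprintf
   below turns a generic opcode into a unit-qualified one, e.g. a bare "nop"
   that landed in an M slot is re-looked-up as "nop.m", assuming the usual
   unit-to-letter mapping encoded in the "?imbfxx" string.  */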
6604 snprintf (mnemonic, sizeof (mnemonic), "%s.%c",
6605 idesc->name, "?imbfxx"[insn_unit]);
6606 opnd1 = idesc->operands[0];
6607 opnd2 = idesc->operands[1];
6608 ia64_free_opcode (idesc);
6609 idesc = ia64_find_opcode (mnemonic);
6610 /* moves to/from ARs have collisions */
6611 if (opnd1 == IA64_OPND_AR3 || opnd2 == IA64_OPND_AR3)
6612 {
6613 while (idesc != NULL
6614 && (idesc->operands[0] != opnd1
6615 || idesc->operands[1] != opnd2))
6616 idesc = get_next_opcode (idesc);
6617 }
6618 md.slot[curr].idesc = idesc;
6619 }
6620 else
6621 {
6622 insn_type = idesc->type;
6623 insn_unit = IA64_UNIT_NIL;
6624 switch (insn_type)
6625 {
6626 case IA64_TYPE_A:
6627 if (required_unit == IA64_UNIT_I || required_unit == IA64_UNIT_M)
6628 insn_unit = required_unit;
6629 break;
6630 case IA64_TYPE_X: insn_unit = IA64_UNIT_L; break;
6631 case IA64_TYPE_I: insn_unit = IA64_UNIT_I; break;
6632 case IA64_TYPE_M: insn_unit = IA64_UNIT_M; break;
6633 case IA64_TYPE_B: insn_unit = IA64_UNIT_B; break;
6634 case IA64_TYPE_F: insn_unit = IA64_UNIT_F; break;
6635 default: break;
6636 }
6637 }
6638
6639 if (insn_unit != required_unit)
6640 continue; /* Try next slot. */
6641
6642 /* Now is a good time to fix up the labels for this insn. */
6643 mark_label = FALSE;
6644 for (lfix = md.slot[curr].label_fixups; lfix; lfix = lfix->next)
6645 {
6646 S_SET_VALUE (lfix->sym, frag_now_fix () - 16);
6647 symbol_set_frag (lfix->sym, frag_now);
6648 mark_label |= lfix->dw2_mark_labels;
6649 }
6650 for (lfix = md.slot[curr].tag_fixups; lfix; lfix = lfix->next)
6651 {
6652 S_SET_VALUE (lfix->sym, frag_now_fix () - 16 + i);
6653 symbol_set_frag (lfix->sym, frag_now);
6654 }
6655
6656 if (debug_type == DEBUG_DWARF2
6657 || md.slot[curr].loc_directive_seen
6658 || mark_label)
6659 {
6660 bfd_vma addr = frag_now->fr_address + frag_now_fix () - 16 + i;
6661
6662 md.slot[curr].loc_directive_seen = 0;
6663 if (mark_label)
6664 md.slot[curr].debug_line.flags |= DWARF2_FLAG_BASIC_BLOCK;
6665
6666 dwarf2_gen_line_info (addr, &md.slot[curr].debug_line);
6667 }
6668
6669 build_insn (md.slot + curr, insn + i);
6670
6671 ptr = md.slot[curr].unwind_record;
6672 if (ptr)
6673 {
6674 /* Set slot numbers for all remaining unwind records belonging to the
6675 current insn. There can not be any prologue/body unwind records
6676 here. */
6677 for (; ptr != end_ptr; ptr = ptr->next)
6678 {
6679 ptr->slot_number = (unsigned long) f + i;
6680 ptr->slot_frag = frag_now;
6681 }
6682 md.slot[curr].unwind_record = NULL;
6683 }
6684
6685 if (required_unit == IA64_UNIT_L)
6686 {
6687 know (i == 1);
6688 /* skip one slot for long/X-unit instructions */
6689 ++i;
6690 }
6691 --md.num_slots_in_use;
6692 last_slot = i;
6693
6694 for (j = 0; j < md.slot[curr].num_fixups; ++j)
6695 {
6696 ifix = md.slot[curr].fixup + j;
6697 fix = fix_new_exp (frag_now, frag_now_fix () - 16 + i, 8,
6698 &ifix->expr, ifix->is_pcrel, ifix->code);
6699 fix->tc_fix_data.opnd = ifix->opnd;
6700 fix->fx_file = md.slot[curr].src_file;
6701 fix->fx_line = md.slot[curr].src_line;
6702 }
6703
6704 end_of_insn_group = md.slot[curr].end_of_insn_group;
6705
6706 /* clear slot: */
6707 ia64_free_opcode (md.slot[curr].idesc);
6708 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6709 md.slot[curr].user_template = -1;
6710
6711 if (manual_bundling_off)
6712 {
6713 manual_bundling = 0;
6714 break;
6715 }
6716 curr = (curr + 1) % NUM_SLOTS;
6717 idesc = md.slot[curr].idesc;
6718 }
6719
6720 /* A user template was specified, but the first following instruction did
6721 not fit. This can happen with or without manual bundling. */
6722 if (md.num_slots_in_use > 0 && last_slot < 0)
6723 {
6724 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6725 _("`%s' does not fit into %s template"),
6726 idesc->name, ia64_templ_desc[template].name);
6727 /* Drop first insn so we don't livelock. */
6728 --md.num_slots_in_use;
6729 know (curr == first);
6730 ia64_free_opcode (md.slot[curr].idesc);
6731 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6732 md.slot[curr].user_template = -1;
6733 }
6734 else if (manual_bundling > 0)
6735 {
6736 if (md.num_slots_in_use > 0)
6737 {
6738 if (last_slot >= 2)
6739 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6740 _("`%s' does not fit into bundle"), idesc->name);
6741 else
6742 {
6743 const char *where;
6744
6745 if (template == 2)
6746 where = "X slot";
6747 else if (last_slot == 0)
6748 where = "slots 2 or 3";
6749 else
6750 where = "slot 3";
6751 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6752 _("`%s' can't go in %s of %s template"),
6753 idesc->name, where, ia64_templ_desc[template].name);
6754 }
6755 }
6756 else
6757 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6758 _("Missing '}' at end of file"));
6759 }
6760
6761 know (md.num_slots_in_use < NUM_SLOTS);
6762
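/* Added sketch of the 128-bit bundle assembled below (standard IA-64
   encoding): bits 0..4 hold the template field, whose low bit is the
   end-of-group (stop) indicator -- hence "template << 1" -- bits 5..45 hold
   slot 0, bits 46..86 slot 1 (split between t0 and t1), and bits 87..127
   slot 2.  */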
6763 t0 = end_of_insn_group | (template << 1) | (insn[0] << 5) | (insn[1] << 46);
6764 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
6765
6766 number_to_chars_littleendian (f + 0, t0, 8);
6767 number_to_chars_littleendian (f + 8, t1, 8);
6768 }
6769
6770 int
6771 md_parse_option (int c, char *arg)
6772 {
6773
6774 switch (c)
6775 {
6776 /* Switches from the Intel assembler. */
6777 case 'm':
6778 if (strcmp (arg, "ilp64") == 0
6779 || strcmp (arg, "lp64") == 0
6780 || strcmp (arg, "p64") == 0)
6781 {
6782 md.flags |= EF_IA_64_ABI64;
6783 }
6784 else if (strcmp (arg, "ilp32") == 0)
6785 {
6786 md.flags &= ~EF_IA_64_ABI64;
6787 }
6788 else if (strcmp (arg, "le") == 0)
6789 {
6790 md.flags &= ~EF_IA_64_BE;
6791 default_big_endian = 0;
6792 }
6793 else if (strcmp (arg, "be") == 0)
6794 {
6795 md.flags |= EF_IA_64_BE;
6796 default_big_endian = 1;
6797 }
6798 else if (strncmp (arg, "unwind-check=", 13) == 0)
6799 {
6800 arg += 13;
6801 if (strcmp (arg, "warning") == 0)
6802 md.unwind_check = unwind_check_warning;
6803 else if (strcmp (arg, "error") == 0)
6804 md.unwind_check = unwind_check_error;
6805 else
6806 return 0;
6807 }
6808 else if (strncmp (arg, "hint.b=", 7) == 0)
6809 {
6810 arg += 7;
6811 if (strcmp (arg, "ok") == 0)
6812 md.hint_b = hint_b_ok;
6813 else if (strcmp (arg, "warning") == 0)
6814 md.hint_b = hint_b_warning;
6815 else if (strcmp (arg, "error") == 0)
6816 md.hint_b = hint_b_error;
6817 else
6818 return 0;
6819 }
6820 else if (strncmp (arg, "tune=", 5) == 0)
6821 {
6822 arg += 5;
6823 if (strcmp (arg, "itanium1") == 0)
6824 md.tune = itanium1;
6825 else if (strcmp (arg, "itanium2") == 0)
6826 md.tune = itanium2;
6827 else
6828 return 0;
6829 }
6830 else
6831 return 0;
6832 break;
6833
6834 case 'N':
6835 if (strcmp (arg, "so") == 0)
6836 {
6837 /* Suppress signon message. */
6838 }
6839 else if (strcmp (arg, "pi") == 0)
6840 {
6841 /* Reject privileged instructions. FIXME */
6842 }
6843 else if (strcmp (arg, "us") == 0)
6844 {
6845 /* Allow union of signed and unsigned range. FIXME */
6846 }
6847 else if (strcmp (arg, "close_fcalls") == 0)
6848 {
6849 /* Do not resolve global function calls. */
6850 }
6851 else
6852 return 0;
6853 break;
6854
6855 case 'C':
6856 /* temp[="prefix"] Insert temporary labels into the object file
6857 symbol table prefixed by "prefix".
6858 Default prefix is ":temp:".
6859 */
6860 break;
6861
6862 case 'a':
6863 /* indirect=<tgt> Assume unannotated indirect branches behave
6864 according to <tgt> --
6865 exit: branch out from the current context (default)
6866 labels: all labels in context may be branch targets
6867 */
6868 if (strncmp (arg, "indirect=", 9) != 0)
6869 return 0;
6870 break;
6871
6872 case 'x':
6873 /* -X conflicts with an ignored option, use -x instead */
6874 md.detect_dv = 1;
6875 if (!arg || strcmp (arg, "explicit") == 0)
6876 {
6877 /* set default mode to explicit */
6878 md.default_explicit_mode = 1;
6879 break;
6880 }
6881 else if (strcmp (arg, "auto") == 0)
6882 {
6883 md.default_explicit_mode = 0;
6884 }
6885 else if (strcmp (arg, "none") == 0)
6886 {
6887 md.detect_dv = 0;
6888 }
6889 else if (strcmp (arg, "debug") == 0)
6890 {
6891 md.debug_dv = 1;
6892 }
6893 else if (strcmp (arg, "debugx") == 0)
6894 {
6895 md.default_explicit_mode = 1;
6896 md.debug_dv = 1;
6897 }
6898 else if (strcmp (arg, "debugn") == 0)
6899 {
6900 md.debug_dv = 1;
6901 md.detect_dv = 0;
6902 }
6903 else
6904 {
6905 as_bad (_("Unrecognized option '-x%s'"), arg);
6906 }
6907 break;
6908
6909 case 'S':
6910 /* nops Print nops statistics. */
6911 break;
6912
6913 /* GNU specific switches for gcc. */
6914 case OPTION_MCONSTANT_GP:
6915 md.flags |= EF_IA_64_CONS_GP;
6916 break;
6917
6918 case OPTION_MAUTO_PIC:
6919 md.flags |= EF_IA_64_NOFUNCDESC_CONS_GP;
6920 break;
6921
6922 default:
6923 return 0;
6924 }
6925
6926 return 1;
6927 }
6928
6929 void
6930 md_show_usage (FILE *stream)
6931 {
6932 fputs (_("\
6933 IA-64 options:\n\
6934 --mconstant-gp mark output file as using the constant-GP model\n\
6935 (sets ELF header flag EF_IA_64_CONS_GP)\n\
6936 --mauto-pic mark output file as using the constant-GP model\n\
6937 without function descriptors (sets ELF header flag\n\
6938 EF_IA_64_NOFUNCDESC_CONS_GP)\n\
6939 -milp32|-milp64|-mlp64|-mp64 select data model (default -mlp64)\n\
6940 -mle | -mbe select little- or big-endian byte order (default -mle)\n\
6941 -mtune=[itanium1|itanium2]\n\
6942 tune for a specific CPU (default -mtune=itanium2)\n\
6943 -munwind-check=[warning|error]\n\
6944 unwind directive check (default -munwind-check=warning)\n\
6945 -mhint.b=[ok|warning|error]\n\
6946 hint.b check (default -mhint.b=error)\n\
6947 -x | -xexplicit turn on dependency violation checking\n\
6948 -xauto automagically remove dependency violations (default)\n\
6949 -xnone turn off dependency violation checking\n\
6950 -xdebug debug dependency violation checker\n\
6951 -xdebugn debug dependency violation checker but turn off\n\
6952 dependency violation checking\n\
6953 -xdebugx debug dependency violation checker and turn on\n\
6954 dependency violation checking\n"),
6955 stream);
6956 }
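/* An illustrative invocation exercising the switches above (an example only,
   not taken from the sources):
   "as -mlp64 -mbe -xexplicit -munwind-check=error foo.s".  */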
6957
6958 void
6959 ia64_after_parse_args (void)
6960 {
6961 if (debug_type == DEBUG_STABS)
6962 as_fatal (_("--gstabs is not supported for ia64"));
6963 }
6964
6965 /* Return true if TYPE fits in TEMPL at SLOT. */
6966
6967 static int
6968 match (int templ, int type, int slot)
6969 {
6970 enum ia64_unit unit;
6971 int result;
6972
6973 unit = ia64_templ_desc[templ].exec_unit[slot];
6974 switch (type)
6975 {
6976 case IA64_TYPE_DYN: result = 1; break; /* for nop and break */
6977 case IA64_TYPE_A:
6978 result = (unit == IA64_UNIT_I || unit == IA64_UNIT_M);
6979 break;
6980 case IA64_TYPE_X: result = (unit == IA64_UNIT_L); break;
6981 case IA64_TYPE_I: result = (unit == IA64_UNIT_I); break;
6982 case IA64_TYPE_M: result = (unit == IA64_UNIT_M); break;
6983 case IA64_TYPE_B: result = (unit == IA64_UNIT_B); break;
6984 case IA64_TYPE_F: result = (unit == IA64_UNIT_F); break;
6985 default: result = 0; break;
6986 }
6987 return result;
6988 }
6989
6990 /* For Itanium 1, add a bit of extra goodness if a nop of type F or B would fit
6991 in TEMPL at SLOT. For Itanium 2, add a bit of extra goodness if a nop of
6992 type M or I would fit in TEMPL at SLOT. */
6993
6994 static inline int
6995 extra_goodness (int templ, int slot)
6996 {
6997 switch (md.tune)
6998 {
6999 case itanium1:
7000 if (slot == 1 && match (templ, IA64_TYPE_F, slot))
7001 return 2;
7002 else if (slot == 2 && match (templ, IA64_TYPE_B, slot))
7003 return 1;
7004 else
7005 return 0;
7006 break;
7007 case itanium2:
7008 if (match (templ, IA64_TYPE_M, slot)
7009 || match (templ, IA64_TYPE_I, slot))
7010 /* Favor M- and I-unit NOPs. We definitely want to avoid
7011 F-unit and B-unit NOPs, which may cause split-issue or
7012 less-than-optimal branch-prediction. */
7013 return 2;
7014 else
7015 return 0;
7016 break;
7017 default:
7018 abort ();
7019 return 0;
7020 }
7021 }
7022
7023 /* This function is called once, at assembler startup time. It sets
7024 up all the tables, etc. that the MD part of the assembler will need
7025 that can be determined before arguments are parsed. */
7026 void
7027 md_begin (void)
7028 {
7029 int i, j, k, t, goodness, best, ok;
7030 const char *err;
7031 char name[8];
7032
7033 md.auto_align = 1;
7034 md.explicit_mode = md.default_explicit_mode;
7035
7036 bfd_set_section_alignment (stdoutput, text_section, 4);
7037
7038 /* Make sure function pointers get initialized. */
7039 target_big_endian = -1;
7040 dot_byteorder (default_big_endian);
7041
7042 alias_hash = hash_new ();
7043 alias_name_hash = hash_new ();
7044 secalias_hash = hash_new ();
7045 secalias_name_hash = hash_new ();
7046
7047 pseudo_func[FUNC_DTP_MODULE].u.sym =
7048 symbol_new (".<dtpmod>", undefined_section, FUNC_DTP_MODULE,
7049 &zero_address_frag);
7050
7051 pseudo_func[FUNC_DTP_RELATIVE].u.sym =
7052 symbol_new (".<dtprel>", undefined_section, FUNC_DTP_RELATIVE,
7053 &zero_address_frag);
7054
7055 pseudo_func[FUNC_FPTR_RELATIVE].u.sym =
7056 symbol_new (".<fptr>", undefined_section, FUNC_FPTR_RELATIVE,
7057 &zero_address_frag);
7058
7059 pseudo_func[FUNC_GP_RELATIVE].u.sym =
7060 symbol_new (".<gprel>", undefined_section, FUNC_GP_RELATIVE,
7061 &zero_address_frag);
7062
7063 pseudo_func[FUNC_LT_RELATIVE].u.sym =
7064 symbol_new (".<ltoff>", undefined_section, FUNC_LT_RELATIVE,
7065 &zero_address_frag);
7066
7067 pseudo_func[FUNC_LT_RELATIVE_X].u.sym =
7068 symbol_new (".<ltoffx>", undefined_section, FUNC_LT_RELATIVE_X,
7069 &zero_address_frag);
7070
7071 pseudo_func[FUNC_PC_RELATIVE].u.sym =
7072 symbol_new (".<pcrel>", undefined_section, FUNC_PC_RELATIVE,
7073 &zero_address_frag);
7074
7075 pseudo_func[FUNC_PLT_RELATIVE].u.sym =
7076 symbol_new (".<pltoff>", undefined_section, FUNC_PLT_RELATIVE,
7077 &zero_address_frag);
7078
7079 pseudo_func[FUNC_SEC_RELATIVE].u.sym =
7080 symbol_new (".<secrel>", undefined_section, FUNC_SEC_RELATIVE,
7081 &zero_address_frag);
7082
7083 pseudo_func[FUNC_SEG_RELATIVE].u.sym =
7084 symbol_new (".<segrel>", undefined_section, FUNC_SEG_RELATIVE,
7085 &zero_address_frag);
7086
7087 pseudo_func[FUNC_TP_RELATIVE].u.sym =
7088 symbol_new (".<tprel>", undefined_section, FUNC_TP_RELATIVE,
7089 &zero_address_frag);
7090
7091 pseudo_func[FUNC_LTV_RELATIVE].u.sym =
7092 symbol_new (".<ltv>", undefined_section, FUNC_LTV_RELATIVE,
7093 &zero_address_frag);
7094
7095 pseudo_func[FUNC_LT_FPTR_RELATIVE].u.sym =
7096 symbol_new (".<ltoff.fptr>", undefined_section, FUNC_LT_FPTR_RELATIVE,
7097 &zero_address_frag);
7098
7099 pseudo_func[FUNC_LT_DTP_MODULE].u.sym =
7100 symbol_new (".<ltoff.dtpmod>", undefined_section, FUNC_LT_DTP_MODULE,
7101 &zero_address_frag);
7102
7103 pseudo_func[FUNC_LT_DTP_RELATIVE].u.sym =
7104 symbol_new (".<ltoff.dptrel>", undefined_section, FUNC_LT_DTP_RELATIVE,
7105 &zero_address_frag);
7106
7107 pseudo_func[FUNC_LT_TP_RELATIVE].u.sym =
7108 symbol_new (".<ltoff.tprel>", undefined_section, FUNC_LT_TP_RELATIVE,
7109 &zero_address_frag);
7110
7111 pseudo_func[FUNC_IPLT_RELOC].u.sym =
7112 symbol_new (".<iplt>", undefined_section, FUNC_IPLT_RELOC,
7113 &zero_address_frag);
7114
7115 if (md.tune != itanium1)
7116 {
7117 /* Convert MFI NOP bundles into MMI NOP bundles. */
7118 le_nop[0] = 0x8;
7119 le_nop_stop[0] = 0x9;
7120 }
7121
7122 /* Compute the table of best templates. We compute goodness as a
7123 base 4 value, in which each match counts for 3. Match-failures
7124 result in NOPs and we use extra_goodness() to pick the execution
7125 units that are best suited for issuing the NOP. */
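/* Concretely (reading the loops below, added for clarity): a template whose
   three slots all match the requested insn types scores 3 + 3 + 3, while an
   unmatched slot will be filled with a NOP and contributes only the 0..2
   returned by extra_goodness () for the selected CPU.  */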
7126 for (i = 0; i < IA64_NUM_TYPES; ++i)
7127 for (j = 0; j < IA64_NUM_TYPES; ++j)
7128 for (k = 0; k < IA64_NUM_TYPES; ++k)
7129 {
7130 best = 0;
7131 for (t = 0; t < NELEMS (ia64_templ_desc); ++t)
7132 {
7133 goodness = 0;
7134 if (match (t, i, 0))
7135 {
7136 if (match (t, j, 1))
7137 {
7138 if ((t == 2 && j == IA64_TYPE_X) || match (t, k, 2))
7139 goodness = 3 + 3 + 3;
7140 else
7141 goodness = 3 + 3 + extra_goodness (t, 2);
7142 }
7143 else if (match (t, j, 2))
7144 goodness = 3 + 3 + extra_goodness (t, 1);
7145 else
7146 {
7147 goodness = 3;
7148 goodness += extra_goodness (t, 1);
7149 goodness += extra_goodness (t, 2);
7150 }
7151 }
7152 else if (match (t, i, 1))
7153 {
7154 if ((t == 2 && i == IA64_TYPE_X) || match (t, j, 2))
7155 goodness = 3 + 3;
7156 else
7157 goodness = 3 + extra_goodness (t, 2);
7158 }
7159 else if (match (t, i, 2))
7160 goodness = 3 + extra_goodness (t, 1);
7161
7162 if (goodness > best)
7163 {
7164 best = goodness;
7165 best_template[i][j][k] = t;
7166 }
7167 }
7168 }
7169
7170 #ifdef DEBUG_TEMPLATES
7171 /* For debugging changes to the best_template calculations. We don't care
7172 about combinations with invalid instructions, so start the loops at 1. */
7173 for (i = 0; i < IA64_NUM_TYPES; ++i)
7174 for (j = 0; j < IA64_NUM_TYPES; ++j)
7175 for (k = 0; k < IA64_NUM_TYPES; ++k)
7176 {
7177 char type_letter[IA64_NUM_TYPES] = { 'n', 'a', 'i', 'm', 'b', 'f',
7178 'x', 'd' };
7179 fprintf (stderr, "%c%c%c %s\n", type_letter[i], type_letter[j],
7180 type_letter[k],
7181 ia64_templ_desc[best_template[i][j][k]].name);
7182 }
7183 #endif
7184
7185 for (i = 0; i < NUM_SLOTS; ++i)
7186 md.slot[i].user_template = -1;
7187
7188 md.pseudo_hash = hash_new ();
7189 for (i = 0; i < NELEMS (pseudo_opcode); ++i)
7190 {
7191 err = hash_insert (md.pseudo_hash, pseudo_opcode[i].name,
7192 (void *) (pseudo_opcode + i));
7193 if (err)
7194 as_fatal (_("ia64.md_begin: can't hash `%s': %s"),
7195 pseudo_opcode[i].name, err);
7196 }
7197
7198 md.reg_hash = hash_new ();
7199 md.dynreg_hash = hash_new ();
7200 md.const_hash = hash_new ();
7201 md.entry_hash = hash_new ();
7202
7203 /* general registers: */
7204 declare_register_set ("r", 128, REG_GR);
7205 declare_register ("gp", REG_GR + 1);
7206 declare_register ("sp", REG_GR + 12);
7207 declare_register ("tp", REG_GR + 13);
7208 declare_register_set ("ret", 4, REG_GR + 8);
7209
7210 /* floating point registers: */
7211 declare_register_set ("f", 128, REG_FR);
7212 declare_register_set ("farg", 8, REG_FR + 8);
7213 declare_register_set ("fret", 8, REG_FR + 8);
7214
7215 /* branch registers: */
7216 declare_register_set ("b", 8, REG_BR);
7217 declare_register ("rp", REG_BR + 0);
7218
7219 /* predicate registers: */
7220 declare_register_set ("p", 64, REG_P);
7221 declare_register ("pr", REG_PR);
7222 declare_register ("pr.rot", REG_PR_ROT);
7223
7224 /* application registers: */
7225 declare_register_set ("ar", 128, REG_AR);
7226 for (i = 0; i < NELEMS (ar); ++i)
7227 declare_register (ar[i].name, REG_AR + ar[i].regnum);
7228
7229 /* control registers: */
7230 declare_register_set ("cr", 128, REG_CR);
7231 for (i = 0; i < NELEMS (cr); ++i)
7232 declare_register (cr[i].name, REG_CR + cr[i].regnum);
7233
7234 declare_register ("ip", REG_IP);
7235 declare_register ("cfm", REG_CFM);
7236 declare_register ("psr", REG_PSR);
7237 declare_register ("psr.l", REG_PSR_L);
7238 declare_register ("psr.um", REG_PSR_UM);
7239
7240 for (i = 0; i < NELEMS (indirect_reg); ++i)
7241 {
7242 unsigned int regnum = indirect_reg[i].regnum;
7243
7244 md.indregsym[regnum - IND_CPUID] = declare_register (indirect_reg[i].name, regnum);
7245 }
7246
7247 /* pseudo-registers used to specify unwind info: */
7248 declare_register ("psp", REG_PSP);
7249
7250 for (i = 0; i < NELEMS (const_bits); ++i)
7251 {
7252 err = hash_insert (md.const_hash, const_bits[i].name,
7253 (void *) (const_bits + i));
7254 if (err)
7255 as_fatal (_("Inserting \"%s\" into constant hash table failed: %s"),
7256 const_bits[i].name, err);
7257 }
7258
7259 /* Set the architecture and machine depending on defaults and command line
7260 options. */
7261 if (md.flags & EF_IA_64_ABI64)
7262 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf64);
7263 else
7264 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf32);
7265
7266 if (! ok)
7267 as_warn (_("Could not set architecture and machine"));
7268
7269 /* Set the pointer size and pointer shift size depending on md.flags */
7270
7271 if (md.flags & EF_IA_64_ABI64)
7272 {
7273 md.pointer_size = 8; /* pointers are 8 bytes */
7274 md.pointer_size_shift = 3; /* alignment is 8 bytes = 2^3 */
7275 }
7276 else
7277 {
7278 md.pointer_size = 4; /* pointers are 4 bytes */
7279 md.pointer_size_shift = 2; /* alignment is 4 bytes = 2^2 */
7280 }
7281
7282 md.mem_offset.hint = 0;
7283 md.path = 0;
7284 md.maxpaths = 0;
7285 md.entry_labels = NULL;
7286 }
7287
7288 /* Set the default options in md. Cannot do this in md_begin because
7289 that is called after md_parse_option which is where we set the
7290 options in md based on command line options. */
7291
7292 void
7293 ia64_init (int argc ATTRIBUTE_UNUSED, char **argv ATTRIBUTE_UNUSED)
7294 {
7295 md.flags = MD_FLAGS_DEFAULT;
7296 md.detect_dv = 1;
7297 /* FIXME: We should change it to unwind_check_error someday. */
7298 md.unwind_check = unwind_check_warning;
7299 md.hint_b = hint_b_error;
7300 md.tune = itanium2;
7301 }
7302
7303 /* Return a string for the target object file format. */
7304
7305 const char *
7306 ia64_target_format (void)
7307 {
7308 if (OUTPUT_FLAVOR == bfd_target_elf_flavour)
7309 {
7310 if (md.flags & EF_IA_64_BE)
7311 {
7312 if (md.flags & EF_IA_64_ABI64)
7313 #if defined(TE_AIX50)
7314 return "elf64-ia64-aix-big";
7315 #elif defined(TE_HPUX)
7316 return "elf64-ia64-hpux-big";
7317 #else
7318 return "elf64-ia64-big";
7319 #endif
7320 else
7321 #if defined(TE_AIX50)
7322 return "elf32-ia64-aix-big";
7323 #elif defined(TE_HPUX)
7324 return "elf32-ia64-hpux-big";
7325 #else
7326 return "elf32-ia64-big";
7327 #endif
7328 }
7329 else
7330 {
7331 if (md.flags & EF_IA_64_ABI64)
7332 #ifdef TE_AIX50
7333 return "elf64-ia64-aix-little";
7334 #else
7335 return "elf64-ia64-little";
7336 #endif
7337 else
7338 #ifdef TE_AIX50
7339 return "elf32-ia64-aix-little";
7340 #else
7341 return "elf32-ia64-little";
7342 #endif
7343 }
7344 }
7345 else
7346 return "unknown-format";
7347 }
7348
7349 void
7350 ia64_end_of_source (void)
7351 {
7352 /* terminate insn group upon reaching end of file: */
7353 insn_group_break (1, 0, 0);
7354
7355 /* emits slots we haven't written yet: */
7356 ia64_flush_insns ();
7357
7358 bfd_set_private_flags (stdoutput, md.flags);
7359
7360 md.mem_offset.hint = 0;
7361 }
7362
7363 void
7364 ia64_start_line (void)
7365 {
7366 static int first;
7367
7368 if (!first) {
7369 /* Make sure we don't reference input_line_pointer[-1] when that's
7370 not valid. */
7371 first = 1;
7372 return;
7373 }
7374
7375 if (md.qp.X_op == O_register)
7376 as_bad (_("qualifying predicate not followed by instruction"));
7377 md.qp.X_op = O_absent;
7378
7379 if (ignore_input ())
7380 return;
7381
7382 if (input_line_pointer[0] == ';' && input_line_pointer[-1] == ';')
7383 {
7384 if (md.detect_dv && !md.explicit_mode)
7385 {
7386 static int warned;
7387
7388 if (!warned)
7389 {
7390 warned = 1;
7391 as_warn (_("Explicit stops are ignored in auto mode"));
7392 }
7393 }
7394 else
7395 insn_group_break (1, 0, 0);
7396 }
7397 else if (input_line_pointer[-1] == '{')
7398 {
7399 if (md.manual_bundling)
7400 as_warn (_("Found '{' when manual bundling is already turned on"));
7401 else
7402 CURR_SLOT.manual_bundling_on = 1;
7403 md.manual_bundling = 1;
7404
7405 /* Bundling is only acceptable in explicit mode
7406 or when in default automatic mode. */
7407 if (md.detect_dv && !md.explicit_mode)
7408 {
7409 if (!md.mode_explicitly_set
7410 && !md.default_explicit_mode)
7411 dot_dv_mode ('E');
7412 else
7413 as_warn (_("Found '{' after explicit switch to automatic mode"));
7414 }
7415 }
7416 else if (input_line_pointer[-1] == '}')
7417 {
7418 if (!md.manual_bundling)
7419 as_warn (_("Found '}' when manual bundling is off"));
7420 else
7421 PREV_SLOT.manual_bundling_off = 1;
7422 md.manual_bundling = 0;
7423
7424 /* switch back to automatic mode, if applicable */
7425 if (md.detect_dv
7426 && md.explicit_mode
7427 && !md.mode_explicitly_set
7428 && !md.default_explicit_mode)
7429 dot_dv_mode ('A');
7430 }
7431 }
7432 }
7433 /* This is a hook for ia64_frob_label, so that it can distinguish tags from
7434 labels. */
7435 static int defining_tag = 0;
7436
7437 int
7438 ia64_unrecognized_line (int ch)
7439 {
7440 switch (ch)
7441 {
7442 case '(':
7443 expression_and_evaluate (&md.qp);
7444 if (*input_line_pointer++ != ')')
7445 {
7446 as_bad (_("Expected ')'"));
7447 return 0;
7448 }
7449 if (md.qp.X_op != O_register)
7450 {
7451 as_bad (_("Qualifying predicate expected"));
7452 return 0;
7453 }
7454 if (md.qp.X_add_number < REG_P || md.qp.X_add_number >= REG_P + 64)
7455 {
7456 as_bad (_("Predicate register expected"));
7457 return 0;
7458 }
7459 return 1;
7460
7461 case '[':
7462 {
7463 char *s;
7464 char c;
7465 symbolS *tag;
7466 int temp;
7467
7468 if (md.qp.X_op == O_register)
7469 {
7470 as_bad (_("Tag must come before qualifying predicate."));
7471 return 0;
7472 }
7473
7474 /* This implements just enough of read_a_source_file in read.c to
7475 recognize labels. */
7476 if (is_name_beginner (*input_line_pointer))
7477 {
7478 s = input_line_pointer;
7479 c = get_symbol_end ();
7480 }
7481 else if (LOCAL_LABELS_FB
7482 && ISDIGIT (*input_line_pointer))
7483 {
7484 temp = 0;
7485 while (ISDIGIT (*input_line_pointer))
7486 temp = (temp * 10) + *input_line_pointer++ - '0';
7487 fb_label_instance_inc (temp);
7488 s = fb_label_name (temp, 0);
7489 c = *input_line_pointer;
7490 }
7491 else
7492 {
7493 s = NULL;
7494 c = '\0';
7495 }
7496 if (c != ':')
7497 {
7498 /* Put ':' back for error messages' sake. */
7499 *input_line_pointer++ = ':';
7500 as_bad (_("Expected ':'"));
7501 return 0;
7502 }
7503
7504 defining_tag = 1;
7505 tag = colon (s);
7506 defining_tag = 0;
7507 /* Put ':' back for error messages' sake. */
7508 *input_line_pointer++ = ':';
7509 if (*input_line_pointer++ != ']')
7510 {
7511 as_bad (_("Expected ']'"));
7512 return 0;
7513 }
7514 if (! tag)
7515 {
7516 as_bad (_("Tag name expected"));
7517 return 0;
7518 }
7519 return 1;
7520 }
7521
7522 default:
7523 break;
7524 }
7525
7526 /* Not a valid line. */
7527 return 0;
7528 }
7529
7530 void
7531 ia64_frob_label (struct symbol *sym)
7532 {
7533 struct label_fix *fix;
7534
7535 /* Tags need special handling since they are not bundle breaks like
7536 labels. */
7537 if (defining_tag)
7538 {
7539 fix = obstack_alloc (&notes, sizeof (*fix));
7540 fix->sym = sym;
7541 fix->next = CURR_SLOT.tag_fixups;
7542 fix->dw2_mark_labels = FALSE;
7543 CURR_SLOT.tag_fixups = fix;
7544
7545 return;
7546 }
7547
7548 if (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
7549 {
7550 md.last_text_seg = now_seg;
7551 fix = obstack_alloc (&notes, sizeof (*fix));
7552 fix->sym = sym;
7553 fix->next = CURR_SLOT.label_fixups;
7554 fix->dw2_mark_labels = dwarf2_loc_mark_labels;
7555 CURR_SLOT.label_fixups = fix;
7556
7557 /* Keep track of how many code entry points we've seen. */
7558 if (md.path == md.maxpaths)
7559 {
7560 md.maxpaths += 20;
7561 md.entry_labels = (const char **)
7562 xrealloc ((void *) md.entry_labels,
7563 md.maxpaths * sizeof (char *));
7564 }
7565 md.entry_labels[md.path++] = S_GET_NAME (sym);
7566 }
7567 }
7568
7569 #ifdef TE_HPUX
7570 /* The HP-UX linker will give unresolved symbol errors for symbols
7571 that are declared but unused. This routine removes declared,
7572 unused symbols from an object. */
7573 int
7574 ia64_frob_symbol (struct symbol *sym)
7575 {
7576 if ((S_GET_SEGMENT (sym) == &bfd_und_section && ! symbol_used_p (sym) &&
7577 ELF_ST_VISIBILITY (S_GET_OTHER (sym)) == STV_DEFAULT)
7578 || (S_GET_SEGMENT (sym) == &bfd_abs_section
7579 && ! S_IS_EXTERNAL (sym)))
7580 return 1;
7581 return 0;
7582 }
7583 #endif
7584
7585 void
7586 ia64_flush_pending_output (void)
7587 {
7588 if (!md.keep_pending_output
7589 && bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
7590 {
7591 /* ??? This causes many unnecessary stop bits to be emitted.
7592 Unfortunately, it isn't clear if it is safe to remove this. */
7593 insn_group_break (1, 0, 0);
7594 ia64_flush_insns ();
7595 }
7596 }
7597
7598 /* Do ia64-specific expression optimization. All that's done here is
7599 to transform index expressions that are either due to the indexing
7600 of rotating registers or due to the indexing of indirect register
7601 sets. */
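/* Illustrative inputs (a sketch, not from the sources): "foo[2]" for a
   ".rotr foo[4]" register set is folded by the first branch below into the
   base register number plus the index, while "cpuid[r3]" takes the second
   branch and is left as an O_index expression keyed off md.indregsym.  */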
7602 int
7603 ia64_optimize_expr (expressionS *l, operatorT op, expressionS *r)
7604 {
7605 if (op != O_index)
7606 return 0;
7607 resolve_expression (l);
7608 if (l->X_op == O_register)
7609 {
7610 unsigned num_regs = l->X_add_number >> 16;
7611
7612 resolve_expression (r);
7613 if (num_regs)
7614 {
7615 /* Left side is a .rotX-allocated register. */
7616 if (r->X_op != O_constant)
7617 {
7618 as_bad (_("Rotating register index must be a non-negative constant"));
7619 r->X_add_number = 0;
7620 }
7621 else if ((valueT) r->X_add_number >= num_regs)
7622 {
7623 as_bad (_("Index out of range 0..%u"), num_regs - 1);
7624 r->X_add_number = 0;
7625 }
7626 l->X_add_number = (l->X_add_number & 0xffff) + r->X_add_number;
7627 return 1;
7628 }
7629 else if (l->X_add_number >= IND_CPUID && l->X_add_number <= IND_RR)
7630 {
7631 if (r->X_op != O_register
7632 || r->X_add_number < REG_GR
7633 || r->X_add_number > REG_GR + 127)
7634 {
7635 as_bad (_("Indirect register index must be a general register"));
7636 r->X_add_number = REG_GR;
7637 }
7638 l->X_op = O_index;
7639 l->X_op_symbol = md.indregsym[l->X_add_number - IND_CPUID];
7640 l->X_add_number = r->X_add_number;
7641 return 1;
7642 }
7643 }
7644 as_bad (_("Index can only be applied to rotating or indirect registers"));
7645 /* Fall back to a register whose use has as few side effects as
7646 possible, to minimize subsequent error messages. */
7647 l->X_op = O_register;
7648 l->X_add_number = REG_GR + 3;
7649 return 1;
7650 }
7651
7652 int
7653 ia64_parse_name (char *name, expressionS *e, char *nextcharP)
7654 {
7655 struct const_desc *cdesc;
7656 struct dynreg *dr = 0;
7657 unsigned int idx;
7658 struct symbol *sym;
7659 char *end;
7660
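/* Illustrative operands handled here (examples only): "@gprel(foo)" goes
   through PSEUDO_FUNC_RELOC below, a nested "@ltoff(@fptr(foo))" is folded
   into FUNC_LT_FPTR_RELATIVE, and names such as "loc3" or a rotating
   register set declared with ".rotr" are resolved by the later branches.  */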
7661 if (*name == '@')
7662 {
7663 enum pseudo_type pseudo_type = PSEUDO_FUNC_NONE;
7664
7665 /* Find what relocation pseudo-function we're dealing with. */
7666 for (idx = 0; idx < NELEMS (pseudo_func); ++idx)
7667 if (pseudo_func[idx].name
7668 && pseudo_func[idx].name[0] == name[1]
7669 && strcmp (pseudo_func[idx].name + 1, name + 2) == 0)
7670 {
7671 pseudo_type = pseudo_func[idx].type;
7672 break;
7673 }
7674 switch (pseudo_type)
7675 {
7676 case PSEUDO_FUNC_RELOC:
7677 end = input_line_pointer;
7678 if (*nextcharP != '(')
7679 {
7680 as_bad (_("Expected '('"));
7681 break;
7682 }
7683 /* Skip '('. */
7684 ++input_line_pointer;
7685 expression (e);
7686 if (*input_line_pointer != ')')
7687 {
7688 as_bad (_("Missing ')'"));
7689 goto done;
7690 }
7691 /* Skip ')'. */
7692 ++input_line_pointer;
7693 if (e->X_op != O_symbol)
7694 {
7695 if (e->X_op != O_pseudo_fixup)
7696 {
7697 as_bad (_("Not a symbolic expression"));
7698 goto done;
7699 }
7700 if (idx != FUNC_LT_RELATIVE)
7701 {
7702 as_bad (_("Illegal combination of relocation functions"));
7703 goto done;
7704 }
7705 switch (S_GET_VALUE (e->X_op_symbol))
7706 {
7707 case FUNC_FPTR_RELATIVE:
7708 idx = FUNC_LT_FPTR_RELATIVE; break;
7709 case FUNC_DTP_MODULE:
7710 idx = FUNC_LT_DTP_MODULE; break;
7711 case FUNC_DTP_RELATIVE:
7712 idx = FUNC_LT_DTP_RELATIVE; break;
7713 case FUNC_TP_RELATIVE:
7714 idx = FUNC_LT_TP_RELATIVE; break;
7715 default:
7716 as_bad (_("Illegal combination of relocation functions"));
7717 goto done;
7718 }
7719 }
7720 /* Make sure gas doesn't get rid of local symbols that are used
7721 in relocs. */
7722 e->X_op = O_pseudo_fixup;
7723 e->X_op_symbol = pseudo_func[idx].u.sym;
7724 done:
7725 *nextcharP = *input_line_pointer;
7726 break;
7727
7728 case PSEUDO_FUNC_CONST:
7729 e->X_op = O_constant;
7730 e->X_add_number = pseudo_func[idx].u.ival;
7731 break;
7732
7733 case PSEUDO_FUNC_REG:
7734 e->X_op = O_register;
7735 e->X_add_number = pseudo_func[idx].u.ival;
7736 break;
7737
7738 default:
7739 return 0;
7740 }
7741 return 1;
7742 }
7743
7744 /* first see if NAME is a known register name: */
7745 sym = hash_find (md.reg_hash, name);
7746 if (sym)
7747 {
7748 e->X_op = O_register;
7749 e->X_add_number = S_GET_VALUE (sym);
7750 return 1;
7751 }
7752
7753 cdesc = hash_find (md.const_hash, name);
7754 if (cdesc)
7755 {
7756 e->X_op = O_constant;
7757 e->X_add_number = cdesc->value;
7758 return 1;
7759 }
7760
7761 /* check for inN, locN, or outN: */
7762 idx = 0;
7763 switch (name[0])
7764 {
7765 case 'i':
7766 if (name[1] == 'n' && ISDIGIT (name[2]))
7767 {
7768 dr = &md.in;
7769 idx = 2;
7770 }
7771 break;
7772
7773 case 'l':
7774 if (name[1] == 'o' && name[2] == 'c' && ISDIGIT (name[3]))
7775 {
7776 dr = &md.loc;
7777 idx = 3;
7778 }
7779 break;
7780
7781 case 'o':
7782 if (name[1] == 'u' && name[2] == 't' && ISDIGIT (name[3]))
7783 {
7784 dr = &md.out;
7785 idx = 3;
7786 }
7787 break;
7788
7789 default:
7790 break;
7791 }
7792
7793 /* Ignore register numbers with leading zeroes, except zero itself. */
7794 if (dr && (name[idx] != '0' || name[idx + 1] == '\0'))
7795 {
7796 unsigned long regnum;
7797
7798 /* The name is inN, locN, or outN; parse the register number. */
7799 regnum = strtoul (name + idx, &end, 10);
7800 if (end > name + idx && *end == '\0' && regnum < 96)
7801 {
7802 if (regnum >= dr->num_regs)
7803 {
7804 if (!dr->num_regs)
7805 as_bad (_("No current frame"));
7806 else
7807 as_bad (_("Register number out of range 0..%u"),
7808 dr->num_regs - 1);
7809 regnum = 0;
7810 }
7811 e->X_op = O_register;
7812 e->X_add_number = dr->base + regnum;
7813 return 1;
7814 }
7815 }
7816
7817 end = alloca (strlen (name) + 1);
7818 strcpy (end, name);
7819 name = ia64_canonicalize_symbol_name (end);
7820 if ((dr = hash_find (md.dynreg_hash, name)))
7821 {
7822 /* We've got ourselves the name of a rotating register set.
7823 Store the base register number in the low 16 bits of
7824 X_add_number and the size of the register set in the top 16
7825 bits. */
7826 e->X_op = O_register;
7827 e->X_add_number = dr->base | (dr->num_regs << 16);
7828 return 1;
7829 }
7830 return 0;
7831 }
7832
7833 /* Remove the '#' suffix that indicates a symbol as opposed to a register. */
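/* For example, "foo#" canonicalizes to "foo"; "foo##" does too, but draws a
   warning about the redundant suffix; a standalone "#" is rejected.  */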
7834
7835 char *
7836 ia64_canonicalize_symbol_name (char *name)
7837 {
7838 size_t len = strlen (name), full = len;
7839
7840 while (len > 0 && name[len - 1] == '#')
7841 --len;
7842 if (len <= 0)
7843 {
7844 if (full > 0)
7845 as_bad (_("Standalone `#' is illegal"));
7846 }
7847 else if (len < full - 1)
7848 as_warn (_("Redundant `#' suffix operators"));
7849 name[len] = '\0';
7850 return name;
7851 }
7852
7853 /* Return true if idesc is a conditional branch instruction. This excludes
7854 the modulo scheduled branches, and br.ia. Mod-sched branches are excluded
7855 because they always read/write resources regardless of the value of the
7856 qualifying predicate. br.ia must always use p0, and hence is always
7857 taken. Thus this function returns true for branches which can fall
7858 through, and which use no resources if they do fall through. */
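/* For example (reading the name checks below, not an exhaustive list): "br",
   "br.cond.sptk.few", "br.call" and "brl.cond" are treated as conditional,
   while "br.ia", "br.cloop" and "br.wtop" are not.  */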
7859
7860 static int
7861 is_conditional_branch (struct ia64_opcode *idesc)
7862 {
7863 /* br is a conditional branch. Everything that starts with br. except
7864 br.ia, br.c{loop,top,exit}, and br.w{top,exit} is a conditional branch.
7865 Everything that starts with brl is a conditional branch. */
7866 return (idesc->name[0] == 'b' && idesc->name[1] == 'r'
7867 && (idesc->name[2] == '\0'
7868 || (idesc->name[2] == '.' && idesc->name[3] != 'i'
7869 && idesc->name[3] != 'c' && idesc->name[3] != 'w')
7870 || idesc->name[2] == 'l'
7871 /* br.cond, br.call, br.clr */
7872 || (idesc->name[2] == '.' && idesc->name[3] == 'c'
7873 && (idesc->name[4] == 'a' || idesc->name[4] == 'o'
7874 || (idesc->name[4] == 'l' && idesc->name[5] == 'r')))));
7875 }
7876
7877 /* Return whether the given opcode is a taken branch. If there's any doubt,
7878 returns zero. */
7879
7880 static int
7881 is_taken_branch (struct ia64_opcode *idesc)
7882 {
7883 return ((is_conditional_branch (idesc) && CURR_SLOT.qp_regno == 0)
7884 || strncmp (idesc->name, "br.ia", 5) == 0);
7885 }
7886
7887 /* Return whether the given opcode is an interruption or rfi. If there's any
7888 doubt, returns zero. */
7889
7890 static int
7891 is_interruption_or_rfi (struct ia64_opcode *idesc)
7892 {
7893 if (strcmp (idesc->name, "rfi") == 0)
7894 return 1;
7895 return 0;
7896 }
7897
7898 /* Returns the index of the given dependency in the opcode's list of chks, or
7899 -1 if there is no dependency. */
7900
7901 static int
7902 depends_on (int depind, struct ia64_opcode *idesc)
7903 {
7904 int i;
7905 const struct ia64_opcode_dependency *dep = idesc->dependencies;
7906 for (i = 0; i < dep->nchks; i++)
7907 {
7908 if (depind == DEP (dep->chks[i]))
7909 return i;
7910 }
7911 return -1;
7912 }
7913
7914 /* Determine a set of specific resources used for a particular resource
7915 class. Returns the number of specific resources identified. For those
7916 cases which are not determinable statically, the resource returned is
7917 marked nonspecific.
7918
7919 Meanings of value in 'NOTE':
7920 1) only read/write when the register number is explicitly encoded in the
7921 insn.
7922 2) only read CFM when accessing a rotating GR, FR, or PR. mov pr only
7923 accesses CFM when qualifying predicate is in the rotating region.
7924 3) general register value is used to specify an indirect register; not
7925 determinable statically.
7926 4) only read the given resource when bits 7:0 of the indirect index
7927 register value does not match the register number of the resource; not
7928 determinable statically.
7929 5) all rules are implementation specific.
7930 6) only when both the index specified by the reader and the index specified
7931 by the writer have the same value in bits 63:61; not determinable
7932 statically.
7933 7) only access the specified resource when the corresponding mask bit is
7934 set
7935 8) PSR.dfh is only read when these insns reference FR32-127. PSR.dfl is
7936 only read when these insns reference FR2-31
7937 9) PSR.mfl is only written when these insns write FR2-31. PSR.mfh is only
7938 written when these insns write FR32-127
7939 10) The PSR.bn bit is only accessed when one of GR16-31 is specified in the
7940 instruction
7941 11) The target predicates are written independently of PR[qp], but source
7942 registers are only read if PR[qp] is true. Since the state of PR[qp]
7943 cannot statically be determined, all source registers are marked used.
7944 12) This insn only reads the specified predicate register when that
7945 register is the PR[qp].
7946 13) This reference to ld-c only applies to the GR whose value is loaded
7947 with data returned from memory, not the post-incremented address register.
7948 14) The RSE resource includes the implementation-specific RSE internal
7949 state resources. At least one (and possibly more) of these resources are
7950 read by each instruction listed in IC:rse-readers. At least one (and
7951 possibly more) of these resources are written by each insn listed in
7952 IC:rse-writers.
7953 15+16) Represents reserved instructions, which the assembler does not
7954 generate.
7955 17) CR[TPR] has a RAW dependency only between mov-to-CR-TPR and
7956 mov-to-PSR-l or ssm instructions that set PSR.i, PSR.pp or PSR.up.
7957
7958 Memory resources (i.e. locations in memory) are *not* marked or tracked by
7959 this code; there are no dependency violations based on memory access.
7960 */
7961
7962 #define MAX_SPECS 256
7963 #define DV_CHK 1
7964 #define DV_REG 0
7965
7966 static int
7967 specify_resource (const struct ia64_dependency *dep,
7968 struct ia64_opcode *idesc,
7969 /* is this a DV chk or a DV reg? */
7970 int type,
7971 /* returned specific resources */
7972 struct rsrc specs[MAX_SPECS],
7973 /* resource note for this insn's usage */
7974 int note,
7975 /* which execution path to examine */
7976 int path)
7977 {
7978 int count = 0;
7979 int i;
7980 int rsrc_write = 0;
7981 struct rsrc tmpl;
7982
7983 if (dep->mode == IA64_DV_WAW
7984 || (dep->mode == IA64_DV_RAW && type == DV_REG)
7985 || (dep->mode == IA64_DV_WAR && type == DV_CHK))
7986 rsrc_write = 1;
7987
7988 /* template for any resources we identify */
7989 tmpl.dependency = dep;
7990 tmpl.note = note;
7991 tmpl.insn_srlz = tmpl.data_srlz = 0;
7992 tmpl.qp_regno = CURR_SLOT.qp_regno;
7993 tmpl.link_to_qp_branch = 1;
7994 tmpl.mem_offset.hint = 0;
7995 tmpl.mem_offset.offset = 0;
7996 tmpl.mem_offset.base = 0;
7997 tmpl.specific = 1;
7998 tmpl.index = -1;
7999 tmpl.cmp_type = CMP_NONE;
8000 tmpl.depind = 0;
8001 tmpl.file = NULL;
8002 tmpl.line = 0;
8003 tmpl.path = 0;
8004
8005 #define UNHANDLED \
8006 as_warn (_("Unhandled dependency %s for %s (%s), note %d"), \
8007 dep->name, idesc->name, (rsrc_write?"write":"read"), note)
8008 #define KNOWN(REG) (gr_values[REG].known && gr_values[REG].path >= path)
8009
8010 /* we don't need to track these */
8011 if (dep->semantics == IA64_DVS_NONE)
8012 return 0;
8013
8014 switch (dep->specifier)
8015 {
8016 case IA64_RS_AR_K:
8017 if (note == 1)
8018 {
8019 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8020 {
8021 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8022 if (regno >= 0 && regno <= 7)
8023 {
8024 specs[count] = tmpl;
8025 specs[count++].index = regno;
8026 }
8027 }
8028 }
8029 else if (note == 0)
8030 {
8031 for (i = 0; i < 8; i++)
8032 {
8033 specs[count] = tmpl;
8034 specs[count++].index = i;
8035 }
8036 }
8037 else
8038 {
8039 UNHANDLED;
8040 }
8041 break;
8042
8043 case IA64_RS_AR_UNAT:
8044 /* This is a mov =AR or mov AR= instruction. */
8045 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8046 {
8047 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8048 if (regno == AR_UNAT)
8049 {
8050 specs[count++] = tmpl;
8051 }
8052 }
8053 else
8054 {
8055 /* This is a spill/fill, or other instruction that modifies the
8056 unat register. */
8057
8058 /* Unless we can determine the specific bits used, mark the whole
8059 thing; bits 8:3 of the memory address indicate the bit used in
8060 UNAT. The .mem.offset hint may be used to eliminate a small
8061 subset of conflicts. */
8062 specs[count] = tmpl;
8063 if (md.mem_offset.hint)
8064 {
8065 if (md.debug_dv)
8066 fprintf (stderr, " Using hint for spill/fill\n");
8067 /* The index isn't actually used, just set it to something
8068 approximating the bit index. */
8069 specs[count].index = (md.mem_offset.offset >> 3) & 0x3F;
8070 specs[count].mem_offset.hint = 1;
8071 specs[count].mem_offset.offset = md.mem_offset.offset;
8072 specs[count++].mem_offset.base = md.mem_offset.base;
8073 }
8074 else
8075 {
8076 specs[count++].specific = 0;
8077 }
8078 }
8079 break;
8080
8081 case IA64_RS_AR:
8082 if (note == 1)
8083 {
8084 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8085 {
8086 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8087 if ((regno >= 8 && regno <= 15)
8088 || (regno >= 20 && regno <= 23)
8089 || (regno >= 31 && regno <= 39)
8090 || (regno >= 41 && regno <= 47)
8091 || (regno >= 67 && regno <= 111))
8092 {
8093 specs[count] = tmpl;
8094 specs[count++].index = regno;
8095 }
8096 }
8097 }
8098 else
8099 {
8100 UNHANDLED;
8101 }
8102 break;
8103
8104 case IA64_RS_ARb:
8105 if (note == 1)
8106 {
8107 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8108 {
8109 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8110 if ((regno >= 48 && regno <= 63)
8111 || (regno >= 112 && regno <= 127))
8112 {
8113 specs[count] = tmpl;
8114 specs[count++].index = regno;
8115 }
8116 }
8117 }
8118 else if (note == 0)
8119 {
8120 for (i = 48; i < 64; i++)
8121 {
8122 specs[count] = tmpl;
8123 specs[count++].index = i;
8124 }
8125 for (i = 112; i < 128; i++)
8126 {
8127 specs[count] = tmpl;
8128 specs[count++].index = i;
8129 }
8130 }
8131 else
8132 {
8133 UNHANDLED;
8134 }
8135 break;
8136
8137 case IA64_RS_BR:
8138 if (note != 1)
8139 {
8140 UNHANDLED;
8141 }
8142 else
8143 {
8144 if (rsrc_write)
8145 {
8146 for (i = 0; i < idesc->num_outputs; i++)
8147 if (idesc->operands[i] == IA64_OPND_B1
8148 || idesc->operands[i] == IA64_OPND_B2)
8149 {
8150 specs[count] = tmpl;
8151 specs[count++].index =
8152 CURR_SLOT.opnd[i].X_add_number - REG_BR;
8153 }
8154 }
8155 else
8156 {
8157 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8158 if (idesc->operands[i] == IA64_OPND_B1
8159 || idesc->operands[i] == IA64_OPND_B2)
8160 {
8161 specs[count] = tmpl;
8162 specs[count++].index =
8163 CURR_SLOT.opnd[i].X_add_number - REG_BR;
8164 }
8165 }
8166 }
8167 break;
8168
8169 case IA64_RS_CPUID: /* four or more registers */
8170 if (note == 3)
8171 {
8172 if (idesc->operands[!rsrc_write] == IA64_OPND_CPUID_R3)
8173 {
8174 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8175 if (regno >= 0 && regno < NELEMS (gr_values)
8176 && KNOWN (regno))
8177 {
8178 specs[count] = tmpl;
8179 specs[count++].index = gr_values[regno].value & 0xFF;
8180 }
8181 else
8182 {
8183 specs[count] = tmpl;
8184 specs[count++].specific = 0;
8185 }
8186 }
8187 }
8188 else
8189 {
8190 UNHANDLED;
8191 }
8192 break;
8193
8194 case IA64_RS_DBR: /* four or more registers */
8195 if (note == 3)
8196 {
8197 if (idesc->operands[!rsrc_write] == IA64_OPND_DBR_R3)
8198 {
8199 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8200 if (regno >= 0 && regno < NELEMS (gr_values)
8201 && KNOWN (regno))
8202 {
8203 specs[count] = tmpl;
8204 specs[count++].index = gr_values[regno].value & 0xFF;
8205 }
8206 else
8207 {
8208 specs[count] = tmpl;
8209 specs[count++].specific = 0;
8210 }
8211 }
8212 }
8213 else if (note == 0 && !rsrc_write)
8214 {
8215 specs[count] = tmpl;
8216 specs[count++].specific = 0;
8217 }
8218 else
8219 {
8220 UNHANDLED;
8221 }
8222 break;
8223
8224 case IA64_RS_IBR: /* four or more registers */
8225 if (note == 3)
8226 {
8227 if (idesc->operands[!rsrc_write] == IA64_OPND_IBR_R3)
8228 {
8229 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8230 if (regno >= 0 && regno < NELEMS (gr_values)
8231 && KNOWN (regno))
8232 {
8233 specs[count] = tmpl;
8234 specs[count++].index = gr_values[regno].value & 0xFF;
8235 }
8236 else
8237 {
8238 specs[count] = tmpl;
8239 specs[count++].specific = 0;
8240 }
8241 }
8242 }
8243 else
8244 {
8245 UNHANDLED;
8246 }
8247 break;
8248
8249 case IA64_RS_MSR:
8250 if (note == 5)
8251 {
8252 /* These are implementation specific. Force all references to
8253 conflict with all other references. */
8254 specs[count] = tmpl;
8255 specs[count++].specific = 0;
8256 }
8257 else
8258 {
8259 UNHANDLED;
8260 }
8261 break;
8262
8263 case IA64_RS_PKR: /* 16 or more registers */
8264 if (note == 3 || note == 4)
8265 {
8266 if (idesc->operands[!rsrc_write] == IA64_OPND_PKR_R3)
8267 {
8268 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8269 if (regno >= 0 && regno < NELEMS (gr_values)
8270 && KNOWN (regno))
8271 {
8272 if (note == 3)
8273 {
8274 specs[count] = tmpl;
8275 specs[count++].index = gr_values[regno].value & 0xFF;
8276 }
8277 else
8278 for (i = 0; i < NELEMS (gr_values); i++)
8279 {
8280 /* Uses all registers *except* the one in R3. */
8281 if ((unsigned)i != (gr_values[regno].value & 0xFF))
8282 {
8283 specs[count] = tmpl;
8284 specs[count++].index = i;
8285 }
8286 }
8287 }
8288 else
8289 {
8290 specs[count] = tmpl;
8291 specs[count++].specific = 0;
8292 }
8293 }
8294 }
8295 else if (note == 0)
8296 {
8297 /* probe et al. */
8298 specs[count] = tmpl;
8299 specs[count++].specific = 0;
8300 }
8301 break;
8302
8303 case IA64_RS_PMC: /* four or more registers */
8304 if (note == 3)
8305 {
8306 if (idesc->operands[!rsrc_write] == IA64_OPND_PMC_R3
8307 || (!rsrc_write && idesc->operands[1] == IA64_OPND_PMD_R3))
8308
8309 {
8310 int index = ((idesc->operands[1] == IA64_OPND_R3 && !rsrc_write)
8311 ? 1 : !rsrc_write);
8312 int regno = CURR_SLOT.opnd[index].X_add_number - REG_GR;
8313 if (regno >= 0 && regno < NELEMS (gr_values)
8314 && KNOWN (regno))
8315 {
8316 specs[count] = tmpl;
8317 specs[count++].index = gr_values[regno].value & 0xFF;
8318 }
8319 else
8320 {
8321 specs[count] = tmpl;
8322 specs[count++].specific = 0;
8323 }
8324 }
8325 }
8326 else
8327 {
8328 UNHANDLED;
8329 }
8330 break;
8331
8332 case IA64_RS_PMD: /* four or more registers */
8333 if (note == 3)
8334 {
8335 if (idesc->operands[!rsrc_write] == IA64_OPND_PMD_R3)
8336 {
8337 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8338 if (regno >= 0 && regno < NELEMS (gr_values)
8339 && KNOWN (regno))
8340 {
8341 specs[count] = tmpl;
8342 specs[count++].index = gr_values[regno].value & 0xFF;
8343 }
8344 else
8345 {
8346 specs[count] = tmpl;
8347 specs[count++].specific = 0;
8348 }
8349 }
8350 }
8351 else
8352 {
8353 UNHANDLED;
8354 }
8355 break;
8356
8357 case IA64_RS_RR: /* eight registers */
8358 if (note == 6)
8359 {
8360 if (idesc->operands[!rsrc_write] == IA64_OPND_RR_R3)
8361 {
8362 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8363 if (regno >= 0 && regno < NELEMS (gr_values)
8364 && KNOWN (regno))
8365 {
8366 specs[count] = tmpl;
8367 specs[count++].index = (gr_values[regno].value >> 61) & 0x7;
8368 }
8369 else
8370 {
8371 specs[count] = tmpl;
8372 specs[count++].specific = 0;
8373 }
8374 }
8375 }
8376 else if (note == 0 && !rsrc_write)
8377 {
8378 specs[count] = tmpl;
8379 specs[count++].specific = 0;
8380 }
8381 else
8382 {
8383 UNHANDLED;
8384 }
8385 break;
8386
8387 case IA64_RS_CR_IRR:
8388 if (note == 0)
8389 {
8390 /* handle mov-from-CR-IVR; it's a read that writes CR[IRR] */
8391 int regno = CURR_SLOT.opnd[1].X_add_number - REG_CR;
8392 if (rsrc_write
8393 && idesc->operands[1] == IA64_OPND_CR3
8394 && regno == CR_IVR)
8395 {
8396 for (i = 0; i < 4; i++)
8397 {
8398 specs[count] = tmpl;
8399 specs[count++].index = CR_IRR0 + i;
8400 }
8401 }
8402 }
8403 else if (note == 1)
8404 {
8405 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8406 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8407 && regno >= CR_IRR0
8408 && regno <= CR_IRR3)
8409 {
8410 specs[count] = tmpl;
8411 specs[count++].index = regno;
8412 }
8413 }
8414 else
8415 {
8416 UNHANDLED;
8417 }
8418 break;
8419
8420 case IA64_RS_CR_LRR:
8421 if (note != 1)
8422 {
8423 UNHANDLED;
8424 }
8425 else
8426 {
8427 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8428 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8429 && (regno == CR_LRR0 || regno == CR_LRR1))
8430 {
8431 specs[count] = tmpl;
8432 specs[count++].index = regno;
8433 }
8434 }
8435 break;
8436
8437 case IA64_RS_CR:
8438 if (note == 1)
8439 {
8440 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
8441 {
8442 specs[count] = tmpl;
8443 specs[count++].index =
8444 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8445 }
8446 }
8447 else
8448 {
8449 UNHANDLED;
8450 }
8451 break;
8452
8453 case IA64_RS_FR:
8454 case IA64_RS_FRb:
8455 if (note != 1)
8456 {
8457 UNHANDLED;
8458 }
8459 else if (rsrc_write)
8460 {
8461 if (dep->specifier == IA64_RS_FRb
8462 && idesc->operands[0] == IA64_OPND_F1)
8463 {
8464 specs[count] = tmpl;
8465 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_FR;
8466 }
8467 }
8468 else
8469 {
8470 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8471 {
8472 if (idesc->operands[i] == IA64_OPND_F2
8473 || idesc->operands[i] == IA64_OPND_F3
8474 || idesc->operands[i] == IA64_OPND_F4)
8475 {
8476 specs[count] = tmpl;
8477 specs[count++].index =
8478 CURR_SLOT.opnd[i].X_add_number - REG_FR;
8479 }
8480 }
8481 }
8482 break;
8483
8484 case IA64_RS_GR:
8485 if (note == 13)
8486 {
8487 /* This reference applies only to the GR whose value is loaded with
8488 data returned from memory. */
8489 specs[count] = tmpl;
8490 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_GR;
8491 }
8492 else if (note == 1)
8493 {
8494 if (rsrc_write)
8495 {
8496 for (i = 0; i < idesc->num_outputs; i++)
8497 if (idesc->operands[i] == IA64_OPND_R1
8498 || idesc->operands[i] == IA64_OPND_R2
8499 || idesc->operands[i] == IA64_OPND_R3)
8500 {
8501 specs[count] = tmpl;
8502 specs[count++].index =
8503 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8504 }
8505 if (idesc->flags & IA64_OPCODE_POSTINC)
8506 for (i = 0; i < NELEMS (idesc->operands); i++)
8507 if (idesc->operands[i] == IA64_OPND_MR3)
8508 {
8509 specs[count] = tmpl;
8510 specs[count++].index =
8511 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8512 }
8513 }
8514 else
8515 {
8516 /* Look for anything that reads a GR. */
8517 for (i = 0; i < NELEMS (idesc->operands); i++)
8518 {
8519 if (idesc->operands[i] == IA64_OPND_MR3
8520 || idesc->operands[i] == IA64_OPND_CPUID_R3
8521 || idesc->operands[i] == IA64_OPND_DBR_R3
8522 || idesc->operands[i] == IA64_OPND_IBR_R3
8523 || idesc->operands[i] == IA64_OPND_MSR_R3
8524 || idesc->operands[i] == IA64_OPND_PKR_R3
8525 || idesc->operands[i] == IA64_OPND_PMC_R3
8526 || idesc->operands[i] == IA64_OPND_PMD_R3
8527 || idesc->operands[i] == IA64_OPND_RR_R3
8528 || ((i >= idesc->num_outputs)
8529 && (idesc->operands[i] == IA64_OPND_R1
8530 || idesc->operands[i] == IA64_OPND_R2
8531 || idesc->operands[i] == IA64_OPND_R3
8532 /* addl source register. */
8533 || idesc->operands[i] == IA64_OPND_R3_2)))
8534 {
8535 specs[count] = tmpl;
8536 specs[count++].index =
8537 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8538 }
8539 }
8540 }
8541 }
8542 else
8543 {
8544 UNHANDLED;
8545 }
8546 break;
8547
8548 /* This is the same as IA64_RS_PRr, except that the register range is
8549 from 1 to 15, and there are no rotating register reads/writes here. */
8550 case IA64_RS_PR:
8551 if (note == 0)
8552 {
8553 for (i = 1; i < 16; i++)
8554 {
8555 specs[count] = tmpl;
8556 specs[count++].index = i;
8557 }
8558 }
8559 else if (note == 7)
8560 {
8561 valueT mask = 0;
8562 /* Mark only those registers indicated by the mask. */
8563 if (rsrc_write)
8564 {
8565 mask = CURR_SLOT.opnd[2].X_add_number;
8566 for (i = 1; i < 16; i++)
8567 if (mask & ((valueT) 1 << i))
8568 {
8569 specs[count] = tmpl;
8570 specs[count++].index = i;
8571 }
8572 }
8573 else
8574 {
8575 UNHANDLED;
8576 }
8577 }
8578 else if (note == 11) /* note 11 implies note 1 as well */
8579 {
8580 if (rsrc_write)
8581 {
8582 for (i = 0; i < idesc->num_outputs; i++)
8583 {
8584 if (idesc->operands[i] == IA64_OPND_P1
8585 || idesc->operands[i] == IA64_OPND_P2)
8586 {
8587 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8588 if (regno >= 1 && regno < 16)
8589 {
8590 specs[count] = tmpl;
8591 specs[count++].index = regno;
8592 }
8593 }
8594 }
8595 }
8596 else
8597 {
8598 UNHANDLED;
8599 }
8600 }
8601 else if (note == 12)
8602 {
8603 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8604 {
8605 specs[count] = tmpl;
8606 specs[count++].index = CURR_SLOT.qp_regno;
8607 }
8608 }
8609 else if (note == 1)
8610 {
8611 if (rsrc_write)
8612 {
8613 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8614 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8615 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8616 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8617
8618 if ((idesc->operands[0] == IA64_OPND_P1
8619 || idesc->operands[0] == IA64_OPND_P2)
8620 && p1 >= 1 && p1 < 16)
8621 {
8622 specs[count] = tmpl;
8623 specs[count].cmp_type =
8624 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8625 specs[count++].index = p1;
8626 }
8627 if ((idesc->operands[1] == IA64_OPND_P1
8628 || idesc->operands[1] == IA64_OPND_P2)
8629 && p2 >= 1 && p2 < 16)
8630 {
8631 specs[count] = tmpl;
8632 specs[count].cmp_type =
8633 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8634 specs[count++].index = p2;
8635 }
8636 }
8637 else
8638 {
8639 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8640 {
8641 specs[count] = tmpl;
8642 specs[count++].index = CURR_SLOT.qp_regno;
8643 }
8644 if (idesc->operands[1] == IA64_OPND_PR)
8645 {
8646 for (i = 1; i < 16; i++)
8647 {
8648 specs[count] = tmpl;
8649 specs[count++].index = i;
8650 }
8651 }
8652 }
8653 }
8654 else
8655 {
8656 UNHANDLED;
8657 }
8658 break;
8659
8660 /* This is the general case for PRs. IA64_RS_PR and IA64_RS_PR63 are
8661 simplified cases of this. */
8662 case IA64_RS_PRr:
8663 if (note == 0)
8664 {
8665 for (i = 16; i < 63; i++)
8666 {
8667 specs[count] = tmpl;
8668 specs[count++].index = i;
8669 }
8670 }
8671 else if (note == 7)
8672 {
8673 valueT mask = 0;
8674 /* Mark only those registers indicated by the mask. */
8675 if (rsrc_write
8676 && idesc->operands[0] == IA64_OPND_PR)
8677 {
8678 mask = CURR_SLOT.opnd[2].X_add_number;
8679 if (mask & ((valueT) 1 << 16))
8680 for (i = 16; i < 63; i++)
8681 {
8682 specs[count] = tmpl;
8683 specs[count++].index = i;
8684 }
8685 }
8686 else if (rsrc_write
8687 && idesc->operands[0] == IA64_OPND_PR_ROT)
8688 {
8689 for (i = 16; i < 63; i++)
8690 {
8691 specs[count] = tmpl;
8692 specs[count++].index = i;
8693 }
8694 }
8695 else
8696 {
8697 UNHANDLED;
8698 }
8699 }
8700 else if (note == 11) /* note 11 implies note 1 as well */
8701 {
8702 if (rsrc_write)
8703 {
8704 for (i = 0; i < idesc->num_outputs; i++)
8705 {
8706 if (idesc->operands[i] == IA64_OPND_P1
8707 || idesc->operands[i] == IA64_OPND_P2)
8708 {
8709 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8710 if (regno >= 16 && regno < 63)
8711 {
8712 specs[count] = tmpl;
8713 specs[count++].index = regno;
8714 }
8715 }
8716 }
8717 }
8718 else
8719 {
8720 UNHANDLED;
8721 }
8722 }
8723 else if (note == 12)
8724 {
8725 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
8726 {
8727 specs[count] = tmpl;
8728 specs[count++].index = CURR_SLOT.qp_regno;
8729 }
8730 }
8731 else if (note == 1)
8732 {
8733 if (rsrc_write)
8734 {
8735 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8736 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8737 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8738 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8739
8740 if ((idesc->operands[0] == IA64_OPND_P1
8741 || idesc->operands[0] == IA64_OPND_P2)
8742 && p1 >= 16 && p1 < 63)
8743 {
8744 specs[count] = tmpl;
8745 specs[count].cmp_type =
8746 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8747 specs[count++].index = p1;
8748 }
8749 if ((idesc->operands[1] == IA64_OPND_P1
8750 || idesc->operands[1] == IA64_OPND_P2)
8751 && p2 >= 16 && p2 < 63)
8752 {
8753 specs[count] = tmpl;
8754 specs[count].cmp_type =
8755 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8756 specs[count++].index = p2;
8757 }
8758 }
8759 else
8760 {
8761 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
8762 {
8763 specs[count] = tmpl;
8764 specs[count++].index = CURR_SLOT.qp_regno;
8765 }
8766 if (idesc->operands[1] == IA64_OPND_PR)
8767 {
8768 for (i = 16; i < 63; i++)
8769 {
8770 specs[count] = tmpl;
8771 specs[count++].index = i;
8772 }
8773 }
8774 }
8775 }
8776 else
8777 {
8778 UNHANDLED;
8779 }
8780 break;
8781
8782 case IA64_RS_PSR:
8783 /* Verify that the instruction is using the PSR bit indicated in
8784 dep->regindex. */
8785 if (note == 0)
8786 {
8787 if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_UM)
8788 {
8789 if (dep->regindex < 6)
8790 {
8791 specs[count++] = tmpl;
8792 }
8793 }
8794 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR)
8795 {
8796 if (dep->regindex < 32
8797 || dep->regindex == 35
8798 || dep->regindex == 36
8799 || (!rsrc_write && dep->regindex == PSR_CPL))
8800 {
8801 specs[count++] = tmpl;
8802 }
8803 }
8804 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_L)
8805 {
8806 if (dep->regindex < 32
8807 || dep->regindex == 35
8808 || dep->regindex == 36
8809 || (rsrc_write && dep->regindex == PSR_CPL))
8810 {
8811 specs[count++] = tmpl;
8812 }
8813 }
8814 else
8815 {
8816 /* Several PSR bits have very specific dependencies. */
8817 switch (dep->regindex)
8818 {
8819 default:
8820 specs[count++] = tmpl;
8821 break;
8822 case PSR_IC:
8823 if (rsrc_write)
8824 {
8825 specs[count++] = tmpl;
8826 }
8827 else
8828 {
8829 /* Only certain CR accesses use PSR.ic */
8830 if (idesc->operands[0] == IA64_OPND_CR3
8831 || idesc->operands[1] == IA64_OPND_CR3)
8832 {
8833 int index =
8834 ((idesc->operands[0] == IA64_OPND_CR3)
8835 ? 0 : 1);
8836 int regno =
8837 CURR_SLOT.opnd[index].X_add_number - REG_CR;
8838
8839 switch (regno)
8840 {
8841 default:
8842 break;
8843 case CR_ITIR:
8844 case CR_IFS:
8845 case CR_IIM:
8846 case CR_IIP:
8847 case CR_IPSR:
8848 case CR_ISR:
8849 case CR_IFA:
8850 case CR_IHA:
8851 case CR_IIPA:
8852 specs[count++] = tmpl;
8853 break;
8854 }
8855 }
8856 }
8857 break;
8858 case PSR_CPL:
8859 if (rsrc_write)
8860 {
8861 specs[count++] = tmpl;
8862 }
8863 else
8864 {
8865 /* Only some AR accesses use cpl */
8866 if (idesc->operands[0] == IA64_OPND_AR3
8867 || idesc->operands[1] == IA64_OPND_AR3)
8868 {
8869 int index =
8870 ((idesc->operands[0] == IA64_OPND_AR3)
8871 ? 0 : 1);
8872 int regno =
8873 CURR_SLOT.opnd[index].X_add_number - REG_AR;
8874
8875 if (regno == AR_ITC
8876 || regno == AR_RUC
8877 || (index == 0
8878 && (regno == AR_RSC
8879 || (regno >= AR_K0
8880 && regno <= AR_K7))))
8881 {
8882 specs[count++] = tmpl;
8883 }
8884 }
8885 else
8886 {
8887 specs[count++] = tmpl;
8888 }
8889 break;
8890 }
8891 }
8892 }
8893 }
8894 else if (note == 7)
8895 {
8896 valueT mask = 0;
8897 if (idesc->operands[0] == IA64_OPND_IMMU24)
8898 {
8899 mask = CURR_SLOT.opnd[0].X_add_number;
8900 }
8901 else
8902 {
8903 UNHANDLED;
8904 }
8905 if (mask & ((valueT) 1 << dep->regindex))
8906 {
8907 specs[count++] = tmpl;
8908 }
8909 }
8910 else if (note == 8)
8911 {
8912 int min = dep->regindex == PSR_DFL ? 2 : 32;
8913 int max = dep->regindex == PSR_DFL ? 31 : 127;
8914 /* dfh is read on FR32-127; dfl is read on FR2-31 */
8915 for (i = 0; i < NELEMS (idesc->operands); i++)
8916 {
8917 if (idesc->operands[i] == IA64_OPND_F1
8918 || idesc->operands[i] == IA64_OPND_F2
8919 || idesc->operands[i] == IA64_OPND_F3
8920 || idesc->operands[i] == IA64_OPND_F4)
8921 {
8922 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
8923 if (reg >= min && reg <= max)
8924 {
8925 specs[count++] = tmpl;
8926 }
8927 }
8928 }
8929 }
8930 else if (note == 9)
8931 {
8932 int min = dep->regindex == PSR_MFL ? 2 : 32;
8933 int max = dep->regindex == PSR_MFL ? 31 : 127;
8934 /* mfh is read on writes to FR32-127; mfl is read on writes to
8935 FR2-31 */
8936 for (i = 0; i < idesc->num_outputs; i++)
8937 {
8938 if (idesc->operands[i] == IA64_OPND_F1)
8939 {
8940 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
8941 if (reg >= min && reg <= max)
8942 {
8943 specs[count++] = tmpl;
8944 }
8945 }
8946 }
8947 }
8948 else if (note == 10)
8949 {
8950 for (i = 0; i < NELEMS (idesc->operands); i++)
8951 {
8952 if (idesc->operands[i] == IA64_OPND_R1
8953 || idesc->operands[i] == IA64_OPND_R2
8954 || idesc->operands[i] == IA64_OPND_R3)
8955 {
8956 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
8957 if (regno >= 16 && regno <= 31)
8958 {
8959 specs[count++] = tmpl;
8960 }
8961 }
8962 }
8963 }
8964 else
8965 {
8966 UNHANDLED;
8967 }
8968 break;
8969
8970 case IA64_RS_AR_FPSR:
8971 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8972 {
8973 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8974 if (regno == AR_FPSR)
8975 {
8976 specs[count++] = tmpl;
8977 }
8978 }
8979 else
8980 {
8981 specs[count++] = tmpl;
8982 }
8983 break;
8984
8985 case IA64_RS_ARX:
8986 /* Handle all AR[REG] resources */
8987 if (note == 0 || note == 1)
8988 {
8989 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8990 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3
8991 && regno == dep->regindex)
8992 {
8993 specs[count++] = tmpl;
8994 }
8995 /* other AR[REG] resources may be affected by AR accesses */
8996 else if (idesc->operands[0] == IA64_OPND_AR3)
8997 {
8998 /* AR[] writes */
8999 regno = CURR_SLOT.opnd[0].X_add_number - REG_AR;
9000 switch (dep->regindex)
9001 {
9002 default:
9003 break;
9004 case AR_BSP:
9005 case AR_RNAT:
9006 if (regno == AR_BSPSTORE)
9007 {
9008 specs[count++] = tmpl;
9009 }
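/* Fall through.  */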
9010 case AR_RSC:
9011 if (!rsrc_write &&
9012 (regno == AR_BSPSTORE
9013 || regno == AR_RNAT))
9014 {
9015 specs[count++] = tmpl;
9016 }
9017 break;
9018 }
9019 }
9020 else if (idesc->operands[1] == IA64_OPND_AR3)
9021 {
9022 /* AR[] reads */
9023 regno = CURR_SLOT.opnd[1].X_add_number - REG_AR;
9024 switch (dep->regindex)
9025 {
9026 default:
9027 break;
9028 case AR_RSC:
9029 if (regno == AR_BSPSTORE || regno == AR_RNAT)
9030 {
9031 specs[count++] = tmpl;
9032 }
9033 break;
9034 }
9035 }
9036 else
9037 {
9038 specs[count++] = tmpl;
9039 }
9040 }
9041 else
9042 {
9043 UNHANDLED;
9044 }
9045 break;
9046
9047 case IA64_RS_CRX:
9048 /* Handle all CR[REG] resources.
9049 ??? FIXME: Rule 17 isn't really handled correctly. */
9050 if (note == 0 || note == 1 || note == 17)
9051 {
9052 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
9053 {
9054 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
9055 if (regno == dep->regindex)
9056 {
9057 specs[count++] = tmpl;
9058 }
9059 else if (!rsrc_write)
9060 {
9061 /* Reads from CR[IVR] affect other resources. */
9062 if (regno == CR_IVR)
9063 {
9064 if ((dep->regindex >= CR_IRR0
9065 && dep->regindex <= CR_IRR3)
9066 || dep->regindex == CR_TPR)
9067 {
9068 specs[count++] = tmpl;
9069 }
9070 }
9071 }
9072 }
9073 else
9074 {
9075 specs[count++] = tmpl;
9076 }
9077 }
9078 else
9079 {
9080 UNHANDLED;
9081 }
9082 break;
9083
9084 case IA64_RS_INSERVICE:
9085 /* look for write of EOI (67) or read of IVR (65) */
9086 if ((idesc->operands[0] == IA64_OPND_CR3
9087 && CURR_SLOT.opnd[0].X_add_number - REG_CR == CR_EOI)
9088 || (idesc->operands[1] == IA64_OPND_CR3
9089 && CURR_SLOT.opnd[1].X_add_number - REG_CR == CR_IVR))
9090 {
9091 specs[count++] = tmpl;
9092 }
9093 break;
9094
9095 case IA64_RS_GR0:
9096 if (note == 1)
9097 {
9098 specs[count++] = tmpl;
9099 }
9100 else
9101 {
9102 UNHANDLED;
9103 }
9104 break;
9105
9106 case IA64_RS_CFM:
9107 if (note != 2)
9108 {
9109 specs[count++] = tmpl;
9110 }
9111 else
9112 {
9113 /* Check if any of the registers accessed are in the rotating region.
9114 mov to/from pr accesses CFM only when qp_regno is in the rotating
9115 region. */
9116 for (i = 0; i < NELEMS (idesc->operands); i++)
9117 {
9118 if (idesc->operands[i] == IA64_OPND_R1
9119 || idesc->operands[i] == IA64_OPND_R2
9120 || idesc->operands[i] == IA64_OPND_R3)
9121 {
9122 int num = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9123 /* Assumes that md.rot.num_regs is always valid */
9124 if (md.rot.num_regs > 0
9125 && num > 31
9126 && num < 31 + md.rot.num_regs)
9127 {
9128 specs[count] = tmpl;
9129 specs[count++].specific = 0;
9130 }
9131 }
9132 else if (idesc->operands[i] == IA64_OPND_F1
9133 || idesc->operands[i] == IA64_OPND_F2
9134 || idesc->operands[i] == IA64_OPND_F3
9135 || idesc->operands[i] == IA64_OPND_F4)
9136 {
9137 int num = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9138 if (num > 31)
9139 {
9140 specs[count] = tmpl;
9141 specs[count++].specific = 0;
9142 }
9143 }
9144 else if (idesc->operands[i] == IA64_OPND_P1
9145 || idesc->operands[i] == IA64_OPND_P2)
9146 {
9147 int num = CURR_SLOT.opnd[i].X_add_number - REG_P;
9148 if (num > 15)
9149 {
9150 specs[count] = tmpl;
9151 specs[count++].specific = 0;
9152 }
9153 }
9154 }
9155 if (CURR_SLOT.qp_regno > 15)
9156 {
9157 specs[count] = tmpl;
9158 specs[count++].specific = 0;
9159 }
9160 }
9161 break;
9162
9163 /* This is the same as IA64_RS_PRr, except simplified to account for
9164 the fact that there is only one register. */
9165 case IA64_RS_PR63:
9166 if (note == 0)
9167 {
9168 specs[count++] = tmpl;
9169 }
9170 else if (note == 7)
9171 {
9172 valueT mask = 0;
9173 if (idesc->operands[2] == IA64_OPND_IMM17)
9174 mask = CURR_SLOT.opnd[2].X_add_number;
9175 if (mask & ((valueT) 1 << 63))
9176 specs[count++] = tmpl;
9177 }
9178 else if (note == 11)
9179 {
9180 if ((idesc->operands[0] == IA64_OPND_P1
9181 && CURR_SLOT.opnd[0].X_add_number - REG_P == 63)
9182 || (idesc->operands[1] == IA64_OPND_P2
9183 && CURR_SLOT.opnd[1].X_add_number - REG_P == 63))
9184 {
9185 specs[count++] = tmpl;
9186 }
9187 }
9188 else if (note == 12)
9189 {
9190 if (CURR_SLOT.qp_regno == 63)
9191 {
9192 specs[count++] = tmpl;
9193 }
9194 }
9195 else if (note == 1)
9196 {
9197 if (rsrc_write)
9198 {
9199 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9200 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9201 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
9202 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
9203
9204 if (p1 == 63
9205 && (idesc->operands[0] == IA64_OPND_P1
9206 || idesc->operands[0] == IA64_OPND_P2))
9207 {
9208 specs[count] = tmpl;
9209 specs[count++].cmp_type =
9210 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
9211 }
9212 if (p2 == 63
9213 && (idesc->operands[1] == IA64_OPND_P1
9214 || idesc->operands[1] == IA64_OPND_P2))
9215 {
9216 specs[count] = tmpl;
9217 specs[count++].cmp_type =
9218 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
9219 }
9220 }
9221 else
9222 {
9223 if (CURR_SLOT.qp_regno == 63)
9224 {
9225 specs[count++] = tmpl;
9226 }
9227 }
9228 }
9229 else
9230 {
9231 UNHANDLED;
9232 }
9233 break;
9234
9235 case IA64_RS_RSE:
9236 /* FIXME: we can identify some individual RSE written resources, but RSE
9237 read resources have not yet been completely identified, so for now
9238 treat RSE as a single resource. */
9239 if (strncmp (idesc->name, "mov", 3) == 0)
9240 {
9241 if (rsrc_write)
9242 {
9243 if (idesc->operands[0] == IA64_OPND_AR3
9244 && CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE)
9245 {
9246 specs[count++] = tmpl;
9247 }
9248 }
9249 else
9250 {
9251 if (idesc->operands[0] == IA64_OPND_AR3)
9252 {
9253 if (CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE
9254 || CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_RNAT)
9255 {
9256 specs[count++] = tmpl;
9257 }
9258 }
9259 else if (idesc->operands[1] == IA64_OPND_AR3)
9260 {
9261 if (CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSP
9262 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSPSTORE
9263 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_RNAT)
9264 {
9265 specs[count++] = tmpl;
9266 }
9267 }
9268 }
9269 }
9270 else
9271 {
9272 specs[count++] = tmpl;
9273 }
9274 break;
9275
9276 case IA64_RS_ANY:
9277 /* FIXME -- do any of these need to be non-specific? */
9278 specs[count++] = tmpl;
9279 break;
9280
9281 default:
9282 as_bad (_("Unrecognized dependency specifier %d\n"), dep->specifier);
9283 break;
9284 }
9285
9286 return count;
9287 }
9288
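/* Worked example (hypothetical instructions and values): if an earlier
   "movl r2 = 0x4000000000000000" made gr_values[2] known, then a later
   access through rr[r2] is entered above as the specific resource RR#2,
   because (0x4000000000000000 >> 61) & 0x7 is 2.  If the value of r2 is
   not known, the entry is instead marked non-specific (specific == 0)
   and conflicts with any RR access.  */
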
9289 /* Clear branch flags on marked resources. This breaks the link between the
9290 QP of the marking instruction and a subsequent branch on the same QP. */
9291
9292 static void
9293 clear_qp_branch_flag (valueT mask)
9294 {
9295 int i;
9296 for (i = 0; i < regdepslen; i++)
9297 {
9298 valueT bit = ((valueT) 1 << regdeps[i].qp_regno);
9299 if ((bit & mask) != 0)
9300 {
9301 regdeps[i].link_to_qp_branch = 0;
9302 }
9303 }
9304 }
9305
9306 /* MASK contains 2 and only 2 PRs which are mutually exclusive. Remove
9307 any mutexes which contain one of the PRs and create new ones when
9308 needed. */
9309
9310 static int
9311 update_qp_mutex (valueT mask)
9312 {
9313 int i;
9314 int add = 0;
9315
9316 i = 0;
9317 while (i < qp_mutexeslen)
9318 {
9319 if ((qp_mutexes[i].prmask & mask) != 0)
9320 {
9321 /* If it destroys and creates the same mutex, do nothing. */
9322 if (qp_mutexes[i].prmask == mask
9323 && qp_mutexes[i].path == md.path)
9324 {
9325 i++;
9326 add = -1;
9327 }
9328 else
9329 {
9330 int keep = 0;
9331
9332 if (md.debug_dv)
9333 {
9334 fprintf (stderr, " Clearing mutex relation");
9335 print_prmask (qp_mutexes[i].prmask);
9336 fprintf (stderr, "\n");
9337 }
9338
9339 /* Deal with an old mutex with 3 or more PRs only if
9340 the new mutex is on the same execution path as it.
9341
9342 FIXME: The 3+ mutex support is incomplete.
9343 dot_pred_rel () may be a better place to fix it. */
9344 if (qp_mutexes[i].path == md.path)
9345 {
9346 /* If it is a proper subset of the mutex, create a
9347 new mutex. */
9348 if (add == 0
9349 && (qp_mutexes[i].prmask & mask) == mask)
9350 add = 1;
9351
9352 qp_mutexes[i].prmask &= ~mask;
9353 if (qp_mutexes[i].prmask & (qp_mutexes[i].prmask - 1))
9354 {
9355 /* Keep the modified mutex if more than one
9356 PR is left. */
9357 keep = 1;
9358 i++;
9359 }
9360 }
9361
9362 if (keep == 0)
9363 /* Remove the mutex. */
9364 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9365 }
9366 }
9367 else
9368 ++i;
9369 }
9370
9371 if (add == 1)
9372 add_qp_mutex (mask);
9373
9374 return add;
9375 }
9376
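/* Worked example (hypothetical state): suppose the mutex {PR4, PR5, PR6}
   is recorded on the current path and a new compare makes PR4 and PR5
   mutex, so MASK has bits 4 and 5 set.  MASK is then a proper subset of
   the old prmask, so ADD becomes 1; clearing bits 4 and 5 from the old
   mutex leaves only PR6, so that mutex is dropped, and add_qp_mutex ()
   finally records the new {PR4, PR5} pair.  */
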
9377 /* Remove any mutexes which contain any of the PRs indicated in the mask.
9378
9379 Any changes to a PR clears the mutex relations which include that PR. */
9380
9381 static void
9382 clear_qp_mutex (valueT mask)
9383 {
9384 int i;
9385
9386 i = 0;
9387 while (i < qp_mutexeslen)
9388 {
9389 if ((qp_mutexes[i].prmask & mask) != 0)
9390 {
9391 if (md.debug_dv)
9392 {
9393 fprintf (stderr, " Clearing mutex relation");
9394 print_prmask (qp_mutexes[i].prmask);
9395 fprintf (stderr, "\n");
9396 }
9397 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9398 }
9399 else
9400 ++i;
9401 }
9402 }
9403
9404 /* Clear implies relations which contain PRs in the given masks.
9405 P1_MASK indicates the source of the implies relation, while P2_MASK
9406 indicates the implied PR. */
9407
9408 static void
9409 clear_qp_implies (valueT p1_mask, valueT p2_mask)
9410 {
9411 int i;
9412
9413 i = 0;
9414 while (i < qp_implieslen)
9415 {
9416 if ((((valueT) 1 << qp_implies[i].p1) & p1_mask) != 0
9417 || (((valueT) 1 << qp_implies[i].p2) & p2_mask) != 0)
9418 {
9419 if (md.debug_dv)
9420 fprintf (stderr, "Clearing implied relation PR%d->PR%d\n",
9421 qp_implies[i].p1, qp_implies[i].p2);
9422 qp_implies[i] = qp_implies[--qp_implieslen];
9423 }
9424 else
9425 ++i;
9426 }
9427 }
9428
9429 /* Record that P1 implies P2 in the list of implied relations. */
9430
9431 static void
9432 add_qp_imply (int p1, int p2)
9433 {
9434 valueT mask;
9435 valueT bit;
9436 int i;
9437
9438 /* p0 is not meaningful here. */
9439 if (p1 == 0 || p2 == 0)
9440 abort ();
9441
9442 if (p1 == p2)
9443 return;
9444
9445 /* If it exists already, ignore it. */
9446 for (i = 0; i < qp_implieslen; i++)
9447 {
9448 if (qp_implies[i].p1 == p1
9449 && qp_implies[i].p2 == p2
9450 && qp_implies[i].path == md.path
9451 && !qp_implies[i].p2_branched)
9452 return;
9453 }
9454
9455 if (qp_implieslen == qp_impliestotlen)
9456 {
9457 qp_impliestotlen += 20;
9458 qp_implies = (struct qp_imply *)
9459 xrealloc ((void *) qp_implies,
9460 qp_impliestotlen * sizeof (struct qp_imply));
9461 }
9462 if (md.debug_dv)
9463 fprintf (stderr, " Registering PR%d implies PR%d\n", p1, p2);
9464 qp_implies[qp_implieslen].p1 = p1;
9465 qp_implies[qp_implieslen].p2 = p2;
9466 qp_implies[qp_implieslen].path = md.path;
9467 qp_implies[qp_implieslen++].p2_branched = 0;
9468
9469 /* Add in the implied transitive relations; for everything that p2 implies,
9470 make p1 imply that, too; for everything that implies p1, make it imply p2
9471 as well. */
9472 for (i = 0; i < qp_implieslen; i++)
9473 {
9474 if (qp_implies[i].p1 == p2)
9475 add_qp_imply (p1, qp_implies[i].p2);
9476 if (qp_implies[i].p2 == p1)
9477 add_qp_imply (qp_implies[i].p1, p2);
9478 }
9479 /* Add in mutex relations implied by this implies relation; for each mutex
9480 relation containing p2, duplicate it and replace p2 with p1. */
9481 bit = (valueT) 1 << p1;
9482 mask = (valueT) 1 << p2;
9483 for (i = 0; i < qp_mutexeslen; i++)
9484 {
9485 if (qp_mutexes[i].prmask & mask)
9486 add_qp_mutex ((qp_mutexes[i].prmask & ~mask) | bit);
9487 }
9488 }
9489
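/* Worked example (hypothetical predicates): calling add_qp_imply (5, 6)
   when "PR6 implies PR7" is already on record also registers
   "PR5 implies PR7" via the transitive pass above, and if {PR6, PR8} is
   currently a mutex pair, the final loop adds the mutex {PR5, PR8} by
   duplicating the relation with PR6 replaced by PR5.  */
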
9490 /* Add the PRs specified in the mask to the mutex list; this means that only
9491 one of the PRs can be true at any time. PR0 should never be included in
9492 the mask. */
9493
9494 static void
9495 add_qp_mutex (valueT mask)
9496 {
9497 if (mask & 0x1)
9498 abort ();
9499
9500 if (qp_mutexeslen == qp_mutexestotlen)
9501 {
9502 qp_mutexestotlen += 20;
9503 qp_mutexes = (struct qpmutex *)
9504 xrealloc ((void *) qp_mutexes,
9505 qp_mutexestotlen * sizeof (struct qpmutex));
9506 }
9507 if (md.debug_dv)
9508 {
9509 fprintf (stderr, " Registering mutex on");
9510 print_prmask (mask);
9511 fprintf (stderr, "\n");
9512 }
9513 qp_mutexes[qp_mutexeslen].path = md.path;
9514 qp_mutexes[qp_mutexeslen++].prmask = mask;
9515 }
9516
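/* Note on the encoding used above and elsewhere: predicate register PRn is
   represented by bit n of a prmask, so a mutex between PR6 and PR7 is
   stored as ((valueT) 1 << 6) | ((valueT) 1 << 7).  PR0 (bit 0) can never
   be part of a mutex, hence the abort above.  */
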
9517 static int
9518 has_suffix_p (const char *name, const char *suffix)
9519 {
9520 size_t namelen = strlen (name);
9521 size_t sufflen = strlen (suffix);
9522
9523 if (namelen <= sufflen)
9524 return 0;
9525 return strcmp (name + namelen - sufflen, suffix) == 0;
9526 }
9527
9528 static void
9529 clear_register_values (void)
9530 {
9531 int i;
9532 if (md.debug_dv)
9533 fprintf (stderr, " Clearing register values\n");
9534 for (i = 1; i < NELEMS (gr_values); i++)
9535 gr_values[i].known = 0;
9536 }
9537
9538 /* Keep track of register values/changes which affect DV tracking.
9539
9540 Optimization note: we should add a flag to classes of insns where we
9541 otherwise have to examine a group of strings to identify them. */
9542
9543 static void
9544 note_register_values (struct ia64_opcode *idesc)
9545 {
9546 valueT qp_changemask = 0;
9547 int i;
9548
9549 /* Invalidate values for registers being written to. */
9550 for (i = 0; i < idesc->num_outputs; i++)
9551 {
9552 if (idesc->operands[i] == IA64_OPND_R1
9553 || idesc->operands[i] == IA64_OPND_R2
9554 || idesc->operands[i] == IA64_OPND_R3)
9555 {
9556 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9557 if (regno > 0 && regno < NELEMS (gr_values))
9558 gr_values[regno].known = 0;
9559 }
9560 else if (idesc->operands[i] == IA64_OPND_R3_2)
9561 {
9562 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9563 if (regno > 0 && regno < 4)
9564 gr_values[regno].known = 0;
9565 }
9566 else if (idesc->operands[i] == IA64_OPND_P1
9567 || idesc->operands[i] == IA64_OPND_P2)
9568 {
9569 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
9570 qp_changemask |= (valueT) 1 << regno;
9571 }
9572 else if (idesc->operands[i] == IA64_OPND_PR)
9573 {
9574 if (idesc->operands[2] & (valueT) 0x10000)
9575 qp_changemask = ~(valueT) 0x1FFFF | idesc->operands[2];
9576 else
9577 qp_changemask = idesc->operands[2];
9578 break;
9579 }
9580 else if (idesc->operands[i] == IA64_OPND_PR_ROT)
9581 {
9582 if (idesc->operands[1] & ((valueT) 1 << 43))
9583 qp_changemask = -((valueT) 1 << 44) | idesc->operands[1];
9584 else
9585 qp_changemask = idesc->operands[1];
9586 qp_changemask &= ~(valueT) 0xFFFF;
9587 break;
9588 }
9589 }
9590
9591 /* Always clear qp branch flags on any PR change. */
9592 /* FIXME there may be exceptions for certain compares. */
9593 clear_qp_branch_flag (qp_changemask);
9594
9595 /* Invalidate rotating registers on insns which affect RRBs in CFM. */
9596 if (idesc->flags & IA64_OPCODE_MOD_RRBS)
9597 {
9598 qp_changemask |= ~(valueT) 0xFFFF;
9599 if (strcmp (idesc->name, "clrrrb.pr") != 0)
9600 {
9601 for (i = 32; i < 32 + md.rot.num_regs; i++)
9602 gr_values[i].known = 0;
9603 }
9604 clear_qp_mutex (qp_changemask);
9605 clear_qp_implies (qp_changemask, qp_changemask);
9606 }
9607 /* After a call, all register values are undefined, except those marked
9608 as "safe". */
9609 else if (strncmp (idesc->name, "br.call", 6) == 0
9610 || strncmp (idesc->name, "brl.call", 7) == 0)
9611 {
9612 /* FIXME keep GR values which are marked as "safe_across_calls" */
9613 clear_register_values ();
9614 clear_qp_mutex (~qp_safe_across_calls);
9615 clear_qp_implies (~qp_safe_across_calls, ~qp_safe_across_calls);
9616 clear_qp_branch_flag (~qp_safe_across_calls);
9617 }
9618 else if (is_interruption_or_rfi (idesc)
9619 || is_taken_branch (idesc))
9620 {
9621 clear_register_values ();
9622 clear_qp_mutex (~(valueT) 0);
9623 clear_qp_implies (~(valueT) 0, ~(valueT) 0);
9624 }
9625 /* Look for mutex and implies relations. */
9626 else if ((idesc->operands[0] == IA64_OPND_P1
9627 || idesc->operands[0] == IA64_OPND_P2)
9628 && (idesc->operands[1] == IA64_OPND_P1
9629 || idesc->operands[1] == IA64_OPND_P2))
9630 {
9631 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9632 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9633 valueT p1mask = (p1 != 0) ? (valueT) 1 << p1 : 0;
9634 valueT p2mask = (p2 != 0) ? (valueT) 1 << p2 : 0;
9635
9636 /* If both PRs are PR0, we can't really do anything. */
9637 if (p1 == 0 && p2 == 0)
9638 {
9639 if (md.debug_dv)
9640 fprintf (stderr, " Ignoring PRs due to inclusion of p0\n");
9641 }
9642 /* In general, clear mutexes and implies which include P1 or P2,
9643 with the following exceptions. */
9644 else if (has_suffix_p (idesc->name, ".or.andcm")
9645 || has_suffix_p (idesc->name, ".and.orcm"))
9646 {
9647 clear_qp_implies (p2mask, p1mask);
9648 }
9649 else if (has_suffix_p (idesc->name, ".andcm")
9650 || has_suffix_p (idesc->name, ".and"))
9651 {
9652 clear_qp_implies (0, p1mask | p2mask);
9653 }
9654 else if (has_suffix_p (idesc->name, ".orcm")
9655 || has_suffix_p (idesc->name, ".or"))
9656 {
9657 clear_qp_mutex (p1mask | p2mask);
9658 clear_qp_implies (p1mask | p2mask, 0);
9659 }
9660 else
9661 {
9662 int added = 0;
9663
9664 clear_qp_implies (p1mask | p2mask, p1mask | p2mask);
9665
9666 /* If one of the PRs is PR0, we call clear_qp_mutex. */
9667 if (p1 == 0 || p2 == 0)
9668 clear_qp_mutex (p1mask | p2mask);
9669 else
9670 added = update_qp_mutex (p1mask | p2mask);
9671
9672 if (CURR_SLOT.qp_regno == 0
9673 || has_suffix_p (idesc->name, ".unc"))
9674 {
9675 if (added == 0 && p1 && p2)
9676 add_qp_mutex (p1mask | p2mask);
9677 if (CURR_SLOT.qp_regno != 0)
9678 {
9679 if (p1)
9680 add_qp_imply (p1, CURR_SLOT.qp_regno);
9681 if (p2)
9682 add_qp_imply (p2, CURR_SLOT.qp_regno);
9683 }
9684 }
9685 }
9686 }
9687 /* Look for mov imm insns into GRs. */
9688 else if (idesc->operands[0] == IA64_OPND_R1
9689 && (idesc->operands[1] == IA64_OPND_IMM22
9690 || idesc->operands[1] == IA64_OPND_IMMU64)
9691 && CURR_SLOT.opnd[1].X_op == O_constant
9692 && (strcmp (idesc->name, "mov") == 0
9693 || strcmp (idesc->name, "movl") == 0))
9694 {
9695 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
9696 if (regno > 0 && regno < NELEMS (gr_values))
9697 {
9698 gr_values[regno].known = 1;
9699 gr_values[regno].value = CURR_SLOT.opnd[1].X_add_number;
9700 gr_values[regno].path = md.path;
9701 if (md.debug_dv)
9702 {
9703 fprintf (stderr, " Know gr%d = ", regno);
9704 fprintf_vma (stderr, gr_values[regno].value);
9705 fputs ("\n", stderr);
9706 }
9707 }
9708 }
9709 /* Look for dep.z imm insns. */
9710 else if (idesc->operands[0] == IA64_OPND_R1
9711 && idesc->operands[1] == IA64_OPND_IMM8
9712 && strcmp (idesc->name, "dep.z") == 0)
9713 {
9714 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
9715 if (regno > 0 && regno < NELEMS (gr_values))
9716 {
9717 valueT value = CURR_SLOT.opnd[1].X_add_number;
9718
9719 if (CURR_SLOT.opnd[3].X_add_number < 64)
9720 value &= ((valueT)1 << CURR_SLOT.opnd[3].X_add_number) - 1;
9721 value <<= CURR_SLOT.opnd[2].X_add_number;
9722 gr_values[regno].known = 1;
9723 gr_values[regno].value = value;
9724 gr_values[regno].path = md.path;
9725 if (md.debug_dv)
9726 {
9727 fprintf (stderr, " Know gr%d = ", regno);
9728 fprintf_vma (stderr, gr_values[regno].value);
9729 fputs ("\n", stderr);
9730 }
9731 }
9732 }
9733 else
9734 {
9735 clear_qp_mutex (qp_changemask);
9736 clear_qp_implies (qp_changemask, qp_changemask);
9737 }
9738 }
9739
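/* Worked example (hypothetical instructions):

     movl r3 = 0x1234        gr_values[3] becomes known with value 0x1234
     dep.z r4 = 5, 3, 4      gr_values[4] becomes known with value 0x28
     add r3 = 8, r3          r3 is written by an untracked insn, so
                             gr_values[3].known is cleared again

   Only "mov"/"movl" of a constant and "dep.z" with an immediate source are
   tracked; any other write to a GR simply invalidates what was known about
   it.  */
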
9740 /* Return whether the given predicate registers are currently mutex. */
9741
9742 static int
9743 qp_mutex (int p1, int p2, int path)
9744 {
9745 int i;
9746 valueT mask;
9747
9748 if (p1 != p2)
9749 {
9750 mask = ((valueT) 1 << p1) | ((valueT) 1 << p2);
9751 for (i = 0; i < qp_mutexeslen; i++)
9752 {
9753 if (qp_mutexes[i].path >= path
9754 && (qp_mutexes[i].prmask & mask) == mask)
9755 return 1;
9756 }
9757 }
9758 return 0;
9759 }
9760
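/* For instance, qp_mutex (6, 7, md.path) returns 1 only if some recorded
   mutex whose path is at least md.path has both bit 6 and bit 7 set in its
   prmask.  */
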
9761 /* Return whether the given resource is in the given insn's list of chks.
9762 Return 1 if the conflict is absolutely determined, 2 if it's a potential
9763 conflict. */
9764
9765 static int
9766 resources_match (struct rsrc *rs,
9767 struct ia64_opcode *idesc,
9768 int note,
9769 int qp_regno,
9770 int path)
9771 {
9772 struct rsrc specs[MAX_SPECS];
9773 int count;
9774
9775 /* If the marked resource's qp_regno and the given qp_regno are mutex,
9776 we don't need to check. One exception is note 11, which indicates that
9777 target predicates are written regardless of PR[qp]. */
9778 if (qp_mutex (rs->qp_regno, qp_regno, path)
9779 && note != 11)
9780 return 0;
9781
9782 count = specify_resource (rs->dependency, idesc, DV_CHK, specs, note, path);
9783 while (count-- > 0)
9784 {
9785 /* UNAT checking is a bit more specific than for other resources.  */
9786 if (rs->dependency->specifier == IA64_RS_AR_UNAT
9787 && specs[count].mem_offset.hint
9788 && rs->mem_offset.hint)
9789 {
9790 if (rs->mem_offset.base == specs[count].mem_offset.base)
9791 {
9792 if (((rs->mem_offset.offset >> 3) & 0x3F) ==
9793 ((specs[count].mem_offset.offset >> 3) & 0x3F))
9794 return 1;
9795 else
9796 continue;
9797 }
9798 }
9799
9800 /* Skip apparent PR write conflicts where both writes are an AND or both
9801 writes are an OR. */
9802 if (rs->dependency->specifier == IA64_RS_PR
9803 || rs->dependency->specifier == IA64_RS_PRr
9804 || rs->dependency->specifier == IA64_RS_PR63)
9805 {
9806 if (specs[count].cmp_type != CMP_NONE
9807 && specs[count].cmp_type == rs->cmp_type)
9808 {
9809 if (md.debug_dv)
9810 fprintf (stderr, " %s on parallel compare allowed (PR%d)\n",
9811 dv_mode[rs->dependency->mode],
9812 rs->dependency->specifier != IA64_RS_PR63 ?
9813 specs[count].index : 63);
9814 continue;
9815 }
9816 if (md.debug_dv)
9817 fprintf (stderr,
9818 " %s on parallel compare conflict %s vs %s on PR%d\n",
9819 dv_mode[rs->dependency->mode],
9820 dv_cmp_type[rs->cmp_type],
9821 dv_cmp_type[specs[count].cmp_type],
9822 rs->dependency->specifier != IA64_RS_PR63 ?
9823 specs[count].index : 63);
9824
9825 }
9826
9827 /* If either resource is not specific, conservatively assume a
9828 conflict.  */
9829 if (!specs[count].specific || !rs->specific)
9830 return 2;
9831 else if (specs[count].index == rs->index)
9832 return 1;
9833 }
9834
9835 return 0;
9836 }
9837
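/* Summary of the returns above: an exact index match between two specific
   entries is a definite conflict (1), while a non-specific entry on either
   side yields a conservative potential conflict (2).  The AR[UNAT] case is
   narrower: two hinted memory references conflict only when they select
   the same UNAT bit, i.e. when bits 8:3 of their offsets match for the
   same base.  */
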
9838 /* Indicate an instruction group break; if INSERT_STOP is non-zero, then
9839 insert a stop to create the break. Update all resource dependencies
9840 appropriately. If QP_REGNO is non-zero, only apply the break to resources
9841 which use the same QP_REGNO and have the link_to_qp_branch flag set.
9842 If SAVE_CURRENT is non-zero, don't affect resources marked by the current
9843 instruction. */
9844
9845 static void
9846 insn_group_break (int insert_stop, int qp_regno, int save_current)
9847 {
9848 int i;
9849
9850 if (insert_stop && md.num_slots_in_use > 0)
9851 PREV_SLOT.end_of_insn_group = 1;
9852
9853 if (md.debug_dv)
9854 {
9855 fprintf (stderr, " Insn group break%s",
9856 (insert_stop ? " (w/stop)" : ""));
9857 if (qp_regno != 0)
9858 fprintf (stderr, " effective for QP=%d", qp_regno);
9859 fprintf (stderr, "\n");
9860 }
9861
9862 i = 0;
9863 while (i < regdepslen)
9864 {
9865 const struct ia64_dependency *dep = regdeps[i].dependency;
9866
9867 if (qp_regno != 0
9868 && regdeps[i].qp_regno != qp_regno)
9869 {
9870 ++i;
9871 continue;
9872 }
9873
9874 if (save_current
9875 && CURR_SLOT.src_file == regdeps[i].file
9876 && CURR_SLOT.src_line == regdeps[i].line)
9877 {
9878 ++i;
9879 continue;
9880 }
9881
9882 /* Clear dependencies which are automatically cleared by a stop, or
9883 those that have reached the appropriate state of insn serialization. */
9884 if (dep->semantics == IA64_DVS_IMPLIED
9885 || dep->semantics == IA64_DVS_IMPLIEDF
9886 || regdeps[i].insn_srlz == STATE_SRLZ)
9887 {
9888 print_dependency ("Removing", i);
9889 regdeps[i] = regdeps[--regdepslen];
9890 }
9891 else
9892 {
9893 if (dep->semantics == IA64_DVS_DATA
9894 || dep->semantics == IA64_DVS_INSTR
9895 || dep->semantics == IA64_DVS_SPECIFIC)
9896 {
9897 if (regdeps[i].insn_srlz == STATE_NONE)
9898 regdeps[i].insn_srlz = STATE_STOP;
9899 if (regdeps[i].data_srlz == STATE_NONE)
9900 regdeps[i].data_srlz = STATE_STOP;
9901 }
9902 ++i;
9903 }
9904 }
9905 }
9906
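/* Effect of a break on each dependency class, as implemented above:
   IA64_DVS_IMPLIED and IA64_DVS_IMPLIEDF entries (and anything already
   instruction-serialized) are removed outright, while DATA, INSTR and
   SPECIFIC entries only advance to STATE_STOP and still need a matching
   srlz.d or srlz.i before they go away.  */
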
9907 /* Add the given resource usage spec to the list of active dependencies. */
9908
9909 static void
9910 mark_resource (struct ia64_opcode *idesc ATTRIBUTE_UNUSED,
9911 const struct ia64_dependency *dep ATTRIBUTE_UNUSED,
9912 struct rsrc *spec,
9913 int depind,
9914 int path)
9915 {
9916 if (regdepslen == regdepstotlen)
9917 {
9918 regdepstotlen += 20;
9919 regdeps = (struct rsrc *)
9920 xrealloc ((void *) regdeps,
9921 regdepstotlen * sizeof (struct rsrc));
9922 }
9923
9924 regdeps[regdepslen] = *spec;
9925 regdeps[regdepslen].depind = depind;
9926 regdeps[regdepslen].path = path;
9927 regdeps[regdepslen].file = CURR_SLOT.src_file;
9928 regdeps[regdepslen].line = CURR_SLOT.src_line;
9929
9930 print_dependency ("Adding", regdepslen);
9931
9932 ++regdepslen;
9933 }
9934
9935 static void
9936 print_dependency (const char *action, int depind)
9937 {
9938 if (md.debug_dv)
9939 {
9940 fprintf (stderr, " %s %s '%s'",
9941 action, dv_mode[(regdeps[depind].dependency)->mode],
9942 (regdeps[depind].dependency)->name);
9943 if (regdeps[depind].specific && regdeps[depind].index >= 0)
9944 fprintf (stderr, " (%d)", regdeps[depind].index);
9945 if (regdeps[depind].mem_offset.hint)
9946 {
9947 fputs (" ", stderr);
9948 fprintf_vma (stderr, regdeps[depind].mem_offset.base);
9949 fputs ("+", stderr);
9950 fprintf_vma (stderr, regdeps[depind].mem_offset.offset);
9951 }
9952 fprintf (stderr, "\n");
9953 }
9954 }
9955
9956 static void
9957 instruction_serialization (void)
9958 {
9959 int i;
9960 if (md.debug_dv)
9961 fprintf (stderr, " Instruction serialization\n");
9962 for (i = 0; i < regdepslen; i++)
9963 if (regdeps[i].insn_srlz == STATE_STOP)
9964 regdeps[i].insn_srlz = STATE_SRLZ;
9965 }
9966
9967 static void
9968 data_serialization (void)
9969 {
9970 int i = 0;
9971 if (md.debug_dv)
9972 fprintf (stderr, " Data serialization\n");
9973 while (i < regdepslen)
9974 {
9975 if (regdeps[i].data_srlz == STATE_STOP
9976 /* Note: as of 991210, all "other" dependencies are cleared by a
9977 data serialization. This might change with new tables */
9978 || (regdeps[i].dependency)->semantics == IA64_DVS_OTHER)
9979 {
9980 print_dependency ("Removing", i);
9981 regdeps[i] = regdeps[--regdepslen];
9982 }
9983 else
9984 ++i;
9985 }
9986 }
9987
9988 /* Insert stops and serializations as needed to avoid DVs. */
9989
9990 static void
9991 remove_marked_resource (struct rsrc *rs)
9992 {
9993 switch (rs->dependency->semantics)
9994 {
9995 case IA64_DVS_SPECIFIC:
9996 if (md.debug_dv)
9997 fprintf (stderr, "Implementation-specific, assume worst case...\n");
9998 /* ...fall through... */
9999 case IA64_DVS_INSTR:
10000 if (md.debug_dv)
10001 fprintf (stderr, "Inserting instr serialization\n");
10002 if (rs->insn_srlz < STATE_STOP)
10003 insn_group_break (1, 0, 0);
10004 if (rs->insn_srlz < STATE_SRLZ)
10005 {
10006 struct slot oldslot = CURR_SLOT;
10007 /* Manually jam a srlz.i insn into the stream */
10008 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
10009 CURR_SLOT.user_template = -1;
10010 CURR_SLOT.idesc = ia64_find_opcode ("srlz.i");
10011 instruction_serialization ();
10012 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10013 if (++md.num_slots_in_use >= NUM_SLOTS)
10014 emit_one_bundle ();
10015 CURR_SLOT = oldslot;
10016 }
10017 insn_group_break (1, 0, 0);
10018 break;
10019 case IA64_DVS_OTHER: /* as of rev2 (991220) of the DV tables, all
10020 "other" types of DV are eliminated
10021 by a data serialization */
10022 case IA64_DVS_DATA:
10023 if (md.debug_dv)
10024 fprintf (stderr, "Inserting data serialization\n");
10025 if (rs->data_srlz < STATE_STOP)
10026 insn_group_break (1, 0, 0);
10027 {
10028 struct slot oldslot = CURR_SLOT;
10029 /* Manually jam a srlz.d insn into the stream */
10030 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
10031 CURR_SLOT.user_template = -1;
10032 CURR_SLOT.idesc = ia64_find_opcode ("srlz.d");
10033 data_serialization ();
10034 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10035 if (++md.num_slots_in_use >= NUM_SLOTS)
10036 emit_one_bundle ();
10037 CURR_SLOT = oldslot;
10038 }
10039 break;
10040 case IA64_DVS_IMPLIED:
10041 case IA64_DVS_IMPLIEDF:
10042 if (md.debug_dv)
10043 fprintf (stderr, "Inserting stop\n");
10044 insn_group_break (1, 0, 0);
10045 break;
10046 default:
10047 break;
10048 }
10049 }
10050
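/* Illustrative summary (the surrounding instructions depend on the DV):
   for an INSTR-class violation the assembler effectively emits

       ;;
       srlz.i
       ;;

   between the conflicting instructions; for DATA- and OTHER-class
   violations it emits a stop followed by srlz.d; IMPLIED/IMPLIEDF
   violations only need the stop itself.  */
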
10051 /* Check the resources used by the given opcode against the current dependency
10052 list.
10053
10054 The check is run once for each execution path encountered. In this case,
10055 a unique execution path is the sequence of instructions following a code
10056 entry point, e.g. the following has three execution paths, one starting
10057 at L0, one at L1, and one at L2.
10058
10059 L0: nop
10060 L1: add
10061 L2: add
10062 br.ret
10063 */
10064
10065 static void
10066 check_dependencies (struct ia64_opcode *idesc)
10067 {
10068 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
10069 int path;
10070 int i;
10071
10072 /* Note that the number of marked resources may change within the
10073 loop if in auto mode. */
10074 i = 0;
10075 while (i < regdepslen)
10076 {
10077 struct rsrc *rs = &regdeps[i];
10078 const struct ia64_dependency *dep = rs->dependency;
10079 int chkind;
10080 int note;
10081 int start_over = 0;
10082
10083 if (dep->semantics == IA64_DVS_NONE
10084 || (chkind = depends_on (rs->depind, idesc)) == -1)
10085 {
10086 ++i;
10087 continue;
10088 }
10089
10090 note = NOTE (opdeps->chks[chkind]);
10091
10092 /* Check this resource against each execution path seen thus far. */
10093 for (path = 0; path <= md.path; path++)
10094 {
10095 int matchtype;
10096
10097 /* If the dependency wasn't on the path being checked, ignore it. */
10098 if (rs->path < path)
10099 continue;
10100
10101 /* If the QP for this insn implies a QP which has branched, don't
10102 bother checking. Ed. NOTE: I don't think this check is terribly
10103 useful; what's the point of generating code which will only be
10104 reached if its QP is zero?
10105 This code was specifically inserted to handle the following code,
10106 based on notes from Intel's DV checking code, where p1 implies p2.
10107
10108 mov r4 = 2
10109 (p2) br.cond L
10110 (p1) mov r4 = 7
10111 */
10112 if (CURR_SLOT.qp_regno != 0)
10113 {
10114 int skip = 0;
10115 int implies;
10116 for (implies = 0; implies < qp_implieslen; implies++)
10117 {
10118 if (qp_implies[implies].path >= path
10119 && qp_implies[implies].p1 == CURR_SLOT.qp_regno
10120 && qp_implies[implies].p2_branched)
10121 {
10122 skip = 1;
10123 break;
10124 }
10125 }
10126 if (skip)
10127 continue;
10128 }
10129
10130 if ((matchtype = resources_match (rs, idesc, note,
10131 CURR_SLOT.qp_regno, path)) != 0)
10132 {
10133 char msg[1024];
10134 char pathmsg[256] = "";
10135 char indexmsg[256] = "";
10136 int certain = (matchtype == 1 && CURR_SLOT.qp_regno == 0);
10137
10138 if (path != 0)
10139 snprintf (pathmsg, sizeof (pathmsg),
10140 " when entry is at label '%s'",
10141 md.entry_labels[path - 1]);
10142 if (matchtype == 1 && rs->index >= 0)
10143 snprintf (indexmsg, sizeof (indexmsg),
10144 ", specific resource number is %d",
10145 rs->index);
10146 snprintf (msg, sizeof (msg),
10147 "Use of '%s' %s %s dependency '%s' (%s)%s%s",
10148 idesc->name,
10149 (certain ? "violates" : "may violate"),
10150 dv_mode[dep->mode], dep->name,
10151 dv_sem[dep->semantics],
10152 pathmsg, indexmsg);
10153
10154 if (md.explicit_mode)
10155 {
10156 as_warn ("%s", msg);
10157 if (path < md.path)
10158 as_warn (_("Only the first path encountering the conflict is reported"));
10159 as_warn_where (rs->file, rs->line,
10160 _("This is the location of the conflicting usage"));
10161 /* Don't bother checking other paths, to avoid issuing the
10162 same warning more than once. */
10163 break;
10164 }
10165 else
10166 {
10167 if (md.debug_dv)
10168 fprintf (stderr, "%s @ %s:%d\n", msg, rs->file, rs->line);
10169
10170 remove_marked_resource (rs);
10171
10172 /* Since the set of dependencies has changed, start over. */
10173 /* FIXME -- since we're removing DVs as we go, we
10174 probably don't really need to start over... */
10175 start_over = 1;
10176 break;
10177 }
10178 }
10179 }
10180 if (start_over)
10181 i = 0;
10182 else
10183 ++i;
10184 }
10185 }
10186
10187 /* Register new dependencies based on the given opcode. */
10188
10189 static void
10190 mark_resources (struct ia64_opcode *idesc)
10191 {
10192 int i;
10193 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
10194 int add_only_qp_reads = 0;
10195
10196 /* A conditional branch only uses its resources if it is taken; if it is
10197 taken, we stop following that path. The other branch types effectively
10198 *always* write their resources. If it's not taken, register only QP
10199 reads. */
10200 if (is_conditional_branch (idesc) || is_interruption_or_rfi (idesc))
10201 {
10202 add_only_qp_reads = 1;
10203 }
10204
10205 if (md.debug_dv)
10206 fprintf (stderr, "Registering '%s' resource usage\n", idesc->name);
10207
10208 for (i = 0; i < opdeps->nregs; i++)
10209 {
10210 const struct ia64_dependency *dep;
10211 struct rsrc specs[MAX_SPECS];
10212 int note;
10213 int path;
10214 int count;
10215
10216 dep = ia64_find_dependency (opdeps->regs[i]);
10217 note = NOTE (opdeps->regs[i]);
10218
10219 if (add_only_qp_reads
10220 && !(dep->mode == IA64_DV_WAR
10221 && (dep->specifier == IA64_RS_PR
10222 || dep->specifier == IA64_RS_PRr
10223 || dep->specifier == IA64_RS_PR63)))
10224 continue;
10225
10226 count = specify_resource (dep, idesc, DV_REG, specs, note, md.path);
10227
10228 while (count-- > 0)
10229 {
10230 mark_resource (idesc, dep, &specs[count],
10231 DEP (opdeps->regs[i]), md.path);
10232 }
10233
10234 /* The execution path may affect register values, which may in turn
10235 affect which indirect-access resources are accessed. */
10236 switch (dep->specifier)
10237 {
10238 default:
10239 break;
10240 case IA64_RS_CPUID:
10241 case IA64_RS_DBR:
10242 case IA64_RS_IBR:
10243 case IA64_RS_MSR:
10244 case IA64_RS_PKR:
10245 case IA64_RS_PMC:
10246 case IA64_RS_PMD:
10247 case IA64_RS_RR:
10248 for (path = 0; path < md.path; path++)
10249 {
10250 count = specify_resource (dep, idesc, DV_REG, specs, note, path);
10251 while (count-- > 0)
10252 mark_resource (idesc, dep, &specs[count],
10253 DEP (opdeps->regs[i]), path);
10254 }
10255 break;
10256 }
10257 }
10258 }
10259
10260 /* Remove dependencies when they no longer apply. */
10261
10262 static void
10263 update_dependencies (struct ia64_opcode *idesc)
10264 {
10265 int i;
10266
10267 if (strcmp (idesc->name, "srlz.i") == 0)
10268 {
10269 instruction_serialization ();
10270 }
10271 else if (strcmp (idesc->name, "srlz.d") == 0)
10272 {
10273 data_serialization ();
10274 }
10275 else if (is_interruption_or_rfi (idesc)
10276 || is_taken_branch (idesc))
10277 {
10278 /* Although technically the taken branch doesn't clear dependencies
10279 which require a srlz.[id], we don't follow the branch; the next
10280 instruction is assumed to start with a clean slate. */
10281 regdepslen = 0;
10282 md.path = 0;
10283 }
10284 else if (is_conditional_branch (idesc)
10285 && CURR_SLOT.qp_regno != 0)
10286 {
10287 int is_call = strstr (idesc->name, ".call") != NULL;
10288
10289 for (i = 0; i < qp_implieslen; i++)
10290 {
10291 /* If the conditional branch's predicate is implied by the predicate
10292 in an existing dependency, remove that dependency. */
10293 if (qp_implies[i].p2 == CURR_SLOT.qp_regno)
10294 {
10295 int depind = 0;
10296 /* Note that this implied predicate takes a branch so that if
10297 a later insn generates a DV but its predicate implies this
10298 one, we can avoid the false DV warning. */
10299 qp_implies[i].p2_branched = 1;
10300 while (depind < regdepslen)
10301 {
10302 if (regdeps[depind].qp_regno == qp_implies[i].p1)
10303 {
10304 print_dependency ("Removing", depind);
10305 regdeps[depind] = regdeps[--regdepslen];
10306 }
10307 else
10308 ++depind;
10309 }
10310 }
10311 }
10312 /* Any marked resources which have this same predicate should be
10313 cleared, provided that the QP hasn't been modified between the
10314 marking instruction and the branch. */
10315 if (is_call)
10316 {
10317 insn_group_break (0, CURR_SLOT.qp_regno, 1);
10318 }
10319 else
10320 {
10321 i = 0;
10322 while (i < regdepslen)
10323 {
10324 if (regdeps[i].qp_regno == CURR_SLOT.qp_regno
10325 && regdeps[i].link_to_qp_branch
10326 && (regdeps[i].file != CURR_SLOT.src_file
10327 || regdeps[i].line != CURR_SLOT.src_line))
10328 {
10329 /* Treat like a taken branch */
10330 print_dependency ("Removing", i);
10331 regdeps[i] = regdeps[--regdepslen];
10332 }
10333 else
10334 ++i;
10335 }
10336 }
10337 }
10338 }
10339
10340 /* Examine the current instruction for dependency violations. */
10341
10342 static int
10343 check_dv (struct ia64_opcode *idesc)
10344 {
10345 if (md.debug_dv)
10346 {
10347 fprintf (stderr, "Checking %s for violations (line %d, %d/%d)\n",
10348 idesc->name, CURR_SLOT.src_line,
10349 idesc->dependencies->nchks,
10350 idesc->dependencies->nregs);
10351 }
10352
10353 /* Look through the list of currently marked resources; if the current
10354 instruction has the dependency in its chks list which uses that resource,
10355 check against the specific resources used. */
10356 check_dependencies (idesc);
10357
10358 /* Look up the instruction's regdeps (RAW writes, WAW writes, and WAR reads),
10359 then add them to the list of marked resources. */
10360 mark_resources (idesc);
10361
10362 /* There are several types of dependency semantics, and each has its own
10363 requirements for being cleared
10364
10365 Instruction serialization (insns separated by interruption, rfi, or
10366 writer + srlz.i + reader, all in separate groups) clears DVS_INSTR.
10367
10368 Data serialization (instruction serialization, or writer + srlz.d +
10369 reader, where writer and srlz.d are in separate groups) clears
10370 DVS_DATA. (This also clears DVS_OTHER, but that is not guaranteed to
10371 always be the case).
10372
10373 Instruction group break (groups separated by stop, taken branch,
10374 interruption or rfi) clears DVS_IMPLIED and DVS_IMPLIEDF.
10375 */
10376 update_dependencies (idesc);
10377
10378 /* Sometimes, knowing a register value allows us to avoid giving a false DV
10379 warning. Keep track of as many as possible that are useful. */
10380 note_register_values (idesc);
10381
10382 /* We don't need or want this anymore. */
10383 md.mem_offset.hint = 0;
10384
10385 return 0;
10386 }
10387
10388 /* Translate one line of assembly. Pseudo ops and labels do not show
10389 up here. */
10390 void
10391 md_assemble (char *str)
10392 {
10393 char *saved_input_line_pointer, *mnemonic;
10394 const struct pseudo_opcode *pdesc;
10395 struct ia64_opcode *idesc;
10396 unsigned char qp_regno;
10397 unsigned int flags;
10398 int ch;
10399
10400 saved_input_line_pointer = input_line_pointer;
10401 input_line_pointer = str;
10402
10403 /* extract the opcode (mnemonic): */
10404
10405 mnemonic = input_line_pointer;
10406 ch = get_symbol_end ();
10407 pdesc = (struct pseudo_opcode *) hash_find (md.pseudo_hash, mnemonic);
10408 if (pdesc)
10409 {
10410 *input_line_pointer = ch;
10411 (*pdesc->handler) (pdesc->arg);
10412 goto done;
10413 }
10414
10415 /* Find the instruction descriptor matching the arguments. */
10416
10417 idesc = ia64_find_opcode (mnemonic);
10418 *input_line_pointer = ch;
10419 if (!idesc)
10420 {
10421 as_bad (_("Unknown opcode `%s'"), mnemonic);
10422 goto done;
10423 }
10424
10425 idesc = parse_operands (idesc);
10426 if (!idesc)
10427 goto done;
10428
10429 /* Handle the dynamic ops we can handle now: */
10430 if (idesc->type == IA64_TYPE_DYN)
10431 {
10432 if (strcmp (idesc->name, "add") == 0)
10433 {
10434 if (CURR_SLOT.opnd[2].X_op == O_register
10435 && CURR_SLOT.opnd[2].X_add_number < 4)
10436 mnemonic = "addl";
10437 else
10438 mnemonic = "adds";
10439 ia64_free_opcode (idesc);
10440 idesc = ia64_find_opcode (mnemonic);
10441 }
10442 else if (strcmp (idesc->name, "mov") == 0)
10443 {
10444 enum ia64_opnd opnd1, opnd2;
10445 int rop;
10446
10447 opnd1 = idesc->operands[0];
10448 opnd2 = idesc->operands[1];
10449 if (opnd1 == IA64_OPND_AR3)
10450 rop = 0;
10451 else if (opnd2 == IA64_OPND_AR3)
10452 rop = 1;
10453 else
10454 abort ();
10455 if (CURR_SLOT.opnd[rop].X_op == O_register)
10456 {
10457 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10458 mnemonic = "mov.i";
10459 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10460 mnemonic = "mov.m";
10461 else
10462 rop = -1;
10463 }
10464 else
10465 abort ();
10466 if (rop >= 0)
10467 {
10468 ia64_free_opcode (idesc);
10469 idesc = ia64_find_opcode (mnemonic);
10470 while (idesc != NULL
10471 && (idesc->operands[0] != opnd1
10472 || idesc->operands[1] != opnd2))
10473 idesc = get_next_opcode (idesc);
10474 }
10475 }
10476 }
10477 else if (strcmp (idesc->name, "mov.i") == 0
10478 || strcmp (idesc->name, "mov.m") == 0)
10479 {
10480 enum ia64_opnd opnd1, opnd2;
10481 int rop;
10482
10483 opnd1 = idesc->operands[0];
10484 opnd2 = idesc->operands[1];
10485 if (opnd1 == IA64_OPND_AR3)
10486 rop = 0;
10487 else if (opnd2 == IA64_OPND_AR3)
10488 rop = 1;
10489 else
10490 abort ();
10491 if (CURR_SLOT.opnd[rop].X_op == O_register)
10492 {
10493 char unit = 'a';
10494 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10495 unit = 'i';
10496 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10497 unit = 'm';
10498 if (unit != 'a' && unit != idesc->name [4])
10499 as_bad (_("AR %d can only be accessed by %c-unit"),
10500 (int) (CURR_SLOT.opnd[rop].X_add_number - REG_AR),
10501 TOUPPER (unit));
10502 }
10503 }
10504 else if (strcmp (idesc->name, "hint.b") == 0)
10505 {
10506 switch (md.hint_b)
10507 {
10508 case hint_b_ok:
10509 break;
10510 case hint_b_warning:
10511 as_warn (_("hint.b may be treated as nop"));
10512 break;
10513 case hint_b_error:
10514 as_bad (_("hint.b shouldn't be used"));
10515 break;
10516 }
10517 }
10518
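/* Pick up the qualifying predicate parsed with the mnemonic; p0, which
   is hardwired to 1, is used when no predicate was written.  */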
10519 qp_regno = 0;
10520 if (md.qp.X_op == O_register)
10521 {
10522 qp_regno = md.qp.X_add_number - REG_P;
10523 md.qp.X_op = O_absent;
10524 }
10525
10526 flags = idesc->flags;
10527
10528 if ((flags & IA64_OPCODE_FIRST) != 0)
10529 {
10530 /* The alignment frag has to end with a stop bit only if the
10531 next instruction after the alignment directive has to be
10532 the first instruction in an instruction group. */
10533 if (align_frag)
10534 {
10535 while (align_frag->fr_type != rs_align_code)
10536 {
10537 align_frag = align_frag->fr_next;
10538 if (!align_frag)
10539 break;
10540 }
10541 /* align_frag can be NULL if there are directives in
10542 between. */
10543 if (align_frag && align_frag->fr_next == frag_now)
10544 align_frag->tc_frag_data = 1;
10545 }
10546
10547 insn_group_break (1, 0, 0);
10548 }
10549 align_frag = NULL;
10550
10551 if ((flags & IA64_OPCODE_NO_PRED) != 0 && qp_regno != 0)
10552 {
10553 as_bad (_("`%s' cannot be predicated"), idesc->name);
10554 goto done;
10555 }
10556
10557 /* Build the instruction. */
10558 CURR_SLOT.qp_regno = qp_regno;
10559 CURR_SLOT.idesc = idesc;
10560 as_where (&CURR_SLOT.src_file, &CURR_SLOT.src_line);
10561 dwarf2_where (&CURR_SLOT.debug_line);
10562 dwarf2_consume_line_info ();
10563
10564 /* Add unwind entries, if there are any. */
10565 if (unwind.current_entry)
10566 {
10567 CURR_SLOT.unwind_record = unwind.current_entry;
10568 unwind.current_entry = NULL;
10569 }
10570 if (unwind.pending_saves)
10571 {
10572 if (unwind.pending_saves->next)
10573 {
10574 /* Attach the next pending save to the next slot so that its
10575 slot number will get set correctly. */
10576 add_unwind_entry (unwind.pending_saves->next, NOT_A_CHAR);
10577 unwind.pending_saves = &unwind.pending_saves->next->r.record.p;
10578 }
10579 else
10580 unwind.pending_saves = NULL;
10581 }
10582 if (unwind.proc_pending.sym && S_IS_DEFINED (unwind.proc_pending.sym))
10583 unwind.insn = 1;
10584
10585 /* Check for dependency violations. */
10586 if (md.detect_dv)
10587 check_dv (idesc);
10588
10589 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10590 if (++md.num_slots_in_use >= NUM_SLOTS)
10591 emit_one_bundle ();
10592
10593 if ((flags & IA64_OPCODE_LAST) != 0)
10594 insn_group_break (1, 0, 0);
10595
10596 md.last_text_seg = now_seg;
10597
10598 done:
10599 input_line_pointer = saved_input_line_pointer;
10600 }
10601
10602 /* Called when symbol NAME cannot be found in the symbol table.
10603 Should be used for dynamically-valued symbols only. */
10604
10605 symbolS *
10606 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
10607 {
10608 return 0;
10609 }
10610
10611 /* Called for any expression that cannot be recognized. When the
10612 function is called, `input_line_pointer' will point to the start of
10613 the expression. */
10614
10615 void
10616 md_operand (expressionS *e)
10617 {
10618 switch (*input_line_pointer)
10619 {
10620 case '[':
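/* A bracketed index such as "[r3]", used e.g. for the indirect
   register files (dbr[r3], pmc[r4]); the index must be a general
   register.  */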
10621 ++input_line_pointer;
10622 expression_and_evaluate (e);
10623 if (*input_line_pointer != ']')
10624 {
10625 as_bad (_("Closing bracket missing"));
10626 goto err;
10627 }
10628 else
10629 {
10630 if (e->X_op != O_register
10631 || e->X_add_number < REG_GR
10632 || e->X_add_number > REG_GR + 127)
10633 {
10634 as_bad (_("Index must be a general register"));
10635 e->X_add_number = REG_GR;
10636 }
10637
10638 ++input_line_pointer;
10639 e->X_op = O_index;
10640 }
10641 break;
10642
10643 default:
10644 break;
10645 }
10646 return;
10647
10648 err:
10649 ignore_rest_of_line ();
10650 }
10651
10652 /* Return 1 if it's OK to adjust a reloc by replacing the symbol with
10653 a section symbol plus some offset. For relocs involving @fptr(),
10654 we don't want such adjustments since we need to have the
10655 original symbol's name in the reloc. */
10656 int
10657 ia64_fix_adjustable (fixS *fix)
10658 {
10659 /* Prevent all adjustments to global symbols */
10660 if (S_IS_EXTERNAL (fix->fx_addsy) || S_IS_WEAK (fix->fx_addsy))
10661 return 0;
10662
10663 switch (fix->fx_r_type)
10664 {
10665 case BFD_RELOC_IA64_FPTR64I:
10666 case BFD_RELOC_IA64_FPTR32MSB:
10667 case BFD_RELOC_IA64_FPTR32LSB:
10668 case BFD_RELOC_IA64_FPTR64MSB:
10669 case BFD_RELOC_IA64_FPTR64LSB:
10670 case BFD_RELOC_IA64_LTOFF_FPTR22:
10671 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10672 return 0;
10673 default:
10674 break;
10675 }
10676
10677 return 1;
10678 }
10679
10680 int
10681 ia64_force_relocation (fixS *fix)
10682 {
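/* Function-pointer, GOT and PLT relocs must stay attached to the
   original symbol, and @ltoffx/ldxmov pairs must survive so the linker
   can relax them.  */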
10683 switch (fix->fx_r_type)
10684 {
10685 case BFD_RELOC_IA64_FPTR64I:
10686 case BFD_RELOC_IA64_FPTR32MSB:
10687 case BFD_RELOC_IA64_FPTR32LSB:
10688 case BFD_RELOC_IA64_FPTR64MSB:
10689 case BFD_RELOC_IA64_FPTR64LSB:
10690
10691 case BFD_RELOC_IA64_LTOFF22:
10692 case BFD_RELOC_IA64_LTOFF64I:
10693 case BFD_RELOC_IA64_LTOFF_FPTR22:
10694 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10695 case BFD_RELOC_IA64_PLTOFF22:
10696 case BFD_RELOC_IA64_PLTOFF64I:
10697 case BFD_RELOC_IA64_PLTOFF64MSB:
10698 case BFD_RELOC_IA64_PLTOFF64LSB:
10699
10700 case BFD_RELOC_IA64_LTOFF22X:
10701 case BFD_RELOC_IA64_LDXMOV:
10702 return 1;
10703
10704 default:
10705 break;
10706 }
10707
10708 return generic_force_reloc (fix);
10709 }
10710
10711 /* Decide from what point a pc-relative relocation is relative to,
10712 relative to the pc-relative fixup. Er, relatively speaking. */
10713 long
10714 ia64_pcrel_from_section (fixS *fix, segT sec)
10715 {
10716 unsigned long off = fix->fx_frag->fr_address + fix->fx_where;
10717
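/* In a code section, pc-relative values are measured from the start of
   the 16-byte bundle containing the fixup, so drop the slot bits.  */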
10718 if (bfd_get_section_flags (stdoutput, sec) & SEC_CODE)
10719 off &= ~0xfUL;
10720
10721 return off;
10722 }
10723
10724
10725 /* Used to emit section-relative relocs for the dwarf2 debug data. */
10726 void
10727 ia64_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
10728 {
10729 expressionS expr;
10730
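/* Build the equivalent of writing "@secrel (symbol)" in the source; the
   pseudo fixup is turned into a real SECREL reloc by ia64_cons_fix_new.  */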
10731 expr.X_op = O_pseudo_fixup;
10732 expr.X_op_symbol = pseudo_func[FUNC_SEC_RELATIVE].u.sym;
10733 expr.X_add_number = 0;
10734 expr.X_add_symbol = symbol;
10735 emit_expr (&expr, size);
10736 }
10737
10738 /* This is called whenever some data item (not an instruction) needs a
10739 fixup. We pick the right reloc code depending on the byte order
10740 currently in effect. */
10741 void
10742 ia64_cons_fix_new (fragS *f, int where, int nbytes, expressionS *exp)
10743 {
10744 bfd_reloc_code_real_type code;
10745 fixS *fix;
10746
10747 switch (nbytes)
10748 {
10749 /* There are no relocs for 8- and 16-bit quantities, but we allow
10750 them here since they will work fine as long as the expression
10751 is fully defined at the end of the pass over the source file. */
10752 case 1: code = BFD_RELOC_8; break;
10753 case 2: code = BFD_RELOC_16; break;
10754 case 4:
10755 if (target_big_endian)
10756 code = BFD_RELOC_IA64_DIR32MSB;
10757 else
10758 code = BFD_RELOC_IA64_DIR32LSB;
10759 break;
10760
10761 case 8:
10762 /* In 32-bit mode, data8 could mean function descriptors too. */
10763 if (exp->X_op == O_pseudo_fixup
10764 && exp->X_op_symbol
10765 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC
10766 && !(md.flags & EF_IA_64_ABI64))
10767 {
10768 if (target_big_endian)
10769 code = BFD_RELOC_IA64_IPLTMSB;
10770 else
10771 code = BFD_RELOC_IA64_IPLTLSB;
10772 exp->X_op = O_symbol;
10773 break;
10774 }
10775 else
10776 {
10777 if (target_big_endian)
10778 code = BFD_RELOC_IA64_DIR64MSB;
10779 else
10780 code = BFD_RELOC_IA64_DIR64LSB;
10781 break;
10782 }
10783
10784 case 16:
10785 if (exp->X_op == O_pseudo_fixup
10786 && exp->X_op_symbol
10787 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC)
10788 {
10789 if (target_big_endian)
10790 code = BFD_RELOC_IA64_IPLTMSB;
10791 else
10792 code = BFD_RELOC_IA64_IPLTLSB;
10793 exp->X_op = O_symbol;
10794 break;
10795 }
10796 /* FALLTHRU */
10797
10798 default:
10799 as_bad (_("Unsupported fixup size %d"), nbytes);
10800 ignore_rest_of_line ();
10801 return;
10802 }
10803
10804 if (exp->X_op == O_pseudo_fixup)
10805 {
10806 exp->X_op = O_symbol;
10807 code = ia64_gen_real_reloc_type (exp->X_op_symbol, code);
10808 /* ??? If the code comes back unchanged, the relocation is unsupported. */
10809 }
10810
10811 fix = fix_new_exp (f, where, nbytes, exp, 0, code);
10812 /* We need to store the byte order in effect in case we're going
10813 to fix an 8- or 16-bit relocation (for which there are no real
10814 relocs available). See md_apply_fix(). */
10815 fix->tc_fix_data.bigendian = target_big_endian;
10816 }
10817
10818 /* Return the actual relocation we wish to associate with the pseudo
10819 reloc described by SYM and R_TYPE. SYM should be one of the
10820 symbols in the pseudo_func array, or NULL. */
10821
10822 static bfd_reloc_code_real_type
10823 ia64_gen_real_reloc_type (struct symbol *sym, bfd_reloc_code_real_type r_type)
10824 {
10825 bfd_reloc_code_real_type new = 0;
10826 const char *type = NULL, *suffix = "";
10827
10828 if (sym == NULL)
10829 {
10830 return r_type;
10831 }
10832
10833 switch (S_GET_VALUE (sym))
10834 {
10835 case FUNC_FPTR_RELATIVE:
10836 switch (r_type)
10837 {
10838 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_FPTR64I; break;
10839 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_FPTR32MSB; break;
10840 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_FPTR32LSB; break;
10841 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_FPTR64MSB; break;
10842 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_FPTR64LSB; break;
10843 default: type = "FPTR"; break;
10844 }
10845 break;
10846
10847 case FUNC_GP_RELATIVE:
10848 switch (r_type)
10849 {
10850 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_GPREL22; break;
10851 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_GPREL64I; break;
10852 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_GPREL32MSB; break;
10853 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_GPREL32LSB; break;
10854 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_GPREL64MSB; break;
10855 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_GPREL64LSB; break;
10856 default: type = "GPREL"; break;
10857 }
10858 break;
10859
10860 case FUNC_LT_RELATIVE:
10861 switch (r_type)
10862 {
10863 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_LTOFF22; break;
10864 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_LTOFF64I; break;
10865 default: type = "LTOFF"; break;
10866 }
10867 break;
10868
10869 case FUNC_LT_RELATIVE_X:
10870 switch (r_type)
10871 {
10872 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_LTOFF22X; break;
10873 default: type = "LTOFF"; suffix = "X"; break;
10874 }
10875 break;
10876
10877 case FUNC_PC_RELATIVE:
10878 switch (r_type)
10879 {
10880 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_PCREL22; break;
10881 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_PCREL64I; break;
10882 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_PCREL32MSB; break;
10883 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_PCREL32LSB; break;
10884 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_PCREL64MSB; break;
10885 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_PCREL64LSB; break;
10886 default: type = "PCREL"; break;
10887 }
10888 break;
10889
10890 case FUNC_PLT_RELATIVE:
10891 switch (r_type)
10892 {
10893 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_PLTOFF22; break;
10894 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_PLTOFF64I; break;
10895 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_PLTOFF64MSB;break;
10896 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_PLTOFF64LSB;break;
10897 default: type = "PLTOFF"; break;
10898 }
10899 break;
10900
10901 case FUNC_SEC_RELATIVE:
10902 switch (r_type)
10903 {
10904 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_SECREL32MSB;break;
10905 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_SECREL32LSB;break;
10906 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_SECREL64MSB;break;
10907 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_SECREL64LSB;break;
10908 default: type = "SECREL"; break;
10909 }
10910 break;
10911
10912 case FUNC_SEG_RELATIVE:
10913 switch (r_type)
10914 {
10915 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_SEGREL32MSB;break;
10916 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_SEGREL32LSB;break;
10917 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_SEGREL64MSB;break;
10918 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_SEGREL64LSB;break;
10919 default: type = "SEGREL"; break;
10920 }
10921 break;
10922
10923 case FUNC_LTV_RELATIVE:
10924 switch (r_type)
10925 {
10926 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_LTV32MSB; break;
10927 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_LTV32LSB; break;
10928 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_LTV64MSB; break;
10929 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_LTV64LSB; break;
10930 default: type = "LTV"; break;
10931 }
10932 break;
10933
10934 case FUNC_LT_FPTR_RELATIVE:
10935 switch (r_type)
10936 {
10937 case BFD_RELOC_IA64_IMM22:
10938 new = BFD_RELOC_IA64_LTOFF_FPTR22; break;
10939 case BFD_RELOC_IA64_IMM64:
10940 new = BFD_RELOC_IA64_LTOFF_FPTR64I; break;
10941 case BFD_RELOC_IA64_DIR32MSB:
10942 new = BFD_RELOC_IA64_LTOFF_FPTR32MSB; break;
10943 case BFD_RELOC_IA64_DIR32LSB:
10944 new = BFD_RELOC_IA64_LTOFF_FPTR32LSB; break;
10945 case BFD_RELOC_IA64_DIR64MSB:
10946 new = BFD_RELOC_IA64_LTOFF_FPTR64MSB; break;
10947 case BFD_RELOC_IA64_DIR64LSB:
10948 new = BFD_RELOC_IA64_LTOFF_FPTR64LSB; break;
10949 default:
10950 type = "LTOFF_FPTR"; break;
10951 }
10952 break;
10953
10954 case FUNC_TP_RELATIVE:
10955 switch (r_type)
10956 {
10957 case BFD_RELOC_IA64_IMM14: new = BFD_RELOC_IA64_TPREL14; break;
10958 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_TPREL22; break;
10959 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_TPREL64I; break;
10960 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_TPREL64MSB; break;
10961 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_TPREL64LSB; break;
10962 default: type = "TPREL"; break;
10963 }
10964 break;
10965
10966 case FUNC_LT_TP_RELATIVE:
10967 switch (r_type)
10968 {
10969 case BFD_RELOC_IA64_IMM22:
10970 new = BFD_RELOC_IA64_LTOFF_TPREL22; break;
10971 default:
10972 type = "LTOFF_TPREL"; break;
10973 }
10974 break;
10975
10976 case FUNC_DTP_MODULE:
10977 switch (r_type)
10978 {
10979 case BFD_RELOC_IA64_DIR64MSB:
10980 new = BFD_RELOC_IA64_DTPMOD64MSB; break;
10981 case BFD_RELOC_IA64_DIR64LSB:
10982 new = BFD_RELOC_IA64_DTPMOD64LSB; break;
10983 default:
10984 type = "DTPMOD"; break;
10985 }
10986 break;
10987
10988 case FUNC_LT_DTP_MODULE:
10989 switch (r_type)
10990 {
10991 case BFD_RELOC_IA64_IMM22:
10992 new = BFD_RELOC_IA64_LTOFF_DTPMOD22; break;
10993 default:
10994 type = "LTOFF_DTPMOD"; break;
10995 }
10996 break;
10997
10998 case FUNC_DTP_RELATIVE:
10999 switch (r_type)
11000 {
11001 case BFD_RELOC_IA64_DIR32MSB:
11002 new = BFD_RELOC_IA64_DTPREL32MSB; break;
11003 case BFD_RELOC_IA64_DIR32LSB:
11004 new = BFD_RELOC_IA64_DTPREL32LSB; break;
11005 case BFD_RELOC_IA64_DIR64MSB:
11006 new = BFD_RELOC_IA64_DTPREL64MSB; break;
11007 case BFD_RELOC_IA64_DIR64LSB:
11008 new = BFD_RELOC_IA64_DTPREL64LSB; break;
11009 case BFD_RELOC_IA64_IMM14:
11010 new = BFD_RELOC_IA64_DTPREL14; break;
11011 case BFD_RELOC_IA64_IMM22:
11012 new = BFD_RELOC_IA64_DTPREL22; break;
11013 case BFD_RELOC_IA64_IMM64:
11014 new = BFD_RELOC_IA64_DTPREL64I; break;
11015 default:
11016 type = "DTPREL"; break;
11017 }
11018 break;
11019
11020 case FUNC_LT_DTP_RELATIVE:
11021 switch (r_type)
11022 {
11023 case BFD_RELOC_IA64_IMM22:
11024 new = BFD_RELOC_IA64_LTOFF_DTPREL22; break;
11025 default:
11026 type = "LTOFF_DTPREL"; break;
11027 }
11028 break;
11029
11030 case FUNC_IPLT_RELOC:
11031 switch (r_type)
11032 {
11033 case BFD_RELOC_IA64_IPLTMSB: return r_type;
11034 case BFD_RELOC_IA64_IPLTLSB: return r_type;
11035 default: type = "IPLT"; break;
11036 }
11037 break;
11038
11039 default:
11040 abort ();
11041 }
11042
11043 if (new)
11044 return new;
11045 else
11046 {
11047 int width;
11048
11049 if (!type)
11050 abort ();
11051 switch (r_type)
11052 {
11053 case BFD_RELOC_IA64_DIR32MSB: width = 32; suffix = "MSB"; break;
11054 case BFD_RELOC_IA64_DIR32LSB: width = 32; suffix = "LSB"; break;
11055 case BFD_RELOC_IA64_DIR64MSB: width = 64; suffix = "MSB"; break;
11056 case BFD_RELOC_IA64_DIR64LSB: width = 64; suffix = "LSB"; break;
11057 case BFD_RELOC_UNUSED: width = 13; break;
11058 case BFD_RELOC_IA64_IMM14: width = 14; break;
11059 case BFD_RELOC_IA64_IMM22: width = 22; break;
11060 case BFD_RELOC_IA64_IMM64: width = 64; suffix = "I"; break;
11061 default: abort ();
11062 }
11063
11064 /* This should be an error, but since there previously was no
11065 diagnostic here, only warn rather than fail for now. */
11066 as_warn (_("Cannot express %s%d%s relocation"), type, width, suffix);
11067 return r_type;
11068 }
11069 }
11070
11071 /* Here is where we generate the appropriate reloc for pseudo relocation
11072 functions. */
11073 void
11074 ia64_validate_fix (fixS *fix)
11075 {
11076 switch (fix->fx_r_type)
11077 {
11078 case BFD_RELOC_IA64_FPTR64I:
11079 case BFD_RELOC_IA64_FPTR32MSB:
11080 case BFD_RELOC_IA64_FPTR64LSB:
11081 case BFD_RELOC_IA64_LTOFF_FPTR22:
11082 case BFD_RELOC_IA64_LTOFF_FPTR64I:
11083 if (fix->fx_offset != 0)
11084 as_bad_where (fix->fx_file, fix->fx_line,
11085 _("No addend allowed in @fptr() relocation"));
11086 break;
11087 default:
11088 break;
11089 }
11090 }
11091
11092 static void
11093 fix_insn (fixS *fix, const struct ia64_operand *odesc, valueT value)
11094 {
11095 bfd_vma insn[3], t0, t1, control_bits;
11096 const char *err;
11097 char *fixpos;
11098 long slot;
11099
11100 slot = fix->fx_where & 0x3;
11101 fixpos = fix->fx_frag->fr_literal + (fix->fx_where - slot);
11102
11103 /* Bundles are always in little-endian byte order. */
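/* A bundle is 128 bits: a 5-bit template in the low bits followed by
   three 41-bit instruction slots.  */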
11104 t0 = bfd_getl64 (fixpos);
11105 t1 = bfd_getl64 (fixpos + 8);
11106 control_bits = t0 & 0x1f;
11107 insn[0] = (t0 >> 5) & 0x1ffffffffffLL;
11108 insn[1] = ((t0 >> 46) & 0x3ffff) | ((t1 & 0x7fffff) << 18);
11109 insn[2] = (t1 >> 23) & 0x1ffffffffffLL;
11110
11111 err = NULL;
11112 if (odesc - elf64_ia64_operands == IA64_OPND_IMMU64)
11113 {
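/* movl's 64-bit immediate: bits 22..62 of the value fill the L slot
   (insn[1]); the remaining bits are scattered over fields of the X
   slot (insn[2]).  */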
11114 insn[1] = (value >> 22) & 0x1ffffffffffLL;
11115 insn[2] |= (((value & 0x7f) << 13)
11116 | (((value >> 7) & 0x1ff) << 27)
11117 | (((value >> 16) & 0x1f) << 22)
11118 | (((value >> 21) & 0x1) << 21)
11119 | (((value >> 63) & 0x1) << 36));
11120 }
11121 else if (odesc - elf64_ia64_operands == IA64_OPND_IMMU62)
11122 {
11123 if (value & ~0x3fffffffffffffffULL)
11124 err = "integer operand out of range";
11125 insn[1] = (value >> 21) & 0x1ffffffffffLL;
11126 insn[2] |= (((value & 0xfffff) << 6) | (((value >> 20) & 0x1) << 36));
11127 }
11128 else if (odesc - elf64_ia64_operands == IA64_OPND_TGT64)
11129 {
11130 value >>= 4;
11131 insn[1] = ((value >> 20) & 0x7fffffffffLL) << 2;
11132 insn[2] |= ((((value >> 59) & 0x1) << 36)
11133 | (((value >> 0) & 0xfffff) << 13));
11134 }
11135 else
11136 err = (*odesc->insert) (odesc, value, insn + slot);
11137
11138 if (err)
11139 as_bad_where (fix->fx_file, fix->fx_line, err);
11140
11141 t0 = control_bits | (insn[0] << 5) | (insn[1] << 46);
11142 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
11143 number_to_chars_littleendian (fixpos + 0, t0, 8);
11144 number_to_chars_littleendian (fixpos + 8, t1, 8);
11145 }
11146
11147 /* Attempt to simplify or even eliminate a fixup. The return value is
11148 ignored; perhaps it was once meaningful, but now it is historical.
11149 To indicate that a fixup has been eliminated, set FIXP->FX_DONE.
11150
11151 If fixp->fx_addsy is non-NULL, we'll have to generate a reloc entry
11152 (if possible). */
11153
11154 void
11155 md_apply_fix (fixS *fix, valueT *valP, segT seg ATTRIBUTE_UNUSED)
11156 {
11157 char *fixpos;
11158 valueT value = *valP;
11159
11160 fixpos = fix->fx_frag->fr_literal + fix->fx_where;
11161
11162 if (fix->fx_pcrel)
11163 {
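/* If the reloc is already of a pc-relative flavor, leave it alone;
   otherwise map the generic reloc onto its @pcrel counterpart.  */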
11164 switch (fix->fx_r_type)
11165 {
11166 case BFD_RELOC_IA64_PCREL21B: break;
11167 case BFD_RELOC_IA64_PCREL21BI: break;
11168 case BFD_RELOC_IA64_PCREL21F: break;
11169 case BFD_RELOC_IA64_PCREL21M: break;
11170 case BFD_RELOC_IA64_PCREL60B: break;
11171 case BFD_RELOC_IA64_PCREL22: break;
11172 case BFD_RELOC_IA64_PCREL64I: break;
11173 case BFD_RELOC_IA64_PCREL32MSB: break;
11174 case BFD_RELOC_IA64_PCREL32LSB: break;
11175 case BFD_RELOC_IA64_PCREL64MSB: break;
11176 case BFD_RELOC_IA64_PCREL64LSB: break;
11177 default:
11178 fix->fx_r_type = ia64_gen_real_reloc_type (pseudo_func[FUNC_PC_RELATIVE].u.sym,
11179 fix->fx_r_type);
11180 break;
11181 }
11182 }
11183 if (fix->fx_addsy)
11184 {
11185 switch (fix->fx_r_type)
11186 {
11187 case BFD_RELOC_UNUSED:
11188 /* This must be a TAG13 or TAG13b operand. There are no external
11189 relocs defined for them, so we must give an error. */
11190 as_bad_where (fix->fx_file, fix->fx_line,
11191 _("%s must have a constant value"),
11192 elf64_ia64_operands[fix->tc_fix_data.opnd].desc);
11193 fix->fx_done = 1;
11194 return;
11195
11196 case BFD_RELOC_IA64_TPREL14:
11197 case BFD_RELOC_IA64_TPREL22:
11198 case BFD_RELOC_IA64_TPREL64I:
11199 case BFD_RELOC_IA64_LTOFF_TPREL22:
11200 case BFD_RELOC_IA64_LTOFF_DTPMOD22:
11201 case BFD_RELOC_IA64_DTPREL14:
11202 case BFD_RELOC_IA64_DTPREL22:
11203 case BFD_RELOC_IA64_DTPREL64I:
11204 case BFD_RELOC_IA64_LTOFF_DTPREL22:
11205 S_SET_THREAD_LOCAL (fix->fx_addsy);
11206 break;
11207
11208 default:
11209 break;
11210 }
11211 }
11212 else if (fix->tc_fix_data.opnd == IA64_OPND_NIL)
11213 {
11214 if (fix->tc_fix_data.bigendian)
11215 number_to_chars_bigendian (fixpos, value, fix->fx_size);
11216 else
11217 number_to_chars_littleendian (fixpos, value, fix->fx_size);
11218 fix->fx_done = 1;
11219 }
11220 else
11221 {
11222 fix_insn (fix, elf64_ia64_operands + fix->tc_fix_data.opnd, value);
11223 fix->fx_done = 1;
11224 }
11225 }
11226
11227 /* Generate the BFD reloc to be stuck in the object file from the
11228 fixup used internally in the assembler. */
11229
11230 arelent *
11231 tc_gen_reloc (asection *sec ATTRIBUTE_UNUSED, fixS *fixp)
11232 {
11233 arelent *reloc;
11234
11235 reloc = xmalloc (sizeof (*reloc));
11236 reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
11237 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
11238 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
11239 reloc->addend = fixp->fx_offset;
11240 reloc->howto = bfd_reloc_type_lookup (stdoutput, fixp->fx_r_type);
11241
11242 if (!reloc->howto)
11243 {
11244 as_bad_where (fixp->fx_file, fixp->fx_line,
11245 _("Cannot represent %s relocation in object file"),
11246 bfd_get_reloc_code_name (fixp->fx_r_type));
11247 free (reloc);
11248 return NULL;
11249 }
11250 return reloc;
11251 }
11252
11253 /* Turn a string in input_line_pointer into a floating point constant
11254 of type TYPE, and store the appropriate bytes in *LIT. The number
11255 of LITTLENUMS emitted is stored in *SIZE. An error message is
11256 returned, or NULL on OK. */
11257
11258 #define MAX_LITTLENUMS 5
11259
11260 char *
11261 md_atof (int type, char *lit, int *size)
11262 {
11263 LITTLENUM_TYPE words[MAX_LITTLENUMS];
11264 char *t;
11265 int prec;
11266
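/* PREC counts 16-bit LITTLENUMs: 2 for single, 4 for double, 5 for the
   80-bit extended format.  */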
11267 switch (type)
11268 {
11269 /* IEEE floats */
11270 case 'f':
11271 case 'F':
11272 case 's':
11273 case 'S':
11274 prec = 2;
11275 break;
11276
11277 case 'd':
11278 case 'D':
11279 case 'r':
11280 case 'R':
11281 prec = 4;
11282 break;
11283
11284 case 'x':
11285 case 'X':
11286 case 'p':
11287 case 'P':
11288 prec = 5;
11289 break;
11290
11291 default:
11292 *size = 0;
11293 return _("Unrecognized or unsupported floating point constant");
11294 }
11295 t = atof_ieee (input_line_pointer, type, words);
11296 if (t)
11297 input_line_pointer = t;
11298
11299 (*ia64_float_to_chars) (lit, words, prec);
11300
11301 if (type == 'X')
11302 {
11303 /* It is a 10-byte floating point value with 6 bytes of padding. */
11304 memset (&lit [10], 0, 6);
11305 *size = 8 * sizeof (LITTLENUM_TYPE);
11306 }
11307 else
11308 *size = prec * sizeof (LITTLENUM_TYPE);
11309
11310 return NULL;
11311 }
11312
11313 /* Handle ia64-specific semantics of the align directive. */
11314
11315 void
11316 ia64_md_do_align (int n ATTRIBUTE_UNUSED,
11317 const char *fill ATTRIBUTE_UNUSED,
11318 int len ATTRIBUTE_UNUSED,
11319 int max ATTRIBUTE_UNUSED)
11320 {
11321 if (subseg_text_p (now_seg))
11322 ia64_flush_insns ();
11323 }
11324
11325 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
11326 of an rs_align_code fragment. */
11327
11328 void
11329 ia64_handle_align (fragS *fragp)
11330 {
11331 int bytes;
11332 char *p;
11333 const unsigned char *nop;
11334
11335 if (fragp->fr_type != rs_align_code)
11336 return;
11337
11338 /* Check if this frag has to end with a stop bit. */
11339 nop = fragp->tc_frag_data ? le_nop_stop : le_nop;
11340
11341 bytes = fragp->fr_next->fr_address - fragp->fr_address - fragp->fr_fix;
11342 p = fragp->fr_literal + fragp->fr_fix;
11343
11344 /* If no padding is needed, check whether we still need a stop bit. */
11345 if (!bytes && fragp->tc_frag_data)
11346 {
11347 if (fragp->fr_fix < 16)
11348 #if 1
11349 /* FIXME: It won't work with
11350 .align 16
11351 alloc r32=ar.pfs,1,2,4,0
11352 */
11353 ;
11354 #else
11355 as_bad_where (fragp->fr_file, fragp->fr_line,
11356 _("Can't add stop bit to mark end of instruction group"));
11357 #endif
11358 else
11359 /* Bundles are always in little-endian byte order. Make sure
11360 the previous bundle has the stop bit. */
11361 *(p - 16) |= 1;
11362 }
11363
11364 /* Make sure we are on a 16-byte boundary, in case someone has been
11365 putting data into a text section. */
11366 if (bytes & 15)
11367 {
11368 int fix = bytes & 15;
11369 memset (p, 0, fix);
11370 p += fix;
11371 bytes -= fix;
11372 fragp->fr_fix += fix;
11373 }
11374
11375 /* Instruction bundles are always little-endian. */
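/* le_nop (and le_nop_stop, which has the stop bit set) is a full
   16-byte bundle of nops; write.c repeats this 16-byte variable part
   as needed to reach the alignment.  */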
11376 memcpy (p, nop, 16);
11377 fragp->fr_var = 16;
11378 }
11379
11380 static void
11381 ia64_float_to_chars_bigendian (char *lit, LITTLENUM_TYPE *words,
11382 int prec)
11383 {
11384 while (prec--)
11385 {
11386 number_to_chars_bigendian (lit, (long) (*words++),
11387 sizeof (LITTLENUM_TYPE));
11388 lit += sizeof (LITTLENUM_TYPE);
11389 }
11390 }
11391
11392 static void
11393 ia64_float_to_chars_littleendian (char *lit, LITTLENUM_TYPE *words,
11394 int prec)
11395 {
11396 while (prec--)
11397 {
11398 number_to_chars_littleendian (lit, (long) (words[prec]),
11399 sizeof (LITTLENUM_TYPE));
11400 lit += sizeof (LITTLENUM_TYPE);
11401 }
11402 }
11403
11404 void
11405 ia64_elf_section_change_hook (void)
11406 {
11407 if (elf_section_type (now_seg) == SHT_IA_64_UNWIND
11408 && elf_linked_to_section (now_seg) == NULL)
11409 elf_linked_to_section (now_seg) = text_section;
11410 dot_byteorder (-1);
11411 }
11412
11413 /* Check if a label should be made global. */
11414 void
11415 ia64_check_label (symbolS *label)
11416 {
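/* A second colon (i.e. "name::") declares the label global.  */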
11417 if (*input_line_pointer == ':')
11418 {
11419 S_SET_EXTERNAL (label);
11420 input_line_pointer++;
11421 }
11422 }
11423
11424 /* Used to remember where .alias and .secalias directives are seen. We
11425 will rename symbol and section names when we are about to output
11426 the relocatable file. */
11427 struct alias
11428 {
11429 char *file; /* The file where the directive is seen. */
11430 unsigned int line; /* The line number the directive is at. */
11431 const char *name; /* The original name of the symbol. */
11432 };
11433
11434 /* Called for .alias and .secalias directives. If SECTION is 1, it is
11435 .secalias. Otherwise, it is .alias. */
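/* For example (illustrative): .alias foo, "bar" makes the symbol written
   as foo in the source appear as bar in the object file; .secalias does
   the same for a section name.  */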
11436 static void
11437 dot_alias (int section)
11438 {
11439 char *name, *alias;
11440 char delim;
11441 char *end_name;
11442 int len;
11443 const char *error_string;
11444 struct alias *h;
11445 const char *a;
11446 struct hash_control *ahash, *nhash;
11447 const char *kind;
11448
11449 name = input_line_pointer;
11450 delim = get_symbol_end ();
11451 end_name = input_line_pointer;
11452 *end_name = delim;
11453
11454 if (name == end_name)
11455 {
11456 as_bad (_("expected symbol name"));
11457 ignore_rest_of_line ();
11458 return;
11459 }
11460
11461 SKIP_WHITESPACE ();
11462
11463 if (*input_line_pointer != ',')
11464 {
11465 *end_name = 0;
11466 as_bad (_("expected comma after \"%s\""), name);
11467 *end_name = delim;
11468 ignore_rest_of_line ();
11469 return;
11470 }
11471
11472 input_line_pointer++;
11473 *end_name = 0;
11474 ia64_canonicalize_symbol_name (name);
11475
11476 /* We call demand_copy_C_string to check if the alias string is valid.
11477 There should be a closing `"' and no `\0' in the string. */
11478 alias = demand_copy_C_string (&len);
11479 if (alias == NULL)
11480 {
11481 ignore_rest_of_line ();
11482 return;
11483 }
11484
11485 /* Make a copy of the name string. */
11486 len = strlen (name) + 1;
11487 obstack_grow (&notes, name, len);
11488 name = obstack_finish (&notes);
11489
11490 if (section)
11491 {
11492 kind = "section";
11493 ahash = secalias_hash;
11494 nhash = secalias_name_hash;
11495 }
11496 else
11497 {
11498 kind = "symbol";
11499 ahash = alias_hash;
11500 nhash = alias_name_hash;
11501 }
11502
11503 /* Check if alias has been used before. */
11504 h = (struct alias *) hash_find (ahash, alias);
11505 if (h)
11506 {
11507 if (strcmp (h->name, name))
11508 as_bad (_("`%s' is already the alias of %s `%s'"),
11509 alias, kind, h->name);
11510 goto out;
11511 }
11512
11513 /* Check if name already has an alias. */
11514 a = (const char *) hash_find (nhash, name);
11515 if (a)
11516 {
11517 if (strcmp (a, alias))
11518 as_bad (_("%s `%s' already has an alias `%s'"), kind, name, a);
11519 goto out;
11520 }
11521
11522 h = (struct alias *) xmalloc (sizeof (struct alias));
11523 as_where (&h->file, &h->line);
11524 h->name = name;
11525
11526 error_string = hash_jam (ahash, alias, (void *) h);
11527 if (error_string)
11528 {
11529 as_fatal (_("inserting \"%s\" into %s alias hash table failed: %s"),
11530 alias, kind, error_string);
11531 goto out;
11532 }
11533
11534 error_string = hash_jam (nhash, name, (void *) alias);
11535 if (error_string)
11536 {
11537 as_fatal (_("inserting \"%s\" into %s name hash table failed: %s"),
11538 alias, kind, error_string);
11539 out:
11540 obstack_free (&notes, name);
11541 obstack_free (&notes, alias);
11542 }
11543
11544 demand_empty_rest_of_line ();
11545 }
11546
11547 /* Rename the original symbol to its alias. */
11548 static void
11549 do_alias (const char *alias, void *value)
11550 {
11551 struct alias *h = (struct alias *) value;
11552 symbolS *sym = symbol_find (h->name);
11553
11554 if (sym == NULL)
11555 as_warn_where (h->file, h->line,
11556 _("symbol `%s' aliased to `%s' is not used"),
11557 h->name, alias);
11558 else
11559 S_SET_NAME (sym, (char *) alias);
11560 }
11561
11562 /* Called from write_object_file. */
11563 void
11564 ia64_adjust_symtab (void)
11565 {
11566 hash_traverse (alias_hash, do_alias);
11567 }
11568
11569 /* Rename the original section to its alias. */
11570 static void
11571 do_secalias (const char *alias, void *value)
11572 {
11573 struct alias *h = (struct alias *) value;
11574 segT sec = bfd_get_section_by_name (stdoutput, h->name);
11575
11576 if (sec == NULL)
11577 as_warn_where (h->file, h->line,
11578 _("section `%s' aliased to `%s' is not used"),
11579 h->name, alias);
11580 else
11581 sec->name = alias;
11582 }
11583
11584 /* Called from write_object_file. */
11585 void
11586 ia64_frob_file (void)
11587 {
11588 hash_traverse (secalias_hash, do_secalias);
11589 }