gas/
[deliverable/binutils-gdb.git] / gas / config / tc-ia64.c
1 /* tc-ia64.c -- Assembler for the HP/Intel IA-64 architecture.
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
3 Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
4
5 This file is part of GAS, the GNU Assembler.
6
7 GAS is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
11
12 GAS is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GAS; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
21
22 /*
23 TODO:
24
25 - optional operands
26 - directives:
27 .alias
28 .eb
29 .estate
30 .lb
31 .popsection
32 .previous
33 .psr
34 .pushsection
35 - labels are wrong if automatic alignment is introduced
36 (e.g., checkout the second real10 definition in test-data.s)
37 - DV-related stuff:
38 <reg>.safe_across_calls and any other DV-related directives I don't
39 have documentation for.
40 verify mod-sched-brs reads/writes are checked/marked (and other
41 notes)
42
43 */
44
45 #include "as.h"
46 #include "safe-ctype.h"
47 #include "dwarf2dbg.h"
48 #include "subsegs.h"
49
50 #include "opcode/ia64.h"
51
52 #include "elf/ia64.h"
53
54 #define NELEMS(a) ((int) (sizeof (a)/sizeof ((a)[0])))
55 #define MIN(a,b) ((a) < (b) ? (a) : (b))
56
57 #define NUM_SLOTS 4
58 #define PREV_SLOT md.slot[(md.curr_slot + NUM_SLOTS - 1) % NUM_SLOTS]
59 #define CURR_SLOT md.slot[md.curr_slot]
60
61 #define O_pseudo_fixup (O_max + 1)
62
63 enum special_section
64 {
65 /* IA-64 ABI section pseudo-ops. */
66 SPECIAL_SECTION_BSS = 0,
67 SPECIAL_SECTION_SBSS,
68 SPECIAL_SECTION_SDATA,
69 SPECIAL_SECTION_RODATA,
70 SPECIAL_SECTION_COMMENT,
71 SPECIAL_SECTION_UNWIND,
72 SPECIAL_SECTION_UNWIND_INFO,
73 /* HPUX specific section pseudo-ops. */
74 SPECIAL_SECTION_INIT_ARRAY,
75 SPECIAL_SECTION_FINI_ARRAY,
76 };
77
78 enum reloc_func
79 {
80 FUNC_DTP_MODULE,
81 FUNC_DTP_RELATIVE,
82 FUNC_FPTR_RELATIVE,
83 FUNC_GP_RELATIVE,
84 FUNC_LT_RELATIVE,
85 FUNC_LT_RELATIVE_X,
86 FUNC_PC_RELATIVE,
87 FUNC_PLT_RELATIVE,
88 FUNC_SEC_RELATIVE,
89 FUNC_SEG_RELATIVE,
90 FUNC_TP_RELATIVE,
91 FUNC_LTV_RELATIVE,
92 FUNC_LT_FPTR_RELATIVE,
93 FUNC_LT_DTP_MODULE,
94 FUNC_LT_DTP_RELATIVE,
95 FUNC_LT_TP_RELATIVE,
96 FUNC_IPLT_RELOC,
97 };
98
99 enum reg_symbol
100 {
101 REG_GR = 0,
102 REG_FR = (REG_GR + 128),
103 REG_AR = (REG_FR + 128),
104 REG_CR = (REG_AR + 128),
105 REG_P = (REG_CR + 128),
106 REG_BR = (REG_P + 64),
107 REG_IP = (REG_BR + 8),
108 REG_CFM,
109 REG_PR,
110 REG_PR_ROT,
111 REG_PSR,
112 REG_PSR_L,
113 REG_PSR_UM,
114 /* The following are pseudo-registers for use by gas only. */
115 IND_CPUID,
116 IND_DBR,
117 IND_DTR,
118 IND_ITR,
119 IND_IBR,
120 IND_MEM,
121 IND_MSR,
122 IND_PKR,
123 IND_PMC,
124 IND_PMD,
125 IND_RR,
126 /* The following pseudo-registers are used for unwind directives only: */
127 REG_PSP,
128 REG_PRIUNAT,
129 REG_NUM
130 };
131
132 enum dynreg_type
133 {
134 DYNREG_GR = 0, /* dynamic general purpose register */
135 DYNREG_FR, /* dynamic floating point register */
136 DYNREG_PR, /* dynamic predicate register */
137 DYNREG_NUM_TYPES
138 };
139
140 enum operand_match_result
141 {
142 OPERAND_MATCH,
143 OPERAND_OUT_OF_RANGE,
144 OPERAND_MISMATCH
145 };
146
147 /* On the ia64, we can't know the address of a text label until the
148 instructions are packed into a bundle. To handle this, we keep
149 track of the list of labels that appear in front of each
150 instruction. */
151 struct label_fix
152 {
153 struct label_fix *next;
154 struct symbol *sym;
155 };
156
157 extern int target_big_endian;
158
159 void (*ia64_number_to_chars) PARAMS ((char *, valueT, int));
160
161 static void ia64_float_to_chars_bigendian
162 PARAMS ((char *, LITTLENUM_TYPE *, int));
163 static void ia64_float_to_chars_littleendian
164 PARAMS ((char *, LITTLENUM_TYPE *, int));
165 static void (*ia64_float_to_chars)
166 PARAMS ((char *, LITTLENUM_TYPE *, int));
167
168 /* Characters which always start a comment. */
169 const char comment_chars[] = "";
170
171 /* Characters which start a comment at the beginning of a line. */
172 const char line_comment_chars[] = "#";
173
174 /* Characters which may be used to separate multiple commands on a
175 single line. */
176 const char line_separator_chars[] = ";";
177
178 /* Characters which are used to indicate an exponent in a floating
179 point number. */
180 const char EXP_CHARS[] = "eE";
181
182 /* Characters which mean that a number is a floating point constant,
183 as in 0d1.0. */
184 const char FLT_CHARS[] = "rRsSfFdDxXpP";
185
186 /* ia64-specific option processing: */
187
188 const char *md_shortopts = "m:N:x::";
189
190 struct option md_longopts[] =
191 {
192 #define OPTION_MCONSTANT_GP (OPTION_MD_BASE + 1)
193 {"mconstant-gp", no_argument, NULL, OPTION_MCONSTANT_GP},
194 #define OPTION_MAUTO_PIC (OPTION_MD_BASE + 2)
195 {"mauto-pic", no_argument, NULL, OPTION_MAUTO_PIC}
196 };
197
198 size_t md_longopts_size = sizeof (md_longopts);
199
200 static struct
201 {
202 struct hash_control *pseudo_hash; /* pseudo opcode hash table */
203 struct hash_control *reg_hash; /* register name hash table */
204 struct hash_control *dynreg_hash; /* dynamic register hash table */
205 struct hash_control *const_hash; /* constant hash table */
206 struct hash_control *entry_hash; /* code entry hint hash table */
207
208 symbolS *regsym[REG_NUM];
209
210 /* If X_op is != O_absent, the registername for the instruction's
211 qualifying predicate. If NULL, p0 is assumed for instructions
212 that are predicatable. */
213 expressionS qp;
214
215 unsigned int
216 manual_bundling : 1,
217 debug_dv: 1,
218 detect_dv: 1,
219 explicit_mode : 1, /* which mode we're in */
220 default_explicit_mode : 1, /* which mode is the default */
221 mode_explicitly_set : 1, /* was the current mode explicitly set? */
222 auto_align : 1,
223 keep_pending_output : 1;
224
225 /* Each bundle consists of up to three instructions. We keep
226 track of four most recent instructions so we can correctly set
227 the end_of_insn_group for the last instruction in a bundle. */
228 int curr_slot;
229 int num_slots_in_use;
230 struct slot
231 {
232 unsigned int
233 end_of_insn_group : 1,
234 manual_bundling_on : 1,
235 manual_bundling_off : 1;
236 signed char user_template; /* user-selected template, if any */
237 unsigned char qp_regno; /* qualifying predicate */
238 /* This duplicates a good fraction of "struct fix" but we
239 can't use a "struct fix" instead since we can't call
240 fix_new_exp() until we know the address of the instruction. */
241 int num_fixups;
242 struct insn_fix
243 {
244 bfd_reloc_code_real_type code;
245 enum ia64_opnd opnd; /* type of operand in need of fix */
246 unsigned int is_pcrel : 1; /* is operand pc-relative? */
247 expressionS expr; /* the value to be inserted */
248 }
249 fixup[2]; /* at most two fixups per insn */
250 struct ia64_opcode *idesc;
251 struct label_fix *label_fixups;
252 struct label_fix *tag_fixups;
253 struct unw_rec_list *unwind_record; /* Unwind directive. */
254 expressionS opnd[6];
255 char *src_file;
256 unsigned int src_line;
257 struct dwarf2_line_info debug_line;
258 }
259 slot[NUM_SLOTS];
260
261 segT last_text_seg;
262
263 struct dynreg
264 {
265 struct dynreg *next; /* next dynamic register */
266 const char *name;
267 unsigned short base; /* the base register number */
268 unsigned short num_regs; /* # of registers in this set */
269 }
270 *dynreg[DYNREG_NUM_TYPES], in, loc, out, rot;
271
272 flagword flags; /* ELF-header flags */
273
274 struct mem_offset {
275 unsigned hint:1; /* is this hint currently valid? */
276 bfd_vma offset; /* mem.offset offset */
277 bfd_vma base; /* mem.offset base */
278 } mem_offset;
279
280 int path; /* number of alt. entry points seen */
281 const char **entry_labels; /* labels of all alternate paths in
282 the current DV-checking block. */
283 int maxpaths; /* size currently allocated for
284 entry_labels */
285 /* Support for hardware errata workarounds. */
286
287 /* Record data about the last three insn groups. */
288 struct group
289 {
290 /* B-step workaround.
291 For each predicate register, this is set if the corresponding insn
292 group conditionally sets this register with one of the affected
293 instructions. */
294 int p_reg_set[64];
295 /* B-step workaround.
296 For each general register, this is set if the corresponding insn
297 a) is conditional one one of the predicate registers for which
298 P_REG_SET is 1 in the corresponding entry of the previous group,
299 b) sets this general register with one of the affected
300 instructions. */
301 int g_reg_set_conditionally[128];
302 } last_groups[3];
303 int group_idx;
304
305 int pointer_size; /* size in bytes of a pointer */
306 int pointer_size_shift; /* shift size of a pointer for alignment */
307 }
308 md;
309
310 /* application registers: */
311
312 #define AR_K0 0
313 #define AR_K7 7
314 #define AR_RSC 16
315 #define AR_BSP 17
316 #define AR_BSPSTORE 18
317 #define AR_RNAT 19
318 #define AR_UNAT 36
319 #define AR_FPSR 40
320 #define AR_ITC 44
321 #define AR_PFS 64
322 #define AR_LC 65
323
324 static const struct
325 {
326 const char *name;
327 int regnum;
328 }
329 ar[] =
330 {
331 {"ar.k0", 0}, {"ar.k1", 1}, {"ar.k2", 2}, {"ar.k3", 3},
332 {"ar.k4", 4}, {"ar.k5", 5}, {"ar.k6", 6}, {"ar.k7", 7},
333 {"ar.rsc", 16}, {"ar.bsp", 17},
334 {"ar.bspstore", 18}, {"ar.rnat", 19},
335 {"ar.fcr", 21}, {"ar.eflag", 24},
336 {"ar.csd", 25}, {"ar.ssd", 26},
337 {"ar.cflg", 27}, {"ar.fsr", 28},
338 {"ar.fir", 29}, {"ar.fdr", 30},
339 {"ar.ccv", 32}, {"ar.unat", 36},
340 {"ar.fpsr", 40}, {"ar.itc", 44},
341 {"ar.pfs", 64}, {"ar.lc", 65},
342 {"ar.ec", 66},
343 };
344
345 #define CR_IPSR 16
346 #define CR_ISR 17
347 #define CR_IIP 19
348 #define CR_IFA 20
349 #define CR_ITIR 21
350 #define CR_IIPA 22
351 #define CR_IFS 23
352 #define CR_IIM 24
353 #define CR_IHA 25
354 #define CR_IVR 65
355 #define CR_TPR 66
356 #define CR_EOI 67
357 #define CR_IRR0 68
358 #define CR_IRR3 71
359 #define CR_LRR0 80
360 #define CR_LRR1 81
361
362 /* control registers: */
363 static const struct
364 {
365 const char *name;
366 int regnum;
367 }
368 cr[] =
369 {
370 {"cr.dcr", 0},
371 {"cr.itm", 1},
372 {"cr.iva", 2},
373 {"cr.pta", 8},
374 {"cr.gpta", 9},
375 {"cr.ipsr", 16},
376 {"cr.isr", 17},
377 {"cr.iip", 19},
378 {"cr.ifa", 20},
379 {"cr.itir", 21},
380 {"cr.iipa", 22},
381 {"cr.ifs", 23},
382 {"cr.iim", 24},
383 {"cr.iha", 25},
384 {"cr.lid", 64},
385 {"cr.ivr", 65},
386 {"cr.tpr", 66},
387 {"cr.eoi", 67},
388 {"cr.irr0", 68},
389 {"cr.irr1", 69},
390 {"cr.irr2", 70},
391 {"cr.irr3", 71},
392 {"cr.itv", 72},
393 {"cr.pmv", 73},
394 {"cr.cmcv", 74},
395 {"cr.lrr0", 80},
396 {"cr.lrr1", 81}
397 };
398
399 #define PSR_MFL 4
400 #define PSR_IC 13
401 #define PSR_DFL 18
402 #define PSR_CPL 32
403
404 static const struct const_desc
405 {
406 const char *name;
407 valueT value;
408 }
409 const_bits[] =
410 {
411 /* PSR constant masks: */
412
413 /* 0: reserved */
414 {"psr.be", ((valueT) 1) << 1},
415 {"psr.up", ((valueT) 1) << 2},
416 {"psr.ac", ((valueT) 1) << 3},
417 {"psr.mfl", ((valueT) 1) << 4},
418 {"psr.mfh", ((valueT) 1) << 5},
419 /* 6-12: reserved */
420 {"psr.ic", ((valueT) 1) << 13},
421 {"psr.i", ((valueT) 1) << 14},
422 {"psr.pk", ((valueT) 1) << 15},
423 /* 16: reserved */
424 {"psr.dt", ((valueT) 1) << 17},
425 {"psr.dfl", ((valueT) 1) << 18},
426 {"psr.dfh", ((valueT) 1) << 19},
427 {"psr.sp", ((valueT) 1) << 20},
428 {"psr.pp", ((valueT) 1) << 21},
429 {"psr.di", ((valueT) 1) << 22},
430 {"psr.si", ((valueT) 1) << 23},
431 {"psr.db", ((valueT) 1) << 24},
432 {"psr.lp", ((valueT) 1) << 25},
433 {"psr.tb", ((valueT) 1) << 26},
434 {"psr.rt", ((valueT) 1) << 27},
435 /* 28-31: reserved */
436 /* 32-33: cpl (current privilege level) */
437 {"psr.is", ((valueT) 1) << 34},
438 {"psr.mc", ((valueT) 1) << 35},
439 {"psr.it", ((valueT) 1) << 36},
440 {"psr.id", ((valueT) 1) << 37},
441 {"psr.da", ((valueT) 1) << 38},
442 {"psr.dd", ((valueT) 1) << 39},
443 {"psr.ss", ((valueT) 1) << 40},
444 /* 41-42: ri (restart instruction) */
445 {"psr.ed", ((valueT) 1) << 43},
446 {"psr.bn", ((valueT) 1) << 44},
447 };
448
449 /* indirect register-sets/memory: */
450
451 static const struct
452 {
453 const char *name;
454 int regnum;
455 }
456 indirect_reg[] =
457 {
458 { "CPUID", IND_CPUID },
459 { "cpuid", IND_CPUID },
460 { "dbr", IND_DBR },
461 { "dtr", IND_DTR },
462 { "itr", IND_ITR },
463 { "ibr", IND_IBR },
464 { "msr", IND_MSR },
465 { "pkr", IND_PKR },
466 { "pmc", IND_PMC },
467 { "pmd", IND_PMD },
468 { "rr", IND_RR },
469 };
470
471 /* Pseudo functions used to indicate relocation types (these functions
472 start with an at sign (@). */
473 static struct
474 {
475 const char *name;
476 enum pseudo_type
477 {
478 PSEUDO_FUNC_NONE,
479 PSEUDO_FUNC_RELOC,
480 PSEUDO_FUNC_CONST,
481 PSEUDO_FUNC_REG,
482 PSEUDO_FUNC_FLOAT
483 }
484 type;
485 union
486 {
487 unsigned long ival;
488 symbolS *sym;
489 }
490 u;
491 }
492 pseudo_func[] =
493 {
494 /* reloc pseudo functions (these must come first!): */
495 { "dtpmod", PSEUDO_FUNC_RELOC, { 0 } },
496 { "dtprel", PSEUDO_FUNC_RELOC, { 0 } },
497 { "fptr", PSEUDO_FUNC_RELOC, { 0 } },
498 { "gprel", PSEUDO_FUNC_RELOC, { 0 } },
499 { "ltoff", PSEUDO_FUNC_RELOC, { 0 } },
500 { "ltoffx", PSEUDO_FUNC_RELOC, { 0 } },
501 { "pcrel", PSEUDO_FUNC_RELOC, { 0 } },
502 { "pltoff", PSEUDO_FUNC_RELOC, { 0 } },
503 { "secrel", PSEUDO_FUNC_RELOC, { 0 } },
504 { "segrel", PSEUDO_FUNC_RELOC, { 0 } },
505 { "tprel", PSEUDO_FUNC_RELOC, { 0 } },
506 { "ltv", PSEUDO_FUNC_RELOC, { 0 } },
507 { "", 0, { 0 } }, /* placeholder for FUNC_LT_FPTR_RELATIVE */
508 { "", 0, { 0 } }, /* placeholder for FUNC_LT_DTP_MODULE */
509 { "", 0, { 0 } }, /* placeholder for FUNC_LT_DTP_RELATIVE */
510 { "", 0, { 0 } }, /* placeholder for FUNC_LT_TP_RELATIVE */
511 { "iplt", PSEUDO_FUNC_RELOC, { 0 } },
512
513 /* mbtype4 constants: */
514 { "alt", PSEUDO_FUNC_CONST, { 0xa } },
515 { "brcst", PSEUDO_FUNC_CONST, { 0x0 } },
516 { "mix", PSEUDO_FUNC_CONST, { 0x8 } },
517 { "rev", PSEUDO_FUNC_CONST, { 0xb } },
518 { "shuf", PSEUDO_FUNC_CONST, { 0x9 } },
519
520 /* fclass constants: */
521 { "nat", PSEUDO_FUNC_CONST, { 0x100 } },
522 { "qnan", PSEUDO_FUNC_CONST, { 0x080 } },
523 { "snan", PSEUDO_FUNC_CONST, { 0x040 } },
524 { "pos", PSEUDO_FUNC_CONST, { 0x001 } },
525 { "neg", PSEUDO_FUNC_CONST, { 0x002 } },
526 { "zero", PSEUDO_FUNC_CONST, { 0x004 } },
527 { "unorm", PSEUDO_FUNC_CONST, { 0x008 } },
528 { "norm", PSEUDO_FUNC_CONST, { 0x010 } },
529 { "inf", PSEUDO_FUNC_CONST, { 0x020 } },
530
531 { "natval", PSEUDO_FUNC_CONST, { 0x100 } }, /* old usage */
532
533 /* hint constants: */
534 { "pause", PSEUDO_FUNC_CONST, { 0x0 } },
535
536 /* unwind-related constants: */
537 { "svr4", PSEUDO_FUNC_CONST, { 0 } },
538 { "hpux", PSEUDO_FUNC_CONST, { 1 } },
539 { "nt", PSEUDO_FUNC_CONST, { 2 } },
540
541 /* unwind-related registers: */
542 { "priunat",PSEUDO_FUNC_REG, { REG_PRIUNAT } }
543 };
544
545 /* 41-bit nop opcodes (one per unit): */
546 static const bfd_vma nop[IA64_NUM_UNITS] =
547 {
548 0x0000000000LL, /* NIL => break 0 */
549 0x0008000000LL, /* I-unit nop */
550 0x0008000000LL, /* M-unit nop */
551 0x4000000000LL, /* B-unit nop */
552 0x0008000000LL, /* F-unit nop */
553 0x0008000000LL, /* L-"unit" nop */
554 0x0008000000LL, /* X-unit nop */
555 };
556
557 /* Can't be `const' as it's passed to input routines (which have the
558 habit of setting temporary sentinels. */
559 static char special_section_name[][20] =
560 {
561 {".bss"}, {".sbss"}, {".sdata"}, {".rodata"}, {".comment"},
562 {".IA_64.unwind"}, {".IA_64.unwind_info"},
563 {".init_array"}, {".fini_array"}
564 };
565
566 static char *special_linkonce_name[] =
567 {
568 ".gnu.linkonce.ia64unw.", ".gnu.linkonce.ia64unwi."
569 };
570
571 /* The best template for a particular sequence of up to three
572 instructions: */
573 #define N IA64_NUM_TYPES
574 static unsigned char best_template[N][N][N];
575 #undef N
576
577 /* Resource dependencies currently in effect */
578 static struct rsrc {
579 int depind; /* dependency index */
580 const struct ia64_dependency *dependency; /* actual dependency */
581 unsigned specific:1, /* is this a specific bit/regno? */
582 link_to_qp_branch:1; /* will a branch on the same QP clear it?*/
583 int index; /* specific regno/bit within dependency */
584 int note; /* optional qualifying note (0 if none) */
585 #define STATE_NONE 0
586 #define STATE_STOP 1
587 #define STATE_SRLZ 2
588 int insn_srlz; /* current insn serialization state */
589 int data_srlz; /* current data serialization state */
590 int qp_regno; /* qualifying predicate for this usage */
591 char *file; /* what file marked this dependency */
592 unsigned int line; /* what line marked this dependency */
593 struct mem_offset mem_offset; /* optional memory offset hint */
594 enum { CMP_NONE, CMP_OR, CMP_AND } cmp_type; /* OR or AND compare? */
595 int path; /* corresponding code entry index */
596 } *regdeps = NULL;
597 static int regdepslen = 0;
598 static int regdepstotlen = 0;
599 static const char *dv_mode[] = { "RAW", "WAW", "WAR" };
600 static const char *dv_sem[] = { "none", "implied", "impliedf",
601 "data", "instr", "specific", "stop", "other" };
602 static const char *dv_cmp_type[] = { "none", "OR", "AND" };
603
604 /* Current state of PR mutexation */
605 static struct qpmutex {
606 valueT prmask;
607 int path;
608 } *qp_mutexes = NULL; /* QP mutex bitmasks */
609 static int qp_mutexeslen = 0;
610 static int qp_mutexestotlen = 0;
611 static valueT qp_safe_across_calls = 0;
612
613 /* Current state of PR implications */
614 static struct qp_imply {
615 unsigned p1:6;
616 unsigned p2:6;
617 unsigned p2_branched:1;
618 int path;
619 } *qp_implies = NULL;
620 static int qp_implieslen = 0;
621 static int qp_impliestotlen = 0;
622
623 /* Keep track of static GR values so that indirect register usage can
624 sometimes be tracked. */
625 static struct gr {
626 unsigned known:1;
627 int path;
628 valueT value;
629 } gr_values[128] = {{ 1, 0, 0 }};
630
631 /* These are the routines required to output the various types of
632 unwind records. */
633
634 /* A slot_number is a frag address plus the slot index (0-2). We use the
635 frag address here so that if there is a section switch in the middle of
636 a function, then instructions emitted to a different section are not
637 counted. Since there may be more than one frag for a function, this
638 means we also need to keep track of which frag this address belongs to
639 so we can compute inter-frag distances. This also nicely solves the
640 problem with nops emitted for align directives, which can't easily be
641 counted, but can easily be derived from frag sizes. */
642
643 typedef struct unw_rec_list {
644 unwind_record r;
645 unsigned long slot_number;
646 fragS *slot_frag;
647 struct unw_rec_list *next;
648 } unw_rec_list;
649
650 #define SLOT_NUM_NOT_SET (unsigned)-1
651
652 /* Linked list of saved prologue counts. A very poor
653 implementation of a map from label numbers to prologue counts. */
654 typedef struct label_prologue_count
655 {
656 struct label_prologue_count *next;
657 unsigned long label_number;
658 unsigned int prologue_count;
659 } label_prologue_count;
660
661 static struct
662 {
663 unsigned long next_slot_number;
664 fragS *next_slot_frag;
665
666 /* Maintain a list of unwind entries for the current function. */
667 unw_rec_list *list;
668 unw_rec_list *tail;
669
670 /* Any unwind entires that should be attached to the current slot
671 that an insn is being constructed for. */
672 unw_rec_list *current_entry;
673
674 /* These are used to create the unwind table entry for this function. */
675 symbolS *proc_start;
676 symbolS *proc_end;
677 symbolS *info; /* pointer to unwind info */
678 symbolS *personality_routine;
679 segT saved_text_seg;
680 subsegT saved_text_subseg;
681 unsigned int force_unwind_entry : 1; /* force generation of unwind entry? */
682
683 /* TRUE if processing unwind directives in a prologue region. */
684 int prologue;
685 int prologue_mask;
686 unsigned int prologue_count; /* number of .prologues seen so far */
687 /* Prologue counts at previous .label_state directives. */
688 struct label_prologue_count * saved_prologue_counts;
689 } unwind;
690
691 typedef void (*vbyte_func) PARAMS ((int, char *, char *));
692
693 /* Forward delarations: */
694 static int ar_is_in_integer_unit PARAMS ((int regnum));
695 static void set_section PARAMS ((char *name));
696 static unsigned int set_regstack PARAMS ((unsigned int, unsigned int,
697 unsigned int, unsigned int));
698 static void dot_radix PARAMS ((int));
699 static void dot_special_section PARAMS ((int));
700 static void dot_proc PARAMS ((int));
701 static void dot_fframe PARAMS ((int));
702 static void dot_vframe PARAMS ((int));
703 static void dot_vframesp PARAMS ((int));
704 static void dot_vframepsp PARAMS ((int));
705 static void dot_save PARAMS ((int));
706 static void dot_restore PARAMS ((int));
707 static void dot_restorereg PARAMS ((int));
708 static void dot_restorereg_p PARAMS ((int));
709 static void dot_handlerdata PARAMS ((int));
710 static void dot_unwentry PARAMS ((int));
711 static void dot_altrp PARAMS ((int));
712 static void dot_savemem PARAMS ((int));
713 static void dot_saveg PARAMS ((int));
714 static void dot_savef PARAMS ((int));
715 static void dot_saveb PARAMS ((int));
716 static void dot_savegf PARAMS ((int));
717 static void dot_spill PARAMS ((int));
718 static void dot_spillreg PARAMS ((int));
719 static void dot_spillmem PARAMS ((int));
720 static void dot_spillreg_p PARAMS ((int));
721 static void dot_spillmem_p PARAMS ((int));
722 static void dot_label_state PARAMS ((int));
723 static void dot_copy_state PARAMS ((int));
724 static void dot_unwabi PARAMS ((int));
725 static void dot_personality PARAMS ((int));
726 static void dot_body PARAMS ((int));
727 static void dot_prologue PARAMS ((int));
728 static void dot_endp PARAMS ((int));
729 static void dot_template PARAMS ((int));
730 static void dot_regstk PARAMS ((int));
731 static void dot_rot PARAMS ((int));
732 static void dot_byteorder PARAMS ((int));
733 static void dot_psr PARAMS ((int));
734 static void dot_alias PARAMS ((int));
735 static void dot_ln PARAMS ((int));
736 static char *parse_section_name PARAMS ((void));
737 static void dot_xdata PARAMS ((int));
738 static void stmt_float_cons PARAMS ((int));
739 static void stmt_cons_ua PARAMS ((int));
740 static void dot_xfloat_cons PARAMS ((int));
741 static void dot_xstringer PARAMS ((int));
742 static void dot_xdata_ua PARAMS ((int));
743 static void dot_xfloat_cons_ua PARAMS ((int));
744 static void print_prmask PARAMS ((valueT mask));
745 static void dot_pred_rel PARAMS ((int));
746 static void dot_reg_val PARAMS ((int));
747 static void dot_dv_mode PARAMS ((int));
748 static void dot_entry PARAMS ((int));
749 static void dot_mem_offset PARAMS ((int));
750 static void add_unwind_entry PARAMS((unw_rec_list *ptr));
751 static symbolS *declare_register PARAMS ((const char *name, int regnum));
752 static void declare_register_set PARAMS ((const char *, int, int));
753 static unsigned int operand_width PARAMS ((enum ia64_opnd));
754 static enum operand_match_result operand_match PARAMS ((const struct ia64_opcode *idesc,
755 int index,
756 expressionS *e));
757 static int parse_operand PARAMS ((expressionS *e));
758 static struct ia64_opcode * parse_operands PARAMS ((struct ia64_opcode *));
759 static int errata_nop_necessary_p PARAMS ((struct slot *, enum ia64_unit));
760 static void build_insn PARAMS ((struct slot *, bfd_vma *));
761 static void emit_one_bundle PARAMS ((void));
762 static void fix_insn PARAMS ((fixS *, const struct ia64_operand *, valueT));
763 static bfd_reloc_code_real_type ia64_gen_real_reloc_type PARAMS ((struct symbol *sym,
764 bfd_reloc_code_real_type r_type));
765 static void insn_group_break PARAMS ((int, int, int));
766 static void mark_resource PARAMS ((struct ia64_opcode *, const struct ia64_dependency *,
767 struct rsrc *, int depind, int path));
768 static void add_qp_mutex PARAMS((valueT mask));
769 static void add_qp_imply PARAMS((int p1, int p2));
770 static void clear_qp_branch_flag PARAMS((valueT mask));
771 static void clear_qp_mutex PARAMS((valueT mask));
772 static void clear_qp_implies PARAMS((valueT p1_mask, valueT p2_mask));
773 static int has_suffix_p PARAMS((const char *, const char *));
774 static void clear_register_values PARAMS ((void));
775 static void print_dependency PARAMS ((const char *action, int depind));
776 static void instruction_serialization PARAMS ((void));
777 static void data_serialization PARAMS ((void));
778 static void remove_marked_resource PARAMS ((struct rsrc *));
779 static int is_conditional_branch PARAMS ((struct ia64_opcode *));
780 static int is_taken_branch PARAMS ((struct ia64_opcode *));
781 static int is_interruption_or_rfi PARAMS ((struct ia64_opcode *));
782 static int depends_on PARAMS ((int, struct ia64_opcode *));
783 static int specify_resource PARAMS ((const struct ia64_dependency *,
784 struct ia64_opcode *, int, struct rsrc [], int, int));
785 static int check_dv PARAMS((struct ia64_opcode *idesc));
786 static void check_dependencies PARAMS((struct ia64_opcode *));
787 static void mark_resources PARAMS((struct ia64_opcode *));
788 static void update_dependencies PARAMS((struct ia64_opcode *));
789 static void note_register_values PARAMS((struct ia64_opcode *));
790 static int qp_mutex PARAMS ((int, int, int));
791 static int resources_match PARAMS ((struct rsrc *, struct ia64_opcode *, int, int, int));
792 static void output_vbyte_mem PARAMS ((int, char *, char *));
793 static void count_output PARAMS ((int, char *, char *));
794 static void output_R1_format PARAMS ((vbyte_func, unw_record_type, int));
795 static void output_R2_format PARAMS ((vbyte_func, int, int, unsigned long));
796 static void output_R3_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
797 static void output_P1_format PARAMS ((vbyte_func, int));
798 static void output_P2_format PARAMS ((vbyte_func, int, int));
799 static void output_P3_format PARAMS ((vbyte_func, unw_record_type, int));
800 static void output_P4_format PARAMS ((vbyte_func, unsigned char *, unsigned long));
801 static void output_P5_format PARAMS ((vbyte_func, int, unsigned long));
802 static void output_P6_format PARAMS ((vbyte_func, unw_record_type, int));
803 static void output_P7_format PARAMS ((vbyte_func, unw_record_type, unsigned long, unsigned long));
804 static void output_P8_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
805 static void output_P9_format PARAMS ((vbyte_func, int, int));
806 static void output_P10_format PARAMS ((vbyte_func, int, int));
807 static void output_B1_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
808 static void output_B2_format PARAMS ((vbyte_func, unsigned long, unsigned long));
809 static void output_B3_format PARAMS ((vbyte_func, unsigned long, unsigned long));
810 static void output_B4_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
811 static char format_ab_reg PARAMS ((int, int));
812 static void output_X1_format PARAMS ((vbyte_func, unw_record_type, int, int, unsigned long,
813 unsigned long));
814 static void output_X2_format PARAMS ((vbyte_func, int, int, int, int, int, unsigned long));
815 static void output_X3_format PARAMS ((vbyte_func, unw_record_type, int, int, int, unsigned long,
816 unsigned long));
817 static void output_X4_format PARAMS ((vbyte_func, int, int, int, int, int, int, unsigned long));
818 static void free_list_records PARAMS ((unw_rec_list *));
819 static unw_rec_list *output_prologue PARAMS ((void));
820 static unw_rec_list *output_prologue_gr PARAMS ((unsigned int, unsigned int));
821 static unw_rec_list *output_body PARAMS ((void));
822 static unw_rec_list *output_mem_stack_f PARAMS ((unsigned int));
823 static unw_rec_list *output_mem_stack_v PARAMS ((void));
824 static unw_rec_list *output_psp_gr PARAMS ((unsigned int));
825 static unw_rec_list *output_psp_sprel PARAMS ((unsigned int));
826 static unw_rec_list *output_rp_when PARAMS ((void));
827 static unw_rec_list *output_rp_gr PARAMS ((unsigned int));
828 static unw_rec_list *output_rp_br PARAMS ((unsigned int));
829 static unw_rec_list *output_rp_psprel PARAMS ((unsigned int));
830 static unw_rec_list *output_rp_sprel PARAMS ((unsigned int));
831 static unw_rec_list *output_pfs_when PARAMS ((void));
832 static unw_rec_list *output_pfs_gr PARAMS ((unsigned int));
833 static unw_rec_list *output_pfs_psprel PARAMS ((unsigned int));
834 static unw_rec_list *output_pfs_sprel PARAMS ((unsigned int));
835 static unw_rec_list *output_preds_when PARAMS ((void));
836 static unw_rec_list *output_preds_gr PARAMS ((unsigned int));
837 static unw_rec_list *output_preds_psprel PARAMS ((unsigned int));
838 static unw_rec_list *output_preds_sprel PARAMS ((unsigned int));
839 static unw_rec_list *output_fr_mem PARAMS ((unsigned int));
840 static unw_rec_list *output_frgr_mem PARAMS ((unsigned int, unsigned int));
841 static unw_rec_list *output_gr_gr PARAMS ((unsigned int, unsigned int));
842 static unw_rec_list *output_gr_mem PARAMS ((unsigned int));
843 static unw_rec_list *output_br_mem PARAMS ((unsigned int));
844 static unw_rec_list *output_br_gr PARAMS ((unsigned int, unsigned int));
845 static unw_rec_list *output_spill_base PARAMS ((unsigned int));
846 static unw_rec_list *output_unat_when PARAMS ((void));
847 static unw_rec_list *output_unat_gr PARAMS ((unsigned int));
848 static unw_rec_list *output_unat_psprel PARAMS ((unsigned int));
849 static unw_rec_list *output_unat_sprel PARAMS ((unsigned int));
850 static unw_rec_list *output_lc_when PARAMS ((void));
851 static unw_rec_list *output_lc_gr PARAMS ((unsigned int));
852 static unw_rec_list *output_lc_psprel PARAMS ((unsigned int));
853 static unw_rec_list *output_lc_sprel PARAMS ((unsigned int));
854 static unw_rec_list *output_fpsr_when PARAMS ((void));
855 static unw_rec_list *output_fpsr_gr PARAMS ((unsigned int));
856 static unw_rec_list *output_fpsr_psprel PARAMS ((unsigned int));
857 static unw_rec_list *output_fpsr_sprel PARAMS ((unsigned int));
858 static unw_rec_list *output_priunat_when_gr PARAMS ((void));
859 static unw_rec_list *output_priunat_when_mem PARAMS ((void));
860 static unw_rec_list *output_priunat_gr PARAMS ((unsigned int));
861 static unw_rec_list *output_priunat_psprel PARAMS ((unsigned int));
862 static unw_rec_list *output_priunat_sprel PARAMS ((unsigned int));
863 static unw_rec_list *output_bsp_when PARAMS ((void));
864 static unw_rec_list *output_bsp_gr PARAMS ((unsigned int));
865 static unw_rec_list *output_bsp_psprel PARAMS ((unsigned int));
866 static unw_rec_list *output_bsp_sprel PARAMS ((unsigned int));
867 static unw_rec_list *output_bspstore_when PARAMS ((void));
868 static unw_rec_list *output_bspstore_gr PARAMS ((unsigned int));
869 static unw_rec_list *output_bspstore_psprel PARAMS ((unsigned int));
870 static unw_rec_list *output_bspstore_sprel PARAMS ((unsigned int));
871 static unw_rec_list *output_rnat_when PARAMS ((void));
872 static unw_rec_list *output_rnat_gr PARAMS ((unsigned int));
873 static unw_rec_list *output_rnat_psprel PARAMS ((unsigned int));
874 static unw_rec_list *output_rnat_sprel PARAMS ((unsigned int));
875 static unw_rec_list *output_unwabi PARAMS ((unsigned long, unsigned long));
876 static unw_rec_list *output_epilogue PARAMS ((unsigned long));
877 static unw_rec_list *output_label_state PARAMS ((unsigned long));
878 static unw_rec_list *output_copy_state PARAMS ((unsigned long));
879 static unw_rec_list *output_spill_psprel PARAMS ((unsigned int, unsigned int, unsigned int));
880 static unw_rec_list *output_spill_sprel PARAMS ((unsigned int, unsigned int, unsigned int));
881 static unw_rec_list *output_spill_psprel_p PARAMS ((unsigned int, unsigned int, unsigned int,
882 unsigned int));
883 static unw_rec_list *output_spill_sprel_p PARAMS ((unsigned int, unsigned int, unsigned int,
884 unsigned int));
885 static unw_rec_list *output_spill_reg PARAMS ((unsigned int, unsigned int, unsigned int,
886 unsigned int));
887 static unw_rec_list *output_spill_reg_p PARAMS ((unsigned int, unsigned int, unsigned int,
888 unsigned int, unsigned int));
889 static void process_one_record PARAMS ((unw_rec_list *, vbyte_func));
890 static void process_unw_records PARAMS ((unw_rec_list *, vbyte_func));
891 static int calc_record_size PARAMS ((unw_rec_list *));
892 static void set_imask PARAMS ((unw_rec_list *, unsigned long, unsigned long, unsigned int));
893 static int count_bits PARAMS ((unsigned long));
894 static unsigned long slot_index PARAMS ((unsigned long, fragS *,
895 unsigned long, fragS *));
896 static unw_rec_list *optimize_unw_records PARAMS ((unw_rec_list *));
897 static void fixup_unw_records PARAMS ((unw_rec_list *));
898 static int output_unw_records PARAMS ((unw_rec_list *, void **));
899 static int convert_expr_to_ab_reg PARAMS ((expressionS *, unsigned int *, unsigned int *));
900 static int convert_expr_to_xy_reg PARAMS ((expressionS *, unsigned int *, unsigned int *));
901 static int generate_unwind_image PARAMS ((const char *));
902 static unsigned int get_saved_prologue_count PARAMS ((unsigned long));
903 static void save_prologue_count PARAMS ((unsigned long, unsigned int));
904 static void free_saved_prologue_counts PARAMS ((void));
905
906 /* Build the unwind section name by appending the (possibly stripped)
907 text section NAME to the unwind PREFIX. The resulting string
908 pointer is assigned to RESULT. The string is allocated on the
909 stack, so this must be a macro... */
910 #define make_unw_section_name(special, text_name, result) \
911 { \
912 const char *_prefix = special_section_name[special]; \
913 const char *_suffix = text_name; \
914 size_t _prefix_len, _suffix_len; \
915 char *_result; \
916 if (strncmp (text_name, ".gnu.linkonce.t.", \
917 sizeof (".gnu.linkonce.t.") - 1) == 0) \
918 { \
919 _prefix = special_linkonce_name[special - SPECIAL_SECTION_UNWIND]; \
920 _suffix += sizeof (".gnu.linkonce.t.") - 1; \
921 } \
922 _prefix_len = strlen (_prefix), _suffix_len = strlen (_suffix); \
923 _result = alloca (_prefix_len + _suffix_len + 1); \
924 memcpy (_result, _prefix, _prefix_len); \
925 memcpy (_result + _prefix_len, _suffix, _suffix_len); \
926 _result[_prefix_len + _suffix_len] = '\0'; \
927 result = _result; \
928 } \
929 while (0)
930
931 /* Determine if application register REGNUM resides in the integer
932 unit (as opposed to the memory unit). */
933 static int
934 ar_is_in_integer_unit (reg)
935 int reg;
936 {
937 reg -= REG_AR;
938
939 return (reg == 64 /* pfs */
940 || reg == 65 /* lc */
941 || reg == 66 /* ec */
942 /* ??? ias accepts and puts these in the integer unit. */
943 || (reg >= 112 && reg <= 127));
944 }
945
946 /* Switch to section NAME and create section if necessary. It's
947 rather ugly that we have to manipulate input_line_pointer but I
948 don't see any other way to accomplish the same thing without
949 changing obj-elf.c (which may be the Right Thing, in the end). */
950 static void
951 set_section (name)
952 char *name;
953 {
954 char *saved_input_line_pointer;
955
956 saved_input_line_pointer = input_line_pointer;
957 input_line_pointer = name;
958 obj_elf_section (0);
959 input_line_pointer = saved_input_line_pointer;
960 }
961
962 /* Map 's' to SHF_IA_64_SHORT. */
963
964 int
965 ia64_elf_section_letter (letter, ptr_msg)
966 int letter;
967 char **ptr_msg;
968 {
969 if (letter == 's')
970 return SHF_IA_64_SHORT;
971 else if (letter == 'o')
972 return SHF_LINK_ORDER;
973
974 *ptr_msg = _("Bad .section directive: want a,o,s,w,x,M,S,G,T in string");
975 return -1;
976 }
977
978 /* Map SHF_IA_64_SHORT to SEC_SMALL_DATA. */
979
980 flagword
981 ia64_elf_section_flags (flags, attr, type)
982 flagword flags;
983 int attr, type ATTRIBUTE_UNUSED;
984 {
985 if (attr & SHF_IA_64_SHORT)
986 flags |= SEC_SMALL_DATA;
987 return flags;
988 }
989
990 int
991 ia64_elf_section_type (str, len)
992 const char *str;
993 size_t len;
994 {
995 #define STREQ(s) ((len == sizeof (s) - 1) && (strncmp (str, s, sizeof (s) - 1) == 0))
996
997 if (STREQ (ELF_STRING_ia64_unwind_info))
998 return SHT_PROGBITS;
999
1000 if (STREQ (ELF_STRING_ia64_unwind_info_once))
1001 return SHT_PROGBITS;
1002
1003 if (STREQ (ELF_STRING_ia64_unwind))
1004 return SHT_IA_64_UNWIND;
1005
1006 if (STREQ (ELF_STRING_ia64_unwind_once))
1007 return SHT_IA_64_UNWIND;
1008
1009 if (STREQ ("unwind"))
1010 return SHT_IA_64_UNWIND;
1011
1012 if (STREQ ("init_array"))
1013 return SHT_INIT_ARRAY;
1014
1015 if (STREQ ("fini_array"))
1016 return SHT_FINI_ARRAY;
1017
1018 return -1;
1019 #undef STREQ
1020 }
1021
1022 static unsigned int
1023 set_regstack (ins, locs, outs, rots)
1024 unsigned int ins, locs, outs, rots;
1025 {
1026 /* Size of frame. */
1027 unsigned int sof;
1028
1029 sof = ins + locs + outs;
1030 if (sof > 96)
1031 {
1032 as_bad ("Size of frame exceeds maximum of 96 registers");
1033 return 0;
1034 }
1035 if (rots > sof)
1036 {
1037 as_warn ("Size of rotating registers exceeds frame size");
1038 return 0;
1039 }
1040 md.in.base = REG_GR + 32;
1041 md.loc.base = md.in.base + ins;
1042 md.out.base = md.loc.base + locs;
1043
1044 md.in.num_regs = ins;
1045 md.loc.num_regs = locs;
1046 md.out.num_regs = outs;
1047 md.rot.num_regs = rots;
1048 return sof;
1049 }
1050
1051 void
1052 ia64_flush_insns ()
1053 {
1054 struct label_fix *lfix;
1055 segT saved_seg;
1056 subsegT saved_subseg;
1057 unw_rec_list *ptr;
1058
1059 if (!md.last_text_seg)
1060 return;
1061
1062 saved_seg = now_seg;
1063 saved_subseg = now_subseg;
1064
1065 subseg_set (md.last_text_seg, 0);
1066
1067 while (md.num_slots_in_use > 0)
1068 emit_one_bundle (); /* force out queued instructions */
1069
1070 /* In case there are labels following the last instruction, resolve
1071 those now: */
1072 for (lfix = CURR_SLOT.label_fixups; lfix; lfix = lfix->next)
1073 {
1074 S_SET_VALUE (lfix->sym, frag_now_fix ());
1075 symbol_set_frag (lfix->sym, frag_now);
1076 }
1077 CURR_SLOT.label_fixups = 0;
1078 for (lfix = CURR_SLOT.tag_fixups; lfix; lfix = lfix->next)
1079 {
1080 S_SET_VALUE (lfix->sym, frag_now_fix ());
1081 symbol_set_frag (lfix->sym, frag_now);
1082 }
1083 CURR_SLOT.tag_fixups = 0;
1084
1085 /* In case there are unwind directives following the last instruction,
1086 resolve those now. We only handle body and prologue directives here.
1087 Give an error for others. */
1088 for (ptr = unwind.current_entry; ptr; ptr = ptr->next)
1089 {
1090 if (ptr->r.type == prologue || ptr->r.type == prologue_gr
1091 || ptr->r.type == body)
1092 {
1093 ptr->slot_number = (unsigned long) frag_more (0);
1094 ptr->slot_frag = frag_now;
1095 }
1096 else
1097 as_bad (_("Unwind directive not followed by an instruction."));
1098 }
1099 unwind.current_entry = NULL;
1100
1101 subseg_set (saved_seg, saved_subseg);
1102
1103 if (md.qp.X_op == O_register)
1104 as_bad ("qualifying predicate not followed by instruction");
1105 }
1106
1107 void
1108 ia64_do_align (nbytes)
1109 int nbytes;
1110 {
1111 char *saved_input_line_pointer = input_line_pointer;
1112
1113 input_line_pointer = "";
1114 s_align_bytes (nbytes);
1115 input_line_pointer = saved_input_line_pointer;
1116 }
1117
1118 void
1119 ia64_cons_align (nbytes)
1120 int nbytes;
1121 {
1122 if (md.auto_align)
1123 {
1124 char *saved_input_line_pointer = input_line_pointer;
1125 input_line_pointer = "";
1126 s_align_bytes (nbytes);
1127 input_line_pointer = saved_input_line_pointer;
1128 }
1129 }
1130
1131 /* Output COUNT bytes to a memory location. */
1132 static unsigned char *vbyte_mem_ptr = NULL;
1133
1134 void
1135 output_vbyte_mem (count, ptr, comment)
1136 int count;
1137 char *ptr;
1138 char *comment ATTRIBUTE_UNUSED;
1139 {
1140 int x;
1141 if (vbyte_mem_ptr == NULL)
1142 abort ();
1143
1144 if (count == 0)
1145 return;
1146 for (x = 0; x < count; x++)
1147 *(vbyte_mem_ptr++) = ptr[x];
1148 }
1149
1150 /* Count the number of bytes required for records. */
1151 static int vbyte_count = 0;
1152 void
1153 count_output (count, ptr, comment)
1154 int count;
1155 char *ptr ATTRIBUTE_UNUSED;
1156 char *comment ATTRIBUTE_UNUSED;
1157 {
1158 vbyte_count += count;
1159 }
1160
1161 static void
1162 output_R1_format (f, rtype, rlen)
1163 vbyte_func f;
1164 unw_record_type rtype;
1165 int rlen;
1166 {
1167 int r = 0;
1168 char byte;
1169 if (rlen > 0x1f)
1170 {
1171 output_R3_format (f, rtype, rlen);
1172 return;
1173 }
1174
1175 if (rtype == body)
1176 r = 1;
1177 else if (rtype != prologue)
1178 as_bad ("record type is not valid");
1179
1180 byte = UNW_R1 | (r << 5) | (rlen & 0x1f);
1181 (*f) (1, &byte, NULL);
1182 }
1183
1184 static void
1185 output_R2_format (f, mask, grsave, rlen)
1186 vbyte_func f;
1187 int mask, grsave;
1188 unsigned long rlen;
1189 {
1190 char bytes[20];
1191 int count = 2;
1192 mask = (mask & 0x0f);
1193 grsave = (grsave & 0x7f);
1194
1195 bytes[0] = (UNW_R2 | (mask >> 1));
1196 bytes[1] = (((mask & 0x01) << 7) | grsave);
1197 count += output_leb128 (bytes + 2, rlen, 0);
1198 (*f) (count, bytes, NULL);
1199 }
1200
1201 static void
1202 output_R3_format (f, rtype, rlen)
1203 vbyte_func f;
1204 unw_record_type rtype;
1205 unsigned long rlen;
1206 {
1207 int r = 0, count;
1208 char bytes[20];
1209 if (rlen <= 0x1f)
1210 {
1211 output_R1_format (f, rtype, rlen);
1212 return;
1213 }
1214
1215 if (rtype == body)
1216 r = 1;
1217 else if (rtype != prologue)
1218 as_bad ("record type is not valid");
1219 bytes[0] = (UNW_R3 | r);
1220 count = output_leb128 (bytes + 1, rlen, 0);
1221 (*f) (count + 1, bytes, NULL);
1222 }
1223
1224 static void
1225 output_P1_format (f, brmask)
1226 vbyte_func f;
1227 int brmask;
1228 {
1229 char byte;
1230 byte = UNW_P1 | (brmask & 0x1f);
1231 (*f) (1, &byte, NULL);
1232 }
1233
1234 static void
1235 output_P2_format (f, brmask, gr)
1236 vbyte_func f;
1237 int brmask;
1238 int gr;
1239 {
1240 char bytes[2];
1241 brmask = (brmask & 0x1f);
1242 bytes[0] = UNW_P2 | (brmask >> 1);
1243 bytes[1] = (((brmask & 1) << 7) | gr);
1244 (*f) (2, bytes, NULL);
1245 }
1246
1247 static void
1248 output_P3_format (f, rtype, reg)
1249 vbyte_func f;
1250 unw_record_type rtype;
1251 int reg;
1252 {
1253 char bytes[2];
1254 int r = 0;
1255 reg = (reg & 0x7f);
1256 switch (rtype)
1257 {
1258 case psp_gr:
1259 r = 0;
1260 break;
1261 case rp_gr:
1262 r = 1;
1263 break;
1264 case pfs_gr:
1265 r = 2;
1266 break;
1267 case preds_gr:
1268 r = 3;
1269 break;
1270 case unat_gr:
1271 r = 4;
1272 break;
1273 case lc_gr:
1274 r = 5;
1275 break;
1276 case rp_br:
1277 r = 6;
1278 break;
1279 case rnat_gr:
1280 r = 7;
1281 break;
1282 case bsp_gr:
1283 r = 8;
1284 break;
1285 case bspstore_gr:
1286 r = 9;
1287 break;
1288 case fpsr_gr:
1289 r = 10;
1290 break;
1291 case priunat_gr:
1292 r = 11;
1293 break;
1294 default:
1295 as_bad ("Invalid record type for P3 format.");
1296 }
1297 bytes[0] = (UNW_P3 | (r >> 1));
1298 bytes[1] = (((r & 1) << 7) | reg);
1299 (*f) (2, bytes, NULL);
1300 }
1301
1302 static void
1303 output_P4_format (f, imask, imask_size)
1304 vbyte_func f;
1305 unsigned char *imask;
1306 unsigned long imask_size;
1307 {
1308 imask[0] = UNW_P4;
1309 (*f) (imask_size, imask, NULL);
1310 }
1311
1312 static void
1313 output_P5_format (f, grmask, frmask)
1314 vbyte_func f;
1315 int grmask;
1316 unsigned long frmask;
1317 {
1318 char bytes[4];
1319 grmask = (grmask & 0x0f);
1320
1321 bytes[0] = UNW_P5;
1322 bytes[1] = ((grmask << 4) | ((frmask & 0x000f0000) >> 16));
1323 bytes[2] = ((frmask & 0x0000ff00) >> 8);
1324 bytes[3] = (frmask & 0x000000ff);
1325 (*f) (4, bytes, NULL);
1326 }
1327
1328 static void
1329 output_P6_format (f, rtype, rmask)
1330 vbyte_func f;
1331 unw_record_type rtype;
1332 int rmask;
1333 {
1334 char byte;
1335 int r = 0;
1336
1337 if (rtype == gr_mem)
1338 r = 1;
1339 else if (rtype != fr_mem)
1340 as_bad ("Invalid record type for format P6");
1341 byte = (UNW_P6 | (r << 4) | (rmask & 0x0f));
1342 (*f) (1, &byte, NULL);
1343 }
1344
1345 static void
1346 output_P7_format (f, rtype, w1, w2)
1347 vbyte_func f;
1348 unw_record_type rtype;
1349 unsigned long w1;
1350 unsigned long w2;
1351 {
1352 char bytes[20];
1353 int count = 1;
1354 int r = 0;
1355 count += output_leb128 (bytes + 1, w1, 0);
1356 switch (rtype)
1357 {
1358 case mem_stack_f:
1359 r = 0;
1360 count += output_leb128 (bytes + count, w2 >> 4, 0);
1361 break;
1362 case mem_stack_v:
1363 r = 1;
1364 break;
1365 case spill_base:
1366 r = 2;
1367 break;
1368 case psp_sprel:
1369 r = 3;
1370 break;
1371 case rp_when:
1372 r = 4;
1373 break;
1374 case rp_psprel:
1375 r = 5;
1376 break;
1377 case pfs_when:
1378 r = 6;
1379 break;
1380 case pfs_psprel:
1381 r = 7;
1382 break;
1383 case preds_when:
1384 r = 8;
1385 break;
1386 case preds_psprel:
1387 r = 9;
1388 break;
1389 case lc_when:
1390 r = 10;
1391 break;
1392 case lc_psprel:
1393 r = 11;
1394 break;
1395 case unat_when:
1396 r = 12;
1397 break;
1398 case unat_psprel:
1399 r = 13;
1400 break;
1401 case fpsr_when:
1402 r = 14;
1403 break;
1404 case fpsr_psprel:
1405 r = 15;
1406 break;
1407 default:
1408 break;
1409 }
1410 bytes[0] = (UNW_P7 | r);
1411 (*f) (count, bytes, NULL);
1412 }
1413
1414 static void
1415 output_P8_format (f, rtype, t)
1416 vbyte_func f;
1417 unw_record_type rtype;
1418 unsigned long t;
1419 {
1420 char bytes[20];
1421 int r = 0;
1422 int count = 2;
1423 bytes[0] = UNW_P8;
1424 switch (rtype)
1425 {
1426 case rp_sprel:
1427 r = 1;
1428 break;
1429 case pfs_sprel:
1430 r = 2;
1431 break;
1432 case preds_sprel:
1433 r = 3;
1434 break;
1435 case lc_sprel:
1436 r = 4;
1437 break;
1438 case unat_sprel:
1439 r = 5;
1440 break;
1441 case fpsr_sprel:
1442 r = 6;
1443 break;
1444 case bsp_when:
1445 r = 7;
1446 break;
1447 case bsp_psprel:
1448 r = 8;
1449 break;
1450 case bsp_sprel:
1451 r = 9;
1452 break;
1453 case bspstore_when:
1454 r = 10;
1455 break;
1456 case bspstore_psprel:
1457 r = 11;
1458 break;
1459 case bspstore_sprel:
1460 r = 12;
1461 break;
1462 case rnat_when:
1463 r = 13;
1464 break;
1465 case rnat_psprel:
1466 r = 14;
1467 break;
1468 case rnat_sprel:
1469 r = 15;
1470 break;
1471 case priunat_when_gr:
1472 r = 16;
1473 break;
1474 case priunat_psprel:
1475 r = 17;
1476 break;
1477 case priunat_sprel:
1478 r = 18;
1479 break;
1480 case priunat_when_mem:
1481 r = 19;
1482 break;
1483 default:
1484 break;
1485 }
1486 bytes[1] = r;
1487 count += output_leb128 (bytes + 2, t, 0);
1488 (*f) (count, bytes, NULL);
1489 }
1490
1491 static void
1492 output_P9_format (f, grmask, gr)
1493 vbyte_func f;
1494 int grmask;
1495 int gr;
1496 {
1497 char bytes[3];
1498 bytes[0] = UNW_P9;
1499 bytes[1] = (grmask & 0x0f);
1500 bytes[2] = (gr & 0x7f);
1501 (*f) (3, bytes, NULL);
1502 }
1503
1504 static void
1505 output_P10_format (f, abi, context)
1506 vbyte_func f;
1507 int abi;
1508 int context;
1509 {
1510 char bytes[3];
1511 bytes[0] = UNW_P10;
1512 bytes[1] = (abi & 0xff);
1513 bytes[2] = (context & 0xff);
1514 (*f) (3, bytes, NULL);
1515 }
1516
1517 static void
1518 output_B1_format (f, rtype, label)
1519 vbyte_func f;
1520 unw_record_type rtype;
1521 unsigned long label;
1522 {
1523 char byte;
1524 int r = 0;
1525 if (label > 0x1f)
1526 {
1527 output_B4_format (f, rtype, label);
1528 return;
1529 }
1530 if (rtype == copy_state)
1531 r = 1;
1532 else if (rtype != label_state)
1533 as_bad ("Invalid record type for format B1");
1534
1535 byte = (UNW_B1 | (r << 5) | (label & 0x1f));
1536 (*f) (1, &byte, NULL);
1537 }
1538
1539 static void
1540 output_B2_format (f, ecount, t)
1541 vbyte_func f;
1542 unsigned long ecount;
1543 unsigned long t;
1544 {
1545 char bytes[20];
1546 int count = 1;
1547 if (ecount > 0x1f)
1548 {
1549 output_B3_format (f, ecount, t);
1550 return;
1551 }
1552 bytes[0] = (UNW_B2 | (ecount & 0x1f));
1553 count += output_leb128 (bytes + 1, t, 0);
1554 (*f) (count, bytes, NULL);
1555 }
1556
1557 static void
1558 output_B3_format (f, ecount, t)
1559 vbyte_func f;
1560 unsigned long ecount;
1561 unsigned long t;
1562 {
1563 char bytes[20];
1564 int count = 1;
1565 if (ecount <= 0x1f)
1566 {
1567 output_B2_format (f, ecount, t);
1568 return;
1569 }
1570 bytes[0] = UNW_B3;
1571 count += output_leb128 (bytes + 1, t, 0);
1572 count += output_leb128 (bytes + count, ecount, 0);
1573 (*f) (count, bytes, NULL);
1574 }
1575
1576 static void
1577 output_B4_format (f, rtype, label)
1578 vbyte_func f;
1579 unw_record_type rtype;
1580 unsigned long label;
1581 {
1582 char bytes[20];
1583 int r = 0;
1584 int count = 1;
1585 if (label <= 0x1f)
1586 {
1587 output_B1_format (f, rtype, label);
1588 return;
1589 }
1590
1591 if (rtype == copy_state)
1592 r = 1;
1593 else if (rtype != label_state)
1594 as_bad ("Invalid record type for format B1");
1595
1596 bytes[0] = (UNW_B4 | (r << 3));
1597 count += output_leb128 (bytes + 1, label, 0);
1598 (*f) (count, bytes, NULL);
1599 }
1600
1601 static char
1602 format_ab_reg (ab, reg)
1603 int ab;
1604 int reg;
1605 {
1606 int ret;
1607 ab = (ab & 3);
1608 reg = (reg & 0x1f);
1609 ret = (ab << 5) | reg;
1610 return ret;
1611 }
1612
1613 static void
1614 output_X1_format (f, rtype, ab, reg, t, w1)
1615 vbyte_func f;
1616 unw_record_type rtype;
1617 int ab, reg;
1618 unsigned long t;
1619 unsigned long w1;
1620 {
1621 char bytes[20];
1622 int r = 0;
1623 int count = 2;
1624 bytes[0] = UNW_X1;
1625
1626 if (rtype == spill_sprel)
1627 r = 1;
1628 else if (rtype != spill_psprel)
1629 as_bad ("Invalid record type for format X1");
1630 bytes[1] = ((r << 7) | format_ab_reg (ab, reg));
1631 count += output_leb128 (bytes + 2, t, 0);
1632 count += output_leb128 (bytes + count, w1, 0);
1633 (*f) (count, bytes, NULL);
1634 }
1635
1636 static void
1637 output_X2_format (f, ab, reg, x, y, treg, t)
1638 vbyte_func f;
1639 int ab, reg;
1640 int x, y, treg;
1641 unsigned long t;
1642 {
1643 char bytes[20];
1644 int count = 3;
1645 bytes[0] = UNW_X2;
1646 bytes[1] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1647 bytes[2] = (((y & 1) << 7) | (treg & 0x7f));
1648 count += output_leb128 (bytes + 3, t, 0);
1649 (*f) (count, bytes, NULL);
1650 }
1651
1652 static void
1653 output_X3_format (f, rtype, qp, ab, reg, t, w1)
1654 vbyte_func f;
1655 unw_record_type rtype;
1656 int qp;
1657 int ab, reg;
1658 unsigned long t;
1659 unsigned long w1;
1660 {
1661 char bytes[20];
1662 int r = 0;
1663 int count = 3;
1664 bytes[0] = UNW_X3;
1665
1666 if (rtype == spill_sprel_p)
1667 r = 1;
1668 else if (rtype != spill_psprel_p)
1669 as_bad ("Invalid record type for format X3");
1670 bytes[1] = ((r << 7) | (qp & 0x3f));
1671 bytes[2] = format_ab_reg (ab, reg);
1672 count += output_leb128 (bytes + 3, t, 0);
1673 count += output_leb128 (bytes + count, w1, 0);
1674 (*f) (count, bytes, NULL);
1675 }
1676
1677 static void
1678 output_X4_format (f, qp, ab, reg, x, y, treg, t)
1679 vbyte_func f;
1680 int qp;
1681 int ab, reg;
1682 int x, y, treg;
1683 unsigned long t;
1684 {
1685 char bytes[20];
1686 int count = 4;
1687 bytes[0] = UNW_X4;
1688 bytes[1] = (qp & 0x3f);
1689 bytes[2] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1690 bytes[3] = (((y & 1) << 7) | (treg & 0x7f));
1691 count += output_leb128 (bytes + 4, t, 0);
1692 (*f) (count, bytes, NULL);
1693 }
1694
1695 /* This function allocates a record list structure, and initializes fields. */
1696
1697 static unw_rec_list *
1698 alloc_record (unw_record_type t)
1699 {
1700 unw_rec_list *ptr;
1701 ptr = xmalloc (sizeof (*ptr));
1702 ptr->next = NULL;
1703 ptr->slot_number = SLOT_NUM_NOT_SET;
1704 ptr->r.type = t;
1705 return ptr;
1706 }
1707
1708 /* This function frees an entire list of record structures. */
1709
1710 void
1711 free_list_records (unw_rec_list *first)
1712 {
1713 unw_rec_list *ptr;
1714 for (ptr = first; ptr != NULL;)
1715 {
1716 unw_rec_list *tmp = ptr;
1717
1718 if ((tmp->r.type == prologue || tmp->r.type == prologue_gr)
1719 && tmp->r.record.r.mask.i)
1720 free (tmp->r.record.r.mask.i);
1721
1722 ptr = ptr->next;
1723 free (tmp);
1724 }
1725 }
1726
1727 static unw_rec_list *
1728 output_prologue ()
1729 {
1730 unw_rec_list *ptr = alloc_record (prologue);
1731 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1732 return ptr;
1733 }
1734
1735 static unw_rec_list *
1736 output_prologue_gr (saved_mask, reg)
1737 unsigned int saved_mask;
1738 unsigned int reg;
1739 {
1740 unw_rec_list *ptr = alloc_record (prologue_gr);
1741 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1742 ptr->r.record.r.grmask = saved_mask;
1743 ptr->r.record.r.grsave = reg;
1744 return ptr;
1745 }
1746
1747 static unw_rec_list *
1748 output_body ()
1749 {
1750 unw_rec_list *ptr = alloc_record (body);
1751 return ptr;
1752 }
1753
1754 static unw_rec_list *
1755 output_mem_stack_f (size)
1756 unsigned int size;
1757 {
1758 unw_rec_list *ptr = alloc_record (mem_stack_f);
1759 ptr->r.record.p.size = size;
1760 return ptr;
1761 }
1762
1763 static unw_rec_list *
1764 output_mem_stack_v ()
1765 {
1766 unw_rec_list *ptr = alloc_record (mem_stack_v);
1767 return ptr;
1768 }
1769
1770 static unw_rec_list *
1771 output_psp_gr (gr)
1772 unsigned int gr;
1773 {
1774 unw_rec_list *ptr = alloc_record (psp_gr);
1775 ptr->r.record.p.gr = gr;
1776 return ptr;
1777 }
1778
1779 static unw_rec_list *
1780 output_psp_sprel (offset)
1781 unsigned int offset;
1782 {
1783 unw_rec_list *ptr = alloc_record (psp_sprel);
1784 ptr->r.record.p.spoff = offset / 4;
1785 return ptr;
1786 }
1787
1788 static unw_rec_list *
1789 output_rp_when ()
1790 {
1791 unw_rec_list *ptr = alloc_record (rp_when);
1792 return ptr;
1793 }
1794
1795 static unw_rec_list *
1796 output_rp_gr (gr)
1797 unsigned int gr;
1798 {
1799 unw_rec_list *ptr = alloc_record (rp_gr);
1800 ptr->r.record.p.gr = gr;
1801 return ptr;
1802 }
1803
1804 static unw_rec_list *
1805 output_rp_br (br)
1806 unsigned int br;
1807 {
1808 unw_rec_list *ptr = alloc_record (rp_br);
1809 ptr->r.record.p.br = br;
1810 return ptr;
1811 }
1812
1813 static unw_rec_list *
1814 output_rp_psprel (offset)
1815 unsigned int offset;
1816 {
1817 unw_rec_list *ptr = alloc_record (rp_psprel);
1818 ptr->r.record.p.pspoff = offset / 4;
1819 return ptr;
1820 }
1821
1822 static unw_rec_list *
1823 output_rp_sprel (offset)
1824 unsigned int offset;
1825 {
1826 unw_rec_list *ptr = alloc_record (rp_sprel);
1827 ptr->r.record.p.spoff = offset / 4;
1828 return ptr;
1829 }
1830
1831 static unw_rec_list *
1832 output_pfs_when ()
1833 {
1834 unw_rec_list *ptr = alloc_record (pfs_when);
1835 return ptr;
1836 }
1837
1838 static unw_rec_list *
1839 output_pfs_gr (gr)
1840 unsigned int gr;
1841 {
1842 unw_rec_list *ptr = alloc_record (pfs_gr);
1843 ptr->r.record.p.gr = gr;
1844 return ptr;
1845 }
1846
1847 static unw_rec_list *
1848 output_pfs_psprel (offset)
1849 unsigned int offset;
1850 {
1851 unw_rec_list *ptr = alloc_record (pfs_psprel);
1852 ptr->r.record.p.pspoff = offset / 4;
1853 return ptr;
1854 }
1855
1856 static unw_rec_list *
1857 output_pfs_sprel (offset)
1858 unsigned int offset;
1859 {
1860 unw_rec_list *ptr = alloc_record (pfs_sprel);
1861 ptr->r.record.p.spoff = offset / 4;
1862 return ptr;
1863 }
1864
1865 static unw_rec_list *
1866 output_preds_when ()
1867 {
1868 unw_rec_list *ptr = alloc_record (preds_when);
1869 return ptr;
1870 }
1871
1872 static unw_rec_list *
1873 output_preds_gr (gr)
1874 unsigned int gr;
1875 {
1876 unw_rec_list *ptr = alloc_record (preds_gr);
1877 ptr->r.record.p.gr = gr;
1878 return ptr;
1879 }
1880
1881 static unw_rec_list *
1882 output_preds_psprel (offset)
1883 unsigned int offset;
1884 {
1885 unw_rec_list *ptr = alloc_record (preds_psprel);
1886 ptr->r.record.p.pspoff = offset / 4;
1887 return ptr;
1888 }
1889
1890 static unw_rec_list *
1891 output_preds_sprel (offset)
1892 unsigned int offset;
1893 {
1894 unw_rec_list *ptr = alloc_record (preds_sprel);
1895 ptr->r.record.p.spoff = offset / 4;
1896 return ptr;
1897 }
1898
1899 static unw_rec_list *
1900 output_fr_mem (mask)
1901 unsigned int mask;
1902 {
1903 unw_rec_list *ptr = alloc_record (fr_mem);
1904 ptr->r.record.p.rmask = mask;
1905 return ptr;
1906 }
1907
1908 static unw_rec_list *
1909 output_frgr_mem (gr_mask, fr_mask)
1910 unsigned int gr_mask;
1911 unsigned int fr_mask;
1912 {
1913 unw_rec_list *ptr = alloc_record (frgr_mem);
1914 ptr->r.record.p.grmask = gr_mask;
1915 ptr->r.record.p.frmask = fr_mask;
1916 return ptr;
1917 }
1918
1919 static unw_rec_list *
1920 output_gr_gr (mask, reg)
1921 unsigned int mask;
1922 unsigned int reg;
1923 {
1924 unw_rec_list *ptr = alloc_record (gr_gr);
1925 ptr->r.record.p.grmask = mask;
1926 ptr->r.record.p.gr = reg;
1927 return ptr;
1928 }
1929
1930 static unw_rec_list *
1931 output_gr_mem (mask)
1932 unsigned int mask;
1933 {
1934 unw_rec_list *ptr = alloc_record (gr_mem);
1935 ptr->r.record.p.rmask = mask;
1936 return ptr;
1937 }
1938
1939 static unw_rec_list *
1940 output_br_mem (unsigned int mask)
1941 {
1942 unw_rec_list *ptr = alloc_record (br_mem);
1943 ptr->r.record.p.brmask = mask;
1944 return ptr;
1945 }
1946
1947 static unw_rec_list *
1948 output_br_gr (save_mask, reg)
1949 unsigned int save_mask;
1950 unsigned int reg;
1951 {
1952 unw_rec_list *ptr = alloc_record (br_gr);
1953 ptr->r.record.p.brmask = save_mask;
1954 ptr->r.record.p.gr = reg;
1955 return ptr;
1956 }
1957
1958 static unw_rec_list *
1959 output_spill_base (offset)
1960 unsigned int offset;
1961 {
1962 unw_rec_list *ptr = alloc_record (spill_base);
1963 ptr->r.record.p.pspoff = offset / 4;
1964 return ptr;
1965 }
1966
1967 static unw_rec_list *
1968 output_unat_when ()
1969 {
1970 unw_rec_list *ptr = alloc_record (unat_when);
1971 return ptr;
1972 }
1973
1974 static unw_rec_list *
1975 output_unat_gr (gr)
1976 unsigned int gr;
1977 {
1978 unw_rec_list *ptr = alloc_record (unat_gr);
1979 ptr->r.record.p.gr = gr;
1980 return ptr;
1981 }
1982
1983 static unw_rec_list *
1984 output_unat_psprel (offset)
1985 unsigned int offset;
1986 {
1987 unw_rec_list *ptr = alloc_record (unat_psprel);
1988 ptr->r.record.p.pspoff = offset / 4;
1989 return ptr;
1990 }
1991
1992 static unw_rec_list *
1993 output_unat_sprel (offset)
1994 unsigned int offset;
1995 {
1996 unw_rec_list *ptr = alloc_record (unat_sprel);
1997 ptr->r.record.p.spoff = offset / 4;
1998 return ptr;
1999 }
2000
2001 static unw_rec_list *
2002 output_lc_when ()
2003 {
2004 unw_rec_list *ptr = alloc_record (lc_when);
2005 return ptr;
2006 }
2007
2008 static unw_rec_list *
2009 output_lc_gr (gr)
2010 unsigned int gr;
2011 {
2012 unw_rec_list *ptr = alloc_record (lc_gr);
2013 ptr->r.record.p.gr = gr;
2014 return ptr;
2015 }
2016
2017 static unw_rec_list *
2018 output_lc_psprel (offset)
2019 unsigned int offset;
2020 {
2021 unw_rec_list *ptr = alloc_record (lc_psprel);
2022 ptr->r.record.p.pspoff = offset / 4;
2023 return ptr;
2024 }
2025
2026 static unw_rec_list *
2027 output_lc_sprel (offset)
2028 unsigned int offset;
2029 {
2030 unw_rec_list *ptr = alloc_record (lc_sprel);
2031 ptr->r.record.p.spoff = offset / 4;
2032 return ptr;
2033 }
2034
2035 static unw_rec_list *
2036 output_fpsr_when ()
2037 {
2038 unw_rec_list *ptr = alloc_record (fpsr_when);
2039 return ptr;
2040 }
2041
2042 static unw_rec_list *
2043 output_fpsr_gr (gr)
2044 unsigned int gr;
2045 {
2046 unw_rec_list *ptr = alloc_record (fpsr_gr);
2047 ptr->r.record.p.gr = gr;
2048 return ptr;
2049 }
2050
2051 static unw_rec_list *
2052 output_fpsr_psprel (offset)
2053 unsigned int offset;
2054 {
2055 unw_rec_list *ptr = alloc_record (fpsr_psprel);
2056 ptr->r.record.p.pspoff = offset / 4;
2057 return ptr;
2058 }
2059
2060 static unw_rec_list *
2061 output_fpsr_sprel (offset)
2062 unsigned int offset;
2063 {
2064 unw_rec_list *ptr = alloc_record (fpsr_sprel);
2065 ptr->r.record.p.spoff = offset / 4;
2066 return ptr;
2067 }
2068
2069 static unw_rec_list *
2070 output_priunat_when_gr ()
2071 {
2072 unw_rec_list *ptr = alloc_record (priunat_when_gr);
2073 return ptr;
2074 }
2075
2076 static unw_rec_list *
2077 output_priunat_when_mem ()
2078 {
2079 unw_rec_list *ptr = alloc_record (priunat_when_mem);
2080 return ptr;
2081 }
2082
2083 static unw_rec_list *
2084 output_priunat_gr (gr)
2085 unsigned int gr;
2086 {
2087 unw_rec_list *ptr = alloc_record (priunat_gr);
2088 ptr->r.record.p.gr = gr;
2089 return ptr;
2090 }
2091
2092 static unw_rec_list *
2093 output_priunat_psprel (offset)
2094 unsigned int offset;
2095 {
2096 unw_rec_list *ptr = alloc_record (priunat_psprel);
2097 ptr->r.record.p.pspoff = offset / 4;
2098 return ptr;
2099 }
2100
2101 static unw_rec_list *
2102 output_priunat_sprel (offset)
2103 unsigned int offset;
2104 {
2105 unw_rec_list *ptr = alloc_record (priunat_sprel);
2106 ptr->r.record.p.spoff = offset / 4;
2107 return ptr;
2108 }
2109
2110 static unw_rec_list *
2111 output_bsp_when ()
2112 {
2113 unw_rec_list *ptr = alloc_record (bsp_when);
2114 return ptr;
2115 }
2116
2117 static unw_rec_list *
2118 output_bsp_gr (gr)
2119 unsigned int gr;
2120 {
2121 unw_rec_list *ptr = alloc_record (bsp_gr);
2122 ptr->r.record.p.gr = gr;
2123 return ptr;
2124 }
2125
2126 static unw_rec_list *
2127 output_bsp_psprel (offset)
2128 unsigned int offset;
2129 {
2130 unw_rec_list *ptr = alloc_record (bsp_psprel);
2131 ptr->r.record.p.pspoff = offset / 4;
2132 return ptr;
2133 }
2134
2135 static unw_rec_list *
2136 output_bsp_sprel (offset)
2137 unsigned int offset;
2138 {
2139 unw_rec_list *ptr = alloc_record (bsp_sprel);
2140 ptr->r.record.p.spoff = offset / 4;
2141 return ptr;
2142 }
2143
2144 static unw_rec_list *
2145 output_bspstore_when ()
2146 {
2147 unw_rec_list *ptr = alloc_record (bspstore_when);
2148 return ptr;
2149 }
2150
2151 static unw_rec_list *
2152 output_bspstore_gr (gr)
2153 unsigned int gr;
2154 {
2155 unw_rec_list *ptr = alloc_record (bspstore_gr);
2156 ptr->r.record.p.gr = gr;
2157 return ptr;
2158 }
2159
2160 static unw_rec_list *
2161 output_bspstore_psprel (offset)
2162 unsigned int offset;
2163 {
2164 unw_rec_list *ptr = alloc_record (bspstore_psprel);
2165 ptr->r.record.p.pspoff = offset / 4;
2166 return ptr;
2167 }
2168
2169 static unw_rec_list *
2170 output_bspstore_sprel (offset)
2171 unsigned int offset;
2172 {
2173 unw_rec_list *ptr = alloc_record (bspstore_sprel);
2174 ptr->r.record.p.spoff = offset / 4;
2175 return ptr;
2176 }
2177
2178 static unw_rec_list *
2179 output_rnat_when ()
2180 {
2181 unw_rec_list *ptr = alloc_record (rnat_when);
2182 return ptr;
2183 }
2184
2185 static unw_rec_list *
2186 output_rnat_gr (gr)
2187 unsigned int gr;
2188 {
2189 unw_rec_list *ptr = alloc_record (rnat_gr);
2190 ptr->r.record.p.gr = gr;
2191 return ptr;
2192 }
2193
2194 static unw_rec_list *
2195 output_rnat_psprel (offset)
2196 unsigned int offset;
2197 {
2198 unw_rec_list *ptr = alloc_record (rnat_psprel);
2199 ptr->r.record.p.pspoff = offset / 4;
2200 return ptr;
2201 }
2202
2203 static unw_rec_list *
2204 output_rnat_sprel (offset)
2205 unsigned int offset;
2206 {
2207 unw_rec_list *ptr = alloc_record (rnat_sprel);
2208 ptr->r.record.p.spoff = offset / 4;
2209 return ptr;
2210 }
2211
2212 static unw_rec_list *
2213 output_unwabi (abi, context)
2214 unsigned long abi;
2215 unsigned long context;
2216 {
2217 unw_rec_list *ptr = alloc_record (unwabi);
2218 ptr->r.record.p.abi = abi;
2219 ptr->r.record.p.context = context;
2220 return ptr;
2221 }
2222
2223 static unw_rec_list *
2224 output_epilogue (unsigned long ecount)
2225 {
2226 unw_rec_list *ptr = alloc_record (epilogue);
2227 ptr->r.record.b.ecount = ecount;
2228 return ptr;
2229 }
2230
2231 static unw_rec_list *
2232 output_label_state (unsigned long label)
2233 {
2234 unw_rec_list *ptr = alloc_record (label_state);
2235 ptr->r.record.b.label = label;
2236 return ptr;
2237 }
2238
2239 static unw_rec_list *
2240 output_copy_state (unsigned long label)
2241 {
2242 unw_rec_list *ptr = alloc_record (copy_state);
2243 ptr->r.record.b.label = label;
2244 return ptr;
2245 }
2246
2247 static unw_rec_list *
2248 output_spill_psprel (ab, reg, offset)
2249 unsigned int ab;
2250 unsigned int reg;
2251 unsigned int offset;
2252 {
2253 unw_rec_list *ptr = alloc_record (spill_psprel);
2254 ptr->r.record.x.ab = ab;
2255 ptr->r.record.x.reg = reg;
2256 ptr->r.record.x.pspoff = offset / 4;
2257 return ptr;
2258 }
2259
2260 static unw_rec_list *
2261 output_spill_sprel (ab, reg, offset)
2262 unsigned int ab;
2263 unsigned int reg;
2264 unsigned int offset;
2265 {
2266 unw_rec_list *ptr = alloc_record (spill_sprel);
2267 ptr->r.record.x.ab = ab;
2268 ptr->r.record.x.reg = reg;
2269 ptr->r.record.x.spoff = offset / 4;
2270 return ptr;
2271 }
2272
2273 static unw_rec_list *
2274 output_spill_psprel_p (ab, reg, offset, predicate)
2275 unsigned int ab;
2276 unsigned int reg;
2277 unsigned int offset;
2278 unsigned int predicate;
2279 {
2280 unw_rec_list *ptr = alloc_record (spill_psprel_p);
2281 ptr->r.record.x.ab = ab;
2282 ptr->r.record.x.reg = reg;
2283 ptr->r.record.x.pspoff = offset / 4;
2284 ptr->r.record.x.qp = predicate;
2285 return ptr;
2286 }
2287
2288 static unw_rec_list *
2289 output_spill_sprel_p (ab, reg, offset, predicate)
2290 unsigned int ab;
2291 unsigned int reg;
2292 unsigned int offset;
2293 unsigned int predicate;
2294 {
2295 unw_rec_list *ptr = alloc_record (spill_sprel_p);
2296 ptr->r.record.x.ab = ab;
2297 ptr->r.record.x.reg = reg;
2298 ptr->r.record.x.spoff = offset / 4;
2299 ptr->r.record.x.qp = predicate;
2300 return ptr;
2301 }
2302
2303 static unw_rec_list *
2304 output_spill_reg (ab, reg, targ_reg, xy)
2305 unsigned int ab;
2306 unsigned int reg;
2307 unsigned int targ_reg;
2308 unsigned int xy;
2309 {
2310 unw_rec_list *ptr = alloc_record (spill_reg);
2311 ptr->r.record.x.ab = ab;
2312 ptr->r.record.x.reg = reg;
2313 ptr->r.record.x.treg = targ_reg;
2314 ptr->r.record.x.xy = xy;
2315 return ptr;
2316 }
2317
2318 static unw_rec_list *
2319 output_spill_reg_p (ab, reg, targ_reg, xy, predicate)
2320 unsigned int ab;
2321 unsigned int reg;
2322 unsigned int targ_reg;
2323 unsigned int xy;
2324 unsigned int predicate;
2325 {
2326 unw_rec_list *ptr = alloc_record (spill_reg_p);
2327 ptr->r.record.x.ab = ab;
2328 ptr->r.record.x.reg = reg;
2329 ptr->r.record.x.treg = targ_reg;
2330 ptr->r.record.x.xy = xy;
2331 ptr->r.record.x.qp = predicate;
2332 return ptr;
2333 }
2334
2335 /* Given a unw_rec_list process the correct format with the
2336 specified function. */
2337
2338 static void
2339 process_one_record (ptr, f)
2340 unw_rec_list *ptr;
2341 vbyte_func f;
2342 {
2343 unsigned long fr_mask, gr_mask;
2344
2345 switch (ptr->r.type)
2346 {
2347 case gr_mem:
2348 case fr_mem:
2349 case br_mem:
2350 case frgr_mem:
2351 /* These are taken care of by prologue/prologue_gr. */
2352 break;
2353
2354 case prologue_gr:
2355 case prologue:
2356 if (ptr->r.type == prologue_gr)
2357 output_R2_format (f, ptr->r.record.r.grmask,
2358 ptr->r.record.r.grsave, ptr->r.record.r.rlen);
2359 else
2360 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2361
2362 /* Output descriptor(s) for union of register spills (if any). */
2363 gr_mask = ptr->r.record.r.mask.gr_mem;
2364 fr_mask = ptr->r.record.r.mask.fr_mem;
2365 if (fr_mask)
2366 {
2367 if ((fr_mask & ~0xfUL) == 0)
2368 output_P6_format (f, fr_mem, fr_mask);
2369 else
2370 {
2371 output_P5_format (f, gr_mask, fr_mask);
2372 gr_mask = 0;
2373 }
2374 }
2375 if (gr_mask)
2376 output_P6_format (f, gr_mem, gr_mask);
2377 if (ptr->r.record.r.mask.br_mem)
2378 output_P1_format (f, ptr->r.record.r.mask.br_mem);
2379
2380 /* output imask descriptor if necessary: */
2381 if (ptr->r.record.r.mask.i)
2382 output_P4_format (f, ptr->r.record.r.mask.i,
2383 ptr->r.record.r.imask_size);
2384 break;
2385
2386 case body:
2387 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2388 break;
2389 case mem_stack_f:
2390 case mem_stack_v:
2391 output_P7_format (f, ptr->r.type, ptr->r.record.p.t,
2392 ptr->r.record.p.size);
2393 break;
2394 case psp_gr:
2395 case rp_gr:
2396 case pfs_gr:
2397 case preds_gr:
2398 case unat_gr:
2399 case lc_gr:
2400 case fpsr_gr:
2401 case priunat_gr:
2402 case bsp_gr:
2403 case bspstore_gr:
2404 case rnat_gr:
2405 output_P3_format (f, ptr->r.type, ptr->r.record.p.gr);
2406 break;
2407 case rp_br:
2408 output_P3_format (f, rp_br, ptr->r.record.p.br);
2409 break;
2410 case psp_sprel:
2411 output_P7_format (f, psp_sprel, ptr->r.record.p.spoff, 0);
2412 break;
2413 case rp_when:
2414 case pfs_when:
2415 case preds_when:
2416 case unat_when:
2417 case lc_when:
2418 case fpsr_when:
2419 output_P7_format (f, ptr->r.type, ptr->r.record.p.t, 0);
2420 break;
2421 case rp_psprel:
2422 case pfs_psprel:
2423 case preds_psprel:
2424 case unat_psprel:
2425 case lc_psprel:
2426 case fpsr_psprel:
2427 case spill_base:
2428 output_P7_format (f, ptr->r.type, ptr->r.record.p.pspoff, 0);
2429 break;
2430 case rp_sprel:
2431 case pfs_sprel:
2432 case preds_sprel:
2433 case unat_sprel:
2434 case lc_sprel:
2435 case fpsr_sprel:
2436 case priunat_sprel:
2437 case bsp_sprel:
2438 case bspstore_sprel:
2439 case rnat_sprel:
2440 output_P8_format (f, ptr->r.type, ptr->r.record.p.spoff);
2441 break;
2442 case gr_gr:
2443 output_P9_format (f, ptr->r.record.p.grmask, ptr->r.record.p.gr);
2444 break;
2445 case br_gr:
2446 output_P2_format (f, ptr->r.record.p.brmask, ptr->r.record.p.gr);
2447 break;
2448 case spill_mask:
2449 as_bad ("spill_mask record unimplemented.");
2450 break;
2451 case priunat_when_gr:
2452 case priunat_when_mem:
2453 case bsp_when:
2454 case bspstore_when:
2455 case rnat_when:
2456 output_P8_format (f, ptr->r.type, ptr->r.record.p.t);
2457 break;
2458 case priunat_psprel:
2459 case bsp_psprel:
2460 case bspstore_psprel:
2461 case rnat_psprel:
2462 output_P8_format (f, ptr->r.type, ptr->r.record.p.pspoff);
2463 break;
2464 case unwabi:
2465 output_P10_format (f, ptr->r.record.p.abi, ptr->r.record.p.context);
2466 break;
2467 case epilogue:
2468 output_B3_format (f, ptr->r.record.b.ecount, ptr->r.record.b.t);
2469 break;
2470 case label_state:
2471 case copy_state:
2472 output_B4_format (f, ptr->r.type, ptr->r.record.b.label);
2473 break;
2474 case spill_psprel:
2475 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2476 ptr->r.record.x.reg, ptr->r.record.x.t,
2477 ptr->r.record.x.pspoff);
2478 break;
2479 case spill_sprel:
2480 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2481 ptr->r.record.x.reg, ptr->r.record.x.t,
2482 ptr->r.record.x.spoff);
2483 break;
2484 case spill_reg:
2485 output_X2_format (f, ptr->r.record.x.ab, ptr->r.record.x.reg,
2486 ptr->r.record.x.xy >> 1, ptr->r.record.x.xy,
2487 ptr->r.record.x.treg, ptr->r.record.x.t);
2488 break;
2489 case spill_psprel_p:
2490 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2491 ptr->r.record.x.ab, ptr->r.record.x.reg,
2492 ptr->r.record.x.t, ptr->r.record.x.pspoff);
2493 break;
2494 case spill_sprel_p:
2495 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2496 ptr->r.record.x.ab, ptr->r.record.x.reg,
2497 ptr->r.record.x.t, ptr->r.record.x.spoff);
2498 break;
2499 case spill_reg_p:
2500 output_X4_format (f, ptr->r.record.x.qp, ptr->r.record.x.ab,
2501 ptr->r.record.x.reg, ptr->r.record.x.xy >> 1,
2502 ptr->r.record.x.xy, ptr->r.record.x.treg,
2503 ptr->r.record.x.t);
2504 break;
2505 default:
2506 as_bad ("record_type_not_valid");
2507 break;
2508 }
2509 }
2510
2511 /* Given a unw_rec_list list, process all the records with
2512 the specified function. */
2513 static void
2514 process_unw_records (list, f)
2515 unw_rec_list *list;
2516 vbyte_func f;
2517 {
2518 unw_rec_list *ptr;
2519 for (ptr = list; ptr; ptr = ptr->next)
2520 process_one_record (ptr, f);
2521 }
2522
2523 /* Determine the size of a record list in bytes. */
2524 static int
2525 calc_record_size (list)
2526 unw_rec_list *list;
2527 {
2528 vbyte_count = 0;
2529 process_unw_records (list, count_output);
2530 return vbyte_count;
2531 }
2532
2533 /* Update IMASK bitmask to reflect the fact that one or more registers
2534 of type TYPE are saved starting at instruction with index T. If N
2535 bits are set in REGMASK, it is assumed that instructions T through
2536 T+N-1 save these registers.
2537
2538 TYPE values:
2539 0: no save
2540 1: instruction saves next fp reg
2541 2: instruction saves next general reg
2542 3: instruction saves next branch reg */
2543 static void
2544 set_imask (region, regmask, t, type)
2545 unw_rec_list *region;
2546 unsigned long regmask;
2547 unsigned long t;
2548 unsigned int type;
2549 {
2550 unsigned char *imask;
2551 unsigned long imask_size;
2552 unsigned int i;
2553 int pos;
2554
2555 imask = region->r.record.r.mask.i;
2556 imask_size = region->r.record.r.imask_size;
2557 if (!imask)
2558 {
2559 imask_size = (region->r.record.r.rlen * 2 + 7) / 8 + 1;
2560 imask = xmalloc (imask_size);
2561 memset (imask, 0, imask_size);
2562
2563 region->r.record.r.imask_size = imask_size;
2564 region->r.record.r.mask.i = imask;
2565 }
2566
2567 i = (t / 4) + 1;
2568 pos = 2 * (3 - t % 4);
2569 while (regmask)
2570 {
2571 if (i >= imask_size)
2572 {
2573 as_bad ("Ignoring attempt to spill beyond end of region");
2574 return;
2575 }
2576
2577 imask[i] |= (type & 0x3) << pos;
2578
2579 regmask &= (regmask - 1);
2580 pos -= 2;
2581 if (pos < 0)
2582 {
2583 pos = 0;
2584 ++i;
2585 }
2586 }
2587 }
2588
2589 static int
2590 count_bits (unsigned long mask)
2591 {
2592 int n = 0;
2593
2594 while (mask)
2595 {
2596 mask &= mask - 1;
2597 ++n;
2598 }
2599 return n;
2600 }
2601
2602 /* Return the number of instruction slots from FIRST_ADDR to SLOT_ADDR.
2603 SLOT_FRAG is the frag containing SLOT_ADDR, and FIRST_FRAG is the frag
2604 containing FIRST_ADDR. */
2605
2606 unsigned long
2607 slot_index (slot_addr, slot_frag, first_addr, first_frag)
2608 unsigned long slot_addr;
2609 fragS *slot_frag;
2610 unsigned long first_addr;
2611 fragS *first_frag;
2612 {
2613 unsigned long index = 0;
2614
2615 /* First time we are called, the initial address and frag are invalid. */
2616 if (first_addr == 0)
2617 return 0;
2618
2619 /* If the two addresses are in different frags, then we need to add in
2620 the remaining size of this frag, and then the entire size of intermediate
2621 frags. */
2622 while (slot_frag != first_frag)
2623 {
2624 unsigned long start_addr = (unsigned long) &first_frag->fr_literal;
2625
2626 /* Add in the full size of the frag converted to instruction slots. */
2627 index += 3 * (first_frag->fr_fix >> 4);
2628 /* Subtract away the initial part before first_addr. */
2629 index -= (3 * ((first_addr >> 4) - (start_addr >> 4))
2630 + ((first_addr & 0x3) - (start_addr & 0x3)));
2631
2632 /* Move to the beginning of the next frag. */
2633 first_frag = first_frag->fr_next;
2634 first_addr = (unsigned long) &first_frag->fr_literal;
2635 }
2636
2637 /* Add in the used part of the last frag. */
2638 index += (3 * ((slot_addr >> 4) - (first_addr >> 4))
2639 + ((slot_addr & 0x3) - (first_addr & 0x3)));
2640 return index;
2641 }
2642
2643 /* Optimize unwind record directives. */
2644
2645 static unw_rec_list *
2646 optimize_unw_records (list)
2647 unw_rec_list *list;
2648 {
2649 if (!list)
2650 return NULL;
2651
2652 /* If the only unwind record is ".prologue" or ".prologue" followed
2653 by ".body", then we can optimize the unwind directives away. */
2654 if (list->r.type == prologue
2655 && (list->next == NULL
2656 || (list->next->r.type == body && list->next->next == NULL)))
2657 return NULL;
2658
2659 return list;
2660 }
2661
2662 /* Given a complete record list, process any records which have
2663 unresolved fields, (ie length counts for a prologue). After
2664 this has been run, all neccessary information should be available
2665 within each record to generate an image. */
2666
2667 static void
2668 fixup_unw_records (list)
2669 unw_rec_list *list;
2670 {
2671 unw_rec_list *ptr, *region = 0;
2672 unsigned long first_addr = 0, rlen = 0, t;
2673 fragS *first_frag = 0;
2674
2675 for (ptr = list; ptr; ptr = ptr->next)
2676 {
2677 if (ptr->slot_number == SLOT_NUM_NOT_SET)
2678 as_bad (" Insn slot not set in unwind record.");
2679 t = slot_index (ptr->slot_number, ptr->slot_frag,
2680 first_addr, first_frag);
2681 switch (ptr->r.type)
2682 {
2683 case prologue:
2684 case prologue_gr:
2685 case body:
2686 {
2687 unw_rec_list *last;
2688 int size, dir_len = 0;
2689 unsigned long last_addr;
2690 fragS *last_frag;
2691
2692 first_addr = ptr->slot_number;
2693 first_frag = ptr->slot_frag;
2694 ptr->slot_number = 0;
2695 /* Find either the next body/prologue start, or the end of
2696 the list, and determine the size of the region. */
2697 last_addr = unwind.next_slot_number;
2698 last_frag = unwind.next_slot_frag;
2699 for (last = ptr->next; last != NULL; last = last->next)
2700 if (last->r.type == prologue || last->r.type == prologue_gr
2701 || last->r.type == body)
2702 {
2703 last_addr = last->slot_number;
2704 last_frag = last->slot_frag;
2705 break;
2706 }
2707 else if (!last->next)
2708 {
2709 /* In the absence of an explicit .body directive,
2710 the prologue ends after the last instruction
2711 covered by an unwind directive. */
2712 if (ptr->r.type != body)
2713 {
2714 last_addr = last->slot_number;
2715 last_frag = last->slot_frag;
2716 switch (last->r.type)
2717 {
2718 case frgr_mem:
2719 dir_len = (count_bits (last->r.record.p.frmask)
2720 + count_bits (last->r.record.p.grmask));
2721 break;
2722 case fr_mem:
2723 case gr_mem:
2724 dir_len += count_bits (last->r.record.p.rmask);
2725 break;
2726 case br_mem:
2727 case br_gr:
2728 dir_len += count_bits (last->r.record.p.brmask);
2729 break;
2730 case gr_gr:
2731 dir_len += count_bits (last->r.record.p.grmask);
2732 break;
2733 default:
2734 dir_len = 1;
2735 break;
2736 }
2737 }
2738 break;
2739 }
2740 size = (slot_index (last_addr, last_frag, first_addr, first_frag)
2741 + dir_len);
2742 rlen = ptr->r.record.r.rlen = size;
2743 if (ptr->r.type == body)
2744 /* End of region. */
2745 region = 0;
2746 else
2747 region = ptr;
2748 break;
2749 }
2750 case epilogue:
2751 ptr->r.record.b.t = rlen - 1 - t;
2752 break;
2753
2754 case mem_stack_f:
2755 case mem_stack_v:
2756 case rp_when:
2757 case pfs_when:
2758 case preds_when:
2759 case unat_when:
2760 case lc_when:
2761 case fpsr_when:
2762 case priunat_when_gr:
2763 case priunat_when_mem:
2764 case bsp_when:
2765 case bspstore_when:
2766 case rnat_when:
2767 ptr->r.record.p.t = t;
2768 break;
2769
2770 case spill_reg:
2771 case spill_sprel:
2772 case spill_psprel:
2773 case spill_reg_p:
2774 case spill_sprel_p:
2775 case spill_psprel_p:
2776 ptr->r.record.x.t = t;
2777 break;
2778
2779 case frgr_mem:
2780 if (!region)
2781 {
2782 as_bad ("frgr_mem record before region record!\n");
2783 return;
2784 }
2785 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2786 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2787 set_imask (region, ptr->r.record.p.frmask, t, 1);
2788 set_imask (region, ptr->r.record.p.grmask, t, 2);
2789 break;
2790 case fr_mem:
2791 if (!region)
2792 {
2793 as_bad ("fr_mem record before region record!\n");
2794 return;
2795 }
2796 region->r.record.r.mask.fr_mem |= ptr->r.record.p.rmask;
2797 set_imask (region, ptr->r.record.p.rmask, t, 1);
2798 break;
2799 case gr_mem:
2800 if (!region)
2801 {
2802 as_bad ("gr_mem record before region record!\n");
2803 return;
2804 }
2805 region->r.record.r.mask.gr_mem |= ptr->r.record.p.rmask;
2806 set_imask (region, ptr->r.record.p.rmask, t, 2);
2807 break;
2808 case br_mem:
2809 if (!region)
2810 {
2811 as_bad ("br_mem record before region record!\n");
2812 return;
2813 }
2814 region->r.record.r.mask.br_mem |= ptr->r.record.p.brmask;
2815 set_imask (region, ptr->r.record.p.brmask, t, 3);
2816 break;
2817
2818 case gr_gr:
2819 if (!region)
2820 {
2821 as_bad ("gr_gr record before region record!\n");
2822 return;
2823 }
2824 set_imask (region, ptr->r.record.p.grmask, t, 2);
2825 break;
2826 case br_gr:
2827 if (!region)
2828 {
2829 as_bad ("br_gr record before region record!\n");
2830 return;
2831 }
2832 set_imask (region, ptr->r.record.p.brmask, t, 3);
2833 break;
2834
2835 default:
2836 break;
2837 }
2838 }
2839 }
2840
2841 /* Helper routine for output_unw_records. Emits the header for the unwind
2842 info. */
2843
2844 static int
2845 setup_unwind_header (int size, unsigned char **mem)
2846 {
2847 int x, extra = 0;
2848 valueT flag_value;
2849
2850 /* pad to pointer-size boundry. */
2851 x = size % md.pointer_size;
2852 if (x != 0)
2853 extra = md.pointer_size - x;
2854
2855 /* Add 8 for the header + a pointer for the
2856 personality offset. */
2857 *mem = xmalloc (size + extra + 8 + md.pointer_size);
2858
2859 /* Clear the padding area and personality. */
2860 memset (*mem + 8 + size, 0, extra + md.pointer_size);
2861
2862 /* Initialize the header area. */
2863 if (unwind.personality_routine)
2864 {
2865 if (md.flags & EF_IA_64_ABI64)
2866 flag_value = (bfd_vma) 3 << 32;
2867 else
2868 /* 32-bit unwind info block. */
2869 flag_value = (bfd_vma) 0x1003 << 32;
2870 }
2871 else
2872 flag_value = 0;
2873
2874 md_number_to_chars (*mem, (((bfd_vma) 1 << 48) /* Version. */
2875 | flag_value /* U & E handler flags. */
2876 | ((size + extra) / md.pointer_size)), /* Length. */
2877 8);
2878
2879 return extra;
2880 }
2881
2882 /* Generate an unwind image from a record list. Returns the number of
2883 bytes in the resulting image. The memory image itselof is returned
2884 in the 'ptr' parameter. */
2885 static int
2886 output_unw_records (list, ptr)
2887 unw_rec_list *list;
2888 void **ptr;
2889 {
2890 int size, extra;
2891 unsigned char *mem;
2892
2893 *ptr = NULL;
2894
2895 list = optimize_unw_records (list);
2896 fixup_unw_records (list);
2897 size = calc_record_size (list);
2898
2899 if (size > 0 || unwind.force_unwind_entry)
2900 {
2901 unwind.force_unwind_entry = 0;
2902 extra = setup_unwind_header (size, &mem);
2903
2904 vbyte_mem_ptr = mem + 8;
2905 process_unw_records (list, output_vbyte_mem);
2906
2907 *ptr = mem;
2908
2909 size += extra + 8 + md.pointer_size;
2910 }
2911 return size;
2912 }
2913
2914 static int
2915 convert_expr_to_ab_reg (e, ab, regp)
2916 expressionS *e;
2917 unsigned int *ab;
2918 unsigned int *regp;
2919 {
2920 unsigned int reg;
2921
2922 if (e->X_op != O_register)
2923 return 0;
2924
2925 reg = e->X_add_number;
2926 if (reg >= (REG_GR + 4) && reg <= (REG_GR + 7))
2927 {
2928 *ab = 0;
2929 *regp = reg - REG_GR;
2930 }
2931 else if ((reg >= (REG_FR + 2) && reg <= (REG_FR + 5))
2932 || (reg >= (REG_FR + 16) && reg <= (REG_FR + 31)))
2933 {
2934 *ab = 1;
2935 *regp = reg - REG_FR;
2936 }
2937 else if (reg >= (REG_BR + 1) && reg <= (REG_BR + 5))
2938 {
2939 *ab = 2;
2940 *regp = reg - REG_BR;
2941 }
2942 else
2943 {
2944 *ab = 3;
2945 switch (reg)
2946 {
2947 case REG_PR: *regp = 0; break;
2948 case REG_PSP: *regp = 1; break;
2949 case REG_PRIUNAT: *regp = 2; break;
2950 case REG_BR + 0: *regp = 3; break;
2951 case REG_AR + AR_BSP: *regp = 4; break;
2952 case REG_AR + AR_BSPSTORE: *regp = 5; break;
2953 case REG_AR + AR_RNAT: *regp = 6; break;
2954 case REG_AR + AR_UNAT: *regp = 7; break;
2955 case REG_AR + AR_FPSR: *regp = 8; break;
2956 case REG_AR + AR_PFS: *regp = 9; break;
2957 case REG_AR + AR_LC: *regp = 10; break;
2958
2959 default:
2960 return 0;
2961 }
2962 }
2963 return 1;
2964 }
2965
2966 static int
2967 convert_expr_to_xy_reg (e, xy, regp)
2968 expressionS *e;
2969 unsigned int *xy;
2970 unsigned int *regp;
2971 {
2972 unsigned int reg;
2973
2974 if (e->X_op != O_register)
2975 return 0;
2976
2977 reg = e->X_add_number;
2978
2979 if (/* reg >= REG_GR && */ reg <= (REG_GR + 127))
2980 {
2981 *xy = 0;
2982 *regp = reg - REG_GR;
2983 }
2984 else if (reg >= REG_FR && reg <= (REG_FR + 127))
2985 {
2986 *xy = 1;
2987 *regp = reg - REG_FR;
2988 }
2989 else if (reg >= REG_BR && reg <= (REG_BR + 7))
2990 {
2991 *xy = 2;
2992 *regp = reg - REG_BR;
2993 }
2994 else
2995 return -1;
2996 return 1;
2997 }
2998
2999 static void
3000 dot_radix (dummy)
3001 int dummy ATTRIBUTE_UNUSED;
3002 {
3003 int radix;
3004
3005 SKIP_WHITESPACE ();
3006 radix = *input_line_pointer++;
3007
3008 if (radix != 'C' && !is_end_of_line[(unsigned char) radix])
3009 {
3010 as_bad ("Radix `%c' unsupported", *input_line_pointer);
3011 ignore_rest_of_line ();
3012 return;
3013 }
3014 }
3015
3016 /* .sbss, .bss etc. are macros that expand into ".section SECNAME". */
3017 static void
3018 dot_special_section (which)
3019 int which;
3020 {
3021 set_section ((char *) special_section_name[which]);
3022 }
3023
3024 static void
3025 add_unwind_entry (ptr)
3026 unw_rec_list *ptr;
3027 {
3028 if (unwind.tail)
3029 unwind.tail->next = ptr;
3030 else
3031 unwind.list = ptr;
3032 unwind.tail = ptr;
3033
3034 /* The current entry can in fact be a chain of unwind entries. */
3035 if (unwind.current_entry == NULL)
3036 unwind.current_entry = ptr;
3037 }
3038
3039 static void
3040 dot_fframe (dummy)
3041 int dummy ATTRIBUTE_UNUSED;
3042 {
3043 expressionS e;
3044
3045 parse_operand (&e);
3046
3047 if (e.X_op != O_constant)
3048 as_bad ("Operand to .fframe must be a constant");
3049 else
3050 add_unwind_entry (output_mem_stack_f (e.X_add_number));
3051 }
3052
3053 static void
3054 dot_vframe (dummy)
3055 int dummy ATTRIBUTE_UNUSED;
3056 {
3057 expressionS e;
3058 unsigned reg;
3059
3060 parse_operand (&e);
3061 reg = e.X_add_number - REG_GR;
3062 if (e.X_op == O_register && reg < 128)
3063 {
3064 add_unwind_entry (output_mem_stack_v ());
3065 if (! (unwind.prologue_mask & 2))
3066 add_unwind_entry (output_psp_gr (reg));
3067 }
3068 else
3069 as_bad ("First operand to .vframe must be a general register");
3070 }
3071
3072 static void
3073 dot_vframesp (dummy)
3074 int dummy ATTRIBUTE_UNUSED;
3075 {
3076 expressionS e;
3077
3078 parse_operand (&e);
3079 if (e.X_op == O_constant)
3080 {
3081 add_unwind_entry (output_mem_stack_v ());
3082 add_unwind_entry (output_psp_sprel (e.X_add_number));
3083 }
3084 else
3085 as_bad ("Operand to .vframesp must be a constant (sp-relative offset)");
3086 }
3087
3088 static void
3089 dot_vframepsp (dummy)
3090 int dummy ATTRIBUTE_UNUSED;
3091 {
3092 expressionS e;
3093
3094 parse_operand (&e);
3095 if (e.X_op == O_constant)
3096 {
3097 add_unwind_entry (output_mem_stack_v ());
3098 add_unwind_entry (output_psp_sprel (e.X_add_number));
3099 }
3100 else
3101 as_bad ("Operand to .vframepsp must be a constant (psp-relative offset)");
3102 }
3103
3104 static void
3105 dot_save (dummy)
3106 int dummy ATTRIBUTE_UNUSED;
3107 {
3108 expressionS e1, e2;
3109 int sep;
3110 int reg1, reg2;
3111
3112 sep = parse_operand (&e1);
3113 if (sep != ',')
3114 as_bad ("No second operand to .save");
3115 sep = parse_operand (&e2);
3116
3117 reg1 = e1.X_add_number;
3118 reg2 = e2.X_add_number - REG_GR;
3119
3120 /* Make sure its a valid ar.xxx reg, OR its br0, aka 'rp'. */
3121 if (e1.X_op == O_register)
3122 {
3123 if (e2.X_op == O_register && reg2 >= 0 && reg2 < 128)
3124 {
3125 switch (reg1)
3126 {
3127 case REG_AR + AR_BSP:
3128 add_unwind_entry (output_bsp_when ());
3129 add_unwind_entry (output_bsp_gr (reg2));
3130 break;
3131 case REG_AR + AR_BSPSTORE:
3132 add_unwind_entry (output_bspstore_when ());
3133 add_unwind_entry (output_bspstore_gr (reg2));
3134 break;
3135 case REG_AR + AR_RNAT:
3136 add_unwind_entry (output_rnat_when ());
3137 add_unwind_entry (output_rnat_gr (reg2));
3138 break;
3139 case REG_AR + AR_UNAT:
3140 add_unwind_entry (output_unat_when ());
3141 add_unwind_entry (output_unat_gr (reg2));
3142 break;
3143 case REG_AR + AR_FPSR:
3144 add_unwind_entry (output_fpsr_when ());
3145 add_unwind_entry (output_fpsr_gr (reg2));
3146 break;
3147 case REG_AR + AR_PFS:
3148 add_unwind_entry (output_pfs_when ());
3149 if (! (unwind.prologue_mask & 4))
3150 add_unwind_entry (output_pfs_gr (reg2));
3151 break;
3152 case REG_AR + AR_LC:
3153 add_unwind_entry (output_lc_when ());
3154 add_unwind_entry (output_lc_gr (reg2));
3155 break;
3156 case REG_BR:
3157 add_unwind_entry (output_rp_when ());
3158 if (! (unwind.prologue_mask & 8))
3159 add_unwind_entry (output_rp_gr (reg2));
3160 break;
3161 case REG_PR:
3162 add_unwind_entry (output_preds_when ());
3163 if (! (unwind.prologue_mask & 1))
3164 add_unwind_entry (output_preds_gr (reg2));
3165 break;
3166 case REG_PRIUNAT:
3167 add_unwind_entry (output_priunat_when_gr ());
3168 add_unwind_entry (output_priunat_gr (reg2));
3169 break;
3170 default:
3171 as_bad ("First operand not a valid register");
3172 }
3173 }
3174 else
3175 as_bad (" Second operand not a valid register");
3176 }
3177 else
3178 as_bad ("First operand not a register");
3179 }
3180
3181 static void
3182 dot_restore (dummy)
3183 int dummy ATTRIBUTE_UNUSED;
3184 {
3185 expressionS e1, e2;
3186 unsigned long ecount; /* # of _additional_ regions to pop */
3187 int sep;
3188
3189 sep = parse_operand (&e1);
3190 if (e1.X_op != O_register || e1.X_add_number != REG_GR + 12)
3191 {
3192 as_bad ("First operand to .restore must be stack pointer (sp)");
3193 return;
3194 }
3195
3196 if (sep == ',')
3197 {
3198 parse_operand (&e2);
3199 if (e2.X_op != O_constant || e2.X_add_number < 0)
3200 {
3201 as_bad ("Second operand to .restore must be a constant >= 0");
3202 return;
3203 }
3204 ecount = e2.X_add_number;
3205 }
3206 else
3207 ecount = unwind.prologue_count - 1;
3208
3209 if (ecount >= unwind.prologue_count)
3210 {
3211 as_bad ("Epilogue count of %lu exceeds number of nested prologues (%u)",
3212 ecount + 1, unwind.prologue_count);
3213 return;
3214 }
3215
3216 add_unwind_entry (output_epilogue (ecount));
3217
3218 if (ecount < unwind.prologue_count)
3219 unwind.prologue_count -= ecount + 1;
3220 else
3221 unwind.prologue_count = 0;
3222 }
3223
3224 static void
3225 dot_restorereg (dummy)
3226 int dummy ATTRIBUTE_UNUSED;
3227 {
3228 unsigned int ab, reg;
3229 expressionS e;
3230
3231 parse_operand (&e);
3232
3233 if (!convert_expr_to_ab_reg (&e, &ab, &reg))
3234 {
3235 as_bad ("First operand to .restorereg must be a preserved register");
3236 return;
3237 }
3238 add_unwind_entry (output_spill_reg (ab, reg, 0, 0));
3239 }
3240
3241 static void
3242 dot_restorereg_p (dummy)
3243 int dummy ATTRIBUTE_UNUSED;
3244 {
3245 unsigned int qp, ab, reg;
3246 expressionS e1, e2;
3247 int sep;
3248
3249 sep = parse_operand (&e1);
3250 if (sep != ',')
3251 {
3252 as_bad ("No second operand to .restorereg.p");
3253 return;
3254 }
3255
3256 parse_operand (&e2);
3257
3258 qp = e1.X_add_number - REG_P;
3259 if (e1.X_op != O_register || qp > 63)
3260 {
3261 as_bad ("First operand to .restorereg.p must be a predicate");
3262 return;
3263 }
3264
3265 if (!convert_expr_to_ab_reg (&e2, &ab, &reg))
3266 {
3267 as_bad ("Second operand to .restorereg.p must be a preserved register");
3268 return;
3269 }
3270 add_unwind_entry (output_spill_reg_p (ab, reg, 0, 0, qp));
3271 }
3272
3273 static int
3274 generate_unwind_image (text_name)
3275 const char *text_name;
3276 {
3277 int size;
3278 void *unw_rec;
3279
3280 /* Force out pending instructions, to make sure all unwind records have
3281 a valid slot_number field. */
3282 ia64_flush_insns ();
3283
3284 /* Generate the unwind record. */
3285 size = output_unw_records (unwind.list, &unw_rec);
3286 if (size % md.pointer_size != 0)
3287 as_bad ("Unwind record is not a multiple of %d bytes.", md.pointer_size);
3288
3289 /* If there are unwind records, switch sections, and output the info. */
3290 if (size != 0)
3291 {
3292 unsigned char *where;
3293 char *sec_name;
3294 expressionS exp;
3295 bfd_reloc_code_real_type reloc;
3296
3297 make_unw_section_name (SPECIAL_SECTION_UNWIND_INFO, text_name, sec_name);
3298 set_section (sec_name);
3299 bfd_set_section_flags (stdoutput, now_seg,
3300 SEC_LOAD | SEC_ALLOC | SEC_READONLY);
3301
3302 /* Make sure the section has 4 byte alignment for ILP32 and
3303 8 byte alignment for LP64. */
3304 frag_align (md.pointer_size_shift, 0, 0);
3305 record_alignment (now_seg, md.pointer_size_shift);
3306
3307 /* Set expression which points to start of unwind descriptor area. */
3308 unwind.info = expr_build_dot ();
3309
3310 where = (unsigned char *) frag_more (size);
3311
3312 /* Issue a label for this address, and keep track of it to put it
3313 in the unwind section. */
3314
3315 /* Copy the information from the unwind record into this section. The
3316 data is already in the correct byte order. */
3317 memcpy (where, unw_rec, size);
3318
3319 /* Add the personality address to the image. */
3320 if (unwind.personality_routine != 0)
3321 {
3322 exp.X_op = O_symbol;
3323 exp.X_add_symbol = unwind.personality_routine;
3324 exp.X_add_number = 0;
3325
3326 if (md.flags & EF_IA_64_BE)
3327 {
3328 if (md.flags & EF_IA_64_ABI64)
3329 reloc = BFD_RELOC_IA64_LTOFF_FPTR64MSB;
3330 else
3331 reloc = BFD_RELOC_IA64_LTOFF_FPTR32MSB;
3332 }
3333 else
3334 {
3335 if (md.flags & EF_IA_64_ABI64)
3336 reloc = BFD_RELOC_IA64_LTOFF_FPTR64LSB;
3337 else
3338 reloc = BFD_RELOC_IA64_LTOFF_FPTR32LSB;
3339 }
3340
3341 fix_new_exp (frag_now, frag_now_fix () - md.pointer_size,
3342 md.pointer_size, &exp, 0, reloc);
3343 unwind.personality_routine = 0;
3344 }
3345 }
3346
3347 free_list_records (unwind.list);
3348 free_saved_prologue_counts ();
3349 unwind.list = unwind.tail = unwind.current_entry = NULL;
3350
3351 return size;
3352 }
3353
3354 static void
3355 dot_handlerdata (dummy)
3356 int dummy ATTRIBUTE_UNUSED;
3357 {
3358 const char *text_name = segment_name (now_seg);
3359
3360 /* If text section name starts with ".text" (which it should),
3361 strip this prefix off. */
3362 if (strcmp (text_name, ".text") == 0)
3363 text_name = "";
3364
3365 unwind.force_unwind_entry = 1;
3366
3367 /* Remember which segment we're in so we can switch back after .endp */
3368 unwind.saved_text_seg = now_seg;
3369 unwind.saved_text_subseg = now_subseg;
3370
3371 /* Generate unwind info into unwind-info section and then leave that
3372 section as the currently active one so dataXX directives go into
3373 the language specific data area of the unwind info block. */
3374 generate_unwind_image (text_name);
3375 demand_empty_rest_of_line ();
3376 }
3377
3378 static void
3379 dot_unwentry (dummy)
3380 int dummy ATTRIBUTE_UNUSED;
3381 {
3382 unwind.force_unwind_entry = 1;
3383 demand_empty_rest_of_line ();
3384 }
3385
3386 static void
3387 dot_altrp (dummy)
3388 int dummy ATTRIBUTE_UNUSED;
3389 {
3390 expressionS e;
3391 unsigned reg;
3392
3393 parse_operand (&e);
3394 reg = e.X_add_number - REG_BR;
3395 if (e.X_op == O_register && reg < 8)
3396 add_unwind_entry (output_rp_br (reg));
3397 else
3398 as_bad ("First operand not a valid branch register");
3399 }
3400
3401 static void
3402 dot_savemem (psprel)
3403 int psprel;
3404 {
3405 expressionS e1, e2;
3406 int sep;
3407 int reg1, val;
3408
3409 sep = parse_operand (&e1);
3410 if (sep != ',')
3411 as_bad ("No second operand to .save%ssp", psprel ? "p" : "");
3412 sep = parse_operand (&e2);
3413
3414 reg1 = e1.X_add_number;
3415 val = e2.X_add_number;
3416
3417 /* Make sure its a valid ar.xxx reg, OR its br0, aka 'rp'. */
3418 if (e1.X_op == O_register)
3419 {
3420 if (e2.X_op == O_constant)
3421 {
3422 switch (reg1)
3423 {
3424 case REG_AR + AR_BSP:
3425 add_unwind_entry (output_bsp_when ());
3426 add_unwind_entry ((psprel
3427 ? output_bsp_psprel
3428 : output_bsp_sprel) (val));
3429 break;
3430 case REG_AR + AR_BSPSTORE:
3431 add_unwind_entry (output_bspstore_when ());
3432 add_unwind_entry ((psprel
3433 ? output_bspstore_psprel
3434 : output_bspstore_sprel) (val));
3435 break;
3436 case REG_AR + AR_RNAT:
3437 add_unwind_entry (output_rnat_when ());
3438 add_unwind_entry ((psprel
3439 ? output_rnat_psprel
3440 : output_rnat_sprel) (val));
3441 break;
3442 case REG_AR + AR_UNAT:
3443 add_unwind_entry (output_unat_when ());
3444 add_unwind_entry ((psprel
3445 ? output_unat_psprel
3446 : output_unat_sprel) (val));
3447 break;
3448 case REG_AR + AR_FPSR:
3449 add_unwind_entry (output_fpsr_when ());
3450 add_unwind_entry ((psprel
3451 ? output_fpsr_psprel
3452 : output_fpsr_sprel) (val));
3453 break;
3454 case REG_AR + AR_PFS:
3455 add_unwind_entry (output_pfs_when ());
3456 add_unwind_entry ((psprel
3457 ? output_pfs_psprel
3458 : output_pfs_sprel) (val));
3459 break;
3460 case REG_AR + AR_LC:
3461 add_unwind_entry (output_lc_when ());
3462 add_unwind_entry ((psprel
3463 ? output_lc_psprel
3464 : output_lc_sprel) (val));
3465 break;
3466 case REG_BR:
3467 add_unwind_entry (output_rp_when ());
3468 add_unwind_entry ((psprel
3469 ? output_rp_psprel
3470 : output_rp_sprel) (val));
3471 break;
3472 case REG_PR:
3473 add_unwind_entry (output_preds_when ());
3474 add_unwind_entry ((psprel
3475 ? output_preds_psprel
3476 : output_preds_sprel) (val));
3477 break;
3478 case REG_PRIUNAT:
3479 add_unwind_entry (output_priunat_when_mem ());
3480 add_unwind_entry ((psprel
3481 ? output_priunat_psprel
3482 : output_priunat_sprel) (val));
3483 break;
3484 default:
3485 as_bad ("First operand not a valid register");
3486 }
3487 }
3488 else
3489 as_bad (" Second operand not a valid constant");
3490 }
3491 else
3492 as_bad ("First operand not a register");
3493 }
3494
3495 static void
3496 dot_saveg (dummy)
3497 int dummy ATTRIBUTE_UNUSED;
3498 {
3499 expressionS e1, e2;
3500 int sep;
3501 sep = parse_operand (&e1);
3502 if (sep == ',')
3503 parse_operand (&e2);
3504
3505 if (e1.X_op != O_constant)
3506 as_bad ("First operand to .save.g must be a constant.");
3507 else
3508 {
3509 int grmask = e1.X_add_number;
3510 if (sep != ',')
3511 add_unwind_entry (output_gr_mem (grmask));
3512 else
3513 {
3514 int reg = e2.X_add_number - REG_GR;
3515 if (e2.X_op == O_register && reg >= 0 && reg < 128)
3516 add_unwind_entry (output_gr_gr (grmask, reg));
3517 else
3518 as_bad ("Second operand is an invalid register.");
3519 }
3520 }
3521 }
3522
3523 static void
3524 dot_savef (dummy)
3525 int dummy ATTRIBUTE_UNUSED;
3526 {
3527 expressionS e1;
3528 int sep;
3529 sep = parse_operand (&e1);
3530
3531 if (e1.X_op != O_constant)
3532 as_bad ("Operand to .save.f must be a constant.");
3533 else
3534 add_unwind_entry (output_fr_mem (e1.X_add_number));
3535 }
3536
3537 static void
3538 dot_saveb (dummy)
3539 int dummy ATTRIBUTE_UNUSED;
3540 {
3541 expressionS e1, e2;
3542 unsigned int reg;
3543 unsigned char sep;
3544 int brmask;
3545
3546 sep = parse_operand (&e1);
3547 if (e1.X_op != O_constant)
3548 {
3549 as_bad ("First operand to .save.b must be a constant.");
3550 return;
3551 }
3552 brmask = e1.X_add_number;
3553
3554 if (sep == ',')
3555 {
3556 sep = parse_operand (&e2);
3557 reg = e2.X_add_number - REG_GR;
3558 if (e2.X_op != O_register || reg > 127)
3559 {
3560 as_bad ("Second operand to .save.b must be a general register.");
3561 return;
3562 }
3563 add_unwind_entry (output_br_gr (brmask, e2.X_add_number));
3564 }
3565 else
3566 add_unwind_entry (output_br_mem (brmask));
3567
3568 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3569 ignore_rest_of_line ();
3570 }
3571
3572 static void
3573 dot_savegf (dummy)
3574 int dummy ATTRIBUTE_UNUSED;
3575 {
3576 expressionS e1, e2;
3577 int sep;
3578 sep = parse_operand (&e1);
3579 if (sep == ',')
3580 parse_operand (&e2);
3581
3582 if (e1.X_op != O_constant || sep != ',' || e2.X_op != O_constant)
3583 as_bad ("Both operands of .save.gf must be constants.");
3584 else
3585 {
3586 int grmask = e1.X_add_number;
3587 int frmask = e2.X_add_number;
3588 add_unwind_entry (output_frgr_mem (grmask, frmask));
3589 }
3590 }
3591
3592 static void
3593 dot_spill (dummy)
3594 int dummy ATTRIBUTE_UNUSED;
3595 {
3596 expressionS e;
3597 unsigned char sep;
3598
3599 sep = parse_operand (&e);
3600 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3601 ignore_rest_of_line ();
3602
3603 if (e.X_op != O_constant)
3604 as_bad ("Operand to .spill must be a constant");
3605 else
3606 add_unwind_entry (output_spill_base (e.X_add_number));
3607 }
3608
3609 static void
3610 dot_spillreg (dummy)
3611 int dummy ATTRIBUTE_UNUSED;
3612 {
3613 int sep, ab, xy, reg, treg;
3614 expressionS e1, e2;
3615
3616 sep = parse_operand (&e1);
3617 if (sep != ',')
3618 {
3619 as_bad ("No second operand to .spillreg");
3620 return;
3621 }
3622
3623 parse_operand (&e2);
3624
3625 if (!convert_expr_to_ab_reg (&e1, &ab, &reg))
3626 {
3627 as_bad ("First operand to .spillreg must be a preserved register");
3628 return;
3629 }
3630
3631 if (!convert_expr_to_xy_reg (&e2, &xy, &treg))
3632 {
3633 as_bad ("Second operand to .spillreg must be a register");
3634 return;
3635 }
3636
3637 add_unwind_entry (output_spill_reg (ab, reg, treg, xy));
3638 }
3639
3640 static void
3641 dot_spillmem (psprel)
3642 int psprel;
3643 {
3644 expressionS e1, e2;
3645 int sep, ab, reg;
3646
3647 sep = parse_operand (&e1);
3648 if (sep != ',')
3649 {
3650 as_bad ("Second operand missing");
3651 return;
3652 }
3653
3654 parse_operand (&e2);
3655
3656 if (!convert_expr_to_ab_reg (&e1, &ab, &reg))
3657 {
3658 as_bad ("First operand to .spill%s must be a preserved register",
3659 psprel ? "psp" : "sp");
3660 return;
3661 }
3662
3663 if (e2.X_op != O_constant)
3664 {
3665 as_bad ("Second operand to .spill%s must be a constant",
3666 psprel ? "psp" : "sp");
3667 return;
3668 }
3669
3670 if (psprel)
3671 add_unwind_entry (output_spill_psprel (ab, reg, e2.X_add_number));
3672 else
3673 add_unwind_entry (output_spill_sprel (ab, reg, e2.X_add_number));
3674 }
3675
3676 static void
3677 dot_spillreg_p (dummy)
3678 int dummy ATTRIBUTE_UNUSED;
3679 {
3680 int sep, ab, xy, reg, treg;
3681 expressionS e1, e2, e3;
3682 unsigned int qp;
3683
3684 sep = parse_operand (&e1);
3685 if (sep != ',')
3686 {
3687 as_bad ("No second and third operand to .spillreg.p");
3688 return;
3689 }
3690
3691 sep = parse_operand (&e2);
3692 if (sep != ',')
3693 {
3694 as_bad ("No third operand to .spillreg.p");
3695 return;
3696 }
3697
3698 parse_operand (&e3);
3699
3700 qp = e1.X_add_number - REG_P;
3701
3702 if (e1.X_op != O_register || qp > 63)
3703 {
3704 as_bad ("First operand to .spillreg.p must be a predicate");
3705 return;
3706 }
3707
3708 if (!convert_expr_to_ab_reg (&e2, &ab, &reg))
3709 {
3710 as_bad ("Second operand to .spillreg.p must be a preserved register");
3711 return;
3712 }
3713
3714 if (!convert_expr_to_xy_reg (&e3, &xy, &treg))
3715 {
3716 as_bad ("Third operand to .spillreg.p must be a register");
3717 return;
3718 }
3719
3720 add_unwind_entry (output_spill_reg_p (ab, reg, treg, xy, qp));
3721 }
3722
3723 static void
3724 dot_spillmem_p (psprel)
3725 int psprel;
3726 {
3727 expressionS e1, e2, e3;
3728 int sep, ab, reg;
3729 unsigned int qp;
3730
3731 sep = parse_operand (&e1);
3732 if (sep != ',')
3733 {
3734 as_bad ("Second operand missing");
3735 return;
3736 }
3737
3738 parse_operand (&e2);
3739 if (sep != ',')
3740 {
3741 as_bad ("Second operand missing");
3742 return;
3743 }
3744
3745 parse_operand (&e3);
3746
3747 qp = e1.X_add_number - REG_P;
3748 if (e1.X_op != O_register || qp > 63)
3749 {
3750 as_bad ("First operand to .spill%s_p must be a predicate",
3751 psprel ? "psp" : "sp");
3752 return;
3753 }
3754
3755 if (!convert_expr_to_ab_reg (&e2, &ab, &reg))
3756 {
3757 as_bad ("Second operand to .spill%s_p must be a preserved register",
3758 psprel ? "psp" : "sp");
3759 return;
3760 }
3761
3762 if (e3.X_op != O_constant)
3763 {
3764 as_bad ("Third operand to .spill%s_p must be a constant",
3765 psprel ? "psp" : "sp");
3766 return;
3767 }
3768
3769 if (psprel)
3770 add_unwind_entry (output_spill_psprel_p (ab, reg, e3.X_add_number, qp));
3771 else
3772 add_unwind_entry (output_spill_sprel_p (ab, reg, e3.X_add_number, qp));
3773 }
3774
3775 static unsigned int
3776 get_saved_prologue_count (lbl)
3777 unsigned long lbl;
3778 {
3779 label_prologue_count *lpc = unwind.saved_prologue_counts;
3780
3781 while (lpc != NULL && lpc->label_number != lbl)
3782 lpc = lpc->next;
3783
3784 if (lpc != NULL)
3785 return lpc->prologue_count;
3786
3787 as_bad ("Missing .label_state %ld", lbl);
3788 return 1;
3789 }
3790
3791 static void
3792 save_prologue_count (lbl, count)
3793 unsigned long lbl;
3794 unsigned int count;
3795 {
3796 label_prologue_count *lpc = unwind.saved_prologue_counts;
3797
3798 while (lpc != NULL && lpc->label_number != lbl)
3799 lpc = lpc->next;
3800
3801 if (lpc != NULL)
3802 lpc->prologue_count = count;
3803 else
3804 {
3805 label_prologue_count *new_lpc = xmalloc (sizeof (* new_lpc));
3806
3807 new_lpc->next = unwind.saved_prologue_counts;
3808 new_lpc->label_number = lbl;
3809 new_lpc->prologue_count = count;
3810 unwind.saved_prologue_counts = new_lpc;
3811 }
3812 }
3813
3814 static void
3815 free_saved_prologue_counts ()
3816 {
3817 label_prologue_count *lpc = unwind.saved_prologue_counts;
3818 label_prologue_count *next;
3819
3820 while (lpc != NULL)
3821 {
3822 next = lpc->next;
3823 free (lpc);
3824 lpc = next;
3825 }
3826
3827 unwind.saved_prologue_counts = NULL;
3828 }
3829
3830 static void
3831 dot_label_state (dummy)
3832 int dummy ATTRIBUTE_UNUSED;
3833 {
3834 expressionS e;
3835
3836 parse_operand (&e);
3837 if (e.X_op != O_constant)
3838 {
3839 as_bad ("Operand to .label_state must be a constant");
3840 return;
3841 }
3842 add_unwind_entry (output_label_state (e.X_add_number));
3843 save_prologue_count (e.X_add_number, unwind.prologue_count);
3844 }
3845
3846 static void
3847 dot_copy_state (dummy)
3848 int dummy ATTRIBUTE_UNUSED;
3849 {
3850 expressionS e;
3851
3852 parse_operand (&e);
3853 if (e.X_op != O_constant)
3854 {
3855 as_bad ("Operand to .copy_state must be a constant");
3856 return;
3857 }
3858 add_unwind_entry (output_copy_state (e.X_add_number));
3859 unwind.prologue_count = get_saved_prologue_count (e.X_add_number);
3860 }
3861
3862 static void
3863 dot_unwabi (dummy)
3864 int dummy ATTRIBUTE_UNUSED;
3865 {
3866 expressionS e1, e2;
3867 unsigned char sep;
3868
3869 sep = parse_operand (&e1);
3870 if (sep != ',')
3871 {
3872 as_bad ("Second operand to .unwabi missing");
3873 return;
3874 }
3875 sep = parse_operand (&e2);
3876 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3877 ignore_rest_of_line ();
3878
3879 if (e1.X_op != O_constant)
3880 {
3881 as_bad ("First operand to .unwabi must be a constant");
3882 return;
3883 }
3884
3885 if (e2.X_op != O_constant)
3886 {
3887 as_bad ("Second operand to .unwabi must be a constant");
3888 return;
3889 }
3890
3891 add_unwind_entry (output_unwabi (e1.X_add_number, e2.X_add_number));
3892 }
3893
3894 static void
3895 dot_personality (dummy)
3896 int dummy ATTRIBUTE_UNUSED;
3897 {
3898 char *name, *p, c;
3899 SKIP_WHITESPACE ();
3900 name = input_line_pointer;
3901 c = get_symbol_end ();
3902 p = input_line_pointer;
3903 unwind.personality_routine = symbol_find_or_make (name);
3904 unwind.force_unwind_entry = 1;
3905 *p = c;
3906 SKIP_WHITESPACE ();
3907 demand_empty_rest_of_line ();
3908 }
3909
3910 static void
3911 dot_proc (dummy)
3912 int dummy ATTRIBUTE_UNUSED;
3913 {
3914 char *name, *p, c;
3915 symbolS *sym;
3916
3917 unwind.proc_start = expr_build_dot ();
3918 /* Parse names of main and alternate entry points and mark them as
3919 function symbols: */
3920 while (1)
3921 {
3922 SKIP_WHITESPACE ();
3923 name = input_line_pointer;
3924 c = get_symbol_end ();
3925 p = input_line_pointer;
3926 sym = symbol_find_or_make (name);
3927 if (unwind.proc_start == 0)
3928 {
3929 unwind.proc_start = sym;
3930 }
3931 symbol_get_bfdsym (sym)->flags |= BSF_FUNCTION;
3932 *p = c;
3933 SKIP_WHITESPACE ();
3934 if (*input_line_pointer != ',')
3935 break;
3936 ++input_line_pointer;
3937 }
3938 demand_empty_rest_of_line ();
3939 ia64_do_align (16);
3940
3941 unwind.prologue_count = 0;
3942 unwind.list = unwind.tail = unwind.current_entry = NULL;
3943 unwind.personality_routine = 0;
3944 }
3945
3946 static void
3947 dot_body (dummy)
3948 int dummy ATTRIBUTE_UNUSED;
3949 {
3950 unwind.prologue = 0;
3951 unwind.prologue_mask = 0;
3952
3953 add_unwind_entry (output_body ());
3954 demand_empty_rest_of_line ();
3955 }
3956
3957 static void
3958 dot_prologue (dummy)
3959 int dummy ATTRIBUTE_UNUSED;
3960 {
3961 unsigned char sep;
3962 int mask = 0, grsave = 0;
3963
3964 if (!is_it_end_of_statement ())
3965 {
3966 expressionS e1, e2;
3967 sep = parse_operand (&e1);
3968 if (sep != ',')
3969 as_bad ("No second operand to .prologue");
3970 sep = parse_operand (&e2);
3971 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3972 ignore_rest_of_line ();
3973
3974 if (e1.X_op == O_constant)
3975 {
3976 mask = e1.X_add_number;
3977
3978 if (e2.X_op == O_constant)
3979 grsave = e2.X_add_number;
3980 else if (e2.X_op == O_register
3981 && (grsave = e2.X_add_number - REG_GR) < 128)
3982 ;
3983 else
3984 as_bad ("Second operand not a constant or general register");
3985
3986 add_unwind_entry (output_prologue_gr (mask, grsave));
3987 }
3988 else
3989 as_bad ("First operand not a constant");
3990 }
3991 else
3992 add_unwind_entry (output_prologue ());
3993
3994 unwind.prologue = 1;
3995 unwind.prologue_mask = mask;
3996 ++unwind.prologue_count;
3997 }
3998
3999 static void
4000 dot_endp (dummy)
4001 int dummy ATTRIBUTE_UNUSED;
4002 {
4003 expressionS e;
4004 unsigned char *ptr;
4005 int bytes_per_address;
4006 long where;
4007 segT saved_seg;
4008 subsegT saved_subseg;
4009 const char *sec_name, *text_name;
4010 char *name, *p, c;
4011 symbolS *sym;
4012
4013 if (unwind.saved_text_seg)
4014 {
4015 saved_seg = unwind.saved_text_seg;
4016 saved_subseg = unwind.saved_text_subseg;
4017 unwind.saved_text_seg = NULL;
4018 }
4019 else
4020 {
4021 saved_seg = now_seg;
4022 saved_subseg = now_subseg;
4023 }
4024
4025 /*
4026 Use a slightly ugly scheme to derive the unwind section names from
4027 the text section name:
4028
4029 text sect. unwind table sect.
4030 name: name: comments:
4031 ---------- ----------------- --------------------------------
4032 .text .IA_64.unwind
4033 .text.foo .IA_64.unwind.text.foo
4034 .foo .IA_64.unwind.foo
4035 .gnu.linkonce.t.foo
4036 .gnu.linkonce.ia64unw.foo
4037 _info .IA_64.unwind_info gas issues error message (ditto)
4038 _infoFOO .IA_64.unwind_infoFOO gas issues error message (ditto)
4039
4040 This mapping is done so that:
4041
4042 (a) An object file with unwind info only in .text will use
4043 unwind section names .IA_64.unwind and .IA_64.unwind_info.
4044 This follows the letter of the ABI and also ensures backwards
4045 compatibility with older toolchains.
4046
4047 (b) An object file with unwind info in multiple text sections
4048 will use separate unwind sections for each text section.
4049 This allows us to properly set the "sh_info" and "sh_link"
4050 fields in SHT_IA_64_UNWIND as required by the ABI and also
4051 lets GNU ld support programs with multiple segments
4052 containing unwind info (as might be the case for certain
4053 embedded applications).
4054
4055 (c) An error is issued if there would be a name clash.
4056 */
4057 text_name = segment_name (saved_seg);
4058 if (strncmp (text_name, "_info", 5) == 0)
4059 {
4060 as_bad ("Illegal section name `%s' (causes unwind section name clash)",
4061 text_name);
4062 ignore_rest_of_line ();
4063 return;
4064 }
4065 if (strcmp (text_name, ".text") == 0)
4066 text_name = "";
4067
4068 insn_group_break (1, 0, 0);
4069
4070 /* If there wasn't a .handlerdata, we haven't generated an image yet. */
4071 if (!unwind.info)
4072 generate_unwind_image (text_name);
4073
4074 if (unwind.info || unwind.force_unwind_entry)
4075 {
4076 subseg_set (md.last_text_seg, 0);
4077 unwind.proc_end = expr_build_dot ();
4078
4079 make_unw_section_name (SPECIAL_SECTION_UNWIND, text_name, sec_name);
4080 set_section ((char *) sec_name);
4081 bfd_set_section_flags (stdoutput, now_seg,
4082 SEC_LOAD | SEC_ALLOC | SEC_READONLY);
4083
4084 /* Make sure that section has 4 byte alignment for ILP32 and
4085 8 byte alignment for LP64. */
4086 record_alignment (now_seg, md.pointer_size_shift);
4087
4088 /* Need space for 3 pointers for procedure start, procedure end,
4089 and unwind info. */
4090 ptr = frag_more (3 * md.pointer_size);
4091 where = frag_now_fix () - (3 * md.pointer_size);
4092 bytes_per_address = bfd_arch_bits_per_address (stdoutput) / 8;
4093
4094 /* Issue the values of a) Proc Begin, b) Proc End, c) Unwind Record. */
4095 e.X_op = O_pseudo_fixup;
4096 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4097 e.X_add_number = 0;
4098 e.X_add_symbol = unwind.proc_start;
4099 ia64_cons_fix_new (frag_now, where, bytes_per_address, &e);
4100
4101 e.X_op = O_pseudo_fixup;
4102 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4103 e.X_add_number = 0;
4104 e.X_add_symbol = unwind.proc_end;
4105 ia64_cons_fix_new (frag_now, where + bytes_per_address,
4106 bytes_per_address, &e);
4107
4108 if (unwind.info)
4109 {
4110 e.X_op = O_pseudo_fixup;
4111 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4112 e.X_add_number = 0;
4113 e.X_add_symbol = unwind.info;
4114 ia64_cons_fix_new (frag_now, where + (bytes_per_address * 2),
4115 bytes_per_address, &e);
4116 }
4117 else
4118 md_number_to_chars (ptr + (bytes_per_address * 2), 0,
4119 bytes_per_address);
4120
4121 }
4122 subseg_set (saved_seg, saved_subseg);
4123
4124 /* Parse names of main and alternate entry points and set symbol sizes. */
4125 while (1)
4126 {
4127 SKIP_WHITESPACE ();
4128 name = input_line_pointer;
4129 c = get_symbol_end ();
4130 p = input_line_pointer;
4131 sym = symbol_find (name);
4132 if (sym && unwind.proc_start
4133 && (symbol_get_bfdsym (sym)->flags & BSF_FUNCTION)
4134 && S_GET_SIZE (sym) == 0 && symbol_get_obj (sym)->size == NULL)
4135 {
4136 fragS *fr = symbol_get_frag (unwind.proc_start);
4137 fragS *frag = symbol_get_frag (sym);
4138
4139 /* Check whether the function label is at or beyond last
4140 .proc directive. */
4141 while (fr && fr != frag)
4142 fr = fr->fr_next;
4143 if (fr)
4144 {
4145 if (frag == frag_now && SEG_NORMAL (now_seg))
4146 S_SET_SIZE (sym, frag_now_fix () - S_GET_VALUE (sym));
4147 else
4148 {
4149 symbol_get_obj (sym)->size =
4150 (expressionS *) xmalloc (sizeof (expressionS));
4151 symbol_get_obj (sym)->size->X_op = O_subtract;
4152 symbol_get_obj (sym)->size->X_add_symbol
4153 = symbol_new (FAKE_LABEL_NAME, now_seg,
4154 frag_now_fix (), frag_now);
4155 symbol_get_obj (sym)->size->X_op_symbol = sym;
4156 symbol_get_obj (sym)->size->X_add_number = 0;
4157 }
4158 }
4159 }
4160 *p = c;
4161 SKIP_WHITESPACE ();
4162 if (*input_line_pointer != ',')
4163 break;
4164 ++input_line_pointer;
4165 }
4166 demand_empty_rest_of_line ();
4167 unwind.proc_start = unwind.proc_end = unwind.info = 0;
4168 }
4169
4170 static void
4171 dot_template (template)
4172 int template;
4173 {
4174 CURR_SLOT.user_template = template;
4175 }
4176
4177 static void
4178 dot_regstk (dummy)
4179 int dummy ATTRIBUTE_UNUSED;
4180 {
4181 int ins, locs, outs, rots;
4182
4183 if (is_it_end_of_statement ())
4184 ins = locs = outs = rots = 0;
4185 else
4186 {
4187 ins = get_absolute_expression ();
4188 if (*input_line_pointer++ != ',')
4189 goto err;
4190 locs = get_absolute_expression ();
4191 if (*input_line_pointer++ != ',')
4192 goto err;
4193 outs = get_absolute_expression ();
4194 if (*input_line_pointer++ != ',')
4195 goto err;
4196 rots = get_absolute_expression ();
4197 }
4198 set_regstack (ins, locs, outs, rots);
4199 return;
4200
4201 err:
4202 as_bad ("Comma expected");
4203 ignore_rest_of_line ();
4204 }
4205
4206 static void
4207 dot_rot (type)
4208 int type;
4209 {
4210 unsigned num_regs, num_alloced = 0;
4211 struct dynreg **drpp, *dr;
4212 int ch, base_reg = 0;
4213 char *name, *start;
4214 size_t len;
4215
4216 switch (type)
4217 {
4218 case DYNREG_GR: base_reg = REG_GR + 32; break;
4219 case DYNREG_FR: base_reg = REG_FR + 32; break;
4220 case DYNREG_PR: base_reg = REG_P + 16; break;
4221 default: break;
4222 }
4223
4224 /* First, remove existing names from hash table. */
4225 for (dr = md.dynreg[type]; dr && dr->num_regs; dr = dr->next)
4226 {
4227 hash_delete (md.dynreg_hash, dr->name);
4228 dr->num_regs = 0;
4229 }
4230
4231 drpp = &md.dynreg[type];
4232 while (1)
4233 {
4234 start = input_line_pointer;
4235 ch = get_symbol_end ();
4236 *input_line_pointer = ch;
4237 len = (input_line_pointer - start);
4238
4239 SKIP_WHITESPACE ();
4240 if (*input_line_pointer != '[')
4241 {
4242 as_bad ("Expected '['");
4243 goto err;
4244 }
4245 ++input_line_pointer; /* skip '[' */
4246
4247 num_regs = get_absolute_expression ();
4248
4249 if (*input_line_pointer++ != ']')
4250 {
4251 as_bad ("Expected ']'");
4252 goto err;
4253 }
4254 SKIP_WHITESPACE ();
4255
4256 num_alloced += num_regs;
4257 switch (type)
4258 {
4259 case DYNREG_GR:
4260 if (num_alloced > md.rot.num_regs)
4261 {
4262 as_bad ("Used more than the declared %d rotating registers",
4263 md.rot.num_regs);
4264 goto err;
4265 }
4266 break;
4267 case DYNREG_FR:
4268 if (num_alloced > 96)
4269 {
4270 as_bad ("Used more than the available 96 rotating registers");
4271 goto err;
4272 }
4273 break;
4274 case DYNREG_PR:
4275 if (num_alloced > 48)
4276 {
4277 as_bad ("Used more than the available 48 rotating registers");
4278 goto err;
4279 }
4280 break;
4281
4282 default:
4283 break;
4284 }
4285
4286 name = obstack_alloc (&notes, len + 1);
4287 memcpy (name, start, len);
4288 name[len] = '\0';
4289
4290 if (!*drpp)
4291 {
4292 *drpp = obstack_alloc (&notes, sizeof (*dr));
4293 memset (*drpp, 0, sizeof (*dr));
4294 }
4295
4296 dr = *drpp;
4297 dr->name = name;
4298 dr->num_regs = num_regs;
4299 dr->base = base_reg;
4300 drpp = &dr->next;
4301 base_reg += num_regs;
4302
4303 if (hash_insert (md.dynreg_hash, name, dr))
4304 {
4305 as_bad ("Attempt to redefine register set `%s'", name);
4306 goto err;
4307 }
4308
4309 if (*input_line_pointer != ',')
4310 break;
4311 ++input_line_pointer; /* skip comma */
4312 SKIP_WHITESPACE ();
4313 }
4314 demand_empty_rest_of_line ();
4315 return;
4316
4317 err:
4318 ignore_rest_of_line ();
4319 }
4320
4321 static void
4322 dot_byteorder (byteorder)
4323 int byteorder;
4324 {
4325 segment_info_type *seginfo = seg_info (now_seg);
4326
4327 if (byteorder == -1)
4328 {
4329 if (seginfo->tc_segment_info_data.endian == 0)
4330 seginfo->tc_segment_info_data.endian
4331 = TARGET_BYTES_BIG_ENDIAN ? 1 : 2;
4332 byteorder = seginfo->tc_segment_info_data.endian == 1;
4333 }
4334 else
4335 seginfo->tc_segment_info_data.endian = byteorder ? 1 : 2;
4336
4337 if (target_big_endian != byteorder)
4338 {
4339 target_big_endian = byteorder;
4340 if (target_big_endian)
4341 {
4342 ia64_number_to_chars = number_to_chars_bigendian;
4343 ia64_float_to_chars = ia64_float_to_chars_bigendian;
4344 }
4345 else
4346 {
4347 ia64_number_to_chars = number_to_chars_littleendian;
4348 ia64_float_to_chars = ia64_float_to_chars_littleendian;
4349 }
4350 }
4351 }
4352
4353 static void
4354 dot_psr (dummy)
4355 int dummy ATTRIBUTE_UNUSED;
4356 {
4357 char *option;
4358 int ch;
4359
4360 while (1)
4361 {
4362 option = input_line_pointer;
4363 ch = get_symbol_end ();
4364 if (strcmp (option, "lsb") == 0)
4365 md.flags &= ~EF_IA_64_BE;
4366 else if (strcmp (option, "msb") == 0)
4367 md.flags |= EF_IA_64_BE;
4368 else if (strcmp (option, "abi32") == 0)
4369 md.flags &= ~EF_IA_64_ABI64;
4370 else if (strcmp (option, "abi64") == 0)
4371 md.flags |= EF_IA_64_ABI64;
4372 else
4373 as_bad ("Unknown psr option `%s'", option);
4374 *input_line_pointer = ch;
4375
4376 SKIP_WHITESPACE ();
4377 if (*input_line_pointer != ',')
4378 break;
4379
4380 ++input_line_pointer;
4381 SKIP_WHITESPACE ();
4382 }
4383 demand_empty_rest_of_line ();
4384 }
4385
4386 static void
4387 dot_alias (dummy)
4388 int dummy ATTRIBUTE_UNUSED;
4389 {
4390 as_bad (".alias not implemented yet");
4391 }
4392
4393 static void
4394 dot_ln (dummy)
4395 int dummy ATTRIBUTE_UNUSED;
4396 {
4397 new_logical_line (0, get_absolute_expression ());
4398 demand_empty_rest_of_line ();
4399 }
4400
4401 static char *
4402 parse_section_name ()
4403 {
4404 char *name;
4405 int len;
4406
4407 SKIP_WHITESPACE ();
4408 if (*input_line_pointer != '"')
4409 {
4410 as_bad ("Missing section name");
4411 ignore_rest_of_line ();
4412 return 0;
4413 }
4414 name = demand_copy_C_string (&len);
4415 if (!name)
4416 {
4417 ignore_rest_of_line ();
4418 return 0;
4419 }
4420 SKIP_WHITESPACE ();
4421 if (*input_line_pointer != ',')
4422 {
4423 as_bad ("Comma expected after section name");
4424 ignore_rest_of_line ();
4425 return 0;
4426 }
4427 ++input_line_pointer; /* skip comma */
4428 return name;
4429 }
4430
4431 static void
4432 dot_xdata (size)
4433 int size;
4434 {
4435 char *name = parse_section_name ();
4436 if (!name)
4437 return;
4438
4439 md.keep_pending_output = 1;
4440 set_section (name);
4441 cons (size);
4442 obj_elf_previous (0);
4443 md.keep_pending_output = 0;
4444 }
4445
4446 /* Why doesn't float_cons() call md_cons_align() the way cons() does? */
4447
4448 static void
4449 stmt_float_cons (kind)
4450 int kind;
4451 {
4452 size_t alignment;
4453
4454 switch (kind)
4455 {
4456 case 'd':
4457 alignment = 8;
4458 break;
4459
4460 case 'x':
4461 case 'X':
4462 alignment = 16;
4463 break;
4464
4465 case 'f':
4466 default:
4467 alignment = 4;
4468 break;
4469 }
4470 ia64_do_align (alignment);
4471 float_cons (kind);
4472 }
4473
4474 static void
4475 stmt_cons_ua (size)
4476 int size;
4477 {
4478 int saved_auto_align = md.auto_align;
4479
4480 md.auto_align = 0;
4481 cons (size);
4482 md.auto_align = saved_auto_align;
4483 }
4484
4485 static void
4486 dot_xfloat_cons (kind)
4487 int kind;
4488 {
4489 char *name = parse_section_name ();
4490 if (!name)
4491 return;
4492
4493 md.keep_pending_output = 1;
4494 set_section (name);
4495 stmt_float_cons (kind);
4496 obj_elf_previous (0);
4497 md.keep_pending_output = 0;
4498 }
4499
4500 static void
4501 dot_xstringer (zero)
4502 int zero;
4503 {
4504 char *name = parse_section_name ();
4505 if (!name)
4506 return;
4507
4508 md.keep_pending_output = 1;
4509 set_section (name);
4510 stringer (zero);
4511 obj_elf_previous (0);
4512 md.keep_pending_output = 0;
4513 }
4514
4515 static void
4516 dot_xdata_ua (size)
4517 int size;
4518 {
4519 int saved_auto_align = md.auto_align;
4520 char *name = parse_section_name ();
4521 if (!name)
4522 return;
4523
4524 md.keep_pending_output = 1;
4525 set_section (name);
4526 md.auto_align = 0;
4527 cons (size);
4528 md.auto_align = saved_auto_align;
4529 obj_elf_previous (0);
4530 md.keep_pending_output = 0;
4531 }
4532
4533 static void
4534 dot_xfloat_cons_ua (kind)
4535 int kind;
4536 {
4537 int saved_auto_align = md.auto_align;
4538 char *name = parse_section_name ();
4539 if (!name)
4540 return;
4541
4542 md.keep_pending_output = 1;
4543 set_section (name);
4544 md.auto_align = 0;
4545 stmt_float_cons (kind);
4546 md.auto_align = saved_auto_align;
4547 obj_elf_previous (0);
4548 md.keep_pending_output = 0;
4549 }
4550
4551 /* .reg.val <regname>,value */
4552
4553 static void
4554 dot_reg_val (dummy)
4555 int dummy ATTRIBUTE_UNUSED;
4556 {
4557 expressionS reg;
4558
4559 expression (&reg);
4560 if (reg.X_op != O_register)
4561 {
4562 as_bad (_("Register name expected"));
4563 ignore_rest_of_line ();
4564 }
4565 else if (*input_line_pointer++ != ',')
4566 {
4567 as_bad (_("Comma expected"));
4568 ignore_rest_of_line ();
4569 }
4570 else
4571 {
4572 valueT value = get_absolute_expression ();
4573 int regno = reg.X_add_number;
4574 if (regno < REG_GR || regno > REG_GR + 128)
4575 as_warn (_("Register value annotation ignored"));
4576 else
4577 {
4578 gr_values[regno - REG_GR].known = 1;
4579 gr_values[regno - REG_GR].value = value;
4580 gr_values[regno - REG_GR].path = md.path;
4581 }
4582 }
4583 demand_empty_rest_of_line ();
4584 }
4585
4586 /* select dv checking mode
4587 .auto
4588 .explicit
4589 .default
4590
4591 A stop is inserted when changing modes
4592 */
4593
4594 static void
4595 dot_dv_mode (type)
4596 int type;
4597 {
4598 if (md.manual_bundling)
4599 as_warn (_("Directive invalid within a bundle"));
4600
4601 if (type == 'E' || type == 'A')
4602 md.mode_explicitly_set = 0;
4603 else
4604 md.mode_explicitly_set = 1;
4605
4606 md.detect_dv = 1;
4607 switch (type)
4608 {
4609 case 'A':
4610 case 'a':
4611 if (md.explicit_mode)
4612 insn_group_break (1, 0, 0);
4613 md.explicit_mode = 0;
4614 break;
4615 case 'E':
4616 case 'e':
4617 if (!md.explicit_mode)
4618 insn_group_break (1, 0, 0);
4619 md.explicit_mode = 1;
4620 break;
4621 default:
4622 case 'd':
4623 if (md.explicit_mode != md.default_explicit_mode)
4624 insn_group_break (1, 0, 0);
4625 md.explicit_mode = md.default_explicit_mode;
4626 md.mode_explicitly_set = 0;
4627 break;
4628 }
4629 }
4630
4631 static void
4632 print_prmask (mask)
4633 valueT mask;
4634 {
4635 int regno;
4636 char *comma = "";
4637 for (regno = 0; regno < 64; regno++)
4638 {
4639 if (mask & ((valueT) 1 << regno))
4640 {
4641 fprintf (stderr, "%s p%d", comma, regno);
4642 comma = ",";
4643 }
4644 }
4645 }
4646
4647 /*
4648 .pred.rel.clear [p1 [,p2 [,...]]] (also .pred.rel "clear")
4649 .pred.rel.imply p1, p2 (also .pred.rel "imply")
4650 .pred.rel.mutex p1, p2 [,...] (also .pred.rel "mutex")
4651 .pred.safe_across_calls p1 [, p2 [,...]]
4652 */
4653
4654 static void
4655 dot_pred_rel (type)
4656 int type;
4657 {
4658 valueT mask = 0;
4659 int count = 0;
4660 int p1 = -1, p2 = -1;
4661
4662 if (type == 0)
4663 {
4664 if (*input_line_pointer != '"')
4665 {
4666 as_bad (_("Missing predicate relation type"));
4667 ignore_rest_of_line ();
4668 return;
4669 }
4670 else
4671 {
4672 int len;
4673 char *form = demand_copy_C_string (&len);
4674 if (strcmp (form, "mutex") == 0)
4675 type = 'm';
4676 else if (strcmp (form, "clear") == 0)
4677 type = 'c';
4678 else if (strcmp (form, "imply") == 0)
4679 type = 'i';
4680 else
4681 {
4682 as_bad (_("Unrecognized predicate relation type"));
4683 ignore_rest_of_line ();
4684 return;
4685 }
4686 }
4687 if (*input_line_pointer == ',')
4688 ++input_line_pointer;
4689 SKIP_WHITESPACE ();
4690 }
4691
4692 SKIP_WHITESPACE ();
4693 while (1)
4694 {
4695 valueT bit = 1;
4696 int regno;
4697
4698 if (TOUPPER (*input_line_pointer) != 'P'
4699 || (regno = atoi (++input_line_pointer)) < 0
4700 || regno > 63)
4701 {
4702 as_bad (_("Predicate register expected"));
4703 ignore_rest_of_line ();
4704 return;
4705 }
4706 while (ISDIGIT (*input_line_pointer))
4707 ++input_line_pointer;
4708 if (p1 == -1)
4709 p1 = regno;
4710 else if (p2 == -1)
4711 p2 = regno;
4712 bit <<= regno;
4713 if (mask & bit)
4714 as_warn (_("Duplicate predicate register ignored"));
4715 mask |= bit;
4716 count++;
4717 /* See if it's a range. */
4718 if (*input_line_pointer == '-')
4719 {
4720 valueT stop = 1;
4721 ++input_line_pointer;
4722
4723 if (TOUPPER (*input_line_pointer) != 'P'
4724 || (regno = atoi (++input_line_pointer)) < 0
4725 || regno > 63)
4726 {
4727 as_bad (_("Predicate register expected"));
4728 ignore_rest_of_line ();
4729 return;
4730 }
4731 while (ISDIGIT (*input_line_pointer))
4732 ++input_line_pointer;
4733 stop <<= regno;
4734 if (bit >= stop)
4735 {
4736 as_bad (_("Bad register range"));
4737 ignore_rest_of_line ();
4738 return;
4739 }
4740 while (bit < stop)
4741 {
4742 bit <<= 1;
4743 mask |= bit;
4744 count++;
4745 }
4746 SKIP_WHITESPACE ();
4747 }
4748 if (*input_line_pointer != ',')
4749 break;
4750 ++input_line_pointer;
4751 SKIP_WHITESPACE ();
4752 }
4753
4754 switch (type)
4755 {
4756 case 'c':
4757 if (count == 0)
4758 mask = ~(valueT) 0;
4759 clear_qp_mutex (mask);
4760 clear_qp_implies (mask, (valueT) 0);
4761 break;
4762 case 'i':
4763 if (count != 2 || p1 == -1 || p2 == -1)
4764 as_bad (_("Predicate source and target required"));
4765 else if (p1 == 0 || p2 == 0)
4766 as_bad (_("Use of p0 is not valid in this context"));
4767 else
4768 add_qp_imply (p1, p2);
4769 break;
4770 case 'm':
4771 if (count < 2)
4772 {
4773 as_bad (_("At least two PR arguments expected"));
4774 break;
4775 }
4776 else if (mask & 1)
4777 {
4778 as_bad (_("Use of p0 is not valid in this context"));
4779 break;
4780 }
4781 add_qp_mutex (mask);
4782 break;
4783 case 's':
4784 /* note that we don't override any existing relations */
4785 if (count == 0)
4786 {
4787 as_bad (_("At least one PR argument expected"));
4788 break;
4789 }
4790 if (md.debug_dv)
4791 {
4792 fprintf (stderr, "Safe across calls: ");
4793 print_prmask (mask);
4794 fprintf (stderr, "\n");
4795 }
4796 qp_safe_across_calls = mask;
4797 break;
4798 }
4799 demand_empty_rest_of_line ();
4800 }
4801
4802 /* .entry label [, label [, ...]]
4803 Hint to DV code that the given labels are to be considered entry points.
4804 Otherwise, only global labels are considered entry points. */
4805
4806 static void
4807 dot_entry (dummy)
4808 int dummy ATTRIBUTE_UNUSED;
4809 {
4810 const char *err;
4811 char *name;
4812 int c;
4813 symbolS *symbolP;
4814
4815 do
4816 {
4817 name = input_line_pointer;
4818 c = get_symbol_end ();
4819 symbolP = symbol_find_or_make (name);
4820
4821 err = hash_insert (md.entry_hash, S_GET_NAME (symbolP), (PTR) symbolP);
4822 if (err)
4823 as_fatal (_("Inserting \"%s\" into entry hint table failed: %s"),
4824 name, err);
4825
4826 *input_line_pointer = c;
4827 SKIP_WHITESPACE ();
4828 c = *input_line_pointer;
4829 if (c == ',')
4830 {
4831 input_line_pointer++;
4832 SKIP_WHITESPACE ();
4833 if (*input_line_pointer == '\n')
4834 c = '\n';
4835 }
4836 }
4837 while (c == ',');
4838
4839 demand_empty_rest_of_line ();
4840 }
4841
4842 /* .mem.offset offset, base
4843 "base" is used to distinguish between offsets from a different base. */
4844
4845 static void
4846 dot_mem_offset (dummy)
4847 int dummy ATTRIBUTE_UNUSED;
4848 {
4849 md.mem_offset.hint = 1;
4850 md.mem_offset.offset = get_absolute_expression ();
4851 if (*input_line_pointer != ',')
4852 {
4853 as_bad (_("Comma expected"));
4854 ignore_rest_of_line ();
4855 return;
4856 }
4857 ++input_line_pointer;
4858 md.mem_offset.base = get_absolute_expression ();
4859 demand_empty_rest_of_line ();
4860 }
4861
4862 /* ia64-specific pseudo-ops: */
4863 const pseudo_typeS md_pseudo_table[] =
4864 {
4865 { "radix", dot_radix, 0 },
4866 { "lcomm", s_lcomm_bytes, 1 },
4867 { "bss", dot_special_section, SPECIAL_SECTION_BSS },
4868 { "sbss", dot_special_section, SPECIAL_SECTION_SBSS },
4869 { "sdata", dot_special_section, SPECIAL_SECTION_SDATA },
4870 { "rodata", dot_special_section, SPECIAL_SECTION_RODATA },
4871 { "comment", dot_special_section, SPECIAL_SECTION_COMMENT },
4872 { "ia_64.unwind", dot_special_section, SPECIAL_SECTION_UNWIND },
4873 { "ia_64.unwind_info", dot_special_section, SPECIAL_SECTION_UNWIND_INFO },
4874 { "init_array", dot_special_section, SPECIAL_SECTION_INIT_ARRAY },
4875 { "fini_array", dot_special_section, SPECIAL_SECTION_FINI_ARRAY },
4876 { "proc", dot_proc, 0 },
4877 { "body", dot_body, 0 },
4878 { "prologue", dot_prologue, 0 },
4879 { "endp", dot_endp, 0 },
4880 { "file", (void (*) PARAMS ((int))) dwarf2_directive_file, 0 },
4881 { "loc", dwarf2_directive_loc, 0 },
4882
4883 { "fframe", dot_fframe, 0 },
4884 { "vframe", dot_vframe, 0 },
4885 { "vframesp", dot_vframesp, 0 },
4886 { "vframepsp", dot_vframepsp, 0 },
4887 { "save", dot_save, 0 },
4888 { "restore", dot_restore, 0 },
4889 { "restorereg", dot_restorereg, 0 },
4890 { "restorereg.p", dot_restorereg_p, 0 },
4891 { "handlerdata", dot_handlerdata, 0 },
4892 { "unwentry", dot_unwentry, 0 },
4893 { "altrp", dot_altrp, 0 },
4894 { "savesp", dot_savemem, 0 },
4895 { "savepsp", dot_savemem, 1 },
4896 { "save.g", dot_saveg, 0 },
4897 { "save.f", dot_savef, 0 },
4898 { "save.b", dot_saveb, 0 },
4899 { "save.gf", dot_savegf, 0 },
4900 { "spill", dot_spill, 0 },
4901 { "spillreg", dot_spillreg, 0 },
4902 { "spillsp", dot_spillmem, 0 },
4903 { "spillpsp", dot_spillmem, 1 },
4904 { "spillreg.p", dot_spillreg_p, 0 },
4905 { "spillsp.p", dot_spillmem_p, 0 },
4906 { "spillpsp.p", dot_spillmem_p, 1 },
4907 { "label_state", dot_label_state, 0 },
4908 { "copy_state", dot_copy_state, 0 },
4909 { "unwabi", dot_unwabi, 0 },
4910 { "personality", dot_personality, 0 },
4911 #if 0
4912 { "estate", dot_estate, 0 },
4913 #endif
4914 { "mii", dot_template, 0x0 },
4915 { "mli", dot_template, 0x2 }, /* old format, for compatibility */
4916 { "mlx", dot_template, 0x2 },
4917 { "mmi", dot_template, 0x4 },
4918 { "mfi", dot_template, 0x6 },
4919 { "mmf", dot_template, 0x7 },
4920 { "mib", dot_template, 0x8 },
4921 { "mbb", dot_template, 0x9 },
4922 { "bbb", dot_template, 0xb },
4923 { "mmb", dot_template, 0xc },
4924 { "mfb", dot_template, 0xe },
4925 #if 0
4926 { "lb", dot_scope, 0 },
4927 { "le", dot_scope, 1 },
4928 #endif
4929 { "align", s_align_bytes, 0 },
4930 { "regstk", dot_regstk, 0 },
4931 { "rotr", dot_rot, DYNREG_GR },
4932 { "rotf", dot_rot, DYNREG_FR },
4933 { "rotp", dot_rot, DYNREG_PR },
4934 { "lsb", dot_byteorder, 0 },
4935 { "msb", dot_byteorder, 1 },
4936 { "psr", dot_psr, 0 },
4937 { "alias", dot_alias, 0 },
4938 { "ln", dot_ln, 0 }, /* source line info (for debugging) */
4939
4940 { "xdata1", dot_xdata, 1 },
4941 { "xdata2", dot_xdata, 2 },
4942 { "xdata4", dot_xdata, 4 },
4943 { "xdata8", dot_xdata, 8 },
4944 { "xreal4", dot_xfloat_cons, 'f' },
4945 { "xreal8", dot_xfloat_cons, 'd' },
4946 { "xreal10", dot_xfloat_cons, 'x' },
4947 { "xreal16", dot_xfloat_cons, 'X' },
4948 { "xstring", dot_xstringer, 0 },
4949 { "xstringz", dot_xstringer, 1 },
4950
4951 /* unaligned versions: */
4952 { "xdata2.ua", dot_xdata_ua, 2 },
4953 { "xdata4.ua", dot_xdata_ua, 4 },
4954 { "xdata8.ua", dot_xdata_ua, 8 },
4955 { "xreal4.ua", dot_xfloat_cons_ua, 'f' },
4956 { "xreal8.ua", dot_xfloat_cons_ua, 'd' },
4957 { "xreal10.ua", dot_xfloat_cons_ua, 'x' },
4958 { "xreal16.ua", dot_xfloat_cons_ua, 'X' },
4959
4960 /* annotations/DV checking support */
4961 { "entry", dot_entry, 0 },
4962 { "mem.offset", dot_mem_offset, 0 },
4963 { "pred.rel", dot_pred_rel, 0 },
4964 { "pred.rel.clear", dot_pred_rel, 'c' },
4965 { "pred.rel.imply", dot_pred_rel, 'i' },
4966 { "pred.rel.mutex", dot_pred_rel, 'm' },
4967 { "pred.safe_across_calls", dot_pred_rel, 's' },
4968 { "reg.val", dot_reg_val, 0 },
4969 { "auto", dot_dv_mode, 'a' },
4970 { "explicit", dot_dv_mode, 'e' },
4971 { "default", dot_dv_mode, 'd' },
4972
4973 /* ??? These are needed to make gas/testsuite/gas/elf/ehopt.s work.
4974 IA-64 aligns data allocation pseudo-ops by default, so we have to
4975 tell it that these ones are supposed to be unaligned. Long term,
4976 should rewrite so that only IA-64 specific data allocation pseudo-ops
4977 are aligned by default. */
4978 {"2byte", stmt_cons_ua, 2},
4979 {"4byte", stmt_cons_ua, 4},
4980 {"8byte", stmt_cons_ua, 8},
4981
4982 { NULL, 0, 0 }
4983 };
4984
4985 static const struct pseudo_opcode
4986 {
4987 const char *name;
4988 void (*handler) (int);
4989 int arg;
4990 }
4991 pseudo_opcode[] =
4992 {
4993 /* these are more like pseudo-ops, but don't start with a dot */
4994 { "data1", cons, 1 },
4995 { "data2", cons, 2 },
4996 { "data4", cons, 4 },
4997 { "data8", cons, 8 },
4998 { "data16", cons, 16 },
4999 { "real4", stmt_float_cons, 'f' },
5000 { "real8", stmt_float_cons, 'd' },
5001 { "real10", stmt_float_cons, 'x' },
5002 { "real16", stmt_float_cons, 'X' },
5003 { "string", stringer, 0 },
5004 { "stringz", stringer, 1 },
5005
5006 /* unaligned versions: */
5007 { "data2.ua", stmt_cons_ua, 2 },
5008 { "data4.ua", stmt_cons_ua, 4 },
5009 { "data8.ua", stmt_cons_ua, 8 },
5010 { "data16.ua", stmt_cons_ua, 16 },
5011 { "real4.ua", float_cons, 'f' },
5012 { "real8.ua", float_cons, 'd' },
5013 { "real10.ua", float_cons, 'x' },
5014 { "real16.ua", float_cons, 'X' },
5015 };
5016
5017 /* Declare a register by creating a symbol for it and entering it in
5018 the symbol table. */
5019
5020 static symbolS *
5021 declare_register (name, regnum)
5022 const char *name;
5023 int regnum;
5024 {
5025 const char *err;
5026 symbolS *sym;
5027
5028 sym = symbol_new (name, reg_section, regnum, &zero_address_frag);
5029
5030 err = hash_insert (md.reg_hash, S_GET_NAME (sym), (PTR) sym);
5031 if (err)
5032 as_fatal ("Inserting \"%s\" into register table failed: %s",
5033 name, err);
5034
5035 return sym;
5036 }
5037
5038 static void
5039 declare_register_set (prefix, num_regs, base_regnum)
5040 const char *prefix;
5041 int num_regs;
5042 int base_regnum;
5043 {
5044 char name[8];
5045 int i;
5046
5047 for (i = 0; i < num_regs; ++i)
5048 {
5049 sprintf (name, "%s%u", prefix, i);
5050 declare_register (name, base_regnum + i);
5051 }
5052 }
5053
5054 static unsigned int
5055 operand_width (opnd)
5056 enum ia64_opnd opnd;
5057 {
5058 const struct ia64_operand *odesc = &elf64_ia64_operands[opnd];
5059 unsigned int bits = 0;
5060 int i;
5061
5062 bits = 0;
5063 for (i = 0; i < NELEMS (odesc->field) && odesc->field[i].bits; ++i)
5064 bits += odesc->field[i].bits;
5065
5066 return bits;
5067 }
5068
5069 static enum operand_match_result
5070 operand_match (idesc, index, e)
5071 const struct ia64_opcode *idesc;
5072 int index;
5073 expressionS *e;
5074 {
5075 enum ia64_opnd opnd = idesc->operands[index];
5076 int bits, relocatable = 0;
5077 struct insn_fix *fix;
5078 bfd_signed_vma val;
5079
5080 switch (opnd)
5081 {
5082 /* constants: */
5083
5084 case IA64_OPND_AR_CCV:
5085 if (e->X_op == O_register && e->X_add_number == REG_AR + 32)
5086 return OPERAND_MATCH;
5087 break;
5088
5089 case IA64_OPND_AR_CSD:
5090 if (e->X_op == O_register && e->X_add_number == REG_AR + 25)
5091 return OPERAND_MATCH;
5092 break;
5093
5094 case IA64_OPND_AR_PFS:
5095 if (e->X_op == O_register && e->X_add_number == REG_AR + 64)
5096 return OPERAND_MATCH;
5097 break;
5098
5099 case IA64_OPND_GR0:
5100 if (e->X_op == O_register && e->X_add_number == REG_GR + 0)
5101 return OPERAND_MATCH;
5102 break;
5103
5104 case IA64_OPND_IP:
5105 if (e->X_op == O_register && e->X_add_number == REG_IP)
5106 return OPERAND_MATCH;
5107 break;
5108
5109 case IA64_OPND_PR:
5110 if (e->X_op == O_register && e->X_add_number == REG_PR)
5111 return OPERAND_MATCH;
5112 break;
5113
5114 case IA64_OPND_PR_ROT:
5115 if (e->X_op == O_register && e->X_add_number == REG_PR_ROT)
5116 return OPERAND_MATCH;
5117 break;
5118
5119 case IA64_OPND_PSR:
5120 if (e->X_op == O_register && e->X_add_number == REG_PSR)
5121 return OPERAND_MATCH;
5122 break;
5123
5124 case IA64_OPND_PSR_L:
5125 if (e->X_op == O_register && e->X_add_number == REG_PSR_L)
5126 return OPERAND_MATCH;
5127 break;
5128
5129 case IA64_OPND_PSR_UM:
5130 if (e->X_op == O_register && e->X_add_number == REG_PSR_UM)
5131 return OPERAND_MATCH;
5132 break;
5133
5134 case IA64_OPND_C1:
5135 if (e->X_op == O_constant)
5136 {
5137 if (e->X_add_number == 1)
5138 return OPERAND_MATCH;
5139 else
5140 return OPERAND_OUT_OF_RANGE;
5141 }
5142 break;
5143
5144 case IA64_OPND_C8:
5145 if (e->X_op == O_constant)
5146 {
5147 if (e->X_add_number == 8)
5148 return OPERAND_MATCH;
5149 else
5150 return OPERAND_OUT_OF_RANGE;
5151 }
5152 break;
5153
5154 case IA64_OPND_C16:
5155 if (e->X_op == O_constant)
5156 {
5157 if (e->X_add_number == 16)
5158 return OPERAND_MATCH;
5159 else
5160 return OPERAND_OUT_OF_RANGE;
5161 }
5162 break;
5163
5164 /* register operands: */
5165
5166 case IA64_OPND_AR3:
5167 if (e->X_op == O_register && e->X_add_number >= REG_AR
5168 && e->X_add_number < REG_AR + 128)
5169 return OPERAND_MATCH;
5170 break;
5171
5172 case IA64_OPND_B1:
5173 case IA64_OPND_B2:
5174 if (e->X_op == O_register && e->X_add_number >= REG_BR
5175 && e->X_add_number < REG_BR + 8)
5176 return OPERAND_MATCH;
5177 break;
5178
5179 case IA64_OPND_CR3:
5180 if (e->X_op == O_register && e->X_add_number >= REG_CR
5181 && e->X_add_number < REG_CR + 128)
5182 return OPERAND_MATCH;
5183 break;
5184
5185 case IA64_OPND_F1:
5186 case IA64_OPND_F2:
5187 case IA64_OPND_F3:
5188 case IA64_OPND_F4:
5189 if (e->X_op == O_register && e->X_add_number >= REG_FR
5190 && e->X_add_number < REG_FR + 128)
5191 return OPERAND_MATCH;
5192 break;
5193
5194 case IA64_OPND_P1:
5195 case IA64_OPND_P2:
5196 if (e->X_op == O_register && e->X_add_number >= REG_P
5197 && e->X_add_number < REG_P + 64)
5198 return OPERAND_MATCH;
5199 break;
5200
5201 case IA64_OPND_R1:
5202 case IA64_OPND_R2:
5203 case IA64_OPND_R3:
5204 if (e->X_op == O_register && e->X_add_number >= REG_GR
5205 && e->X_add_number < REG_GR + 128)
5206 return OPERAND_MATCH;
5207 break;
5208
5209 case IA64_OPND_R3_2:
5210 if (e->X_op == O_register && e->X_add_number >= REG_GR)
5211 {
5212 if (e->X_add_number < REG_GR + 4)
5213 return OPERAND_MATCH;
5214 else if (e->X_add_number < REG_GR + 128)
5215 return OPERAND_OUT_OF_RANGE;
5216 }
5217 break;
5218
5219 /* indirect operands: */
5220 case IA64_OPND_CPUID_R3:
5221 case IA64_OPND_DBR_R3:
5222 case IA64_OPND_DTR_R3:
5223 case IA64_OPND_ITR_R3:
5224 case IA64_OPND_IBR_R3:
5225 case IA64_OPND_MSR_R3:
5226 case IA64_OPND_PKR_R3:
5227 case IA64_OPND_PMC_R3:
5228 case IA64_OPND_PMD_R3:
5229 case IA64_OPND_RR_R3:
5230 if (e->X_op == O_index && e->X_op_symbol
5231 && (S_GET_VALUE (e->X_op_symbol) - IND_CPUID
5232 == opnd - IA64_OPND_CPUID_R3))
5233 return OPERAND_MATCH;
5234 break;
5235
5236 case IA64_OPND_MR3:
5237 if (e->X_op == O_index && !e->X_op_symbol)
5238 return OPERAND_MATCH;
5239 break;
5240
5241 /* immediate operands: */
5242 case IA64_OPND_CNT2a:
5243 case IA64_OPND_LEN4:
5244 case IA64_OPND_LEN6:
5245 bits = operand_width (idesc->operands[index]);
5246 if (e->X_op == O_constant)
5247 {
5248 if ((bfd_vma) (e->X_add_number - 1) < ((bfd_vma) 1 << bits))
5249 return OPERAND_MATCH;
5250 else
5251 return OPERAND_OUT_OF_RANGE;
5252 }
5253 break;
5254
5255 case IA64_OPND_CNT2b:
5256 if (e->X_op == O_constant)
5257 {
5258 if ((bfd_vma) (e->X_add_number - 1) < 3)
5259 return OPERAND_MATCH;
5260 else
5261 return OPERAND_OUT_OF_RANGE;
5262 }
5263 break;
5264
5265 case IA64_OPND_CNT2c:
5266 val = e->X_add_number;
5267 if (e->X_op == O_constant)
5268 {
5269 if ((val == 0 || val == 7 || val == 15 || val == 16))
5270 return OPERAND_MATCH;
5271 else
5272 return OPERAND_OUT_OF_RANGE;
5273 }
5274 break;
5275
5276 case IA64_OPND_SOR:
5277 /* SOR must be an integer multiple of 8 */
5278 if (e->X_op == O_constant && e->X_add_number & 0x7)
5279 return OPERAND_OUT_OF_RANGE;
5280 case IA64_OPND_SOF:
5281 case IA64_OPND_SOL:
5282 if (e->X_op == O_constant)
5283 {
5284 if ((bfd_vma) e->X_add_number <= 96)
5285 return OPERAND_MATCH;
5286 else
5287 return OPERAND_OUT_OF_RANGE;
5288 }
5289 break;
5290
5291 case IA64_OPND_IMMU62:
5292 if (e->X_op == O_constant)
5293 {
5294 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << 62))
5295 return OPERAND_MATCH;
5296 else
5297 return OPERAND_OUT_OF_RANGE;
5298 }
5299 else
5300 {
5301 /* FIXME -- need 62-bit relocation type */
5302 as_bad (_("62-bit relocation not yet implemented"));
5303 }
5304 break;
5305
5306 case IA64_OPND_IMMU64:
5307 if (e->X_op == O_symbol || e->X_op == O_pseudo_fixup
5308 || e->X_op == O_subtract)
5309 {
5310 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5311 fix->code = BFD_RELOC_IA64_IMM64;
5312 if (e->X_op != O_subtract)
5313 {
5314 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5315 if (e->X_op == O_pseudo_fixup)
5316 e->X_op = O_symbol;
5317 }
5318
5319 fix->opnd = idesc->operands[index];
5320 fix->expr = *e;
5321 fix->is_pcrel = 0;
5322 ++CURR_SLOT.num_fixups;
5323 return OPERAND_MATCH;
5324 }
5325 else if (e->X_op == O_constant)
5326 return OPERAND_MATCH;
5327 break;
5328
5329 case IA64_OPND_CCNT5:
5330 case IA64_OPND_CNT5:
5331 case IA64_OPND_CNT6:
5332 case IA64_OPND_CPOS6a:
5333 case IA64_OPND_CPOS6b:
5334 case IA64_OPND_CPOS6c:
5335 case IA64_OPND_IMMU2:
5336 case IA64_OPND_IMMU7a:
5337 case IA64_OPND_IMMU7b:
5338 case IA64_OPND_IMMU21:
5339 case IA64_OPND_IMMU24:
5340 case IA64_OPND_MBTYPE4:
5341 case IA64_OPND_MHTYPE8:
5342 case IA64_OPND_POS6:
5343 bits = operand_width (idesc->operands[index]);
5344 if (e->X_op == O_constant)
5345 {
5346 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5347 return OPERAND_MATCH;
5348 else
5349 return OPERAND_OUT_OF_RANGE;
5350 }
5351 break;
5352
5353 case IA64_OPND_IMMU9:
5354 bits = operand_width (idesc->operands[index]);
5355 if (e->X_op == O_constant)
5356 {
5357 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5358 {
5359 int lobits = e->X_add_number & 0x3;
5360 if (((bfd_vma) e->X_add_number & 0x3C) != 0 && lobits == 0)
5361 e->X_add_number |= (bfd_vma) 0x3;
5362 return OPERAND_MATCH;
5363 }
5364 else
5365 return OPERAND_OUT_OF_RANGE;
5366 }
5367 break;
5368
5369 case IA64_OPND_IMM44:
5370 /* least 16 bits must be zero */
5371 if ((e->X_add_number & 0xffff) != 0)
5372 /* XXX technically, this is wrong: we should not be issuing warning
5373 messages until we're sure this instruction pattern is going to
5374 be used! */
5375 as_warn (_("lower 16 bits of mask ignored"));
5376
5377 if (e->X_op == O_constant)
5378 {
5379 if (((e->X_add_number >= 0
5380 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 44))
5381 || (e->X_add_number < 0
5382 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 44))))
5383 {
5384 /* sign-extend */
5385 if (e->X_add_number >= 0
5386 && (e->X_add_number & ((bfd_vma) 1 << 43)) != 0)
5387 {
5388 e->X_add_number |= ~(((bfd_vma) 1 << 44) - 1);
5389 }
5390 return OPERAND_MATCH;
5391 }
5392 else
5393 return OPERAND_OUT_OF_RANGE;
5394 }
5395 break;
5396
5397 case IA64_OPND_IMM17:
5398 /* bit 0 is a don't care (pr0 is hardwired to 1) */
5399 if (e->X_op == O_constant)
5400 {
5401 if (((e->X_add_number >= 0
5402 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 17))
5403 || (e->X_add_number < 0
5404 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 17))))
5405 {
5406 /* sign-extend */
5407 if (e->X_add_number >= 0
5408 && (e->X_add_number & ((bfd_vma) 1 << 16)) != 0)
5409 {
5410 e->X_add_number |= ~(((bfd_vma) 1 << 17) - 1);
5411 }
5412 return OPERAND_MATCH;
5413 }
5414 else
5415 return OPERAND_OUT_OF_RANGE;
5416 }
5417 break;
5418
5419 case IA64_OPND_IMM14:
5420 case IA64_OPND_IMM22:
5421 relocatable = 1;
5422 case IA64_OPND_IMM1:
5423 case IA64_OPND_IMM8:
5424 case IA64_OPND_IMM8U4:
5425 case IA64_OPND_IMM8M1:
5426 case IA64_OPND_IMM8M1U4:
5427 case IA64_OPND_IMM8M1U8:
5428 case IA64_OPND_IMM9a:
5429 case IA64_OPND_IMM9b:
5430 bits = operand_width (idesc->operands[index]);
5431 if (relocatable && (e->X_op == O_symbol
5432 || e->X_op == O_subtract
5433 || e->X_op == O_pseudo_fixup))
5434 {
5435 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5436
5437 if (idesc->operands[index] == IA64_OPND_IMM14)
5438 fix->code = BFD_RELOC_IA64_IMM14;
5439 else
5440 fix->code = BFD_RELOC_IA64_IMM22;
5441
5442 if (e->X_op != O_subtract)
5443 {
5444 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5445 if (e->X_op == O_pseudo_fixup)
5446 e->X_op = O_symbol;
5447 }
5448
5449 fix->opnd = idesc->operands[index];
5450 fix->expr = *e;
5451 fix->is_pcrel = 0;
5452 ++CURR_SLOT.num_fixups;
5453 return OPERAND_MATCH;
5454 }
5455 else if (e->X_op != O_constant
5456 && ! (e->X_op == O_big && opnd == IA64_OPND_IMM8M1U8))
5457 return OPERAND_MISMATCH;
5458
5459 if (opnd == IA64_OPND_IMM8M1U4)
5460 {
5461 /* Zero is not valid for unsigned compares that take an adjusted
5462 constant immediate range. */
5463 if (e->X_add_number == 0)
5464 return OPERAND_OUT_OF_RANGE;
5465
5466 /* Sign-extend 32-bit unsigned numbers, so that the following range
5467 checks will work. */
5468 val = e->X_add_number;
5469 if (((val & (~(bfd_vma) 0 << 32)) == 0)
5470 && ((val & ((bfd_vma) 1 << 31)) != 0))
5471 val = ((val << 32) >> 32);
5472
5473 /* Check for 0x100000000. This is valid because
5474 0x100000000-1 is the same as ((uint32_t) -1). */
5475 if (val == ((bfd_signed_vma) 1 << 32))
5476 return OPERAND_MATCH;
5477
5478 val = val - 1;
5479 }
5480 else if (opnd == IA64_OPND_IMM8M1U8)
5481 {
5482 /* Zero is not valid for unsigned compares that take an adjusted
5483 constant immediate range. */
5484 if (e->X_add_number == 0)
5485 return OPERAND_OUT_OF_RANGE;
5486
5487 /* Check for 0x10000000000000000. */
5488 if (e->X_op == O_big)
5489 {
5490 if (generic_bignum[0] == 0
5491 && generic_bignum[1] == 0
5492 && generic_bignum[2] == 0
5493 && generic_bignum[3] == 0
5494 && generic_bignum[4] == 1)
5495 return OPERAND_MATCH;
5496 else
5497 return OPERAND_OUT_OF_RANGE;
5498 }
5499 else
5500 val = e->X_add_number - 1;
5501 }
5502 else if (opnd == IA64_OPND_IMM8M1)
5503 val = e->X_add_number - 1;
5504 else if (opnd == IA64_OPND_IMM8U4)
5505 {
5506 /* Sign-extend 32-bit unsigned numbers, so that the following range
5507 checks will work. */
5508 val = e->X_add_number;
5509 if (((val & (~(bfd_vma) 0 << 32)) == 0)
5510 && ((val & ((bfd_vma) 1 << 31)) != 0))
5511 val = ((val << 32) >> 32);
5512 }
5513 else
5514 val = e->X_add_number;
5515
5516 if ((val >= 0 && (bfd_vma) val < ((bfd_vma) 1 << (bits - 1)))
5517 || (val < 0 && (bfd_vma) -val <= ((bfd_vma) 1 << (bits - 1))))
5518 return OPERAND_MATCH;
5519 else
5520 return OPERAND_OUT_OF_RANGE;
5521
5522 case IA64_OPND_INC3:
5523 /* +/- 1, 4, 8, 16 */
5524 val = e->X_add_number;
5525 if (val < 0)
5526 val = -val;
5527 if (e->X_op == O_constant)
5528 {
5529 if ((val == 1 || val == 4 || val == 8 || val == 16))
5530 return OPERAND_MATCH;
5531 else
5532 return OPERAND_OUT_OF_RANGE;
5533 }
5534 break;
5535
5536 case IA64_OPND_TGT25:
5537 case IA64_OPND_TGT25b:
5538 case IA64_OPND_TGT25c:
5539 case IA64_OPND_TGT64:
5540 if (e->X_op == O_symbol)
5541 {
5542 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5543 if (opnd == IA64_OPND_TGT25)
5544 fix->code = BFD_RELOC_IA64_PCREL21F;
5545 else if (opnd == IA64_OPND_TGT25b)
5546 fix->code = BFD_RELOC_IA64_PCREL21M;
5547 else if (opnd == IA64_OPND_TGT25c)
5548 fix->code = BFD_RELOC_IA64_PCREL21B;
5549 else if (opnd == IA64_OPND_TGT64)
5550 fix->code = BFD_RELOC_IA64_PCREL60B;
5551 else
5552 abort ();
5553
5554 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5555 fix->opnd = idesc->operands[index];
5556 fix->expr = *e;
5557 fix->is_pcrel = 1;
5558 ++CURR_SLOT.num_fixups;
5559 return OPERAND_MATCH;
5560 }
5561 case IA64_OPND_TAG13:
5562 case IA64_OPND_TAG13b:
5563 switch (e->X_op)
5564 {
5565 case O_constant:
5566 return OPERAND_MATCH;
5567
5568 case O_symbol:
5569 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5570 /* There are no external relocs for TAG13/TAG13b fields, so we
5571 create a dummy reloc. This will not live past md_apply_fix3. */
5572 fix->code = BFD_RELOC_UNUSED;
5573 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5574 fix->opnd = idesc->operands[index];
5575 fix->expr = *e;
5576 fix->is_pcrel = 1;
5577 ++CURR_SLOT.num_fixups;
5578 return OPERAND_MATCH;
5579
5580 default:
5581 break;
5582 }
5583 break;
5584
5585 case IA64_OPND_LDXMOV:
5586 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5587 fix->code = BFD_RELOC_IA64_LDXMOV;
5588 fix->opnd = idesc->operands[index];
5589 fix->expr = *e;
5590 fix->is_pcrel = 0;
5591 ++CURR_SLOT.num_fixups;
5592 return OPERAND_MATCH;
5593
5594 default:
5595 break;
5596 }
5597 return OPERAND_MISMATCH;
5598 }
5599
5600 static int
5601 parse_operand (e)
5602 expressionS *e;
5603 {
5604 int sep = '\0';
5605
5606 memset (e, 0, sizeof (*e));
5607 e->X_op = O_absent;
5608 SKIP_WHITESPACE ();
5609 if (*input_line_pointer != '}')
5610 expression (e);
5611 sep = *input_line_pointer++;
5612
5613 if (sep == '}')
5614 {
5615 if (!md.manual_bundling)
5616 as_warn ("Found '}' when manual bundling is off");
5617 else
5618 CURR_SLOT.manual_bundling_off = 1;
5619 md.manual_bundling = 0;
5620 sep = '\0';
5621 }
5622 return sep;
5623 }
5624
5625 /* Returns the next entry in the opcode table that matches the one in
5626 IDESC, and frees the entry in IDESC. If no matching entry is
5627 found, NULL is returned instead. */
5628
5629 static struct ia64_opcode *
5630 get_next_opcode (struct ia64_opcode *idesc)
5631 {
5632 struct ia64_opcode *next = ia64_find_next_opcode (idesc);
5633 ia64_free_opcode (idesc);
5634 return next;
5635 }
5636
5637 /* Parse the operands for the opcode and find the opcode variant that
5638 matches the specified operands, or NULL if no match is possible. */
5639
5640 static struct ia64_opcode *
5641 parse_operands (idesc)
5642 struct ia64_opcode *idesc;
5643 {
5644 int i = 0, highest_unmatched_operand, num_operands = 0, num_outputs = 0;
5645 int error_pos, out_of_range_pos, curr_out_of_range_pos, sep = 0;
5646 enum ia64_opnd expected_operand = IA64_OPND_NIL;
5647 enum operand_match_result result;
5648 char mnemonic[129];
5649 char *first_arg = 0, *end, *saved_input_pointer;
5650 unsigned int sof;
5651
5652 assert (strlen (idesc->name) <= 128);
5653
5654 strcpy (mnemonic, idesc->name);
5655 if (idesc->operands[2] == IA64_OPND_SOF)
5656 {
5657 /* To make the common idiom "alloc loc?=ar.pfs,0,1,0,0" work, we
5658 can't parse the first operand until we have parsed the
5659 remaining operands of the "alloc" instruction. */
5660 SKIP_WHITESPACE ();
5661 first_arg = input_line_pointer;
5662 end = strchr (input_line_pointer, '=');
5663 if (!end)
5664 {
5665 as_bad ("Expected separator `='");
5666 return 0;
5667 }
5668 input_line_pointer = end + 1;
5669 ++i;
5670 ++num_outputs;
5671 }
5672
5673 for (; i < NELEMS (CURR_SLOT.opnd); ++i)
5674 {
5675 sep = parse_operand (CURR_SLOT.opnd + i);
5676 if (CURR_SLOT.opnd[i].X_op == O_absent)
5677 break;
5678
5679 ++num_operands;
5680
5681 if (sep != '=' && sep != ',')
5682 break;
5683
5684 if (sep == '=')
5685 {
5686 if (num_outputs > 0)
5687 as_bad ("Duplicate equal sign (=) in instruction");
5688 else
5689 num_outputs = i + 1;
5690 }
5691 }
5692 if (sep != '\0')
5693 {
5694 as_bad ("Illegal operand separator `%c'", sep);
5695 return 0;
5696 }
5697
5698 if (idesc->operands[2] == IA64_OPND_SOF)
5699 {
5700 /* map alloc r1=ar.pfs,i,l,o,r to alloc r1=ar.pfs,(i+l+o),(i+l),r */
5701 know (strcmp (idesc->name, "alloc") == 0);
5702 if (num_operands == 5 /* first_arg not included in this count! */
5703 && CURR_SLOT.opnd[2].X_op == O_constant
5704 && CURR_SLOT.opnd[3].X_op == O_constant
5705 && CURR_SLOT.opnd[4].X_op == O_constant
5706 && CURR_SLOT.opnd[5].X_op == O_constant)
5707 {
5708 sof = set_regstack (CURR_SLOT.opnd[2].X_add_number,
5709 CURR_SLOT.opnd[3].X_add_number,
5710 CURR_SLOT.opnd[4].X_add_number,
5711 CURR_SLOT.opnd[5].X_add_number);
5712
5713 /* now we can parse the first arg: */
5714 saved_input_pointer = input_line_pointer;
5715 input_line_pointer = first_arg;
5716 sep = parse_operand (CURR_SLOT.opnd + 0);
5717 if (sep != '=')
5718 --num_outputs; /* force error */
5719 input_line_pointer = saved_input_pointer;
5720
5721 CURR_SLOT.opnd[2].X_add_number = sof;
5722 CURR_SLOT.opnd[3].X_add_number
5723 = sof - CURR_SLOT.opnd[4].X_add_number;
5724 CURR_SLOT.opnd[4] = CURR_SLOT.opnd[5];
5725 }
5726 }
5727
5728 highest_unmatched_operand = 0;
5729 curr_out_of_range_pos = -1;
5730 error_pos = 0;
5731 expected_operand = idesc->operands[0];
5732 for (; idesc; idesc = get_next_opcode (idesc))
5733 {
5734 if (num_outputs != idesc->num_outputs)
5735 continue; /* mismatch in # of outputs */
5736
5737 CURR_SLOT.num_fixups = 0;
5738
5739 /* Try to match all operands. If we see an out-of-range operand,
5740 then continue trying to match the rest of the operands, since if
5741 the rest match, then this idesc will give the best error message. */
5742
5743 out_of_range_pos = -1;
5744 for (i = 0; i < num_operands && idesc->operands[i]; ++i)
5745 {
5746 result = operand_match (idesc, i, CURR_SLOT.opnd + i);
5747 if (result != OPERAND_MATCH)
5748 {
5749 if (result != OPERAND_OUT_OF_RANGE)
5750 break;
5751 if (out_of_range_pos < 0)
5752 /* remember position of the first out-of-range operand: */
5753 out_of_range_pos = i;
5754 }
5755 }
5756
5757 /* If we did not match all operands, or if at least one operand was
5758 out-of-range, then this idesc does not match. Keep track of which
5759 idesc matched the most operands before failing. If we have two
5760 idescs that failed at the same position, and one had an out-of-range
5761 operand, then prefer the out-of-range operand. Thus if we have
5762 "add r0=0x1000000,r1" we get an error saying the constant is out
5763 of range instead of an error saying that the constant should have been
5764 a register. */
5765
5766 if (i != num_operands || out_of_range_pos >= 0)
5767 {
5768 if (i > highest_unmatched_operand
5769 || (i == highest_unmatched_operand
5770 && out_of_range_pos > curr_out_of_range_pos))
5771 {
5772 highest_unmatched_operand = i;
5773 if (out_of_range_pos >= 0)
5774 {
5775 expected_operand = idesc->operands[out_of_range_pos];
5776 error_pos = out_of_range_pos;
5777 }
5778 else
5779 {
5780 expected_operand = idesc->operands[i];
5781 error_pos = i;
5782 }
5783 curr_out_of_range_pos = out_of_range_pos;
5784 }
5785 continue;
5786 }
5787
5788 if (num_operands < NELEMS (idesc->operands)
5789 && idesc->operands[num_operands])
5790 continue; /* mismatch in number of arguments */
5791
5792 break;
5793 }
5794 if (!idesc)
5795 {
5796 if (expected_operand)
5797 as_bad ("Operand %u of `%s' should be %s",
5798 error_pos + 1, mnemonic,
5799 elf64_ia64_operands[expected_operand].desc);
5800 else
5801 as_bad ("Operand mismatch");
5802 return 0;
5803 }
5804 return idesc;
5805 }
5806
5807 /* Keep track of state necessary to determine whether a NOP is necessary
5808 to avoid an erratum in A and B step Itanium chips, and return 1 if we
5809 detect a case where additional NOPs may be necessary. */
5810 static int
5811 errata_nop_necessary_p (slot, insn_unit)
5812 struct slot *slot;
5813 enum ia64_unit insn_unit;
5814 {
5815 int i;
5816 struct group *this_group = md.last_groups + md.group_idx;
5817 struct group *prev_group = md.last_groups + (md.group_idx + 2) % 3;
5818 struct ia64_opcode *idesc = slot->idesc;
5819
5820 /* Test whether this could be the first insn in a problematic sequence. */
5821 if (insn_unit == IA64_UNIT_F)
5822 {
5823 for (i = 0; i < idesc->num_outputs; i++)
5824 if (idesc->operands[i] == IA64_OPND_P1
5825 || idesc->operands[i] == IA64_OPND_P2)
5826 {
5827 int regno = slot->opnd[i].X_add_number - REG_P;
5828 /* Ignore invalid operands; they generate errors elsewhere. */
5829 if (regno >= 64)
5830 return 0;
5831 this_group->p_reg_set[regno] = 1;
5832 }
5833 }
5834
5835 /* Test whether this could be the second insn in a problematic sequence. */
5836 if (insn_unit == IA64_UNIT_M && slot->qp_regno > 0
5837 && prev_group->p_reg_set[slot->qp_regno])
5838 {
5839 for (i = 0; i < idesc->num_outputs; i++)
5840 if (idesc->operands[i] == IA64_OPND_R1
5841 || idesc->operands[i] == IA64_OPND_R2
5842 || idesc->operands[i] == IA64_OPND_R3)
5843 {
5844 int regno = slot->opnd[i].X_add_number - REG_GR;
5845 /* Ignore invalid operands; they generate errors elsewhere. */
5846 if (regno >= 128)
5847 return 0;
5848 if (strncmp (idesc->name, "add", 3) != 0
5849 && strncmp (idesc->name, "sub", 3) != 0
5850 && strncmp (idesc->name, "shladd", 6) != 0
5851 && (idesc->flags & IA64_OPCODE_POSTINC) == 0)
5852 this_group->g_reg_set_conditionally[regno] = 1;
5853 }
5854 }
5855
5856 /* Test whether this could be the third insn in a problematic sequence. */
5857 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; i++)
5858 {
5859 if (/* For fc, ptc, ptr, tak, thash, tpa, ttag, probe, ptr, ptc. */
5860 idesc->operands[i] == IA64_OPND_R3
5861 /* For mov indirect. */
5862 || idesc->operands[i] == IA64_OPND_RR_R3
5863 || idesc->operands[i] == IA64_OPND_DBR_R3
5864 || idesc->operands[i] == IA64_OPND_IBR_R3
5865 || idesc->operands[i] == IA64_OPND_PKR_R3
5866 || idesc->operands[i] == IA64_OPND_PMC_R3
5867 || idesc->operands[i] == IA64_OPND_PMD_R3
5868 || idesc->operands[i] == IA64_OPND_MSR_R3
5869 || idesc->operands[i] == IA64_OPND_CPUID_R3
5870 /* For itr. */
5871 || idesc->operands[i] == IA64_OPND_ITR_R3
5872 || idesc->operands[i] == IA64_OPND_DTR_R3
5873 /* Normal memory addresses (load, store, xchg, cmpxchg, etc.). */
5874 || idesc->operands[i] == IA64_OPND_MR3)
5875 {
5876 int regno = slot->opnd[i].X_add_number - REG_GR;
5877 /* Ignore invalid operands; they generate errors elsewhere. */
5878 if (regno >= 128)
5879 return 0;
5880 if (idesc->operands[i] == IA64_OPND_R3)
5881 {
5882 if (strcmp (idesc->name, "fc") != 0
5883 && strcmp (idesc->name, "tak") != 0
5884 && strcmp (idesc->name, "thash") != 0
5885 && strcmp (idesc->name, "tpa") != 0
5886 && strcmp (idesc->name, "ttag") != 0
5887 && strncmp (idesc->name, "ptr", 3) != 0
5888 && strncmp (idesc->name, "ptc", 3) != 0
5889 && strncmp (idesc->name, "probe", 5) != 0)
5890 return 0;
5891 }
5892 if (prev_group->g_reg_set_conditionally[regno])
5893 return 1;
5894 }
5895 }
5896 return 0;
5897 }
5898
5899 static void
5900 build_insn (slot, insnp)
5901 struct slot *slot;
5902 bfd_vma *insnp;
5903 {
5904 const struct ia64_operand *odesc, *o2desc;
5905 struct ia64_opcode *idesc = slot->idesc;
5906 bfd_signed_vma insn, val;
5907 const char *err;
5908 int i;
5909
5910 insn = idesc->opcode | slot->qp_regno;
5911
5912 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; ++i)
5913 {
5914 if (slot->opnd[i].X_op == O_register
5915 || slot->opnd[i].X_op == O_constant
5916 || slot->opnd[i].X_op == O_index)
5917 val = slot->opnd[i].X_add_number;
5918 else if (slot->opnd[i].X_op == O_big)
5919 {
5920 /* This must be the value 0x10000000000000000. */
5921 assert (idesc->operands[i] == IA64_OPND_IMM8M1U8);
5922 val = 0;
5923 }
5924 else
5925 val = 0;
5926
5927 switch (idesc->operands[i])
5928 {
5929 case IA64_OPND_IMMU64:
5930 *insnp++ = (val >> 22) & 0x1ffffffffffLL;
5931 insn |= (((val & 0x7f) << 13) | (((val >> 7) & 0x1ff) << 27)
5932 | (((val >> 16) & 0x1f) << 22) | (((val >> 21) & 0x1) << 21)
5933 | (((val >> 63) & 0x1) << 36));
5934 continue;
5935
5936 case IA64_OPND_IMMU62:
5937 val &= 0x3fffffffffffffffULL;
5938 if (val != slot->opnd[i].X_add_number)
5939 as_warn (_("Value truncated to 62 bits"));
5940 *insnp++ = (val >> 21) & 0x1ffffffffffLL;
5941 insn |= (((val & 0xfffff) << 6) | (((val >> 20) & 0x1) << 36));
5942 continue;
5943
5944 case IA64_OPND_TGT64:
5945 val >>= 4;
5946 *insnp++ = ((val >> 20) & 0x7fffffffffLL) << 2;
5947 insn |= ((((val >> 59) & 0x1) << 36)
5948 | (((val >> 0) & 0xfffff) << 13));
5949 continue;
5950
5951 case IA64_OPND_AR3:
5952 val -= REG_AR;
5953 break;
5954
5955 case IA64_OPND_B1:
5956 case IA64_OPND_B2:
5957 val -= REG_BR;
5958 break;
5959
5960 case IA64_OPND_CR3:
5961 val -= REG_CR;
5962 break;
5963
5964 case IA64_OPND_F1:
5965 case IA64_OPND_F2:
5966 case IA64_OPND_F3:
5967 case IA64_OPND_F4:
5968 val -= REG_FR;
5969 break;
5970
5971 case IA64_OPND_P1:
5972 case IA64_OPND_P2:
5973 val -= REG_P;
5974 break;
5975
5976 case IA64_OPND_R1:
5977 case IA64_OPND_R2:
5978 case IA64_OPND_R3:
5979 case IA64_OPND_R3_2:
5980 case IA64_OPND_CPUID_R3:
5981 case IA64_OPND_DBR_R3:
5982 case IA64_OPND_DTR_R3:
5983 case IA64_OPND_ITR_R3:
5984 case IA64_OPND_IBR_R3:
5985 case IA64_OPND_MR3:
5986 case IA64_OPND_MSR_R3:
5987 case IA64_OPND_PKR_R3:
5988 case IA64_OPND_PMC_R3:
5989 case IA64_OPND_PMD_R3:
5990 case IA64_OPND_RR_R3:
5991 val -= REG_GR;
5992 break;
5993
5994 default:
5995 break;
5996 }
5997
5998 odesc = elf64_ia64_operands + idesc->operands[i];
5999 err = (*odesc->insert) (odesc, val, &insn);
6000 if (err)
6001 as_bad_where (slot->src_file, slot->src_line,
6002 "Bad operand value: %s", err);
6003 if (idesc->flags & IA64_OPCODE_PSEUDO)
6004 {
6005 if ((idesc->flags & IA64_OPCODE_F2_EQ_F3)
6006 && odesc == elf64_ia64_operands + IA64_OPND_F3)
6007 {
6008 o2desc = elf64_ia64_operands + IA64_OPND_F2;
6009 (*o2desc->insert) (o2desc, val, &insn);
6010 }
6011 if ((idesc->flags & IA64_OPCODE_LEN_EQ_64MCNT)
6012 && (odesc == elf64_ia64_operands + IA64_OPND_CPOS6a
6013 || odesc == elf64_ia64_operands + IA64_OPND_POS6))
6014 {
6015 o2desc = elf64_ia64_operands + IA64_OPND_LEN6;
6016 (*o2desc->insert) (o2desc, 64 - val, &insn);
6017 }
6018 }
6019 }
6020 *insnp = insn;
6021 }
6022
6023 static void
6024 emit_one_bundle ()
6025 {
6026 unsigned int manual_bundling_on = 0, manual_bundling_off = 0;
6027 unsigned int manual_bundling = 0;
6028 enum ia64_unit required_unit, insn_unit = 0;
6029 enum ia64_insn_type type[3], insn_type;
6030 unsigned int template, orig_template;
6031 bfd_vma insn[3] = { -1, -1, -1 };
6032 struct ia64_opcode *idesc;
6033 int end_of_insn_group = 0, user_template = -1;
6034 int n, i, j, first, curr;
6035 unw_rec_list *ptr;
6036 bfd_vma t0 = 0, t1 = 0;
6037 struct label_fix *lfix;
6038 struct insn_fix *ifix;
6039 char mnemonic[16];
6040 fixS *fix;
6041 char *f;
6042
6043 first = (md.curr_slot + NUM_SLOTS - md.num_slots_in_use) % NUM_SLOTS;
6044 know (first >= 0 & first < NUM_SLOTS);
6045 n = MIN (3, md.num_slots_in_use);
6046
6047 /* Determine template: user user_template if specified, best match
6048 otherwise: */
6049
6050 if (md.slot[first].user_template >= 0)
6051 user_template = template = md.slot[first].user_template;
6052 else
6053 {
6054 /* Auto select appropriate template. */
6055 memset (type, 0, sizeof (type));
6056 curr = first;
6057 for (i = 0; i < n; ++i)
6058 {
6059 if (md.slot[curr].label_fixups && i != 0)
6060 break;
6061 type[i] = md.slot[curr].idesc->type;
6062 curr = (curr + 1) % NUM_SLOTS;
6063 }
6064 template = best_template[type[0]][type[1]][type[2]];
6065 }
6066
6067 /* initialize instructions with appropriate nops: */
6068 for (i = 0; i < 3; ++i)
6069 insn[i] = nop[ia64_templ_desc[template].exec_unit[i]];
6070
6071 f = frag_more (16);
6072
6073 /* now fill in slots with as many insns as possible: */
6074 curr = first;
6075 idesc = md.slot[curr].idesc;
6076 end_of_insn_group = 0;
6077 for (i = 0; i < 3 && md.num_slots_in_use > 0; ++i)
6078 {
6079 /* Set the slot number for prologue/body records now as those
6080 refer to the current point, not the point after the
6081 instruction has been issued: */
6082 /* Don't try to delete prologue/body records here, as that will cause
6083 them to also be deleted from the master list of unwind records. */
6084 for (ptr = md.slot[curr].unwind_record; ptr; ptr = ptr->next)
6085 if (ptr->r.type == prologue || ptr->r.type == prologue_gr
6086 || ptr->r.type == body)
6087 {
6088 ptr->slot_number = (unsigned long) f + i;
6089 ptr->slot_frag = frag_now;
6090 }
6091
6092 if (idesc->flags & IA64_OPCODE_SLOT2)
6093 {
6094 if (manual_bundling && i != 2)
6095 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6096 "`%s' must be last in bundle", idesc->name);
6097 else
6098 i = 2;
6099 }
6100 if (idesc->flags & IA64_OPCODE_LAST)
6101 {
6102 int required_slot;
6103 unsigned int required_template;
6104
6105 /* If we need a stop bit after an M slot, our only choice is
6106 template 5 (M;;MI). If we need a stop bit after a B
6107 slot, our only choice is to place it at the end of the
6108 bundle, because the only available templates are MIB,
6109 MBB, BBB, MMB, and MFB. We don't handle anything other
6110 than M and B slots because these are the only kind of
6111 instructions that can have the IA64_OPCODE_LAST bit set. */
6112 required_template = template;
6113 switch (idesc->type)
6114 {
6115 case IA64_TYPE_M:
6116 required_slot = 0;
6117 required_template = 5;
6118 break;
6119
6120 case IA64_TYPE_B:
6121 required_slot = 2;
6122 break;
6123
6124 default:
6125 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6126 "Internal error: don't know how to force %s to end"
6127 "of instruction group", idesc->name);
6128 required_slot = i;
6129 break;
6130 }
6131 if (manual_bundling && i != required_slot)
6132 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6133 "`%s' must be last in instruction group",
6134 idesc->name);
6135 if (required_slot < i)
6136 /* Can't fit this instruction. */
6137 break;
6138
6139 i = required_slot;
6140 if (required_template != template)
6141 {
6142 /* If we switch the template, we need to reset the NOPs
6143 after slot i. The slot-types of the instructions ahead
6144 of i never change, so we don't need to worry about
6145 changing NOPs in front of this slot. */
6146 for (j = i; j < 3; ++j)
6147 insn[j] = nop[ia64_templ_desc[required_template].exec_unit[j]];
6148 }
6149 template = required_template;
6150 }
6151 if (curr != first && md.slot[curr].label_fixups)
6152 {
6153 if (manual_bundling_on)
6154 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6155 "Label must be first in a bundle");
6156 /* This insn must go into the first slot of a bundle. */
6157 break;
6158 }
6159
6160 manual_bundling_on = md.slot[curr].manual_bundling_on;
6161 manual_bundling_off = md.slot[curr].manual_bundling_off;
6162
6163 if (manual_bundling_on)
6164 {
6165 if (curr == first)
6166 manual_bundling = 1;
6167 else
6168 break; /* need to start a new bundle */
6169 }
6170
6171 if (end_of_insn_group && md.num_slots_in_use >= 1)
6172 {
6173 /* We need an instruction group boundary in the middle of a
6174 bundle. See if we can switch to an other template with
6175 an appropriate boundary. */
6176
6177 orig_template = template;
6178 if (i == 1 && (user_template == 4
6179 || (user_template < 0
6180 && (ia64_templ_desc[template].exec_unit[0]
6181 == IA64_UNIT_M))))
6182 {
6183 template = 5;
6184 end_of_insn_group = 0;
6185 }
6186 else if (i == 2 && (user_template == 0
6187 || (user_template < 0
6188 && (ia64_templ_desc[template].exec_unit[1]
6189 == IA64_UNIT_I)))
6190 /* This test makes sure we don't switch the template if
6191 the next instruction is one that needs to be first in
6192 an instruction group. Since all those instructions are
6193 in the M group, there is no way such an instruction can
6194 fit in this bundle even if we switch the template. The
6195 reason we have to check for this is that otherwise we
6196 may end up generating "MI;;I M.." which has the deadly
6197 effect that the second M instruction is no longer the
6198 first in the bundle! --davidm 99/12/16 */
6199 && (idesc->flags & IA64_OPCODE_FIRST) == 0)
6200 {
6201 template = 1;
6202 end_of_insn_group = 0;
6203 }
6204 else if (curr != first)
6205 /* can't fit this insn */
6206 break;
6207
6208 if (template != orig_template)
6209 /* if we switch the template, we need to reset the NOPs
6210 after slot i. The slot-types of the instructions ahead
6211 of i never change, so we don't need to worry about
6212 changing NOPs in front of this slot. */
6213 for (j = i; j < 3; ++j)
6214 insn[j] = nop[ia64_templ_desc[template].exec_unit[j]];
6215 }
6216 required_unit = ia64_templ_desc[template].exec_unit[i];
6217
6218 /* resolve dynamic opcodes such as "break", "hint", and "nop": */
6219 if (idesc->type == IA64_TYPE_DYN)
6220 {
6221 if ((strcmp (idesc->name, "nop") == 0)
6222 || (strcmp (idesc->name, "hint") == 0)
6223 || (strcmp (idesc->name, "break") == 0))
6224 insn_unit = required_unit;
6225 else if (strcmp (idesc->name, "chk.s") == 0)
6226 {
6227 insn_unit = IA64_UNIT_M;
6228 if (required_unit == IA64_UNIT_I)
6229 insn_unit = IA64_UNIT_I;
6230 }
6231 else
6232 as_fatal ("emit_one_bundle: unexpected dynamic op");
6233
6234 sprintf (mnemonic, "%s.%c", idesc->name, "?imbf??"[insn_unit]);
6235 ia64_free_opcode (idesc);
6236 md.slot[curr].idesc = idesc = ia64_find_opcode (mnemonic);
6237 #if 0
6238 know (!idesc->next); /* no resolved dynamic ops have collisions */
6239 #endif
6240 }
6241 else
6242 {
6243 insn_type = idesc->type;
6244 insn_unit = IA64_UNIT_NIL;
6245 switch (insn_type)
6246 {
6247 case IA64_TYPE_A:
6248 if (required_unit == IA64_UNIT_I || required_unit == IA64_UNIT_M)
6249 insn_unit = required_unit;
6250 break;
6251 case IA64_TYPE_X: insn_unit = IA64_UNIT_L; break;
6252 case IA64_TYPE_I: insn_unit = IA64_UNIT_I; break;
6253 case IA64_TYPE_M: insn_unit = IA64_UNIT_M; break;
6254 case IA64_TYPE_B: insn_unit = IA64_UNIT_B; break;
6255 case IA64_TYPE_F: insn_unit = IA64_UNIT_F; break;
6256 default: break;
6257 }
6258 }
6259
6260 if (insn_unit != required_unit)
6261 {
6262 if (required_unit == IA64_UNIT_L
6263 && insn_unit == IA64_UNIT_I
6264 && !(idesc->flags & IA64_OPCODE_X_IN_MLX))
6265 {
6266 /* we got ourselves an MLX template but the current
6267 instruction isn't an X-unit, or an I-unit instruction
6268 that can go into the X slot of an MLX template. Duh. */
6269 if (md.num_slots_in_use >= NUM_SLOTS)
6270 {
6271 as_bad_where (md.slot[curr].src_file,
6272 md.slot[curr].src_line,
6273 "`%s' can't go in X slot of "
6274 "MLX template", idesc->name);
6275 /* drop this insn so we don't livelock: */
6276 --md.num_slots_in_use;
6277 }
6278 break;
6279 }
6280 continue; /* try next slot */
6281 }
6282
6283 {
6284 bfd_vma addr;
6285
6286 addr = frag_now->fr_address + frag_now_fix () - 16 + i;
6287 dwarf2_gen_line_info (addr, &md.slot[curr].debug_line);
6288 }
6289
6290 if (errata_nop_necessary_p (md.slot + curr, insn_unit))
6291 as_warn (_("Additional NOP may be necessary to workaround Itanium processor A/B step errata"));
6292
6293 build_insn (md.slot + curr, insn + i);
6294
6295 /* Set slot counts for non prologue/body unwind records. */
6296 for (ptr = md.slot[curr].unwind_record; ptr; ptr = ptr->next)
6297 if (ptr->r.type != prologue && ptr->r.type != prologue_gr
6298 && ptr->r.type != body)
6299 {
6300 ptr->slot_number = (unsigned long) f + i;
6301 ptr->slot_frag = frag_now;
6302 }
6303 md.slot[curr].unwind_record = NULL;
6304
6305 if (required_unit == IA64_UNIT_L)
6306 {
6307 know (i == 1);
6308 /* skip one slot for long/X-unit instructions */
6309 ++i;
6310 }
6311 --md.num_slots_in_use;
6312
6313 /* now is a good time to fix up the labels for this insn: */
6314 for (lfix = md.slot[curr].label_fixups; lfix; lfix = lfix->next)
6315 {
6316 S_SET_VALUE (lfix->sym, frag_now_fix () - 16);
6317 symbol_set_frag (lfix->sym, frag_now);
6318 }
6319 /* and fix up the tags also. */
6320 for (lfix = md.slot[curr].tag_fixups; lfix; lfix = lfix->next)
6321 {
6322 S_SET_VALUE (lfix->sym, frag_now_fix () - 16 + i);
6323 symbol_set_frag (lfix->sym, frag_now);
6324 }
6325
6326 for (j = 0; j < md.slot[curr].num_fixups; ++j)
6327 {
6328 ifix = md.slot[curr].fixup + j;
6329 fix = fix_new_exp (frag_now, frag_now_fix () - 16 + i, 8,
6330 &ifix->expr, ifix->is_pcrel, ifix->code);
6331 fix->tc_fix_data.opnd = ifix->opnd;
6332 fix->fx_plt = (fix->fx_r_type == BFD_RELOC_IA64_PLTOFF22);
6333 fix->fx_file = md.slot[curr].src_file;
6334 fix->fx_line = md.slot[curr].src_line;
6335 }
6336
6337 end_of_insn_group = md.slot[curr].end_of_insn_group;
6338
6339 if (end_of_insn_group)
6340 {
6341 md.group_idx = (md.group_idx + 1) % 3;
6342 memset (md.last_groups + md.group_idx, 0, sizeof md.last_groups[0]);
6343 }
6344
6345 /* clear slot: */
6346 ia64_free_opcode (md.slot[curr].idesc);
6347 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6348 md.slot[curr].user_template = -1;
6349
6350 if (manual_bundling_off)
6351 {
6352 manual_bundling = 0;
6353 break;
6354 }
6355 curr = (curr + 1) % NUM_SLOTS;
6356 idesc = md.slot[curr].idesc;
6357 }
6358 if (manual_bundling)
6359 {
6360 if (md.num_slots_in_use > 0)
6361 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6362 "`%s' does not fit into %s template",
6363 idesc->name, ia64_templ_desc[template].name);
6364 else
6365 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6366 "Missing '}' at end of file");
6367 }
6368 know (md.num_slots_in_use < NUM_SLOTS);
6369
6370 t0 = end_of_insn_group | (template << 1) | (insn[0] << 5) | (insn[1] << 46);
6371 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
6372
6373 number_to_chars_littleendian (f + 0, t0, 8);
6374 number_to_chars_littleendian (f + 8, t1, 8);
6375
6376 unwind.next_slot_number = (unsigned long) f + 16;
6377 unwind.next_slot_frag = frag_now;
6378 }
6379
6380 int
6381 md_parse_option (c, arg)
6382 int c;
6383 char *arg;
6384 {
6385
6386 switch (c)
6387 {
6388 /* Switches from the Intel assembler. */
6389 case 'm':
6390 if (strcmp (arg, "ilp64") == 0
6391 || strcmp (arg, "lp64") == 0
6392 || strcmp (arg, "p64") == 0)
6393 {
6394 md.flags |= EF_IA_64_ABI64;
6395 }
6396 else if (strcmp (arg, "ilp32") == 0)
6397 {
6398 md.flags &= ~EF_IA_64_ABI64;
6399 }
6400 else if (strcmp (arg, "le") == 0)
6401 {
6402 md.flags &= ~EF_IA_64_BE;
6403 }
6404 else if (strcmp (arg, "be") == 0)
6405 {
6406 md.flags |= EF_IA_64_BE;
6407 }
6408 else
6409 return 0;
6410 break;
6411
6412 case 'N':
6413 if (strcmp (arg, "so") == 0)
6414 {
6415 /* Suppress signon message. */
6416 }
6417 else if (strcmp (arg, "pi") == 0)
6418 {
6419 /* Reject privileged instructions. FIXME */
6420 }
6421 else if (strcmp (arg, "us") == 0)
6422 {
6423 /* Allow union of signed and unsigned range. FIXME */
6424 }
6425 else if (strcmp (arg, "close_fcalls") == 0)
6426 {
6427 /* Do not resolve global function calls. */
6428 }
6429 else
6430 return 0;
6431 break;
6432
6433 case 'C':
6434 /* temp[="prefix"] Insert temporary labels into the object file
6435 symbol table prefixed by "prefix".
6436 Default prefix is ":temp:".
6437 */
6438 break;
6439
6440 case 'a':
6441 /* indirect=<tgt> Assume unannotated indirect branches behavior
6442 according to <tgt> --
6443 exit: branch out from the current context (default)
6444 labels: all labels in context may be branch targets
6445 */
6446 if (strncmp (arg, "indirect=", 9) != 0)
6447 return 0;
6448 break;
6449
6450 case 'x':
6451 /* -X conflicts with an ignored option, use -x instead */
6452 md.detect_dv = 1;
6453 if (!arg || strcmp (arg, "explicit") == 0)
6454 {
6455 /* set default mode to explicit */
6456 md.default_explicit_mode = 1;
6457 break;
6458 }
6459 else if (strcmp (arg, "auto") == 0)
6460 {
6461 md.default_explicit_mode = 0;
6462 }
6463 else if (strcmp (arg, "debug") == 0)
6464 {
6465 md.debug_dv = 1;
6466 }
6467 else if (strcmp (arg, "debugx") == 0)
6468 {
6469 md.default_explicit_mode = 1;
6470 md.debug_dv = 1;
6471 }
6472 else
6473 {
6474 as_bad (_("Unrecognized option '-x%s'"), arg);
6475 }
6476 break;
6477
6478 case 'S':
6479 /* nops Print nops statistics. */
6480 break;
6481
6482 /* GNU specific switches for gcc. */
6483 case OPTION_MCONSTANT_GP:
6484 md.flags |= EF_IA_64_CONS_GP;
6485 break;
6486
6487 case OPTION_MAUTO_PIC:
6488 md.flags |= EF_IA_64_NOFUNCDESC_CONS_GP;
6489 break;
6490
6491 default:
6492 return 0;
6493 }
6494
6495 return 1;
6496 }
6497
6498 void
6499 md_show_usage (stream)
6500 FILE *stream;
6501 {
6502 fputs (_("\
6503 IA-64 options:\n\
6504 --mconstant-gp mark output file as using the constant-GP model\n\
6505 (sets ELF header flag EF_IA_64_CONS_GP)\n\
6506 --mauto-pic mark output file as using the constant-GP model\n\
6507 without function descriptors (sets ELF header flag\n\
6508 EF_IA_64_NOFUNCDESC_CONS_GP)\n\
6509 -milp32|-milp64|-mlp64|-mp64 select data model (default -mlp64)\n\
6510 -mle | -mbe select little- or big-endian byte order (default -mle)\n\
6511 -x | -xexplicit turn on dependency violation checking (default)\n\
6512 -xauto automagically remove dependency violations\n\
6513 -xdebug debug dependency violation checker\n"),
6514 stream);
6515 }
6516
6517 void
6518 ia64_after_parse_args ()
6519 {
6520 if (debug_type == DEBUG_STABS)
6521 as_fatal (_("--gstabs is not supported for ia64"));
6522 }
6523
6524 /* Return true if TYPE fits in TEMPL at SLOT. */
6525
6526 static int
6527 match (int templ, int type, int slot)
6528 {
6529 enum ia64_unit unit;
6530 int result;
6531
6532 unit = ia64_templ_desc[templ].exec_unit[slot];
6533 switch (type)
6534 {
6535 case IA64_TYPE_DYN: result = 1; break; /* for nop and break */
6536 case IA64_TYPE_A:
6537 result = (unit == IA64_UNIT_I || unit == IA64_UNIT_M);
6538 break;
6539 case IA64_TYPE_X: result = (unit == IA64_UNIT_L); break;
6540 case IA64_TYPE_I: result = (unit == IA64_UNIT_I); break;
6541 case IA64_TYPE_M: result = (unit == IA64_UNIT_M); break;
6542 case IA64_TYPE_B: result = (unit == IA64_UNIT_B); break;
6543 case IA64_TYPE_F: result = (unit == IA64_UNIT_F); break;
6544 default: result = 0; break;
6545 }
6546 return result;
6547 }
6548
6549 /* Add a bit of extra goodness if a nop of type F or B would fit
6550 in TEMPL at SLOT. */
6551
6552 static inline int
6553 extra_goodness (int templ, int slot)
6554 {
6555 if (slot == 1 && match (templ, IA64_TYPE_F, slot))
6556 return 2;
6557 if (slot == 2 && match (templ, IA64_TYPE_B, slot))
6558 return 1;
6559 return 0;
6560 }
6561
6562 /* This function is called once, at assembler startup time. It sets
6563 up all the tables, etc. that the MD part of the assembler will need
6564 that can be determined before arguments are parsed. */
6565 void
6566 md_begin ()
6567 {
6568 int i, j, k, t, total, ar_base, cr_base, goodness, best, regnum, ok;
6569 const char *err;
6570 char name[8];
6571
6572 md.auto_align = 1;
6573 md.explicit_mode = md.default_explicit_mode;
6574
6575 bfd_set_section_alignment (stdoutput, text_section, 4);
6576
6577 /* Make sure fucntion pointers get initialized. */
6578 target_big_endian = -1;
6579 dot_byteorder (TARGET_BYTES_BIG_ENDIAN);
6580
6581 pseudo_func[FUNC_DTP_MODULE].u.sym =
6582 symbol_new (".<dtpmod>", undefined_section, FUNC_DTP_MODULE,
6583 &zero_address_frag);
6584
6585 pseudo_func[FUNC_DTP_RELATIVE].u.sym =
6586 symbol_new (".<dtprel>", undefined_section, FUNC_DTP_RELATIVE,
6587 &zero_address_frag);
6588
6589 pseudo_func[FUNC_FPTR_RELATIVE].u.sym =
6590 symbol_new (".<fptr>", undefined_section, FUNC_FPTR_RELATIVE,
6591 &zero_address_frag);
6592
6593 pseudo_func[FUNC_GP_RELATIVE].u.sym =
6594 symbol_new (".<gprel>", undefined_section, FUNC_GP_RELATIVE,
6595 &zero_address_frag);
6596
6597 pseudo_func[FUNC_LT_RELATIVE].u.sym =
6598 symbol_new (".<ltoff>", undefined_section, FUNC_LT_RELATIVE,
6599 &zero_address_frag);
6600
6601 pseudo_func[FUNC_LT_RELATIVE_X].u.sym =
6602 symbol_new (".<ltoffx>", undefined_section, FUNC_LT_RELATIVE_X,
6603 &zero_address_frag);
6604
6605 pseudo_func[FUNC_PC_RELATIVE].u.sym =
6606 symbol_new (".<pcrel>", undefined_section, FUNC_PC_RELATIVE,
6607 &zero_address_frag);
6608
6609 pseudo_func[FUNC_PLT_RELATIVE].u.sym =
6610 symbol_new (".<pltoff>", undefined_section, FUNC_PLT_RELATIVE,
6611 &zero_address_frag);
6612
6613 pseudo_func[FUNC_SEC_RELATIVE].u.sym =
6614 symbol_new (".<secrel>", undefined_section, FUNC_SEC_RELATIVE,
6615 &zero_address_frag);
6616
6617 pseudo_func[FUNC_SEG_RELATIVE].u.sym =
6618 symbol_new (".<segrel>", undefined_section, FUNC_SEG_RELATIVE,
6619 &zero_address_frag);
6620
6621 pseudo_func[FUNC_TP_RELATIVE].u.sym =
6622 symbol_new (".<tprel>", undefined_section, FUNC_TP_RELATIVE,
6623 &zero_address_frag);
6624
6625 pseudo_func[FUNC_LTV_RELATIVE].u.sym =
6626 symbol_new (".<ltv>", undefined_section, FUNC_LTV_RELATIVE,
6627 &zero_address_frag);
6628
6629 pseudo_func[FUNC_LT_FPTR_RELATIVE].u.sym =
6630 symbol_new (".<ltoff.fptr>", undefined_section, FUNC_LT_FPTR_RELATIVE,
6631 &zero_address_frag);
6632
6633 pseudo_func[FUNC_LT_DTP_MODULE].u.sym =
6634 symbol_new (".<ltoff.dtpmod>", undefined_section, FUNC_LT_DTP_MODULE,
6635 &zero_address_frag);
6636
6637 pseudo_func[FUNC_LT_DTP_RELATIVE].u.sym =
6638 symbol_new (".<ltoff.dptrel>", undefined_section, FUNC_LT_DTP_RELATIVE,
6639 &zero_address_frag);
6640
6641 pseudo_func[FUNC_LT_TP_RELATIVE].u.sym =
6642 symbol_new (".<ltoff.tprel>", undefined_section, FUNC_LT_TP_RELATIVE,
6643 &zero_address_frag);
6644
6645 pseudo_func[FUNC_IPLT_RELOC].u.sym =
6646 symbol_new (".<iplt>", undefined_section, FUNC_IPLT_RELOC,
6647 &zero_address_frag);
6648
6649 /* Compute the table of best templates. We compute goodness as a
6650 base 4 value, in which each match counts for 3, each F counts
6651 for 2, each B counts for 1. This should maximize the number of
6652 F and B nops in the chosen bundles, which is good because these
6653 pipelines are least likely to be overcommitted. */
6654 for (i = 0; i < IA64_NUM_TYPES; ++i)
6655 for (j = 0; j < IA64_NUM_TYPES; ++j)
6656 for (k = 0; k < IA64_NUM_TYPES; ++k)
6657 {
6658 best = 0;
6659 for (t = 0; t < NELEMS (ia64_templ_desc); ++t)
6660 {
6661 goodness = 0;
6662 if (match (t, i, 0))
6663 {
6664 if (match (t, j, 1))
6665 {
6666 if (match (t, k, 2))
6667 goodness = 3 + 3 + 3;
6668 else
6669 goodness = 3 + 3 + extra_goodness (t, 2);
6670 }
6671 else if (match (t, j, 2))
6672 goodness = 3 + 3 + extra_goodness (t, 1);
6673 else
6674 {
6675 goodness = 3;
6676 goodness += extra_goodness (t, 1);
6677 goodness += extra_goodness (t, 2);
6678 }
6679 }
6680 else if (match (t, i, 1))
6681 {
6682 if (match (t, j, 2))
6683 goodness = 3 + 3;
6684 else
6685 goodness = 3 + extra_goodness (t, 2);
6686 }
6687 else if (match (t, i, 2))
6688 goodness = 3 + extra_goodness (t, 1);
6689
6690 if (goodness > best)
6691 {
6692 best = goodness;
6693 best_template[i][j][k] = t;
6694 }
6695 }
6696 }
6697
6698 for (i = 0; i < NUM_SLOTS; ++i)
6699 md.slot[i].user_template = -1;
6700
6701 md.pseudo_hash = hash_new ();
6702 for (i = 0; i < NELEMS (pseudo_opcode); ++i)
6703 {
6704 err = hash_insert (md.pseudo_hash, pseudo_opcode[i].name,
6705 (void *) (pseudo_opcode + i));
6706 if (err)
6707 as_fatal ("ia64.md_begin: can't hash `%s': %s",
6708 pseudo_opcode[i].name, err);
6709 }
6710
6711 md.reg_hash = hash_new ();
6712 md.dynreg_hash = hash_new ();
6713 md.const_hash = hash_new ();
6714 md.entry_hash = hash_new ();
6715
6716 /* general registers: */
6717
6718 total = 128;
6719 for (i = 0; i < total; ++i)
6720 {
6721 sprintf (name, "r%d", i - REG_GR);
6722 md.regsym[i] = declare_register (name, i);
6723 }
6724
6725 /* floating point registers: */
6726 total += 128;
6727 for (; i < total; ++i)
6728 {
6729 sprintf (name, "f%d", i - REG_FR);
6730 md.regsym[i] = declare_register (name, i);
6731 }
6732
6733 /* application registers: */
6734 total += 128;
6735 ar_base = i;
6736 for (; i < total; ++i)
6737 {
6738 sprintf (name, "ar%d", i - REG_AR);
6739 md.regsym[i] = declare_register (name, i);
6740 }
6741
6742 /* control registers: */
6743 total += 128;
6744 cr_base = i;
6745 for (; i < total; ++i)
6746 {
6747 sprintf (name, "cr%d", i - REG_CR);
6748 md.regsym[i] = declare_register (name, i);
6749 }
6750
6751 /* predicate registers: */
6752 total += 64;
6753 for (; i < total; ++i)
6754 {
6755 sprintf (name, "p%d", i - REG_P);
6756 md.regsym[i] = declare_register (name, i);
6757 }
6758
6759 /* branch registers: */
6760 total += 8;
6761 for (; i < total; ++i)
6762 {
6763 sprintf (name, "b%d", i - REG_BR);
6764 md.regsym[i] = declare_register (name, i);
6765 }
6766
6767 md.regsym[REG_IP] = declare_register ("ip", REG_IP);
6768 md.regsym[REG_CFM] = declare_register ("cfm", REG_CFM);
6769 md.regsym[REG_PR] = declare_register ("pr", REG_PR);
6770 md.regsym[REG_PR_ROT] = declare_register ("pr.rot", REG_PR_ROT);
6771 md.regsym[REG_PSR] = declare_register ("psr", REG_PSR);
6772 md.regsym[REG_PSR_L] = declare_register ("psr.l", REG_PSR_L);
6773 md.regsym[REG_PSR_UM] = declare_register ("psr.um", REG_PSR_UM);
6774
6775 for (i = 0; i < NELEMS (indirect_reg); ++i)
6776 {
6777 regnum = indirect_reg[i].regnum;
6778 md.regsym[regnum] = declare_register (indirect_reg[i].name, regnum);
6779 }
6780
6781 /* define synonyms for application registers: */
6782 for (i = REG_AR; i < REG_AR + NELEMS (ar); ++i)
6783 md.regsym[i] = declare_register (ar[i - REG_AR].name,
6784 REG_AR + ar[i - REG_AR].regnum);
6785
6786 /* define synonyms for control registers: */
6787 for (i = REG_CR; i < REG_CR + NELEMS (cr); ++i)
6788 md.regsym[i] = declare_register (cr[i - REG_CR].name,
6789 REG_CR + cr[i - REG_CR].regnum);
6790
6791 declare_register ("gp", REG_GR + 1);
6792 declare_register ("sp", REG_GR + 12);
6793 declare_register ("rp", REG_BR + 0);
6794
6795 /* pseudo-registers used to specify unwind info: */
6796 declare_register ("psp", REG_PSP);
6797
6798 declare_register_set ("ret", 4, REG_GR + 8);
6799 declare_register_set ("farg", 8, REG_FR + 8);
6800 declare_register_set ("fret", 8, REG_FR + 8);
6801
6802 for (i = 0; i < NELEMS (const_bits); ++i)
6803 {
6804 err = hash_insert (md.const_hash, const_bits[i].name,
6805 (PTR) (const_bits + i));
6806 if (err)
6807 as_fatal ("Inserting \"%s\" into constant hash table failed: %s",
6808 name, err);
6809 }
6810
6811 /* Set the architecture and machine depending on defaults and command line
6812 options. */
6813 if (md.flags & EF_IA_64_ABI64)
6814 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf64);
6815 else
6816 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf32);
6817
6818 if (! ok)
6819 as_warn (_("Could not set architecture and machine"));
6820
6821 /* Set the pointer size and pointer shift size depending on md.flags */
6822
6823 if (md.flags & EF_IA_64_ABI64)
6824 {
6825 md.pointer_size = 8; /* pointers are 8 bytes */
6826 md.pointer_size_shift = 3; /* alignment is 8 bytes = 2^2 */
6827 }
6828 else
6829 {
6830 md.pointer_size = 4; /* pointers are 4 bytes */
6831 md.pointer_size_shift = 2; /* alignment is 4 bytes = 2^2 */
6832 }
6833
6834 md.mem_offset.hint = 0;
6835 md.path = 0;
6836 md.maxpaths = 0;
6837 md.entry_labels = NULL;
6838 }
6839
6840 /* Set the elf type to 64 bit ABI by default. Cannot do this in md_begin
6841 because that is called after md_parse_option which is where we do the
6842 dynamic changing of md.flags based on -mlp64 or -milp32. Also, set the
6843 default endianness. */
6844
6845 void
6846 ia64_init (argc, argv)
6847 int argc ATTRIBUTE_UNUSED;
6848 char **argv ATTRIBUTE_UNUSED;
6849 {
6850 md.flags = MD_FLAGS_DEFAULT;
6851 }
6852
6853 /* Return a string for the target object file format. */
6854
6855 const char *
6856 ia64_target_format ()
6857 {
6858 if (OUTPUT_FLAVOR == bfd_target_elf_flavour)
6859 {
6860 if (md.flags & EF_IA_64_BE)
6861 {
6862 if (md.flags & EF_IA_64_ABI64)
6863 #if defined(TE_AIX50)
6864 return "elf64-ia64-aix-big";
6865 #elif defined(TE_HPUX)
6866 return "elf64-ia64-hpux-big";
6867 #else
6868 return "elf64-ia64-big";
6869 #endif
6870 else
6871 #if defined(TE_AIX50)
6872 return "elf32-ia64-aix-big";
6873 #elif defined(TE_HPUX)
6874 return "elf32-ia64-hpux-big";
6875 #else
6876 return "elf32-ia64-big";
6877 #endif
6878 }
6879 else
6880 {
6881 if (md.flags & EF_IA_64_ABI64)
6882 #ifdef TE_AIX50
6883 return "elf64-ia64-aix-little";
6884 #else
6885 return "elf64-ia64-little";
6886 #endif
6887 else
6888 #ifdef TE_AIX50
6889 return "elf32-ia64-aix-little";
6890 #else
6891 return "elf32-ia64-little";
6892 #endif
6893 }
6894 }
6895 else
6896 return "unknown-format";
6897 }
6898
6899 void
6900 ia64_end_of_source ()
6901 {
6902 /* terminate insn group upon reaching end of file: */
6903 insn_group_break (1, 0, 0);
6904
6905 /* emits slots we haven't written yet: */
6906 ia64_flush_insns ();
6907
6908 bfd_set_private_flags (stdoutput, md.flags);
6909
6910 md.mem_offset.hint = 0;
6911 }
6912
6913 void
6914 ia64_start_line ()
6915 {
6916 if (md.qp.X_op == O_register)
6917 as_bad ("qualifying predicate not followed by instruction");
6918 md.qp.X_op = O_absent;
6919
6920 if (ignore_input ())
6921 return;
6922
6923 if (input_line_pointer[0] == ';' && input_line_pointer[-1] == ';')
6924 {
6925 if (md.detect_dv && !md.explicit_mode)
6926 as_warn (_("Explicit stops are ignored in auto mode"));
6927 else
6928 insn_group_break (1, 0, 0);
6929 }
6930 }
6931
6932 /* This is a hook for ia64_frob_label, so that it can distinguish tags from
6933 labels. */
6934 static int defining_tag = 0;
6935
6936 int
6937 ia64_unrecognized_line (ch)
6938 int ch;
6939 {
6940 switch (ch)
6941 {
6942 case '(':
6943 expression (&md.qp);
6944 if (*input_line_pointer++ != ')')
6945 {
6946 as_bad ("Expected ')'");
6947 return 0;
6948 }
6949 if (md.qp.X_op != O_register)
6950 {
6951 as_bad ("Qualifying predicate expected");
6952 return 0;
6953 }
6954 if (md.qp.X_add_number < REG_P || md.qp.X_add_number >= REG_P + 64)
6955 {
6956 as_bad ("Predicate register expected");
6957 return 0;
6958 }
6959 return 1;
6960
6961 case '{':
6962 if (md.manual_bundling)
6963 as_warn ("Found '{' when manual bundling is already turned on");
6964 else
6965 CURR_SLOT.manual_bundling_on = 1;
6966 md.manual_bundling = 1;
6967
6968 /* Bundling is only acceptable in explicit mode
6969 or when in default automatic mode. */
6970 if (md.detect_dv && !md.explicit_mode)
6971 {
6972 if (!md.mode_explicitly_set
6973 && !md.default_explicit_mode)
6974 dot_dv_mode ('E');
6975 else
6976 as_warn (_("Found '{' after explicit switch to automatic mode"));
6977 }
6978 return 1;
6979
6980 case '}':
6981 if (!md.manual_bundling)
6982 as_warn ("Found '}' when manual bundling is off");
6983 else
6984 PREV_SLOT.manual_bundling_off = 1;
6985 md.manual_bundling = 0;
6986
6987 /* switch back to automatic mode, if applicable */
6988 if (md.detect_dv
6989 && md.explicit_mode
6990 && !md.mode_explicitly_set
6991 && !md.default_explicit_mode)
6992 dot_dv_mode ('A');
6993
6994 /* Allow '{' to follow on the same line. We also allow ";;", but that
6995 happens automatically because ';' is an end of line marker. */
6996 SKIP_WHITESPACE ();
6997 if (input_line_pointer[0] == '{')
6998 {
6999 input_line_pointer++;
7000 return ia64_unrecognized_line ('{');
7001 }
7002
7003 demand_empty_rest_of_line ();
7004 return 1;
7005
7006 case '[':
7007 {
7008 char *s;
7009 char c;
7010 symbolS *tag;
7011 int temp;
7012
7013 if (md.qp.X_op == O_register)
7014 {
7015 as_bad ("Tag must come before qualifying predicate.");
7016 return 0;
7017 }
7018
7019 /* This implements just enough of read_a_source_file in read.c to
7020 recognize labels. */
7021 if (is_name_beginner (*input_line_pointer))
7022 {
7023 s = input_line_pointer;
7024 c = get_symbol_end ();
7025 }
7026 else if (LOCAL_LABELS_FB
7027 && ISDIGIT (*input_line_pointer))
7028 {
7029 temp = 0;
7030 while (ISDIGIT (*input_line_pointer))
7031 temp = (temp * 10) + *input_line_pointer++ - '0';
7032 fb_label_instance_inc (temp);
7033 s = fb_label_name (temp, 0);
7034 c = *input_line_pointer;
7035 }
7036 else
7037 {
7038 s = NULL;
7039 c = '\0';
7040 }
7041 if (c != ':')
7042 {
7043 /* Put ':' back for error messages' sake. */
7044 *input_line_pointer++ = ':';
7045 as_bad ("Expected ':'");
7046 return 0;
7047 }
7048
7049 defining_tag = 1;
7050 tag = colon (s);
7051 defining_tag = 0;
7052 /* Put ':' back for error messages' sake. */
7053 *input_line_pointer++ = ':';
7054 if (*input_line_pointer++ != ']')
7055 {
7056 as_bad ("Expected ']'");
7057 return 0;
7058 }
7059 if (! tag)
7060 {
7061 as_bad ("Tag name expected");
7062 return 0;
7063 }
7064 return 1;
7065 }
7066
7067 default:
7068 break;
7069 }
7070
7071 /* Not a valid line. */
7072 return 0;
7073 }
7074
7075 void
7076 ia64_frob_label (sym)
7077 struct symbol *sym;
7078 {
7079 struct label_fix *fix;
7080
7081 /* Tags need special handling since they are not bundle breaks like
7082 labels. */
7083 if (defining_tag)
7084 {
7085 fix = obstack_alloc (&notes, sizeof (*fix));
7086 fix->sym = sym;
7087 fix->next = CURR_SLOT.tag_fixups;
7088 CURR_SLOT.tag_fixups = fix;
7089
7090 return;
7091 }
7092
7093 if (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
7094 {
7095 md.last_text_seg = now_seg;
7096 fix = obstack_alloc (&notes, sizeof (*fix));
7097 fix->sym = sym;
7098 fix->next = CURR_SLOT.label_fixups;
7099 CURR_SLOT.label_fixups = fix;
7100
7101 /* Keep track of how many code entry points we've seen. */
7102 if (md.path == md.maxpaths)
7103 {
7104 md.maxpaths += 20;
7105 md.entry_labels = (const char **)
7106 xrealloc ((void *) md.entry_labels,
7107 md.maxpaths * sizeof (char *));
7108 }
7109 md.entry_labels[md.path++] = S_GET_NAME (sym);
7110 }
7111 }
7112
7113 void
7114 ia64_flush_pending_output ()
7115 {
7116 if (!md.keep_pending_output
7117 && bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
7118 {
7119 /* ??? This causes many unnecessary stop bits to be emitted.
7120 Unfortunately, it isn't clear if it is safe to remove this. */
7121 insn_group_break (1, 0, 0);
7122 ia64_flush_insns ();
7123 }
7124 }
7125
7126 /* Do ia64-specific expression optimization. All that's done here is
7127 to transform index expressions that are either due to the indexing
7128 of rotating registers or due to the indexing of indirect register
7129 sets. */
7130 int
7131 ia64_optimize_expr (l, op, r)
7132 expressionS *l;
7133 operatorT op;
7134 expressionS *r;
7135 {
7136 unsigned num_regs;
7137
7138 if (op == O_index)
7139 {
7140 if (l->X_op == O_register && r->X_op == O_constant)
7141 {
7142 num_regs = (l->X_add_number >> 16);
7143 if ((unsigned) r->X_add_number >= num_regs)
7144 {
7145 if (!num_regs)
7146 as_bad ("No current frame");
7147 else
7148 as_bad ("Index out of range 0..%u", num_regs - 1);
7149 r->X_add_number = 0;
7150 }
7151 l->X_add_number = (l->X_add_number & 0xffff) + r->X_add_number;
7152 return 1;
7153 }
7154 else if (l->X_op == O_register && r->X_op == O_register)
7155 {
7156 if (l->X_add_number < IND_CPUID || l->X_add_number > IND_RR
7157 || l->X_add_number == IND_MEM)
7158 {
7159 as_bad ("Indirect register set name expected");
7160 l->X_add_number = IND_CPUID;
7161 }
7162 l->X_op = O_index;
7163 l->X_op_symbol = md.regsym[l->X_add_number];
7164 l->X_add_number = r->X_add_number;
7165 return 1;
7166 }
7167 }
7168 return 0;
7169 }
7170
7171 int
7172 ia64_parse_name (name, e)
7173 char *name;
7174 expressionS *e;
7175 {
7176 struct const_desc *cdesc;
7177 struct dynreg *dr = 0;
7178 unsigned int regnum;
7179 struct symbol *sym;
7180 char *end;
7181
7182 /* first see if NAME is a known register name: */
7183 sym = hash_find (md.reg_hash, name);
7184 if (sym)
7185 {
7186 e->X_op = O_register;
7187 e->X_add_number = S_GET_VALUE (sym);
7188 return 1;
7189 }
7190
7191 cdesc = hash_find (md.const_hash, name);
7192 if (cdesc)
7193 {
7194 e->X_op = O_constant;
7195 e->X_add_number = cdesc->value;
7196 return 1;
7197 }
7198
7199 /* check for inN, locN, or outN: */
7200 switch (name[0])
7201 {
7202 case 'i':
7203 if (name[1] == 'n' && ISDIGIT (name[2]))
7204 {
7205 dr = &md.in;
7206 name += 2;
7207 }
7208 break;
7209
7210 case 'l':
7211 if (name[1] == 'o' && name[2] == 'c' && ISDIGIT (name[3]))
7212 {
7213 dr = &md.loc;
7214 name += 3;
7215 }
7216 break;
7217
7218 case 'o':
7219 if (name[1] == 'u' && name[2] == 't' && ISDIGIT (name[3]))
7220 {
7221 dr = &md.out;
7222 name += 3;
7223 }
7224 break;
7225
7226 default:
7227 break;
7228 }
7229
7230 if (dr)
7231 {
7232 /* The name is inN, locN, or outN; parse the register number. */
7233 regnum = strtoul (name, &end, 10);
7234 if (end > name && *end == '\0')
7235 {
7236 if ((unsigned) regnum >= dr->num_regs)
7237 {
7238 if (!dr->num_regs)
7239 as_bad ("No current frame");
7240 else
7241 as_bad ("Register number out of range 0..%u",
7242 dr->num_regs - 1);
7243 regnum = 0;
7244 }
7245 e->X_op = O_register;
7246 e->X_add_number = dr->base + regnum;
7247 return 1;
7248 }
7249 }
7250
7251 if ((dr = hash_find (md.dynreg_hash, name)))
7252 {
7253 /* We've got ourselves the name of a rotating register set.
7254 Store the base register number in the low 16 bits of
7255 X_add_number and the size of the register set in the top 16
7256 bits. */
7257 e->X_op = O_register;
7258 e->X_add_number = dr->base | (dr->num_regs << 16);
7259 return 1;
7260 }
7261 return 0;
7262 }
7263
7264 /* Remove the '#' suffix that indicates a symbol as opposed to a register. */
7265
7266 char *
7267 ia64_canonicalize_symbol_name (name)
7268 char *name;
7269 {
7270 size_t len = strlen (name);
7271 if (len > 1 && name[len - 1] == '#')
7272 name[len - 1] = '\0';
7273 return name;
7274 }
7275
7276 /* Return true if idesc is a conditional branch instruction. This excludes
7277 the modulo scheduled branches, and br.ia. Mod-sched branches are excluded
7278 because they always read/write resources regardless of the value of the
7279 qualifying predicate. br.ia must always use p0, and hence is always
7280 taken. Thus this function returns true for branches which can fall
7281 through, and which use no resources if they do fall through. */
7282
7283 static int
7284 is_conditional_branch (idesc)
7285 struct ia64_opcode *idesc;
7286 {
7287 /* br is a conditional branch. Everything that starts with br. except
7288 br.ia, br.c{loop,top,exit}, and br.w{top,exit} is a conditional branch.
7289 Everything that starts with brl is a conditional branch. */
7290 return (idesc->name[0] == 'b' && idesc->name[1] == 'r'
7291 && (idesc->name[2] == '\0'
7292 || (idesc->name[2] == '.' && idesc->name[3] != 'i'
7293 && idesc->name[3] != 'c' && idesc->name[3] != 'w')
7294 || idesc->name[2] == 'l'
7295 /* br.cond, br.call, br.clr */
7296 || (idesc->name[2] == '.' && idesc->name[3] == 'c'
7297 && (idesc->name[4] == 'a' || idesc->name[4] == 'o'
7298 || (idesc->name[4] == 'l' && idesc->name[5] == 'r')))));
7299 }
7300
7301 /* Return whether the given opcode is a taken branch. If there's any doubt,
7302 returns zero. */
7303
7304 static int
7305 is_taken_branch (idesc)
7306 struct ia64_opcode *idesc;
7307 {
7308 return ((is_conditional_branch (idesc) && CURR_SLOT.qp_regno == 0)
7309 || strncmp (idesc->name, "br.ia", 5) == 0);
7310 }
7311
7312 /* Return whether the given opcode is an interruption or rfi. If there's any
7313 doubt, returns zero. */
7314
7315 static int
7316 is_interruption_or_rfi (idesc)
7317 struct ia64_opcode *idesc;
7318 {
7319 if (strcmp (idesc->name, "rfi") == 0)
7320 return 1;
7321 return 0;
7322 }
7323
7324 /* Returns the index of the given dependency in the opcode's list of chks, or
7325 -1 if there is no dependency. */
7326
7327 static int
7328 depends_on (depind, idesc)
7329 int depind;
7330 struct ia64_opcode *idesc;
7331 {
7332 int i;
7333 const struct ia64_opcode_dependency *dep = idesc->dependencies;
7334 for (i = 0; i < dep->nchks; i++)
7335 {
7336 if (depind == DEP (dep->chks[i]))
7337 return i;
7338 }
7339 return -1;
7340 }
7341
7342 /* Determine a set of specific resources used for a particular resource
7343 class. Returns the number of specific resources identified For those
7344 cases which are not determinable statically, the resource returned is
7345 marked nonspecific.
7346
7347 Meanings of value in 'NOTE':
7348 1) only read/write when the register number is explicitly encoded in the
7349 insn.
7350 2) only read CFM when accessing a rotating GR, FR, or PR. mov pr only
7351 accesses CFM when qualifying predicate is in the rotating region.
7352 3) general register value is used to specify an indirect register; not
7353 determinable statically.
7354 4) only read the given resource when bits 7:0 of the indirect index
7355 register value does not match the register number of the resource; not
7356 determinable statically.
7357 5) all rules are implementation specific.
7358 6) only when both the index specified by the reader and the index specified
7359 by the writer have the same value in bits 63:61; not determinable
7360 statically.
7361 7) only access the specified resource when the corresponding mask bit is
7362 set
7363 8) PSR.dfh is only read when these insns reference FR32-127. PSR.dfl is
7364 only read when these insns reference FR2-31
7365 9) PSR.mfl is only written when these insns write FR2-31. PSR.mfh is only
7366 written when these insns write FR32-127
7367 10) The PSR.bn bit is only accessed when one of GR16-31 is specified in the
7368 instruction
7369 11) The target predicates are written independently of PR[qp], but source
7370 registers are only read if PR[qp] is true. Since the state of PR[qp]
7371 cannot statically be determined, all source registers are marked used.
7372 12) This insn only reads the specified predicate register when that
7373 register is the PR[qp].
7374 13) This reference to ld-c only applies to teh GR whose value is loaded
7375 with data returned from memory, not the post-incremented address register.
7376 14) The RSE resource includes the implementation-specific RSE internal
7377 state resources. At least one (and possibly more) of these resources are
7378 read by each instruction listed in IC:rse-readers. At least one (and
7379 possibly more) of these resources are written by each insn listed in
7380 IC:rse-writers.
7381 15+16) Represents reserved instructions, which the assembler does not
7382 generate.
7383
7384 Memory resources (i.e. locations in memory) are *not* marked or tracked by
7385 this code; there are no dependency violations based on memory access.
7386 */
7387
7388 #define MAX_SPECS 256
7389 #define DV_CHK 1
7390 #define DV_REG 0
7391
7392 static int
7393 specify_resource (dep, idesc, type, specs, note, path)
7394 const struct ia64_dependency *dep;
7395 struct ia64_opcode *idesc;
7396 int type; /* is this a DV chk or a DV reg? */
7397 struct rsrc specs[MAX_SPECS]; /* returned specific resources */
7398 int note; /* resource note for this insn's usage */
7399 int path; /* which execution path to examine */
7400 {
7401 int count = 0;
7402 int i;
7403 int rsrc_write = 0;
7404 struct rsrc tmpl;
7405
7406 if (dep->mode == IA64_DV_WAW
7407 || (dep->mode == IA64_DV_RAW && type == DV_REG)
7408 || (dep->mode == IA64_DV_WAR && type == DV_CHK))
7409 rsrc_write = 1;
7410
7411 /* template for any resources we identify */
7412 tmpl.dependency = dep;
7413 tmpl.note = note;
7414 tmpl.insn_srlz = tmpl.data_srlz = 0;
7415 tmpl.qp_regno = CURR_SLOT.qp_regno;
7416 tmpl.link_to_qp_branch = 1;
7417 tmpl.mem_offset.hint = 0;
7418 tmpl.specific = 1;
7419 tmpl.index = 0;
7420 tmpl.cmp_type = CMP_NONE;
7421
7422 #define UNHANDLED \
7423 as_warn (_("Unhandled dependency %s for %s (%s), note %d"), \
7424 dep->name, idesc->name, (rsrc_write?"write":"read"), note)
7425 #define KNOWN(REG) (gr_values[REG].known && gr_values[REG].path >= path)
7426
7427 /* we don't need to track these */
7428 if (dep->semantics == IA64_DVS_NONE)
7429 return 0;
7430
7431 switch (dep->specifier)
7432 {
7433 case IA64_RS_AR_K:
7434 if (note == 1)
7435 {
7436 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7437 {
7438 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7439 if (regno >= 0 && regno <= 7)
7440 {
7441 specs[count] = tmpl;
7442 specs[count++].index = regno;
7443 }
7444 }
7445 }
7446 else if (note == 0)
7447 {
7448 for (i = 0; i < 8; i++)
7449 {
7450 specs[count] = tmpl;
7451 specs[count++].index = i;
7452 }
7453 }
7454 else
7455 {
7456 UNHANDLED;
7457 }
7458 break;
7459
7460 case IA64_RS_AR_UNAT:
7461 /* This is a mov =AR or mov AR= instruction. */
7462 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7463 {
7464 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7465 if (regno == AR_UNAT)
7466 {
7467 specs[count++] = tmpl;
7468 }
7469 }
7470 else
7471 {
7472 /* This is a spill/fill, or other instruction that modifies the
7473 unat register. */
7474
7475 /* Unless we can determine the specific bits used, mark the whole
7476 thing; bits 8:3 of the memory address indicate the bit used in
7477 UNAT. The .mem.offset hint may be used to eliminate a small
7478 subset of conflicts. */
7479 specs[count] = tmpl;
7480 if (md.mem_offset.hint)
7481 {
7482 if (md.debug_dv)
7483 fprintf (stderr, " Using hint for spill/fill\n");
7484 /* The index isn't actually used, just set it to something
7485 approximating the bit index. */
7486 specs[count].index = (md.mem_offset.offset >> 3) & 0x3F;
7487 specs[count].mem_offset.hint = 1;
7488 specs[count].mem_offset.offset = md.mem_offset.offset;
7489 specs[count++].mem_offset.base = md.mem_offset.base;
7490 }
7491 else
7492 {
7493 specs[count++].specific = 0;
7494 }
7495 }
7496 break;
7497
7498 case IA64_RS_AR:
7499 if (note == 1)
7500 {
7501 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7502 {
7503 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7504 if ((regno >= 8 && regno <= 15)
7505 || (regno >= 20 && regno <= 23)
7506 || (regno >= 31 && regno <= 39)
7507 || (regno >= 41 && regno <= 47)
7508 || (regno >= 67 && regno <= 111))
7509 {
7510 specs[count] = tmpl;
7511 specs[count++].index = regno;
7512 }
7513 }
7514 }
7515 else
7516 {
7517 UNHANDLED;
7518 }
7519 break;
7520
7521 case IA64_RS_ARb:
7522 if (note == 1)
7523 {
7524 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7525 {
7526 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7527 if ((regno >= 48 && regno <= 63)
7528 || (regno >= 112 && regno <= 127))
7529 {
7530 specs[count] = tmpl;
7531 specs[count++].index = regno;
7532 }
7533 }
7534 }
7535 else if (note == 0)
7536 {
7537 for (i = 48; i < 64; i++)
7538 {
7539 specs[count] = tmpl;
7540 specs[count++].index = i;
7541 }
7542 for (i = 112; i < 128; i++)
7543 {
7544 specs[count] = tmpl;
7545 specs[count++].index = i;
7546 }
7547 }
7548 else
7549 {
7550 UNHANDLED;
7551 }
7552 break;
7553
7554 case IA64_RS_BR:
7555 if (note != 1)
7556 {
7557 UNHANDLED;
7558 }
7559 else
7560 {
7561 if (rsrc_write)
7562 {
7563 for (i = 0; i < idesc->num_outputs; i++)
7564 if (idesc->operands[i] == IA64_OPND_B1
7565 || idesc->operands[i] == IA64_OPND_B2)
7566 {
7567 specs[count] = tmpl;
7568 specs[count++].index =
7569 CURR_SLOT.opnd[i].X_add_number - REG_BR;
7570 }
7571 }
7572 else
7573 {
7574 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
7575 if (idesc->operands[i] == IA64_OPND_B1
7576 || idesc->operands[i] == IA64_OPND_B2)
7577 {
7578 specs[count] = tmpl;
7579 specs[count++].index =
7580 CURR_SLOT.opnd[i].X_add_number - REG_BR;
7581 }
7582 }
7583 }
7584 break;
7585
7586 case IA64_RS_CPUID: /* four or more registers */
7587 if (note == 3)
7588 {
7589 if (idesc->operands[!rsrc_write] == IA64_OPND_CPUID_R3)
7590 {
7591 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7592 if (regno >= 0 && regno < NELEMS (gr_values)
7593 && KNOWN (regno))
7594 {
7595 specs[count] = tmpl;
7596 specs[count++].index = gr_values[regno].value & 0xFF;
7597 }
7598 else
7599 {
7600 specs[count] = tmpl;
7601 specs[count++].specific = 0;
7602 }
7603 }
7604 }
7605 else
7606 {
7607 UNHANDLED;
7608 }
7609 break;
7610
7611 case IA64_RS_DBR: /* four or more registers */
7612 if (note == 3)
7613 {
7614 if (idesc->operands[!rsrc_write] == IA64_OPND_DBR_R3)
7615 {
7616 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7617 if (regno >= 0 && regno < NELEMS (gr_values)
7618 && KNOWN (regno))
7619 {
7620 specs[count] = tmpl;
7621 specs[count++].index = gr_values[regno].value & 0xFF;
7622 }
7623 else
7624 {
7625 specs[count] = tmpl;
7626 specs[count++].specific = 0;
7627 }
7628 }
7629 }
7630 else if (note == 0 && !rsrc_write)
7631 {
7632 specs[count] = tmpl;
7633 specs[count++].specific = 0;
7634 }
7635 else
7636 {
7637 UNHANDLED;
7638 }
7639 break;
7640
7641 case IA64_RS_IBR: /* four or more registers */
7642 if (note == 3)
7643 {
7644 if (idesc->operands[!rsrc_write] == IA64_OPND_IBR_R3)
7645 {
7646 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7647 if (regno >= 0 && regno < NELEMS (gr_values)
7648 && KNOWN (regno))
7649 {
7650 specs[count] = tmpl;
7651 specs[count++].index = gr_values[regno].value & 0xFF;
7652 }
7653 else
7654 {
7655 specs[count] = tmpl;
7656 specs[count++].specific = 0;
7657 }
7658 }
7659 }
7660 else
7661 {
7662 UNHANDLED;
7663 }
7664 break;
7665
7666 case IA64_RS_MSR:
7667 if (note == 5)
7668 {
7669 /* These are implementation specific. Force all references to
7670 conflict with all other references. */
7671 specs[count] = tmpl;
7672 specs[count++].specific = 0;
7673 }
7674 else
7675 {
7676 UNHANDLED;
7677 }
7678 break;
7679
7680 case IA64_RS_PKR: /* 16 or more registers */
7681 if (note == 3 || note == 4)
7682 {
7683 if (idesc->operands[!rsrc_write] == IA64_OPND_PKR_R3)
7684 {
7685 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7686 if (regno >= 0 && regno < NELEMS (gr_values)
7687 && KNOWN (regno))
7688 {
7689 if (note == 3)
7690 {
7691 specs[count] = tmpl;
7692 specs[count++].index = gr_values[regno].value & 0xFF;
7693 }
7694 else
7695 for (i = 0; i < NELEMS (gr_values); i++)
7696 {
7697 /* Uses all registers *except* the one in R3. */
7698 if ((unsigned)i != (gr_values[regno].value & 0xFF))
7699 {
7700 specs[count] = tmpl;
7701 specs[count++].index = i;
7702 }
7703 }
7704 }
7705 else
7706 {
7707 specs[count] = tmpl;
7708 specs[count++].specific = 0;
7709 }
7710 }
7711 }
7712 else if (note == 0)
7713 {
7714 /* probe et al. */
7715 specs[count] = tmpl;
7716 specs[count++].specific = 0;
7717 }
7718 break;
7719
7720 case IA64_RS_PMC: /* four or more registers */
7721 if (note == 3)
7722 {
7723 if (idesc->operands[!rsrc_write] == IA64_OPND_PMC_R3
7724 || (!rsrc_write && idesc->operands[1] == IA64_OPND_PMD_R3))
7725
7726 {
7727 int index = ((idesc->operands[1] == IA64_OPND_R3 && !rsrc_write)
7728 ? 1 : !rsrc_write);
7729 int regno = CURR_SLOT.opnd[index].X_add_number - REG_GR;
7730 if (regno >= 0 && regno < NELEMS (gr_values)
7731 && KNOWN (regno))
7732 {
7733 specs[count] = tmpl;
7734 specs[count++].index = gr_values[regno].value & 0xFF;
7735 }
7736 else
7737 {
7738 specs[count] = tmpl;
7739 specs[count++].specific = 0;
7740 }
7741 }
7742 }
7743 else
7744 {
7745 UNHANDLED;
7746 }
7747 break;
7748
7749 case IA64_RS_PMD: /* four or more registers */
7750 if (note == 3)
7751 {
7752 if (idesc->operands[!rsrc_write] == IA64_OPND_PMD_R3)
7753 {
7754 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7755 if (regno >= 0 && regno < NELEMS (gr_values)
7756 && KNOWN (regno))
7757 {
7758 specs[count] = tmpl;
7759 specs[count++].index = gr_values[regno].value & 0xFF;
7760 }
7761 else
7762 {
7763 specs[count] = tmpl;
7764 specs[count++].specific = 0;
7765 }
7766 }
7767 }
7768 else
7769 {
7770 UNHANDLED;
7771 }
7772 break;
7773
7774 case IA64_RS_RR: /* eight registers */
7775 if (note == 6)
7776 {
7777 if (idesc->operands[!rsrc_write] == IA64_OPND_RR_R3)
7778 {
7779 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7780 if (regno >= 0 && regno < NELEMS (gr_values)
7781 && KNOWN (regno))
7782 {
7783 specs[count] = tmpl;
7784 specs[count++].index = (gr_values[regno].value >> 61) & 0x7;
7785 }
7786 else
7787 {
7788 specs[count] = tmpl;
7789 specs[count++].specific = 0;
7790 }
7791 }
7792 }
7793 else if (note == 0 && !rsrc_write)
7794 {
7795 specs[count] = tmpl;
7796 specs[count++].specific = 0;
7797 }
7798 else
7799 {
7800 UNHANDLED;
7801 }
7802 break;
7803
7804 case IA64_RS_CR_IRR:
7805 if (note == 0)
7806 {
7807 /* handle mov-from-CR-IVR; it's a read that writes CR[IRR] */
7808 int regno = CURR_SLOT.opnd[1].X_add_number - REG_CR;
7809 if (rsrc_write
7810 && idesc->operands[1] == IA64_OPND_CR3
7811 && regno == CR_IVR)
7812 {
7813 for (i = 0; i < 4; i++)
7814 {
7815 specs[count] = tmpl;
7816 specs[count++].index = CR_IRR0 + i;
7817 }
7818 }
7819 }
7820 else if (note == 1)
7821 {
7822 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
7823 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
7824 && regno >= CR_IRR0
7825 && regno <= CR_IRR3)
7826 {
7827 specs[count] = tmpl;
7828 specs[count++].index = regno;
7829 }
7830 }
7831 else
7832 {
7833 UNHANDLED;
7834 }
7835 break;
7836
7837 case IA64_RS_CR_LRR:
7838 if (note != 1)
7839 {
7840 UNHANDLED;
7841 }
7842 else
7843 {
7844 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
7845 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
7846 && (regno == CR_LRR0 || regno == CR_LRR1))
7847 {
7848 specs[count] = tmpl;
7849 specs[count++].index = regno;
7850 }
7851 }
7852 break;
7853
7854 case IA64_RS_CR:
7855 if (note == 1)
7856 {
7857 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
7858 {
7859 specs[count] = tmpl;
7860 specs[count++].index =
7861 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
7862 }
7863 }
7864 else
7865 {
7866 UNHANDLED;
7867 }
7868 break;
7869
7870 case IA64_RS_FR:
7871 case IA64_RS_FRb:
7872 if (note != 1)
7873 {
7874 UNHANDLED;
7875 }
7876 else if (rsrc_write)
7877 {
7878 if (dep->specifier == IA64_RS_FRb
7879 && idesc->operands[0] == IA64_OPND_F1)
7880 {
7881 specs[count] = tmpl;
7882 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_FR;
7883 }
7884 }
7885 else
7886 {
7887 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
7888 {
7889 if (idesc->operands[i] == IA64_OPND_F2
7890 || idesc->operands[i] == IA64_OPND_F3
7891 || idesc->operands[i] == IA64_OPND_F4)
7892 {
7893 specs[count] = tmpl;
7894 specs[count++].index =
7895 CURR_SLOT.opnd[i].X_add_number - REG_FR;
7896 }
7897 }
7898 }
7899 break;
7900
7901 case IA64_RS_GR:
7902 if (note == 13)
7903 {
7904 /* This reference applies only to the GR whose value is loaded with
7905 data returned from memory. */
7906 specs[count] = tmpl;
7907 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_GR;
7908 }
7909 else if (note == 1)
7910 {
7911 if (rsrc_write)
7912 {
7913 for (i = 0; i < idesc->num_outputs; i++)
7914 if (idesc->operands[i] == IA64_OPND_R1
7915 || idesc->operands[i] == IA64_OPND_R2
7916 || idesc->operands[i] == IA64_OPND_R3)
7917 {
7918 specs[count] = tmpl;
7919 specs[count++].index =
7920 CURR_SLOT.opnd[i].X_add_number - REG_GR;
7921 }
7922 if (idesc->flags & IA64_OPCODE_POSTINC)
7923 for (i = 0; i < NELEMS (idesc->operands); i++)
7924 if (idesc->operands[i] == IA64_OPND_MR3)
7925 {
7926 specs[count] = tmpl;
7927 specs[count++].index =
7928 CURR_SLOT.opnd[i].X_add_number - REG_GR;
7929 }
7930 }
7931 else
7932 {
7933 /* Look for anything that reads a GR. */
7934 for (i = 0; i < NELEMS (idesc->operands); i++)
7935 {
7936 if (idesc->operands[i] == IA64_OPND_MR3
7937 || idesc->operands[i] == IA64_OPND_CPUID_R3
7938 || idesc->operands[i] == IA64_OPND_DBR_R3
7939 || idesc->operands[i] == IA64_OPND_IBR_R3
7940 || idesc->operands[i] == IA64_OPND_MSR_R3
7941 || idesc->operands[i] == IA64_OPND_PKR_R3
7942 || idesc->operands[i] == IA64_OPND_PMC_R3
7943 || idesc->operands[i] == IA64_OPND_PMD_R3
7944 || idesc->operands[i] == IA64_OPND_RR_R3
7945 || ((i >= idesc->num_outputs)
7946 && (idesc->operands[i] == IA64_OPND_R1
7947 || idesc->operands[i] == IA64_OPND_R2
7948 || idesc->operands[i] == IA64_OPND_R3
7949 /* addl source register. */
7950 || idesc->operands[i] == IA64_OPND_R3_2)))
7951 {
7952 specs[count] = tmpl;
7953 specs[count++].index =
7954 CURR_SLOT.opnd[i].X_add_number - REG_GR;
7955 }
7956 }
7957 }
7958 }
7959 else
7960 {
7961 UNHANDLED;
7962 }
7963 break;
7964
7965 /* This is the same as IA64_RS_PRr, except that the register range is
7966 from 1 - 15, and there are no rotating register reads/writes here. */
7967 case IA64_RS_PR:
7968 if (note == 0)
7969 {
7970 for (i = 1; i < 16; i++)
7971 {
7972 specs[count] = tmpl;
7973 specs[count++].index = i;
7974 }
7975 }
7976 else if (note == 7)
7977 {
7978 valueT mask = 0;
7979 /* Mark only those registers indicated by the mask. */
7980 if (rsrc_write)
7981 {
7982 mask = CURR_SLOT.opnd[2].X_add_number;
7983 for (i = 1; i < 16; i++)
7984 if (mask & ((valueT) 1 << i))
7985 {
7986 specs[count] = tmpl;
7987 specs[count++].index = i;
7988 }
7989 }
7990 else
7991 {
7992 UNHANDLED;
7993 }
7994 }
7995 else if (note == 11) /* note 11 implies note 1 as well */
7996 {
7997 if (rsrc_write)
7998 {
7999 for (i = 0; i < idesc->num_outputs; i++)
8000 {
8001 if (idesc->operands[i] == IA64_OPND_P1
8002 || idesc->operands[i] == IA64_OPND_P2)
8003 {
8004 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8005 if (regno >= 1 && regno < 16)
8006 {
8007 specs[count] = tmpl;
8008 specs[count++].index = regno;
8009 }
8010 }
8011 }
8012 }
8013 else
8014 {
8015 UNHANDLED;
8016 }
8017 }
8018 else if (note == 12)
8019 {
8020 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8021 {
8022 specs[count] = tmpl;
8023 specs[count++].index = CURR_SLOT.qp_regno;
8024 }
8025 }
8026 else if (note == 1)
8027 {
8028 if (rsrc_write)
8029 {
8030 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8031 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8032 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8033 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8034
8035 if ((idesc->operands[0] == IA64_OPND_P1
8036 || idesc->operands[0] == IA64_OPND_P2)
8037 && p1 >= 1 && p1 < 16)
8038 {
8039 specs[count] = tmpl;
8040 specs[count].cmp_type =
8041 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8042 specs[count++].index = p1;
8043 }
8044 if ((idesc->operands[1] == IA64_OPND_P1
8045 || idesc->operands[1] == IA64_OPND_P2)
8046 && p2 >= 1 && p2 < 16)
8047 {
8048 specs[count] = tmpl;
8049 specs[count].cmp_type =
8050 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8051 specs[count++].index = p2;
8052 }
8053 }
8054 else
8055 {
8056 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8057 {
8058 specs[count] = tmpl;
8059 specs[count++].index = CURR_SLOT.qp_regno;
8060 }
8061 if (idesc->operands[1] == IA64_OPND_PR)
8062 {
8063 for (i = 1; i < 16; i++)
8064 {
8065 specs[count] = tmpl;
8066 specs[count++].index = i;
8067 }
8068 }
8069 }
8070 }
8071 else
8072 {
8073 UNHANDLED;
8074 }
8075 break;
8076
8077 /* This is the general case for PRs. IA64_RS_PR and IA64_RS_PR63 are
8078 simplified cases of this. */
8079 case IA64_RS_PRr:
8080 if (note == 0)
8081 {
8082 for (i = 16; i < 63; i++)
8083 {
8084 specs[count] = tmpl;
8085 specs[count++].index = i;
8086 }
8087 }
8088 else if (note == 7)
8089 {
8090 valueT mask = 0;
8091 /* Mark only those registers indicated by the mask. */
8092 if (rsrc_write
8093 && idesc->operands[0] == IA64_OPND_PR)
8094 {
8095 mask = CURR_SLOT.opnd[2].X_add_number;
8096 if (mask & ((valueT) 1 << 16))
8097 for (i = 16; i < 63; i++)
8098 {
8099 specs[count] = tmpl;
8100 specs[count++].index = i;
8101 }
8102 }
8103 else if (rsrc_write
8104 && idesc->operands[0] == IA64_OPND_PR_ROT)
8105 {
8106 for (i = 16; i < 63; i++)
8107 {
8108 specs[count] = tmpl;
8109 specs[count++].index = i;
8110 }
8111 }
8112 else
8113 {
8114 UNHANDLED;
8115 }
8116 }
8117 else if (note == 11) /* note 11 implies note 1 as well */
8118 {
8119 if (rsrc_write)
8120 {
8121 for (i = 0; i < idesc->num_outputs; i++)
8122 {
8123 if (idesc->operands[i] == IA64_OPND_P1
8124 || idesc->operands[i] == IA64_OPND_P2)
8125 {
8126 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8127 if (regno >= 16 && regno < 63)
8128 {
8129 specs[count] = tmpl;
8130 specs[count++].index = regno;
8131 }
8132 }
8133 }
8134 }
8135 else
8136 {
8137 UNHANDLED;
8138 }
8139 }
8140 else if (note == 12)
8141 {
8142 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
8143 {
8144 specs[count] = tmpl;
8145 specs[count++].index = CURR_SLOT.qp_regno;
8146 }
8147 }
8148 else if (note == 1)
8149 {
8150 if (rsrc_write)
8151 {
8152 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8153 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8154 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8155 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8156
8157 if ((idesc->operands[0] == IA64_OPND_P1
8158 || idesc->operands[0] == IA64_OPND_P2)
8159 && p1 >= 16 && p1 < 63)
8160 {
8161 specs[count] = tmpl;
8162 specs[count].cmp_type =
8163 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8164 specs[count++].index = p1;
8165 }
8166 if ((idesc->operands[1] == IA64_OPND_P1
8167 || idesc->operands[1] == IA64_OPND_P2)
8168 && p2 >= 16 && p2 < 63)
8169 {
8170 specs[count] = tmpl;
8171 specs[count].cmp_type =
8172 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8173 specs[count++].index = p2;
8174 }
8175 }
8176 else
8177 {
8178 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
8179 {
8180 specs[count] = tmpl;
8181 specs[count++].index = CURR_SLOT.qp_regno;
8182 }
8183 if (idesc->operands[1] == IA64_OPND_PR)
8184 {
8185 for (i = 16; i < 63; i++)
8186 {
8187 specs[count] = tmpl;
8188 specs[count++].index = i;
8189 }
8190 }
8191 }
8192 }
8193 else
8194 {
8195 UNHANDLED;
8196 }
8197 break;
8198
8199 case IA64_RS_PSR:
8200 /* Verify that the instruction is using the PSR bit indicated in
8201 dep->regindex. */
8202 if (note == 0)
8203 {
8204 if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_UM)
8205 {
8206 if (dep->regindex < 6)
8207 {
8208 specs[count++] = tmpl;
8209 }
8210 }
8211 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR)
8212 {
8213 if (dep->regindex < 32
8214 || dep->regindex == 35
8215 || dep->regindex == 36
8216 || (!rsrc_write && dep->regindex == PSR_CPL))
8217 {
8218 specs[count++] = tmpl;
8219 }
8220 }
8221 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_L)
8222 {
8223 if (dep->regindex < 32
8224 || dep->regindex == 35
8225 || dep->regindex == 36
8226 || (rsrc_write && dep->regindex == PSR_CPL))
8227 {
8228 specs[count++] = tmpl;
8229 }
8230 }
8231 else
8232 {
8233 /* Several PSR bits have very specific dependencies. */
8234 switch (dep->regindex)
8235 {
8236 default:
8237 specs[count++] = tmpl;
8238 break;
8239 case PSR_IC:
8240 if (rsrc_write)
8241 {
8242 specs[count++] = tmpl;
8243 }
8244 else
8245 {
8246 /* Only certain CR accesses use PSR.ic */
8247 if (idesc->operands[0] == IA64_OPND_CR3
8248 || idesc->operands[1] == IA64_OPND_CR3)
8249 {
8250 int index =
8251 ((idesc->operands[0] == IA64_OPND_CR3)
8252 ? 0 : 1);
8253 int regno =
8254 CURR_SLOT.opnd[index].X_add_number - REG_CR;
8255
8256 switch (regno)
8257 {
8258 default:
8259 break;
8260 case CR_ITIR:
8261 case CR_IFS:
8262 case CR_IIM:
8263 case CR_IIP:
8264 case CR_IPSR:
8265 case CR_ISR:
8266 case CR_IFA:
8267 case CR_IHA:
8268 case CR_IIPA:
8269 specs[count++] = tmpl;
8270 break;
8271 }
8272 }
8273 }
8274 break;
8275 case PSR_CPL:
8276 if (rsrc_write)
8277 {
8278 specs[count++] = tmpl;
8279 }
8280 else
8281 {
8282 /* Only some AR accesses use cpl */
8283 if (idesc->operands[0] == IA64_OPND_AR3
8284 || idesc->operands[1] == IA64_OPND_AR3)
8285 {
8286 int index =
8287 ((idesc->operands[0] == IA64_OPND_AR3)
8288 ? 0 : 1);
8289 int regno =
8290 CURR_SLOT.opnd[index].X_add_number - REG_AR;
8291
8292 if (regno == AR_ITC
8293 || (index == 0
8294 && (regno == AR_ITC
8295 || regno == AR_RSC
8296 || (regno >= AR_K0
8297 && regno <= AR_K7))))
8298 {
8299 specs[count++] = tmpl;
8300 }
8301 }
8302 else
8303 {
8304 specs[count++] = tmpl;
8305 }
8306 break;
8307 }
8308 }
8309 }
8310 }
8311 else if (note == 7)
8312 {
8313 valueT mask = 0;
8314 if (idesc->operands[0] == IA64_OPND_IMMU24)
8315 {
8316 mask = CURR_SLOT.opnd[0].X_add_number;
8317 }
8318 else
8319 {
8320 UNHANDLED;
8321 }
8322 if (mask & ((valueT) 1 << dep->regindex))
8323 {
8324 specs[count++] = tmpl;
8325 }
8326 }
8327 else if (note == 8)
8328 {
8329 int min = dep->regindex == PSR_DFL ? 2 : 32;
8330 int max = dep->regindex == PSR_DFL ? 31 : 127;
8331 /* dfh is read on FR32-127; dfl is read on FR2-31 */
8332 for (i = 0; i < NELEMS (idesc->operands); i++)
8333 {
8334 if (idesc->operands[i] == IA64_OPND_F1
8335 || idesc->operands[i] == IA64_OPND_F2
8336 || idesc->operands[i] == IA64_OPND_F3
8337 || idesc->operands[i] == IA64_OPND_F4)
8338 {
8339 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
8340 if (reg >= min && reg <= max)
8341 {
8342 specs[count++] = tmpl;
8343 }
8344 }
8345 }
8346 }
8347 else if (note == 9)
8348 {
8349 int min = dep->regindex == PSR_MFL ? 2 : 32;
8350 int max = dep->regindex == PSR_MFL ? 31 : 127;
8351 /* mfh is read on writes to FR32-127; mfl is read on writes to
8352 FR2-31 */
8353 for (i = 0; i < idesc->num_outputs; i++)
8354 {
8355 if (idesc->operands[i] == IA64_OPND_F1)
8356 {
8357 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
8358 if (reg >= min && reg <= max)
8359 {
8360 specs[count++] = tmpl;
8361 }
8362 }
8363 }
8364 }
8365 else if (note == 10)
8366 {
8367 for (i = 0; i < NELEMS (idesc->operands); i++)
8368 {
8369 if (idesc->operands[i] == IA64_OPND_R1
8370 || idesc->operands[i] == IA64_OPND_R2
8371 || idesc->operands[i] == IA64_OPND_R3)
8372 {
8373 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
8374 if (regno >= 16 && regno <= 31)
8375 {
8376 specs[count++] = tmpl;
8377 }
8378 }
8379 }
8380 }
8381 else
8382 {
8383 UNHANDLED;
8384 }
8385 break;
8386
8387 case IA64_RS_AR_FPSR:
8388 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8389 {
8390 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8391 if (regno == AR_FPSR)
8392 {
8393 specs[count++] = tmpl;
8394 }
8395 }
8396 else
8397 {
8398 specs[count++] = tmpl;
8399 }
8400 break;
8401
8402 case IA64_RS_ARX:
8403 /* Handle all AR[REG] resources */
8404 if (note == 0 || note == 1)
8405 {
8406 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8407 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3
8408 && regno == dep->regindex)
8409 {
8410 specs[count++] = tmpl;
8411 }
8412 /* other AR[REG] resources may be affected by AR accesses */
8413 else if (idesc->operands[0] == IA64_OPND_AR3)
8414 {
8415 /* AR[] writes */
8416 regno = CURR_SLOT.opnd[0].X_add_number - REG_AR;
8417 switch (dep->regindex)
8418 {
8419 default:
8420 break;
8421 case AR_BSP:
8422 case AR_RNAT:
8423 if (regno == AR_BSPSTORE)
8424 {
8425 specs[count++] = tmpl;
8426 }
8427 case AR_RSC:
8428 if (!rsrc_write &&
8429 (regno == AR_BSPSTORE
8430 || regno == AR_RNAT))
8431 {
8432 specs[count++] = tmpl;
8433 }
8434 break;
8435 }
8436 }
8437 else if (idesc->operands[1] == IA64_OPND_AR3)
8438 {
8439 /* AR[] reads */
8440 regno = CURR_SLOT.opnd[1].X_add_number - REG_AR;
8441 switch (dep->regindex)
8442 {
8443 default:
8444 break;
8445 case AR_RSC:
8446 if (regno == AR_BSPSTORE || regno == AR_RNAT)
8447 {
8448 specs[count++] = tmpl;
8449 }
8450 break;
8451 }
8452 }
8453 else
8454 {
8455 specs[count++] = tmpl;
8456 }
8457 }
8458 else
8459 {
8460 UNHANDLED;
8461 }
8462 break;
8463
8464 case IA64_RS_CRX:
8465 /* Handle all CR[REG] resources */
8466 if (note == 0 || note == 1)
8467 {
8468 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
8469 {
8470 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8471 if (regno == dep->regindex)
8472 {
8473 specs[count++] = tmpl;
8474 }
8475 else if (!rsrc_write)
8476 {
8477 /* Reads from CR[IVR] affect other resources. */
8478 if (regno == CR_IVR)
8479 {
8480 if ((dep->regindex >= CR_IRR0
8481 && dep->regindex <= CR_IRR3)
8482 || dep->regindex == CR_TPR)
8483 {
8484 specs[count++] = tmpl;
8485 }
8486 }
8487 }
8488 }
8489 else
8490 {
8491 specs[count++] = tmpl;
8492 }
8493 }
8494 else
8495 {
8496 UNHANDLED;
8497 }
8498 break;
8499
8500 case IA64_RS_INSERVICE:
8501 /* look for write of EOI (67) or read of IVR (65) */
8502 if ((idesc->operands[0] == IA64_OPND_CR3
8503 && CURR_SLOT.opnd[0].X_add_number - REG_CR == CR_EOI)
8504 || (idesc->operands[1] == IA64_OPND_CR3
8505 && CURR_SLOT.opnd[1].X_add_number - REG_CR == CR_IVR))
8506 {
8507 specs[count++] = tmpl;
8508 }
8509 break;
8510
8511 case IA64_RS_GR0:
8512 if (note == 1)
8513 {
8514 specs[count++] = tmpl;
8515 }
8516 else
8517 {
8518 UNHANDLED;
8519 }
8520 break;
8521
8522 case IA64_RS_CFM:
8523 if (note != 2)
8524 {
8525 specs[count++] = tmpl;
8526 }
8527 else
8528 {
8529 /* Check if any of the registers accessed are in the rotating region.
8530 mov to/from pr accesses CFM only when qp_regno is in the rotating
8531 region */
8532 for (i = 0; i < NELEMS (idesc->operands); i++)
8533 {
8534 if (idesc->operands[i] == IA64_OPND_R1
8535 || idesc->operands[i] == IA64_OPND_R2
8536 || idesc->operands[i] == IA64_OPND_R3)
8537 {
8538 int num = CURR_SLOT.opnd[i].X_add_number - REG_GR;
8539 /* Assumes that md.rot.num_regs is always valid */
8540 if (md.rot.num_regs > 0
8541 && num > 31
8542 && num < 31 + md.rot.num_regs)
8543 {
8544 specs[count] = tmpl;
8545 specs[count++].specific = 0;
8546 }
8547 }
8548 else if (idesc->operands[i] == IA64_OPND_F1
8549 || idesc->operands[i] == IA64_OPND_F2
8550 || idesc->operands[i] == IA64_OPND_F3
8551 || idesc->operands[i] == IA64_OPND_F4)
8552 {
8553 int num = CURR_SLOT.opnd[i].X_add_number - REG_FR;
8554 if (num > 31)
8555 {
8556 specs[count] = tmpl;
8557 specs[count++].specific = 0;
8558 }
8559 }
8560 else if (idesc->operands[i] == IA64_OPND_P1
8561 || idesc->operands[i] == IA64_OPND_P2)
8562 {
8563 int num = CURR_SLOT.opnd[i].X_add_number - REG_P;
8564 if (num > 15)
8565 {
8566 specs[count] = tmpl;
8567 specs[count++].specific = 0;
8568 }
8569 }
8570 }
8571 if (CURR_SLOT.qp_regno > 15)
8572 {
8573 specs[count] = tmpl;
8574 specs[count++].specific = 0;
8575 }
8576 }
8577 break;
8578
8579 /* This is the same as IA64_RS_PRr, except simplified to account for
8580 the fact that there is only one register. */
8581 case IA64_RS_PR63:
8582 if (note == 0)
8583 {
8584 specs[count++] = tmpl;
8585 }
8586 else if (note == 7)
8587 {
8588 valueT mask = 0;
8589 if (idesc->operands[2] == IA64_OPND_IMM17)
8590 mask = CURR_SLOT.opnd[2].X_add_number;
8591 if (mask & ((valueT) 1 << 63))
8592 specs[count++] = tmpl;
8593 }
8594 else if (note == 11)
8595 {
8596 if ((idesc->operands[0] == IA64_OPND_P1
8597 && CURR_SLOT.opnd[0].X_add_number - REG_P == 63)
8598 || (idesc->operands[1] == IA64_OPND_P2
8599 && CURR_SLOT.opnd[1].X_add_number - REG_P == 63))
8600 {
8601 specs[count++] = tmpl;
8602 }
8603 }
8604 else if (note == 12)
8605 {
8606 if (CURR_SLOT.qp_regno == 63)
8607 {
8608 specs[count++] = tmpl;
8609 }
8610 }
8611 else if (note == 1)
8612 {
8613 if (rsrc_write)
8614 {
8615 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8616 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8617 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8618 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8619
8620 if (p1 == 63
8621 && (idesc->operands[0] == IA64_OPND_P1
8622 || idesc->operands[0] == IA64_OPND_P2))
8623 {
8624 specs[count] = tmpl;
8625 specs[count++].cmp_type =
8626 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8627 }
8628 if (p2 == 63
8629 && (idesc->operands[1] == IA64_OPND_P1
8630 || idesc->operands[1] == IA64_OPND_P2))
8631 {
8632 specs[count] = tmpl;
8633 specs[count++].cmp_type =
8634 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8635 }
8636 }
8637 else
8638 {
8639 if (CURR_SLOT.qp_regno == 63)
8640 {
8641 specs[count++] = tmpl;
8642 }
8643 }
8644 }
8645 else
8646 {
8647 UNHANDLED;
8648 }
8649 break;
8650
8651 case IA64_RS_RSE:
8652 /* FIXME we can identify some individual RSE written resources, but RSE
8653 read resources have not yet been completely identified, so for now
8654 treat RSE as a single resource */
8655 if (strncmp (idesc->name, "mov", 3) == 0)
8656 {
8657 if (rsrc_write)
8658 {
8659 if (idesc->operands[0] == IA64_OPND_AR3
8660 && CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE)
8661 {
8662 specs[count] = tmpl;
8663 specs[count++].index = 0; /* IA64_RSE_BSPLOAD/RNATBITINDEX */
8664 }
8665 }
8666 else
8667 {
8668 if (idesc->operands[0] == IA64_OPND_AR3)
8669 {
8670 if (CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE
8671 || CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_RNAT)
8672 {
8673 specs[count++] = tmpl;
8674 }
8675 }
8676 else if (idesc->operands[1] == IA64_OPND_AR3)
8677 {
8678 if (CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSP
8679 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSPSTORE
8680 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_RNAT)
8681 {
8682 specs[count++] = tmpl;
8683 }
8684 }
8685 }
8686 }
8687 else
8688 {
8689 specs[count++] = tmpl;
8690 }
8691 break;
8692
8693 case IA64_RS_ANY:
8694 /* FIXME -- do any of these need to be non-specific? */
8695 specs[count++] = tmpl;
8696 break;
8697
8698 default:
8699 as_bad (_("Unrecognized dependency specifier %d\n"), dep->specifier);
8700 break;
8701 }
8702
8703 return count;
8704 }
8705
8706 /* Clear branch flags on marked resources. This breaks the link between the
8707 QP of the marking instruction and a subsequent branch on the same QP. */
8708
8709 static void
8710 clear_qp_branch_flag (mask)
8711 valueT mask;
8712 {
8713 int i;
8714 for (i = 0; i < regdepslen; i++)
8715 {
8716 valueT bit = ((valueT) 1 << regdeps[i].qp_regno);
8717 if ((bit & mask) != 0)
8718 {
8719 regdeps[i].link_to_qp_branch = 0;
8720 }
8721 }
8722 }
8723
8724 /* Remove any mutexes which contain any of the PRs indicated in the mask.
8725
8726 Any changes to a PR clears the mutex relations which include that PR. */
8727
8728 static void
8729 clear_qp_mutex (mask)
8730 valueT mask;
8731 {
8732 int i;
8733
8734 i = 0;
8735 while (i < qp_mutexeslen)
8736 {
8737 if ((qp_mutexes[i].prmask & mask) != 0)
8738 {
8739 if (md.debug_dv)
8740 {
8741 fprintf (stderr, " Clearing mutex relation");
8742 print_prmask (qp_mutexes[i].prmask);
8743 fprintf (stderr, "\n");
8744 }
8745 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
8746 }
8747 else
8748 ++i;
8749 }
8750 }
8751
8752 /* Clear implies relations which contain PRs in the given masks.
8753 P1_MASK indicates the source of the implies relation, while P2_MASK
8754 indicates the implied PR. */
8755
8756 static void
8757 clear_qp_implies (p1_mask, p2_mask)
8758 valueT p1_mask;
8759 valueT p2_mask;
8760 {
8761 int i;
8762
8763 i = 0;
8764 while (i < qp_implieslen)
8765 {
8766 if ((((valueT) 1 << qp_implies[i].p1) & p1_mask) != 0
8767 || (((valueT) 1 << qp_implies[i].p2) & p2_mask) != 0)
8768 {
8769 if (md.debug_dv)
8770 fprintf (stderr, "Clearing implied relation PR%d->PR%d\n",
8771 qp_implies[i].p1, qp_implies[i].p2);
8772 qp_implies[i] = qp_implies[--qp_implieslen];
8773 }
8774 else
8775 ++i;
8776 }
8777 }
8778
8779 /* Add the PRs specified to the list of implied relations. */
8780
8781 static void
8782 add_qp_imply (p1, p2)
8783 int p1, p2;
8784 {
8785 valueT mask;
8786 valueT bit;
8787 int i;
8788
8789 /* p0 is not meaningful here. */
8790 if (p1 == 0 || p2 == 0)
8791 abort ();
8792
8793 if (p1 == p2)
8794 return;
8795
8796 /* If it exists already, ignore it. */
8797 for (i = 0; i < qp_implieslen; i++)
8798 {
8799 if (qp_implies[i].p1 == p1
8800 && qp_implies[i].p2 == p2
8801 && qp_implies[i].path == md.path
8802 && !qp_implies[i].p2_branched)
8803 return;
8804 }
8805
8806 if (qp_implieslen == qp_impliestotlen)
8807 {
8808 qp_impliestotlen += 20;
8809 qp_implies = (struct qp_imply *)
8810 xrealloc ((void *) qp_implies,
8811 qp_impliestotlen * sizeof (struct qp_imply));
8812 }
8813 if (md.debug_dv)
8814 fprintf (stderr, " Registering PR%d implies PR%d\n", p1, p2);
8815 qp_implies[qp_implieslen].p1 = p1;
8816 qp_implies[qp_implieslen].p2 = p2;
8817 qp_implies[qp_implieslen].path = md.path;
8818 qp_implies[qp_implieslen++].p2_branched = 0;
8819
8820 /* Add in the implied transitive relations; for everything that p2 implies,
8821 make p1 imply that, too; for everything that implies p1, make it imply p2
8822 as well. */
8823 for (i = 0; i < qp_implieslen; i++)
8824 {
8825 if (qp_implies[i].p1 == p2)
8826 add_qp_imply (p1, qp_implies[i].p2);
8827 if (qp_implies[i].p2 == p1)
8828 add_qp_imply (qp_implies[i].p1, p2);
8829 }
8830 /* Add in mutex relations implied by this implies relation; for each mutex
8831 relation containing p2, duplicate it and replace p2 with p1. */
8832 bit = (valueT) 1 << p1;
8833 mask = (valueT) 1 << p2;
8834 for (i = 0; i < qp_mutexeslen; i++)
8835 {
8836 if (qp_mutexes[i].prmask & mask)
8837 add_qp_mutex ((qp_mutexes[i].prmask & ~mask) | bit);
8838 }
8839 }
8840
8841 /* Add the PRs specified in the mask to the mutex list; this means that only
8842 one of the PRs can be true at any time. PR0 should never be included in
8843 the mask. */
8844
8845 static void
8846 add_qp_mutex (mask)
8847 valueT mask;
8848 {
8849 if (mask & 0x1)
8850 abort ();
8851
8852 if (qp_mutexeslen == qp_mutexestotlen)
8853 {
8854 qp_mutexestotlen += 20;
8855 qp_mutexes = (struct qpmutex *)
8856 xrealloc ((void *) qp_mutexes,
8857 qp_mutexestotlen * sizeof (struct qpmutex));
8858 }
8859 if (md.debug_dv)
8860 {
8861 fprintf (stderr, " Registering mutex on");
8862 print_prmask (mask);
8863 fprintf (stderr, "\n");
8864 }
8865 qp_mutexes[qp_mutexeslen].path = md.path;
8866 qp_mutexes[qp_mutexeslen++].prmask = mask;
8867 }
8868
8869 static int
8870 has_suffix_p (name, suffix)
8871 const char *name;
8872 const char *suffix;
8873 {
8874 size_t namelen = strlen (name);
8875 size_t sufflen = strlen (suffix);
8876
8877 if (namelen <= sufflen)
8878 return 0;
8879 return strcmp (name + namelen - sufflen, suffix) == 0;
8880 }
8881
8882 static void
8883 clear_register_values ()
8884 {
8885 int i;
8886 if (md.debug_dv)
8887 fprintf (stderr, " Clearing register values\n");
8888 for (i = 1; i < NELEMS (gr_values); i++)
8889 gr_values[i].known = 0;
8890 }
8891
8892 /* Keep track of register values/changes which affect DV tracking.
8893
8894 optimization note: should add a flag to classes of insns where otherwise we
8895 have to examine a group of strings to identify them. */
8896
8897 static void
8898 note_register_values (idesc)
8899 struct ia64_opcode *idesc;
8900 {
8901 valueT qp_changemask = 0;
8902 int i;
8903
8904 /* Invalidate values for registers being written to. */
8905 for (i = 0; i < idesc->num_outputs; i++)
8906 {
8907 if (idesc->operands[i] == IA64_OPND_R1
8908 || idesc->operands[i] == IA64_OPND_R2
8909 || idesc->operands[i] == IA64_OPND_R3)
8910 {
8911 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
8912 if (regno > 0 && regno < NELEMS (gr_values))
8913 gr_values[regno].known = 0;
8914 }
8915 else if (idesc->operands[i] == IA64_OPND_R3_2)
8916 {
8917 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
8918 if (regno > 0 && regno < 4)
8919 gr_values[regno].known = 0;
8920 }
8921 else if (idesc->operands[i] == IA64_OPND_P1
8922 || idesc->operands[i] == IA64_OPND_P2)
8923 {
8924 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8925 qp_changemask |= (valueT) 1 << regno;
8926 }
8927 else if (idesc->operands[i] == IA64_OPND_PR)
8928 {
8929 if (idesc->operands[2] & (valueT) 0x10000)
8930 qp_changemask = ~(valueT) 0x1FFFF | idesc->operands[2];
8931 else
8932 qp_changemask = idesc->operands[2];
8933 break;
8934 }
8935 else if (idesc->operands[i] == IA64_OPND_PR_ROT)
8936 {
8937 if (idesc->operands[1] & ((valueT) 1 << 43))
8938 qp_changemask = ~(valueT) 0xFFFFFFFFFFF | idesc->operands[1];
8939 else
8940 qp_changemask = idesc->operands[1];
8941 qp_changemask &= ~(valueT) 0xFFFF;
8942 break;
8943 }
8944 }
8945
8946 /* Always clear qp branch flags on any PR change. */
8947 /* FIXME there may be exceptions for certain compares. */
8948 clear_qp_branch_flag (qp_changemask);
8949
8950 /* Invalidate rotating registers on insns which affect RRBs in CFM. */
8951 if (idesc->flags & IA64_OPCODE_MOD_RRBS)
8952 {
8953 qp_changemask |= ~(valueT) 0xFFFF;
8954 if (strcmp (idesc->name, "clrrrb.pr") != 0)
8955 {
8956 for (i = 32; i < 32 + md.rot.num_regs; i++)
8957 gr_values[i].known = 0;
8958 }
8959 clear_qp_mutex (qp_changemask);
8960 clear_qp_implies (qp_changemask, qp_changemask);
8961 }
8962 /* After a call, all register values are undefined, except those marked
8963 as "safe". */
8964 else if (strncmp (idesc->name, "br.call", 6) == 0
8965 || strncmp (idesc->name, "brl.call", 7) == 0)
8966 {
8967 /* FIXME keep GR values which are marked as "safe_across_calls" */
8968 clear_register_values ();
8969 clear_qp_mutex (~qp_safe_across_calls);
8970 clear_qp_implies (~qp_safe_across_calls, ~qp_safe_across_calls);
8971 clear_qp_branch_flag (~qp_safe_across_calls);
8972 }
8973 else if (is_interruption_or_rfi (idesc)
8974 || is_taken_branch (idesc))
8975 {
8976 clear_register_values ();
8977 clear_qp_mutex (~(valueT) 0);
8978 clear_qp_implies (~(valueT) 0, ~(valueT) 0);
8979 }
8980 /* Look for mutex and implies relations. */
8981 else if ((idesc->operands[0] == IA64_OPND_P1
8982 || idesc->operands[0] == IA64_OPND_P2)
8983 && (idesc->operands[1] == IA64_OPND_P1
8984 || idesc->operands[1] == IA64_OPND_P2))
8985 {
8986 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8987 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8988 valueT p1mask = (valueT) 1 << p1;
8989 valueT p2mask = (valueT) 1 << p2;
8990
8991 /* If one of the PRs is PR0, we can't really do anything. */
8992 if (p1 == 0 || p2 == 0)
8993 {
8994 if (md.debug_dv)
8995 fprintf (stderr, " Ignoring PRs due to inclusion of p0\n");
8996 }
8997 /* In general, clear mutexes and implies which include P1 or P2,
8998 with the following exceptions. */
8999 else if (has_suffix_p (idesc->name, ".or.andcm")
9000 || has_suffix_p (idesc->name, ".and.orcm"))
9001 {
9002 add_qp_mutex (p1mask | p2mask);
9003 clear_qp_implies (p2mask, p1mask);
9004 }
9005 else if (has_suffix_p (idesc->name, ".andcm")
9006 || has_suffix_p (idesc->name, ".and"))
9007 {
9008 clear_qp_implies (0, p1mask | p2mask);
9009 }
9010 else if (has_suffix_p (idesc->name, ".orcm")
9011 || has_suffix_p (idesc->name, ".or"))
9012 {
9013 clear_qp_mutex (p1mask | p2mask);
9014 clear_qp_implies (p1mask | p2mask, 0);
9015 }
9016 else
9017 {
9018 clear_qp_implies (p1mask | p2mask, p1mask | p2mask);
9019 if (has_suffix_p (idesc->name, ".unc"))
9020 {
9021 add_qp_mutex (p1mask | p2mask);
9022 if (CURR_SLOT.qp_regno != 0)
9023 {
9024 add_qp_imply (CURR_SLOT.opnd[0].X_add_number - REG_P,
9025 CURR_SLOT.qp_regno);
9026 add_qp_imply (CURR_SLOT.opnd[1].X_add_number - REG_P,
9027 CURR_SLOT.qp_regno);
9028 }
9029 }
9030 else if (CURR_SLOT.qp_regno == 0)
9031 {
9032 add_qp_mutex (p1mask | p2mask);
9033 }
9034 else
9035 {
9036 clear_qp_mutex (p1mask | p2mask);
9037 }
9038 }
9039 }
9040 /* Look for mov imm insns into GRs. */
9041 else if (idesc->operands[0] == IA64_OPND_R1
9042 && (idesc->operands[1] == IA64_OPND_IMM22
9043 || idesc->operands[1] == IA64_OPND_IMMU64)
9044 && (strcmp (idesc->name, "mov") == 0
9045 || strcmp (idesc->name, "movl") == 0))
9046 {
9047 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
9048 if (regno > 0 && regno < NELEMS (gr_values))
9049 {
9050 gr_values[regno].known = 1;
9051 gr_values[regno].value = CURR_SLOT.opnd[1].X_add_number;
9052 gr_values[regno].path = md.path;
9053 if (md.debug_dv)
9054 {
9055 fprintf (stderr, " Know gr%d = ", regno);
9056 fprintf_vma (stderr, gr_values[regno].value);
9057 fputs ("\n", stderr);
9058 }
9059 }
9060 }
9061 else
9062 {
9063 clear_qp_mutex (qp_changemask);
9064 clear_qp_implies (qp_changemask, qp_changemask);
9065 }
9066 }
9067
9068 /* Return whether the given predicate registers are currently mutex. */
9069
9070 static int
9071 qp_mutex (p1, p2, path)
9072 int p1;
9073 int p2;
9074 int path;
9075 {
9076 int i;
9077 valueT mask;
9078
9079 if (p1 != p2)
9080 {
9081 mask = ((valueT) 1 << p1) | (valueT) 1 << p2;
9082 for (i = 0; i < qp_mutexeslen; i++)
9083 {
9084 if (qp_mutexes[i].path >= path
9085 && (qp_mutexes[i].prmask & mask) == mask)
9086 return 1;
9087 }
9088 }
9089 return 0;
9090 }
9091
9092 /* Return whether the given resource is in the given insn's list of chks
9093 Return 1 if the conflict is absolutely determined, 2 if it's a potential
9094 conflict. */
9095
9096 static int
9097 resources_match (rs, idesc, note, qp_regno, path)
9098 struct rsrc *rs;
9099 struct ia64_opcode *idesc;
9100 int note;
9101 int qp_regno;
9102 int path;
9103 {
9104 struct rsrc specs[MAX_SPECS];
9105 int count;
9106
9107 /* If the marked resource's qp_regno and the given qp_regno are mutex,
9108 we don't need to check. One exception is note 11, which indicates that
9109 target predicates are written regardless of PR[qp]. */
9110 if (qp_mutex (rs->qp_regno, qp_regno, path)
9111 && note != 11)
9112 return 0;
9113
9114 count = specify_resource (rs->dependency, idesc, DV_CHK, specs, note, path);
9115 while (count-- > 0)
9116 {
9117 /* UNAT checking is a bit more specific than other resources */
9118 if (rs->dependency->specifier == IA64_RS_AR_UNAT
9119 && specs[count].mem_offset.hint
9120 && rs->mem_offset.hint)
9121 {
9122 if (rs->mem_offset.base == specs[count].mem_offset.base)
9123 {
9124 if (((rs->mem_offset.offset >> 3) & 0x3F) ==
9125 ((specs[count].mem_offset.offset >> 3) & 0x3F))
9126 return 1;
9127 else
9128 continue;
9129 }
9130 }
9131
9132 /* Skip apparent PR write conflicts where both writes are an AND or both
9133 writes are an OR. */
9134 if (rs->dependency->specifier == IA64_RS_PR
9135 || rs->dependency->specifier == IA64_RS_PRr
9136 || rs->dependency->specifier == IA64_RS_PR63)
9137 {
9138 if (specs[count].cmp_type != CMP_NONE
9139 && specs[count].cmp_type == rs->cmp_type)
9140 {
9141 if (md.debug_dv)
9142 fprintf (stderr, " %s on parallel compare allowed (PR%d)\n",
9143 dv_mode[rs->dependency->mode],
9144 rs->dependency->specifier != IA64_RS_PR63 ?
9145 specs[count].index : 63);
9146 continue;
9147 }
9148 if (md.debug_dv)
9149 fprintf (stderr,
9150 " %s on parallel compare conflict %s vs %s on PR%d\n",
9151 dv_mode[rs->dependency->mode],
9152 dv_cmp_type[rs->cmp_type],
9153 dv_cmp_type[specs[count].cmp_type],
9154 rs->dependency->specifier != IA64_RS_PR63 ?
9155 specs[count].index : 63);
9156
9157 }
9158
9159 /* If either resource is not specific, conservatively assume a conflict
9160 */
9161 if (!specs[count].specific || !rs->specific)
9162 return 2;
9163 else if (specs[count].index == rs->index)
9164 return 1;
9165 }
9166 #if 0
9167 if (md.debug_dv)
9168 fprintf (stderr, " No %s conflicts\n", rs->dependency->name);
9169 #endif
9170
9171 return 0;
9172 }
9173
9174 /* Indicate an instruction group break; if INSERT_STOP is non-zero, then
9175 insert a stop to create the break. Update all resource dependencies
9176 appropriately. If QP_REGNO is non-zero, only apply the break to resources
9177 which use the same QP_REGNO and have the link_to_qp_branch flag set.
9178 If SAVE_CURRENT is non-zero, don't affect resources marked by the current
9179 instruction. */
9180
9181 static void
9182 insn_group_break (insert_stop, qp_regno, save_current)
9183 int insert_stop;
9184 int qp_regno;
9185 int save_current;
9186 {
9187 int i;
9188
9189 if (insert_stop && md.num_slots_in_use > 0)
9190 PREV_SLOT.end_of_insn_group = 1;
9191
9192 if (md.debug_dv)
9193 {
9194 fprintf (stderr, " Insn group break%s",
9195 (insert_stop ? " (w/stop)" : ""));
9196 if (qp_regno != 0)
9197 fprintf (stderr, " effective for QP=%d", qp_regno);
9198 fprintf (stderr, "\n");
9199 }
9200
9201 i = 0;
9202 while (i < regdepslen)
9203 {
9204 const struct ia64_dependency *dep = regdeps[i].dependency;
9205
9206 if (qp_regno != 0
9207 && regdeps[i].qp_regno != qp_regno)
9208 {
9209 ++i;
9210 continue;
9211 }
9212
9213 if (save_current
9214 && CURR_SLOT.src_file == regdeps[i].file
9215 && CURR_SLOT.src_line == regdeps[i].line)
9216 {
9217 ++i;
9218 continue;
9219 }
9220
9221 /* clear dependencies which are automatically cleared by a stop, or
9222 those that have reached the appropriate state of insn serialization */
9223 if (dep->semantics == IA64_DVS_IMPLIED
9224 || dep->semantics == IA64_DVS_IMPLIEDF
9225 || regdeps[i].insn_srlz == STATE_SRLZ)
9226 {
9227 print_dependency ("Removing", i);
9228 regdeps[i] = regdeps[--regdepslen];
9229 }
9230 else
9231 {
9232 if (dep->semantics == IA64_DVS_DATA
9233 || dep->semantics == IA64_DVS_INSTR
9234 || dep->semantics == IA64_DVS_SPECIFIC)
9235 {
9236 if (regdeps[i].insn_srlz == STATE_NONE)
9237 regdeps[i].insn_srlz = STATE_STOP;
9238 if (regdeps[i].data_srlz == STATE_NONE)
9239 regdeps[i].data_srlz = STATE_STOP;
9240 }
9241 ++i;
9242 }
9243 }
9244 }
9245
9246 /* Add the given resource usage spec to the list of active dependencies. */
9247
9248 static void
9249 mark_resource (idesc, dep, spec, depind, path)
9250 struct ia64_opcode *idesc ATTRIBUTE_UNUSED;
9251 const struct ia64_dependency *dep ATTRIBUTE_UNUSED;
9252 struct rsrc *spec;
9253 int depind;
9254 int path;
9255 {
9256 if (regdepslen == regdepstotlen)
9257 {
9258 regdepstotlen += 20;
9259 regdeps = (struct rsrc *)
9260 xrealloc ((void *) regdeps,
9261 regdepstotlen * sizeof (struct rsrc));
9262 }
9263
9264 regdeps[regdepslen] = *spec;
9265 regdeps[regdepslen].depind = depind;
9266 regdeps[regdepslen].path = path;
9267 regdeps[regdepslen].file = CURR_SLOT.src_file;
9268 regdeps[regdepslen].line = CURR_SLOT.src_line;
9269
9270 print_dependency ("Adding", regdepslen);
9271
9272 ++regdepslen;
9273 }
9274
9275 static void
9276 print_dependency (action, depind)
9277 const char *action;
9278 int depind;
9279 {
9280 if (md.debug_dv)
9281 {
9282 fprintf (stderr, " %s %s '%s'",
9283 action, dv_mode[(regdeps[depind].dependency)->mode],
9284 (regdeps[depind].dependency)->name);
9285 if (regdeps[depind].specific && regdeps[depind].index != 0)
9286 fprintf (stderr, " (%d)", regdeps[depind].index);
9287 if (regdeps[depind].mem_offset.hint)
9288 {
9289 fputs (" ", stderr);
9290 fprintf_vma (stderr, regdeps[depind].mem_offset.base);
9291 fputs ("+", stderr);
9292 fprintf_vma (stderr, regdeps[depind].mem_offset.offset);
9293 }
9294 fprintf (stderr, "\n");
9295 }
9296 }
9297
9298 static void
9299 instruction_serialization ()
9300 {
9301 int i;
9302 if (md.debug_dv)
9303 fprintf (stderr, " Instruction serialization\n");
9304 for (i = 0; i < regdepslen; i++)
9305 if (regdeps[i].insn_srlz == STATE_STOP)
9306 regdeps[i].insn_srlz = STATE_SRLZ;
9307 }
9308
9309 static void
9310 data_serialization ()
9311 {
9312 int i = 0;
9313 if (md.debug_dv)
9314 fprintf (stderr, " Data serialization\n");
9315 while (i < regdepslen)
9316 {
9317 if (regdeps[i].data_srlz == STATE_STOP
9318 /* Note: as of 991210, all "other" dependencies are cleared by a
9319 data serialization. This might change with new tables */
9320 || (regdeps[i].dependency)->semantics == IA64_DVS_OTHER)
9321 {
9322 print_dependency ("Removing", i);
9323 regdeps[i] = regdeps[--regdepslen];
9324 }
9325 else
9326 ++i;
9327 }
9328 }
9329
9330 /* Insert stops and serializations as needed to avoid DVs. */
9331
9332 static void
9333 remove_marked_resource (rs)
9334 struct rsrc *rs;
9335 {
9336 switch (rs->dependency->semantics)
9337 {
9338 case IA64_DVS_SPECIFIC:
9339 if (md.debug_dv)
9340 fprintf (stderr, "Implementation-specific, assume worst case...\n");
9341 /* ...fall through... */
9342 case IA64_DVS_INSTR:
9343 if (md.debug_dv)
9344 fprintf (stderr, "Inserting instr serialization\n");
9345 if (rs->insn_srlz < STATE_STOP)
9346 insn_group_break (1, 0, 0);
9347 if (rs->insn_srlz < STATE_SRLZ)
9348 {
9349 int oldqp = CURR_SLOT.qp_regno;
9350 struct ia64_opcode *oldidesc = CURR_SLOT.idesc;
9351 /* Manually jam a srlz.i insn into the stream */
9352 CURR_SLOT.qp_regno = 0;
9353 CURR_SLOT.idesc = ia64_find_opcode ("srlz.i");
9354 instruction_serialization ();
9355 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
9356 if (++md.num_slots_in_use >= NUM_SLOTS)
9357 emit_one_bundle ();
9358 CURR_SLOT.qp_regno = oldqp;
9359 CURR_SLOT.idesc = oldidesc;
9360 }
9361 insn_group_break (1, 0, 0);
9362 break;
9363 case IA64_DVS_OTHER: /* as of rev2 (991220) of the DV tables, all
9364 "other" types of DV are eliminated
9365 by a data serialization */
9366 case IA64_DVS_DATA:
9367 if (md.debug_dv)
9368 fprintf (stderr, "Inserting data serialization\n");
9369 if (rs->data_srlz < STATE_STOP)
9370 insn_group_break (1, 0, 0);
9371 {
9372 int oldqp = CURR_SLOT.qp_regno;
9373 struct ia64_opcode *oldidesc = CURR_SLOT.idesc;
9374 /* Manually jam a srlz.d insn into the stream */
9375 CURR_SLOT.qp_regno = 0;
9376 CURR_SLOT.idesc = ia64_find_opcode ("srlz.d");
9377 data_serialization ();
9378 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
9379 if (++md.num_slots_in_use >= NUM_SLOTS)
9380 emit_one_bundle ();
9381 CURR_SLOT.qp_regno = oldqp;
9382 CURR_SLOT.idesc = oldidesc;
9383 }
9384 break;
9385 case IA64_DVS_IMPLIED:
9386 case IA64_DVS_IMPLIEDF:
9387 if (md.debug_dv)
9388 fprintf (stderr, "Inserting stop\n");
9389 insn_group_break (1, 0, 0);
9390 break;
9391 default:
9392 break;
9393 }
9394 }
9395
9396 /* Check the resources used by the given opcode against the current dependency
9397 list.
9398
9399 The check is run once for each execution path encountered. In this case,
9400 a unique execution path is the sequence of instructions following a code
9401 entry point, e.g. the following has three execution paths, one starting
9402 at L0, one at L1, and one at L2.
9403
9404 L0: nop
9405 L1: add
9406 L2: add
9407 br.ret
9408 */
9409
9410 static void
9411 check_dependencies (idesc)
9412 struct ia64_opcode *idesc;
9413 {
9414 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
9415 int path;
9416 int i;
9417
9418 /* Note that the number of marked resources may change within the
9419 loop if in auto mode. */
9420 i = 0;
9421 while (i < regdepslen)
9422 {
9423 struct rsrc *rs = &regdeps[i];
9424 const struct ia64_dependency *dep = rs->dependency;
9425 int chkind;
9426 int note;
9427 int start_over = 0;
9428
9429 if (dep->semantics == IA64_DVS_NONE
9430 || (chkind = depends_on (rs->depind, idesc)) == -1)
9431 {
9432 ++i;
9433 continue;
9434 }
9435
9436 note = NOTE (opdeps->chks[chkind]);
9437
9438 /* Check this resource against each execution path seen thus far. */
9439 for (path = 0; path <= md.path; path++)
9440 {
9441 int matchtype;
9442
9443 /* If the dependency wasn't on the path being checked, ignore it. */
9444 if (rs->path < path)
9445 continue;
9446
9447 /* If the QP for this insn implies a QP which has branched, don't
9448 bother checking. Ed. NOTE: I don't think this check is terribly
9449 useful; what's the point of generating code which will only be
9450 reached if its QP is zero?
9451 This code was specifically inserted to handle the following code,
9452 based on notes from Intel's DV checking code, where p1 implies p2.
9453
9454 mov r4 = 2
9455 (p2) br.cond L
9456 (p1) mov r4 = 7
9457 */
9458 if (CURR_SLOT.qp_regno != 0)
9459 {
9460 int skip = 0;
9461 int implies;
9462 for (implies = 0; implies < qp_implieslen; implies++)
9463 {
9464 if (qp_implies[implies].path >= path
9465 && qp_implies[implies].p1 == CURR_SLOT.qp_regno
9466 && qp_implies[implies].p2_branched)
9467 {
9468 skip = 1;
9469 break;
9470 }
9471 }
9472 if (skip)
9473 continue;
9474 }
9475
9476 if ((matchtype = resources_match (rs, idesc, note,
9477 CURR_SLOT.qp_regno, path)) != 0)
9478 {
9479 char msg[1024];
9480 char pathmsg[256] = "";
9481 char indexmsg[256] = "";
9482 int certain = (matchtype == 1 && CURR_SLOT.qp_regno == 0);
9483
9484 if (path != 0)
9485 sprintf (pathmsg, " when entry is at label '%s'",
9486 md.entry_labels[path - 1]);
9487 if (rs->specific && rs->index != 0)
9488 sprintf (indexmsg, ", specific resource number is %d",
9489 rs->index);
9490 sprintf (msg, "Use of '%s' %s %s dependency '%s' (%s)%s%s",
9491 idesc->name,
9492 (certain ? "violates" : "may violate"),
9493 dv_mode[dep->mode], dep->name,
9494 dv_sem[dep->semantics],
9495 pathmsg, indexmsg);
9496
9497 if (md.explicit_mode)
9498 {
9499 as_warn ("%s", msg);
9500 if (path < md.path)
9501 as_warn (_("Only the first path encountering the conflict "
9502 "is reported"));
9503 as_warn_where (rs->file, rs->line,
9504 _("This is the location of the "
9505 "conflicting usage"));
9506 /* Don't bother checking other paths, to avoid duplicating
9507 the same warning */
9508 break;
9509 }
9510 else
9511 {
9512 if (md.debug_dv)
9513 fprintf (stderr, "%s @ %s:%d\n", msg, rs->file, rs->line);
9514
9515 remove_marked_resource (rs);
9516
9517 /* since the set of dependencies has changed, start over */
9518 /* FIXME -- since we're removing dvs as we go, we
9519 probably don't really need to start over... */
9520 start_over = 1;
9521 break;
9522 }
9523 }
9524 }
9525 if (start_over)
9526 i = 0;
9527 else
9528 ++i;
9529 }
9530 }
9531
9532 /* Register new dependencies based on the given opcode. */
9533
9534 static void
9535 mark_resources (idesc)
9536 struct ia64_opcode *idesc;
9537 {
9538 int i;
9539 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
9540 int add_only_qp_reads = 0;
9541
9542 /* A conditional branch only uses its resources if it is taken; if it is
9543 taken, we stop following that path. The other branch types effectively
9544 *always* write their resources. If it's not taken, register only QP
9545 reads. */
9546 if (is_conditional_branch (idesc) || is_interruption_or_rfi (idesc))
9547 {
9548 add_only_qp_reads = 1;
9549 }
9550
9551 if (md.debug_dv)
9552 fprintf (stderr, "Registering '%s' resource usage\n", idesc->name);
9553
9554 for (i = 0; i < opdeps->nregs; i++)
9555 {
9556 const struct ia64_dependency *dep;
9557 struct rsrc specs[MAX_SPECS];
9558 int note;
9559 int path;
9560 int count;
9561
9562 dep = ia64_find_dependency (opdeps->regs[i]);
9563 note = NOTE (opdeps->regs[i]);
9564
9565 if (add_only_qp_reads
9566 && !(dep->mode == IA64_DV_WAR
9567 && (dep->specifier == IA64_RS_PR
9568 || dep->specifier == IA64_RS_PRr
9569 || dep->specifier == IA64_RS_PR63)))
9570 continue;
9571
9572 count = specify_resource (dep, idesc, DV_REG, specs, note, md.path);
9573
9574 #if 0
9575 if (md.debug_dv && !count)
9576 fprintf (stderr, " No %s %s usage found (path %d)\n",
9577 dv_mode[dep->mode], dep->name, md.path);
9578 #endif
9579
9580 while (count-- > 0)
9581 {
9582 mark_resource (idesc, dep, &specs[count],
9583 DEP (opdeps->regs[i]), md.path);
9584 }
9585
9586 /* The execution path may affect register values, which may in turn
9587 affect which indirect-access resources are accessed. */
9588 switch (dep->specifier)
9589 {
9590 default:
9591 break;
9592 case IA64_RS_CPUID:
9593 case IA64_RS_DBR:
9594 case IA64_RS_IBR:
9595 case IA64_RS_MSR:
9596 case IA64_RS_PKR:
9597 case IA64_RS_PMC:
9598 case IA64_RS_PMD:
9599 case IA64_RS_RR:
9600 for (path = 0; path < md.path; path++)
9601 {
9602 count = specify_resource (dep, idesc, DV_REG, specs, note, path);
9603 while (count-- > 0)
9604 mark_resource (idesc, dep, &specs[count],
9605 DEP (opdeps->regs[i]), path);
9606 }
9607 break;
9608 }
9609 }
9610 }
9611
9612 /* Remove dependencies when they no longer apply. */
9613
9614 static void
9615 update_dependencies (idesc)
9616 struct ia64_opcode *idesc;
9617 {
9618 int i;
9619
9620 if (strcmp (idesc->name, "srlz.i") == 0)
9621 {
9622 instruction_serialization ();
9623 }
9624 else if (strcmp (idesc->name, "srlz.d") == 0)
9625 {
9626 data_serialization ();
9627 }
9628 else if (is_interruption_or_rfi (idesc)
9629 || is_taken_branch (idesc))
9630 {
9631 /* Although technically the taken branch doesn't clear dependencies
9632 which require a srlz.[id], we don't follow the branch; the next
9633 instruction is assumed to start with a clean slate. */
9634 regdepslen = 0;
9635 md.path = 0;
9636 }
9637 else if (is_conditional_branch (idesc)
9638 && CURR_SLOT.qp_regno != 0)
9639 {
9640 int is_call = strstr (idesc->name, ".call") != NULL;
9641
9642 for (i = 0; i < qp_implieslen; i++)
9643 {
9644 /* If the conditional branch's predicate is implied by the predicate
9645 in an existing dependency, remove that dependency. */
9646 if (qp_implies[i].p2 == CURR_SLOT.qp_regno)
9647 {
9648 int depind = 0;
9649 /* Note that this implied predicate takes a branch so that if
9650 a later insn generates a DV but its predicate implies this
9651 one, we can avoid the false DV warning. */
9652 qp_implies[i].p2_branched = 1;
9653 while (depind < regdepslen)
9654 {
9655 if (regdeps[depind].qp_regno == qp_implies[i].p1)
9656 {
9657 print_dependency ("Removing", depind);
9658 regdeps[depind] = regdeps[--regdepslen];
9659 }
9660 else
9661 ++depind;
9662 }
9663 }
9664 }
9665 /* Any marked resources which have this same predicate should be
9666 cleared, provided that the QP hasn't been modified between the
9667 marking instruction and the branch. */
9668 if (is_call)
9669 {
9670 insn_group_break (0, CURR_SLOT.qp_regno, 1);
9671 }
9672 else
9673 {
9674 i = 0;
9675 while (i < regdepslen)
9676 {
9677 if (regdeps[i].qp_regno == CURR_SLOT.qp_regno
9678 && regdeps[i].link_to_qp_branch
9679 && (regdeps[i].file != CURR_SLOT.src_file
9680 || regdeps[i].line != CURR_SLOT.src_line))
9681 {
9682 /* Treat like a taken branch */
9683 print_dependency ("Removing", i);
9684 regdeps[i] = regdeps[--regdepslen];
9685 }
9686 else
9687 ++i;
9688 }
9689 }
9690 }
9691 }
9692
9693 /* Examine the current instruction for dependency violations. */
9694
9695 static int
9696 check_dv (idesc)
9697 struct ia64_opcode *idesc;
9698 {
9699 if (md.debug_dv)
9700 {
9701 fprintf (stderr, "Checking %s for violations (line %d, %d/%d)\n",
9702 idesc->name, CURR_SLOT.src_line,
9703 idesc->dependencies->nchks,
9704 idesc->dependencies->nregs);
9705 }
9706
9707 /* Look through the list of currently marked resources; if the current
9708 instruction has the dependency in its chks list which uses that resource,
9709 check against the specific resources used. */
9710 check_dependencies (idesc);
9711
9712 /* Look up the instruction's regdeps (RAW writes, WAW writes, and WAR reads),
9713 then add them to the list of marked resources. */
9714 mark_resources (idesc);
9715
9716 /* There are several types of dependency semantics, and each has its own
9717 requirements for being cleared
9718
9719 Instruction serialization (insns separated by interruption, rfi, or
9720 writer + srlz.i + reader, all in separate groups) clears DVS_INSTR.
9721
9722 Data serialization (instruction serialization, or writer + srlz.d +
9723 reader, where writer and srlz.d are in separate groups) clears
9724 DVS_DATA. (This also clears DVS_OTHER, but that is not guaranteed to
9725 always be the case).
9726
9727 Instruction group break (groups separated by stop, taken branch,
9728 interruption or rfi) clears DVS_IMPLIED and DVS_IMPLIEDF.
9729 */
9730 update_dependencies (idesc);
9731
9732 /* Sometimes, knowing a register value allows us to avoid giving a false DV
9733 warning. Keep track of as many as possible that are useful. */
9734 note_register_values (idesc);
9735
9736 /* We don't need or want this anymore. */
9737 md.mem_offset.hint = 0;
9738
9739 return 0;
9740 }
9741
9742 /* Translate one line of assembly. Pseudo ops and labels do not show
9743 here. */
9744 void
9745 md_assemble (str)
9746 char *str;
9747 {
9748 char *saved_input_line_pointer, *mnemonic;
9749 const struct pseudo_opcode *pdesc;
9750 struct ia64_opcode *idesc;
9751 unsigned char qp_regno;
9752 unsigned int flags;
9753 int ch;
9754
9755 saved_input_line_pointer = input_line_pointer;
9756 input_line_pointer = str;
9757
9758 /* extract the opcode (mnemonic): */
9759
9760 mnemonic = input_line_pointer;
9761 ch = get_symbol_end ();
9762 pdesc = (struct pseudo_opcode *) hash_find (md.pseudo_hash, mnemonic);
9763 if (pdesc)
9764 {
9765 *input_line_pointer = ch;
9766 (*pdesc->handler) (pdesc->arg);
9767 goto done;
9768 }
9769
9770 /* Find the instruction descriptor matching the arguments. */
9771
9772 idesc = ia64_find_opcode (mnemonic);
9773 *input_line_pointer = ch;
9774 if (!idesc)
9775 {
9776 as_bad ("Unknown opcode `%s'", mnemonic);
9777 goto done;
9778 }
9779
9780 idesc = parse_operands (idesc);
9781 if (!idesc)
9782 goto done;
9783
9784 /* Handle the dynamic ops we can handle now: */
9785 if (idesc->type == IA64_TYPE_DYN)
9786 {
9787 if (strcmp (idesc->name, "add") == 0)
9788 {
9789 if (CURR_SLOT.opnd[2].X_op == O_register
9790 && CURR_SLOT.opnd[2].X_add_number < 4)
9791 mnemonic = "addl";
9792 else
9793 mnemonic = "adds";
9794 ia64_free_opcode (idesc);
9795 idesc = ia64_find_opcode (mnemonic);
9796 #if 0
9797 know (!idesc->next);
9798 #endif
9799 }
9800 else if (strcmp (idesc->name, "mov") == 0)
9801 {
9802 enum ia64_opnd opnd1, opnd2;
9803 int rop;
9804
9805 opnd1 = idesc->operands[0];
9806 opnd2 = idesc->operands[1];
9807 if (opnd1 == IA64_OPND_AR3)
9808 rop = 0;
9809 else if (opnd2 == IA64_OPND_AR3)
9810 rop = 1;
9811 else
9812 abort ();
9813 if (CURR_SLOT.opnd[rop].X_op == O_register
9814 && ar_is_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
9815 mnemonic = "mov.i";
9816 else
9817 mnemonic = "mov.m";
9818 ia64_free_opcode (idesc);
9819 idesc = ia64_find_opcode (mnemonic);
9820 while (idesc != NULL
9821 && (idesc->operands[0] != opnd1
9822 || idesc->operands[1] != opnd2))
9823 idesc = get_next_opcode (idesc);
9824 }
9825 }
9826
9827 qp_regno = 0;
9828 if (md.qp.X_op == O_register)
9829 {
9830 qp_regno = md.qp.X_add_number - REG_P;
9831 md.qp.X_op = O_absent;
9832 }
9833
9834 flags = idesc->flags;
9835
9836 if ((flags & IA64_OPCODE_FIRST) != 0)
9837 insn_group_break (1, 0, 0);
9838
9839 if ((flags & IA64_OPCODE_NO_PRED) != 0 && qp_regno != 0)
9840 {
9841 as_bad ("`%s' cannot be predicated", idesc->name);
9842 goto done;
9843 }
9844
9845 /* Build the instruction. */
9846 CURR_SLOT.qp_regno = qp_regno;
9847 CURR_SLOT.idesc = idesc;
9848 as_where (&CURR_SLOT.src_file, &CURR_SLOT.src_line);
9849 dwarf2_where (&CURR_SLOT.debug_line);
9850
9851 /* Add unwind entry, if there is one. */
9852 if (unwind.current_entry)
9853 {
9854 CURR_SLOT.unwind_record = unwind.current_entry;
9855 unwind.current_entry = NULL;
9856 }
9857
9858 /* Check for dependency violations. */
9859 if (md.detect_dv)
9860 check_dv (idesc);
9861
9862 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
9863 if (++md.num_slots_in_use >= NUM_SLOTS)
9864 emit_one_bundle ();
9865
9866 if ((flags & IA64_OPCODE_LAST) != 0)
9867 insn_group_break (1, 0, 0);
9868
9869 md.last_text_seg = now_seg;
9870
9871 done:
9872 input_line_pointer = saved_input_line_pointer;
9873 }
9874
9875 /* Called when symbol NAME cannot be found in the symbol table.
9876 Should be used for dynamic valued symbols only. */
9877
9878 symbolS *
9879 md_undefined_symbol (name)
9880 char *name ATTRIBUTE_UNUSED;
9881 {
9882 return 0;
9883 }
9884
9885 /* Called for any expression that can not be recognized. When the
9886 function is called, `input_line_pointer' will point to the start of
9887 the expression. */
9888
9889 void
9890 md_operand (e)
9891 expressionS *e;
9892 {
9893 enum pseudo_type pseudo_type;
9894 const char *name;
9895 size_t len;
9896 int ch, i;
9897
9898 switch (*input_line_pointer)
9899 {
9900 case '@':
9901 /* Find what relocation pseudo-function we're dealing with. */
9902 pseudo_type = 0;
9903 ch = *++input_line_pointer;
9904 for (i = 0; i < NELEMS (pseudo_func); ++i)
9905 if (pseudo_func[i].name && pseudo_func[i].name[0] == ch)
9906 {
9907 len = strlen (pseudo_func[i].name);
9908 if (strncmp (pseudo_func[i].name + 1,
9909 input_line_pointer + 1, len - 1) == 0
9910 && !is_part_of_name (input_line_pointer[len]))
9911 {
9912 input_line_pointer += len;
9913 pseudo_type = pseudo_func[i].type;
9914 break;
9915 }
9916 }
9917 switch (pseudo_type)
9918 {
9919 case PSEUDO_FUNC_RELOC:
9920 SKIP_WHITESPACE ();
9921 if (*input_line_pointer != '(')
9922 {
9923 as_bad ("Expected '('");
9924 goto err;
9925 }
9926 /* Skip '('. */
9927 ++input_line_pointer;
9928 expression (e);
9929 if (*input_line_pointer++ != ')')
9930 {
9931 as_bad ("Missing ')'");
9932 goto err;
9933 }
9934 if (e->X_op != O_symbol)
9935 {
9936 if (e->X_op != O_pseudo_fixup)
9937 {
9938 as_bad ("Not a symbolic expression");
9939 goto err;
9940 }
9941 if (i != FUNC_LT_RELATIVE)
9942 {
9943 as_bad ("Illegal combination of relocation functions");
9944 goto err;
9945 }
9946 switch (S_GET_VALUE (e->X_op_symbol))
9947 {
9948 case FUNC_FPTR_RELATIVE:
9949 i = FUNC_LT_FPTR_RELATIVE; break;
9950 case FUNC_DTP_MODULE:
9951 i = FUNC_LT_DTP_MODULE; break;
9952 case FUNC_DTP_RELATIVE:
9953 i = FUNC_LT_DTP_RELATIVE; break;
9954 case FUNC_TP_RELATIVE:
9955 i = FUNC_LT_TP_RELATIVE; break;
9956 default:
9957 as_bad ("Illegal combination of relocation functions");
9958 goto err;
9959 }
9960 }
9961 /* Make sure gas doesn't get rid of local symbols that are used
9962 in relocs. */
9963 e->X_op = O_pseudo_fixup;
9964 e->X_op_symbol = pseudo_func[i].u.sym;
9965 break;
9966
9967 case PSEUDO_FUNC_CONST:
9968 e->X_op = O_constant;
9969 e->X_add_number = pseudo_func[i].u.ival;
9970 break;
9971
9972 case PSEUDO_FUNC_REG:
9973 e->X_op = O_register;
9974 e->X_add_number = pseudo_func[i].u.ival;
9975 break;
9976
9977 default:
9978 name = input_line_pointer - 1;
9979 get_symbol_end ();
9980 as_bad ("Unknown pseudo function `%s'", name);
9981 goto err;
9982 }
9983 break;
9984
9985 case '[':
9986 ++input_line_pointer;
9987 expression (e);
9988 if (*input_line_pointer != ']')
9989 {
9990 as_bad ("Closing bracket misssing");
9991 goto err;
9992 }
9993 else
9994 {
9995 if (e->X_op != O_register)
9996 as_bad ("Register expected as index");
9997
9998 ++input_line_pointer;
9999 e->X_op = O_index;
10000 }
10001 break;
10002
10003 default:
10004 break;
10005 }
10006 return;
10007
10008 err:
10009 ignore_rest_of_line ();
10010 }
10011
10012 /* Return 1 if it's OK to adjust a reloc by replacing the symbol with
10013 a section symbol plus some offset. For relocs involving @fptr(),
10014 directives we don't want such adjustments since we need to have the
10015 original symbol's name in the reloc. */
10016 int
10017 ia64_fix_adjustable (fix)
10018 fixS *fix;
10019 {
10020 /* Prevent all adjustments to global symbols */
10021 if (S_IS_EXTERN (fix->fx_addsy) || S_IS_WEAK (fix->fx_addsy))
10022 return 0;
10023
10024 switch (fix->fx_r_type)
10025 {
10026 case BFD_RELOC_IA64_FPTR64I:
10027 case BFD_RELOC_IA64_FPTR32MSB:
10028 case BFD_RELOC_IA64_FPTR32LSB:
10029 case BFD_RELOC_IA64_FPTR64MSB:
10030 case BFD_RELOC_IA64_FPTR64LSB:
10031 case BFD_RELOC_IA64_LTOFF_FPTR22:
10032 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10033 return 0;
10034 default:
10035 break;
10036 }
10037
10038 return 1;
10039 }
10040
10041 int
10042 ia64_force_relocation (fix)
10043 fixS *fix;
10044 {
10045 switch (fix->fx_r_type)
10046 {
10047 case BFD_RELOC_IA64_FPTR64I:
10048 case BFD_RELOC_IA64_FPTR32MSB:
10049 case BFD_RELOC_IA64_FPTR32LSB:
10050 case BFD_RELOC_IA64_FPTR64MSB:
10051 case BFD_RELOC_IA64_FPTR64LSB:
10052
10053 case BFD_RELOC_IA64_LTOFF22:
10054 case BFD_RELOC_IA64_LTOFF64I:
10055 case BFD_RELOC_IA64_LTOFF_FPTR22:
10056 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10057 case BFD_RELOC_IA64_PLTOFF22:
10058 case BFD_RELOC_IA64_PLTOFF64I:
10059 case BFD_RELOC_IA64_PLTOFF64MSB:
10060 case BFD_RELOC_IA64_PLTOFF64LSB:
10061
10062 case BFD_RELOC_IA64_LTOFF22X:
10063 case BFD_RELOC_IA64_LDXMOV:
10064 return 1;
10065
10066 default:
10067 break;
10068 }
10069
10070 return generic_force_reloc (fix);
10071 }
10072
10073 /* Decide from what point a pc-relative relocation is relative to,
10074 relative to the pc-relative fixup. Er, relatively speaking. */
10075 long
10076 ia64_pcrel_from_section (fix, sec)
10077 fixS *fix;
10078 segT sec;
10079 {
10080 unsigned long off = fix->fx_frag->fr_address + fix->fx_where;
10081
10082 if (bfd_get_section_flags (stdoutput, sec) & SEC_CODE)
10083 off &= ~0xfUL;
10084
10085 return off;
10086 }
10087
10088
10089 /* Used to emit section-relative relocs for the dwarf2 debug data. */
10090 void
10091 ia64_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
10092 {
10093 expressionS expr;
10094
10095 expr.X_op = O_pseudo_fixup;
10096 expr.X_op_symbol = pseudo_func[FUNC_SEC_RELATIVE].u.sym;
10097 expr.X_add_number = 0;
10098 expr.X_add_symbol = symbol;
10099 emit_expr (&expr, size);
10100 }
10101
10102 /* This is called whenever some data item (not an instruction) needs a
10103 fixup. We pick the right reloc code depending on the byteorder
10104 currently in effect. */
10105 void
10106 ia64_cons_fix_new (f, where, nbytes, exp)
10107 fragS *f;
10108 int where;
10109 int nbytes;
10110 expressionS *exp;
10111 {
10112 bfd_reloc_code_real_type code;
10113 fixS *fix;
10114
10115 switch (nbytes)
10116 {
10117 /* There are no reloc for 8 and 16 bit quantities, but we allow
10118 them here since they will work fine as long as the expression
10119 is fully defined at the end of the pass over the source file. */
10120 case 1: code = BFD_RELOC_8; break;
10121 case 2: code = BFD_RELOC_16; break;
10122 case 4:
10123 if (target_big_endian)
10124 code = BFD_RELOC_IA64_DIR32MSB;
10125 else
10126 code = BFD_RELOC_IA64_DIR32LSB;
10127 break;
10128
10129 case 8:
10130 /* In 32-bit mode, data8 could mean function descriptors too. */
10131 if (exp->X_op == O_pseudo_fixup
10132 && exp->X_op_symbol
10133 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC
10134 && !(md.flags & EF_IA_64_ABI64))
10135 {
10136 if (target_big_endian)
10137 code = BFD_RELOC_IA64_IPLTMSB;
10138 else
10139 code = BFD_RELOC_IA64_IPLTLSB;
10140 exp->X_op = O_symbol;
10141 break;
10142 }
10143 else
10144 {
10145 if (target_big_endian)
10146 code = BFD_RELOC_IA64_DIR64MSB;
10147 else
10148 code = BFD_RELOC_IA64_DIR64LSB;
10149 break;
10150 }
10151
10152 case 16:
10153 if (exp->X_op == O_pseudo_fixup
10154 && exp->X_op_symbol
10155 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC)
10156 {
10157 if (target_big_endian)
10158 code = BFD_RELOC_IA64_IPLTMSB;
10159 else
10160 code = BFD_RELOC_IA64_IPLTLSB;
10161 exp->X_op = O_symbol;
10162 break;
10163 }
10164 /* FALLTHRU */
10165
10166 default:
10167 as_bad ("Unsupported fixup size %d", nbytes);
10168 ignore_rest_of_line ();
10169 return;
10170 }
10171
10172 if (exp->X_op == O_pseudo_fixup)
10173 {
10174 exp->X_op = O_symbol;
10175 code = ia64_gen_real_reloc_type (exp->X_op_symbol, code);
10176 /* ??? If code unchanged, unsupported. */
10177 }
10178
10179 fix = fix_new_exp (f, where, nbytes, exp, 0, code);
10180 /* We need to store the byte order in effect in case we're going
10181 to fix an 8 or 16 bit relocation (for which there no real
10182 relocs available). See md_apply_fix3(). */
10183 fix->tc_fix_data.bigendian = target_big_endian;
10184 }
10185
10186 /* Return the actual relocation we wish to associate with the pseudo
10187 reloc described by SYM and R_TYPE. SYM should be one of the
10188 symbols in the pseudo_func array, or NULL. */
10189
10190 static bfd_reloc_code_real_type
10191 ia64_gen_real_reloc_type (sym, r_type)
10192 struct symbol *sym;
10193 bfd_reloc_code_real_type r_type;
10194 {
10195 bfd_reloc_code_real_type new = 0;
10196
10197 if (sym == NULL)
10198 {
10199 return r_type;
10200 }
10201
10202 switch (S_GET_VALUE (sym))
10203 {
10204 case FUNC_FPTR_RELATIVE:
10205 switch (r_type)
10206 {
10207 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_FPTR64I; break;
10208 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_FPTR32MSB; break;
10209 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_FPTR32LSB; break;
10210 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_FPTR64MSB; break;
10211 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_FPTR64LSB; break;
10212 default: break;
10213 }
10214 break;
10215
10216 case FUNC_GP_RELATIVE:
10217 switch (r_type)
10218 {
10219 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_GPREL22; break;
10220 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_GPREL64I; break;
10221 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_GPREL32MSB; break;
10222 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_GPREL32LSB; break;
10223 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_GPREL64MSB; break;
10224 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_GPREL64LSB; break;
10225 default: break;
10226 }
10227 break;
10228
10229 case FUNC_LT_RELATIVE:
10230 switch (r_type)
10231 {
10232 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_LTOFF22; break;
10233 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_LTOFF64I; break;
10234 default: break;
10235 }
10236 break;
10237
10238 case FUNC_LT_RELATIVE_X:
10239 switch (r_type)
10240 {
10241 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_LTOFF22X; break;
10242 default: break;
10243 }
10244 break;
10245
10246 case FUNC_PC_RELATIVE:
10247 switch (r_type)
10248 {
10249 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_PCREL22; break;
10250 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_PCREL64I; break;
10251 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_PCREL32MSB; break;
10252 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_PCREL32LSB; break;
10253 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_PCREL64MSB; break;
10254 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_PCREL64LSB; break;
10255 default: break;
10256 }
10257 break;
10258
10259 case FUNC_PLT_RELATIVE:
10260 switch (r_type)
10261 {
10262 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_PLTOFF22; break;
10263 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_PLTOFF64I; break;
10264 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_PLTOFF64MSB;break;
10265 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_PLTOFF64LSB;break;
10266 default: break;
10267 }
10268 break;
10269
10270 case FUNC_SEC_RELATIVE:
10271 switch (r_type)
10272 {
10273 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_SECREL32MSB;break;
10274 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_SECREL32LSB;break;
10275 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_SECREL64MSB;break;
10276 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_SECREL64LSB;break;
10277 default: break;
10278 }
10279 break;
10280
10281 case FUNC_SEG_RELATIVE:
10282 switch (r_type)
10283 {
10284 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_SEGREL32MSB;break;
10285 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_SEGREL32LSB;break;
10286 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_SEGREL64MSB;break;
10287 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_SEGREL64LSB;break;
10288 default: break;
10289 }
10290 break;
10291
10292 case FUNC_LTV_RELATIVE:
10293 switch (r_type)
10294 {
10295 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_LTV32MSB; break;
10296 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_LTV32LSB; break;
10297 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_LTV64MSB; break;
10298 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_LTV64LSB; break;
10299 default: break;
10300 }
10301 break;
10302
10303 case FUNC_LT_FPTR_RELATIVE:
10304 switch (r_type)
10305 {
10306 case BFD_RELOC_IA64_IMM22:
10307 new = BFD_RELOC_IA64_LTOFF_FPTR22; break;
10308 case BFD_RELOC_IA64_IMM64:
10309 new = BFD_RELOC_IA64_LTOFF_FPTR64I; break;
10310 default:
10311 break;
10312 }
10313 break;
10314
10315 case FUNC_TP_RELATIVE:
10316 switch (r_type)
10317 {
10318 case BFD_RELOC_IA64_IMM14:
10319 new = BFD_RELOC_IA64_TPREL14; break;
10320 case BFD_RELOC_IA64_IMM22:
10321 new = BFD_RELOC_IA64_TPREL22; break;
10322 case BFD_RELOC_IA64_IMM64:
10323 new = BFD_RELOC_IA64_TPREL64I; break;
10324 default:
10325 break;
10326 }
10327 break;
10328
10329 case FUNC_LT_TP_RELATIVE:
10330 switch (r_type)
10331 {
10332 case BFD_RELOC_IA64_IMM22:
10333 new = BFD_RELOC_IA64_LTOFF_TPREL22; break;
10334 default:
10335 break;
10336 }
10337 break;
10338
10339 case FUNC_LT_DTP_MODULE:
10340 switch (r_type)
10341 {
10342 case BFD_RELOC_IA64_IMM22:
10343 new = BFD_RELOC_IA64_LTOFF_DTPMOD22; break;
10344 default:
10345 break;
10346 }
10347 break;
10348
10349 case FUNC_DTP_RELATIVE:
10350 switch (r_type)
10351 {
10352 case BFD_RELOC_IA64_DIR64MSB:
10353 new = BFD_RELOC_IA64_DTPREL64MSB; break;
10354 case BFD_RELOC_IA64_DIR64LSB:
10355 new = BFD_RELOC_IA64_DTPREL64LSB; break;
10356 case BFD_RELOC_IA64_IMM14:
10357 new = BFD_RELOC_IA64_DTPREL14; break;
10358 case BFD_RELOC_IA64_IMM22:
10359 new = BFD_RELOC_IA64_DTPREL22; break;
10360 case BFD_RELOC_IA64_IMM64:
10361 new = BFD_RELOC_IA64_DTPREL64I; break;
10362 default:
10363 break;
10364 }
10365 break;
10366
10367 case FUNC_LT_DTP_RELATIVE:
10368 switch (r_type)
10369 {
10370 case BFD_RELOC_IA64_IMM22:
10371 new = BFD_RELOC_IA64_LTOFF_DTPREL22; break;
10372 default:
10373 break;
10374 }
10375 break;
10376
10377 case FUNC_IPLT_RELOC:
10378 break;
10379
10380 default:
10381 abort ();
10382 }
10383
10384 /* Hmmmm. Should this ever occur? */
10385 if (new)
10386 return new;
10387 else
10388 return r_type;
10389 }
10390
10391 /* Here is where generate the appropriate reloc for pseudo relocation
10392 functions. */
10393 void
10394 ia64_validate_fix (fix)
10395 fixS *fix;
10396 {
10397 switch (fix->fx_r_type)
10398 {
10399 case BFD_RELOC_IA64_FPTR64I:
10400 case BFD_RELOC_IA64_FPTR32MSB:
10401 case BFD_RELOC_IA64_FPTR64LSB:
10402 case BFD_RELOC_IA64_LTOFF_FPTR22:
10403 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10404 if (fix->fx_offset != 0)
10405 as_bad_where (fix->fx_file, fix->fx_line,
10406 "No addend allowed in @fptr() relocation");
10407 break;
10408 default:
10409 break;
10410 }
10411
10412 return;
10413 }
10414
10415 static void
10416 fix_insn (fix, odesc, value)
10417 fixS *fix;
10418 const struct ia64_operand *odesc;
10419 valueT value;
10420 {
10421 bfd_vma insn[3], t0, t1, control_bits;
10422 const char *err;
10423 char *fixpos;
10424 long slot;
10425
10426 slot = fix->fx_where & 0x3;
10427 fixpos = fix->fx_frag->fr_literal + (fix->fx_where - slot);
10428
10429 /* Bundles are always in little-endian byte order */
10430 t0 = bfd_getl64 (fixpos);
10431 t1 = bfd_getl64 (fixpos + 8);
10432 control_bits = t0 & 0x1f;
10433 insn[0] = (t0 >> 5) & 0x1ffffffffffLL;
10434 insn[1] = ((t0 >> 46) & 0x3ffff) | ((t1 & 0x7fffff) << 18);
10435 insn[2] = (t1 >> 23) & 0x1ffffffffffLL;
10436
10437 err = NULL;
10438 if (odesc - elf64_ia64_operands == IA64_OPND_IMMU64)
10439 {
10440 insn[1] = (value >> 22) & 0x1ffffffffffLL;
10441 insn[2] |= (((value & 0x7f) << 13)
10442 | (((value >> 7) & 0x1ff) << 27)
10443 | (((value >> 16) & 0x1f) << 22)
10444 | (((value >> 21) & 0x1) << 21)
10445 | (((value >> 63) & 0x1) << 36));
10446 }
10447 else if (odesc - elf64_ia64_operands == IA64_OPND_IMMU62)
10448 {
10449 if (value & ~0x3fffffffffffffffULL)
10450 err = "integer operand out of range";
10451 insn[1] = (value >> 21) & 0x1ffffffffffLL;
10452 insn[2] |= (((value & 0xfffff) << 6) | (((value >> 20) & 0x1) << 36));
10453 }
10454 else if (odesc - elf64_ia64_operands == IA64_OPND_TGT64)
10455 {
10456 value >>= 4;
10457 insn[1] = ((value >> 20) & 0x7fffffffffLL) << 2;
10458 insn[2] |= ((((value >> 59) & 0x1) << 36)
10459 | (((value >> 0) & 0xfffff) << 13));
10460 }
10461 else
10462 err = (*odesc->insert) (odesc, value, insn + slot);
10463
10464 if (err)
10465 as_bad_where (fix->fx_file, fix->fx_line, err);
10466
10467 t0 = control_bits | (insn[0] << 5) | (insn[1] << 46);
10468 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
10469 number_to_chars_littleendian (fixpos + 0, t0, 8);
10470 number_to_chars_littleendian (fixpos + 8, t1, 8);
10471 }
10472
10473 /* Attempt to simplify or even eliminate a fixup. The return value is
10474 ignored; perhaps it was once meaningful, but now it is historical.
10475 To indicate that a fixup has been eliminated, set FIXP->FX_DONE.
10476
10477 If fixp->fx_addsy is non-NULL, we'll have to generate a reloc entry
10478 (if possible). */
10479
10480 void
10481 md_apply_fix3 (fix, valP, seg)
10482 fixS *fix;
10483 valueT *valP;
10484 segT seg ATTRIBUTE_UNUSED;
10485 {
10486 char *fixpos;
10487 valueT value = *valP;
10488
10489 fixpos = fix->fx_frag->fr_literal + fix->fx_where;
10490
10491 if (fix->fx_pcrel)
10492 {
10493 switch (fix->fx_r_type)
10494 {
10495 case BFD_RELOC_IA64_DIR32MSB:
10496 fix->fx_r_type = BFD_RELOC_IA64_PCREL32MSB;
10497 break;
10498
10499 case BFD_RELOC_IA64_DIR32LSB:
10500 fix->fx_r_type = BFD_RELOC_IA64_PCREL32LSB;
10501 break;
10502
10503 case BFD_RELOC_IA64_DIR64MSB:
10504 fix->fx_r_type = BFD_RELOC_IA64_PCREL64MSB;
10505 break;
10506
10507 case BFD_RELOC_IA64_DIR64LSB:
10508 fix->fx_r_type = BFD_RELOC_IA64_PCREL64LSB;
10509 break;
10510
10511 default:
10512 break;
10513 }
10514 }
10515 if (fix->fx_addsy)
10516 {
10517 switch (fix->fx_r_type)
10518 {
10519 case BFD_RELOC_UNUSED:
10520 /* This must be a TAG13 or TAG13b operand. There are no external
10521 relocs defined for them, so we must give an error. */
10522 as_bad_where (fix->fx_file, fix->fx_line,
10523 "%s must have a constant value",
10524 elf64_ia64_operands[fix->tc_fix_data.opnd].desc);
10525 fix->fx_done = 1;
10526 return;
10527
10528 case BFD_RELOC_IA64_TPREL14:
10529 case BFD_RELOC_IA64_TPREL22:
10530 case BFD_RELOC_IA64_TPREL64I:
10531 case BFD_RELOC_IA64_LTOFF_TPREL22:
10532 case BFD_RELOC_IA64_LTOFF_DTPMOD22:
10533 case BFD_RELOC_IA64_DTPREL14:
10534 case BFD_RELOC_IA64_DTPREL22:
10535 case BFD_RELOC_IA64_DTPREL64I:
10536 case BFD_RELOC_IA64_LTOFF_DTPREL22:
10537 S_SET_THREAD_LOCAL (fix->fx_addsy);
10538 break;
10539
10540 default:
10541 break;
10542 }
10543 }
10544 else if (fix->tc_fix_data.opnd == IA64_OPND_NIL)
10545 {
10546 if (fix->tc_fix_data.bigendian)
10547 number_to_chars_bigendian (fixpos, value, fix->fx_size);
10548 else
10549 number_to_chars_littleendian (fixpos, value, fix->fx_size);
10550 fix->fx_done = 1;
10551 }
10552 else
10553 {
10554 fix_insn (fix, elf64_ia64_operands + fix->tc_fix_data.opnd, value);
10555 fix->fx_done = 1;
10556 }
10557 }
10558
10559 /* Generate the BFD reloc to be stuck in the object file from the
10560 fixup used internally in the assembler. */
10561
10562 arelent *
10563 tc_gen_reloc (sec, fixp)
10564 asection *sec ATTRIBUTE_UNUSED;
10565 fixS *fixp;
10566 {
10567 arelent *reloc;
10568
10569 reloc = xmalloc (sizeof (*reloc));
10570 reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
10571 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
10572 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
10573 reloc->addend = fixp->fx_offset;
10574 reloc->howto = bfd_reloc_type_lookup (stdoutput, fixp->fx_r_type);
10575
10576 if (!reloc->howto)
10577 {
10578 as_bad_where (fixp->fx_file, fixp->fx_line,
10579 "Cannot represent %s relocation in object file",
10580 bfd_get_reloc_code_name (fixp->fx_r_type));
10581 }
10582 return reloc;
10583 }
10584
10585 /* Turn a string in input_line_pointer into a floating point constant
10586 of type TYPE, and store the appropriate bytes in *LIT. The number
10587 of LITTLENUMS emitted is stored in *SIZE. An error message is
10588 returned, or NULL on OK. */
10589
10590 #define MAX_LITTLENUMS 5
10591
10592 char *
10593 md_atof (type, lit, size)
10594 int type;
10595 char *lit;
10596 int *size;
10597 {
10598 LITTLENUM_TYPE words[MAX_LITTLENUMS];
10599 char *t;
10600 int prec;
10601
10602 switch (type)
10603 {
10604 /* IEEE floats */
10605 case 'f':
10606 case 'F':
10607 case 's':
10608 case 'S':
10609 prec = 2;
10610 break;
10611
10612 case 'd':
10613 case 'D':
10614 case 'r':
10615 case 'R':
10616 prec = 4;
10617 break;
10618
10619 case 'x':
10620 case 'X':
10621 case 'p':
10622 case 'P':
10623 prec = 5;
10624 break;
10625
10626 default:
10627 *size = 0;
10628 return "Bad call to MD_ATOF()";
10629 }
10630 t = atof_ieee (input_line_pointer, type, words);
10631 if (t)
10632 input_line_pointer = t;
10633
10634 (*ia64_float_to_chars) (lit, words, prec);
10635
10636 if (type == 'X')
10637 {
10638 /* It is 10 byte floating point with 6 byte padding. */
10639 memset (&lit [10], 0, 6);
10640 *size = 8 * sizeof (LITTLENUM_TYPE);
10641 }
10642 else
10643 *size = prec * sizeof (LITTLENUM_TYPE);
10644
10645 return 0;
10646 }
10647
10648 /* Handle ia64 specific semantics of the align directive. */
10649
10650 void
10651 ia64_md_do_align (n, fill, len, max)
10652 int n ATTRIBUTE_UNUSED;
10653 const char *fill ATTRIBUTE_UNUSED;
10654 int len ATTRIBUTE_UNUSED;
10655 int max ATTRIBUTE_UNUSED;
10656 {
10657 if (subseg_text_p (now_seg))
10658 ia64_flush_insns ();
10659 }
10660
10661 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
10662 of an rs_align_code fragment. */
10663
10664 void
10665 ia64_handle_align (fragp)
10666 fragS *fragp;
10667 {
10668 /* Use mfi bundle of nops with no stop bits. */
10669 static const unsigned char be_nop[]
10670 = { 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
10671 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0c};
10672 static const unsigned char le_nop[]
10673 = { 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
10674 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00};
10675
10676 int bytes;
10677 char *p;
10678
10679 if (fragp->fr_type != rs_align_code)
10680 return;
10681
10682 bytes = fragp->fr_next->fr_address - fragp->fr_address - fragp->fr_fix;
10683 p = fragp->fr_literal + fragp->fr_fix;
10684
10685 /* Make sure we are on a 16-byte boundary, in case someone has been
10686 putting data into a text section. */
10687 if (bytes & 15)
10688 {
10689 int fix = bytes & 15;
10690 memset (p, 0, fix);
10691 p += fix;
10692 bytes -= fix;
10693 fragp->fr_fix += fix;
10694 }
10695
10696 memcpy (p, (target_big_endian ? be_nop : le_nop), 16);
10697 fragp->fr_var = 16;
10698 }
10699
10700 static void
10701 ia64_float_to_chars_bigendian (char *lit, LITTLENUM_TYPE *words,
10702 int prec)
10703 {
10704 while (prec--)
10705 {
10706 number_to_chars_bigendian (lit, (long) (*words++),
10707 sizeof (LITTLENUM_TYPE));
10708 lit += sizeof (LITTLENUM_TYPE);
10709 }
10710 }
10711
10712 static void
10713 ia64_float_to_chars_littleendian (char *lit, LITTLENUM_TYPE *words,
10714 int prec)
10715 {
10716 while (prec--)
10717 {
10718 number_to_chars_littleendian (lit, (long) (words[prec]),
10719 sizeof (LITTLENUM_TYPE));
10720 lit += sizeof (LITTLENUM_TYPE);
10721 }
10722 }
10723
10724 void
10725 ia64_elf_section_change_hook (void)
10726 {
10727 dot_byteorder (-1);
10728 }
10729
10730 /* Check if a label should be made global. */
10731 void
10732 ia64_check_label (symbolS *label)
10733 {
10734 if (*input_line_pointer == ':')
10735 {
10736 S_SET_EXTERNAL (label);
10737 input_line_pointer++;
10738 }
10739 }
This page took 0.462965 seconds and 5 git commands to generate.