1 /* tc-ia64.c -- Assembler for the HP/Intel IA-64 architecture.
2 Copyright (C) 1998-2019 Free Software Foundation, Inc.
3 Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
4
5 This file is part of GAS, the GNU Assembler.
6
7 GAS is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GAS is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GAS; see the file COPYING. If not, write to
19 the Free Software Foundation, 51 Franklin Street - Fifth Floor,
20 Boston, MA 02110-1301, USA. */
21
22 /*
23 TODO:
24
25 - optional operands
26 - directives:
27 .eb
28 .estate
29 .lb
30 .popsection
31 .previous
32 .psr
33 .pushsection
34 - labels are wrong if automatic alignment is introduced
 35    (e.g., check out the second real10 definition in test-data.s)
36 - DV-related stuff:
37 <reg>.safe_across_calls and any other DV-related directives I don't
38 have documentation for.
39 verify mod-sched-brs reads/writes are checked/marked (and other
40 notes)
41
42 */
43
44 #include "as.h"
45 #include "safe-ctype.h"
46 #include "dwarf2dbg.h"
47 #include "subsegs.h"
48
49 #include "opcode/ia64.h"
50
51 #include "elf/ia64.h"
52 #include "bfdver.h"
53 #include <time.h>
54
55 #ifdef HAVE_LIMITS_H
56 #include <limits.h>
57 #endif
58
59 #define NELEMS(a) ((int) (sizeof (a)/sizeof ((a)[0])))
60
61 /* Some systems define MIN in, e.g., param.h. */
62 #undef MIN
63 #define MIN(a,b) ((a) < (b) ? (a) : (b))
64
65 #define NUM_SLOTS 4
66 #define PREV_SLOT md.slot[(md.curr_slot + NUM_SLOTS - 1) % NUM_SLOTS]
67 #define CURR_SLOT md.slot[md.curr_slot]
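/* Editorial note (not part of the original source): slot[] is used as a
   small circular buffer.  For example, with NUM_SLOTS == 4 and
   md.curr_slot == 0, PREV_SLOT expands to md.slot[(0 + 4 - 1) % 4],
   i.e. md.slot[3], so the previously emitted instruction stays reachable
   even after the index wraps around.  */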
68
69 #define O_pseudo_fixup (O_max + 1)
70
71 enum special_section
72 {
73 /* IA-64 ABI section pseudo-ops. */
74 SPECIAL_SECTION_BSS = 0,
75 SPECIAL_SECTION_SBSS,
76 SPECIAL_SECTION_SDATA,
77 SPECIAL_SECTION_RODATA,
78 SPECIAL_SECTION_COMMENT,
79 SPECIAL_SECTION_UNWIND,
80 SPECIAL_SECTION_UNWIND_INFO,
81 /* HPUX specific section pseudo-ops. */
82 SPECIAL_SECTION_INIT_ARRAY,
83 SPECIAL_SECTION_FINI_ARRAY,
84 };
85
86 enum reloc_func
87 {
88 FUNC_DTP_MODULE,
89 FUNC_DTP_RELATIVE,
90 FUNC_FPTR_RELATIVE,
91 FUNC_GP_RELATIVE,
92 FUNC_LT_RELATIVE,
93 FUNC_LT_RELATIVE_X,
94 FUNC_PC_RELATIVE,
95 FUNC_PLT_RELATIVE,
96 FUNC_SEC_RELATIVE,
97 FUNC_SEG_RELATIVE,
98 FUNC_TP_RELATIVE,
99 FUNC_LTV_RELATIVE,
100 FUNC_LT_FPTR_RELATIVE,
101 FUNC_LT_DTP_MODULE,
102 FUNC_LT_DTP_RELATIVE,
103 FUNC_LT_TP_RELATIVE,
104 FUNC_IPLT_RELOC,
105 #ifdef TE_VMS
106 FUNC_SLOTCOUNT_RELOC,
107 #endif
108 };
109
110 enum reg_symbol
111 {
112 REG_GR = 0,
113 REG_FR = (REG_GR + 128),
114 REG_AR = (REG_FR + 128),
115 REG_CR = (REG_AR + 128),
116 REG_DAHR = (REG_CR + 128),
117 REG_P = (REG_DAHR + 8),
118 REG_BR = (REG_P + 64),
119 REG_IP = (REG_BR + 8),
120 REG_CFM,
121 REG_PR,
122 REG_PR_ROT,
123 REG_PSR,
124 REG_PSR_L,
125 REG_PSR_UM,
126 /* The following are pseudo-registers for use by gas only. */
127 IND_CPUID,
128 IND_DBR,
129 IND_DTR,
130 IND_ITR,
131 IND_IBR,
132 IND_MSR,
133 IND_PKR,
134 IND_PMC,
135 IND_PMD,
136 IND_DAHR,
137 IND_RR,
138 /* The following pseudo-registers are used for unwind directives only: */
139 REG_PSP,
140 REG_PRIUNAT,
141 REG_NUM
142 };
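/* Editorial note (not part of the original source): the enumerators above
   lay the register files out in one flat numbering space, so a concrete
   register is encoded as base + index; e.g. f5 would be REG_FR + 5 = 133
   and b2 would be REG_BR + 2.  */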
143
144 enum dynreg_type
145 {
146 DYNREG_GR = 0, /* dynamic general purpose register */
147 DYNREG_FR, /* dynamic floating point register */
148 DYNREG_PR, /* dynamic predicate register */
149 DYNREG_NUM_TYPES
150 };
151
152 enum operand_match_result
153 {
154 OPERAND_MATCH,
155 OPERAND_OUT_OF_RANGE,
156 OPERAND_MISMATCH
157 };
158
159 /* On the ia64, we can't know the address of a text label until the
160 instructions are packed into a bundle. To handle this, we keep
161 track of the list of labels that appear in front of each
162 instruction. */
163 struct label_fix
164 {
165 struct label_fix *next;
166 struct symbol *sym;
167 bfd_boolean dw2_mark_labels;
168 };
169
170 #ifdef TE_VMS
171 /* An internally used relocation. */
172 #define DUMMY_RELOC_IA64_SLOTCOUNT (BFD_RELOC_UNUSED + 1)
173 #endif
174
175 /* This is the endianness of the current section. */
176 extern int target_big_endian;
177
178 /* This is the default endianness. */
179 static int default_big_endian = TARGET_BYTES_BIG_ENDIAN;
180
181 void (*ia64_number_to_chars) (char *, valueT, int);
182
183 static void ia64_float_to_chars_bigendian (char *, LITTLENUM_TYPE *, int);
184 static void ia64_float_to_chars_littleendian (char *, LITTLENUM_TYPE *, int);
185
186 static void (*ia64_float_to_chars) (char *, LITTLENUM_TYPE *, int);
187
188 static struct hash_control *alias_hash;
189 static struct hash_control *alias_name_hash;
190 static struct hash_control *secalias_hash;
191 static struct hash_control *secalias_name_hash;
192
193 /* List of chars besides those in app.c:symbol_chars that can start an
194 operand. Used to prevent the scrubber eating vital white-space. */
195 const char ia64_symbol_chars[] = "@?";
196
197 /* Characters which always start a comment. */
198 const char comment_chars[] = "";
199
200 /* Characters which start a comment at the beginning of a line. */
201 const char line_comment_chars[] = "#";
202
203 /* Characters which may be used to separate multiple commands on a
204 single line. */
205 const char line_separator_chars[] = ";{}";
206
207 /* Characters which are used to indicate an exponent in a floating
208 point number. */
209 const char EXP_CHARS[] = "eE";
210
211 /* Characters which mean that a number is a floating point constant,
212 as in 0d1.0. */
213 const char FLT_CHARS[] = "rRsSfFdDxXpP";
214
215 /* ia64-specific option processing: */
216
217 const char *md_shortopts = "m:N:x::";
218
219 struct option md_longopts[] =
220 {
221 #define OPTION_MCONSTANT_GP (OPTION_MD_BASE + 1)
222 {"mconstant-gp", no_argument, NULL, OPTION_MCONSTANT_GP},
223 #define OPTION_MAUTO_PIC (OPTION_MD_BASE + 2)
224 {"mauto-pic", no_argument, NULL, OPTION_MAUTO_PIC}
225 };
226
227 size_t md_longopts_size = sizeof (md_longopts);
228
229 static struct
230 {
231 struct hash_control *pseudo_hash; /* pseudo opcode hash table */
232 struct hash_control *reg_hash; /* register name hash table */
233 struct hash_control *dynreg_hash; /* dynamic register hash table */
234 struct hash_control *const_hash; /* constant hash table */
235 struct hash_control *entry_hash; /* code entry hint hash table */
236
237 /* If X_op is != O_absent, the register name for the instruction's
238 qualifying predicate. If NULL, p0 is assumed for instructions
239      that are predicable.  */
240 expressionS qp;
241
242 /* Optimize for which CPU. */
243 enum
244 {
245 itanium1,
246 itanium2
247 } tune;
248
249 /* What to do when hint.b is used. */
250 enum
251 {
252 hint_b_error,
253 hint_b_warning,
254 hint_b_ok
255 } hint_b;
256
257 unsigned int
258 manual_bundling : 1,
259 debug_dv: 1,
260 detect_dv: 1,
261 explicit_mode : 1, /* which mode we're in */
262 default_explicit_mode : 1, /* which mode is the default */
263 mode_explicitly_set : 1, /* was the current mode explicitly set? */
264 auto_align : 1,
265 keep_pending_output : 1;
266
267 /* What to do when something is wrong with unwind directives. */
268 enum
269 {
270 unwind_check_warning,
271 unwind_check_error
272 } unwind_check;
273
274 /* Each bundle consists of up to three instructions. We keep
275      track of the four most recent instructions so we can correctly set
276 the end_of_insn_group for the last instruction in a bundle. */
277 int curr_slot;
278 int num_slots_in_use;
279 struct slot
280 {
281 unsigned int
282 end_of_insn_group : 1,
283 manual_bundling_on : 1,
284 manual_bundling_off : 1,
285 loc_directive_seen : 1;
286 signed char user_template; /* user-selected template, if any */
287 unsigned char qp_regno; /* qualifying predicate */
288 /* This duplicates a good fraction of "struct fix" but we
289 can't use a "struct fix" instead since we can't call
290 fix_new_exp() until we know the address of the instruction. */
291 int num_fixups;
292 struct insn_fix
293 {
294 bfd_reloc_code_real_type code;
295 enum ia64_opnd opnd; /* type of operand in need of fix */
296 unsigned int is_pcrel : 1; /* is operand pc-relative? */
297 expressionS expr; /* the value to be inserted */
298 }
299 fixup[2]; /* at most two fixups per insn */
300 struct ia64_opcode *idesc;
301 struct label_fix *label_fixups;
302 struct label_fix *tag_fixups;
303 struct unw_rec_list *unwind_record; /* Unwind directive. */
304 expressionS opnd[6];
305 const char *src_file;
306 unsigned int src_line;
307 struct dwarf2_line_info debug_line;
308 }
309 slot[NUM_SLOTS];
310
311 segT last_text_seg;
312
313 struct dynreg
314 {
315 struct dynreg *next; /* next dynamic register */
316 const char *name;
317 unsigned short base; /* the base register number */
318 unsigned short num_regs; /* # of registers in this set */
319 }
320 *dynreg[DYNREG_NUM_TYPES], in, loc, out, rot;
321
322 flagword flags; /* ELF-header flags */
323
324 struct mem_offset {
325 unsigned hint:1; /* is this hint currently valid? */
326 bfd_vma offset; /* mem.offset offset */
327 bfd_vma base; /* mem.offset base */
328 } mem_offset;
329
330 int path; /* number of alt. entry points seen */
331 const char **entry_labels; /* labels of all alternate paths in
332 the current DV-checking block. */
333 int maxpaths; /* size currently allocated for
334 entry_labels */
335
336 int pointer_size; /* size in bytes of a pointer */
337 int pointer_size_shift; /* shift size of a pointer for alignment */
338
339 symbolS *indregsym[IND_RR - IND_CPUID + 1];
340 }
341 md;
342
343 /* These are not const, because they are modified to MMI for non-itanium1
344 targets below. */
345 /* MFI bundle of nops. */
346 static unsigned char le_nop[16] =
347 {
348 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
349 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00
350 };
351 /* MFI bundle of nops with stop-bit. */
352 static unsigned char le_nop_stop[16] =
353 {
354 0x0d, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
355 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00
356 };
357
358 /* application registers: */
359
360 #define AR_K0 0
361 #define AR_K7 7
362 #define AR_RSC 16
363 #define AR_BSP 17
364 #define AR_BSPSTORE 18
365 #define AR_RNAT 19
366 #define AR_FCR 21
367 #define AR_EFLAG 24
368 #define AR_CSD 25
369 #define AR_SSD 26
370 #define AR_CFLG 27
371 #define AR_FSR 28
372 #define AR_FIR 29
373 #define AR_FDR 30
374 #define AR_CCV 32
375 #define AR_UNAT 36
376 #define AR_FPSR 40
377 #define AR_ITC 44
378 #define AR_RUC 45
379 #define AR_PFS 64
380 #define AR_LC 65
381 #define AR_EC 66
382
383 static const struct
384 {
385 const char *name;
386 unsigned int regnum;
387 }
388 ar[] =
389 {
390 {"ar.k0", AR_K0}, {"ar.k1", AR_K0 + 1},
391 {"ar.k2", AR_K0 + 2}, {"ar.k3", AR_K0 + 3},
392 {"ar.k4", AR_K0 + 4}, {"ar.k5", AR_K0 + 5},
393 {"ar.k6", AR_K0 + 6}, {"ar.k7", AR_K7},
394 {"ar.rsc", AR_RSC}, {"ar.bsp", AR_BSP},
395 {"ar.bspstore", AR_BSPSTORE}, {"ar.rnat", AR_RNAT},
396 {"ar.fcr", AR_FCR}, {"ar.eflag", AR_EFLAG},
397 {"ar.csd", AR_CSD}, {"ar.ssd", AR_SSD},
398 {"ar.cflg", AR_CFLG}, {"ar.fsr", AR_FSR},
399 {"ar.fir", AR_FIR}, {"ar.fdr", AR_FDR},
400 {"ar.ccv", AR_CCV}, {"ar.unat", AR_UNAT},
401 {"ar.fpsr", AR_FPSR}, {"ar.itc", AR_ITC},
402 {"ar.ruc", AR_RUC}, {"ar.pfs", AR_PFS},
403 {"ar.lc", AR_LC}, {"ar.ec", AR_EC},
404 };
405
406 /* control registers: */
407
408 #define CR_DCR 0
409 #define CR_ITM 1
410 #define CR_IVA 2
411 #define CR_PTA 8
412 #define CR_GPTA 9
413 #define CR_IPSR 16
414 #define CR_ISR 17
415 #define CR_IIP 19
416 #define CR_IFA 20
417 #define CR_ITIR 21
418 #define CR_IIPA 22
419 #define CR_IFS 23
420 #define CR_IIM 24
421 #define CR_IHA 25
422 #define CR_IIB0 26
423 #define CR_IIB1 27
424 #define CR_LID 64
425 #define CR_IVR 65
426 #define CR_TPR 66
427 #define CR_EOI 67
428 #define CR_IRR0 68
429 #define CR_IRR3 71
430 #define CR_ITV 72
431 #define CR_PMV 73
432 #define CR_CMCV 74
433 #define CR_LRR0 80
434 #define CR_LRR1 81
435
436 static const struct
437 {
438 const char *name;
439 unsigned int regnum;
440 }
441 cr[] =
442 {
443 {"cr.dcr", CR_DCR},
444 {"cr.itm", CR_ITM},
445 {"cr.iva", CR_IVA},
446 {"cr.pta", CR_PTA},
447 {"cr.gpta", CR_GPTA},
448 {"cr.ipsr", CR_IPSR},
449 {"cr.isr", CR_ISR},
450 {"cr.iip", CR_IIP},
451 {"cr.ifa", CR_IFA},
452 {"cr.itir", CR_ITIR},
453 {"cr.iipa", CR_IIPA},
454 {"cr.ifs", CR_IFS},
455 {"cr.iim", CR_IIM},
456 {"cr.iha", CR_IHA},
457 {"cr.iib0", CR_IIB0},
458 {"cr.iib1", CR_IIB1},
459 {"cr.lid", CR_LID},
460 {"cr.ivr", CR_IVR},
461 {"cr.tpr", CR_TPR},
462 {"cr.eoi", CR_EOI},
463 {"cr.irr0", CR_IRR0},
464 {"cr.irr1", CR_IRR0 + 1},
465 {"cr.irr2", CR_IRR0 + 2},
466 {"cr.irr3", CR_IRR3},
467 {"cr.itv", CR_ITV},
468 {"cr.pmv", CR_PMV},
469 {"cr.cmcv", CR_CMCV},
470 {"cr.lrr0", CR_LRR0},
471 {"cr.lrr1", CR_LRR1}
472 };
473
474 #define PSR_MFL 4
475 #define PSR_IC 13
476 #define PSR_DFL 18
477 #define PSR_CPL 32
478
479 static const struct const_desc
480 {
481 const char *name;
482 valueT value;
483 }
484 const_bits[] =
485 {
486 /* PSR constant masks: */
487
488 /* 0: reserved */
489 {"psr.be", ((valueT) 1) << 1},
490 {"psr.up", ((valueT) 1) << 2},
491 {"psr.ac", ((valueT) 1) << 3},
492 {"psr.mfl", ((valueT) 1) << 4},
493 {"psr.mfh", ((valueT) 1) << 5},
494 /* 6-12: reserved */
495 {"psr.ic", ((valueT) 1) << 13},
496 {"psr.i", ((valueT) 1) << 14},
497 {"psr.pk", ((valueT) 1) << 15},
498 /* 16: reserved */
499 {"psr.dt", ((valueT) 1) << 17},
500 {"psr.dfl", ((valueT) 1) << 18},
501 {"psr.dfh", ((valueT) 1) << 19},
502 {"psr.sp", ((valueT) 1) << 20},
503 {"psr.pp", ((valueT) 1) << 21},
504 {"psr.di", ((valueT) 1) << 22},
505 {"psr.si", ((valueT) 1) << 23},
506 {"psr.db", ((valueT) 1) << 24},
507 {"psr.lp", ((valueT) 1) << 25},
508 {"psr.tb", ((valueT) 1) << 26},
509 {"psr.rt", ((valueT) 1) << 27},
510 /* 28-31: reserved */
511 /* 32-33: cpl (current privilege level) */
512 {"psr.is", ((valueT) 1) << 34},
513 {"psr.mc", ((valueT) 1) << 35},
514 {"psr.it", ((valueT) 1) << 36},
515 {"psr.id", ((valueT) 1) << 37},
516 {"psr.da", ((valueT) 1) << 38},
517 {"psr.dd", ((valueT) 1) << 39},
518 {"psr.ss", ((valueT) 1) << 40},
519 /* 41-42: ri (restart instruction) */
520 {"psr.ed", ((valueT) 1) << 43},
521 {"psr.bn", ((valueT) 1) << 44},
522 };
523
524 /* indirect register-sets/memory: */
525
526 static const struct
527 {
528 const char *name;
529 unsigned int regnum;
530 }
531 indirect_reg[] =
532 {
533 { "CPUID", IND_CPUID },
534 { "cpuid", IND_CPUID },
535 { "dbr", IND_DBR },
536 { "dtr", IND_DTR },
537 { "itr", IND_ITR },
538 { "ibr", IND_IBR },
539 { "msr", IND_MSR },
540 { "pkr", IND_PKR },
541 { "pmc", IND_PMC },
542 { "pmd", IND_PMD },
543 { "dahr", IND_DAHR },
544 { "rr", IND_RR },
545 };
546
547 /* Pseudo functions used to indicate relocation types (these functions
548    start with an at sign (@)).  */
549 static struct
550 {
551 const char *name;
552 enum pseudo_type
553 {
554 PSEUDO_FUNC_NONE,
555 PSEUDO_FUNC_RELOC,
556 PSEUDO_FUNC_CONST,
557 PSEUDO_FUNC_REG,
558 PSEUDO_FUNC_FLOAT
559 }
560 type;
561 union
562 {
563 unsigned long ival;
564 symbolS *sym;
565 }
566 u;
567 }
568 pseudo_func[] =
569 {
570 /* reloc pseudo functions (these must come first!): */
571 { "dtpmod", PSEUDO_FUNC_RELOC, { 0 } },
572 { "dtprel", PSEUDO_FUNC_RELOC, { 0 } },
573 { "fptr", PSEUDO_FUNC_RELOC, { 0 } },
574 { "gprel", PSEUDO_FUNC_RELOC, { 0 } },
575 { "ltoff", PSEUDO_FUNC_RELOC, { 0 } },
576 { "ltoffx", PSEUDO_FUNC_RELOC, { 0 } },
577 { "pcrel", PSEUDO_FUNC_RELOC, { 0 } },
578 { "pltoff", PSEUDO_FUNC_RELOC, { 0 } },
579 { "secrel", PSEUDO_FUNC_RELOC, { 0 } },
580 { "segrel", PSEUDO_FUNC_RELOC, { 0 } },
581 { "tprel", PSEUDO_FUNC_RELOC, { 0 } },
582 { "ltv", PSEUDO_FUNC_RELOC, { 0 } },
583 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_FPTR_RELATIVE */
584 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_MODULE */
585 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_RELATIVE */
586 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_TP_RELATIVE */
587 { "iplt", PSEUDO_FUNC_RELOC, { 0 } },
588 #ifdef TE_VMS
589 { "slotcount", PSEUDO_FUNC_RELOC, { 0 } },
590 #endif
591
592 /* mbtype4 constants: */
593 { "alt", PSEUDO_FUNC_CONST, { 0xa } },
594 { "brcst", PSEUDO_FUNC_CONST, { 0x0 } },
595 { "mix", PSEUDO_FUNC_CONST, { 0x8 } },
596 { "rev", PSEUDO_FUNC_CONST, { 0xb } },
597 { "shuf", PSEUDO_FUNC_CONST, { 0x9 } },
598
599 /* fclass constants: */
600 { "nat", PSEUDO_FUNC_CONST, { 0x100 } },
601 { "qnan", PSEUDO_FUNC_CONST, { 0x080 } },
602 { "snan", PSEUDO_FUNC_CONST, { 0x040 } },
603 { "pos", PSEUDO_FUNC_CONST, { 0x001 } },
604 { "neg", PSEUDO_FUNC_CONST, { 0x002 } },
605 { "zero", PSEUDO_FUNC_CONST, { 0x004 } },
606 { "unorm", PSEUDO_FUNC_CONST, { 0x008 } },
607 { "norm", PSEUDO_FUNC_CONST, { 0x010 } },
608 { "inf", PSEUDO_FUNC_CONST, { 0x020 } },
609
610 { "natval", PSEUDO_FUNC_CONST, { 0x100 } }, /* old usage */
611
612 /* hint constants: */
613 { "pause", PSEUDO_FUNC_CONST, { 0x0 } },
614 { "priority", PSEUDO_FUNC_CONST, { 0x1 } },
615
616 /* tf constants: */
617 { "clz", PSEUDO_FUNC_CONST, { 32 } },
618 { "mpy", PSEUDO_FUNC_CONST, { 33 } },
619 { "datahints", PSEUDO_FUNC_CONST, { 34 } },
620
621 /* unwind-related constants: */
622 { "svr4", PSEUDO_FUNC_CONST, { ELFOSABI_NONE } },
623 { "hpux", PSEUDO_FUNC_CONST, { ELFOSABI_HPUX } },
624 { "nt", PSEUDO_FUNC_CONST, { 2 } }, /* conflicts w/ELFOSABI_NETBSD */
625 { "linux", PSEUDO_FUNC_CONST, { ELFOSABI_GNU } },
626 { "freebsd", PSEUDO_FUNC_CONST, { ELFOSABI_FREEBSD } },
627 { "openvms", PSEUDO_FUNC_CONST, { ELFOSABI_OPENVMS } },
628 { "nsk", PSEUDO_FUNC_CONST, { ELFOSABI_NSK } },
629
630 /* unwind-related registers: */
631 { "priunat",PSEUDO_FUNC_REG, { REG_PRIUNAT } }
632 };
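/* Editorial example (assumption, not from the original source): the reloc
   entries above correspond to the '@' operators accepted in operands, as in
	addl r2 = @gprel(sym), gp
	addl r2 = @ltoff(sym), gp
   where the pseudo function selects the relocation applied to 'sym'.  */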
633
634 /* 41-bit nop opcodes (one per unit): */
635 static const bfd_vma nop[IA64_NUM_UNITS] =
636 {
637 0x0000000000LL, /* NIL => break 0 */
638 0x0008000000LL, /* I-unit nop */
639 0x0008000000LL, /* M-unit nop */
640 0x4000000000LL, /* B-unit nop */
641 0x0008000000LL, /* F-unit nop */
642 0x0000000000LL, /* L-"unit" nop immediate */
643 0x0008000000LL, /* X-unit nop */
644 };
645
646 /* Can't be `const' as it's passed to input routines (which have the
647    habit of setting temporary sentinels).  */
648 static char special_section_name[][20] =
649 {
650 {".bss"}, {".sbss"}, {".sdata"}, {".rodata"}, {".comment"},
651 {".IA_64.unwind"}, {".IA_64.unwind_info"},
652 {".init_array"}, {".fini_array"}
653 };
654
655 /* The best template for a particular sequence of up to three
656 instructions: */
657 #define N IA64_NUM_TYPES
658 static unsigned char best_template[N][N][N];
659 #undef N
660
661 /* Resource dependencies currently in effect */
662 static struct rsrc {
663 int depind; /* dependency index */
664 const struct ia64_dependency *dependency; /* actual dependency */
665 unsigned specific:1, /* is this a specific bit/regno? */
666 link_to_qp_branch:1; /* will a branch on the same QP clear it?*/
667 int index; /* specific regno/bit within dependency */
668 int note; /* optional qualifying note (0 if none) */
669 #define STATE_NONE 0
670 #define STATE_STOP 1
671 #define STATE_SRLZ 2
672 int insn_srlz; /* current insn serialization state */
673 int data_srlz; /* current data serialization state */
674 int qp_regno; /* qualifying predicate for this usage */
675 const char *file; /* what file marked this dependency */
676 unsigned int line; /* what line marked this dependency */
677 struct mem_offset mem_offset; /* optional memory offset hint */
678 enum { CMP_NONE, CMP_OR, CMP_AND } cmp_type; /* OR or AND compare? */
679 int path; /* corresponding code entry index */
680 } *regdeps = NULL;
681 static int regdepslen = 0;
682 static int regdepstotlen = 0;
683 static const char *dv_mode[] = { "RAW", "WAW", "WAR" };
684 static const char *dv_sem[] = { "none", "implied", "impliedf",
685 "data", "instr", "specific", "stop", "other" };
686 static const char *dv_cmp_type[] = { "none", "OR", "AND" };
687
688 /* Current state of PR mutexation */
689 static struct qpmutex {
690 valueT prmask;
691 int path;
692 } *qp_mutexes = NULL; /* QP mutex bitmasks */
693 static int qp_mutexeslen = 0;
694 static int qp_mutexestotlen = 0;
695 static valueT qp_safe_across_calls = 0;
696
697 /* Current state of PR implications */
698 static struct qp_imply {
699 unsigned p1:6;
700 unsigned p2:6;
701 unsigned p2_branched:1;
702 int path;
703 } *qp_implies = NULL;
704 static int qp_implieslen = 0;
705 static int qp_impliestotlen = 0;
706
707 /* Keep track of static GR values so that indirect register usage can
708 sometimes be tracked. */
709 static struct gr {
710 unsigned known:1;
711 int path;
712 valueT value;
713 } gr_values[128] = {
714 {
715 1,
716 #ifdef INT_MAX
717 INT_MAX,
718 #else
719 (((1 << (8 * sizeof(gr_values->path) - 2)) - 1) << 1) + 1,
720 #endif
721 0
722 }
723 };
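/* Editorial note (not part of the original source): the single initializer
   above describes gr_values[0], i.e. r0, which the IA-64 architecture
   hardwires to zero; it is marked as known with value 0 from the start
   (the large path value presumably keeps it valid regardless of which
   entry point is being analysed).  */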
724
725 /* Remember the alignment frag. */
726 static fragS *align_frag;
727
728 /* These are the routines required to output the various types of
729 unwind records. */
730
731 /* A slot_number is a frag address plus the slot index (0-2). We use the
732 frag address here so that if there is a section switch in the middle of
733 a function, then instructions emitted to a different section are not
734 counted. Since there may be more than one frag for a function, this
735 means we also need to keep track of which frag this address belongs to
736 so we can compute inter-frag distances. This also nicely solves the
737 problem with nops emitted for align directives, which can't easily be
738 counted, but can easily be derived from frag sizes. */
739
740 typedef struct unw_rec_list {
741 unwind_record r;
742 unsigned long slot_number;
743 fragS *slot_frag;
744 struct unw_rec_list *next;
745 } unw_rec_list;
746
747 #define SLOT_NUM_NOT_SET (unsigned)-1
748
749 /* Linked list of saved prologue counts. A very poor
750 implementation of a map from label numbers to prologue counts. */
751 typedef struct label_prologue_count
752 {
753 struct label_prologue_count *next;
754 unsigned long label_number;
755 unsigned int prologue_count;
756 } label_prologue_count;
757
758 typedef struct proc_pending
759 {
760 symbolS *sym;
761 struct proc_pending *next;
762 } proc_pending;
763
764 static struct
765 {
766 /* Maintain a list of unwind entries for the current function. */
767 unw_rec_list *list;
768 unw_rec_list *tail;
769
770 /* Any unwind entries that should be attached to the current slot
771 that an insn is being constructed for. */
772 unw_rec_list *current_entry;
773
774 /* These are used to create the unwind table entry for this function. */
775 proc_pending proc_pending;
776 symbolS *info; /* pointer to unwind info */
777 symbolS *personality_routine;
778 segT saved_text_seg;
779 subsegT saved_text_subseg;
780 unsigned int force_unwind_entry : 1; /* force generation of unwind entry? */
781
782 /* TRUE if processing unwind directives in a prologue region. */
783 unsigned int prologue : 1;
784 unsigned int prologue_mask : 4;
785 unsigned int prologue_gr : 7;
786 unsigned int body : 1;
787 unsigned int insn : 1;
788 unsigned int prologue_count; /* number of .prologues seen so far */
789 /* Prologue counts at previous .label_state directives. */
790 struct label_prologue_count * saved_prologue_counts;
791
792 /* List of split up .save-s. */
793 unw_p_record *pending_saves;
794 } unwind;
795
796 /* The input value is a negated offset from psp, and specifies an address
797    psp - offset.  The encoded value E represents the address psp + 16 - (4 * E),
798    so we must add 16 to the offset and divide by 4 to get the encoded value.  */
799
800 #define ENCODED_PSP_OFFSET(OFFSET) (((OFFSET) + 16) / 4)
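/* Editorial example (not part of the original source): for a directive
   operand of 32 (i.e. the address psp - 32), ENCODED_PSP_OFFSET (32)
   yields (32 + 16) / 4 = 12, and decoding it as psp + 16 - 4 * 12 gives
   back psp - 32.  */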
801
802 typedef void (*vbyte_func) (int, char *, char *);
803
804 /* Forward declarations: */
805 static void dot_alias (int);
806 static int parse_operand_and_eval (expressionS *, int);
807 static void emit_one_bundle (void);
808 static bfd_reloc_code_real_type ia64_gen_real_reloc_type (struct symbol *,
809 bfd_reloc_code_real_type);
810 static void insn_group_break (int, int, int);
811 static void add_qp_mutex (valueT);
812 static void add_qp_imply (int, int);
813 static void clear_qp_mutex (valueT);
814 static void clear_qp_implies (valueT, valueT);
815 static void print_dependency (const char *, int);
816 static void instruction_serialization (void);
817 static void data_serialization (void);
818 static void output_R3_format (vbyte_func, unw_record_type, unsigned long);
819 static void output_B3_format (vbyte_func, unsigned long, unsigned long);
820 static void output_B4_format (vbyte_func, unw_record_type, unsigned long);
821 static void free_saved_prologue_counts (void);
822
823 /* Determine if application register REGNUM resides only in the integer
824 unit (as opposed to the memory unit). */
825 static int
826 ar_is_only_in_integer_unit (int reg)
827 {
828 reg -= REG_AR;
829 return reg >= 64 && reg <= 111;
830 }
831
832 /* Determine if application register REGNUM resides only in the memory
833 unit (as opposed to the integer unit). */
834 static int
835 ar_is_only_in_memory_unit (int reg)
836 {
837 reg -= REG_AR;
838 return reg >= 0 && reg <= 47;
839 }
840
841 /* Switch to section NAME and create section if necessary. It's
842 rather ugly that we have to manipulate input_line_pointer but I
843 don't see any other way to accomplish the same thing without
844 changing obj-elf.c (which may be the Right Thing, in the end). */
845 static void
846 set_section (char *name)
847 {
848 char *saved_input_line_pointer;
849
850 saved_input_line_pointer = input_line_pointer;
851 input_line_pointer = name;
852 obj_elf_section (0);
853 input_line_pointer = saved_input_line_pointer;
854 }
855
856 /* Map 's' to SHF_IA_64_SHORT. */
857
858 bfd_vma
859 ia64_elf_section_letter (int letter, const char **ptr_msg)
860 {
861 if (letter == 's')
862 return SHF_IA_64_SHORT;
863 else if (letter == 'o')
864 return SHF_LINK_ORDER;
865 #ifdef TE_VMS
866 else if (letter == 'O')
867 return SHF_IA_64_VMS_OVERLAID;
868 else if (letter == 'g')
869 return SHF_IA_64_VMS_GLOBAL;
870 #endif
871
872 *ptr_msg = _("bad .section directive: want a,o,s,w,x,M,S,G,T in string");
873 return -1;
874 }
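/* Editorial example (assumption, not from the original source): a section
   directive such as
	.section .sdata1, "aws", @progbits
   reaches this hook with letter == 's', which adds SHF_IA_64_SHORT to the
   section flags; ia64_elf_section_flags below then maps that to
   SEC_SMALL_DATA.  */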
875
876 /* Map SHF_IA_64_SHORT to SEC_SMALL_DATA. */
877
878 flagword
879 ia64_elf_section_flags (flagword flags,
880 bfd_vma attr,
881 int type ATTRIBUTE_UNUSED)
882 {
883 if (attr & SHF_IA_64_SHORT)
884 flags |= SEC_SMALL_DATA;
885 return flags;
886 }
887
888 int
889 ia64_elf_section_type (const char *str, size_t len)
890 {
891 #define STREQ(s) ((len == sizeof (s) - 1) && (strncmp (str, s, sizeof (s) - 1) == 0))
892
893 if (STREQ (ELF_STRING_ia64_unwind_info))
894 return SHT_PROGBITS;
895
896 if (STREQ (ELF_STRING_ia64_unwind_info_once))
897 return SHT_PROGBITS;
898
899 if (STREQ (ELF_STRING_ia64_unwind))
900 return SHT_IA_64_UNWIND;
901
902 if (STREQ (ELF_STRING_ia64_unwind_once))
903 return SHT_IA_64_UNWIND;
904
905 if (STREQ ("unwind"))
906 return SHT_IA_64_UNWIND;
907
908 return -1;
909 #undef STREQ
910 }
911
912 static unsigned int
913 set_regstack (unsigned int ins,
914 unsigned int locs,
915 unsigned int outs,
916 unsigned int rots)
917 {
918 /* Size of frame. */
919 unsigned int sof;
920
921 sof = ins + locs + outs;
922 if (sof > 96)
923 {
924 as_bad (_("Size of frame exceeds maximum of 96 registers"));
925 return 0;
926 }
927 if (rots > sof)
928 {
929 as_warn (_("Size of rotating registers exceeds frame size"));
930 return 0;
931 }
932 md.in.base = REG_GR + 32;
933 md.loc.base = md.in.base + ins;
934 md.out.base = md.loc.base + locs;
935
936 md.in.num_regs = ins;
937 md.loc.num_regs = locs;
938 md.out.num_regs = outs;
939 md.rot.num_regs = rots;
940 return sof;
941 }
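/* Editorial example (not part of the original source): a ".regstk 2, 3, 1, 0"
   style request, i.e. ins=2, locs=3, outs=1, rots=0, gives sof = 6, with
   "in" covering r32-r33, "loc" r34-r36 and "out" r37.  */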
942
943 void
944 ia64_flush_insns (void)
945 {
946 struct label_fix *lfix;
947 segT saved_seg;
948 subsegT saved_subseg;
949 unw_rec_list *ptr;
950 bfd_boolean mark;
951
952 if (!md.last_text_seg)
953 return;
954
955 saved_seg = now_seg;
956 saved_subseg = now_subseg;
957
958 subseg_set (md.last_text_seg, 0);
959
960 while (md.num_slots_in_use > 0)
961 emit_one_bundle (); /* force out queued instructions */
962
963 /* In case there are labels following the last instruction, resolve
964 those now. */
965 mark = FALSE;
966 for (lfix = CURR_SLOT.label_fixups; lfix; lfix = lfix->next)
967 {
968 symbol_set_value_now (lfix->sym);
969 mark |= lfix->dw2_mark_labels;
970 }
971 if (mark)
972 {
973 dwarf2_where (&CURR_SLOT.debug_line);
974 CURR_SLOT.debug_line.flags |= DWARF2_FLAG_BASIC_BLOCK;
975 dwarf2_gen_line_info (frag_now_fix (), &CURR_SLOT.debug_line);
976 dwarf2_consume_line_info ();
977 }
978 CURR_SLOT.label_fixups = 0;
979
980 for (lfix = CURR_SLOT.tag_fixups; lfix; lfix = lfix->next)
981 symbol_set_value_now (lfix->sym);
982 CURR_SLOT.tag_fixups = 0;
983
984 /* In case there are unwind directives following the last instruction,
985 resolve those now. We only handle prologue, body, and endp directives
986 here. Give an error for others. */
987 for (ptr = unwind.current_entry; ptr; ptr = ptr->next)
988 {
989 switch (ptr->r.type)
990 {
991 case prologue:
992 case prologue_gr:
993 case body:
994 case endp:
995 ptr->slot_number = (unsigned long) frag_more (0);
996 ptr->slot_frag = frag_now;
997 break;
998
999 /* Allow any record which doesn't have a "t" field (i.e.,
1000 doesn't relate to a particular instruction). */
1001 case unwabi:
1002 case br_gr:
1003 case copy_state:
1004 case fr_mem:
1005 case frgr_mem:
1006 case gr_gr:
1007 case gr_mem:
1008 case label_state:
1009 case rp_br:
1010 case spill_base:
1011 case spill_mask:
1012 /* nothing */
1013 break;
1014
1015 default:
1016 as_bad (_("Unwind directive not followed by an instruction."));
1017 break;
1018 }
1019 }
1020 unwind.current_entry = NULL;
1021
1022 subseg_set (saved_seg, saved_subseg);
1023
1024 if (md.qp.X_op == O_register)
1025 as_bad (_("qualifying predicate not followed by instruction"));
1026 }
1027
1028 void
1029 ia64_cons_align (int nbytes)
1030 {
1031 if (md.auto_align)
1032 {
1033 int log;
1034 for (log = 0; (nbytes & 1) != 1; nbytes >>= 1)
1035 log++;
1036
1037 do_align (log, NULL, 0, 0);
1038 }
1039 }
1040
1041 #ifdef TE_VMS
1042
1043 /* .vms_common section, symbol, size, alignment */
1044
1045 static void
1046 obj_elf_vms_common (int ignore ATTRIBUTE_UNUSED)
1047 {
1048 const char *sec_name;
1049 char *sym_name;
1050 char c;
1051 offsetT size;
1052 offsetT cur_size;
1053 offsetT temp;
1054 symbolS *symbolP;
1055 segT current_seg = now_seg;
1056 subsegT current_subseg = now_subseg;
1057 offsetT log_align;
1058
1059 /* Section name. */
1060 sec_name = obj_elf_section_name ();
1061 if (sec_name == NULL)
1062 return;
1063
1064 /* Symbol name. */
1065 SKIP_WHITESPACE ();
1066 if (*input_line_pointer == ',')
1067 {
1068 input_line_pointer++;
1069 SKIP_WHITESPACE ();
1070 }
1071 else
1072 {
1073 as_bad (_("expected ',' after section name"));
1074 ignore_rest_of_line ();
1075 return;
1076 }
1077
1078 c = get_symbol_name (&sym_name);
1079
1080 if (input_line_pointer == sym_name)
1081 {
1082 (void) restore_line_pointer (c);
1083 as_bad (_("expected symbol name"));
1084 ignore_rest_of_line ();
1085 return;
1086 }
1087
1088 symbolP = symbol_find_or_make (sym_name);
1089 (void) restore_line_pointer (c);
1090
1091 if ((S_IS_DEFINED (symbolP) || symbol_equated_p (symbolP))
1092 && !S_IS_COMMON (symbolP))
1093 {
1094 as_bad (_("Ignoring attempt to re-define symbol"));
1095 ignore_rest_of_line ();
1096 return;
1097 }
1098
1099 /* Symbol size. */
1100 SKIP_WHITESPACE ();
1101 if (*input_line_pointer == ',')
1102 {
1103 input_line_pointer++;
1104 SKIP_WHITESPACE ();
1105 }
1106 else
1107 {
1108 as_bad (_("expected ',' after symbol name"));
1109 ignore_rest_of_line ();
1110 return;
1111 }
1112
1113 temp = get_absolute_expression ();
1114 size = temp;
1115 size &= ((offsetT) 2 << (stdoutput->arch_info->bits_per_address - 1)) - 1;
1116 if (temp != size)
1117 {
1118 as_warn (_("size (%ld) out of range, ignored"), (long) temp);
1119 ignore_rest_of_line ();
1120 return;
1121 }
1122
1123 /* Alignment. */
1124 SKIP_WHITESPACE ();
1125 if (*input_line_pointer == ',')
1126 {
1127 input_line_pointer++;
1128 SKIP_WHITESPACE ();
1129 }
1130 else
1131 {
1132 as_bad (_("expected ',' after symbol size"));
1133 ignore_rest_of_line ();
1134 return;
1135 }
1136
1137 log_align = get_absolute_expression ();
1138
1139 demand_empty_rest_of_line ();
1140
1141 obj_elf_change_section
1142 (sec_name, SHT_NOBITS, 0,
1143 SHF_ALLOC | SHF_WRITE | SHF_IA_64_VMS_OVERLAID | SHF_IA_64_VMS_GLOBAL,
1144 0, NULL, 1, 0);
1145
1146 S_SET_VALUE (symbolP, 0);
1147 S_SET_SIZE (symbolP, size);
1148 S_SET_EXTERNAL (symbolP);
1149 S_SET_SEGMENT (symbolP, now_seg);
1150
1151 symbol_get_bfdsym (symbolP)->flags |= BSF_OBJECT;
1152
1153 record_alignment (now_seg, log_align);
1154
1155 cur_size = bfd_section_size (now_seg);
1156 if ((int) size > cur_size)
1157 {
1158 char *pfrag
1159 = frag_var (rs_fill, 1, 1, (relax_substateT)0, NULL,
1160 (valueT)size - (valueT)cur_size, NULL);
1161 *pfrag = 0;
1162 bfd_set_section_size (now_seg, size);
1163 }
1164
1165 /* Switch back to current segment. */
1166 subseg_set (current_seg, current_subseg);
1167
1168 #ifdef md_elf_section_change_hook
1169 md_elf_section_change_hook ();
1170 #endif
1171 }
1172
1173 #endif /* TE_VMS */
1174
1175 /* Output COUNT bytes to a memory location. */
1176 static char *vbyte_mem_ptr = NULL;
1177
1178 static void
1179 output_vbyte_mem (int count, char *ptr, char *comment ATTRIBUTE_UNUSED)
1180 {
1181 int x;
1182 if (vbyte_mem_ptr == NULL)
1183 abort ();
1184
1185 if (count == 0)
1186 return;
1187 for (x = 0; x < count; x++)
1188 *(vbyte_mem_ptr++) = ptr[x];
1189 }
1190
1191 /* Count the number of bytes required for records. */
1192 static int vbyte_count = 0;
1193 static void
1194 count_output (int count,
1195 char *ptr ATTRIBUTE_UNUSED,
1196 char *comment ATTRIBUTE_UNUSED)
1197 {
1198 vbyte_count += count;
1199 }
1200
1201 static void
1202 output_R1_format (vbyte_func f, unw_record_type rtype, int rlen)
1203 {
1204 int r = 0;
1205 char byte;
1206 if (rlen > 0x1f)
1207 {
1208 output_R3_format (f, rtype, rlen);
1209 return;
1210 }
1211
1212 if (rtype == body)
1213 r = 1;
1214 else if (rtype != prologue)
1215 as_bad (_("record type is not valid"));
1216
1217 byte = UNW_R1 | (r << 5) | (rlen & 0x1f);
1218 (*f) (1, &byte, NULL);
1219 }
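/* Editorial example (not part of the original source): a prologue region of
   5 slots is emitted as the single byte UNW_R1 | 5, while a body region of
   the same length also sets the r bit, UNW_R1 | (1 << 5) | 5.  Regions
   longer than 31 slots are handed to the long-form R3 encoding instead.  */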
1220
1221 static void
1222 output_R2_format (vbyte_func f, int mask, int grsave, unsigned long rlen)
1223 {
1224 char bytes[20];
1225 int count = 2;
1226 mask = (mask & 0x0f);
1227 grsave = (grsave & 0x7f);
1228
1229 bytes[0] = (UNW_R2 | (mask >> 1));
1230 bytes[1] = (((mask & 0x01) << 7) | grsave);
1231 count += output_leb128 (bytes + 2, rlen, 0);
1232 (*f) (count, bytes, NULL);
1233 }
1234
1235 static void
1236 output_R3_format (vbyte_func f, unw_record_type rtype, unsigned long rlen)
1237 {
1238 int r = 0, count;
1239 char bytes[20];
1240 if (rlen <= 0x1f)
1241 {
1242 output_R1_format (f, rtype, rlen);
1243 return;
1244 }
1245
1246 if (rtype == body)
1247 r = 1;
1248 else if (rtype != prologue)
1249 as_bad (_("record type is not valid"));
1250 bytes[0] = (UNW_R3 | r);
1251 count = output_leb128 (bytes + 1, rlen, 0);
1252 (*f) (count + 1, bytes, NULL);
1253 }
1254
1255 static void
1256 output_P1_format (vbyte_func f, int brmask)
1257 {
1258 char byte;
1259 byte = UNW_P1 | (brmask & 0x1f);
1260 (*f) (1, &byte, NULL);
1261 }
1262
1263 static void
1264 output_P2_format (vbyte_func f, int brmask, int gr)
1265 {
1266 char bytes[2];
1267 brmask = (brmask & 0x1f);
1268 bytes[0] = UNW_P2 | (brmask >> 1);
1269 bytes[1] = (((brmask & 1) << 7) | gr);
1270 (*f) (2, bytes, NULL);
1271 }
1272
1273 static void
1274 output_P3_format (vbyte_func f, unw_record_type rtype, int reg)
1275 {
1276 char bytes[2];
1277 int r = 0;
1278 reg = (reg & 0x7f);
1279 switch (rtype)
1280 {
1281 case psp_gr:
1282 r = 0;
1283 break;
1284 case rp_gr:
1285 r = 1;
1286 break;
1287 case pfs_gr:
1288 r = 2;
1289 break;
1290 case preds_gr:
1291 r = 3;
1292 break;
1293 case unat_gr:
1294 r = 4;
1295 break;
1296 case lc_gr:
1297 r = 5;
1298 break;
1299 case rp_br:
1300 r = 6;
1301 break;
1302 case rnat_gr:
1303 r = 7;
1304 break;
1305 case bsp_gr:
1306 r = 8;
1307 break;
1308 case bspstore_gr:
1309 r = 9;
1310 break;
1311 case fpsr_gr:
1312 r = 10;
1313 break;
1314 case priunat_gr:
1315 r = 11;
1316 break;
1317 default:
1318 as_bad (_("Invalid record type for P3 format."));
1319 }
1320 bytes[0] = (UNW_P3 | (r >> 1));
1321 bytes[1] = (((r & 1) << 7) | reg);
1322 (*f) (2, bytes, NULL);
1323 }
1324
1325 static void
1326 output_P4_format (vbyte_func f, unsigned char *imask, unsigned long imask_size)
1327 {
1328 imask[0] = UNW_P4;
1329 (*f) (imask_size, (char *) imask, NULL);
1330 }
1331
1332 static void
1333 output_P5_format (vbyte_func f, int grmask, unsigned long frmask)
1334 {
1335 char bytes[4];
1336 grmask = (grmask & 0x0f);
1337
1338 bytes[0] = UNW_P5;
1339 bytes[1] = ((grmask << 4) | ((frmask & 0x000f0000) >> 16));
1340 bytes[2] = ((frmask & 0x0000ff00) >> 8);
1341 bytes[3] = (frmask & 0x000000ff);
1342 (*f) (4, bytes, NULL);
1343 }
1344
1345 static void
1346 output_P6_format (vbyte_func f, unw_record_type rtype, int rmask)
1347 {
1348 char byte;
1349 int r = 0;
1350
1351 if (rtype == gr_mem)
1352 r = 1;
1353 else if (rtype != fr_mem)
1354 as_bad (_("Invalid record type for format P6"));
1355 byte = (UNW_P6 | (r << 4) | (rmask & 0x0f));
1356 (*f) (1, &byte, NULL);
1357 }
1358
1359 static void
1360 output_P7_format (vbyte_func f,
1361 unw_record_type rtype,
1362 unsigned long w1,
1363 unsigned long w2)
1364 {
1365 char bytes[20];
1366 int count = 1;
1367 int r = 0;
1368 count += output_leb128 (bytes + 1, w1, 0);
1369 switch (rtype)
1370 {
1371 case mem_stack_f:
1372 r = 0;
1373 count += output_leb128 (bytes + count, w2 >> 4, 0);
1374 break;
1375 case mem_stack_v:
1376 r = 1;
1377 break;
1378 case spill_base:
1379 r = 2;
1380 break;
1381 case psp_sprel:
1382 r = 3;
1383 break;
1384 case rp_when:
1385 r = 4;
1386 break;
1387 case rp_psprel:
1388 r = 5;
1389 break;
1390 case pfs_when:
1391 r = 6;
1392 break;
1393 case pfs_psprel:
1394 r = 7;
1395 break;
1396 case preds_when:
1397 r = 8;
1398 break;
1399 case preds_psprel:
1400 r = 9;
1401 break;
1402 case lc_when:
1403 r = 10;
1404 break;
1405 case lc_psprel:
1406 r = 11;
1407 break;
1408 case unat_when:
1409 r = 12;
1410 break;
1411 case unat_psprel:
1412 r = 13;
1413 break;
1414 case fpsr_when:
1415 r = 14;
1416 break;
1417 case fpsr_psprel:
1418 r = 15;
1419 break;
1420 default:
1421 break;
1422 }
1423 bytes[0] = (UNW_P7 | r);
1424 (*f) (count, bytes, NULL);
1425 }
1426
1427 static void
1428 output_P8_format (vbyte_func f, unw_record_type rtype, unsigned long t)
1429 {
1430 char bytes[20];
1431 int r = 0;
1432 int count = 2;
1433 bytes[0] = UNW_P8;
1434 switch (rtype)
1435 {
1436 case rp_sprel:
1437 r = 1;
1438 break;
1439 case pfs_sprel:
1440 r = 2;
1441 break;
1442 case preds_sprel:
1443 r = 3;
1444 break;
1445 case lc_sprel:
1446 r = 4;
1447 break;
1448 case unat_sprel:
1449 r = 5;
1450 break;
1451 case fpsr_sprel:
1452 r = 6;
1453 break;
1454 case bsp_when:
1455 r = 7;
1456 break;
1457 case bsp_psprel:
1458 r = 8;
1459 break;
1460 case bsp_sprel:
1461 r = 9;
1462 break;
1463 case bspstore_when:
1464 r = 10;
1465 break;
1466 case bspstore_psprel:
1467 r = 11;
1468 break;
1469 case bspstore_sprel:
1470 r = 12;
1471 break;
1472 case rnat_when:
1473 r = 13;
1474 break;
1475 case rnat_psprel:
1476 r = 14;
1477 break;
1478 case rnat_sprel:
1479 r = 15;
1480 break;
1481 case priunat_when_gr:
1482 r = 16;
1483 break;
1484 case priunat_psprel:
1485 r = 17;
1486 break;
1487 case priunat_sprel:
1488 r = 18;
1489 break;
1490 case priunat_when_mem:
1491 r = 19;
1492 break;
1493 default:
1494 break;
1495 }
1496 bytes[1] = r;
1497 count += output_leb128 (bytes + 2, t, 0);
1498 (*f) (count, bytes, NULL);
1499 }
1500
1501 static void
1502 output_P9_format (vbyte_func f, int grmask, int gr)
1503 {
1504 char bytes[3];
1505 bytes[0] = UNW_P9;
1506 bytes[1] = (grmask & 0x0f);
1507 bytes[2] = (gr & 0x7f);
1508 (*f) (3, bytes, NULL);
1509 }
1510
1511 static void
1512 output_P10_format (vbyte_func f, int abi, int context)
1513 {
1514 char bytes[3];
1515 bytes[0] = UNW_P10;
1516 bytes[1] = (abi & 0xff);
1517 bytes[2] = (context & 0xff);
1518 (*f) (3, bytes, NULL);
1519 }
1520
1521 static void
1522 output_B1_format (vbyte_func f, unw_record_type rtype, unsigned long label)
1523 {
1524 char byte;
1525 int r = 0;
1526 if (label > 0x1f)
1527 {
1528 output_B4_format (f, rtype, label);
1529 return;
1530 }
1531 if (rtype == copy_state)
1532 r = 1;
1533 else if (rtype != label_state)
1534 as_bad (_("Invalid record type for format B1"));
1535
1536 byte = (UNW_B1 | (r << 5) | (label & 0x1f));
1537 (*f) (1, &byte, NULL);
1538 }
1539
1540 static void
1541 output_B2_format (vbyte_func f, unsigned long ecount, unsigned long t)
1542 {
1543 char bytes[20];
1544 int count = 1;
1545 if (ecount > 0x1f)
1546 {
1547 output_B3_format (f, ecount, t);
1548 return;
1549 }
1550 bytes[0] = (UNW_B2 | (ecount & 0x1f));
1551 count += output_leb128 (bytes + 1, t, 0);
1552 (*f) (count, bytes, NULL);
1553 }
1554
1555 static void
1556 output_B3_format (vbyte_func f, unsigned long ecount, unsigned long t)
1557 {
1558 char bytes[20];
1559 int count = 1;
1560 if (ecount <= 0x1f)
1561 {
1562 output_B2_format (f, ecount, t);
1563 return;
1564 }
1565 bytes[0] = UNW_B3;
1566 count += output_leb128 (bytes + 1, t, 0);
1567 count += output_leb128 (bytes + count, ecount, 0);
1568 (*f) (count, bytes, NULL);
1569 }
1570
1571 static void
1572 output_B4_format (vbyte_func f, unw_record_type rtype, unsigned long label)
1573 {
1574 char bytes[20];
1575 int r = 0;
1576 int count = 1;
1577 if (label <= 0x1f)
1578 {
1579 output_B1_format (f, rtype, label);
1580 return;
1581 }
1582
1583 if (rtype == copy_state)
1584 r = 1;
1585 else if (rtype != label_state)
1586     as_bad (_("Invalid record type for format B4"));
1587
1588 bytes[0] = (UNW_B4 | (r << 3));
1589 count += output_leb128 (bytes + 1, label, 0);
1590 (*f) (count, bytes, NULL);
1591 }
1592
1593 static char
1594 format_ab_reg (int ab, int reg)
1595 {
1596 int ret;
1597 ab = (ab & 3);
1598 reg = (reg & 0x1f);
1599 ret = (ab << 5) | reg;
1600 return ret;
1601 }
1602
1603 static void
1604 output_X1_format (vbyte_func f,
1605 unw_record_type rtype,
1606 int ab,
1607 int reg,
1608 unsigned long t,
1609 unsigned long w1)
1610 {
1611 char bytes[20];
1612 int r = 0;
1613 int count = 2;
1614 bytes[0] = UNW_X1;
1615
1616 if (rtype == spill_sprel)
1617 r = 1;
1618 else if (rtype != spill_psprel)
1619 as_bad (_("Invalid record type for format X1"));
1620 bytes[1] = ((r << 7) | format_ab_reg (ab, reg));
1621 count += output_leb128 (bytes + 2, t, 0);
1622 count += output_leb128 (bytes + count, w1, 0);
1623 (*f) (count, bytes, NULL);
1624 }
1625
1626 static void
1627 output_X2_format (vbyte_func f,
1628 int ab,
1629 int reg,
1630 int x,
1631 int y,
1632 int treg,
1633 unsigned long t)
1634 {
1635 char bytes[20];
1636 int count = 3;
1637 bytes[0] = UNW_X2;
1638 bytes[1] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1639 bytes[2] = (((y & 1) << 7) | (treg & 0x7f));
1640 count += output_leb128 (bytes + 3, t, 0);
1641 (*f) (count, bytes, NULL);
1642 }
1643
1644 static void
1645 output_X3_format (vbyte_func f,
1646 unw_record_type rtype,
1647 int qp,
1648 int ab,
1649 int reg,
1650 unsigned long t,
1651 unsigned long w1)
1652 {
1653 char bytes[20];
1654 int r = 0;
1655 int count = 3;
1656 bytes[0] = UNW_X3;
1657
1658 if (rtype == spill_sprel_p)
1659 r = 1;
1660 else if (rtype != spill_psprel_p)
1661 as_bad (_("Invalid record type for format X3"));
1662 bytes[1] = ((r << 7) | (qp & 0x3f));
1663 bytes[2] = format_ab_reg (ab, reg);
1664 count += output_leb128 (bytes + 3, t, 0);
1665 count += output_leb128 (bytes + count, w1, 0);
1666 (*f) (count, bytes, NULL);
1667 }
1668
1669 static void
1670 output_X4_format (vbyte_func f,
1671 int qp,
1672 int ab,
1673 int reg,
1674 int x,
1675 int y,
1676 int treg,
1677 unsigned long t)
1678 {
1679 char bytes[20];
1680 int count = 4;
1681 bytes[0] = UNW_X4;
1682 bytes[1] = (qp & 0x3f);
1683 bytes[2] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1684 bytes[3] = (((y & 1) << 7) | (treg & 0x7f));
1685 count += output_leb128 (bytes + 4, t, 0);
1686 (*f) (count, bytes, NULL);
1687 }
1688
1689 /* This function checks whether there are any outstanding .save-s and
1690 discards them if so. */
1691
1692 static void
1693 check_pending_save (void)
1694 {
1695 if (unwind.pending_saves)
1696 {
1697 unw_rec_list *cur, *prev;
1698
1699 as_warn (_("Previous .save incomplete"));
1700 for (cur = unwind.list, prev = NULL; cur; )
1701 if (&cur->r.record.p == unwind.pending_saves)
1702 {
1703 if (prev)
1704 prev->next = cur->next;
1705 else
1706 unwind.list = cur->next;
1707 if (cur == unwind.tail)
1708 unwind.tail = prev;
1709 if (cur == unwind.current_entry)
1710 unwind.current_entry = cur->next;
1711 /* Don't free the first discarded record, it's being used as
1712 terminator for (currently) br_gr and gr_gr processing, and
1713 also prevents leaving a dangling pointer to it in its
1714 predecessor. */
1715 cur->r.record.p.grmask = 0;
1716 cur->r.record.p.brmask = 0;
1717 cur->r.record.p.frmask = 0;
1718 prev = cur->r.record.p.next;
1719 cur->r.record.p.next = NULL;
1720 cur = prev;
1721 break;
1722 }
1723 else
1724 {
1725 prev = cur;
1726 cur = cur->next;
1727 }
1728 while (cur)
1729 {
1730 prev = cur;
1731 cur = cur->r.record.p.next;
1732 free (prev);
1733 }
1734 unwind.pending_saves = NULL;
1735 }
1736 }
1737
1738 /* This function allocates a record list structure, and initializes fields. */
1739
1740 static unw_rec_list *
1741 alloc_record (unw_record_type t)
1742 {
1743 unw_rec_list *ptr;
1744 ptr = XNEW (unw_rec_list);
1745 memset (ptr, 0, sizeof (*ptr));
1746 ptr->slot_number = SLOT_NUM_NOT_SET;
1747 ptr->r.type = t;
1748 return ptr;
1749 }
1750
1751 /* Dummy unwind record used for calculating the length of the last prologue or
1752 body region. */
1753
1754 static unw_rec_list *
1755 output_endp (void)
1756 {
1757 unw_rec_list *ptr = alloc_record (endp);
1758 return ptr;
1759 }
1760
1761 static unw_rec_list *
1762 output_prologue (void)
1763 {
1764 unw_rec_list *ptr = alloc_record (prologue);
1765 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1766 return ptr;
1767 }
1768
1769 static unw_rec_list *
1770 output_prologue_gr (unsigned int saved_mask, unsigned int reg)
1771 {
1772 unw_rec_list *ptr = alloc_record (prologue_gr);
1773 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1774 ptr->r.record.r.grmask = saved_mask;
1775 ptr->r.record.r.grsave = reg;
1776 return ptr;
1777 }
1778
1779 static unw_rec_list *
1780 output_body (void)
1781 {
1782 unw_rec_list *ptr = alloc_record (body);
1783 return ptr;
1784 }
1785
1786 static unw_rec_list *
1787 output_mem_stack_f (unsigned int size)
1788 {
1789 unw_rec_list *ptr = alloc_record (mem_stack_f);
1790 ptr->r.record.p.size = size;
1791 return ptr;
1792 }
1793
1794 static unw_rec_list *
1795 output_mem_stack_v (void)
1796 {
1797 unw_rec_list *ptr = alloc_record (mem_stack_v);
1798 return ptr;
1799 }
1800
1801 static unw_rec_list *
1802 output_psp_gr (unsigned int gr)
1803 {
1804 unw_rec_list *ptr = alloc_record (psp_gr);
1805 ptr->r.record.p.r.gr = gr;
1806 return ptr;
1807 }
1808
1809 static unw_rec_list *
1810 output_psp_sprel (unsigned int offset)
1811 {
1812 unw_rec_list *ptr = alloc_record (psp_sprel);
1813 ptr->r.record.p.off.sp = offset / 4;
1814 return ptr;
1815 }
1816
1817 static unw_rec_list *
1818 output_rp_when (void)
1819 {
1820 unw_rec_list *ptr = alloc_record (rp_when);
1821 return ptr;
1822 }
1823
1824 static unw_rec_list *
1825 output_rp_gr (unsigned int gr)
1826 {
1827 unw_rec_list *ptr = alloc_record (rp_gr);
1828 ptr->r.record.p.r.gr = gr;
1829 return ptr;
1830 }
1831
1832 static unw_rec_list *
1833 output_rp_br (unsigned int br)
1834 {
1835 unw_rec_list *ptr = alloc_record (rp_br);
1836 ptr->r.record.p.r.br = br;
1837 return ptr;
1838 }
1839
1840 static unw_rec_list *
1841 output_rp_psprel (unsigned int offset)
1842 {
1843 unw_rec_list *ptr = alloc_record (rp_psprel);
1844 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1845 return ptr;
1846 }
1847
1848 static unw_rec_list *
1849 output_rp_sprel (unsigned int offset)
1850 {
1851 unw_rec_list *ptr = alloc_record (rp_sprel);
1852 ptr->r.record.p.off.sp = offset / 4;
1853 return ptr;
1854 }
1855
1856 static unw_rec_list *
1857 output_pfs_when (void)
1858 {
1859 unw_rec_list *ptr = alloc_record (pfs_when);
1860 return ptr;
1861 }
1862
1863 static unw_rec_list *
1864 output_pfs_gr (unsigned int gr)
1865 {
1866 unw_rec_list *ptr = alloc_record (pfs_gr);
1867 ptr->r.record.p.r.gr = gr;
1868 return ptr;
1869 }
1870
1871 static unw_rec_list *
1872 output_pfs_psprel (unsigned int offset)
1873 {
1874 unw_rec_list *ptr = alloc_record (pfs_psprel);
1875 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1876 return ptr;
1877 }
1878
1879 static unw_rec_list *
1880 output_pfs_sprel (unsigned int offset)
1881 {
1882 unw_rec_list *ptr = alloc_record (pfs_sprel);
1883 ptr->r.record.p.off.sp = offset / 4;
1884 return ptr;
1885 }
1886
1887 static unw_rec_list *
1888 output_preds_when (void)
1889 {
1890 unw_rec_list *ptr = alloc_record (preds_when);
1891 return ptr;
1892 }
1893
1894 static unw_rec_list *
1895 output_preds_gr (unsigned int gr)
1896 {
1897 unw_rec_list *ptr = alloc_record (preds_gr);
1898 ptr->r.record.p.r.gr = gr;
1899 return ptr;
1900 }
1901
1902 static unw_rec_list *
1903 output_preds_psprel (unsigned int offset)
1904 {
1905 unw_rec_list *ptr = alloc_record (preds_psprel);
1906 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1907 return ptr;
1908 }
1909
1910 static unw_rec_list *
1911 output_preds_sprel (unsigned int offset)
1912 {
1913 unw_rec_list *ptr = alloc_record (preds_sprel);
1914 ptr->r.record.p.off.sp = offset / 4;
1915 return ptr;
1916 }
1917
1918 static unw_rec_list *
1919 output_fr_mem (unsigned int mask)
1920 {
1921 unw_rec_list *ptr = alloc_record (fr_mem);
1922 unw_rec_list *cur = ptr;
1923
1924 ptr->r.record.p.frmask = mask;
1925 unwind.pending_saves = &ptr->r.record.p;
1926 for (;;)
1927 {
1928 unw_rec_list *prev = cur;
1929
1930 /* Clear least significant set bit. */
1931 mask &= ~(mask & (~mask + 1));
1932 if (!mask)
1933 return ptr;
1934 cur = alloc_record (fr_mem);
1935 cur->r.record.p.frmask = mask;
1936 /* Retain only least significant bit. */
1937 prev->r.record.p.frmask ^= mask;
1938 prev->r.record.p.next = cur;
1939 }
1940 }
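/* Editorial example (not part of the original source): "mask & (~mask + 1)"
   isolates the least significant set bit (the two's-complement trick
   mask & -mask), so a mask of 0x0a is split into one fr_mem record with
   frmask 0x02 and a second with frmask 0x08, i.e. one record per saved
   register.  */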
1941
1942 static unw_rec_list *
1943 output_frgr_mem (unsigned int gr_mask, unsigned int fr_mask)
1944 {
1945 unw_rec_list *ptr = alloc_record (frgr_mem);
1946 unw_rec_list *cur = ptr;
1947
1948 unwind.pending_saves = &cur->r.record.p;
1949 cur->r.record.p.frmask = fr_mask;
1950 while (fr_mask)
1951 {
1952 unw_rec_list *prev = cur;
1953
1954 /* Clear least significant set bit. */
1955 fr_mask &= ~(fr_mask & (~fr_mask + 1));
1956 if (!gr_mask && !fr_mask)
1957 return ptr;
1958 cur = alloc_record (frgr_mem);
1959 cur->r.record.p.frmask = fr_mask;
1960 /* Retain only least significant bit. */
1961 prev->r.record.p.frmask ^= fr_mask;
1962 prev->r.record.p.next = cur;
1963 }
1964 cur->r.record.p.grmask = gr_mask;
1965 for (;;)
1966 {
1967 unw_rec_list *prev = cur;
1968
1969 /* Clear least significant set bit. */
1970 gr_mask &= ~(gr_mask & (~gr_mask + 1));
1971 if (!gr_mask)
1972 return ptr;
1973 cur = alloc_record (frgr_mem);
1974 cur->r.record.p.grmask = gr_mask;
1975 /* Retain only least significant bit. */
1976 prev->r.record.p.grmask ^= gr_mask;
1977 prev->r.record.p.next = cur;
1978 }
1979 }
1980
1981 static unw_rec_list *
1982 output_gr_gr (unsigned int mask, unsigned int reg)
1983 {
1984 unw_rec_list *ptr = alloc_record (gr_gr);
1985 unw_rec_list *cur = ptr;
1986
1987 ptr->r.record.p.grmask = mask;
1988 ptr->r.record.p.r.gr = reg;
1989 unwind.pending_saves = &ptr->r.record.p;
1990 for (;;)
1991 {
1992 unw_rec_list *prev = cur;
1993
1994 /* Clear least significant set bit. */
1995 mask &= ~(mask & (~mask + 1));
1996 if (!mask)
1997 return ptr;
1998 cur = alloc_record (gr_gr);
1999 cur->r.record.p.grmask = mask;
2000 /* Indicate this record shouldn't be output. */
2001 cur->r.record.p.r.gr = REG_NUM;
2002 /* Retain only least significant bit. */
2003 prev->r.record.p.grmask ^= mask;
2004 prev->r.record.p.next = cur;
2005 }
2006 }
2007
2008 static unw_rec_list *
2009 output_gr_mem (unsigned int mask)
2010 {
2011 unw_rec_list *ptr = alloc_record (gr_mem);
2012 unw_rec_list *cur = ptr;
2013
2014 ptr->r.record.p.grmask = mask;
2015 unwind.pending_saves = &ptr->r.record.p;
2016 for (;;)
2017 {
2018 unw_rec_list *prev = cur;
2019
2020 /* Clear least significant set bit. */
2021 mask &= ~(mask & (~mask + 1));
2022 if (!mask)
2023 return ptr;
2024 cur = alloc_record (gr_mem);
2025 cur->r.record.p.grmask = mask;
2026 /* Retain only least significant bit. */
2027 prev->r.record.p.grmask ^= mask;
2028 prev->r.record.p.next = cur;
2029 }
2030 }
2031
2032 static unw_rec_list *
2033 output_br_mem (unsigned int mask)
2034 {
2035 unw_rec_list *ptr = alloc_record (br_mem);
2036 unw_rec_list *cur = ptr;
2037
2038 ptr->r.record.p.brmask = mask;
2039 unwind.pending_saves = &ptr->r.record.p;
2040 for (;;)
2041 {
2042 unw_rec_list *prev = cur;
2043
2044 /* Clear least significant set bit. */
2045 mask &= ~(mask & (~mask + 1));
2046 if (!mask)
2047 return ptr;
2048 cur = alloc_record (br_mem);
2049 cur->r.record.p.brmask = mask;
2050 /* Retain only least significant bit. */
2051 prev->r.record.p.brmask ^= mask;
2052 prev->r.record.p.next = cur;
2053 }
2054 }
2055
2056 static unw_rec_list *
2057 output_br_gr (unsigned int mask, unsigned int reg)
2058 {
2059 unw_rec_list *ptr = alloc_record (br_gr);
2060 unw_rec_list *cur = ptr;
2061
2062 ptr->r.record.p.brmask = mask;
2063 ptr->r.record.p.r.gr = reg;
2064 unwind.pending_saves = &ptr->r.record.p;
2065 for (;;)
2066 {
2067 unw_rec_list *prev = cur;
2068
2069 /* Clear least significant set bit. */
2070 mask &= ~(mask & (~mask + 1));
2071 if (!mask)
2072 return ptr;
2073 cur = alloc_record (br_gr);
2074 cur->r.record.p.brmask = mask;
2075 /* Indicate this record shouldn't be output. */
2076 cur->r.record.p.r.gr = REG_NUM;
2077 /* Retain only least significant bit. */
2078 prev->r.record.p.brmask ^= mask;
2079 prev->r.record.p.next = cur;
2080 }
2081 }
2082
2083 static unw_rec_list *
2084 output_spill_base (unsigned int offset)
2085 {
2086 unw_rec_list *ptr = alloc_record (spill_base);
2087 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2088 return ptr;
2089 }
2090
2091 static unw_rec_list *
2092 output_unat_when (void)
2093 {
2094 unw_rec_list *ptr = alloc_record (unat_when);
2095 return ptr;
2096 }
2097
2098 static unw_rec_list *
2099 output_unat_gr (unsigned int gr)
2100 {
2101 unw_rec_list *ptr = alloc_record (unat_gr);
2102 ptr->r.record.p.r.gr = gr;
2103 return ptr;
2104 }
2105
2106 static unw_rec_list *
2107 output_unat_psprel (unsigned int offset)
2108 {
2109 unw_rec_list *ptr = alloc_record (unat_psprel);
2110 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2111 return ptr;
2112 }
2113
2114 static unw_rec_list *
2115 output_unat_sprel (unsigned int offset)
2116 {
2117 unw_rec_list *ptr = alloc_record (unat_sprel);
2118 ptr->r.record.p.off.sp = offset / 4;
2119 return ptr;
2120 }
2121
2122 static unw_rec_list *
2123 output_lc_when (void)
2124 {
2125 unw_rec_list *ptr = alloc_record (lc_when);
2126 return ptr;
2127 }
2128
2129 static unw_rec_list *
2130 output_lc_gr (unsigned int gr)
2131 {
2132 unw_rec_list *ptr = alloc_record (lc_gr);
2133 ptr->r.record.p.r.gr = gr;
2134 return ptr;
2135 }
2136
2137 static unw_rec_list *
2138 output_lc_psprel (unsigned int offset)
2139 {
2140 unw_rec_list *ptr = alloc_record (lc_psprel);
2141 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2142 return ptr;
2143 }
2144
2145 static unw_rec_list *
2146 output_lc_sprel (unsigned int offset)
2147 {
2148 unw_rec_list *ptr = alloc_record (lc_sprel);
2149 ptr->r.record.p.off.sp = offset / 4;
2150 return ptr;
2151 }
2152
2153 static unw_rec_list *
2154 output_fpsr_when (void)
2155 {
2156 unw_rec_list *ptr = alloc_record (fpsr_when);
2157 return ptr;
2158 }
2159
2160 static unw_rec_list *
2161 output_fpsr_gr (unsigned int gr)
2162 {
2163 unw_rec_list *ptr = alloc_record (fpsr_gr);
2164 ptr->r.record.p.r.gr = gr;
2165 return ptr;
2166 }
2167
2168 static unw_rec_list *
2169 output_fpsr_psprel (unsigned int offset)
2170 {
2171 unw_rec_list *ptr = alloc_record (fpsr_psprel);
2172 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2173 return ptr;
2174 }
2175
2176 static unw_rec_list *
2177 output_fpsr_sprel (unsigned int offset)
2178 {
2179 unw_rec_list *ptr = alloc_record (fpsr_sprel);
2180 ptr->r.record.p.off.sp = offset / 4;
2181 return ptr;
2182 }
2183
2184 static unw_rec_list *
2185 output_priunat_when_gr (void)
2186 {
2187 unw_rec_list *ptr = alloc_record (priunat_when_gr);
2188 return ptr;
2189 }
2190
2191 static unw_rec_list *
2192 output_priunat_when_mem (void)
2193 {
2194 unw_rec_list *ptr = alloc_record (priunat_when_mem);
2195 return ptr;
2196 }
2197
2198 static unw_rec_list *
2199 output_priunat_gr (unsigned int gr)
2200 {
2201 unw_rec_list *ptr = alloc_record (priunat_gr);
2202 ptr->r.record.p.r.gr = gr;
2203 return ptr;
2204 }
2205
2206 static unw_rec_list *
2207 output_priunat_psprel (unsigned int offset)
2208 {
2209 unw_rec_list *ptr = alloc_record (priunat_psprel);
2210 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2211 return ptr;
2212 }
2213
2214 static unw_rec_list *
2215 output_priunat_sprel (unsigned int offset)
2216 {
2217 unw_rec_list *ptr = alloc_record (priunat_sprel);
2218 ptr->r.record.p.off.sp = offset / 4;
2219 return ptr;
2220 }
2221
2222 static unw_rec_list *
2223 output_bsp_when (void)
2224 {
2225 unw_rec_list *ptr = alloc_record (bsp_when);
2226 return ptr;
2227 }
2228
2229 static unw_rec_list *
2230 output_bsp_gr (unsigned int gr)
2231 {
2232 unw_rec_list *ptr = alloc_record (bsp_gr);
2233 ptr->r.record.p.r.gr = gr;
2234 return ptr;
2235 }
2236
2237 static unw_rec_list *
2238 output_bsp_psprel (unsigned int offset)
2239 {
2240 unw_rec_list *ptr = alloc_record (bsp_psprel);
2241 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2242 return ptr;
2243 }
2244
2245 static unw_rec_list *
2246 output_bsp_sprel (unsigned int offset)
2247 {
2248 unw_rec_list *ptr = alloc_record (bsp_sprel);
2249 ptr->r.record.p.off.sp = offset / 4;
2250 return ptr;
2251 }
2252
2253 static unw_rec_list *
2254 output_bspstore_when (void)
2255 {
2256 unw_rec_list *ptr = alloc_record (bspstore_when);
2257 return ptr;
2258 }
2259
2260 static unw_rec_list *
2261 output_bspstore_gr (unsigned int gr)
2262 {
2263 unw_rec_list *ptr = alloc_record (bspstore_gr);
2264 ptr->r.record.p.r.gr = gr;
2265 return ptr;
2266 }
2267
2268 static unw_rec_list *
2269 output_bspstore_psprel (unsigned int offset)
2270 {
2271 unw_rec_list *ptr = alloc_record (bspstore_psprel);
2272 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2273 return ptr;
2274 }
2275
2276 static unw_rec_list *
2277 output_bspstore_sprel (unsigned int offset)
2278 {
2279 unw_rec_list *ptr = alloc_record (bspstore_sprel);
2280 ptr->r.record.p.off.sp = offset / 4;
2281 return ptr;
2282 }
2283
2284 static unw_rec_list *
2285 output_rnat_when (void)
2286 {
2287 unw_rec_list *ptr = alloc_record (rnat_when);
2288 return ptr;
2289 }
2290
2291 static unw_rec_list *
2292 output_rnat_gr (unsigned int gr)
2293 {
2294 unw_rec_list *ptr = alloc_record (rnat_gr);
2295 ptr->r.record.p.r.gr = gr;
2296 return ptr;
2297 }
2298
2299 static unw_rec_list *
2300 output_rnat_psprel (unsigned int offset)
2301 {
2302 unw_rec_list *ptr = alloc_record (rnat_psprel);
2303 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2304 return ptr;
2305 }
2306
2307 static unw_rec_list *
2308 output_rnat_sprel (unsigned int offset)
2309 {
2310 unw_rec_list *ptr = alloc_record (rnat_sprel);
2311 ptr->r.record.p.off.sp = offset / 4;
2312 return ptr;
2313 }
2314
2315 static unw_rec_list *
2316 output_unwabi (unsigned long abi, unsigned long context)
2317 {
2318 unw_rec_list *ptr = alloc_record (unwabi);
2319 ptr->r.record.p.abi = abi;
2320 ptr->r.record.p.context = context;
2321 return ptr;
2322 }
2323
2324 static unw_rec_list *
2325 output_epilogue (unsigned long ecount)
2326 {
2327 unw_rec_list *ptr = alloc_record (epilogue);
2328 ptr->r.record.b.ecount = ecount;
2329 return ptr;
2330 }
2331
2332 static unw_rec_list *
2333 output_label_state (unsigned long label)
2334 {
2335 unw_rec_list *ptr = alloc_record (label_state);
2336 ptr->r.record.b.label = label;
2337 return ptr;
2338 }
2339
2340 static unw_rec_list *
2341 output_copy_state (unsigned long label)
2342 {
2343 unw_rec_list *ptr = alloc_record (copy_state);
2344 ptr->r.record.b.label = label;
2345 return ptr;
2346 }
2347
2348 static unw_rec_list *
2349 output_spill_psprel (unsigned int ab,
2350 unsigned int reg,
2351 unsigned int offset,
2352 unsigned int predicate)
2353 {
2354 unw_rec_list *ptr = alloc_record (predicate ? spill_psprel_p : spill_psprel);
2355 ptr->r.record.x.ab = ab;
2356 ptr->r.record.x.reg = reg;
2357 ptr->r.record.x.where.pspoff = ENCODED_PSP_OFFSET (offset);
2358 ptr->r.record.x.qp = predicate;
2359 return ptr;
2360 }
2361
2362 static unw_rec_list *
2363 output_spill_sprel (unsigned int ab,
2364 unsigned int reg,
2365 unsigned int offset,
2366 unsigned int predicate)
2367 {
2368 unw_rec_list *ptr = alloc_record (predicate ? spill_sprel_p : spill_sprel);
2369 ptr->r.record.x.ab = ab;
2370 ptr->r.record.x.reg = reg;
2371 ptr->r.record.x.where.spoff = offset / 4;
2372 ptr->r.record.x.qp = predicate;
2373 return ptr;
2374 }
2375
2376 static unw_rec_list *
2377 output_spill_reg (unsigned int ab,
2378 unsigned int reg,
2379 unsigned int targ_reg,
2380 unsigned int xy,
2381 unsigned int predicate)
2382 {
2383 unw_rec_list *ptr = alloc_record (predicate ? spill_reg_p : spill_reg);
2384 ptr->r.record.x.ab = ab;
2385 ptr->r.record.x.reg = reg;
2386 ptr->r.record.x.where.reg = targ_reg;
2387 ptr->r.record.x.xy = xy;
2388 ptr->r.record.x.qp = predicate;
2389 return ptr;
2390 }
2391
2392 /* Given a unw_rec_list, process it in the correct format with the
2393    specified function.  */
2394
2395 static void
2396 process_one_record (unw_rec_list *ptr, vbyte_func f)
2397 {
2398 unsigned int fr_mask, gr_mask;
2399
2400 switch (ptr->r.type)
2401 {
2402 /* This is a dummy record that takes up no space in the output. */
2403 case endp:
2404 break;
2405
2406 case gr_mem:
2407 case fr_mem:
2408 case br_mem:
2409 case frgr_mem:
2410 /* These are taken care of by prologue/prologue_gr. */
2411 break;
2412
2413 case prologue_gr:
2414 case prologue:
2415 if (ptr->r.type == prologue_gr)
2416 output_R2_format (f, ptr->r.record.r.grmask,
2417 ptr->r.record.r.grsave, ptr->r.record.r.rlen);
2418 else
2419 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2420
2421 /* Output descriptor(s) for union of register spills (if any). */
2422 gr_mask = ptr->r.record.r.mask.gr_mem;
2423 fr_mask = ptr->r.record.r.mask.fr_mem;
2424 if (fr_mask)
2425 {
2426 if ((fr_mask & ~0xfUL) == 0)
2427 output_P6_format (f, fr_mem, fr_mask);
2428 else
2429 {
2430 output_P5_format (f, gr_mask, fr_mask);
2431 gr_mask = 0;
2432 }
2433 }
2434 if (gr_mask)
2435 output_P6_format (f, gr_mem, gr_mask);
2436 if (ptr->r.record.r.mask.br_mem)
2437 output_P1_format (f, ptr->r.record.r.mask.br_mem);
2438
2439 /* output imask descriptor if necessary: */
2440 if (ptr->r.record.r.mask.i)
2441 output_P4_format (f, ptr->r.record.r.mask.i,
2442 ptr->r.record.r.imask_size);
2443 break;
2444
2445 case body:
2446 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2447 break;
2448 case mem_stack_f:
2449 case mem_stack_v:
2450 output_P7_format (f, ptr->r.type, ptr->r.record.p.t,
2451 ptr->r.record.p.size);
2452 break;
2453 case psp_gr:
2454 case rp_gr:
2455 case pfs_gr:
2456 case preds_gr:
2457 case unat_gr:
2458 case lc_gr:
2459 case fpsr_gr:
2460 case priunat_gr:
2461 case bsp_gr:
2462 case bspstore_gr:
2463 case rnat_gr:
2464 output_P3_format (f, ptr->r.type, ptr->r.record.p.r.gr);
2465 break;
2466 case rp_br:
2467 output_P3_format (f, rp_br, ptr->r.record.p.r.br);
2468 break;
2469 case psp_sprel:
2470 output_P7_format (f, psp_sprel, ptr->r.record.p.off.sp, 0);
2471 break;
2472 case rp_when:
2473 case pfs_when:
2474 case preds_when:
2475 case unat_when:
2476 case lc_when:
2477 case fpsr_when:
2478 output_P7_format (f, ptr->r.type, ptr->r.record.p.t, 0);
2479 break;
2480 case rp_psprel:
2481 case pfs_psprel:
2482 case preds_psprel:
2483 case unat_psprel:
2484 case lc_psprel:
2485 case fpsr_psprel:
2486 case spill_base:
2487 output_P7_format (f, ptr->r.type, ptr->r.record.p.off.psp, 0);
2488 break;
2489 case rp_sprel:
2490 case pfs_sprel:
2491 case preds_sprel:
2492 case unat_sprel:
2493 case lc_sprel:
2494 case fpsr_sprel:
2495 case priunat_sprel:
2496 case bsp_sprel:
2497 case bspstore_sprel:
2498 case rnat_sprel:
2499 output_P8_format (f, ptr->r.type, ptr->r.record.p.off.sp);
2500 break;
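    /* gr_gr and br_gr records arrive as chains of records, one per
       saved register (see output_gr_gr / output_br_gr); fold the masks
       of the whole chain into a single P9/P2 descriptor, emitted only
       for the head record (the one carrying a real GR number).  */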
2501 case gr_gr:
2502 if (ptr->r.record.p.r.gr < REG_NUM)
2503 {
2504 const unw_rec_list *cur = ptr;
2505
2506 gr_mask = cur->r.record.p.grmask;
2507 while ((cur = cur->r.record.p.next) != NULL)
2508 gr_mask |= cur->r.record.p.grmask;
2509 output_P9_format (f, gr_mask, ptr->r.record.p.r.gr);
2510 }
2511 break;
2512 case br_gr:
2513 if (ptr->r.record.p.r.gr < REG_NUM)
2514 {
2515 const unw_rec_list *cur = ptr;
2516
2517 gr_mask = cur->r.record.p.brmask;
2518 while ((cur = cur->r.record.p.next) != NULL)
2519 gr_mask |= cur->r.record.p.brmask;
2520 output_P2_format (f, gr_mask, ptr->r.record.p.r.gr);
2521 }
2522 break;
2523 case spill_mask:
2524 as_bad (_("spill_mask record unimplemented."));
2525 break;
2526 case priunat_when_gr:
2527 case priunat_when_mem:
2528 case bsp_when:
2529 case bspstore_when:
2530 case rnat_when:
2531 output_P8_format (f, ptr->r.type, ptr->r.record.p.t);
2532 break;
2533 case priunat_psprel:
2534 case bsp_psprel:
2535 case bspstore_psprel:
2536 case rnat_psprel:
2537 output_P8_format (f, ptr->r.type, ptr->r.record.p.off.psp);
2538 break;
2539 case unwabi:
2540 output_P10_format (f, ptr->r.record.p.abi, ptr->r.record.p.context);
2541 break;
2542 case epilogue:
2543 output_B3_format (f, ptr->r.record.b.ecount, ptr->r.record.b.t);
2544 break;
2545 case label_state:
2546 case copy_state:
2547 output_B4_format (f, ptr->r.type, ptr->r.record.b.label);
2548 break;
2549 case spill_psprel:
2550 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2551 ptr->r.record.x.reg, ptr->r.record.x.t,
2552 ptr->r.record.x.where.pspoff);
2553 break;
2554 case spill_sprel:
2555 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2556 ptr->r.record.x.reg, ptr->r.record.x.t,
2557 ptr->r.record.x.where.spoff);
2558 break;
2559 case spill_reg:
2560 output_X2_format (f, ptr->r.record.x.ab, ptr->r.record.x.reg,
2561 ptr->r.record.x.xy >> 1, ptr->r.record.x.xy,
2562 ptr->r.record.x.where.reg, ptr->r.record.x.t);
2563 break;
2564 case spill_psprel_p:
2565 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2566 ptr->r.record.x.ab, ptr->r.record.x.reg,
2567 ptr->r.record.x.t, ptr->r.record.x.where.pspoff);
2568 break;
2569 case spill_sprel_p:
2570 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2571 ptr->r.record.x.ab, ptr->r.record.x.reg,
2572 ptr->r.record.x.t, ptr->r.record.x.where.spoff);
2573 break;
2574 case spill_reg_p:
2575 output_X4_format (f, ptr->r.record.x.qp, ptr->r.record.x.ab,
2576 ptr->r.record.x.reg, ptr->r.record.x.xy >> 1,
2577 ptr->r.record.x.xy, ptr->r.record.x.where.reg,
2578 ptr->r.record.x.t);
2579 break;
2580 default:
2581 as_bad (_("record_type_not_valid"));
2582 break;
2583 }
2584 }
2585
2586 /* Given a list of unw_rec_list records, process all of them with
2587    the specified function.  */
2588 static void
2589 process_unw_records (unw_rec_list *list, vbyte_func f)
2590 {
2591 unw_rec_list *ptr;
2592 for (ptr = list; ptr; ptr = ptr->next)
2593 process_one_record (ptr, f);
2594 }
2595
2596 /* Determine the size of a record list in bytes. */
2597 static int
2598 calc_record_size (unw_rec_list *list)
2599 {
2600 vbyte_count = 0;
2601 process_unw_records (list, count_output);
2602 return vbyte_count;
2603 }
2604
2605 /* Return the number of bits set in the input value.
2606 Perhaps this has a better place... */
2607 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
2608 # define popcount __builtin_popcount
2609 #else
2610 static int
2611 popcount (unsigned x)
2612 {
2613 static const unsigned char popcnt[16] =
2614 {
2615 0, 1, 1, 2,
2616 1, 2, 2, 3,
2617 1, 2, 2, 3,
2618 2, 3, 3, 4
2619 };
2620
2621 if (x < NELEMS (popcnt))
2622 return popcnt[x];
2623 return popcnt[x % NELEMS (popcnt)] + popcount (x / NELEMS (popcnt));
2624 }
2625 #endif
2626
2627 /* Update IMASK bitmask to reflect the fact that one or more registers
2628 of type TYPE are saved starting at instruction with index T. If N
2629 bits are set in REGMASK, it is assumed that instructions T through
2630 T+N-1 save these registers.
2631
2632 TYPE values:
2633 0: no save
2634 1: instruction saves next fp reg
2635 2: instruction saves next general reg
2636 3: instruction saves next branch reg */
2637 static void
2638 set_imask (unw_rec_list *region,
2639 unsigned long regmask,
2640 unsigned long t,
2641 unsigned int type)
2642 {
2643 unsigned char *imask;
2644 unsigned long imask_size;
2645 unsigned int i;
2646 int pos;
2647
2648 imask = region->r.record.r.mask.i;
2649 imask_size = region->r.record.r.imask_size;
2650 if (!imask)
2651 {
2652 imask_size = (region->r.record.r.rlen * 2 + 7) / 8 + 1;
2653 imask = XCNEWVEC (unsigned char, imask_size);
2654
2655 region->r.record.r.imask_size = imask_size;
2656 region->r.record.r.mask.i = imask;
2657 }
2658
2659 i = (t / 4) + 1;
2660 pos = 2 * (3 - t % 4);
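  /* Four 2-bit fields per imask byte, most significant pair first;
     when POS wraps past the last pair, we move on to the top pair of
     the next byte.  */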
2661 while (regmask)
2662 {
2663 if (i >= imask_size)
2664 {
2665 as_bad (_("Ignoring attempt to spill beyond end of region"));
2666 return;
2667 }
2668
2669 imask[i] |= (type & 0x3) << pos;
2670
2671 regmask &= (regmask - 1);
2672 pos -= 2;
2673 if (pos < 0)
2674 {
2675 	      pos = 6;
2676 ++i;
2677 }
2678 }
2679 }
2680
2681 /* Return the number of instruction slots from FIRST_ADDR to SLOT_ADDR.
2682 SLOT_FRAG is the frag containing SLOT_ADDR, and FIRST_FRAG is the frag
2683 containing FIRST_ADDR. If BEFORE_RELAX, then we use worst-case estimates
2684 for frag sizes. */
2685
2686 static unsigned long
2687 slot_index (unsigned long slot_addr,
2688 fragS *slot_frag,
2689 unsigned long first_addr,
2690 fragS *first_frag,
2691 int before_relax)
2692 {
2693 unsigned long s_index = 0;
2694
2695 /* First time we are called, the initial address and frag are invalid. */
2696 if (first_addr == 0)
2697 return 0;
2698
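  /* Addresses here are host addresses of slots inside the frags'
     literal buffers, with the slot number (0-2) in the two low bits;
     each 16-byte bundle holds three instruction slots, hence the
     recurring "3 * (bytes >> 4)" conversions below.  */
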
2699 /* If the two addresses are in different frags, then we need to add in
2700 the remaining size of this frag, and then the entire size of intermediate
2701 frags. */
2702 while (slot_frag != first_frag)
2703 {
2704 unsigned long start_addr = (unsigned long) &first_frag->fr_literal;
2705
2706 if (! before_relax)
2707 {
2708 /* We can get the final addresses only during and after
2709 relaxation. */
2710 if (first_frag->fr_next && first_frag->fr_next->fr_address)
2711 s_index += 3 * ((first_frag->fr_next->fr_address
2712 - first_frag->fr_address
2713 - first_frag->fr_fix) >> 4);
2714 }
2715 else
2716 /* We don't know what the final addresses will be. We try our
2717 best to estimate. */
2718 switch (first_frag->fr_type)
2719 {
2720 default:
2721 break;
2722
2723 case rs_space:
2724 as_fatal (_("Only constant space allocation is supported"));
2725 break;
2726
2727 case rs_align:
2728 case rs_align_code:
2729 case rs_align_test:
2730 /* Take alignment into account. Assume the worst case
2731 before relaxation. */
2732 s_index += 3 * ((1 << first_frag->fr_offset) >> 4);
2733 break;
2734
2735 case rs_org:
2736 if (first_frag->fr_symbol)
2737 {
2738 as_fatal (_("Only constant offsets are supported"));
2739 break;
2740 }
2741 /* Fall through. */
2742 case rs_fill:
2743 s_index += 3 * (first_frag->fr_offset >> 4);
2744 break;
2745 }
2746
2747 /* Add in the full size of the frag converted to instruction slots. */
2748 s_index += 3 * (first_frag->fr_fix >> 4);
2749 /* Subtract away the initial part before first_addr. */
2750 s_index -= (3 * ((first_addr >> 4) - (start_addr >> 4))
2751 + ((first_addr & 0x3) - (start_addr & 0x3)));
2752
2753 /* Move to the beginning of the next frag. */
2754 first_frag = first_frag->fr_next;
2755 first_addr = (unsigned long) &first_frag->fr_literal;
2756
2757 /* This can happen if there is section switching in the middle of a
2758 function, causing the frag chain for the function to be broken.
2759 It is too difficult to recover safely from this problem, so we just
2760 exit with an error. */
2761 if (first_frag == NULL)
2762 as_fatal (_("Section switching in code is not supported."));
2763 }
2764
2765 /* Add in the used part of the last frag. */
2766 s_index += (3 * ((slot_addr >> 4) - (first_addr >> 4))
2767 + ((slot_addr & 0x3) - (first_addr & 0x3)));
2768 return s_index;
2769 }
2770
2771 /* Optimize unwind record directives. */
2772
2773 static unw_rec_list *
2774 optimize_unw_records (unw_rec_list *list)
2775 {
2776 if (!list)
2777 return NULL;
2778
2779 /* If the only unwind record is ".prologue" or ".prologue" followed
2780 by ".body", then we can optimize the unwind directives away. */
2781 if (list->r.type == prologue
2782 && (list->next->r.type == endp
2783 || (list->next->r.type == body && list->next->next->r.type == endp)))
2784 return NULL;
2785
2786 return list;
2787 }
2788
2789 /* Given a complete record list, process any records which have
2790    unresolved fields (i.e., length counts for a prologue).  After
2791 this has been run, all necessary information should be available
2792 within each record to generate an image. */
2793
2794 static void
2795 fixup_unw_records (unw_rec_list *list, int before_relax)
2796 {
2797 unw_rec_list *ptr, *region = 0;
2798 unsigned long first_addr = 0, rlen = 0, t;
2799 fragS *first_frag = 0;
2800
2801 for (ptr = list; ptr; ptr = ptr->next)
2802 {
2803 if (ptr->slot_number == SLOT_NUM_NOT_SET)
2804 as_bad (_("Insn slot not set in unwind record."));
2805 t = slot_index (ptr->slot_number, ptr->slot_frag,
2806 first_addr, first_frag, before_relax);
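      /* T is the slot index of this directive, counted from the start
	 of the current region (FIRST_ADDR/FIRST_FRAG).  */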
2807 switch (ptr->r.type)
2808 {
2809 case prologue:
2810 case prologue_gr:
2811 case body:
2812 {
2813 unw_rec_list *last;
2814 int size;
2815 unsigned long last_addr = 0;
2816 fragS *last_frag = NULL;
2817
2818 first_addr = ptr->slot_number;
2819 first_frag = ptr->slot_frag;
2820 /* Find either the next body/prologue start, or the end of
2821 the function, and determine the size of the region. */
2822 for (last = ptr->next; last != NULL; last = last->next)
2823 if (last->r.type == prologue || last->r.type == prologue_gr
2824 || last->r.type == body || last->r.type == endp)
2825 {
2826 last_addr = last->slot_number;
2827 last_frag = last->slot_frag;
2828 break;
2829 }
2830 size = slot_index (last_addr, last_frag, first_addr, first_frag,
2831 before_relax);
2832 rlen = ptr->r.record.r.rlen = size;
2833 if (ptr->r.type == body)
2834 /* End of region. */
2835 region = 0;
2836 else
2837 region = ptr;
2838 break;
2839 }
2840 case epilogue:
2841 if (t < rlen)
2842 ptr->r.record.b.t = rlen - 1 - t;
2843 else
2844 /* This happens when a memory-stack-less procedure uses a
2845 ".restore sp" directive at the end of a region to pop
2846 the frame state. */
2847 ptr->r.record.b.t = 0;
2848 break;
2849
2850 case mem_stack_f:
2851 case mem_stack_v:
2852 case rp_when:
2853 case pfs_when:
2854 case preds_when:
2855 case unat_when:
2856 case lc_when:
2857 case fpsr_when:
2858 case priunat_when_gr:
2859 case priunat_when_mem:
2860 case bsp_when:
2861 case bspstore_when:
2862 case rnat_when:
2863 ptr->r.record.p.t = t;
2864 break;
2865
2866 case spill_reg:
2867 case spill_sprel:
2868 case spill_psprel:
2869 case spill_reg_p:
2870 case spill_sprel_p:
2871 case spill_psprel_p:
2872 ptr->r.record.x.t = t;
2873 break;
2874
2875 case frgr_mem:
2876 if (!region)
2877 {
2878 as_bad (_("frgr_mem record before region record!"));
2879 return;
2880 }
2881 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2882 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2883 set_imask (region, ptr->r.record.p.frmask, t, 1);
2884 set_imask (region, ptr->r.record.p.grmask, t, 2);
2885 break;
2886 case fr_mem:
2887 if (!region)
2888 {
2889 as_bad (_("fr_mem record before region record!"));
2890 return;
2891 }
2892 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2893 set_imask (region, ptr->r.record.p.frmask, t, 1);
2894 break;
2895 case gr_mem:
2896 if (!region)
2897 {
2898 as_bad (_("gr_mem record before region record!"));
2899 return;
2900 }
2901 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2902 set_imask (region, ptr->r.record.p.grmask, t, 2);
2903 break;
2904 case br_mem:
2905 if (!region)
2906 {
2907 as_bad (_("br_mem record before region record!"));
2908 return;
2909 }
2910 region->r.record.r.mask.br_mem |= ptr->r.record.p.brmask;
2911 set_imask (region, ptr->r.record.p.brmask, t, 3);
2912 break;
2913
2914 case gr_gr:
2915 if (!region)
2916 {
2917 as_bad (_("gr_gr record before region record!"));
2918 return;
2919 }
2920 set_imask (region, ptr->r.record.p.grmask, t, 2);
2921 break;
2922 case br_gr:
2923 if (!region)
2924 {
2925 as_bad (_("br_gr record before region record!"));
2926 return;
2927 }
2928 set_imask (region, ptr->r.record.p.brmask, t, 3);
2929 break;
2930
2931 default:
2932 break;
2933 }
2934 }
2935 }
2936
2937 /* Estimate the size of a frag before relaxing. We only have one type of frag
2938 to handle here, which is the unwind info frag. */
2939
2940 int
2941 ia64_estimate_size_before_relax (fragS *frag,
2942 asection *segtype ATTRIBUTE_UNUSED)
2943 {
2944 unw_rec_list *list;
2945 int len, size, pad;
2946
2947 /* ??? This code is identical to the first part of ia64_convert_frag. */
2948 list = (unw_rec_list *) frag->fr_opcode;
2949 fixup_unw_records (list, 0);
2950
2951 len = calc_record_size (list);
2952 /* pad to pointer-size boundary. */
2953 pad = len % md.pointer_size;
2954 if (pad != 0)
2955 len += md.pointer_size - pad;
2956 /* Add 8 for the header. */
2957 size = len + 8;
2958 /* Add a pointer for the personality offset. */
2959 if (frag->fr_offset)
2960 size += md.pointer_size;
2961
2962 /* fr_var carries the max_chars that we created the fragment with.
2963 We must, of course, have allocated enough memory earlier. */
2964 gas_assert (frag->fr_var >= size);
2965
2966 return frag->fr_fix + size;
2967 }
2968
2969 /* This function converts a rs_machine_dependent variant frag into a
2970 normal fill frag with the unwind image from the record list. */
2971 void
2972 ia64_convert_frag (fragS *frag)
2973 {
2974 unw_rec_list *list;
2975 int len, size, pad;
2976 valueT flag_value;
2977
2978 /* ??? This code is identical to ia64_estimate_size_before_relax. */
2979 list = (unw_rec_list *) frag->fr_opcode;
2980 fixup_unw_records (list, 0);
2981
2982 len = calc_record_size (list);
2983 /* pad to pointer-size boundary. */
2984 pad = len % md.pointer_size;
2985 if (pad != 0)
2986 len += md.pointer_size - pad;
2987 /* Add 8 for the header. */
2988 size = len + 8;
2989 /* Add a pointer for the personality offset. */
2990 if (frag->fr_offset)
2991 size += md.pointer_size;
2992
2993 /* fr_var carries the max_chars that we created the fragment with.
2994 We must, of course, have allocated enough memory earlier. */
2995 gas_assert (frag->fr_var >= size);
2996
2997 /* Initialize the header area. fr_offset is initialized with
2998 unwind.personality_routine. */
2999 if (frag->fr_offset)
3000 {
3001 if (md.flags & EF_IA_64_ABI64)
3002 flag_value = (bfd_vma) 3 << 32;
3003 else
3004 /* 32-bit unwind info block. */
3005 flag_value = (bfd_vma) 0x1003 << 32;
3006 }
3007 else
3008 flag_value = 0;
3009
3010 md_number_to_chars (frag->fr_literal,
3011 (((bfd_vma) 1 << 48) /* Version. */
3012 | flag_value /* U & E handler flags. */
3013 | (len / md.pointer_size)), /* Length. */
3014 8);
3015
3016 /* Skip the header. */
3017 vbyte_mem_ptr = frag->fr_literal + 8;
3018 process_unw_records (list, output_vbyte_mem);
3019
3020 /* Fill the padding bytes with zeros. */
3021 if (pad != 0)
3022 md_number_to_chars (frag->fr_literal + len + 8 - md.pointer_size + pad, 0,
3023 md.pointer_size - pad);
3024 /* Fill the unwind personality with zeros. */
3025 if (frag->fr_offset)
3026 md_number_to_chars (frag->fr_literal + size - md.pointer_size, 0,
3027 md.pointer_size);
3028
3029 frag->fr_fix += size;
3030 frag->fr_type = rs_fill;
3031 frag->fr_var = 0;
3032 frag->fr_offset = 0;
3033 }
3034
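/* Parse the leading predicate operand of a predicated (.xxx.p) unwind
   directive PO.  The predicate number is returned in *QP (0 if it was
   missing or invalid, after a diagnostic); if a comma follows, the
   next operand is parsed into *E, otherwise E->X_op is set to
   O_absent.  The return value is the separator that ended parsing.  */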
3035 static int
3036 parse_predicate_and_operand (expressionS *e, unsigned *qp, const char *po)
3037 {
3038 int sep = parse_operand_and_eval (e, ',');
3039
3040 *qp = e->X_add_number - REG_P;
3041 if (e->X_op != O_register || *qp > 63)
3042 {
3043 as_bad (_("First operand to .%s must be a predicate"), po);
3044 *qp = 0;
3045 }
3046 else if (*qp == 0)
3047 as_warn (_("Pointless use of p0 as first operand to .%s"), po);
3048 if (sep == ',')
3049 sep = parse_operand_and_eval (e, ',');
3050 else
3051 e->X_op = O_absent;
3052 return sep;
3053 }
3054
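/* Convert the register expression *E to the (AB, REG) pair used in
   spill/restore unwind records: AB 0 covers the preserved GRs r4-r7,
   AB 1 the preserved FRs f2-f5 and f16-f31, AB 2 the preserved BRs
   b1-b5, and AB 3 a small set of special registers (pr, psp, priunat,
   b0 and several ARs).  N and PO name the operand and directive for
   diagnostics.  */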
3055 static void
3056 convert_expr_to_ab_reg (const expressionS *e,
3057 unsigned int *ab,
3058 unsigned int *regp,
3059 const char *po,
3060 int n)
3061 {
3062 unsigned int reg = e->X_add_number;
3063
3064 *ab = *regp = 0; /* Anything valid is good here. */
3065
3066 if (e->X_op != O_register)
3067 reg = REG_GR; /* Anything invalid is good here. */
3068
3069 if (reg >= (REG_GR + 4) && reg <= (REG_GR + 7))
3070 {
3071 *ab = 0;
3072 *regp = reg - REG_GR;
3073 }
3074 else if ((reg >= (REG_FR + 2) && reg <= (REG_FR + 5))
3075 || (reg >= (REG_FR + 16) && reg <= (REG_FR + 31)))
3076 {
3077 *ab = 1;
3078 *regp = reg - REG_FR;
3079 }
3080 else if (reg >= (REG_BR + 1) && reg <= (REG_BR + 5))
3081 {
3082 *ab = 2;
3083 *regp = reg - REG_BR;
3084 }
3085 else
3086 {
3087 *ab = 3;
3088 switch (reg)
3089 {
3090 case REG_PR: *regp = 0; break;
3091 case REG_PSP: *regp = 1; break;
3092 case REG_PRIUNAT: *regp = 2; break;
3093 case REG_BR + 0: *regp = 3; break;
3094 case REG_AR + AR_BSP: *regp = 4; break;
3095 case REG_AR + AR_BSPSTORE: *regp = 5; break;
3096 case REG_AR + AR_RNAT: *regp = 6; break;
3097 case REG_AR + AR_UNAT: *regp = 7; break;
3098 case REG_AR + AR_FPSR: *regp = 8; break;
3099 case REG_AR + AR_PFS: *regp = 9; break;
3100 case REG_AR + AR_LC: *regp = 10; break;
3101
3102 default:
3103 as_bad (_("Operand %d to .%s must be a preserved register"), n, po);
3104 break;
3105 }
3106 }
3107 }
3108
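/* Convert the register expression *E to the (XY, REG) pair naming the
   target of a .spillreg: XY 0 = general, 1 = floating-point,
   2 = branch register.  N and PO name the operand and directive for
   diagnostics.  */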
3109 static void
3110 convert_expr_to_xy_reg (const expressionS *e,
3111 unsigned int *xy,
3112 unsigned int *regp,
3113 const char *po,
3114 int n)
3115 {
3116 unsigned int reg = e->X_add_number;
3117
3118 *xy = *regp = 0; /* Anything valid is good here. */
3119
3120 if (e->X_op != O_register)
3121 reg = REG_GR; /* Anything invalid is good here. */
3122
3123 if (reg >= (REG_GR + 1) && reg <= (REG_GR + 127))
3124 {
3125 *xy = 0;
3126 *regp = reg - REG_GR;
3127 }
3128 else if (reg >= (REG_FR + 2) && reg <= (REG_FR + 127))
3129 {
3130 *xy = 1;
3131 *regp = reg - REG_FR;
3132 }
3133 else if (reg >= REG_BR && reg <= (REG_BR + 7))
3134 {
3135 *xy = 2;
3136 *regp = reg - REG_BR;
3137 }
3138 else
3139 as_bad (_("Operand %d to .%s must be a writable register"), n, po);
3140 }
3141
3142 static void
3143 dot_align (int arg)
3144 {
3145 /* The current frag is an alignment frag. */
3146 align_frag = frag_now;
3147 s_align_bytes (arg);
3148 }
3149
3150 static void
3151 dot_radix (int dummy ATTRIBUTE_UNUSED)
3152 {
3153 char *radix;
3154 int ch;
3155
3156 SKIP_WHITESPACE ();
3157
3158 if (is_it_end_of_statement ())
3159 return;
3160 ch = get_symbol_name (&radix);
3161 ia64_canonicalize_symbol_name (radix);
3162 if (strcasecmp (radix, "C"))
3163 as_bad (_("Radix `%s' unsupported or invalid"), radix);
3164 (void) restore_line_pointer (ch);
3165 demand_empty_rest_of_line ();
3166 }
3167
3168 /* Helper function for .loc directives. If the assembler is not generating
3169 line number info, then we need to remember which instructions have a .loc
3170 directive, and only call dwarf2_gen_line_info for those instructions. */
3171
3172 static void
3173 dot_loc (int x)
3174 {
3175 CURR_SLOT.loc_directive_seen = 1;
3176 dwarf2_directive_loc (x);
3177 }
3178
3179 /* .sbss, .bss etc. are macros that expand into ".section SECNAME". */
3180 static void
3181 dot_special_section (int which)
3182 {
3183 set_section ((char *) special_section_name[which]);
3184 }
3185
3186 /* Return -1 for warning and 0 for error. */
3187
3188 static int
3189 unwind_diagnostic (const char * region, const char *directive)
3190 {
3191 if (md.unwind_check == unwind_check_warning)
3192 {
3193 as_warn (_(".%s outside of %s"), directive, region);
3194 return -1;
3195 }
3196 else
3197 {
3198 as_bad (_(".%s outside of %s"), directive, region);
3199 ignore_rest_of_line ();
3200 return 0;
3201 }
3202 }
3203
3204 /* Return 1 if a directive is in a procedure, -1 if a directive isn't in
3205 a procedure but the unwind directive check is set to warning, 0 if
3206 a directive isn't in a procedure and the unwind directive check is set
3207 to error. */
3208
3209 static int
3210 in_procedure (const char *directive)
3211 {
3212 if (unwind.proc_pending.sym
3213 && (!unwind.saved_text_seg || strcmp (directive, "endp") == 0))
3214 return 1;
3215 return unwind_diagnostic ("procedure", directive);
3216 }
3217
3218 /* Return 1 if a directive is in a prologue, -1 if a directive isn't in
3219 a prologue but the unwind directive check is set to warning, 0 if
3220 a directive isn't in a prologue and the unwind directive check is set
3221 to error. */
3222
3223 static int
3224 in_prologue (const char *directive)
3225 {
3226 int in = in_procedure (directive);
3227
3228 if (in > 0 && !unwind.prologue)
3229 in = unwind_diagnostic ("prologue", directive);
3230 check_pending_save ();
3231 return in;
3232 }
3233
3234 /* Return 1 if a directive is in a body, -1 if a directive isn't in
3235 a body but the unwind directive check is set to warning, 0 if
3236 a directive isn't in a body and the unwind directive check is set
3237 to error. */
3238
3239 static int
3240 in_body (const char *directive)
3241 {
3242 int in = in_procedure (directive);
3243
3244 if (in > 0 && !unwind.body)
3245 in = unwind_diagnostic ("body region", directive);
3246 return in;
3247 }
3248
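/* Append PTR (which may be NULL, e.g. after a parse error) to the
   unwind record list of the current procedure.  SEP is the separator
   returned by the operand parser: ',' means a tag follows (parsed but
   not supported yet), and anything other than NOT_A_CHAR means the
   rest of the line must now be empty.  */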
3249 static void
3250 add_unwind_entry (unw_rec_list *ptr, int sep)
3251 {
3252 if (ptr)
3253 {
3254 if (unwind.tail)
3255 unwind.tail->next = ptr;
3256 else
3257 unwind.list = ptr;
3258 unwind.tail = ptr;
3259
3260 /* The current entry can in fact be a chain of unwind entries. */
3261 if (unwind.current_entry == NULL)
3262 unwind.current_entry = ptr;
3263 }
3264
3265 /* The current entry can in fact be a chain of unwind entries. */
3266 if (unwind.current_entry == NULL)
3267 unwind.current_entry = ptr;
3268
3269 if (sep == ',')
3270 {
3271 char *name;
3272 /* Parse a tag permitted for the current directive. */
3273 int ch;
3274
3275 SKIP_WHITESPACE ();
3276 ch = get_symbol_name (&name);
3277 /* FIXME: For now, just issue a warning that this isn't implemented. */
3278 {
3279 static int warned;
3280
3281 if (!warned)
3282 {
3283 warned = 1;
3284 as_warn (_("Tags on unwind pseudo-ops aren't supported, yet"));
3285 }
3286 }
3287 (void) restore_line_pointer (ch);
3288 }
3289 if (sep != NOT_A_CHAR)
3290 demand_empty_rest_of_line ();
3291 }
3292
3293 static void
3294 dot_fframe (int dummy ATTRIBUTE_UNUSED)
3295 {
3296 expressionS e;
3297 int sep;
3298
3299 if (!in_prologue ("fframe"))
3300 return;
3301
3302 sep = parse_operand_and_eval (&e, ',');
3303
3304 if (e.X_op != O_constant)
3305 {
3306 as_bad (_("First operand to .fframe must be a constant"));
3307 e.X_add_number = 0;
3308 }
3309 add_unwind_entry (output_mem_stack_f (e.X_add_number), sep);
3310 }
3311
3312 static void
3313 dot_vframe (int dummy ATTRIBUTE_UNUSED)
3314 {
3315 expressionS e;
3316 unsigned reg;
3317 int sep;
3318
3319 if (!in_prologue ("vframe"))
3320 return;
3321
3322 sep = parse_operand_and_eval (&e, ',');
3323 reg = e.X_add_number - REG_GR;
3324 if (e.X_op != O_register || reg > 127)
3325 {
3326 as_bad (_("First operand to .vframe must be a general register"));
3327 reg = 0;
3328 }
3329 add_unwind_entry (output_mem_stack_v (), sep);
3330 if (! (unwind.prologue_mask & 2))
3331 add_unwind_entry (output_psp_gr (reg), NOT_A_CHAR);
3332 else if (reg != unwind.prologue_gr
3333 + (unsigned) popcount (unwind.prologue_mask & -(2 << 1)))
3334 as_warn (_("Operand of .vframe contradicts .prologue"));
3335 }
3336
3337 static void
3338 dot_vframesp (int psp)
3339 {
3340 expressionS e;
3341 int sep;
3342
3343 if (psp)
3344 as_warn (_(".vframepsp is meaningless, assuming .vframesp was meant"));
3345
3346 if (!in_prologue ("vframesp"))
3347 return;
3348
3349 sep = parse_operand_and_eval (&e, ',');
3350 if (e.X_op != O_constant)
3351 {
3352 as_bad (_("Operand to .vframesp must be a constant (sp-relative offset)"));
3353 e.X_add_number = 0;
3354 }
3355 add_unwind_entry (output_mem_stack_v (), sep);
3356 add_unwind_entry (output_psp_sprel (e.X_add_number), NOT_A_CHAR);
3357 }
3358
3359 static void
3360 dot_save (int dummy ATTRIBUTE_UNUSED)
3361 {
3362 expressionS e1, e2;
3363 unsigned reg1, reg2;
3364 int sep;
3365
3366 if (!in_prologue ("save"))
3367 return;
3368
3369 sep = parse_operand_and_eval (&e1, ',');
3370 if (sep == ',')
3371 sep = parse_operand_and_eval (&e2, ',');
3372 else
3373 e2.X_op = O_absent;
3374
3375 reg1 = e1.X_add_number;
3376   /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'.  */
3377 if (e1.X_op != O_register)
3378 {
3379 as_bad (_("First operand to .save not a register"));
3380 reg1 = REG_PR; /* Anything valid is good here. */
3381 }
3382 reg2 = e2.X_add_number - REG_GR;
3383 if (e2.X_op != O_register || reg2 > 127)
3384 {
3385 as_bad (_("Second operand to .save not a valid register"));
3386 reg2 = 0;
3387 }
3388 switch (reg1)
3389 {
3390 case REG_AR + AR_BSP:
3391 add_unwind_entry (output_bsp_when (), sep);
3392 add_unwind_entry (output_bsp_gr (reg2), NOT_A_CHAR);
3393 break;
3394 case REG_AR + AR_BSPSTORE:
3395 add_unwind_entry (output_bspstore_when (), sep);
3396 add_unwind_entry (output_bspstore_gr (reg2), NOT_A_CHAR);
3397 break;
3398 case REG_AR + AR_RNAT:
3399 add_unwind_entry (output_rnat_when (), sep);
3400 add_unwind_entry (output_rnat_gr (reg2), NOT_A_CHAR);
3401 break;
3402 case REG_AR + AR_UNAT:
3403 add_unwind_entry (output_unat_when (), sep);
3404 add_unwind_entry (output_unat_gr (reg2), NOT_A_CHAR);
3405 break;
3406 case REG_AR + AR_FPSR:
3407 add_unwind_entry (output_fpsr_when (), sep);
3408 add_unwind_entry (output_fpsr_gr (reg2), NOT_A_CHAR);
3409 break;
3410 case REG_AR + AR_PFS:
3411 add_unwind_entry (output_pfs_when (), sep);
3412 if (! (unwind.prologue_mask & 4))
3413 add_unwind_entry (output_pfs_gr (reg2), NOT_A_CHAR);
3414 else if (reg2 != unwind.prologue_gr
3415 + (unsigned) popcount (unwind.prologue_mask & -(4 << 1)))
3416 as_warn (_("Second operand of .save contradicts .prologue"));
3417 break;
3418 case REG_AR + AR_LC:
3419 add_unwind_entry (output_lc_when (), sep);
3420 add_unwind_entry (output_lc_gr (reg2), NOT_A_CHAR);
3421 break;
3422 case REG_BR:
3423 add_unwind_entry (output_rp_when (), sep);
3424 if (! (unwind.prologue_mask & 8))
3425 add_unwind_entry (output_rp_gr (reg2), NOT_A_CHAR);
3426 else if (reg2 != unwind.prologue_gr)
3427 as_warn (_("Second operand of .save contradicts .prologue"));
3428 break;
3429 case REG_PR:
3430 add_unwind_entry (output_preds_when (), sep);
3431 if (! (unwind.prologue_mask & 1))
3432 add_unwind_entry (output_preds_gr (reg2), NOT_A_CHAR);
3433 else if (reg2 != unwind.prologue_gr
3434 + (unsigned) popcount (unwind.prologue_mask & -(1 << 1)))
3435 as_warn (_("Second operand of .save contradicts .prologue"));
3436 break;
3437 case REG_PRIUNAT:
3438 add_unwind_entry (output_priunat_when_gr (), sep);
3439 add_unwind_entry (output_priunat_gr (reg2), NOT_A_CHAR);
3440 break;
3441 default:
3442 as_bad (_("First operand to .save not a valid register"));
3443 add_unwind_entry (NULL, sep);
3444 break;
3445 }
3446 }
3447
3448 static void
3449 dot_restore (int dummy ATTRIBUTE_UNUSED)
3450 {
3451 expressionS e1;
3452 unsigned long ecount; /* # of _additional_ regions to pop */
3453 int sep;
3454
3455 if (!in_body ("restore"))
3456 return;
3457
3458 sep = parse_operand_and_eval (&e1, ',');
3459 if (e1.X_op != O_register || e1.X_add_number != REG_GR + 12)
3460 as_bad (_("First operand to .restore must be stack pointer (sp)"));
3461
3462 if (sep == ',')
3463 {
3464 expressionS e2;
3465
3466 sep = parse_operand_and_eval (&e2, ',');
3467 if (e2.X_op != O_constant || e2.X_add_number < 0)
3468 {
3469 as_bad (_("Second operand to .restore must be a constant >= 0"));
3470 e2.X_add_number = 0;
3471 }
3472 ecount = e2.X_add_number;
3473 }
3474 else
3475 ecount = unwind.prologue_count - 1;
3476
3477 if (ecount >= unwind.prologue_count)
3478 {
3479 as_bad (_("Epilogue count of %lu exceeds number of nested prologues (%u)"),
3480 ecount + 1, unwind.prologue_count);
3481 ecount = 0;
3482 }
3483
3484 add_unwind_entry (output_epilogue (ecount), sep);
3485
3486 if (ecount < unwind.prologue_count)
3487 unwind.prologue_count -= ecount + 1;
3488 else
3489 unwind.prologue_count = 0;
3490 }
3491
3492 static void
3493 dot_restorereg (int pred)
3494 {
3495 unsigned int qp, ab, reg;
3496 expressionS e;
3497 int sep;
3498 const char * const po = pred ? "restorereg.p" : "restorereg";
3499
3500 if (!in_procedure (po))
3501 return;
3502
3503 if (pred)
3504 sep = parse_predicate_and_operand (&e, &qp, po);
3505 else
3506 {
3507 sep = parse_operand_and_eval (&e, ',');
3508 qp = 0;
3509 }
3510 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
3511
3512 add_unwind_entry (output_spill_reg (ab, reg, 0, 0, qp), sep);
3513 }
3514
3515 static const char *special_linkonce_name[] =
3516 {
3517 ".gnu.linkonce.ia64unw.", ".gnu.linkonce.ia64unwi."
3518 };
3519
3520 static void
3521 start_unwind_section (const segT text_seg, int sec_index)
3522 {
3523 /*
3524 Use a slightly ugly scheme to derive the unwind section names from
3525 the text section name:
3526
3527 text sect. unwind table sect.
3528 name: name: comments:
3529 ---------- ----------------- --------------------------------
3530 .text .IA_64.unwind
3531 .text.foo .IA_64.unwind.text.foo
3532 .foo .IA_64.unwind.foo
3533 .gnu.linkonce.t.foo
3534 .gnu.linkonce.ia64unw.foo
3535 _info .IA_64.unwind_info gas issues error message (ditto)
3536 _infoFOO .IA_64.unwind_infoFOO gas issues error message (ditto)
3537
3538 This mapping is done so that:
3539
3540 (a) An object file with unwind info only in .text will use
3541 unwind section names .IA_64.unwind and .IA_64.unwind_info.
3542 This follows the letter of the ABI and also ensures backwards
3543 compatibility with older toolchains.
3544
3545 (b) An object file with unwind info in multiple text sections
3546 will use separate unwind sections for each text section.
3547 This allows us to properly set the "sh_info" and "sh_link"
3548 fields in SHT_IA_64_UNWIND as required by the ABI and also
3549 lets GNU ld support programs with multiple segments
3550 containing unwind info (as might be the case for certain
3551 embedded applications).
3552
3553 (c) An error is issued if there would be a name clash.
3554 */
3555
3556 const char *text_name, *sec_text_name;
3557 char *sec_name;
3558 const char *prefix = special_section_name [sec_index];
3559 const char *suffix;
3560
3561 sec_text_name = segment_name (text_seg);
3562 text_name = sec_text_name;
3563 if (strncmp (text_name, "_info", 5) == 0)
3564 {
3565 as_bad (_("Illegal section name `%s' (causes unwind section name clash)"),
3566 text_name);
3567 ignore_rest_of_line ();
3568 return;
3569 }
3570 if (strcmp (text_name, ".text") == 0)
3571 text_name = "";
3572
3573 /* Build the unwind section name by appending the (possibly stripped)
3574 text section name to the unwind prefix. */
3575 suffix = text_name;
3576 if (strncmp (text_name, ".gnu.linkonce.t.",
3577 sizeof (".gnu.linkonce.t.") - 1) == 0)
3578 {
3579 prefix = special_linkonce_name [sec_index - SPECIAL_SECTION_UNWIND];
3580 suffix += sizeof (".gnu.linkonce.t.") - 1;
3581 }
3582
3583 sec_name = concat (prefix, suffix, NULL);
3584
3585 /* Handle COMDAT group. */
3586 if ((text_seg->flags & SEC_LINK_ONCE) != 0
3587 && (elf_section_flags (text_seg) & SHF_GROUP) != 0)
3588 {
3589 char *section;
3590 const char *group_name = elf_group_name (text_seg);
3591
3592 if (group_name == NULL)
3593 {
3594 as_bad (_("Group section `%s' has no group signature"),
3595 sec_text_name);
3596 ignore_rest_of_line ();
3597 free (sec_name);
3598 return;
3599 }
3600
3601 /* We have to construct a fake section directive. */
3602 section = concat (sec_name, ",\"aG\",@progbits,", group_name, ",comdat", NULL);
3603 set_section (section);
3604 free (section);
3605 }
3606 else
3607 {
3608 set_section (sec_name);
3609 bfd_set_section_flags (now_seg, SEC_LOAD | SEC_ALLOC | SEC_READONLY);
3610 }
3611
3612 elf_linked_to_section (now_seg) = text_seg;
3613 free (sec_name);
3614 }
3615
3616 static void
3617 generate_unwind_image (const segT text_seg)
3618 {
3619 int size, pad;
3620 unw_rec_list *list;
3621
3622 /* Mark the end of the unwind info, so that we can compute the size of the
3623 last unwind region. */
3624 add_unwind_entry (output_endp (), NOT_A_CHAR);
3625
3626 /* Force out pending instructions, to make sure all unwind records have
3627 a valid slot_number field. */
3628 ia64_flush_insns ();
3629
3630 /* Generate the unwind record. */
3631 list = optimize_unw_records (unwind.list);
3632 fixup_unw_records (list, 1);
3633 size = calc_record_size (list);
3634
3635 if (size > 0 || unwind.force_unwind_entry)
3636 {
3637 unwind.force_unwind_entry = 0;
3638 /* pad to pointer-size boundary. */
3639 pad = size % md.pointer_size;
3640 if (pad != 0)
3641 size += md.pointer_size - pad;
3642 /* Add 8 for the header. */
3643 size += 8;
3644 /* Add a pointer for the personality offset. */
3645 if (unwind.personality_routine)
3646 size += md.pointer_size;
3647 }
3648
3649 /* If there are unwind records, switch sections, and output the info. */
3650 if (size != 0)
3651 {
3652 expressionS exp;
3653 bfd_reloc_code_real_type reloc;
3654
3655 start_unwind_section (text_seg, SPECIAL_SECTION_UNWIND_INFO);
3656
3657 /* Make sure the section has 4 byte alignment for ILP32 and
3658 8 byte alignment for LP64. */
3659 frag_align (md.pointer_size_shift, 0, 0);
3660 record_alignment (now_seg, md.pointer_size_shift);
3661
3662 /* Set expression which points to start of unwind descriptor area. */
3663 unwind.info = expr_build_dot ();
3664
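      /* Emit a variant frag carrying the record list in fr_opcode;
	 ia64_convert_frag turns it into the final unwind image after
	 relaxation, using fr_offset to tell whether a personality
	 routine needs to be appended.  */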
3665 frag_var (rs_machine_dependent, size, size, 0, 0,
3666 (offsetT) (long) unwind.personality_routine,
3667 (char *) list);
3668
3669 /* Add the personality address to the image. */
3670 if (unwind.personality_routine != 0)
3671 {
3672 exp.X_op = O_symbol;
3673 exp.X_add_symbol = unwind.personality_routine;
3674 exp.X_add_number = 0;
3675
3676 if (md.flags & EF_IA_64_BE)
3677 {
3678 if (md.flags & EF_IA_64_ABI64)
3679 reloc = BFD_RELOC_IA64_LTOFF_FPTR64MSB;
3680 else
3681 reloc = BFD_RELOC_IA64_LTOFF_FPTR32MSB;
3682 }
3683 else
3684 {
3685 if (md.flags & EF_IA_64_ABI64)
3686 reloc = BFD_RELOC_IA64_LTOFF_FPTR64LSB;
3687 else
3688 reloc = BFD_RELOC_IA64_LTOFF_FPTR32LSB;
3689 }
3690
3691 fix_new_exp (frag_now, frag_now_fix () - md.pointer_size,
3692 md.pointer_size, &exp, 0, reloc);
3693 unwind.personality_routine = 0;
3694 }
3695 }
3696
3697 free_saved_prologue_counts ();
3698 unwind.list = unwind.tail = unwind.current_entry = NULL;
3699 }
3700
3701 static void
3702 dot_handlerdata (int dummy ATTRIBUTE_UNUSED)
3703 {
3704 if (!in_procedure ("handlerdata"))
3705 return;
3706 unwind.force_unwind_entry = 1;
3707
3708 /* Remember which segment we're in so we can switch back after .endp */
3709 unwind.saved_text_seg = now_seg;
3710 unwind.saved_text_subseg = now_subseg;
3711
3712 /* Generate unwind info into unwind-info section and then leave that
3713 section as the currently active one so dataXX directives go into
3714 the language specific data area of the unwind info block. */
3715 generate_unwind_image (now_seg);
3716 demand_empty_rest_of_line ();
3717 }
3718
3719 static void
3720 dot_unwentry (int dummy ATTRIBUTE_UNUSED)
3721 {
3722 if (!in_procedure ("unwentry"))
3723 return;
3724 unwind.force_unwind_entry = 1;
3725 demand_empty_rest_of_line ();
3726 }
3727
3728 static void
3729 dot_altrp (int dummy ATTRIBUTE_UNUSED)
3730 {
3731 expressionS e;
3732 unsigned reg;
3733
3734 if (!in_prologue ("altrp"))
3735 return;
3736
3737 parse_operand_and_eval (&e, 0);
3738 reg = e.X_add_number - REG_BR;
3739 if (e.X_op != O_register || reg > 7)
3740 {
3741 as_bad (_("First operand to .altrp not a valid branch register"));
3742 reg = 0;
3743 }
3744 add_unwind_entry (output_rp_br (reg), 0);
3745 }
3746
3747 static void
3748 dot_savemem (int psprel)
3749 {
3750 expressionS e1, e2;
3751 int sep;
3752 int reg1, val;
3753 const char * const po = psprel ? "savepsp" : "savesp";
3754
3755 if (!in_prologue (po))
3756 return;
3757
3758 sep = parse_operand_and_eval (&e1, ',');
3759 if (sep == ',')
3760 sep = parse_operand_and_eval (&e2, ',');
3761 else
3762 e2.X_op = O_absent;
3763
3764 reg1 = e1.X_add_number;
3765 val = e2.X_add_number;
3766
3767   /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'.  */
3768 if (e1.X_op != O_register)
3769 {
3770 as_bad (_("First operand to .%s not a register"), po);
3771 reg1 = REG_PR; /* Anything valid is good here. */
3772 }
3773 if (e2.X_op != O_constant)
3774 {
3775 as_bad (_("Second operand to .%s not a constant"), po);
3776 val = 0;
3777 }
3778
3779 switch (reg1)
3780 {
3781 case REG_AR + AR_BSP:
3782 add_unwind_entry (output_bsp_when (), sep);
3783 add_unwind_entry ((psprel
3784 ? output_bsp_psprel
3785 : output_bsp_sprel) (val), NOT_A_CHAR);
3786 break;
3787 case REG_AR + AR_BSPSTORE:
3788 add_unwind_entry (output_bspstore_when (), sep);
3789 add_unwind_entry ((psprel
3790 ? output_bspstore_psprel
3791 : output_bspstore_sprel) (val), NOT_A_CHAR);
3792 break;
3793 case REG_AR + AR_RNAT:
3794 add_unwind_entry (output_rnat_when (), sep);
3795 add_unwind_entry ((psprel
3796 ? output_rnat_psprel
3797 : output_rnat_sprel) (val), NOT_A_CHAR);
3798 break;
3799 case REG_AR + AR_UNAT:
3800 add_unwind_entry (output_unat_when (), sep);
3801 add_unwind_entry ((psprel
3802 ? output_unat_psprel
3803 : output_unat_sprel) (val), NOT_A_CHAR);
3804 break;
3805 case REG_AR + AR_FPSR:
3806 add_unwind_entry (output_fpsr_when (), sep);
3807 add_unwind_entry ((psprel
3808 ? output_fpsr_psprel
3809 : output_fpsr_sprel) (val), NOT_A_CHAR);
3810 break;
3811 case REG_AR + AR_PFS:
3812 add_unwind_entry (output_pfs_when (), sep);
3813 add_unwind_entry ((psprel
3814 ? output_pfs_psprel
3815 : output_pfs_sprel) (val), NOT_A_CHAR);
3816 break;
3817 case REG_AR + AR_LC:
3818 add_unwind_entry (output_lc_when (), sep);
3819 add_unwind_entry ((psprel
3820 ? output_lc_psprel
3821 : output_lc_sprel) (val), NOT_A_CHAR);
3822 break;
3823 case REG_BR:
3824 add_unwind_entry (output_rp_when (), sep);
3825 add_unwind_entry ((psprel
3826 ? output_rp_psprel
3827 : output_rp_sprel) (val), NOT_A_CHAR);
3828 break;
3829 case REG_PR:
3830 add_unwind_entry (output_preds_when (), sep);
3831 add_unwind_entry ((psprel
3832 ? output_preds_psprel
3833 : output_preds_sprel) (val), NOT_A_CHAR);
3834 break;
3835 case REG_PRIUNAT:
3836 add_unwind_entry (output_priunat_when_mem (), sep);
3837 add_unwind_entry ((psprel
3838 ? output_priunat_psprel
3839 : output_priunat_sprel) (val), NOT_A_CHAR);
3840 break;
3841 default:
3842 as_bad (_("First operand to .%s not a valid register"), po);
3843 add_unwind_entry (NULL, sep);
3844 break;
3845 }
3846 }
3847
3848 static void
3849 dot_saveg (int dummy ATTRIBUTE_UNUSED)
3850 {
3851 expressionS e;
3852 unsigned grmask;
3853 int sep;
3854
3855 if (!in_prologue ("save.g"))
3856 return;
3857
3858 sep = parse_operand_and_eval (&e, ',');
3859
3860 grmask = e.X_add_number;
3861 if (e.X_op != O_constant
3862 || e.X_add_number <= 0
3863 || e.X_add_number > 0xf)
3864 {
3865 as_bad (_("First operand to .save.g must be a positive 4-bit constant"));
3866 grmask = 0;
3867 }
3868
3869 if (sep == ',')
3870 {
3871 unsigned reg;
3872 int n = popcount (grmask);
3873
3874 parse_operand_and_eval (&e, 0);
3875 reg = e.X_add_number - REG_GR;
3876 if (e.X_op != O_register || reg > 127)
3877 {
3878 as_bad (_("Second operand to .save.g must be a general register"));
3879 reg = 0;
3880 }
3881 else if (reg > 128U - n)
3882 {
3883 as_bad (_("Second operand to .save.g must be the first of %d general registers"), n);
3884 reg = 0;
3885 }
3886 add_unwind_entry (output_gr_gr (grmask, reg), 0);
3887 }
3888 else
3889 add_unwind_entry (output_gr_mem (grmask), 0);
3890 }
3891
3892 static void
3893 dot_savef (int dummy ATTRIBUTE_UNUSED)
3894 {
3895 expressionS e;
3896
3897 if (!in_prologue ("save.f"))
3898 return;
3899
3900 parse_operand_and_eval (&e, 0);
3901
3902 if (e.X_op != O_constant
3903 || e.X_add_number <= 0
3904 || e.X_add_number > 0xfffff)
3905 {
3906 as_bad (_("Operand to .save.f must be a positive 20-bit constant"));
3907 e.X_add_number = 0;
3908 }
3909 add_unwind_entry (output_fr_mem (e.X_add_number), 0);
3910 }
3911
3912 static void
3913 dot_saveb (int dummy ATTRIBUTE_UNUSED)
3914 {
3915 expressionS e;
3916 unsigned brmask;
3917 int sep;
3918
3919 if (!in_prologue ("save.b"))
3920 return;
3921
3922 sep = parse_operand_and_eval (&e, ',');
3923
3924 brmask = e.X_add_number;
3925 if (e.X_op != O_constant
3926 || e.X_add_number <= 0
3927 || e.X_add_number > 0x1f)
3928 {
3929 as_bad (_("First operand to .save.b must be a positive 5-bit constant"));
3930 brmask = 0;
3931 }
3932
3933 if (sep == ',')
3934 {
3935 unsigned reg;
3936 int n = popcount (brmask);
3937
3938 parse_operand_and_eval (&e, 0);
3939 reg = e.X_add_number - REG_GR;
3940 if (e.X_op != O_register || reg > 127)
3941 {
3942 as_bad (_("Second operand to .save.b must be a general register"));
3943 reg = 0;
3944 }
3945 else if (reg > 128U - n)
3946 {
3947 as_bad (_("Second operand to .save.b must be the first of %d general registers"), n);
3948 reg = 0;
3949 }
3950 add_unwind_entry (output_br_gr (brmask, reg), 0);
3951 }
3952 else
3953 add_unwind_entry (output_br_mem (brmask), 0);
3954 }
3955
3956 static void
3957 dot_savegf (int dummy ATTRIBUTE_UNUSED)
3958 {
3959 expressionS e1, e2;
3960
3961 if (!in_prologue ("save.gf"))
3962 return;
3963
3964 if (parse_operand_and_eval (&e1, ',') == ',')
3965 parse_operand_and_eval (&e2, 0);
3966 else
3967 e2.X_op = O_absent;
3968
3969 if (e1.X_op != O_constant
3970 || e1.X_add_number < 0
3971 || e1.X_add_number > 0xf)
3972 {
3973 as_bad (_("First operand to .save.gf must be a non-negative 4-bit constant"));
3974 e1.X_op = O_absent;
3975 e1.X_add_number = 0;
3976 }
3977 if (e2.X_op != O_constant
3978 || e2.X_add_number < 0
3979 || e2.X_add_number > 0xfffff)
3980 {
3981 as_bad (_("Second operand to .save.gf must be a non-negative 20-bit constant"));
3982 e2.X_op = O_absent;
3983 e2.X_add_number = 0;
3984 }
3985 if (e1.X_op == O_constant
3986 && e2.X_op == O_constant
3987 && e1.X_add_number == 0
3988 && e2.X_add_number == 0)
3989 as_bad (_("Operands to .save.gf may not be both zero"));
3990
3991 add_unwind_entry (output_frgr_mem (e1.X_add_number, e2.X_add_number), 0);
3992 }
3993
3994 static void
3995 dot_spill (int dummy ATTRIBUTE_UNUSED)
3996 {
3997 expressionS e;
3998
3999 if (!in_prologue ("spill"))
4000 return;
4001
4002 parse_operand_and_eval (&e, 0);
4003
4004 if (e.X_op != O_constant)
4005 {
4006 as_bad (_("Operand to .spill must be a constant"));
4007 e.X_add_number = 0;
4008 }
4009 add_unwind_entry (output_spill_base (e.X_add_number), 0);
4010 }
4011
4012 static void
4013 dot_spillreg (int pred)
4014 {
4015 int sep;
4016 unsigned int qp, ab, xy, reg, treg;
4017 expressionS e;
4018 const char * const po = pred ? "spillreg.p" : "spillreg";
4019
4020 if (!in_procedure (po))
4021 return;
4022
4023 if (pred)
4024 sep = parse_predicate_and_operand (&e, &qp, po);
4025 else
4026 {
4027 sep = parse_operand_and_eval (&e, ',');
4028 qp = 0;
4029 }
4030 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
4031
4032 if (sep == ',')
4033 sep = parse_operand_and_eval (&e, ',');
4034 else
4035 e.X_op = O_absent;
4036 convert_expr_to_xy_reg (&e, &xy, &treg, po, 2 + pred);
4037
4038 add_unwind_entry (output_spill_reg (ab, reg, treg, xy, qp), sep);
4039 }
4040
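/* Handle .spillsp, .spillpsp and their predicated .p forms, e.g.
   ".spillsp r4, 16" to record an sp-relative spill of preserved r4
   (illustrative operands only).  PSPREL encodes two pieces of
   information: a non-negative value selects sp-relative (0) or
   psp-relative (1) addressing without a predicate, while a negative
   value is the one's complement of that flag for the predicated
   variants.  */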
4041 static void
4042 dot_spillmem (int psprel)
4043 {
4044 expressionS e;
4045 int pred = (psprel < 0), sep;
4046 unsigned int qp, ab, reg;
4047 const char * po;
4048
4049 if (pred)
4050 {
4051 psprel = ~psprel;
4052 po = psprel ? "spillpsp.p" : "spillsp.p";
4053 }
4054 else
4055 po = psprel ? "spillpsp" : "spillsp";
4056
4057 if (!in_procedure (po))
4058 return;
4059
4060 if (pred)
4061 sep = parse_predicate_and_operand (&e, &qp, po);
4062 else
4063 {
4064 sep = parse_operand_and_eval (&e, ',');
4065 qp = 0;
4066 }
4067 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
4068
4069 if (sep == ',')
4070 sep = parse_operand_and_eval (&e, ',');
4071 else
4072 e.X_op = O_absent;
4073 if (e.X_op != O_constant)
4074 {
4075 as_bad (_("Operand %d to .%s must be a constant"), 2 + pred, po);
4076 e.X_add_number = 0;
4077 }
4078
4079 if (psprel)
4080 add_unwind_entry (output_spill_psprel (ab, reg, e.X_add_number, qp), sep);
4081 else
4082 add_unwind_entry (output_spill_sprel (ab, reg, e.X_add_number, qp), sep);
4083 }
4084
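/* Look up the prologue nesting count recorded by .label_state LBL;
   complain and return 1 if no such label was recorded.  */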
4085 static unsigned int
4086 get_saved_prologue_count (unsigned long lbl)
4087 {
4088 label_prologue_count *lpc = unwind.saved_prologue_counts;
4089
4090 while (lpc != NULL && lpc->label_number != lbl)
4091 lpc = lpc->next;
4092
4093 if (lpc != NULL)
4094 return lpc->prologue_count;
4095
4096 as_bad (_("Missing .label_state %ld"), lbl);
4097 return 1;
4098 }
4099
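/* Record COUNT as the prologue nesting count associated with
   .label_state label LBL, overwriting any earlier entry for the same
   label.  */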
4100 static void
4101 save_prologue_count (unsigned long lbl, unsigned int count)
4102 {
4103 label_prologue_count *lpc = unwind.saved_prologue_counts;
4104
4105 while (lpc != NULL && lpc->label_number != lbl)
4106 lpc = lpc->next;
4107
4108 if (lpc != NULL)
4109 lpc->prologue_count = count;
4110 else
4111 {
4112 label_prologue_count *new_lpc = XNEW (label_prologue_count);
4113
4114 new_lpc->next = unwind.saved_prologue_counts;
4115 new_lpc->label_number = lbl;
4116 new_lpc->prologue_count = count;
4117 unwind.saved_prologue_counts = new_lpc;
4118 }
4119 }
4120
4121 static void
4122 free_saved_prologue_counts (void)
4123 {
4124 label_prologue_count *lpc = unwind.saved_prologue_counts;
4125 label_prologue_count *next;
4126
4127 while (lpc != NULL)
4128 {
4129 next = lpc->next;
4130 free (lpc);
4131 lpc = next;
4132 }
4133
4134 unwind.saved_prologue_counts = NULL;
4135 }
4136
4137 static void
4138 dot_label_state (int dummy ATTRIBUTE_UNUSED)
4139 {
4140 expressionS e;
4141
4142 if (!in_body ("label_state"))
4143 return;
4144
4145 parse_operand_and_eval (&e, 0);
4146 if (e.X_op == O_constant)
4147 save_prologue_count (e.X_add_number, unwind.prologue_count);
4148 else
4149 {
4150 as_bad (_("Operand to .label_state must be a constant"));
4151 e.X_add_number = 0;
4152 }
4153 add_unwind_entry (output_label_state (e.X_add_number), 0);
4154 }
4155
4156 static void
4157 dot_copy_state (int dummy ATTRIBUTE_UNUSED)
4158 {
4159 expressionS e;
4160
4161 if (!in_body ("copy_state"))
4162 return;
4163
4164 parse_operand_and_eval (&e, 0);
4165 if (e.X_op == O_constant)
4166 unwind.prologue_count = get_saved_prologue_count (e.X_add_number);
4167 else
4168 {
4169 as_bad (_("Operand to .copy_state must be a constant"));
4170 e.X_add_number = 0;
4171 }
4172 add_unwind_entry (output_copy_state (e.X_add_number), 0);
4173 }
4174
4175 static void
4176 dot_unwabi (int dummy ATTRIBUTE_UNUSED)
4177 {
4178 expressionS e1, e2;
4179 unsigned char sep;
4180
4181 if (!in_prologue ("unwabi"))
4182 return;
4183
4184 sep = parse_operand_and_eval (&e1, ',');
4185 if (sep == ',')
4186 parse_operand_and_eval (&e2, 0);
4187 else
4188 e2.X_op = O_absent;
4189
4190 if (e1.X_op != O_constant)
4191 {
4192 as_bad (_("First operand to .unwabi must be a constant"));
4193 e1.X_add_number = 0;
4194 }
4195
4196 if (e2.X_op != O_constant)
4197 {
4198 as_bad (_("Second operand to .unwabi must be a constant"));
4199 e2.X_add_number = 0;
4200 }
4201
4202 add_unwind_entry (output_unwabi (e1.X_add_number, e2.X_add_number), 0);
4203 }
4204
4205 static void
4206 dot_personality (int dummy ATTRIBUTE_UNUSED)
4207 {
4208 char *name, *p, c;
4209
4210 if (!in_procedure ("personality"))
4211 return;
4212 SKIP_WHITESPACE ();
4213 c = get_symbol_name (&name);
4214 p = input_line_pointer;
4215 unwind.personality_routine = symbol_find_or_make (name);
4216 unwind.force_unwind_entry = 1;
4217 *p = c;
4218 SKIP_WHITESPACE_AFTER_NAME ();
4219 demand_empty_rest_of_line ();
4220 }
4221
4222 static void
4223 dot_proc (int dummy ATTRIBUTE_UNUSED)
4224 {
4225 char *name, *p, c;
4226 symbolS *sym;
4227 proc_pending *pending, *last_pending;
4228
4229 if (unwind.proc_pending.sym)
4230 {
4231 (md.unwind_check == unwind_check_warning
4232 ? as_warn
4233 : as_bad) (_("Missing .endp after previous .proc"));
4234 while (unwind.proc_pending.next)
4235 {
4236 pending = unwind.proc_pending.next;
4237 unwind.proc_pending.next = pending->next;
4238 free (pending);
4239 }
4240 }
4241 last_pending = NULL;
4242
4243 /* Parse names of main and alternate entry points and mark them as
4244 function symbols: */
4245 while (1)
4246 {
4247 SKIP_WHITESPACE ();
4248 c = get_symbol_name (&name);
4249 p = input_line_pointer;
4250 if (!*name)
4251 as_bad (_("Empty argument of .proc"));
4252 else
4253 {
4254 sym = symbol_find_or_make (name);
4255 if (S_IS_DEFINED (sym))
4256 as_bad (_("`%s' was already defined"), name);
4257 else if (!last_pending)
4258 {
4259 unwind.proc_pending.sym = sym;
4260 last_pending = &unwind.proc_pending;
4261 }
4262 else
4263 {
4264 pending = XNEW (proc_pending);
4265 pending->sym = sym;
4266 last_pending = last_pending->next = pending;
4267 }
4268 symbol_get_bfdsym (sym)->flags |= BSF_FUNCTION;
4269 }
4270 *p = c;
4271 SKIP_WHITESPACE_AFTER_NAME ();
4272 if (*input_line_pointer != ',')
4273 break;
4274 ++input_line_pointer;
4275 }
4276 if (!last_pending)
4277 {
4278 unwind.proc_pending.sym = expr_build_dot ();
4279 last_pending = &unwind.proc_pending;
4280 }
4281 last_pending->next = NULL;
4282 demand_empty_rest_of_line ();
4283 do_align (4, NULL, 0, 0);
4284
4285 unwind.prologue = 0;
4286 unwind.prologue_count = 0;
4287 unwind.body = 0;
4288 unwind.insn = 0;
4289 unwind.list = unwind.tail = unwind.current_entry = NULL;
4290 unwind.personality_routine = 0;
4291 }
4292
4293 static void
4294 dot_body (int dummy ATTRIBUTE_UNUSED)
4295 {
4296 if (!in_procedure ("body"))
4297 return;
4298 if (!unwind.prologue && !unwind.body && unwind.insn)
4299 as_warn (_("Initial .body should precede any instructions"));
4300 check_pending_save ();
4301
4302 unwind.prologue = 0;
4303 unwind.prologue_mask = 0;
4304 unwind.body = 1;
4305
4306 add_unwind_entry (output_body (), 0);
4307 }
4308
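/* Handler for .prologue.  The optional first operand is a 4-bit mask and
the optional second operand names the first of the consecutive general
registers holding the saved state, e.g. (values illustrative):
.prologue 0xc, r33  */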
4309 static void
4310 dot_prologue (int dummy ATTRIBUTE_UNUSED)
4311 {
4312 unsigned mask = 0, grsave = 0;
4313
4314 if (!in_procedure ("prologue"))
4315 return;
4316 if (unwind.prologue)
4317 {
4318 as_bad (_(".prologue within prologue"));
4319 ignore_rest_of_line ();
4320 return;
4321 }
4322 if (!unwind.body && unwind.insn)
4323 as_warn (_("Initial .prologue should precede any instructions"));
4324
4325 if (!is_it_end_of_statement ())
4326 {
4327 expressionS e;
4328 int n, sep = parse_operand_and_eval (&e, ',');
4329
4330 if (e.X_op != O_constant
4331 || e.X_add_number < 0
4332 || e.X_add_number > 0xf)
4333 as_bad (_("First operand to .prologue must be a positive 4-bit constant"));
4334 else if (e.X_add_number == 0)
4335 as_warn (_("Pointless use of zero first operand to .prologue"));
4336 else
4337 mask = e.X_add_number;
4338
4339 n = popcount (mask);
4340
4341 if (sep == ',')
4342 parse_operand_and_eval (&e, 0);
4343 else
4344 e.X_op = O_absent;
4345
4346 if (e.X_op == O_constant
4347 && e.X_add_number >= 0
4348 && e.X_add_number < 128)
4349 {
4350 if (md.unwind_check == unwind_check_error)
4351 as_warn (_("Using a constant as second operand to .prologue is deprecated"));
4352 grsave = e.X_add_number;
4353 }
4354 else if (e.X_op != O_register
4355 || (grsave = e.X_add_number - REG_GR) > 127)
4356 {
4357 as_bad (_("Second operand to .prologue must be a general register"));
4358 grsave = 0;
4359 }
4360 else if (grsave > 128U - n)
4361 {
4362 as_bad (_("Second operand to .prologue must be the first of %d general registers"), n);
4363 grsave = 0;
4364 }
4365 }
4366
4367 if (mask)
4368 add_unwind_entry (output_prologue_gr (mask, grsave), 0);
4369 else
4370 add_unwind_entry (output_prologue (), 0);
4371
4372 unwind.prologue = 1;
4373 unwind.prologue_mask = mask;
4374 unwind.prologue_gr = grsave;
4375 unwind.body = 0;
4376 ++unwind.prologue_count;
4377 }
4378
4379 static void
4380 dot_endp (int dummy ATTRIBUTE_UNUSED)
4381 {
4382 expressionS e;
4383 int bytes_per_address;
4384 long where;
4385 segT saved_seg;
4386 subsegT saved_subseg;
4387 proc_pending *pending;
4388 int unwind_check = md.unwind_check;
4389
4390 md.unwind_check = unwind_check_error;
4391 if (!in_procedure ("endp"))
4392 return;
4393 md.unwind_check = unwind_check;
4394
4395 if (unwind.saved_text_seg)
4396 {
4397 saved_seg = unwind.saved_text_seg;
4398 saved_subseg = unwind.saved_text_subseg;
4399 unwind.saved_text_seg = NULL;
4400 }
4401 else
4402 {
4403 saved_seg = now_seg;
4404 saved_subseg = now_subseg;
4405 }
4406
4407 insn_group_break (1, 0, 0);
4408
4409 /* If there wasn't a .handlerdata, we haven't generated an image yet. */
4410 if (!unwind.info)
4411 generate_unwind_image (saved_seg);
4412
4413 if (unwind.info || unwind.force_unwind_entry)
4414 {
4415 symbolS *proc_end;
4416
4417 subseg_set (md.last_text_seg, 0);
4418 proc_end = expr_build_dot ();
4419
4420 start_unwind_section (saved_seg, SPECIAL_SECTION_UNWIND);
4421
4422 /* Make sure that section has 4 byte alignment for ILP32 and
4423 8 byte alignment for LP64. */
4424 record_alignment (now_seg, md.pointer_size_shift);
4425
4426 /* Need space for 3 pointers for procedure start, procedure end,
4427 and unwind info. */
4428 memset (frag_more (3 * md.pointer_size), 0, 3 * md.pointer_size);
4429 where = frag_now_fix () - (3 * md.pointer_size);
4430 bytes_per_address = bfd_arch_bits_per_address (stdoutput) / 8;
4431
4432 /* Issue the values of a) Proc Begin, b) Proc End, c) Unwind Record. */
4433 e.X_op = O_pseudo_fixup;
4434 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4435 e.X_add_number = 0;
4436 if (!S_IS_LOCAL (unwind.proc_pending.sym)
4437 && S_IS_DEFINED (unwind.proc_pending.sym))
4438 e.X_add_symbol = symbol_temp_new (S_GET_SEGMENT (unwind.proc_pending.sym),
4439 S_GET_VALUE (unwind.proc_pending.sym),
4440 symbol_get_frag (unwind.proc_pending.sym));
4441 else
4442 e.X_add_symbol = unwind.proc_pending.sym;
4443 ia64_cons_fix_new (frag_now, where, bytes_per_address, &e,
4444 BFD_RELOC_NONE);
4445
4446 e.X_op = O_pseudo_fixup;
4447 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4448 e.X_add_number = 0;
4449 e.X_add_symbol = proc_end;
4450 ia64_cons_fix_new (frag_now, where + bytes_per_address,
4451 bytes_per_address, &e, BFD_RELOC_NONE);
4452
4453 if (unwind.info)
4454 {
4455 e.X_op = O_pseudo_fixup;
4456 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4457 e.X_add_number = 0;
4458 e.X_add_symbol = unwind.info;
4459 ia64_cons_fix_new (frag_now, where + (bytes_per_address * 2),
4460 bytes_per_address, &e, BFD_RELOC_NONE);
4461 }
4462 }
4463 subseg_set (saved_seg, saved_subseg);
4464
4465 /* Set symbol sizes. */
4466 pending = &unwind.proc_pending;
4467 if (S_GET_NAME (pending->sym))
4468 {
4469 do
4470 {
4471 symbolS *sym = pending->sym;
4472
4473 if (!S_IS_DEFINED (sym))
4474 as_bad (_("`%s' was not defined within procedure"), S_GET_NAME (sym));
4475 else if (S_GET_SIZE (sym) == 0
4476 && symbol_get_obj (sym)->size == NULL)
4477 {
4478 fragS *frag = symbol_get_frag (sym);
4479
4480 if (frag)
4481 {
4482 if (frag == frag_now && SEG_NORMAL (now_seg))
4483 S_SET_SIZE (sym, frag_now_fix () - S_GET_VALUE (sym));
4484 else
4485 {
4486 symbol_get_obj (sym)->size = XNEW (expressionS);
4487 symbol_get_obj (sym)->size->X_op = O_subtract;
4488 symbol_get_obj (sym)->size->X_add_symbol
4489 = symbol_new (FAKE_LABEL_NAME, now_seg,
4490 frag_now_fix (), frag_now);
4491 symbol_get_obj (sym)->size->X_op_symbol = sym;
4492 symbol_get_obj (sym)->size->X_add_number = 0;
4493 }
4494 }
4495 }
4496 } while ((pending = pending->next) != NULL);
4497 }
4498
4499 /* Parse names of main and alternate entry points. */
4500 while (1)
4501 {
4502 char *name, *p, c;
4503
4504 SKIP_WHITESPACE ();
4505 c = get_symbol_name (&name);
4506 p = input_line_pointer;
4507 if (!*name)
4508 (md.unwind_check == unwind_check_warning
4509 ? as_warn
4510 : as_bad) (_("Empty argument of .endp"));
4511 else
4512 {
4513 symbolS *sym = symbol_find (name);
4514
4515 for (pending = &unwind.proc_pending; pending; pending = pending->next)
4516 {
4517 if (sym == pending->sym)
4518 {
4519 pending->sym = NULL;
4520 break;
4521 }
4522 }
4523 if (!sym || !pending)
4524 as_warn (_("`%s' was not specified with previous .proc"), name);
4525 }
4526 *p = c;
4527 SKIP_WHITESPACE_AFTER_NAME ();
4528 if (*input_line_pointer != ',')
4529 break;
4530 ++input_line_pointer;
4531 }
4532 demand_empty_rest_of_line ();
4533
4534 /* Deliberately only checking for the main entry point here; the
4535 language spec even says all arguments to .endp are ignored. */
4536 if (unwind.proc_pending.sym
4537 && S_GET_NAME (unwind.proc_pending.sym)
4538 && strcmp (S_GET_NAME (unwind.proc_pending.sym), FAKE_LABEL_NAME))
4539 as_warn (_("`%s' should be an operand to this .endp"),
4540 S_GET_NAME (unwind.proc_pending.sym));
4541 while (unwind.proc_pending.next)
4542 {
4543 pending = unwind.proc_pending.next;
4544 unwind.proc_pending.next = pending->next;
4545 free (pending);
4546 }
4547 unwind.proc_pending.sym = unwind.info = NULL;
4548 }
4549
4550 static void
4551 dot_template (int template_val)
4552 {
4553 CURR_SLOT.user_template = template_val;
4554 }
4555
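/* Handler for .regstk; the operands give the number of input, local,
output, and rotating registers, in that order, e.g.:
.regstk 2, 3, 2, 0  */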
4556 static void
4557 dot_regstk (int dummy ATTRIBUTE_UNUSED)
4558 {
4559 int ins, locs, outs, rots;
4560
4561 if (is_it_end_of_statement ())
4562 ins = locs = outs = rots = 0;
4563 else
4564 {
4565 ins = get_absolute_expression ();
4566 if (*input_line_pointer++ != ',')
4567 goto err;
4568 locs = get_absolute_expression ();
4569 if (*input_line_pointer++ != ',')
4570 goto err;
4571 outs = get_absolute_expression ();
4572 if (*input_line_pointer++ != ',')
4573 goto err;
4574 rots = get_absolute_expression ();
4575 }
4576 set_regstack (ins, locs, outs, rots);
4577 return;
4578
4579 err:
4580 as_bad (_("Comma expected"));
4581 ignore_rest_of_line ();
4582 }
4583
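/* Handler for .rotr/.rotf/.rotp.  Each argument declares a named group of
rotating registers with its element count in brackets, e.g. (names and
counts illustrative):
.rotr buf[4], cnt[2]  */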
4584 static void
4585 dot_rot (int type)
4586 {
4587 offsetT num_regs;
4588 valueT num_alloced = 0;
4589 struct dynreg **drpp, *dr;
4590 int ch, base_reg = 0;
4591 char *name, *start;
4592 size_t len;
4593
4594 switch (type)
4595 {
4596 case DYNREG_GR: base_reg = REG_GR + 32; break;
4597 case DYNREG_FR: base_reg = REG_FR + 32; break;
4598 case DYNREG_PR: base_reg = REG_P + 16; break;
4599 default: break;
4600 }
4601
4602 /* First, remove existing names from hash table. */
4603 for (dr = md.dynreg[type]; dr && dr->num_regs; dr = dr->next)
4604 {
4605 hash_delete (md.dynreg_hash, dr->name, FALSE);
4606 /* FIXME: Free dr->name. */
4607 dr->num_regs = 0;
4608 }
4609
4610 drpp = &md.dynreg[type];
4611 while (1)
4612 {
4613 ch = get_symbol_name (&start);
4614 len = strlen (ia64_canonicalize_symbol_name (start));
4615 *input_line_pointer = ch;
4616
4617 SKIP_WHITESPACE_AFTER_NAME ();
4618 if (*input_line_pointer != '[')
4619 {
4620 as_bad (_("Expected '['"));
4621 goto err;
4622 }
4623 ++input_line_pointer; /* skip '[' */
4624
4625 num_regs = get_absolute_expression ();
4626
4627 if (*input_line_pointer++ != ']')
4628 {
4629 as_bad (_("Expected ']'"));
4630 goto err;
4631 }
4632 if (num_regs <= 0)
4633 {
4634 as_bad (_("Number of elements must be positive"));
4635 goto err;
4636 }
4637 SKIP_WHITESPACE ();
4638
4639 num_alloced += num_regs;
4640 switch (type)
4641 {
4642 case DYNREG_GR:
4643 if (num_alloced > md.rot.num_regs)
4644 {
4645 as_bad (_("Used more than the declared %d rotating registers"),
4646 md.rot.num_regs);
4647 goto err;
4648 }
4649 break;
4650 case DYNREG_FR:
4651 if (num_alloced > 96)
4652 {
4653 as_bad (_("Used more than the available 96 rotating registers"));
4654 goto err;
4655 }
4656 break;
4657 case DYNREG_PR:
4658 if (num_alloced > 48)
4659 {
4660 as_bad (_("Used more than the available 48 rotating registers"));
4661 goto err;
4662 }
4663 break;
4664
4665 default:
4666 break;
4667 }
4668
4669 if (!*drpp)
4670 {
4671 *drpp = XOBNEW (&notes, struct dynreg);
4672 memset (*drpp, 0, sizeof (*dr));
4673 }
4674
4675 name = XOBNEWVEC (&notes, char, len + 1);
4676 memcpy (name, start, len);
4677 name[len] = '\0';
4678
4679 dr = *drpp;
4680 dr->name = name;
4681 dr->num_regs = num_regs;
4682 dr->base = base_reg;
4683 drpp = &dr->next;
4684 base_reg += num_regs;
4685
4686 if (hash_insert (md.dynreg_hash, name, dr))
4687 {
4688 as_bad (_("Attempt to redefine register set `%s'"), name);
4689 obstack_free (&notes, name);
4690 goto err;
4691 }
4692
4693 if (*input_line_pointer != ',')
4694 break;
4695 ++input_line_pointer; /* skip comma */
4696 SKIP_WHITESPACE ();
4697 }
4698 demand_empty_rest_of_line ();
4699 return;
4700
4701 err:
4702 ignore_rest_of_line ();
4703 }
4704
4705 static void
4706 dot_byteorder (int byteorder)
4707 {
4708 segment_info_type *seginfo = seg_info (now_seg);
4709
4710 if (byteorder == -1)
4711 {
4712 if (seginfo->tc_segment_info_data.endian == 0)
4713 seginfo->tc_segment_info_data.endian = default_big_endian ? 1 : 2;
4714 byteorder = seginfo->tc_segment_info_data.endian == 1;
4715 }
4716 else
4717 seginfo->tc_segment_info_data.endian = byteorder ? 1 : 2;
4718
4719 if (target_big_endian != byteorder)
4720 {
4721 target_big_endian = byteorder;
4722 if (target_big_endian)
4723 {
4724 ia64_number_to_chars = number_to_chars_bigendian;
4725 ia64_float_to_chars = ia64_float_to_chars_bigendian;
4726 }
4727 else
4728 {
4729 ia64_number_to_chars = number_to_chars_littleendian;
4730 ia64_float_to_chars = ia64_float_to_chars_littleendian;
4731 }
4732 }
4733 }
4734
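/* Handler for .psr; the accepted options are lsb, msb, abi32 and abi64,
separated by commas, e.g.:
.psr abi64, lsb  */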
4735 static void
4736 dot_psr (int dummy ATTRIBUTE_UNUSED)
4737 {
4738 char *option;
4739 int ch;
4740
4741 while (1)
4742 {
4743 ch = get_symbol_name (&option);
4744 if (strcmp (option, "lsb") == 0)
4745 md.flags &= ~EF_IA_64_BE;
4746 else if (strcmp (option, "msb") == 0)
4747 md.flags |= EF_IA_64_BE;
4748 else if (strcmp (option, "abi32") == 0)
4749 md.flags &= ~EF_IA_64_ABI64;
4750 else if (strcmp (option, "abi64") == 0)
4751 md.flags |= EF_IA_64_ABI64;
4752 else
4753 as_bad (_("Unknown psr option `%s'"), option);
4754 *input_line_pointer = ch;
4755
4756 SKIP_WHITESPACE_AFTER_NAME ();
4757 if (*input_line_pointer != ',')
4758 break;
4759
4760 ++input_line_pointer;
4761 SKIP_WHITESPACE ();
4762 }
4763 demand_empty_rest_of_line ();
4764 }
4765
4766 static void
4767 dot_ln (int dummy ATTRIBUTE_UNUSED)
4768 {
4769 new_logical_line (0, get_absolute_expression ());
4770 demand_empty_rest_of_line ();
4771 }
4772
4773 static void
4774 cross_section (int ref, void (*builder) (int), int ua)
4775 {
4776 char *start, *end;
4777 int saved_auto_align;
4778 unsigned int section_count;
4779 char *name;
4780 char c;
4781
4782 SKIP_WHITESPACE ();
4783 start = input_line_pointer;
4784 c = get_symbol_name (&name);
4785 if (input_line_pointer == start)
4786 {
4787 as_bad (_("Missing section name"));
4788 ignore_rest_of_line ();
4789 return;
4790 }
4791 * input_line_pointer = c;
4792 SKIP_WHITESPACE_AFTER_NAME ();
4793 end = input_line_pointer;
4794 if (*input_line_pointer != ',')
4795 {
4796 as_bad (_("Comma expected after section name"));
4797 ignore_rest_of_line ();
4798 return;
4799 }
4800 *end = '\0';
4801 end = input_line_pointer + 1; /* skip comma */
4802 input_line_pointer = start;
4803 md.keep_pending_output = 1;
4804 section_count = bfd_count_sections (stdoutput);
4805 obj_elf_section (0);
4806 if (section_count != bfd_count_sections (stdoutput))
4807 as_warn (_("Creating sections with .xdataN/.xrealN/.xstringZ is deprecated."));
4808 input_line_pointer = end;
4809 saved_auto_align = md.auto_align;
4810 if (ua)
4811 md.auto_align = 0;
4812 (*builder) (ref);
4813 if (ua)
4814 md.auto_align = saved_auto_align;
4815 obj_elf_previous (0);
4816 md.keep_pending_output = 0;
4817 }
4818
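/* Handlers for the .xdataN directives.  The first operand names the target
section and the remaining operands are emitted into it, e.g. (section and
values illustrative):
.xdata4 .rodata, 0x12, 0x34  */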
4819 static void
4820 dot_xdata (int size)
4821 {
4822 cross_section (size, cons, 0);
4823 }
4824
4825 /* Why doesn't float_cons() call md_cons_align() the way cons() does? */
4826
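/* Align before emitting floating point data: do_align takes a log2 value,
so 'f' (real4) data is aligned to 4 bytes, 'd' (real8) to 8 bytes, and
'x'/'X' (real10/real16) to 16 bytes.  */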
4827 static void
4828 stmt_float_cons (int kind)
4829 {
4830 size_t alignment;
4831
4832 switch (kind)
4833 {
4834 case 'd':
4835 alignment = 3;
4836 break;
4837
4838 case 'x':
4839 case 'X':
4840 alignment = 4;
4841 break;
4842
4843 case 'f':
4844 default:
4845 alignment = 2;
4846 break;
4847 }
4848 do_align (alignment, NULL, 0, 0);
4849 float_cons (kind);
4850 }
4851
4852 static void
4853 stmt_cons_ua (int size)
4854 {
4855 int saved_auto_align = md.auto_align;
4856
4857 md.auto_align = 0;
4858 cons (size);
4859 md.auto_align = saved_auto_align;
4860 }
4861
4862 static void
4863 dot_xfloat_cons (int kind)
4864 {
4865 cross_section (kind, stmt_float_cons, 0);
4866 }
4867
4868 static void
4869 dot_xstringer (int zero)
4870 {
4871 cross_section (zero, stringer, 0);
4872 }
4873
4874 static void
4875 dot_xdata_ua (int size)
4876 {
4877 cross_section (size, cons, 1);
4878 }
4879
4880 static void
4881 dot_xfloat_cons_ua (int kind)
4882 {
4883 cross_section (kind, float_cons, 1);
4884 }
4885
4886 /* .reg.val <regname>,value */
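/* For example (register and value illustrative):
.reg.val r7, 0x100
This records in gr_values that r7 currently holds 0x100.  */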
4887
4888 static void
4889 dot_reg_val (int dummy ATTRIBUTE_UNUSED)
4890 {
4891 expressionS reg;
4892
4893 expression_and_evaluate (&reg);
4894 if (reg.X_op != O_register)
4895 {
4896 as_bad (_("Register name expected"));
4897 ignore_rest_of_line ();
4898 }
4899 else if (*input_line_pointer++ != ',')
4900 {
4901 as_bad (_("Comma expected"));
4902 ignore_rest_of_line ();
4903 }
4904 else
4905 {
4906 valueT value = get_absolute_expression ();
4907 int regno = reg.X_add_number;
4908 if (regno <= REG_GR || regno > REG_GR + 127)
4909 as_warn (_("Register value annotation ignored"));
4910 else
4911 {
4912 gr_values[regno - REG_GR].known = 1;
4913 gr_values[regno - REG_GR].value = value;
4914 gr_values[regno - REG_GR].path = md.path;
4915 }
4916 }
4917 demand_empty_rest_of_line ();
4918 }
4919
4920 /*
4921 .serialize.data
4922 .serialize.instruction
4923 */
4924 static void
4925 dot_serialize (int type)
4926 {
4927 insn_group_break (0, 0, 0);
4928 if (type)
4929 instruction_serialization ();
4930 else
4931 data_serialization ();
4932 insn_group_break (0, 0, 0);
4933 demand_empty_rest_of_line ();
4934 }
4935
4936 /* select dv checking mode
4937 .auto
4938 .explicit
4939 .default
4940
4941 A stop is inserted when changing modes
4942 */
4943
4944 static void
4945 dot_dv_mode (int type)
4946 {
4947 if (md.manual_bundling)
4948 as_warn (_("Directive invalid within a bundle"));
4949
4950 if (type == 'E' || type == 'A')
4951 md.mode_explicitly_set = 0;
4952 else
4953 md.mode_explicitly_set = 1;
4954
4955 md.detect_dv = 1;
4956 switch (type)
4957 {
4958 case 'A':
4959 case 'a':
4960 if (md.explicit_mode)
4961 insn_group_break (1, 0, 0);
4962 md.explicit_mode = 0;
4963 break;
4964 case 'E':
4965 case 'e':
4966 if (!md.explicit_mode)
4967 insn_group_break (1, 0, 0);
4968 md.explicit_mode = 1;
4969 break;
4970 default:
4971 case 'd':
4972 if (md.explicit_mode != md.default_explicit_mode)
4973 insn_group_break (1, 0, 0);
4974 md.explicit_mode = md.default_explicit_mode;
4975 md.mode_explicitly_set = 0;
4976 break;
4977 }
4978 }
4979
4980 static void
4981 print_prmask (valueT mask)
4982 {
4983 int regno;
4984 const char *comma = "";
4985 for (regno = 0; regno < 64; regno++)
4986 {
4987 if (mask & ((valueT) 1 << regno))
4988 {
4989 fprintf (stderr, "%s p%d", comma, regno);
4990 comma = ",";
4991 }
4992 }
4993 }
4994
4995 /*
4996 .pred.rel.clear [p1 [,p2 [,...]]] (also .pred.rel "clear" or @clear)
4997 .pred.rel.imply p1, p2 (also .pred.rel "imply" or @imply)
4998 .pred.rel.mutex p1, p2 [,...] (also .pred.rel "mutex" or @mutex)
4999 .pred.safe_across_calls p1 [, p2 [,...]]
5000 */
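/* For example (predicate numbers illustrative):
.pred.rel.mutex p6, p7
.pred.rel.clear p16-p23
A range such as p16-p23 is accepted for all forms except "imply".  */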
5001
5002 static void
5003 dot_pred_rel (int type)
5004 {
5005 valueT mask = 0;
5006 int count = 0;
5007 int p1 = -1, p2 = -1;
5008
5009 if (type == 0)
5010 {
5011 if (*input_line_pointer == '"')
5012 {
5013 int len;
5014 char *form = demand_copy_C_string (&len);
5015
5016 if (strcmp (form, "mutex") == 0)
5017 type = 'm';
5018 else if (strcmp (form, "clear") == 0)
5019 type = 'c';
5020 else if (strcmp (form, "imply") == 0)
5021 type = 'i';
5022 obstack_free (&notes, form);
5023 }
5024 else if (*input_line_pointer == '@')
5025 {
5026 char *form;
5027 char c;
5028
5029 ++input_line_pointer;
5030 c = get_symbol_name (&form);
5031
5032 if (strcmp (form, "mutex") == 0)
5033 type = 'm';
5034 else if (strcmp (form, "clear") == 0)
5035 type = 'c';
5036 else if (strcmp (form, "imply") == 0)
5037 type = 'i';
5038 (void) restore_line_pointer (c);
5039 }
5040 else
5041 {
5042 as_bad (_("Missing predicate relation type"));
5043 ignore_rest_of_line ();
5044 return;
5045 }
5046 if (type == 0)
5047 {
5048 as_bad (_("Unrecognized predicate relation type"));
5049 ignore_rest_of_line ();
5050 return;
5051 }
5052 if (*input_line_pointer == ',')
5053 ++input_line_pointer;
5054 SKIP_WHITESPACE ();
5055 }
5056
5057 while (1)
5058 {
5059 valueT bits = 1;
5060 int sep, regno;
5061 expressionS pr, *pr1, *pr2;
5062
5063 sep = parse_operand_and_eval (&pr, ',');
5064 if (pr.X_op == O_register
5065 && pr.X_add_number >= REG_P
5066 && pr.X_add_number <= REG_P + 63)
5067 {
5068 regno = pr.X_add_number - REG_P;
5069 bits <<= regno;
5070 count++;
5071 if (p1 == -1)
5072 p1 = regno;
5073 else if (p2 == -1)
5074 p2 = regno;
5075 }
5076 else if (type != 'i'
5077 && pr.X_op == O_subtract
5078 && (pr1 = symbol_get_value_expression (pr.X_add_symbol))
5079 && pr1->X_op == O_register
5080 && pr1->X_add_number >= REG_P
5081 && pr1->X_add_number <= REG_P + 63
5082 && (pr2 = symbol_get_value_expression (pr.X_op_symbol))
5083 && pr2->X_op == O_register
5084 && pr2->X_add_number >= REG_P
5085 && pr2->X_add_number <= REG_P + 63)
5086 {
5087 /* It's a range. */
5088 int stop;
5089
5090 regno = pr1->X_add_number - REG_P;
5091 stop = pr2->X_add_number - REG_P;
5092 if (regno >= stop)
5093 {
5094 as_bad (_("Bad register range"));
5095 ignore_rest_of_line ();
5096 return;
5097 }
5098 bits = ((bits << stop) << 1) - (bits << regno);
5099 count += stop - regno + 1;
5100 }
5101 else
5102 {
5103 as_bad (_("Predicate register expected"));
5104 ignore_rest_of_line ();
5105 return;
5106 }
5107 if (mask & bits)
5108 as_warn (_("Duplicate predicate register ignored"));
5109 mask |= bits;
5110 if (sep != ',')
5111 break;
5112 }
5113
5114 switch (type)
5115 {
5116 case 'c':
5117 if (count == 0)
5118 mask = ~(valueT) 0;
5119 clear_qp_mutex (mask);
5120 clear_qp_implies (mask, (valueT) 0);
5121 break;
5122 case 'i':
5123 if (count != 2 || p1 == -1 || p2 == -1)
5124 as_bad (_("Predicate source and target required"));
5125 else if (p1 == 0 || p2 == 0)
5126 as_bad (_("Use of p0 is not valid in this context"));
5127 else
5128 add_qp_imply (p1, p2);
5129 break;
5130 case 'm':
5131 if (count < 2)
5132 {
5133 as_bad (_("At least two PR arguments expected"));
5134 break;
5135 }
5136 else if (mask & 1)
5137 {
5138 as_bad (_("Use of p0 is not valid in this context"));
5139 break;
5140 }
5141 add_qp_mutex (mask);
5142 break;
5143 case 's':
5144 /* Note that we don't override any existing relations. */
5145 if (count == 0)
5146 {
5147 as_bad (_("At least one PR argument expected"));
5148 break;
5149 }
5150 if (md.debug_dv)
5151 {
5152 fprintf (stderr, "Safe across calls: ");
5153 print_prmask (mask);
5154 fprintf (stderr, "\n");
5155 }
5156 qp_safe_across_calls = mask;
5157 break;
5158 }
5159 demand_empty_rest_of_line ();
5160 }
5161
5162 /* .entry label [, label [, ...]]
5163 Hint to DV code that the given labels are to be considered entry points.
5164 Otherwise, only global labels are considered entry points. */
5165
5166 static void
5167 dot_entry (int dummy ATTRIBUTE_UNUSED)
5168 {
5169 const char *err;
5170 char *name;
5171 int c;
5172 symbolS *symbolP;
5173
5174 do
5175 {
5176 c = get_symbol_name (&name);
5177 symbolP = symbol_find_or_make (name);
5178
5179 err = hash_insert (md.entry_hash, S_GET_NAME (symbolP), (void *) symbolP);
5180 if (err)
5181 as_fatal (_("Inserting \"%s\" into entry hint table failed: %s"),
5182 name, err);
5183
5184 *input_line_pointer = c;
5185 SKIP_WHITESPACE_AFTER_NAME ();
5186 c = *input_line_pointer;
5187 if (c == ',')
5188 {
5189 input_line_pointer++;
5190 SKIP_WHITESPACE ();
5191 if (*input_line_pointer == '\n')
5192 c = '\n';
5193 }
5194 }
5195 while (c == ',');
5196
5197 demand_empty_rest_of_line ();
5198 }
5199
5200 /* .mem.offset offset, base
5201 "base" is used to distinguish between offsets from a different base. */
5202
5203 static void
5204 dot_mem_offset (int dummy ATTRIBUTE_UNUSED)
5205 {
5206 md.mem_offset.hint = 1;
5207 md.mem_offset.offset = get_absolute_expression ();
5208 if (*input_line_pointer != ',')
5209 {
5210 as_bad (_("Comma expected"));
5211 ignore_rest_of_line ();
5212 return;
5213 }
5214 ++input_line_pointer;
5215 md.mem_offset.base = get_absolute_expression ();
5216 demand_empty_rest_of_line ();
5217 }
5218
5219 /* ia64-specific pseudo-ops: */
5220 const pseudo_typeS md_pseudo_table[] =
5221 {
5222 { "radix", dot_radix, 0 },
5223 { "lcomm", s_lcomm_bytes, 1 },
5224 { "loc", dot_loc, 0 },
5225 { "bss", dot_special_section, SPECIAL_SECTION_BSS },
5226 { "sbss", dot_special_section, SPECIAL_SECTION_SBSS },
5227 { "sdata", dot_special_section, SPECIAL_SECTION_SDATA },
5228 { "rodata", dot_special_section, SPECIAL_SECTION_RODATA },
5229 { "comment", dot_special_section, SPECIAL_SECTION_COMMENT },
5230 { "ia_64.unwind", dot_special_section, SPECIAL_SECTION_UNWIND },
5231 { "ia_64.unwind_info", dot_special_section, SPECIAL_SECTION_UNWIND_INFO },
5232 { "init_array", dot_special_section, SPECIAL_SECTION_INIT_ARRAY },
5233 { "fini_array", dot_special_section, SPECIAL_SECTION_FINI_ARRAY },
5234 { "proc", dot_proc, 0 },
5235 { "body", dot_body, 0 },
5236 { "prologue", dot_prologue, 0 },
5237 { "endp", dot_endp, 0 },
5238
5239 { "fframe", dot_fframe, 0 },
5240 { "vframe", dot_vframe, 0 },
5241 { "vframesp", dot_vframesp, 0 },
5242 { "vframepsp", dot_vframesp, 1 },
5243 { "save", dot_save, 0 },
5244 { "restore", dot_restore, 0 },
5245 { "restorereg", dot_restorereg, 0 },
5246 { "restorereg.p", dot_restorereg, 1 },
5247 { "handlerdata", dot_handlerdata, 0 },
5248 { "unwentry", dot_unwentry, 0 },
5249 { "altrp", dot_altrp, 0 },
5250 { "savesp", dot_savemem, 0 },
5251 { "savepsp", dot_savemem, 1 },
5252 { "save.g", dot_saveg, 0 },
5253 { "save.f", dot_savef, 0 },
5254 { "save.b", dot_saveb, 0 },
5255 { "save.gf", dot_savegf, 0 },
5256 { "spill", dot_spill, 0 },
5257 { "spillreg", dot_spillreg, 0 },
5258 { "spillsp", dot_spillmem, 0 },
5259 { "spillpsp", dot_spillmem, 1 },
5260 { "spillreg.p", dot_spillreg, 1 },
5261 { "spillsp.p", dot_spillmem, ~0 },
5262 { "spillpsp.p", dot_spillmem, ~1 },
5263 { "label_state", dot_label_state, 0 },
5264 { "copy_state", dot_copy_state, 0 },
5265 { "unwabi", dot_unwabi, 0 },
5266 { "personality", dot_personality, 0 },
5267 { "mii", dot_template, 0x0 },
5268 { "mli", dot_template, 0x2 }, /* old format, for compatibility */
5269 { "mlx", dot_template, 0x2 },
5270 { "mmi", dot_template, 0x4 },
5271 { "mfi", dot_template, 0x6 },
5272 { "mmf", dot_template, 0x7 },
5273 { "mib", dot_template, 0x8 },
5274 { "mbb", dot_template, 0x9 },
5275 { "bbb", dot_template, 0xb },
5276 { "mmb", dot_template, 0xc },
5277 { "mfb", dot_template, 0xe },
5278 { "align", dot_align, 0 },
5279 { "regstk", dot_regstk, 0 },
5280 { "rotr", dot_rot, DYNREG_GR },
5281 { "rotf", dot_rot, DYNREG_FR },
5282 { "rotp", dot_rot, DYNREG_PR },
5283 { "lsb", dot_byteorder, 0 },
5284 { "msb", dot_byteorder, 1 },
5285 { "psr", dot_psr, 0 },
5286 { "alias", dot_alias, 0 },
5287 { "secalias", dot_alias, 1 },
5288 { "ln", dot_ln, 0 }, /* source line info (for debugging) */
5289
5290 { "xdata1", dot_xdata, 1 },
5291 { "xdata2", dot_xdata, 2 },
5292 { "xdata4", dot_xdata, 4 },
5293 { "xdata8", dot_xdata, 8 },
5294 { "xdata16", dot_xdata, 16 },
5295 { "xreal4", dot_xfloat_cons, 'f' },
5296 { "xreal8", dot_xfloat_cons, 'd' },
5297 { "xreal10", dot_xfloat_cons, 'x' },
5298 { "xreal16", dot_xfloat_cons, 'X' },
5299 { "xstring", dot_xstringer, 8 + 0 },
5300 { "xstringz", dot_xstringer, 8 + 1 },
5301
5302 /* unaligned versions: */
5303 { "xdata2.ua", dot_xdata_ua, 2 },
5304 { "xdata4.ua", dot_xdata_ua, 4 },
5305 { "xdata8.ua", dot_xdata_ua, 8 },
5306 { "xdata16.ua", dot_xdata_ua, 16 },
5307 { "xreal4.ua", dot_xfloat_cons_ua, 'f' },
5308 { "xreal8.ua", dot_xfloat_cons_ua, 'd' },
5309 { "xreal10.ua", dot_xfloat_cons_ua, 'x' },
5310 { "xreal16.ua", dot_xfloat_cons_ua, 'X' },
5311
5312 /* annotations/DV checking support */
5313 { "entry", dot_entry, 0 },
5314 { "mem.offset", dot_mem_offset, 0 },
5315 { "pred.rel", dot_pred_rel, 0 },
5316 { "pred.rel.clear", dot_pred_rel, 'c' },
5317 { "pred.rel.imply", dot_pred_rel, 'i' },
5318 { "pred.rel.mutex", dot_pred_rel, 'm' },
5319 { "pred.safe_across_calls", dot_pred_rel, 's' },
5320 { "reg.val", dot_reg_val, 0 },
5321 { "serialize.data", dot_serialize, 0 },
5322 { "serialize.instruction", dot_serialize, 1 },
5323 { "auto", dot_dv_mode, 'a' },
5324 { "explicit", dot_dv_mode, 'e' },
5325 { "default", dot_dv_mode, 'd' },
5326
5327 /* ??? These are needed to make gas/testsuite/gas/elf/ehopt.s work.
5328 IA-64 aligns data allocation pseudo-ops by default, so we have to
5329 tell it that these ones are supposed to be unaligned. Long term,
5330 should rewrite so that only IA-64 specific data allocation pseudo-ops
5331 are aligned by default. */
5332 {"2byte", stmt_cons_ua, 2},
5333 {"4byte", stmt_cons_ua, 4},
5334 {"8byte", stmt_cons_ua, 8},
5335
5336 #ifdef TE_VMS
5337 {"vms_common", obj_elf_vms_common, 0},
5338 #endif
5339
5340 { NULL, 0, 0 }
5341 };
5342
5343 static const struct pseudo_opcode
5344 {
5345 const char *name;
5346 void (*handler) (int);
5347 int arg;
5348 }
5349 pseudo_opcode[] =
5350 {
5351 /* these are more like pseudo-ops, but don't start with a dot */
5352 { "data1", cons, 1 },
5353 { "data2", cons, 2 },
5354 { "data4", cons, 4 },
5355 { "data8", cons, 8 },
5356 { "data16", cons, 16 },
5357 { "real4", stmt_float_cons, 'f' },
5358 { "real8", stmt_float_cons, 'd' },
5359 { "real10", stmt_float_cons, 'x' },
5360 { "real16", stmt_float_cons, 'X' },
5361 { "string", stringer, 8 + 0 },
5362 { "stringz", stringer, 8 + 1 },
5363
5364 /* unaligned versions: */
5365 { "data2.ua", stmt_cons_ua, 2 },
5366 { "data4.ua", stmt_cons_ua, 4 },
5367 { "data8.ua", stmt_cons_ua, 8 },
5368 { "data16.ua", stmt_cons_ua, 16 },
5369 { "real4.ua", float_cons, 'f' },
5370 { "real8.ua", float_cons, 'd' },
5371 { "real10.ua", float_cons, 'x' },
5372 { "real16.ua", float_cons, 'X' },
5373 };
5374
5375 /* Declare a register by creating a symbol for it and entering it in
5376 the symbol table. */
5377
5378 static symbolS *
5379 declare_register (const char *name, unsigned int regnum)
5380 {
5381 const char *err;
5382 symbolS *sym;
5383
5384 sym = symbol_create (name, reg_section, regnum, &zero_address_frag);
5385
5386 err = hash_insert (md.reg_hash, S_GET_NAME (sym), (void *) sym);
5387 if (err)
5388 as_fatal ("Inserting \"%s\" into register table failed: %s",
5389 name, err);
5390
5391 return sym;
5392 }
5393
5394 static void
5395 declare_register_set (const char *prefix,
5396 unsigned int num_regs,
5397 unsigned int base_regnum)
5398 {
5399 char name[8];
5400 unsigned int i;
5401
5402 for (i = 0; i < num_regs; ++i)
5403 {
5404 snprintf (name, sizeof (name), "%s%u", prefix, i);
5405 declare_register (name, base_regnum + i);
5406 }
5407 }
5408
5409 static unsigned int
5410 operand_width (enum ia64_opnd opnd)
5411 {
5412 const struct ia64_operand *odesc = &elf64_ia64_operands[opnd];
5413 unsigned int bits = 0;
5414 int i;
5415
5416 bits = 0;
5417 for (i = 0; i < NELEMS (odesc->field) && odesc->field[i].bits; ++i)
5418 bits += odesc->field[i].bits;
5419
5420 return bits;
5421 }
5422
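/* Check whether expression E is acceptable as operand RES_INDEX of IDESC.
Returns OPERAND_MATCH, OPERAND_OUT_OF_RANGE, or OPERAND_MISMATCH; for
relocatable operands a fixup is also queued on the current slot.  */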
5423 static enum operand_match_result
5424 operand_match (const struct ia64_opcode *idesc, int res_index, expressionS *e)
5425 {
5426 enum ia64_opnd opnd = idesc->operands[res_index];
5427 int bits, relocatable = 0;
5428 struct insn_fix *fix;
5429 bfd_signed_vma val;
5430
5431 switch (opnd)
5432 {
5433 /* constants: */
5434
5435 case IA64_OPND_AR_CCV:
5436 if (e->X_op == O_register && e->X_add_number == REG_AR + 32)
5437 return OPERAND_MATCH;
5438 break;
5439
5440 case IA64_OPND_AR_CSD:
5441 if (e->X_op == O_register && e->X_add_number == REG_AR + 25)
5442 return OPERAND_MATCH;
5443 break;
5444
5445 case IA64_OPND_AR_PFS:
5446 if (e->X_op == O_register && e->X_add_number == REG_AR + 64)
5447 return OPERAND_MATCH;
5448 break;
5449
5450 case IA64_OPND_GR0:
5451 if (e->X_op == O_register && e->X_add_number == REG_GR + 0)
5452 return OPERAND_MATCH;
5453 break;
5454
5455 case IA64_OPND_IP:
5456 if (e->X_op == O_register && e->X_add_number == REG_IP)
5457 return OPERAND_MATCH;
5458 break;
5459
5460 case IA64_OPND_PR:
5461 if (e->X_op == O_register && e->X_add_number == REG_PR)
5462 return OPERAND_MATCH;
5463 break;
5464
5465 case IA64_OPND_PR_ROT:
5466 if (e->X_op == O_register && e->X_add_number == REG_PR_ROT)
5467 return OPERAND_MATCH;
5468 break;
5469
5470 case IA64_OPND_PSR:
5471 if (e->X_op == O_register && e->X_add_number == REG_PSR)
5472 return OPERAND_MATCH;
5473 break;
5474
5475 case IA64_OPND_PSR_L:
5476 if (e->X_op == O_register && e->X_add_number == REG_PSR_L)
5477 return OPERAND_MATCH;
5478 break;
5479
5480 case IA64_OPND_PSR_UM:
5481 if (e->X_op == O_register && e->X_add_number == REG_PSR_UM)
5482 return OPERAND_MATCH;
5483 break;
5484
5485 case IA64_OPND_C1:
5486 if (e->X_op == O_constant)
5487 {
5488 if (e->X_add_number == 1)
5489 return OPERAND_MATCH;
5490 else
5491 return OPERAND_OUT_OF_RANGE;
5492 }
5493 break;
5494
5495 case IA64_OPND_C8:
5496 if (e->X_op == O_constant)
5497 {
5498 if (e->X_add_number == 8)
5499 return OPERAND_MATCH;
5500 else
5501 return OPERAND_OUT_OF_RANGE;
5502 }
5503 break;
5504
5505 case IA64_OPND_C16:
5506 if (e->X_op == O_constant)
5507 {
5508 if (e->X_add_number == 16)
5509 return OPERAND_MATCH;
5510 else
5511 return OPERAND_OUT_OF_RANGE;
5512 }
5513 break;
5514
5515 /* register operands: */
5516
5517 case IA64_OPND_AR3:
5518 if (e->X_op == O_register && e->X_add_number >= REG_AR
5519 && e->X_add_number < REG_AR + 128)
5520 return OPERAND_MATCH;
5521 break;
5522
5523 case IA64_OPND_B1:
5524 case IA64_OPND_B2:
5525 if (e->X_op == O_register && e->X_add_number >= REG_BR
5526 && e->X_add_number < REG_BR + 8)
5527 return OPERAND_MATCH;
5528 break;
5529
5530 case IA64_OPND_CR3:
5531 if (e->X_op == O_register && e->X_add_number >= REG_CR
5532 && e->X_add_number < REG_CR + 128)
5533 return OPERAND_MATCH;
5534 break;
5535
5536 case IA64_OPND_DAHR3:
5537 if (e->X_op == O_register && e->X_add_number >= REG_DAHR
5538 && e->X_add_number < REG_DAHR + 8)
5539 return OPERAND_MATCH;
5540 break;
5541
5542 case IA64_OPND_F1:
5543 case IA64_OPND_F2:
5544 case IA64_OPND_F3:
5545 case IA64_OPND_F4:
5546 if (e->X_op == O_register && e->X_add_number >= REG_FR
5547 && e->X_add_number < REG_FR + 128)
5548 return OPERAND_MATCH;
5549 break;
5550
5551 case IA64_OPND_P1:
5552 case IA64_OPND_P2:
5553 if (e->X_op == O_register && e->X_add_number >= REG_P
5554 && e->X_add_number < REG_P + 64)
5555 return OPERAND_MATCH;
5556 break;
5557
5558 case IA64_OPND_R1:
5559 case IA64_OPND_R2:
5560 case IA64_OPND_R3:
5561 if (e->X_op == O_register && e->X_add_number >= REG_GR
5562 && e->X_add_number < REG_GR + 128)
5563 return OPERAND_MATCH;
5564 break;
5565
5566 case IA64_OPND_R3_2:
5567 if (e->X_op == O_register && e->X_add_number >= REG_GR)
5568 {
5569 if (e->X_add_number < REG_GR + 4)
5570 return OPERAND_MATCH;
5571 else if (e->X_add_number < REG_GR + 128)
5572 return OPERAND_OUT_OF_RANGE;
5573 }
5574 break;
5575
5576 /* indirect operands: */
5577 case IA64_OPND_CPUID_R3:
5578 case IA64_OPND_DBR_R3:
5579 case IA64_OPND_DTR_R3:
5580 case IA64_OPND_ITR_R3:
5581 case IA64_OPND_IBR_R3:
5582 case IA64_OPND_MSR_R3:
5583 case IA64_OPND_PKR_R3:
5584 case IA64_OPND_PMC_R3:
5585 case IA64_OPND_PMD_R3:
5586 case IA64_OPND_DAHR_R3:
5587 case IA64_OPND_RR_R3:
5588 if (e->X_op == O_index && e->X_op_symbol
5589 && (S_GET_VALUE (e->X_op_symbol) - IND_CPUID
5590 == opnd - IA64_OPND_CPUID_R3))
5591 return OPERAND_MATCH;
5592 break;
5593
5594 case IA64_OPND_MR3:
5595 if (e->X_op == O_index && !e->X_op_symbol)
5596 return OPERAND_MATCH;
5597 break;
5598
5599 /* immediate operands: */
5600 case IA64_OPND_CNT2a:
5601 case IA64_OPND_LEN4:
5602 case IA64_OPND_LEN6:
5603 bits = operand_width (idesc->operands[res_index]);
5604 if (e->X_op == O_constant)
5605 {
5606 if ((bfd_vma) (e->X_add_number - 1) < ((bfd_vma) 1 << bits))
5607 return OPERAND_MATCH;
5608 else
5609 return OPERAND_OUT_OF_RANGE;
5610 }
5611 break;
5612
5613 case IA64_OPND_CNT2b:
5614 if (e->X_op == O_constant)
5615 {
5616 if ((bfd_vma) (e->X_add_number - 1) < 3)
5617 return OPERAND_MATCH;
5618 else
5619 return OPERAND_OUT_OF_RANGE;
5620 }
5621 break;
5622
5623 case IA64_OPND_CNT2c:
5624 val = e->X_add_number;
5625 if (e->X_op == O_constant)
5626 {
5627 if ((val == 0 || val == 7 || val == 15 || val == 16))
5628 return OPERAND_MATCH;
5629 else
5630 return OPERAND_OUT_OF_RANGE;
5631 }
5632 break;
5633
5634 case IA64_OPND_SOR:
5635 /* SOR must be an integer multiple of 8 */
5636 if (e->X_op == O_constant && e->X_add_number & 0x7)
5637 return OPERAND_OUT_OF_RANGE;
5638 /* Fall through. */
5639 case IA64_OPND_SOF:
5640 case IA64_OPND_SOL:
5641 if (e->X_op == O_constant)
5642 {
5643 if ((bfd_vma) e->X_add_number <= 96)
5644 return OPERAND_MATCH;
5645 else
5646 return OPERAND_OUT_OF_RANGE;
5647 }
5648 break;
5649
5650 case IA64_OPND_IMMU62:
5651 if (e->X_op == O_constant)
5652 {
5653 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << 62))
5654 return OPERAND_MATCH;
5655 else
5656 return OPERAND_OUT_OF_RANGE;
5657 }
5658 else
5659 {
5660 /* FIXME -- need 62-bit relocation type */
5661 as_bad (_("62-bit relocation not yet implemented"));
5662 }
5663 break;
5664
5665 case IA64_OPND_IMMU64:
5666 if (e->X_op == O_symbol || e->X_op == O_pseudo_fixup
5667 || e->X_op == O_subtract)
5668 {
5669 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5670 fix->code = BFD_RELOC_IA64_IMM64;
5671 if (e->X_op != O_subtract)
5672 {
5673 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5674 if (e->X_op == O_pseudo_fixup)
5675 e->X_op = O_symbol;
5676 }
5677
5678 fix->opnd = idesc->operands[res_index];
5679 fix->expr = *e;
5680 fix->is_pcrel = 0;
5681 ++CURR_SLOT.num_fixups;
5682 return OPERAND_MATCH;
5683 }
5684 else if (e->X_op == O_constant)
5685 return OPERAND_MATCH;
5686 break;
5687
5688 case IA64_OPND_IMMU5b:
5689 if (e->X_op == O_constant)
5690 {
5691 val = e->X_add_number;
5692 if (val >= 32 && val <= 63)
5693 return OPERAND_MATCH;
5694 else
5695 return OPERAND_OUT_OF_RANGE;
5696 }
5697 break;
5698
5699 case IA64_OPND_CCNT5:
5700 case IA64_OPND_CNT5:
5701 case IA64_OPND_CNT6:
5702 case IA64_OPND_CPOS6a:
5703 case IA64_OPND_CPOS6b:
5704 case IA64_OPND_CPOS6c:
5705 case IA64_OPND_IMMU2:
5706 case IA64_OPND_IMMU7a:
5707 case IA64_OPND_IMMU7b:
5708 case IA64_OPND_IMMU16:
5709 case IA64_OPND_IMMU19:
5710 case IA64_OPND_IMMU21:
5711 case IA64_OPND_IMMU24:
5712 case IA64_OPND_MBTYPE4:
5713 case IA64_OPND_MHTYPE8:
5714 case IA64_OPND_POS6:
5715 bits = operand_width (idesc->operands[res_index]);
5716 if (e->X_op == O_constant)
5717 {
5718 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5719 return OPERAND_MATCH;
5720 else
5721 return OPERAND_OUT_OF_RANGE;
5722 }
5723 break;
5724
5725 case IA64_OPND_IMMU9:
5726 bits = operand_width (idesc->operands[res_index]);
5727 if (e->X_op == O_constant)
5728 {
5729 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5730 {
5731 int lobits = e->X_add_number & 0x3;
5732 if (((bfd_vma) e->X_add_number & 0x3C) != 0 && lobits == 0)
5733 e->X_add_number |= (bfd_vma) 0x3;
5734 return OPERAND_MATCH;
5735 }
5736 else
5737 return OPERAND_OUT_OF_RANGE;
5738 }
5739 break;
5740
5741 case IA64_OPND_IMM44:
5742 /* The least significant 16 bits must be zero. */
5743 if ((e->X_add_number & 0xffff) != 0)
5744 /* XXX technically, this is wrong: we should not be issuing warning
5745 messages until we're sure this instruction pattern is going to
5746 be used! */
5747 as_warn (_("lower 16 bits of mask ignored"));
5748
5749 if (e->X_op == O_constant)
5750 {
5751 if (((e->X_add_number >= 0
5752 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 44))
5753 || (e->X_add_number < 0
5754 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 44))))
5755 {
5756 /* sign-extend */
5757 if (e->X_add_number >= 0
5758 && (e->X_add_number & ((bfd_vma) 1 << 43)) != 0)
5759 {
5760 e->X_add_number |= ~(((bfd_vma) 1 << 44) - 1);
5761 }
5762 return OPERAND_MATCH;
5763 }
5764 else
5765 return OPERAND_OUT_OF_RANGE;
5766 }
5767 break;
5768
5769 case IA64_OPND_IMM17:
5770 /* bit 0 is a don't care (pr0 is hardwired to 1) */
5771 if (e->X_op == O_constant)
5772 {
5773 if (((e->X_add_number >= 0
5774 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 17))
5775 || (e->X_add_number < 0
5776 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 17))))
5777 {
5778 /* sign-extend */
5779 if (e->X_add_number >= 0
5780 && (e->X_add_number & ((bfd_vma) 1 << 16)) != 0)
5781 {
5782 e->X_add_number |= ~(((bfd_vma) 1 << 17) - 1);
5783 }
5784 return OPERAND_MATCH;
5785 }
5786 else
5787 return OPERAND_OUT_OF_RANGE;
5788 }
5789 break;
5790
5791 case IA64_OPND_IMM14:
5792 case IA64_OPND_IMM22:
5793 relocatable = 1;
5794 /* Fall through. */
5795 case IA64_OPND_IMM1:
5796 case IA64_OPND_IMM8:
5797 case IA64_OPND_IMM8U4:
5798 case IA64_OPND_IMM8M1:
5799 case IA64_OPND_IMM8M1U4:
5800 case IA64_OPND_IMM8M1U8:
5801 case IA64_OPND_IMM9a:
5802 case IA64_OPND_IMM9b:
5803 bits = operand_width (idesc->operands[res_index]);
5804 if (relocatable && (e->X_op == O_symbol
5805 || e->X_op == O_subtract
5806 || e->X_op == O_pseudo_fixup))
5807 {
5808 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5809
5810 if (idesc->operands[res_index] == IA64_OPND_IMM14)
5811 fix->code = BFD_RELOC_IA64_IMM14;
5812 else
5813 fix->code = BFD_RELOC_IA64_IMM22;
5814
5815 if (e->X_op != O_subtract)
5816 {
5817 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5818 if (e->X_op == O_pseudo_fixup)
5819 e->X_op = O_symbol;
5820 }
5821
5822 fix->opnd = idesc->operands[res_index];
5823 fix->expr = *e;
5824 fix->is_pcrel = 0;
5825 ++CURR_SLOT.num_fixups;
5826 return OPERAND_MATCH;
5827 }
5828 else if (e->X_op != O_constant
5829 && ! (e->X_op == O_big && opnd == IA64_OPND_IMM8M1U8))
5830 return OPERAND_MISMATCH;
5831
5832 if (opnd == IA64_OPND_IMM8M1U4)
5833 {
5834 /* Zero is not valid for unsigned compares that take an adjusted
5835 constant immediate range. */
5836 if (e->X_add_number == 0)
5837 return OPERAND_OUT_OF_RANGE;
5838
5839 /* Sign-extend 32-bit unsigned numbers, so that the following range
5840 checks will work. */
5841 val = e->X_add_number;
5842 if (((val & (~(bfd_vma) 0 << 32)) == 0)
5843 && ((val & ((bfd_vma) 1 << 31)) != 0))
5844 val = ((val << 32) >> 32);
5845
5846 /* Check for 0x100000000. This is valid because
5847 0x100000000-1 is the same as ((uint32_t) -1). */
5848 if (val == ((bfd_signed_vma) 1 << 32))
5849 return OPERAND_MATCH;
5850
5851 val = val - 1;
5852 }
5853 else if (opnd == IA64_OPND_IMM8M1U8)
5854 {
5855 /* Zero is not valid for unsigned compares that take an adjusted
5856 constant immediate range. */
5857 if (e->X_add_number == 0)
5858 return OPERAND_OUT_OF_RANGE;
5859
5860 /* Check for 0x10000000000000000. */
5861 if (e->X_op == O_big)
5862 {
5863 if (generic_bignum[0] == 0
5864 && generic_bignum[1] == 0
5865 && generic_bignum[2] == 0
5866 && generic_bignum[3] == 0
5867 && generic_bignum[4] == 1)
5868 return OPERAND_MATCH;
5869 else
5870 return OPERAND_OUT_OF_RANGE;
5871 }
5872 else
5873 val = e->X_add_number - 1;
5874 }
5875 else if (opnd == IA64_OPND_IMM8M1)
5876 val = e->X_add_number - 1;
5877 else if (opnd == IA64_OPND_IMM8U4)
5878 {
5879 /* Sign-extend 32-bit unsigned numbers, so that the following range
5880 checks will work. */
5881 val = e->X_add_number;
5882 if (((val & (~(bfd_vma) 0 << 32)) == 0)
5883 && ((val & ((bfd_vma) 1 << 31)) != 0))
5884 val = ((val << 32) >> 32);
5885 }
5886 else
5887 val = e->X_add_number;
5888
5889 if ((val >= 0 && (bfd_vma) val < ((bfd_vma) 1 << (bits - 1)))
5890 || (val < 0 && (bfd_vma) -val <= ((bfd_vma) 1 << (bits - 1))))
5891 return OPERAND_MATCH;
5892 else
5893 return OPERAND_OUT_OF_RANGE;
5894
5895 case IA64_OPND_INC3:
5896 /* +/- 1, 4, 8, 16 */
5897 val = e->X_add_number;
5898 if (val < 0)
5899 val = -val;
5900 if (e->X_op == O_constant)
5901 {
5902 if ((val == 1 || val == 4 || val == 8 || val == 16))
5903 return OPERAND_MATCH;
5904 else
5905 return OPERAND_OUT_OF_RANGE;
5906 }
5907 break;
5908
5909 case IA64_OPND_TGT25:
5910 case IA64_OPND_TGT25b:
5911 case IA64_OPND_TGT25c:
5912 case IA64_OPND_TGT64:
5913 if (e->X_op == O_symbol)
5914 {
5915 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5916 if (opnd == IA64_OPND_TGT25)
5917 fix->code = BFD_RELOC_IA64_PCREL21F;
5918 else if (opnd == IA64_OPND_TGT25b)
5919 fix->code = BFD_RELOC_IA64_PCREL21M;
5920 else if (opnd == IA64_OPND_TGT25c)
5921 fix->code = BFD_RELOC_IA64_PCREL21B;
5922 else if (opnd == IA64_OPND_TGT64)
5923 fix->code = BFD_RELOC_IA64_PCREL60B;
5924 else
5925 abort ();
5926
5927 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5928 fix->opnd = idesc->operands[res_index];
5929 fix->expr = *e;
5930 fix->is_pcrel = 1;
5931 ++CURR_SLOT.num_fixups;
5932 return OPERAND_MATCH;
5933 }
5934 /* Fall through. */
5935 case IA64_OPND_TAG13:
5936 case IA64_OPND_TAG13b:
5937 switch (e->X_op)
5938 {
5939 case O_constant:
5940 return OPERAND_MATCH;
5941
5942 case O_symbol:
5943 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5944 /* There are no external relocs for TAG13/TAG13b fields, so we
5945 create a dummy reloc. This will not live past md_apply_fix. */
5946 fix->code = BFD_RELOC_UNUSED;
5947 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5948 fix->opnd = idesc->operands[res_index];
5949 fix->expr = *e;
5950 fix->is_pcrel = 1;
5951 ++CURR_SLOT.num_fixups;
5952 return OPERAND_MATCH;
5953
5954 default:
5955 break;
5956 }
5957 break;
5958
5959 case IA64_OPND_LDXMOV:
5960 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5961 fix->code = BFD_RELOC_IA64_LDXMOV;
5962 fix->opnd = idesc->operands[res_index];
5963 fix->expr = *e;
5964 fix->is_pcrel = 0;
5965 ++CURR_SLOT.num_fixups;
5966 return OPERAND_MATCH;
5967
5968 case IA64_OPND_STRD5b:
5969 if (e->X_op == O_constant)
5970 {
5971 /* 5-bit signed scaled by 64 */
5972 if ((e->X_add_number <= ( 0xf << 6 ))
5973 && (e->X_add_number >= -( 0x10 << 6 )))
5974 {
5975
5976 /* Must be a multiple of 64 */
5977 if ((e->X_add_number & 0x3f) != 0)
5978 as_warn (_("stride must be a multiple of 64; lower 6 bits ignored"));
5979
5980 e->X_add_number &= ~ 0x3f;
5981 return OPERAND_MATCH;
5982 }
5983 else
5984 return OPERAND_OUT_OF_RANGE;
5985 }
5986 break;
5987 case IA64_OPND_CNT6a:
5988 if (e->X_op == O_constant)
5989 {
5990 /* 6-bit unsigned biased by 1 -- count 0 is meaningless */
5991 if ((e->X_add_number <= 64)
5992 && (e->X_add_number > 0) )
5993 {
5994 return OPERAND_MATCH;
5995 }
5996 else
5997 return OPERAND_OUT_OF_RANGE;
5998 }
5999 break;
6000
6001 default:
6002 break;
6003 }
6004 return OPERAND_MISMATCH;
6005 }
6006
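/* Parse one operand into E.  Returns the character following the operand;
if MORE is non-zero and that character is ',' or MORE, it is consumed.  */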
6007 static int
6008 parse_operand (expressionS *e, int more)
6009 {
6010 int sep = '\0';
6011
6012 memset (e, 0, sizeof (*e));
6013 e->X_op = O_absent;
6014 SKIP_WHITESPACE ();
6015 expression (e);
6016 sep = *input_line_pointer;
6017 if (more && (sep == ',' || sep == more))
6018 ++input_line_pointer;
6019 return sep;
6020 }
6021
6022 static int
6023 parse_operand_and_eval (expressionS *e, int more)
6024 {
6025 int sep = parse_operand (e, more);
6026 resolve_expression (e);
6027 return sep;
6028 }
6029
6030 static int
6031 parse_operand_maybe_eval (expressionS *e, int more, enum ia64_opnd op)
6032 {
6033 int sep = parse_operand (e, more);
6034 switch (op)
6035 {
6036 case IA64_OPND_IMM14:
6037 case IA64_OPND_IMM22:
6038 case IA64_OPND_IMMU64:
6039 case IA64_OPND_TGT25:
6040 case IA64_OPND_TGT25b:
6041 case IA64_OPND_TGT25c:
6042 case IA64_OPND_TGT64:
6043 case IA64_OPND_TAG13:
6044 case IA64_OPND_TAG13b:
6045 case IA64_OPND_LDXMOV:
6046 break;
6047 default:
6048 resolve_expression (e);
6049 break;
6050 }
6051 return sep;
6052 }
6053
6054 /* Returns the next entry in the opcode table that matches the one in
6055 IDESC, and frees the entry in IDESC. If no matching entry is
6056 found, NULL is returned instead. */
6057
6058 static struct ia64_opcode *
6059 get_next_opcode (struct ia64_opcode *idesc)
6060 {
6061 struct ia64_opcode *next = ia64_find_next_opcode (idesc);
6062 ia64_free_opcode (idesc);
6063 return next;
6064 }
6065
6066 /* Parse the operands for the opcode and find the opcode variant that
6067 matches the specified operands, or NULL if no match is possible. */
6068
6069 static struct ia64_opcode *
6070 parse_operands (struct ia64_opcode *idesc)
6071 {
6072 int i = 0, highest_unmatched_operand, num_operands = 0, num_outputs = 0;
6073 int error_pos, out_of_range_pos, curr_out_of_range_pos, sep = 0;
6074 int reg1, reg2;
6075 char reg_class;
6076 enum ia64_opnd expected_operand = IA64_OPND_NIL;
6077 enum operand_match_result result;
6078 char mnemonic[129];
6079 char *first_arg = 0, *end, *saved_input_pointer;
6080 unsigned int sof;
6081
6082 gas_assert (strlen (idesc->name) <= 128);
6083
6084 strcpy (mnemonic, idesc->name);
6085 if (idesc->operands[2] == IA64_OPND_SOF
6086 || idesc->operands[1] == IA64_OPND_SOF)
6087 {
6088 /* To make the common idiom "alloc loc?=ar.pfs,0,1,0,0" work, we
6089 can't parse the first operand until we have parsed the
6090 remaining operands of the "alloc" instruction. */
6091 SKIP_WHITESPACE ();
6092 first_arg = input_line_pointer;
6093 end = strchr (input_line_pointer, '=');
6094 if (!end)
6095 {
6096 as_bad (_("Expected separator `='"));
6097 return 0;
6098 }
6099 input_line_pointer = end + 1;
6100 ++i;
6101 ++num_outputs;
6102 }
6103
6104 for (; ; ++i)
6105 {
6106 if (i < NELEMS (CURR_SLOT.opnd))
6107 {
6108 sep = parse_operand_maybe_eval (CURR_SLOT.opnd + i, '=',
6109 idesc->operands[i]);
6110 if (CURR_SLOT.opnd[i].X_op == O_absent)
6111 break;
6112 }
6113 else
6114 {
6115 expressionS dummy;
6116
6117 sep = parse_operand (&dummy, '=');
6118 if (dummy.X_op == O_absent)
6119 break;
6120 }
6121
6122 ++num_operands;
6123
6124 if (sep != '=' && sep != ',')
6125 break;
6126
6127 if (sep == '=')
6128 {
6129 if (num_outputs > 0)
6130 as_bad (_("Duplicate equal sign (=) in instruction"));
6131 else
6132 num_outputs = i + 1;
6133 }
6134 }
6135 if (sep != '\0')
6136 {
6137 as_bad (_("Illegal operand separator `%c'"), sep);
6138 return 0;
6139 }
6140
6141 if (idesc->operands[2] == IA64_OPND_SOF
6142 || idesc->operands[1] == IA64_OPND_SOF)
6143 {
6144 /* Map alloc r1=ar.pfs,i,l,o,r to alloc r1=ar.pfs,(i+l+o),(i+l),r.
6145 Note, however, that due to that mapping operand numbers in error
6146 messages for any of the constant operands will not be correct. */
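/* For instance (values illustrative), "alloc r1=ar.pfs,2,3,2,0" yields
sof = 2+3+2 = 7 and sol = 2+3 = 5, i.e. the encoded form
"alloc r1=ar.pfs,7,5,0".  */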
6147 know (strcmp (idesc->name, "alloc") == 0);
6148 /* The first operand hasn't been parsed/initialized, yet (but
6149 num_operands intentionally doesn't account for that). */
6150 i = num_operands > 4 ? 2 : 1;
6151 #define FORCE_CONST(n) (CURR_SLOT.opnd[n].X_op == O_constant \
6152 ? CURR_SLOT.opnd[n].X_add_number \
6153 : 0)
6154 sof = set_regstack (FORCE_CONST(i),
6155 FORCE_CONST(i + 1),
6156 FORCE_CONST(i + 2),
6157 FORCE_CONST(i + 3));
6158 #undef FORCE_CONST
6159
6160 /* now we can parse the first arg: */
6161 saved_input_pointer = input_line_pointer;
6162 input_line_pointer = first_arg;
6163 sep = parse_operand_maybe_eval (CURR_SLOT.opnd + 0, '=',
6164 idesc->operands[0]);
6165 if (sep != '=')
6166 --num_outputs; /* force error */
6167 input_line_pointer = saved_input_pointer;
6168
6169 CURR_SLOT.opnd[i].X_add_number = sof;
6170 if (CURR_SLOT.opnd[i + 1].X_op == O_constant
6171 && CURR_SLOT.opnd[i + 2].X_op == O_constant)
6172 CURR_SLOT.opnd[i + 1].X_add_number
6173 = sof - CURR_SLOT.opnd[i + 2].X_add_number;
6174 else
6175 CURR_SLOT.opnd[i + 1].X_op = O_illegal;
6176 CURR_SLOT.opnd[i + 2] = CURR_SLOT.opnd[i + 3];
6177 }
6178
6179 highest_unmatched_operand = -4;
6180 curr_out_of_range_pos = -1;
6181 error_pos = 0;
6182 for (; idesc; idesc = get_next_opcode (idesc))
6183 {
6184 if (num_outputs != idesc->num_outputs)
6185 continue; /* mismatch in # of outputs */
6186 if (highest_unmatched_operand < 0)
6187 highest_unmatched_operand |= 1;
6188 if (num_operands > NELEMS (idesc->operands)
6189 || (num_operands < NELEMS (idesc->operands)
6190 && idesc->operands[num_operands])
6191 || (num_operands > 0 && !idesc->operands[num_operands - 1]))
6192 continue; /* mismatch in number of arguments */
6193 if (highest_unmatched_operand < 0)
6194 highest_unmatched_operand |= 2;
6195
6196 CURR_SLOT.num_fixups = 0;
6197
6198 /* Try to match all operands. If we see an out-of-range operand,
6199 then continue trying to match the rest of the operands, since if
6200 the rest match, then this idesc will give the best error message. */
6201
6202 out_of_range_pos = -1;
6203 for (i = 0; i < num_operands && idesc->operands[i]; ++i)
6204 {
6205 result = operand_match (idesc, i, CURR_SLOT.opnd + i);
6206 if (result != OPERAND_MATCH)
6207 {
6208 if (result != OPERAND_OUT_OF_RANGE)
6209 break;
6210 if (out_of_range_pos < 0)
6211 /* remember position of the first out-of-range operand: */
6212 out_of_range_pos = i;
6213 }
6214 }
6215
6216 /* If we did not match all operands, or if at least one operand was
6217 out-of-range, then this idesc does not match. Keep track of which
6218 idesc matched the most operands before failing. If we have two
6219 idescs that failed at the same position, and one had an out-of-range
6220 operand, then prefer the out-of-range operand. Thus if we have
6221 "add r0=0x1000000,r1" we get an error saying the constant is out
6222 of range instead of an error saying that the constant should have been
6223 a register. */
6224
6225 if (i != num_operands || out_of_range_pos >= 0)
6226 {
6227 if (i > highest_unmatched_operand
6228 || (i == highest_unmatched_operand
6229 && out_of_range_pos > curr_out_of_range_pos))
6230 {
6231 highest_unmatched_operand = i;
6232 if (out_of_range_pos >= 0)
6233 {
6234 expected_operand = idesc->operands[out_of_range_pos];
6235 error_pos = out_of_range_pos;
6236 }
6237 else
6238 {
6239 expected_operand = idesc->operands[i];
6240 error_pos = i;
6241 }
6242 curr_out_of_range_pos = out_of_range_pos;
6243 }
6244 continue;
6245 }
6246
6247 break;
6248 }
6249 if (!idesc)
6250 {
6251 if (expected_operand)
6252 as_bad (_("Operand %u of `%s' should be %s"),
6253 error_pos + 1, mnemonic,
6254 elf64_ia64_operands[expected_operand].desc);
6255 else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 1))
6256 as_bad (_("Wrong number of output operands"));
6257 else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 2))
6258 as_bad (_("Wrong number of input operands"));
6259 else
6260 as_bad (_("Operand mismatch"));
6261 return 0;
6262 }
6263
6264 /* Check that the instruction doesn't use
6265 - r0, f0, or f1 as output operands
6266 - the same predicate twice as output operands
6267 - r0 as address of a base update load or store
6268 - the same GR as output and address of a base update load
6269 - two even- or two odd-numbered FRs as output operands of a floating
6270 point parallel load.
6271 At most two (conflicting) output (or output-like) operands can exist,
6272 (floating point parallel loads have three outputs, but the base register,
6273 if updated, cannot conflict with the actual outputs). */
6274 reg2 = reg1 = -1;
6275 for (i = 0; i < num_operands; ++i)
6276 {
6277 int regno = 0;
6278
6279 reg_class = 0;
6280 switch (idesc->operands[i])
6281 {
6282 case IA64_OPND_R1:
6283 case IA64_OPND_R2:
6284 case IA64_OPND_R3:
6285 if (i < num_outputs)
6286 {
6287 if (CURR_SLOT.opnd[i].X_add_number == REG_GR)
6288 reg_class = 'r';
6289 else if (reg1 < 0)
6290 reg1 = CURR_SLOT.opnd[i].X_add_number;
6291 else if (reg2 < 0)
6292 reg2 = CURR_SLOT.opnd[i].X_add_number;
6293 }
6294 break;
6295 case IA64_OPND_P1:
6296 case IA64_OPND_P2:
6297 if (i < num_outputs)
6298 {
6299 if (reg1 < 0)
6300 reg1 = CURR_SLOT.opnd[i].X_add_number;
6301 else if (reg2 < 0)
6302 reg2 = CURR_SLOT.opnd[i].X_add_number;
6303 }
6304 break;
6305 case IA64_OPND_F1:
6306 case IA64_OPND_F2:
6307 case IA64_OPND_F3:
6308 case IA64_OPND_F4:
6309 if (i < num_outputs)
6310 {
6311 if (CURR_SLOT.opnd[i].X_add_number >= REG_FR
6312 && CURR_SLOT.opnd[i].X_add_number <= REG_FR + 1)
6313 {
6314 reg_class = 'f';
6315 regno = CURR_SLOT.opnd[i].X_add_number - REG_FR;
6316 }
6317 else if (reg1 < 0)
6318 reg1 = CURR_SLOT.opnd[i].X_add_number;
6319 else if (reg2 < 0)
6320 reg2 = CURR_SLOT.opnd[i].X_add_number;
6321 }
6322 break;
6323 case IA64_OPND_MR3:
6324 if (idesc->flags & IA64_OPCODE_POSTINC)
6325 {
6326 if (CURR_SLOT.opnd[i].X_add_number == REG_GR)
6327 reg_class = 'm';
6328 else if (reg1 < 0)
6329 reg1 = CURR_SLOT.opnd[i].X_add_number;
6330 else if (reg2 < 0)
6331 reg2 = CURR_SLOT.opnd[i].X_add_number;
6332 }
6333 break;
6334 default:
6335 break;
6336 }
6337 switch (reg_class)
6338 {
6339 case 0:
6340 break;
6341 default:
6342 as_warn (_("Invalid use of `%c%d' as output operand"), reg_class, regno);
6343 break;
6344 case 'm':
6345 as_warn (_("Invalid use of `r%d' as base update address operand"), regno);
6346 break;
6347 }
6348 }
6349 if (reg1 == reg2)
6350 {
6351 if (reg1 >= REG_GR && reg1 <= REG_GR + 127)
6352 {
6353 reg1 -= REG_GR;
6354 reg_class = 'r';
6355 }
6356 else if (reg1 >= REG_P && reg1 <= REG_P + 63)
6357 {
6358 reg1 -= REG_P;
6359 reg_class = 'p';
6360 }
6361 else if (reg1 >= REG_FR && reg1 <= REG_FR + 127)
6362 {
6363 reg1 -= REG_FR;
6364 reg_class = 'f';
6365 }
6366 else
6367 reg_class = 0;
6368 if (reg_class)
6369 as_warn (_("Invalid duplicate use of `%c%d'"), reg_class, reg1);
6370 }
6371 else if (((reg1 >= REG_FR && reg1 <= REG_FR + 31
6372 && reg2 >= REG_FR && reg2 <= REG_FR + 31)
6373 || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127
6374 && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127))
6375 && ! ((reg1 ^ reg2) & 1))
6376 as_warn (_("Invalid simultaneous use of `f%d' and `f%d'"),
6377 reg1 - REG_FR, reg2 - REG_FR);
6378 else if ((reg1 >= REG_FR && reg1 <= REG_FR + 31
6379 && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127)
6380 || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127
6381 && reg2 >= REG_FR && reg2 <= REG_FR + 31))
6382 as_warn (_("Dangerous simultaneous use of `f%d' and `f%d'"),
6383 reg1 - REG_FR, reg2 - REG_FR);
6384 return idesc;
6385 }
6386
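/* Encode the instruction described by SLOT into a 41-bit slot image and
   store it through INSNP.  For the X-unit operands handled below (IMMU64,
   IMMU62, TGT64), the payload bits that do not fit are written into *insnp
   first -- the L slot of an MLX bundle -- and the instruction proper then
   lands in the following slot.  */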
6387 static void
6388 build_insn (struct slot *slot, bfd_vma *insnp)
6389 {
6390 const struct ia64_operand *odesc, *o2desc;
6391 struct ia64_opcode *idesc = slot->idesc;
6392 bfd_vma insn;
6393 bfd_signed_vma val;
6394 const char *err;
6395 int i;
6396
6397 insn = idesc->opcode | slot->qp_regno;
6398
6399 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; ++i)
6400 {
6401 if (slot->opnd[i].X_op == O_register
6402 || slot->opnd[i].X_op == O_constant
6403 || slot->opnd[i].X_op == O_index)
6404 val = slot->opnd[i].X_add_number;
6405 else if (slot->opnd[i].X_op == O_big)
6406 {
6407 /* This must be the value 0x10000000000000000. */
6408 gas_assert (idesc->operands[i] == IA64_OPND_IMM8M1U8);
6409 val = 0;
6410 }
6411 else
6412 val = 0;
6413
6414 switch (idesc->operands[i])
6415 {
6416 case IA64_OPND_IMMU64:
6417 *insnp++ = (val >> 22) & 0x1ffffffffffLL;
6418 insn |= (((val & 0x7f) << 13) | (((val >> 7) & 0x1ff) << 27)
6419 | (((val >> 16) & 0x1f) << 22) | (((val >> 21) & 0x1) << 21)
6420 | (((val >> 63) & 0x1) << 36));
6421 continue;
6422
6423 case IA64_OPND_IMMU62:
6424 val &= 0x3fffffffffffffffULL;
6425 if (val != slot->opnd[i].X_add_number)
6426 as_warn (_("Value truncated to 62 bits"));
6427 *insnp++ = (val >> 21) & 0x1ffffffffffLL;
6428 insn |= (((val & 0xfffff) << 6) | (((val >> 20) & 0x1) << 36));
6429 continue;
6430
6431 case IA64_OPND_TGT64:
6432 val >>= 4;
6433 *insnp++ = ((val >> 20) & 0x7fffffffffLL) << 2;
6434 insn |= ((((val >> 59) & 0x1) << 36)
6435 | (((val >> 0) & 0xfffff) << 13));
6436 continue;
6437
6438 case IA64_OPND_AR3:
6439 val -= REG_AR;
6440 break;
6441
6442 case IA64_OPND_B1:
6443 case IA64_OPND_B2:
6444 val -= REG_BR;
6445 break;
6446
6447 case IA64_OPND_CR3:
6448 val -= REG_CR;
6449 break;
6450
6451 case IA64_OPND_DAHR3:
6452 val -= REG_DAHR;
6453 break;
6454
6455 case IA64_OPND_F1:
6456 case IA64_OPND_F2:
6457 case IA64_OPND_F3:
6458 case IA64_OPND_F4:
6459 val -= REG_FR;
6460 break;
6461
6462 case IA64_OPND_P1:
6463 case IA64_OPND_P2:
6464 val -= REG_P;
6465 break;
6466
6467 case IA64_OPND_R1:
6468 case IA64_OPND_R2:
6469 case IA64_OPND_R3:
6470 case IA64_OPND_R3_2:
6471 case IA64_OPND_CPUID_R3:
6472 case IA64_OPND_DBR_R3:
6473 case IA64_OPND_DTR_R3:
6474 case IA64_OPND_ITR_R3:
6475 case IA64_OPND_IBR_R3:
6476 case IA64_OPND_MR3:
6477 case IA64_OPND_MSR_R3:
6478 case IA64_OPND_PKR_R3:
6479 case IA64_OPND_PMC_R3:
6480 case IA64_OPND_PMD_R3:
6481 case IA64_OPND_DAHR_R3:
6482 case IA64_OPND_RR_R3:
6483 val -= REG_GR;
6484 break;
6485
6486 default:
6487 break;
6488 }
6489
6490 odesc = elf64_ia64_operands + idesc->operands[i];
6491 err = (*odesc->insert) (odesc, val, &insn);
6492 if (err)
6493 as_bad_where (slot->src_file, slot->src_line,
6494 _("Bad operand value: %s"), err);
6495 if (idesc->flags & IA64_OPCODE_PSEUDO)
6496 {
6497 if ((idesc->flags & IA64_OPCODE_F2_EQ_F3)
6498 && odesc == elf64_ia64_operands + IA64_OPND_F3)
6499 {
6500 o2desc = elf64_ia64_operands + IA64_OPND_F2;
6501 (*o2desc->insert) (o2desc, val, &insn);
6502 }
6503 if ((idesc->flags & IA64_OPCODE_LEN_EQ_64MCNT)
6504 && (odesc == elf64_ia64_operands + IA64_OPND_CPOS6a
6505 || odesc == elf64_ia64_operands + IA64_OPND_POS6))
6506 {
6507 o2desc = elf64_ia64_operands + IA64_OPND_LEN6;
6508 (*o2desc->insert) (o2desc, 64 - val, &insn);
6509 }
6510 }
6511 }
6512 *insnp = insn;
6513 }
6514
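/* Emit one 16-byte bundle: pick a template (the user-specified one if
   present, otherwise the precomputed best match for the pending insn
   types), place as many queued instructions as will fit into the three
   slots, pad the rest with unit-appropriate NOPs, and write the result
   out little-endian.  */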
6515 static void
6516 emit_one_bundle (void)
6517 {
6518 int manual_bundling_off = 0, manual_bundling = 0;
6519 enum ia64_unit required_unit, insn_unit = 0;
6520 enum ia64_insn_type type[3], insn_type;
6521 unsigned int template_val, orig_template;
6522 bfd_vma insn[3] = { -1, -1, -1 };
6523 struct ia64_opcode *idesc;
6524 int end_of_insn_group = 0, user_template = -1;
6525 int n, i, j, first, curr, last_slot;
6526 bfd_vma t0 = 0, t1 = 0;
6527 struct label_fix *lfix;
6528 bfd_boolean mark_label;
6529 struct insn_fix *ifix;
6530 char mnemonic[16];
6531 fixS *fix;
6532 char *f;
6533 int addr_mod;
6534
6535 first = (md.curr_slot + NUM_SLOTS - md.num_slots_in_use) % NUM_SLOTS;
6536 know (first >= 0 && first < NUM_SLOTS);
6537 n = MIN (3, md.num_slots_in_use);
6538
6539   /* Determine template: use user_template if specified, best match
6540 otherwise: */
6541
6542 if (md.slot[first].user_template >= 0)
6543 user_template = template_val = md.slot[first].user_template;
6544 else
6545 {
6546 /* Auto select appropriate template. */
6547 memset (type, 0, sizeof (type));
6548 curr = first;
6549 for (i = 0; i < n; ++i)
6550 {
6551 if (md.slot[curr].label_fixups && i != 0)
6552 break;
6553 type[i] = md.slot[curr].idesc->type;
6554 curr = (curr + 1) % NUM_SLOTS;
6555 }
6556 template_val = best_template[type[0]][type[1]][type[2]];
6557 }
6558
6559 /* initialize instructions with appropriate nops: */
6560 for (i = 0; i < 3; ++i)
6561 insn[i] = nop[ia64_templ_desc[template_val].exec_unit[i]];
6562
6563 f = frag_more (16);
6564
6565   /* Check to see if this bundle is at an offset that is a multiple of 16 bytes
6566 from the start of the frag. */
6567 addr_mod = frag_now_fix () & 15;
6568 if (frag_now->has_code && frag_now->insn_addr != addr_mod)
6569 as_bad (_("instruction address is not a multiple of 16"));
6570 frag_now->insn_addr = addr_mod;
6571 frag_now->has_code = 1;
6572
6573 /* now fill in slots with as many insns as possible: */
6574 curr = first;
6575 idesc = md.slot[curr].idesc;
6576 end_of_insn_group = 0;
6577 last_slot = -1;
6578 for (i = 0; i < 3 && md.num_slots_in_use > 0; ++i)
6579 {
6580 /* If we have unwind records, we may need to update some now. */
6581 unw_rec_list *ptr = md.slot[curr].unwind_record;
6582 unw_rec_list *end_ptr = NULL;
6583
6584 if (ptr)
6585 {
6586 /* Find the last prologue/body record in the list for the current
6587 insn, and set the slot number for all records up to that point.
6588 This needs to be done now, because prologue/body records refer to
6589 the current point, not the point after the instruction has been
6590 issued. This matters because there may have been nops emitted
6591 meanwhile. Any non-prologue non-body record followed by a
6592 prologue/body record must also refer to the current point. */
6593 unw_rec_list *last_ptr;
6594
6595 for (j = 1; end_ptr == NULL && j < md.num_slots_in_use; ++j)
6596 end_ptr = md.slot[(curr + j) % NUM_SLOTS].unwind_record;
6597 for (last_ptr = NULL; ptr != end_ptr; ptr = ptr->next)
6598 if (ptr->r.type == prologue || ptr->r.type == prologue_gr
6599 || ptr->r.type == body)
6600 last_ptr = ptr;
6601 if (last_ptr)
6602 {
6603 /* Make last_ptr point one after the last prologue/body
6604 record. */
6605 last_ptr = last_ptr->next;
6606 for (ptr = md.slot[curr].unwind_record; ptr != last_ptr;
6607 ptr = ptr->next)
6608 {
6609 ptr->slot_number = (unsigned long) f + i;
6610 ptr->slot_frag = frag_now;
6611 }
6612 /* Remove the initialized records, so that we won't accidentally
6613 update them again if we insert a nop and continue. */
6614 md.slot[curr].unwind_record = last_ptr;
6615 }
6616 }
6617
6618 manual_bundling_off = md.slot[curr].manual_bundling_off;
6619 if (md.slot[curr].manual_bundling_on)
6620 {
6621 if (curr == first)
6622 manual_bundling = 1;
6623 else
6624 break; /* Need to start a new bundle. */
6625 }
6626
6627 /* If this instruction specifies a template, then it must be the first
6628 instruction of a bundle. */
6629 if (curr != first && md.slot[curr].user_template >= 0)
6630 break;
6631
6632 if (idesc->flags & IA64_OPCODE_SLOT2)
6633 {
6634 if (manual_bundling && !manual_bundling_off)
6635 {
6636 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6637 _("`%s' must be last in bundle"), idesc->name);
6638 if (i < 2)
6639 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6640 }
6641 i = 2;
6642 }
6643 if (idesc->flags & IA64_OPCODE_LAST)
6644 {
6645 int required_slot;
6646 unsigned int required_template;
6647
6648 /* If we need a stop bit after an M slot, our only choice is
6649 template 5 (M;;MI). If we need a stop bit after a B
6650 slot, our only choice is to place it at the end of the
6651 bundle, because the only available templates are MIB,
6652 MBB, BBB, MMB, and MFB. We don't handle anything other
6653 than M and B slots because these are the only kind of
6654 instructions that can have the IA64_OPCODE_LAST bit set. */
6655 required_template = template_val;
6656 switch (idesc->type)
6657 {
6658 case IA64_TYPE_M:
6659 required_slot = 0;
6660 required_template = 5;
6661 break;
6662
6663 case IA64_TYPE_B:
6664 required_slot = 2;
6665 break;
6666
6667 default:
6668 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6669 _("Internal error: don't know how to force %s to end of instruction group"),
6670 idesc->name);
6671 required_slot = i;
6672 break;
6673 }
6674 if (manual_bundling
6675 && (i > required_slot
6676 || (required_slot == 2 && !manual_bundling_off)
6677 || (user_template >= 0
6678 /* Changing from MMI to M;MI is OK. */
6679 && (template_val ^ required_template) > 1)))
6680 {
6681 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6682 _("`%s' must be last in instruction group"),
6683 idesc->name);
6684 if (i < 2 && required_slot == 2 && !manual_bundling_off)
6685 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6686 }
6687 if (required_slot < i)
6688 /* Can't fit this instruction. */
6689 break;
6690
6691 i = required_slot;
6692 if (required_template != template_val)
6693 {
6694 /* If we switch the template, we need to reset the NOPs
6695 after slot i. The slot-types of the instructions ahead
6696 of i never change, so we don't need to worry about
6697 changing NOPs in front of this slot. */
6698 for (j = i; j < 3; ++j)
6699 insn[j] = nop[ia64_templ_desc[required_template].exec_unit[j]];
6700
6701 /* We just picked a template that includes the stop bit in the
6702 middle, so we don't need another one emitted later. */
6703 md.slot[curr].end_of_insn_group = 0;
6704 }
6705 template_val = required_template;
6706 }
6707 if (curr != first && md.slot[curr].label_fixups)
6708 {
6709 if (manual_bundling)
6710 {
6711 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6712 _("Label must be first in a bundle"));
6713 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6714 }
6715 /* This insn must go into the first slot of a bundle. */
6716 break;
6717 }
6718
6719 if (end_of_insn_group && md.num_slots_in_use >= 1)
6720 {
6721 /* We need an instruction group boundary in the middle of a
6722 	     bundle.  See if we can switch to another template with
6723 an appropriate boundary. */
6724
6725 orig_template = template_val;
6726 if (i == 1 && (user_template == 4
6727 || (user_template < 0
6728 && (ia64_templ_desc[template_val].exec_unit[0]
6729 == IA64_UNIT_M))))
6730 {
6731 template_val = 5;
6732 end_of_insn_group = 0;
6733 }
6734 else if (i == 2 && (user_template == 0
6735 || (user_template < 0
6736 && (ia64_templ_desc[template_val].exec_unit[1]
6737 == IA64_UNIT_I)))
6738 /* This test makes sure we don't switch the template if
6739 the next instruction is one that needs to be first in
6740 an instruction group. Since all those instructions are
6741 in the M group, there is no way such an instruction can
6742 fit in this bundle even if we switch the template. The
6743 reason we have to check for this is that otherwise we
6744 may end up generating "MI;;I M.." which has the deadly
6745 effect that the second M instruction is no longer the
6746 first in the group! --davidm 99/12/16 */
6747 && (idesc->flags & IA64_OPCODE_FIRST) == 0)
6748 {
6749 template_val = 1;
6750 end_of_insn_group = 0;
6751 }
6752 else if (i == 1
6753 && user_template == 0
6754 && !(idesc->flags & IA64_OPCODE_FIRST))
6755 /* Use the next slot. */
6756 continue;
6757 else if (curr != first)
6758 /* can't fit this insn */
6759 break;
6760
6761 if (template_val != orig_template)
6762 /* if we switch the template, we need to reset the NOPs
6763 after slot i. The slot-types of the instructions ahead
6764 of i never change, so we don't need to worry about
6765 changing NOPs in front of this slot. */
6766 for (j = i; j < 3; ++j)
6767 insn[j] = nop[ia64_templ_desc[template_val].exec_unit[j]];
6768 }
6769 required_unit = ia64_templ_desc[template_val].exec_unit[i];
6770
6771 /* resolve dynamic opcodes such as "break", "hint", and "nop": */
6772 if (idesc->type == IA64_TYPE_DYN)
6773 {
6774 enum ia64_opnd opnd1, opnd2;
6775
6776 if ((strcmp (idesc->name, "nop") == 0)
6777 || (strcmp (idesc->name, "break") == 0))
6778 insn_unit = required_unit;
6779 else if (strcmp (idesc->name, "hint") == 0)
6780 {
6781 insn_unit = required_unit;
6782 if (required_unit == IA64_UNIT_B)
6783 {
6784 switch (md.hint_b)
6785 {
6786 case hint_b_ok:
6787 break;
6788 case hint_b_warning:
6789 as_warn (_("hint in B unit may be treated as nop"));
6790 break;
6791 case hint_b_error:
6792 /* When manual bundling is off and there is no
6793 user template, we choose a different unit so
6794 that hint won't go into the current slot. We
6795 will fill the current bundle with nops and
6796 try to put hint into the next bundle. */
6797 if (!manual_bundling && user_template < 0)
6798 insn_unit = IA64_UNIT_I;
6799 else
6800 as_bad (_("hint in B unit can't be used"));
6801 break;
6802 }
6803 }
6804 }
6805 else if (strcmp (idesc->name, "chk.s") == 0
6806 || strcmp (idesc->name, "mov") == 0)
6807 {
6808 insn_unit = IA64_UNIT_M;
6809 if (required_unit == IA64_UNIT_I
6810 || (required_unit == IA64_UNIT_F && template_val == 6))
6811 insn_unit = IA64_UNIT_I;
6812 }
6813 else
6814 as_fatal (_("emit_one_bundle: unexpected dynamic op"));
6815
6816 snprintf (mnemonic, sizeof (mnemonic), "%s.%c",
6817 idesc->name, "?imbfxx"[insn_unit]);
6818 opnd1 = idesc->operands[0];
6819 opnd2 = idesc->operands[1];
6820 ia64_free_opcode (idesc);
6821 idesc = ia64_find_opcode (mnemonic);
6822 /* moves to/from ARs have collisions */
6823 if (opnd1 == IA64_OPND_AR3 || opnd2 == IA64_OPND_AR3)
6824 {
6825 while (idesc != NULL
6826 && (idesc->operands[0] != opnd1
6827 || idesc->operands[1] != opnd2))
6828 idesc = get_next_opcode (idesc);
6829 }
6830 md.slot[curr].idesc = idesc;
6831 }
6832 else
6833 {
6834 insn_type = idesc->type;
6835 insn_unit = IA64_UNIT_NIL;
6836 switch (insn_type)
6837 {
6838 case IA64_TYPE_A:
6839 if (required_unit == IA64_UNIT_I || required_unit == IA64_UNIT_M)
6840 insn_unit = required_unit;
6841 break;
6842 case IA64_TYPE_X: insn_unit = IA64_UNIT_L; break;
6843 case IA64_TYPE_I: insn_unit = IA64_UNIT_I; break;
6844 case IA64_TYPE_M: insn_unit = IA64_UNIT_M; break;
6845 case IA64_TYPE_B: insn_unit = IA64_UNIT_B; break;
6846 case IA64_TYPE_F: insn_unit = IA64_UNIT_F; break;
6847 default: break;
6848 }
6849 }
6850
6851 if (insn_unit != required_unit)
6852 continue; /* Try next slot. */
6853
6854 /* Now is a good time to fix up the labels for this insn. */
6855 mark_label = FALSE;
6856 for (lfix = md.slot[curr].label_fixups; lfix; lfix = lfix->next)
6857 {
6858 S_SET_VALUE (lfix->sym, frag_now_fix () - 16);
6859 symbol_set_frag (lfix->sym, frag_now);
6860 mark_label |= lfix->dw2_mark_labels;
6861 }
6862 for (lfix = md.slot[curr].tag_fixups; lfix; lfix = lfix->next)
6863 {
6864 S_SET_VALUE (lfix->sym, frag_now_fix () - 16 + i);
6865 symbol_set_frag (lfix->sym, frag_now);
6866 }
6867
6868 if (debug_type == DEBUG_DWARF2
6869 || md.slot[curr].loc_directive_seen
6870 || mark_label)
6871 {
6872 bfd_vma addr = frag_now->fr_address + frag_now_fix () - 16 + i;
6873
6874 md.slot[curr].loc_directive_seen = 0;
6875 if (mark_label)
6876 md.slot[curr].debug_line.flags |= DWARF2_FLAG_BASIC_BLOCK;
6877
6878 dwarf2_gen_line_info (addr, &md.slot[curr].debug_line);
6879 }
6880
6881 build_insn (md.slot + curr, insn + i);
6882
6883 ptr = md.slot[curr].unwind_record;
6884 if (ptr)
6885 {
6886 /* Set slot numbers for all remaining unwind records belonging to the
6887 current insn. There can not be any prologue/body unwind records
6888 here. */
6889 for (; ptr != end_ptr; ptr = ptr->next)
6890 {
6891 ptr->slot_number = (unsigned long) f + i;
6892 ptr->slot_frag = frag_now;
6893 }
6894 md.slot[curr].unwind_record = NULL;
6895 }
6896
6897 for (j = 0; j < md.slot[curr].num_fixups; ++j)
6898 {
6899 ifix = md.slot[curr].fixup + j;
6900 fix = fix_new_exp (frag_now, frag_now_fix () - 16 + i, 8,
6901 &ifix->expr, ifix->is_pcrel, ifix->code);
6902 fix->tc_fix_data.opnd = ifix->opnd;
6903 fix->fx_file = md.slot[curr].src_file;
6904 fix->fx_line = md.slot[curr].src_line;
6905 }
6906
6907 end_of_insn_group = md.slot[curr].end_of_insn_group;
6908
6909 /* This adjustment to "i" must occur after the fix, otherwise the fix
6910 is assigned to the wrong slot, and the VMS linker complains. */
6911 if (required_unit == IA64_UNIT_L)
6912 {
6913 know (i == 1);
6914 /* skip one slot for long/X-unit instructions */
6915 ++i;
6916 }
6917 --md.num_slots_in_use;
6918 last_slot = i;
6919
6920 /* clear slot: */
6921 ia64_free_opcode (md.slot[curr].idesc);
6922 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6923 md.slot[curr].user_template = -1;
6924
6925 if (manual_bundling_off)
6926 {
6927 manual_bundling = 0;
6928 break;
6929 }
6930 curr = (curr + 1) % NUM_SLOTS;
6931 idesc = md.slot[curr].idesc;
6932 }
6933
6934 /* A user template was specified, but the first following instruction did
6935 not fit. This can happen with or without manual bundling. */
6936 if (md.num_slots_in_use > 0 && last_slot < 0)
6937 {
6938 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6939 _("`%s' does not fit into %s template"),
6940 idesc->name, ia64_templ_desc[template_val].name);
6941 /* Drop first insn so we don't livelock. */
6942 --md.num_slots_in_use;
6943 know (curr == first);
6944 ia64_free_opcode (md.slot[curr].idesc);
6945 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6946 md.slot[curr].user_template = -1;
6947 }
6948 else if (manual_bundling > 0)
6949 {
6950 if (md.num_slots_in_use > 0)
6951 {
6952 if (last_slot >= 2)
6953 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6954 _("`%s' does not fit into bundle"), idesc->name);
6955 else
6956 {
6957 const char *where;
6958
6959 if (template_val == 2)
6960 where = "X slot";
6961 else if (last_slot == 0)
6962 where = "slots 2 or 3";
6963 else
6964 where = "slot 3";
6965 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6966 _("`%s' can't go in %s of %s template"),
6967 idesc->name, where, ia64_templ_desc[template_val].name);
6968 }
6969 }
6970 else
6971 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6972 _("Missing '}' at end of file"));
6973 }
6974
6975 know (md.num_slots_in_use < NUM_SLOTS);
6976
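  /* Pack the 128-bit bundle: bits 0..4 are the template field (its low bit
     carrying the trailing stop), bits 5..45, 46..86 and 87..127 are the
     three 41-bit instruction slots; t0/t1 are the low and high 64-bit
     halves, emitted little-endian below.  */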
6977 t0 = end_of_insn_group | (template_val << 1) | (insn[0] << 5) | (insn[1] << 46);
6978 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
6979
6980 number_to_chars_littleendian (f + 0, t0, 8);
6981 number_to_chars_littleendian (f + 8, t1, 8);
6982 }
6983
6984 int
6985 md_parse_option (int c, const char *arg)
6986 {
6987
6988 switch (c)
6989 {
6990 /* Switches from the Intel assembler. */
6991 case 'm':
6992 if (strcmp (arg, "ilp64") == 0
6993 || strcmp (arg, "lp64") == 0
6994 || strcmp (arg, "p64") == 0)
6995 {
6996 md.flags |= EF_IA_64_ABI64;
6997 }
6998 else if (strcmp (arg, "ilp32") == 0)
6999 {
7000 md.flags &= ~EF_IA_64_ABI64;
7001 }
7002 else if (strcmp (arg, "le") == 0)
7003 {
7004 md.flags &= ~EF_IA_64_BE;
7005 default_big_endian = 0;
7006 }
7007 else if (strcmp (arg, "be") == 0)
7008 {
7009 md.flags |= EF_IA_64_BE;
7010 default_big_endian = 1;
7011 }
7012 else if (strncmp (arg, "unwind-check=", 13) == 0)
7013 {
7014 arg += 13;
7015 if (strcmp (arg, "warning") == 0)
7016 md.unwind_check = unwind_check_warning;
7017 else if (strcmp (arg, "error") == 0)
7018 md.unwind_check = unwind_check_error;
7019 else
7020 return 0;
7021 }
7022 else if (strncmp (arg, "hint.b=", 7) == 0)
7023 {
7024 arg += 7;
7025 if (strcmp (arg, "ok") == 0)
7026 md.hint_b = hint_b_ok;
7027 else if (strcmp (arg, "warning") == 0)
7028 md.hint_b = hint_b_warning;
7029 else if (strcmp (arg, "error") == 0)
7030 md.hint_b = hint_b_error;
7031 else
7032 return 0;
7033 }
7034 else if (strncmp (arg, "tune=", 5) == 0)
7035 {
7036 arg += 5;
7037 if (strcmp (arg, "itanium1") == 0)
7038 md.tune = itanium1;
7039 else if (strcmp (arg, "itanium2") == 0)
7040 md.tune = itanium2;
7041 else
7042 return 0;
7043 }
7044 else
7045 return 0;
7046 break;
7047
7048 case 'N':
7049 if (strcmp (arg, "so") == 0)
7050 {
7051 /* Suppress signon message. */
7052 }
7053 else if (strcmp (arg, "pi") == 0)
7054 {
7055 /* Reject privileged instructions. FIXME */
7056 }
7057 else if (strcmp (arg, "us") == 0)
7058 {
7059 /* Allow union of signed and unsigned range. FIXME */
7060 }
7061 else if (strcmp (arg, "close_fcalls") == 0)
7062 {
7063 /* Do not resolve global function calls. */
7064 }
7065 else
7066 return 0;
7067 break;
7068
7069 case 'C':
7070 /* temp[="prefix"] Insert temporary labels into the object file
7071 symbol table prefixed by "prefix".
7072 Default prefix is ":temp:".
7073 */
7074 break;
7075
7076 case 'a':
7077       /* indirect=<tgt>	Assume unannotated indirect branches behave
7078 according to <tgt> --
7079 exit: branch out from the current context (default)
7080 labels: all labels in context may be branch targets
7081 */
7082 if (strncmp (arg, "indirect=", 9) != 0)
7083 return 0;
7084 break;
7085
7086 case 'x':
7087 /* -X conflicts with an ignored option, use -x instead */
7088 md.detect_dv = 1;
7089 if (!arg || strcmp (arg, "explicit") == 0)
7090 {
7091 /* set default mode to explicit */
7092 md.default_explicit_mode = 1;
7093 break;
7094 }
7095 else if (strcmp (arg, "auto") == 0)
7096 {
7097 md.default_explicit_mode = 0;
7098 }
7099 else if (strcmp (arg, "none") == 0)
7100 {
7101 md.detect_dv = 0;
7102 }
7103 else if (strcmp (arg, "debug") == 0)
7104 {
7105 md.debug_dv = 1;
7106 }
7107 else if (strcmp (arg, "debugx") == 0)
7108 {
7109 md.default_explicit_mode = 1;
7110 md.debug_dv = 1;
7111 }
7112 else if (strcmp (arg, "debugn") == 0)
7113 {
7114 md.debug_dv = 1;
7115 md.detect_dv = 0;
7116 }
7117 else
7118 {
7119 as_bad (_("Unrecognized option '-x%s'"), arg);
7120 }
7121 break;
7122
7123 case 'S':
7124 /* nops Print nops statistics. */
7125 break;
7126
7127 /* GNU specific switches for gcc. */
7128 case OPTION_MCONSTANT_GP:
7129 md.flags |= EF_IA_64_CONS_GP;
7130 break;
7131
7132 case OPTION_MAUTO_PIC:
7133 md.flags |= EF_IA_64_NOFUNCDESC_CONS_GP;
7134 break;
7135
7136 default:
7137 return 0;
7138 }
7139
7140 return 1;
7141 }
7142
7143 void
7144 md_show_usage (FILE *stream)
7145 {
7146 fputs (_("\
7147 IA-64 options:\n\
7148 --mconstant-gp mark output file as using the constant-GP model\n\
7149 (sets ELF header flag EF_IA_64_CONS_GP)\n\
7150 --mauto-pic mark output file as using the constant-GP model\n\
7151 without function descriptors (sets ELF header flag\n\
7152 EF_IA_64_NOFUNCDESC_CONS_GP)\n\
7153 -milp32|-milp64|-mlp64|-mp64 select data model (default -mlp64)\n\
7154 -mle | -mbe select little- or big-endian byte order (default -mle)\n\
7155 -mtune=[itanium1|itanium2]\n\
7156 tune for a specific CPU (default -mtune=itanium2)\n\
7157 -munwind-check=[warning|error]\n\
7158 unwind directive check (default -munwind-check=warning)\n\
7159 -mhint.b=[ok|warning|error]\n\
7160 hint.b check (default -mhint.b=error)\n\
7161 -x | -xexplicit turn on dependency violation checking\n"), stream);
7162 /* Note for translators: "automagically" can be translated as "automatically" here. */
7163 fputs (_("\
7164 -xauto automagically remove dependency violations (default)\n\
7165 -xnone turn off dependency violation checking\n\
7166 -xdebug debug dependency violation checker\n\
7167 -xdebugn debug dependency violation checker but turn off\n\
7168 dependency violation checking\n\
7169 -xdebugx debug dependency violation checker and turn on\n\
7170 dependency violation checking\n"),
7171 stream);
7172 }
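
/* For example, an explicit-mode LP64 object tuned for Itanium 2 might be
   assembled with (illustrative invocation only):

     as -mlp64 -mtune=itanium2 -xexplicit -munwind-check=error foo.s -o foo.o  */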
7173
7174 void
7175 ia64_after_parse_args (void)
7176 {
7177 if (debug_type == DEBUG_STABS)
7178 as_fatal (_("--gstabs is not supported for ia64"));
7179 }
7180
7181 /* Return true if TYPE fits in TEMPL at SLOT. */
7182
7183 static int
7184 match (int templ, int type, int slot)
7185 {
7186 enum ia64_unit unit;
7187 int result;
7188
7189 unit = ia64_templ_desc[templ].exec_unit[slot];
7190 switch (type)
7191 {
7192 case IA64_TYPE_DYN: result = 1; break; /* for nop and break */
7193 case IA64_TYPE_A:
7194 result = (unit == IA64_UNIT_I || unit == IA64_UNIT_M);
7195 break;
7196 case IA64_TYPE_X: result = (unit == IA64_UNIT_L); break;
7197 case IA64_TYPE_I: result = (unit == IA64_UNIT_I); break;
7198 case IA64_TYPE_M: result = (unit == IA64_UNIT_M); break;
7199 case IA64_TYPE_B: result = (unit == IA64_UNIT_B); break;
7200 case IA64_TYPE_F: result = (unit == IA64_UNIT_F); break;
7201 default: result = 0; break;
7202 }
7203 return result;
7204 }
7205
7206 /* For Itanium 1, add a bit of extra goodness if a nop of type F or B would fit
7207 in TEMPL at SLOT. For Itanium 2, add a bit of extra goodness if a nop of
7208 type M or I would fit in TEMPL at SLOT. */
7209
7210 static inline int
7211 extra_goodness (int templ, int slot)
7212 {
7213 switch (md.tune)
7214 {
7215 case itanium1:
7216 if (slot == 1 && match (templ, IA64_TYPE_F, slot))
7217 return 2;
7218 else if (slot == 2 && match (templ, IA64_TYPE_B, slot))
7219 return 1;
7220 else
7221 return 0;
7222 break;
7223 case itanium2:
7224 if (match (templ, IA64_TYPE_M, slot)
7225 || match (templ, IA64_TYPE_I, slot))
7226       /* Favor M- and I-unit NOPs.  We definitely want to avoid
7227 	 F-unit and B-unit NOPs, which may cause split-issue or
7228 	 less-than-optimal branch-prediction.  */
7229 return 2;
7230 else
7231 return 0;
7232 break;
7233 default:
7234 abort ();
7235 return 0;
7236 }
7237 }
7238
7239 /* This function is called once, at assembler startup time. It sets
7240 up all the tables, etc. that the MD part of the assembler will need
7241 that can be determined before arguments are parsed. */
7242 void
7243 md_begin (void)
7244 {
7245 int i, j, k, t, goodness, best, ok;
7246 const char *err;
7247 char name[8];
7248
7249 md.auto_align = 1;
7250 md.explicit_mode = md.default_explicit_mode;
7251
7252 bfd_set_section_alignment (text_section, 4);
7253
7254 /* Make sure function pointers get initialized. */
7255 target_big_endian = -1;
7256 dot_byteorder (default_big_endian);
7257
7258 alias_hash = hash_new ();
7259 alias_name_hash = hash_new ();
7260 secalias_hash = hash_new ();
7261 secalias_name_hash = hash_new ();
7262
7263 pseudo_func[FUNC_DTP_MODULE].u.sym =
7264 symbol_new (".<dtpmod>", undefined_section, FUNC_DTP_MODULE,
7265 &zero_address_frag);
7266
7267 pseudo_func[FUNC_DTP_RELATIVE].u.sym =
7268 symbol_new (".<dtprel>", undefined_section, FUNC_DTP_RELATIVE,
7269 &zero_address_frag);
7270
7271 pseudo_func[FUNC_FPTR_RELATIVE].u.sym =
7272 symbol_new (".<fptr>", undefined_section, FUNC_FPTR_RELATIVE,
7273 &zero_address_frag);
7274
7275 pseudo_func[FUNC_GP_RELATIVE].u.sym =
7276 symbol_new (".<gprel>", undefined_section, FUNC_GP_RELATIVE,
7277 &zero_address_frag);
7278
7279 pseudo_func[FUNC_LT_RELATIVE].u.sym =
7280 symbol_new (".<ltoff>", undefined_section, FUNC_LT_RELATIVE,
7281 &zero_address_frag);
7282
7283 pseudo_func[FUNC_LT_RELATIVE_X].u.sym =
7284 symbol_new (".<ltoffx>", undefined_section, FUNC_LT_RELATIVE_X,
7285 &zero_address_frag);
7286
7287 pseudo_func[FUNC_PC_RELATIVE].u.sym =
7288 symbol_new (".<pcrel>", undefined_section, FUNC_PC_RELATIVE,
7289 &zero_address_frag);
7290
7291 pseudo_func[FUNC_PLT_RELATIVE].u.sym =
7292 symbol_new (".<pltoff>", undefined_section, FUNC_PLT_RELATIVE,
7293 &zero_address_frag);
7294
7295 pseudo_func[FUNC_SEC_RELATIVE].u.sym =
7296 symbol_new (".<secrel>", undefined_section, FUNC_SEC_RELATIVE,
7297 &zero_address_frag);
7298
7299 pseudo_func[FUNC_SEG_RELATIVE].u.sym =
7300 symbol_new (".<segrel>", undefined_section, FUNC_SEG_RELATIVE,
7301 &zero_address_frag);
7302
7303 pseudo_func[FUNC_TP_RELATIVE].u.sym =
7304 symbol_new (".<tprel>", undefined_section, FUNC_TP_RELATIVE,
7305 &zero_address_frag);
7306
7307 pseudo_func[FUNC_LTV_RELATIVE].u.sym =
7308 symbol_new (".<ltv>", undefined_section, FUNC_LTV_RELATIVE,
7309 &zero_address_frag);
7310
7311 pseudo_func[FUNC_LT_FPTR_RELATIVE].u.sym =
7312 symbol_new (".<ltoff.fptr>", undefined_section, FUNC_LT_FPTR_RELATIVE,
7313 &zero_address_frag);
7314
7315 pseudo_func[FUNC_LT_DTP_MODULE].u.sym =
7316 symbol_new (".<ltoff.dtpmod>", undefined_section, FUNC_LT_DTP_MODULE,
7317 &zero_address_frag);
7318
7319 pseudo_func[FUNC_LT_DTP_RELATIVE].u.sym =
7320 symbol_new (".<ltoff.dptrel>", undefined_section, FUNC_LT_DTP_RELATIVE,
7321 &zero_address_frag);
7322
7323 pseudo_func[FUNC_LT_TP_RELATIVE].u.sym =
7324 symbol_new (".<ltoff.tprel>", undefined_section, FUNC_LT_TP_RELATIVE,
7325 &zero_address_frag);
7326
7327 pseudo_func[FUNC_IPLT_RELOC].u.sym =
7328 symbol_new (".<iplt>", undefined_section, FUNC_IPLT_RELOC,
7329 &zero_address_frag);
7330
7331 #ifdef TE_VMS
7332 pseudo_func[FUNC_SLOTCOUNT_RELOC].u.sym =
7333 symbol_new (".<slotcount>", undefined_section, FUNC_SLOTCOUNT_RELOC,
7334 &zero_address_frag);
7335 #endif
7336
7337 if (md.tune != itanium1)
7338 {
7339       /* Convert MFI NOP bundles into MMI NOP bundles.  */
7340 le_nop[0] = 0x8;
7341 le_nop_stop[0] = 0x9;
7342 }
7343
7344 /* Compute the table of best templates. We compute goodness as a
7345 base 4 value, in which each match counts for 3. Match-failures
7346 result in NOPs and we use extra_goodness() to pick the execution
7347 units that are best suited for issuing the NOP. */
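/* For example, given the insn type triple (M, F, I), the MFI template
   matches all three slots and scores 3 + 3 + 3, whereas MMI matches only
   slot 0 and scores 3 plus whatever extra_goodness() awards for NOPs in
   slots 1 and 2, so MFI wins.  */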
7348 for (i = 0; i < IA64_NUM_TYPES; ++i)
7349 for (j = 0; j < IA64_NUM_TYPES; ++j)
7350 for (k = 0; k < IA64_NUM_TYPES; ++k)
7351 {
7352 best = 0;
7353 for (t = 0; t < NELEMS (ia64_templ_desc); ++t)
7354 {
7355 goodness = 0;
7356 if (match (t, i, 0))
7357 {
7358 if (match (t, j, 1))
7359 {
7360 if ((t == 2 && j == IA64_TYPE_X) || match (t, k, 2))
7361 goodness = 3 + 3 + 3;
7362 else
7363 goodness = 3 + 3 + extra_goodness (t, 2);
7364 }
7365 else if (match (t, j, 2))
7366 goodness = 3 + 3 + extra_goodness (t, 1);
7367 else
7368 {
7369 goodness = 3;
7370 goodness += extra_goodness (t, 1);
7371 goodness += extra_goodness (t, 2);
7372 }
7373 }
7374 else if (match (t, i, 1))
7375 {
7376 if ((t == 2 && i == IA64_TYPE_X) || match (t, j, 2))
7377 goodness = 3 + 3;
7378 else
7379 goodness = 3 + extra_goodness (t, 2);
7380 }
7381 else if (match (t, i, 2))
7382 goodness = 3 + extra_goodness (t, 1);
7383
7384 if (goodness > best)
7385 {
7386 best = goodness;
7387 best_template[i][j][k] = t;
7388 }
7389 }
7390 }
7391
7392 #ifdef DEBUG_TEMPLATES
7393 /* For debugging changes to the best_template calculations. We don't care
7394 about combinations with invalid instructions, so start the loops at 1. */
7395 for (i = 0; i < IA64_NUM_TYPES; ++i)
7396 for (j = 0; j < IA64_NUM_TYPES; ++j)
7397 for (k = 0; k < IA64_NUM_TYPES; ++k)
7398 {
7399 char type_letter[IA64_NUM_TYPES] = { 'n', 'a', 'i', 'm', 'b', 'f',
7400 'x', 'd' };
7401 fprintf (stderr, "%c%c%c %s\n", type_letter[i], type_letter[j],
7402 type_letter[k],
7403 ia64_templ_desc[best_template[i][j][k]].name);
7404 }
7405 #endif
7406
7407 for (i = 0; i < NUM_SLOTS; ++i)
7408 md.slot[i].user_template = -1;
7409
7410 md.pseudo_hash = hash_new ();
7411 for (i = 0; i < NELEMS (pseudo_opcode); ++i)
7412 {
7413 err = hash_insert (md.pseudo_hash, pseudo_opcode[i].name,
7414 (void *) (pseudo_opcode + i));
7415 if (err)
7416 as_fatal (_("ia64.md_begin: can't hash `%s': %s"),
7417 pseudo_opcode[i].name, err);
7418 }
7419
7420 md.reg_hash = hash_new ();
7421 md.dynreg_hash = hash_new ();
7422 md.const_hash = hash_new ();
7423 md.entry_hash = hash_new ();
7424
7425 /* general registers: */
7426 declare_register_set ("r", 128, REG_GR);
7427 declare_register ("gp", REG_GR + 1);
7428 declare_register ("sp", REG_GR + 12);
7429 declare_register ("tp", REG_GR + 13);
7430 declare_register_set ("ret", 4, REG_GR + 8);
7431
7432 /* floating point registers: */
7433 declare_register_set ("f", 128, REG_FR);
7434 declare_register_set ("farg", 8, REG_FR + 8);
7435 declare_register_set ("fret", 8, REG_FR + 8);
7436
7437 /* branch registers: */
7438 declare_register_set ("b", 8, REG_BR);
7439 declare_register ("rp", REG_BR + 0);
7440
7441 /* predicate registers: */
7442 declare_register_set ("p", 64, REG_P);
7443 declare_register ("pr", REG_PR);
7444 declare_register ("pr.rot", REG_PR_ROT);
7445
7446 /* application registers: */
7447 declare_register_set ("ar", 128, REG_AR);
7448 for (i = 0; i < NELEMS (ar); ++i)
7449 declare_register (ar[i].name, REG_AR + ar[i].regnum);
7450
7451 /* control registers: */
7452 declare_register_set ("cr", 128, REG_CR);
7453 for (i = 0; i < NELEMS (cr); ++i)
7454 declare_register (cr[i].name, REG_CR + cr[i].regnum);
7455
7456 /* dahr registers: */
7457 declare_register_set ("dahr", 8, REG_DAHR);
7458
7459 declare_register ("ip", REG_IP);
7460 declare_register ("cfm", REG_CFM);
7461 declare_register ("psr", REG_PSR);
7462 declare_register ("psr.l", REG_PSR_L);
7463 declare_register ("psr.um", REG_PSR_UM);
7464
7465 for (i = 0; i < NELEMS (indirect_reg); ++i)
7466 {
7467 unsigned int regnum = indirect_reg[i].regnum;
7468
7469 md.indregsym[regnum - IND_CPUID] = declare_register (indirect_reg[i].name, regnum);
7470 }
7471
7472 /* pseudo-registers used to specify unwind info: */
7473 declare_register ("psp", REG_PSP);
7474
7475 for (i = 0; i < NELEMS (const_bits); ++i)
7476 {
7477 err = hash_insert (md.const_hash, const_bits[i].name,
7478 (void *) (const_bits + i));
7479 if (err)
7480 as_fatal (_("Inserting \"%s\" into constant hash table failed: %s"),
7481 		  const_bits[i].name, err);
7482 }
7483
7484 /* Set the architecture and machine depending on defaults and command line
7485 options. */
7486 if (md.flags & EF_IA_64_ABI64)
7487 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf64);
7488 else
7489 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf32);
7490
7491 if (! ok)
7492 as_warn (_("Could not set architecture and machine"));
7493
7494 /* Set the pointer size and pointer shift size depending on md.flags */
7495
7496 if (md.flags & EF_IA_64_ABI64)
7497 {
7498 md.pointer_size = 8; /* pointers are 8 bytes */
7499       md.pointer_size_shift = 3; /* alignment is 8 bytes = 2^3 */
7500 }
7501 else
7502 {
7503 md.pointer_size = 4; /* pointers are 4 bytes */
7504 md.pointer_size_shift = 2; /* alignment is 4 bytes = 2^2 */
7505 }
7506
7507 md.mem_offset.hint = 0;
7508 md.path = 0;
7509 md.maxpaths = 0;
7510 md.entry_labels = NULL;
7511 }
7512
7513 /* Set the default options in md. Cannot do this in md_begin because
7514 that is called after md_parse_option which is where we set the
7515 options in md based on command line options. */
7516
7517 void
7518 ia64_init (int argc ATTRIBUTE_UNUSED, char **argv ATTRIBUTE_UNUSED)
7519 {
7520 md.flags = MD_FLAGS_DEFAULT;
7521 #ifndef TE_VMS
7522   /* Don't turn on dependency checking for VMS; it doesn't work there.  */
7523 md.detect_dv = 1;
7524 #endif
7525 /* FIXME: We should change it to unwind_check_error someday. */
7526 md.unwind_check = unwind_check_warning;
7527 md.hint_b = hint_b_error;
7528 md.tune = itanium2;
7529 }
7530
7531 /* Return a string for the target object file format. */
7532
7533 const char *
7534 ia64_target_format (void)
7535 {
7536 if (OUTPUT_FLAVOR == bfd_target_elf_flavour)
7537 {
7538 if (md.flags & EF_IA_64_BE)
7539 {
7540 if (md.flags & EF_IA_64_ABI64)
7541 #if defined(TE_AIX50)
7542 return "elf64-ia64-aix-big";
7543 #elif defined(TE_HPUX)
7544 return "elf64-ia64-hpux-big";
7545 #else
7546 return "elf64-ia64-big";
7547 #endif
7548 else
7549 #if defined(TE_AIX50)
7550 return "elf32-ia64-aix-big";
7551 #elif defined(TE_HPUX)
7552 return "elf32-ia64-hpux-big";
7553 #else
7554 return "elf32-ia64-big";
7555 #endif
7556 }
7557 else
7558 {
7559 if (md.flags & EF_IA_64_ABI64)
7560 #if defined (TE_AIX50)
7561 return "elf64-ia64-aix-little";
7562 #elif defined (TE_VMS)
7563 {
7564 md.flags |= EF_IA_64_ARCHVER_1;
7565 return "elf64-ia64-vms";
7566 }
7567 #else
7568 return "elf64-ia64-little";
7569 #endif
7570 else
7571 #ifdef TE_AIX50
7572 return "elf32-ia64-aix-little";
7573 #else
7574 return "elf32-ia64-little";
7575 #endif
7576 }
7577 }
7578 else
7579 return "unknown-format";
7580 }
7581
7582 void
7583 ia64_end_of_source (void)
7584 {
7585 /* terminate insn group upon reaching end of file: */
7586 insn_group_break (1, 0, 0);
7587
7588 /* emits slots we haven't written yet: */
7589 ia64_flush_insns ();
7590
7591 bfd_set_private_flags (stdoutput, md.flags);
7592
7593 md.mem_offset.hint = 0;
7594 }
7595
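/* Called at the start of every logical line.  A dangling "(qp)" predicate
   is diagnosed here, and explicit stops (";;") as well as the manual
   bundling braces are recognized, e.g. (illustrative):

     { .mii
       ld8 r2 = [r3]
       add r4 = r5, r6
       nop.i 0
     }  */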
7596 void
7597 ia64_start_line (void)
7598 {
7599 static int first;
7600
7601 if (!first) {
7602 /* Make sure we don't reference input_line_pointer[-1] when that's
7603 not valid. */
7604 first = 1;
7605 return;
7606 }
7607
7608 if (md.qp.X_op == O_register)
7609 as_bad (_("qualifying predicate not followed by instruction"));
7610 md.qp.X_op = O_absent;
7611
7612 if (ignore_input ())
7613 return;
7614
7615 if (input_line_pointer[0] == ';' && input_line_pointer[-1] == ';')
7616 {
7617 if (md.detect_dv && !md.explicit_mode)
7618 {
7619 static int warned;
7620
7621 if (!warned)
7622 {
7623 warned = 1;
7624 as_warn (_("Explicit stops are ignored in auto mode"));
7625 }
7626 }
7627 else
7628 insn_group_break (1, 0, 0);
7629 }
7630 else if (input_line_pointer[-1] == '{')
7631 {
7632 if (md.manual_bundling)
7633 as_warn (_("Found '{' when manual bundling is already turned on"));
7634 else
7635 CURR_SLOT.manual_bundling_on = 1;
7636 md.manual_bundling = 1;
7637
7638 /* Bundling is only acceptable in explicit mode
7639 or when in default automatic mode. */
7640 if (md.detect_dv && !md.explicit_mode)
7641 {
7642 if (!md.mode_explicitly_set
7643 && !md.default_explicit_mode)
7644 dot_dv_mode ('E');
7645 else
7646 as_warn (_("Found '{' after explicit switch to automatic mode"));
7647 }
7648 }
7649 else if (input_line_pointer[-1] == '}')
7650 {
7651 if (!md.manual_bundling)
7652 as_warn (_("Found '}' when manual bundling is off"));
7653 else
7654 PREV_SLOT.manual_bundling_off = 1;
7655 md.manual_bundling = 0;
7656
7657 /* switch back to automatic mode, if applicable */
7658 if (md.detect_dv
7659 && md.explicit_mode
7660 && !md.mode_explicitly_set
7661 && !md.default_explicit_mode)
7662 dot_dv_mode ('A');
7663 }
7664 }
7665
7666 /* This is a hook for ia64_frob_label, so that it can distinguish tags from
7667 labels. */
7668 static int defining_tag = 0;
7669
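/* Handle the ia64-specific line prefixes that read.c does not know about:
   a parenthesized qualifying predicate and a bracketed tag, e.g.
   (illustrative):

     (p6)     add r1 = r2, r3
     [mytag:] ld8 r4 = [r5]  */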
7670 int
7671 ia64_unrecognized_line (int ch)
7672 {
7673 switch (ch)
7674 {
7675 case '(':
7676 expression_and_evaluate (&md.qp);
7677 if (*input_line_pointer++ != ')')
7678 {
7679 as_bad (_("Expected ')'"));
7680 return 0;
7681 }
7682 if (md.qp.X_op != O_register)
7683 {
7684 as_bad (_("Qualifying predicate expected"));
7685 return 0;
7686 }
7687 if (md.qp.X_add_number < REG_P || md.qp.X_add_number >= REG_P + 64)
7688 {
7689 as_bad (_("Predicate register expected"));
7690 return 0;
7691 }
7692 return 1;
7693
7694 case '[':
7695 {
7696 char *s;
7697 char c;
7698 symbolS *tag;
7699 int temp;
7700
7701 if (md.qp.X_op == O_register)
7702 {
7703 as_bad (_("Tag must come before qualifying predicate."));
7704 return 0;
7705 }
7706
7707 /* This implements just enough of read_a_source_file in read.c to
7708 recognize labels. */
7709 if (is_name_beginner (*input_line_pointer))
7710 {
7711 c = get_symbol_name (&s);
7712 }
7713 else if (LOCAL_LABELS_FB
7714 && ISDIGIT (*input_line_pointer))
7715 {
7716 temp = 0;
7717 while (ISDIGIT (*input_line_pointer))
7718 temp = (temp * 10) + *input_line_pointer++ - '0';
7719 fb_label_instance_inc (temp);
7720 s = fb_label_name (temp, 0);
7721 c = *input_line_pointer;
7722 }
7723 else
7724 {
7725 s = NULL;
7726 c = '\0';
7727 }
7728 if (c != ':')
7729 {
7730 /* Put ':' back for error messages' sake. */
7731 *input_line_pointer++ = ':';
7732 as_bad (_("Expected ':'"));
7733 return 0;
7734 }
7735
7736 defining_tag = 1;
7737 tag = colon (s);
7738 defining_tag = 0;
7739 /* Put ':' back for error messages' sake. */
7740 *input_line_pointer++ = ':';
7741 if (*input_line_pointer++ != ']')
7742 {
7743 as_bad (_("Expected ']'"));
7744 return 0;
7745 }
7746 if (! tag)
7747 {
7748 as_bad (_("Tag name expected"));
7749 return 0;
7750 }
7751 return 1;
7752 }
7753
7754 default:
7755 break;
7756 }
7757
7758 /* Not a valid line. */
7759 return 0;
7760 }
7761
7762 void
7763 ia64_frob_label (struct symbol *sym)
7764 {
7765 struct label_fix *fix;
7766
7767 /* Tags need special handling since they are not bundle breaks like
7768 labels. */
7769 if (defining_tag)
7770 {
7771 fix = XOBNEW (&notes, struct label_fix);
7772 fix->sym = sym;
7773 fix->next = CURR_SLOT.tag_fixups;
7774 fix->dw2_mark_labels = FALSE;
7775 CURR_SLOT.tag_fixups = fix;
7776
7777 return;
7778 }
7779
7780 if (bfd_section_flags (now_seg) & SEC_CODE)
7781 {
7782 md.last_text_seg = now_seg;
7783 fix = XOBNEW (&notes, struct label_fix);
7784 fix->sym = sym;
7785 fix->next = CURR_SLOT.label_fixups;
7786 fix->dw2_mark_labels = dwarf2_loc_mark_labels;
7787 CURR_SLOT.label_fixups = fix;
7788
7789 /* Keep track of how many code entry points we've seen. */
7790 if (md.path == md.maxpaths)
7791 {
7792 md.maxpaths += 20;
7793 md.entry_labels = XRESIZEVEC (const char *, md.entry_labels,
7794 md.maxpaths);
7795 }
7796 md.entry_labels[md.path++] = S_GET_NAME (sym);
7797 }
7798 }
7799
7800 #ifdef TE_HPUX
7801 /* The HP-UX linker will give unresolved symbol errors for symbols
7802 that are declared but unused. This routine removes declared,
7803 unused symbols from an object. */
7804 int
7805 ia64_frob_symbol (struct symbol *sym)
7806 {
7807 if ((S_GET_SEGMENT (sym) == bfd_und_section_ptr && ! symbol_used_p (sym) &&
7808 ELF_ST_VISIBILITY (S_GET_OTHER (sym)) == STV_DEFAULT)
7809 || (S_GET_SEGMENT (sym) == bfd_abs_section_ptr
7810 && ! S_IS_EXTERNAL (sym)))
7811 return 1;
7812 return 0;
7813 }
7814 #endif
7815
7816 void
7817 ia64_flush_pending_output (void)
7818 {
7819 if (!md.keep_pending_output
7820 && bfd_section_flags (now_seg) & SEC_CODE)
7821 {
7822 /* ??? This causes many unnecessary stop bits to be emitted.
7823 Unfortunately, it isn't clear if it is safe to remove this. */
7824 insn_group_break (1, 0, 0);
7825 ia64_flush_insns ();
7826 }
7827 }
7828
7829 /* Do ia64-specific expression optimization. All that's done here is
7830 to transform index expressions that are either due to the indexing
7831 of rotating registers or due to the indexing of indirect register
7832 sets. */
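/* E.g., after ".rotr count[4]" the source operand "count[2]" reaches this
   hook as an O_index over the rotating-register expression and is folded
   into the plain register two past the set's base; likewise "dbr[r3]"
   becomes an O_index against the dbr indirect-register symbol.  */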
7833 int
7834 ia64_optimize_expr (expressionS *l, operatorT op, expressionS *r)
7835 {
7836 if (op != O_index)
7837 return 0;
7838 resolve_expression (l);
7839 if (l->X_op == O_register)
7840 {
7841 unsigned num_regs = l->X_add_number >> 16;
7842
7843 resolve_expression (r);
7844 if (num_regs)
7845 {
7846 /* Left side is a .rotX-allocated register. */
7847 if (r->X_op != O_constant)
7848 {
7849 as_bad (_("Rotating register index must be a non-negative constant"));
7850 r->X_add_number = 0;
7851 }
7852 else if ((valueT) r->X_add_number >= num_regs)
7853 {
7854 as_bad (_("Index out of range 0..%u"), num_regs - 1);
7855 r->X_add_number = 0;
7856 }
7857 l->X_add_number = (l->X_add_number & 0xffff) + r->X_add_number;
7858 return 1;
7859 }
7860 else if (l->X_add_number >= IND_CPUID && l->X_add_number <= IND_RR)
7861 {
7862 if (r->X_op != O_register
7863 || r->X_add_number < REG_GR
7864 || r->X_add_number > REG_GR + 127)
7865 {
7866 as_bad (_("Indirect register index must be a general register"));
7867 r->X_add_number = REG_GR;
7868 }
7869 l->X_op = O_index;
7870 l->X_op_symbol = md.indregsym[l->X_add_number - IND_CPUID];
7871 l->X_add_number = r->X_add_number;
7872 return 1;
7873 }
7874 }
7875 as_bad (_("Index can only be applied to rotating or indirect registers"));
7876   /* Fall back to a register whose use has as few side effects as
7877      possible, to minimize subsequent error messages.  */
7878 l->X_op = O_register;
7879 l->X_add_number = REG_GR + 3;
7880 return 1;
7881 }
7882
7883 int
7884 ia64_parse_name (char *name, expressionS *e, char *nextcharP)
7885 {
7886 struct const_desc *cdesc;
7887 struct dynreg *dr = 0;
7888 unsigned int idx;
7889 struct symbol *sym;
7890 char *end;
7891
7892 if (*name == '@')
7893 {
7894 enum pseudo_type pseudo_type = PSEUDO_FUNC_NONE;
7895
7896 /* Find what relocation pseudo-function we're dealing with. */
7897 for (idx = 0; idx < NELEMS (pseudo_func); ++idx)
7898 if (pseudo_func[idx].name
7899 && pseudo_func[idx].name[0] == name[1]
7900 && strcmp (pseudo_func[idx].name + 1, name + 2) == 0)
7901 {
7902 pseudo_type = pseudo_func[idx].type;
7903 break;
7904 }
7905 switch (pseudo_type)
7906 {
7907 case PSEUDO_FUNC_RELOC:
7908 end = input_line_pointer;
7909 if (*nextcharP != '(')
7910 {
7911 as_bad (_("Expected '('"));
7912 break;
7913 }
7914 /* Skip '('. */
7915 ++input_line_pointer;
7916 expression (e);
7917 if (*input_line_pointer != ')')
7918 {
7919 as_bad (_("Missing ')'"));
7920 goto done;
7921 }
7922 /* Skip ')'. */
7923 ++input_line_pointer;
7924 #ifdef TE_VMS
7925 if (idx == FUNC_SLOTCOUNT_RELOC)
7926 {
7927 /* @slotcount can accept any expression. Canonicalize. */
7928 e->X_add_symbol = make_expr_symbol (e);
7929 e->X_op = O_symbol;
7930 e->X_add_number = 0;
7931 }
7932 #endif
7933 if (e->X_op != O_symbol)
7934 {
7935 if (e->X_op != O_pseudo_fixup)
7936 {
7937 as_bad (_("Not a symbolic expression"));
7938 goto done;
7939 }
7940 if (idx != FUNC_LT_RELATIVE)
7941 {
7942 as_bad (_("Illegal combination of relocation functions"));
7943 goto done;
7944 }
7945 switch (S_GET_VALUE (e->X_op_symbol))
7946 {
7947 case FUNC_FPTR_RELATIVE:
7948 idx = FUNC_LT_FPTR_RELATIVE; break;
7949 case FUNC_DTP_MODULE:
7950 idx = FUNC_LT_DTP_MODULE; break;
7951 case FUNC_DTP_RELATIVE:
7952 idx = FUNC_LT_DTP_RELATIVE; break;
7953 case FUNC_TP_RELATIVE:
7954 idx = FUNC_LT_TP_RELATIVE; break;
7955 default:
7956 as_bad (_("Illegal combination of relocation functions"));
7957 goto done;
7958 }
7959 }
7960 /* Make sure gas doesn't get rid of local symbols that are used
7961 in relocs. */
7962 e->X_op = O_pseudo_fixup;
7963 e->X_op_symbol = pseudo_func[idx].u.sym;
7964 done:
7965 *nextcharP = *input_line_pointer;
7966 break;
7967
7968 case PSEUDO_FUNC_CONST:
7969 e->X_op = O_constant;
7970 e->X_add_number = pseudo_func[idx].u.ival;
7971 break;
7972
7973 case PSEUDO_FUNC_REG:
7974 e->X_op = O_register;
7975 e->X_add_number = pseudo_func[idx].u.ival;
7976 break;
7977
7978 default:
7979 return 0;
7980 }
7981 return 1;
7982 }
7983
7984 /* first see if NAME is a known register name: */
7985 sym = hash_find (md.reg_hash, name);
7986 if (sym)
7987 {
7988 e->X_op = O_register;
7989 e->X_add_number = S_GET_VALUE (sym);
7990 return 1;
7991 }
7992
7993 cdesc = hash_find (md.const_hash, name);
7994 if (cdesc)
7995 {
7996 e->X_op = O_constant;
7997 e->X_add_number = cdesc->value;
7998 return 1;
7999 }
8000
8001 /* check for inN, locN, or outN: */
8002 idx = 0;
8003 switch (name[0])
8004 {
8005 case 'i':
8006 if (name[1] == 'n' && ISDIGIT (name[2]))
8007 {
8008 dr = &md.in;
8009 idx = 2;
8010 }
8011 break;
8012
8013 case 'l':
8014 if (name[1] == 'o' && name[2] == 'c' && ISDIGIT (name[3]))
8015 {
8016 dr = &md.loc;
8017 idx = 3;
8018 }
8019 break;
8020
8021 case 'o':
8022 if (name[1] == 'u' && name[2] == 't' && ISDIGIT (name[3]))
8023 {
8024 dr = &md.out;
8025 idx = 3;
8026 }
8027 break;
8028
8029 default:
8030 break;
8031 }
8032
8033 /* Ignore register numbers with leading zeroes, except zero itself. */
8034 if (dr && (name[idx] != '0' || name[idx + 1] == '\0'))
8035 {
8036 unsigned long regnum;
8037
8038 /* The name is inN, locN, or outN; parse the register number. */
8039 regnum = strtoul (name + idx, &end, 10);
8040 if (end > name + idx && *end == '\0' && regnum < 96)
8041 {
8042 if (regnum >= dr->num_regs)
8043 {
8044 if (!dr->num_regs)
8045 as_bad (_("No current frame"));
8046 else
8047 as_bad (_("Register number out of range 0..%u"),
8048 dr->num_regs - 1);
8049 regnum = 0;
8050 }
8051 e->X_op = O_register;
8052 e->X_add_number = dr->base + regnum;
8053 return 1;
8054 }
8055 }
8056
8057 end = xstrdup (name);
8058 name = ia64_canonicalize_symbol_name (end);
8059 if ((dr = hash_find (md.dynreg_hash, name)))
8060 {
8061 /* We've got ourselves the name of a rotating register set.
8062 Store the base register number in the low 16 bits of
8063 X_add_number and the size of the register set in the top 16
8064 bits. */
8065 e->X_op = O_register;
8066 e->X_add_number = dr->base | (dr->num_regs << 16);
8067 free (end);
8068 return 1;
8069 }
8070 free (end);
8071 return 0;
8072 }
8073
8074 /* Remove the '#' suffix that indicates a symbol as opposed to a register. */
8075
8076 char *
8077 ia64_canonicalize_symbol_name (char *name)
8078 {
8079 size_t len = strlen (name), full = len;
8080
8081 while (len > 0 && name[len - 1] == '#')
8082 --len;
8083 if (len <= 0)
8084 {
8085 if (full > 0)
8086 as_bad (_("Standalone `#' is illegal"));
8087 }
8088 else if (len < full - 1)
8089 as_warn (_("Redundant `#' suffix operators"));
8090 name[len] = '\0';
8091 return name;
8092 }
8093
8094 /* Return true if idesc is a conditional branch instruction. This excludes
8095 the modulo scheduled branches, and br.ia. Mod-sched branches are excluded
8096 because they always read/write resources regardless of the value of the
8097 qualifying predicate. br.ia must always use p0, and hence is always
8098 taken. Thus this function returns true for branches which can fall
8099 through, and which use no resources if they do fall through. */
8100
8101 static int
8102 is_conditional_branch (struct ia64_opcode *idesc)
8103 {
8104 /* br is a conditional branch. Everything that starts with br. except
8105 br.ia, br.c{loop,top,exit}, and br.w{top,exit} is a conditional branch.
8106 Everything that starts with brl is a conditional branch. */
8107 return (idesc->name[0] == 'b' && idesc->name[1] == 'r'
8108 && (idesc->name[2] == '\0'
8109 || (idesc->name[2] == '.' && idesc->name[3] != 'i'
8110 && idesc->name[3] != 'c' && idesc->name[3] != 'w')
8111 || idesc->name[2] == 'l'
8112 /* br.cond, br.call, br.clr */
8113 || (idesc->name[2] == '.' && idesc->name[3] == 'c'
8114 && (idesc->name[4] == 'a' || idesc->name[4] == 'o'
8115 || (idesc->name[4] == 'l' && idesc->name[5] == 'r')))));
8116 }
8117
8118 /* Return whether the given opcode is a taken branch. If there's any doubt,
8119 returns zero. */
8120
8121 static int
8122 is_taken_branch (struct ia64_opcode *idesc)
8123 {
8124 return ((is_conditional_branch (idesc) && CURR_SLOT.qp_regno == 0)
8125 || strncmp (idesc->name, "br.ia", 5) == 0);
8126 }
8127
8128 /* Return whether the given opcode is an interruption or rfi. If there's any
8129 doubt, returns zero. */
8130
8131 static int
8132 is_interruption_or_rfi (struct ia64_opcode *idesc)
8133 {
8134 if (strcmp (idesc->name, "rfi") == 0)
8135 return 1;
8136 return 0;
8137 }
8138
8139 /* Returns the index of the given dependency in the opcode's list of chks, or
8140 -1 if there is no dependency. */
8141
8142 static int
8143 depends_on (int depind, struct ia64_opcode *idesc)
8144 {
8145 int i;
8146 const struct ia64_opcode_dependency *dep = idesc->dependencies;
8147 for (i = 0; i < dep->nchks; i++)
8148 {
8149 if (depind == DEP (dep->chks[i]))
8150 return i;
8151 }
8152 return -1;
8153 }
8154
8155 /* Determine a set of specific resources used for a particular resource
8156    class.  Returns the number of specific resources identified.  For those
8157 cases which are not determinable statically, the resource returned is
8158 marked nonspecific.
8159
8160 Meanings of value in 'NOTE':
8161 1) only read/write when the register number is explicitly encoded in the
8162 insn.
8163 2) only read CFM when accessing a rotating GR, FR, or PR. mov pr only
8164 accesses CFM when qualifying predicate is in the rotating region.
8165 3) general register value is used to specify an indirect register; not
8166 determinable statically.
8167 4) only read the given resource when bits 7:0 of the indirect index
8168 register value does not match the register number of the resource; not
8169 determinable statically.
8170 5) all rules are implementation specific.
8171 6) only when both the index specified by the reader and the index specified
8172 by the writer have the same value in bits 63:61; not determinable
8173 statically.
8174 7) only access the specified resource when the corresponding mask bit is
8175 set
8176 8) PSR.dfh is only read when these insns reference FR32-127. PSR.dfl is
8177 only read when these insns reference FR2-31
8178 9) PSR.mfl is only written when these insns write FR2-31. PSR.mfh is only
8179 written when these insns write FR32-127
8180 10) The PSR.bn bit is only accessed when one of GR16-31 is specified in the
8181 instruction
8182 11) The target predicates are written independently of PR[qp], but source
8183 registers are only read if PR[qp] is true. Since the state of PR[qp]
8184 cannot statically be determined, all source registers are marked used.
8185 12) This insn only reads the specified predicate register when that
8186 register is the PR[qp].
8187 13) This reference to ld-c only applies to the GR whose value is loaded
8188 with data returned from memory, not the post-incremented address register.
8189 14) The RSE resource includes the implementation-specific RSE internal
8190 state resources. At least one (and possibly more) of these resources are
8191 read by each instruction listed in IC:rse-readers. At least one (and
8192 possibly more) of these resources are written by each insn listed in
8193 IC:rse-writers.
8194    15+16) Represent reserved instructions, which the assembler does not
8195 generate.
8196 17) CR[TPR] has a RAW dependency only between mov-to-CR-TPR and
8197 mov-to-PSR-l or ssm instructions that set PSR.i, PSR.pp or PSR.up.
8198
8199 Memory resources (i.e. locations in memory) are *not* marked or tracked by
8200 this code; there are no dependency violations based on memory access.
8201 */
8202
8203 #define MAX_SPECS 256
8204 #define DV_CHK 1
8205 #define DV_REG 0
8206
8207 static int
8208 specify_resource (const struct ia64_dependency *dep,
8209 struct ia64_opcode *idesc,
8210 /* is this a DV chk or a DV reg? */
8211 int type,
8212 /* returned specific resources */
8213 struct rsrc specs[MAX_SPECS],
8214 /* resource note for this insn's usage */
8215 int note,
8216 /* which execution path to examine */
8217 int path)
8218 {
8219 int count = 0;
8220 int i;
8221 int rsrc_write = 0;
8222 struct rsrc tmpl;
8223
8224 if (dep->mode == IA64_DV_WAW
8225 || (dep->mode == IA64_DV_RAW && type == DV_REG)
8226 || (dep->mode == IA64_DV_WAR && type == DV_CHK))
8227 rsrc_write = 1;
8228
8229 /* template for any resources we identify */
8230 tmpl.dependency = dep;
8231 tmpl.note = note;
8232 tmpl.insn_srlz = tmpl.data_srlz = 0;
8233 tmpl.qp_regno = CURR_SLOT.qp_regno;
8234 tmpl.link_to_qp_branch = 1;
8235 tmpl.mem_offset.hint = 0;
8236 tmpl.mem_offset.offset = 0;
8237 tmpl.mem_offset.base = 0;
8238 tmpl.specific = 1;
8239 tmpl.index = -1;
8240 tmpl.cmp_type = CMP_NONE;
8241 tmpl.depind = 0;
8242 tmpl.file = NULL;
8243 tmpl.line = 0;
8244 tmpl.path = 0;
8245
8246 #define UNHANDLED \
8247 as_warn (_("Unhandled dependency %s for %s (%s), note %d"), \
8248 dep->name, idesc->name, (rsrc_write?"write":"read"), note)
8249 #define KNOWN(REG) (gr_values[REG].known && gr_values[REG].path >= path)
8250
8251 /* we don't need to track these */
8252 if (dep->semantics == IA64_DVS_NONE)
8253 return 0;
8254
8255 switch (dep->specifier)
8256 {
8257 case IA64_RS_AR_K:
8258 if (note == 1)
8259 {
8260 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8261 {
8262 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8263 if (regno >= 0 && regno <= 7)
8264 {
8265 specs[count] = tmpl;
8266 specs[count++].index = regno;
8267 }
8268 }
8269 }
8270 else if (note == 0)
8271 {
8272 for (i = 0; i < 8; i++)
8273 {
8274 specs[count] = tmpl;
8275 specs[count++].index = i;
8276 }
8277 }
8278 else
8279 {
8280 UNHANDLED;
8281 }
8282 break;
8283
8284 case IA64_RS_AR_UNAT:
8285 /* This is a mov =AR or mov AR= instruction. */
8286 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8287 {
8288 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8289 if (regno == AR_UNAT)
8290 {
8291 specs[count++] = tmpl;
8292 }
8293 }
8294 else
8295 {
8296 /* This is a spill/fill, or other instruction that modifies the
8297 unat register. */
8298
8299 /* Unless we can determine the specific bits used, mark the whole
8300 thing; bits 8:3 of the memory address indicate the bit used in
8301 UNAT. The .mem.offset hint may be used to eliminate a small
8302 subset of conflicts. */
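/* Worked example of the index computed below (illustrative only): a spill
   annotated with ".mem.offset 40, 0" yields (40 >> 3) & 0x3F == 5, i.e.
   UNAT bit 5.  */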
8303 specs[count] = tmpl;
8304 if (md.mem_offset.hint)
8305 {
8306 if (md.debug_dv)
8307 fprintf (stderr, " Using hint for spill/fill\n");
8308 /* The index isn't actually used, just set it to something
8309 approximating the bit index. */
8310 specs[count].index = (md.mem_offset.offset >> 3) & 0x3F;
8311 specs[count].mem_offset.hint = 1;
8312 specs[count].mem_offset.offset = md.mem_offset.offset;
8313 specs[count++].mem_offset.base = md.mem_offset.base;
8314 }
8315 else
8316 {
8317 specs[count++].specific = 0;
8318 }
8319 }
8320 break;
8321
8322 case IA64_RS_AR:
8323 if (note == 1)
8324 {
8325 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8326 {
8327 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8328 if ((regno >= 8 && regno <= 15)
8329 || (regno >= 20 && regno <= 23)
8330 || (regno >= 31 && regno <= 39)
8331 || (regno >= 41 && regno <= 47)
8332 || (regno >= 67 && regno <= 111))
8333 {
8334 specs[count] = tmpl;
8335 specs[count++].index = regno;
8336 }
8337 }
8338 }
8339 else
8340 {
8341 UNHANDLED;
8342 }
8343 break;
8344
8345 case IA64_RS_ARb:
8346 if (note == 1)
8347 {
8348 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8349 {
8350 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8351 if ((regno >= 48 && regno <= 63)
8352 || (regno >= 112 && regno <= 127))
8353 {
8354 specs[count] = tmpl;
8355 specs[count++].index = regno;
8356 }
8357 }
8358 }
8359 else if (note == 0)
8360 {
8361 for (i = 48; i < 64; i++)
8362 {
8363 specs[count] = tmpl;
8364 specs[count++].index = i;
8365 }
8366 for (i = 112; i < 128; i++)
8367 {
8368 specs[count] = tmpl;
8369 specs[count++].index = i;
8370 }
8371 }
8372 else
8373 {
8374 UNHANDLED;
8375 }
8376 break;
8377
8378 case IA64_RS_BR:
8379 if (note != 1)
8380 {
8381 UNHANDLED;
8382 }
8383 else
8384 {
8385 if (rsrc_write)
8386 {
8387 for (i = 0; i < idesc->num_outputs; i++)
8388 if (idesc->operands[i] == IA64_OPND_B1
8389 || idesc->operands[i] == IA64_OPND_B2)
8390 {
8391 specs[count] = tmpl;
8392 specs[count++].index =
8393 CURR_SLOT.opnd[i].X_add_number - REG_BR;
8394 }
8395 }
8396 else
8397 {
8398 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8399 if (idesc->operands[i] == IA64_OPND_B1
8400 || idesc->operands[i] == IA64_OPND_B2)
8401 {
8402 specs[count] = tmpl;
8403 specs[count++].index =
8404 CURR_SLOT.opnd[i].X_add_number - REG_BR;
8405 }
8406 }
8407 }
8408 break;
8409
8410 case IA64_RS_CPUID: /* four or more registers */
8411 if (note == 3)
8412 {
8413 if (idesc->operands[!rsrc_write] == IA64_OPND_CPUID_R3)
8414 {
8415 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8416 if (regno >= 0 && regno < NELEMS (gr_values)
8417 && KNOWN (regno))
8418 {
8419 specs[count] = tmpl;
8420 specs[count++].index = gr_values[regno].value & 0xFF;
8421 }
8422 else
8423 {
8424 specs[count] = tmpl;
8425 specs[count++].specific = 0;
8426 }
8427 }
8428 }
8429 else
8430 {
8431 UNHANDLED;
8432 }
8433 break;
8434
8435 case IA64_RS_DBR: /* four or more registers */
8436 if (note == 3)
8437 {
8438 if (idesc->operands[!rsrc_write] == IA64_OPND_DBR_R3)
8439 {
8440 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8441 if (regno >= 0 && regno < NELEMS (gr_values)
8442 && KNOWN (regno))
8443 {
8444 specs[count] = tmpl;
8445 specs[count++].index = gr_values[regno].value & 0xFF;
8446 }
8447 else
8448 {
8449 specs[count] = tmpl;
8450 specs[count++].specific = 0;
8451 }
8452 }
8453 }
8454 else if (note == 0 && !rsrc_write)
8455 {
8456 specs[count] = tmpl;
8457 specs[count++].specific = 0;
8458 }
8459 else
8460 {
8461 UNHANDLED;
8462 }
8463 break;
8464
8465 case IA64_RS_IBR: /* four or more registers */
8466 if (note == 3)
8467 {
8468 if (idesc->operands[!rsrc_write] == IA64_OPND_IBR_R3)
8469 {
8470 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8471 if (regno >= 0 && regno < NELEMS (gr_values)
8472 && KNOWN (regno))
8473 {
8474 specs[count] = tmpl;
8475 specs[count++].index = gr_values[regno].value & 0xFF;
8476 }
8477 else
8478 {
8479 specs[count] = tmpl;
8480 specs[count++].specific = 0;
8481 }
8482 }
8483 }
8484 else
8485 {
8486 UNHANDLED;
8487 }
8488 break;
8489
8490 case IA64_RS_MSR:
8491 if (note == 5)
8492 {
8493 /* These are implementation specific. Force all references to
8494 conflict with all other references. */
8495 specs[count] = tmpl;
8496 specs[count++].specific = 0;
8497 }
8498 else
8499 {
8500 UNHANDLED;
8501 }
8502 break;
8503
8504 case IA64_RS_PKR: /* 16 or more registers */
8505 if (note == 3 || note == 4)
8506 {
8507 if (idesc->operands[!rsrc_write] == IA64_OPND_PKR_R3)
8508 {
8509 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8510 if (regno >= 0 && regno < NELEMS (gr_values)
8511 && KNOWN (regno))
8512 {
8513 if (note == 3)
8514 {
8515 specs[count] = tmpl;
8516 specs[count++].index = gr_values[regno].value & 0xFF;
8517 }
8518 else
8519 for (i = 0; i < NELEMS (gr_values); i++)
8520 {
8521 /* Uses all registers *except* the one in R3. */
8522 if ((unsigned)i != (gr_values[regno].value & 0xFF))
8523 {
8524 specs[count] = tmpl;
8525 specs[count++].index = i;
8526 }
8527 }
8528 }
8529 else
8530 {
8531 specs[count] = tmpl;
8532 specs[count++].specific = 0;
8533 }
8534 }
8535 }
8536 else if (note == 0)
8537 {
8538 /* probe et al. */
8539 specs[count] = tmpl;
8540 specs[count++].specific = 0;
8541 }
8542 break;
8543
8544 case IA64_RS_PMC: /* four or more registers */
8545 if (note == 3)
8546 {
8547 if (idesc->operands[!rsrc_write] == IA64_OPND_PMC_R3
8548 || (!rsrc_write && idesc->operands[1] == IA64_OPND_PMD_R3))
8549
8550 {
8551 int reg_index = ((idesc->operands[1] == IA64_OPND_R3 && !rsrc_write)
8552 ? 1 : !rsrc_write);
8553 int regno = CURR_SLOT.opnd[reg_index].X_add_number - REG_GR;
8554 if (regno >= 0 && regno < NELEMS (gr_values)
8555 && KNOWN (regno))
8556 {
8557 specs[count] = tmpl;
8558 specs[count++].index = gr_values[regno].value & 0xFF;
8559 }
8560 else
8561 {
8562 specs[count] = tmpl;
8563 specs[count++].specific = 0;
8564 }
8565 }
8566 }
8567 else
8568 {
8569 UNHANDLED;
8570 }
8571 break;
8572
8573 case IA64_RS_PMD: /* four or more registers */
8574 if (note == 3)
8575 {
8576 if (idesc->operands[!rsrc_write] == IA64_OPND_PMD_R3)
8577 {
8578 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8579 if (regno >= 0 && regno < NELEMS (gr_values)
8580 && KNOWN (regno))
8581 {
8582 specs[count] = tmpl;
8583 specs[count++].index = gr_values[regno].value & 0xFF;
8584 }
8585 else
8586 {
8587 specs[count] = tmpl;
8588 specs[count++].specific = 0;
8589 }
8590 }
8591 }
8592 else
8593 {
8594 UNHANDLED;
8595 }
8596 break;
8597
8598 case IA64_RS_RR: /* eight registers */
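/* The region register touched by an indirect access such as
   "mov rr[r3] = r9" is selected by bits 63:61 of the GR value; e.g.
   (illustrative value) a known r3 of 0x4000000000000000 selects RR[2],
   matching the ">> 61" below.  */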
8599 if (note == 6)
8600 {
8601 if (idesc->operands[!rsrc_write] == IA64_OPND_RR_R3)
8602 {
8603 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8604 if (regno >= 0 && regno < NELEMS (gr_values)
8605 && KNOWN (regno))
8606 {
8607 specs[count] = tmpl;
8608 specs[count++].index = (gr_values[regno].value >> 61) & 0x7;
8609 }
8610 else
8611 {
8612 specs[count] = tmpl;
8613 specs[count++].specific = 0;
8614 }
8615 }
8616 }
8617 else if (note == 0 && !rsrc_write)
8618 {
8619 specs[count] = tmpl;
8620 specs[count++].specific = 0;
8621 }
8622 else
8623 {
8624 UNHANDLED;
8625 }
8626 break;
8627
8628 case IA64_RS_CR_IRR:
8629 if (note == 0)
8630 {
8631 /* handle mov-from-CR-IVR; it's a read that writes CR[IRR] */
8632 int regno = CURR_SLOT.opnd[1].X_add_number - REG_CR;
8633 if (rsrc_write
8634 && idesc->operands[1] == IA64_OPND_CR3
8635 && regno == CR_IVR)
8636 {
8637 for (i = 0; i < 4; i++)
8638 {
8639 specs[count] = tmpl;
8640 specs[count++].index = CR_IRR0 + i;
8641 }
8642 }
8643 }
8644 else if (note == 1)
8645 {
8646 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8647 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8648 && regno >= CR_IRR0
8649 && regno <= CR_IRR3)
8650 {
8651 specs[count] = tmpl;
8652 specs[count++].index = regno;
8653 }
8654 }
8655 else
8656 {
8657 UNHANDLED;
8658 }
8659 break;
8660
8661 case IA64_RS_CR_IIB:
8662 if (note != 0)
8663 {
8664 UNHANDLED;
8665 }
8666 else
8667 {
8668 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8669 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8670 && (regno == CR_IIB0 || regno == CR_IIB1))
8671 {
8672 specs[count] = tmpl;
8673 specs[count++].index = regno;
8674 }
8675 }
8676 break;
8677
8678 case IA64_RS_CR_LRR:
8679 if (note != 1)
8680 {
8681 UNHANDLED;
8682 }
8683 else
8684 {
8685 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8686 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8687 && (regno == CR_LRR0 || regno == CR_LRR1))
8688 {
8689 specs[count] = tmpl;
8690 specs[count++].index = regno;
8691 }
8692 }
8693 break;
8694
8695 case IA64_RS_CR:
8696 if (note == 1)
8697 {
8698 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
8699 {
8700 specs[count] = tmpl;
8701 specs[count++].index =
8702 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8703 }
8704 }
8705 else
8706 {
8707 UNHANDLED;
8708 }
8709 break;
8710
8711 case IA64_RS_DAHR:
8712 if (note == 0)
8713 {
8714 if (idesc->operands[!rsrc_write] == IA64_OPND_DAHR3)
8715 {
8716 specs[count] = tmpl;
8717 specs[count++].index =
8718 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_DAHR;
8719 }
8720 }
8721 else
8722 {
8723 UNHANDLED;
8724 }
8725 break;
8726
8727 case IA64_RS_FR:
8728 case IA64_RS_FRb:
8729 if (note != 1)
8730 {
8731 UNHANDLED;
8732 }
8733 else if (rsrc_write)
8734 {
8735 if (dep->specifier == IA64_RS_FRb
8736 && idesc->operands[0] == IA64_OPND_F1)
8737 {
8738 specs[count] = tmpl;
8739 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_FR;
8740 }
8741 }
8742 else
8743 {
8744 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8745 {
8746 if (idesc->operands[i] == IA64_OPND_F2
8747 || idesc->operands[i] == IA64_OPND_F3
8748 || idesc->operands[i] == IA64_OPND_F4)
8749 {
8750 specs[count] = tmpl;
8751 specs[count++].index =
8752 CURR_SLOT.opnd[i].X_add_number - REG_FR;
8753 }
8754 }
8755 }
8756 break;
8757
8758 case IA64_RS_GR:
8759 if (note == 13)
8760 {
8761 /* This reference applies only to the GR whose value is loaded with
8762 data returned from memory. */
8763 specs[count] = tmpl;
8764 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_GR;
8765 }
8766 else if (note == 1)
8767 {
8768 if (rsrc_write)
8769 {
8770 for (i = 0; i < idesc->num_outputs; i++)
8771 if (idesc->operands[i] == IA64_OPND_R1
8772 || idesc->operands[i] == IA64_OPND_R2
8773 || idesc->operands[i] == IA64_OPND_R3)
8774 {
8775 specs[count] = tmpl;
8776 specs[count++].index =
8777 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8778 }
8779 if (idesc->flags & IA64_OPCODE_POSTINC)
8780 for (i = 0; i < NELEMS (idesc->operands); i++)
8781 if (idesc->operands[i] == IA64_OPND_MR3)
8782 {
8783 specs[count] = tmpl;
8784 specs[count++].index =
8785 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8786 }
8787 }
8788 else
8789 {
8790 /* Look for anything that reads a GR. */
8791 for (i = 0; i < NELEMS (idesc->operands); i++)
8792 {
8793 if (idesc->operands[i] == IA64_OPND_MR3
8794 || idesc->operands[i] == IA64_OPND_CPUID_R3
8795 || idesc->operands[i] == IA64_OPND_DBR_R3
8796 || idesc->operands[i] == IA64_OPND_IBR_R3
8797 || idesc->operands[i] == IA64_OPND_MSR_R3
8798 || idesc->operands[i] == IA64_OPND_PKR_R3
8799 || idesc->operands[i] == IA64_OPND_PMC_R3
8800 || idesc->operands[i] == IA64_OPND_PMD_R3
8801 || idesc->operands[i] == IA64_OPND_DAHR_R3
8802 || idesc->operands[i] == IA64_OPND_RR_R3
8803 || ((i >= idesc->num_outputs)
8804 && (idesc->operands[i] == IA64_OPND_R1
8805 || idesc->operands[i] == IA64_OPND_R2
8806 || idesc->operands[i] == IA64_OPND_R3
8807 /* addl source register. */
8808 || idesc->operands[i] == IA64_OPND_R3_2)))
8809 {
8810 specs[count] = tmpl;
8811 specs[count++].index =
8812 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8813 }
8814 }
8815 }
8816 }
8817 else
8818 {
8819 UNHANDLED;
8820 }
8821 break;
8822
8823 /* This is the same as IA64_RS_PRr, except that the register range is
8824 from 1 to 15, and there are no rotating register reads/writes here. */
8825 case IA64_RS_PR:
8826 if (note == 0)
8827 {
8828 for (i = 1; i < 16; i++)
8829 {
8830 specs[count] = tmpl;
8831 specs[count++].index = i;
8832 }
8833 }
8834 else if (note == 7)
8835 {
8836 valueT mask = 0;
8837 /* Mark only those registers indicated by the mask. */
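/* Illustration (hypothetical operands): "mov pr = r2, 0x22" has mask bits
   1 and 5 set, so only PR1 and PR5 are marked here.  */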
8838 if (rsrc_write)
8839 {
8840 mask = CURR_SLOT.opnd[2].X_add_number;
8841 for (i = 1; i < 16; i++)
8842 if (mask & ((valueT) 1 << i))
8843 {
8844 specs[count] = tmpl;
8845 specs[count++].index = i;
8846 }
8847 }
8848 else
8849 {
8850 UNHANDLED;
8851 }
8852 }
8853 else if (note == 11) /* note 11 implies note 1 as well */
8854 {
8855 if (rsrc_write)
8856 {
8857 for (i = 0; i < idesc->num_outputs; i++)
8858 {
8859 if (idesc->operands[i] == IA64_OPND_P1
8860 || idesc->operands[i] == IA64_OPND_P2)
8861 {
8862 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8863 if (regno >= 1 && regno < 16)
8864 {
8865 specs[count] = tmpl;
8866 specs[count++].index = regno;
8867 }
8868 }
8869 }
8870 }
8871 else
8872 {
8873 UNHANDLED;
8874 }
8875 }
8876 else if (note == 12)
8877 {
8878 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8879 {
8880 specs[count] = tmpl;
8881 specs[count++].index = CURR_SLOT.qp_regno;
8882 }
8883 }
8884 else if (note == 1)
8885 {
8886 if (rsrc_write)
8887 {
8888 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8889 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8890 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8891 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8892
8893 if ((idesc->operands[0] == IA64_OPND_P1
8894 || idesc->operands[0] == IA64_OPND_P2)
8895 && p1 >= 1 && p1 < 16)
8896 {
8897 specs[count] = tmpl;
8898 specs[count].cmp_type =
8899 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8900 specs[count++].index = p1;
8901 }
8902 if ((idesc->operands[1] == IA64_OPND_P1
8903 || idesc->operands[1] == IA64_OPND_P2)
8904 && p2 >= 1 && p2 < 16)
8905 {
8906 specs[count] = tmpl;
8907 specs[count].cmp_type =
8908 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8909 specs[count++].index = p2;
8910 }
8911 }
8912 else
8913 {
8914 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8915 {
8916 specs[count] = tmpl;
8917 specs[count++].index = CURR_SLOT.qp_regno;
8918 }
8919 if (idesc->operands[1] == IA64_OPND_PR)
8920 {
8921 for (i = 1; i < 16; i++)
8922 {
8923 specs[count] = tmpl;
8924 specs[count++].index = i;
8925 }
8926 }
8927 }
8928 }
8929 else
8930 {
8931 UNHANDLED;
8932 }
8933 break;
8934
8935 /* This is the general case for PRs. IA64_RS_PR and IA64_RS_PR63 are
8936 simplified cases of this. */
8937 case IA64_RS_PRr:
8938 if (note == 0)
8939 {
8940 for (i = 16; i < 63; i++)
8941 {
8942 specs[count] = tmpl;
8943 specs[count++].index = i;
8944 }
8945 }
8946 else if (note == 7)
8947 {
8948 valueT mask = 0;
8949 /* Mark only those registers indicated by the mask. */
8950 if (rsrc_write
8951 && idesc->operands[0] == IA64_OPND_PR)
8952 {
8953 mask = CURR_SLOT.opnd[2].X_add_number;
8954 if (mask & ((valueT) 1 << 16))
8955 for (i = 16; i < 63; i++)
8956 {
8957 specs[count] = tmpl;
8958 specs[count++].index = i;
8959 }
8960 }
8961 else if (rsrc_write
8962 && idesc->operands[0] == IA64_OPND_PR_ROT)
8963 {
8964 for (i = 16; i < 63; i++)
8965 {
8966 specs[count] = tmpl;
8967 specs[count++].index = i;
8968 }
8969 }
8970 else
8971 {
8972 UNHANDLED;
8973 }
8974 }
8975 else if (note == 11) /* note 11 implies note 1 as well */
8976 {
8977 if (rsrc_write)
8978 {
8979 for (i = 0; i < idesc->num_outputs; i++)
8980 {
8981 if (idesc->operands[i] == IA64_OPND_P1
8982 || idesc->operands[i] == IA64_OPND_P2)
8983 {
8984 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8985 if (regno >= 16 && regno < 63)
8986 {
8987 specs[count] = tmpl;
8988 specs[count++].index = regno;
8989 }
8990 }
8991 }
8992 }
8993 else
8994 {
8995 UNHANDLED;
8996 }
8997 }
8998 else if (note == 12)
8999 {
9000 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
9001 {
9002 specs[count] = tmpl;
9003 specs[count++].index = CURR_SLOT.qp_regno;
9004 }
9005 }
9006 else if (note == 1)
9007 {
9008 if (rsrc_write)
9009 {
9010 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9011 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9012 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
9013 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
9014
9015 if ((idesc->operands[0] == IA64_OPND_P1
9016 || idesc->operands[0] == IA64_OPND_P2)
9017 && p1 >= 16 && p1 < 63)
9018 {
9019 specs[count] = tmpl;
9020 specs[count].cmp_type =
9021 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
9022 specs[count++].index = p1;
9023 }
9024 if ((idesc->operands[1] == IA64_OPND_P1
9025 || idesc->operands[1] == IA64_OPND_P2)
9026 && p2 >= 16 && p2 < 63)
9027 {
9028 specs[count] = tmpl;
9029 specs[count].cmp_type =
9030 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
9031 specs[count++].index = p2;
9032 }
9033 }
9034 else
9035 {
9036 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
9037 {
9038 specs[count] = tmpl;
9039 specs[count++].index = CURR_SLOT.qp_regno;
9040 }
9041 if (idesc->operands[1] == IA64_OPND_PR)
9042 {
9043 for (i = 16; i < 63; i++)
9044 {
9045 specs[count] = tmpl;
9046 specs[count++].index = i;
9047 }
9048 }
9049 }
9050 }
9051 else
9052 {
9053 UNHANDLED;
9054 }
9055 break;
9056
9057 case IA64_RS_PSR:
9058 /* Verify that the instruction is using the PSR bit indicated in
9059 dep->regindex. */
9060 if (note == 0)
9061 {
9062 if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_UM)
9063 {
9064 if (dep->regindex < 6)
9065 {
9066 specs[count++] = tmpl;
9067 }
9068 }
9069 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR)
9070 {
9071 if (dep->regindex < 32
9072 || dep->regindex == 35
9073 || dep->regindex == 36
9074 || (!rsrc_write && dep->regindex == PSR_CPL))
9075 {
9076 specs[count++] = tmpl;
9077 }
9078 }
9079 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_L)
9080 {
9081 if (dep->regindex < 32
9082 || dep->regindex == 35
9083 || dep->regindex == 36
9084 || (rsrc_write && dep->regindex == PSR_CPL))
9085 {
9086 specs[count++] = tmpl;
9087 }
9088 }
9089 else
9090 {
9091 /* Several PSR bits have very specific dependencies. */
9092 switch (dep->regindex)
9093 {
9094 default:
9095 specs[count++] = tmpl;
9096 break;
9097 case PSR_IC:
9098 if (rsrc_write)
9099 {
9100 specs[count++] = tmpl;
9101 }
9102 else
9103 {
9104 /* Only certain CR accesses use PSR.ic */
9105 if (idesc->operands[0] == IA64_OPND_CR3
9106 || idesc->operands[1] == IA64_OPND_CR3)
9107 {
9108 int reg_index =
9109 ((idesc->operands[0] == IA64_OPND_CR3)
9110 ? 0 : 1);
9111 int regno =
9112 CURR_SLOT.opnd[reg_index].X_add_number - REG_CR;
9113
9114 switch (regno)
9115 {
9116 default:
9117 break;
9118 case CR_ITIR:
9119 case CR_IFS:
9120 case CR_IIM:
9121 case CR_IIP:
9122 case CR_IPSR:
9123 case CR_ISR:
9124 case CR_IFA:
9125 case CR_IHA:
9126 case CR_IIB0:
9127 case CR_IIB1:
9128 case CR_IIPA:
9129 specs[count++] = tmpl;
9130 break;
9131 }
9132 }
9133 }
9134 break;
9135 case PSR_CPL:
9136 if (rsrc_write)
9137 {
9138 specs[count++] = tmpl;
9139 }
9140 else
9141 {
9142 /* Only some AR accesses use cpl */
9143 if (idesc->operands[0] == IA64_OPND_AR3
9144 || idesc->operands[1] == IA64_OPND_AR3)
9145 {
9146 int reg_index =
9147 ((idesc->operands[0] == IA64_OPND_AR3)
9148 ? 0 : 1);
9149 int regno =
9150 CURR_SLOT.opnd[reg_index].X_add_number - REG_AR;
9151
9152 if (regno == AR_ITC
9153 || regno == AR_RUC
9154 || (reg_index == 0
9155 && (regno == AR_RSC
9156 || (regno >= AR_K0
9157 && regno <= AR_K7))))
9158 {
9159 specs[count++] = tmpl;
9160 }
9161 }
9162 else
9163 {
9164 specs[count++] = tmpl;
9165 }
9166 break;
9167 }
9168 }
9169 }
9170 }
9171 else if (note == 7)
9172 {
9173 valueT mask = 0;
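/* Illustration (assuming the architectural PSR bit numbering, where PSR.i
   is bit 14): "rsm 0x4000" sets only mask bit 14, so only the PSR.i
   resource is marked by the test below.  */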
9174 if (idesc->operands[0] == IA64_OPND_IMMU24)
9175 {
9176 mask = CURR_SLOT.opnd[0].X_add_number;
9177 }
9178 else
9179 {
9180 UNHANDLED;
9181 }
9182 if (mask & ((valueT) 1 << dep->regindex))
9183 {
9184 specs[count++] = tmpl;
9185 }
9186 }
9187 else if (note == 8)
9188 {
9189 int min = dep->regindex == PSR_DFL ? 2 : 32;
9190 int max = dep->regindex == PSR_DFL ? 31 : 127;
9191 /* dfh is read on FR32-127; dfl is read on FR2-31 */
9192 for (i = 0; i < NELEMS (idesc->operands); i++)
9193 {
9194 if (idesc->operands[i] == IA64_OPND_F1
9195 || idesc->operands[i] == IA64_OPND_F2
9196 || idesc->operands[i] == IA64_OPND_F3
9197 || idesc->operands[i] == IA64_OPND_F4)
9198 {
9199 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9200 if (reg >= min && reg <= max)
9201 {
9202 specs[count++] = tmpl;
9203 }
9204 }
9205 }
9206 }
9207 else if (note == 9)
9208 {
9209 int min = dep->regindex == PSR_MFL ? 2 : 32;
9210 int max = dep->regindex == PSR_MFL ? 31 : 127;
9211 /* mfh is read on writes to FR32-127; mfl is read on writes to
9212 FR2-31 */
9213 for (i = 0; i < idesc->num_outputs; i++)
9214 {
9215 if (idesc->operands[i] == IA64_OPND_F1)
9216 {
9217 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9218 if (reg >= min && reg <= max)
9219 {
9220 specs[count++] = tmpl;
9221 }
9222 }
9223 }
9224 }
9225 else if (note == 10)
9226 {
9227 for (i = 0; i < NELEMS (idesc->operands); i++)
9228 {
9229 if (idesc->operands[i] == IA64_OPND_R1
9230 || idesc->operands[i] == IA64_OPND_R2
9231 || idesc->operands[i] == IA64_OPND_R3)
9232 {
9233 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9234 if (regno >= 16 && regno <= 31)
9235 {
9236 specs[count++] = tmpl;
9237 }
9238 }
9239 }
9240 }
9241 else
9242 {
9243 UNHANDLED;
9244 }
9245 break;
9246
9247 case IA64_RS_AR_FPSR:
9248 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
9249 {
9250 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
9251 if (regno == AR_FPSR)
9252 {
9253 specs[count++] = tmpl;
9254 }
9255 }
9256 else
9257 {
9258 specs[count++] = tmpl;
9259 }
9260 break;
9261
9262 case IA64_RS_ARX:
9263 /* Handle all AR[REG] resources */
9264 if (note == 0 || note == 1)
9265 {
9266 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
9267 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3
9268 && regno == dep->regindex)
9269 {
9270 specs[count++] = tmpl;
9271 }
9272 /* other AR[REG] resources may be affected by AR accesses */
9273 else if (idesc->operands[0] == IA64_OPND_AR3)
9274 {
9275 /* AR[] writes */
9276 regno = CURR_SLOT.opnd[0].X_add_number - REG_AR;
9277 switch (dep->regindex)
9278 {
9279 default:
9280 break;
9281 case AR_BSP:
9282 case AR_RNAT:
9283 if (regno == AR_BSPSTORE)
9284 {
9285 specs[count++] = tmpl;
9286 }
9287 /* Fall through. */
9288 case AR_RSC:
9289 if (!rsrc_write &&
9290 (regno == AR_BSPSTORE
9291 || regno == AR_RNAT))
9292 {
9293 specs[count++] = tmpl;
9294 }
9295 break;
9296 }
9297 }
9298 else if (idesc->operands[1] == IA64_OPND_AR3)
9299 {
9300 /* AR[] reads */
9301 regno = CURR_SLOT.opnd[1].X_add_number - REG_AR;
9302 switch (dep->regindex)
9303 {
9304 default:
9305 break;
9306 case AR_RSC:
9307 if (regno == AR_BSPSTORE || regno == AR_RNAT)
9308 {
9309 specs[count++] = tmpl;
9310 }
9311 break;
9312 }
9313 }
9314 else
9315 {
9316 specs[count++] = tmpl;
9317 }
9318 }
9319 else
9320 {
9321 UNHANDLED;
9322 }
9323 break;
9324
9325 case IA64_RS_CRX:
9326 /* Handle all CR[REG] resources.
9327 ??? FIXME: Rule 17 isn't really handled correctly. */
9328 if (note == 0 || note == 1 || note == 17)
9329 {
9330 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
9331 {
9332 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
9333 if (regno == dep->regindex)
9334 {
9335 specs[count++] = tmpl;
9336 }
9337 else if (!rsrc_write)
9338 {
9339 /* Reads from CR[IVR] affect other resources. */
9340 if (regno == CR_IVR)
9341 {
9342 if ((dep->regindex >= CR_IRR0
9343 && dep->regindex <= CR_IRR3)
9344 || dep->regindex == CR_TPR)
9345 {
9346 specs[count++] = tmpl;
9347 }
9348 }
9349 }
9350 }
9351 else
9352 {
9353 specs[count++] = tmpl;
9354 }
9355 }
9356 else
9357 {
9358 UNHANDLED;
9359 }
9360 break;
9361
9362 case IA64_RS_INSERVICE:
9363 /* look for write of EOI (67) or read of IVR (65) */
9364 if ((idesc->operands[0] == IA64_OPND_CR3
9365 && CURR_SLOT.opnd[0].X_add_number - REG_CR == CR_EOI)
9366 || (idesc->operands[1] == IA64_OPND_CR3
9367 && CURR_SLOT.opnd[1].X_add_number - REG_CR == CR_IVR))
9368 {
9369 specs[count++] = tmpl;
9370 }
9371 break;
9372
9373 case IA64_RS_GR0:
9374 if (note == 1)
9375 {
9376 specs[count++] = tmpl;
9377 }
9378 else
9379 {
9380 UNHANDLED;
9381 }
9382 break;
9383
9384 case IA64_RS_CFM:
9385 if (note != 2)
9386 {
9387 specs[count++] = tmpl;
9388 }
9389 else
9390 {
9391 /* Check if any of the registers accessed are in the rotating region.
9392 mov to/from pr accesses CFM only when qp_regno is in the rotating
9393 region */
9394 for (i = 0; i < NELEMS (idesc->operands); i++)
9395 {
9396 if (idesc->operands[i] == IA64_OPND_R1
9397 || idesc->operands[i] == IA64_OPND_R2
9398 || idesc->operands[i] == IA64_OPND_R3)
9399 {
9400 int num = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9401 /* Assumes that md.rot.num_regs is always valid */
9402 if (md.rot.num_regs > 0
9403 && num > 31
9404 && num < 31 + md.rot.num_regs)
9405 {
9406 specs[count] = tmpl;
9407 specs[count++].specific = 0;
9408 }
9409 }
9410 else if (idesc->operands[i] == IA64_OPND_F1
9411 || idesc->operands[i] == IA64_OPND_F2
9412 || idesc->operands[i] == IA64_OPND_F3
9413 || idesc->operands[i] == IA64_OPND_F4)
9414 {
9415 int num = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9416 if (num > 31)
9417 {
9418 specs[count] = tmpl;
9419 specs[count++].specific = 0;
9420 }
9421 }
9422 else if (idesc->operands[i] == IA64_OPND_P1
9423 || idesc->operands[i] == IA64_OPND_P2)
9424 {
9425 int num = CURR_SLOT.opnd[i].X_add_number - REG_P;
9426 if (num > 15)
9427 {
9428 specs[count] = tmpl;
9429 specs[count++].specific = 0;
9430 }
9431 }
9432 }
9433 if (CURR_SLOT.qp_regno > 15)
9434 {
9435 specs[count] = tmpl;
9436 specs[count++].specific = 0;
9437 }
9438 }
9439 break;
9440
9441 /* This is the same as IA64_RS_PRr, except simplified to account for
9442 the fact that there is only one register. */
9443 case IA64_RS_PR63:
9444 if (note == 0)
9445 {
9446 specs[count++] = tmpl;
9447 }
9448 else if (note == 7)
9449 {
9450 valueT mask = 0;
9451 if (idesc->operands[2] == IA64_OPND_IMM17)
9452 mask = CURR_SLOT.opnd[2].X_add_number;
9453 if (mask & ((valueT) 1 << 63))
9454 specs[count++] = tmpl;
9455 }
9456 else if (note == 11)
9457 {
9458 if ((idesc->operands[0] == IA64_OPND_P1
9459 && CURR_SLOT.opnd[0].X_add_number - REG_P == 63)
9460 || (idesc->operands[1] == IA64_OPND_P2
9461 && CURR_SLOT.opnd[1].X_add_number - REG_P == 63))
9462 {
9463 specs[count++] = tmpl;
9464 }
9465 }
9466 else if (note == 12)
9467 {
9468 if (CURR_SLOT.qp_regno == 63)
9469 {
9470 specs[count++] = tmpl;
9471 }
9472 }
9473 else if (note == 1)
9474 {
9475 if (rsrc_write)
9476 {
9477 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9478 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9479 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
9480 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
9481
9482 if (p1 == 63
9483 && (idesc->operands[0] == IA64_OPND_P1
9484 || idesc->operands[0] == IA64_OPND_P2))
9485 {
9486 specs[count] = tmpl;
9487 specs[count++].cmp_type =
9488 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
9489 }
9490 if (p2 == 63
9491 && (idesc->operands[1] == IA64_OPND_P1
9492 || idesc->operands[1] == IA64_OPND_P2))
9493 {
9494 specs[count] = tmpl;
9495 specs[count++].cmp_type =
9496 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
9497 }
9498 }
9499 else
9500 {
9501 if (CURR_SLOT.qp_regno == 63)
9502 {
9503 specs[count++] = tmpl;
9504 }
9505 }
9506 }
9507 else
9508 {
9509 UNHANDLED;
9510 }
9511 break;
9512
9513 case IA64_RS_RSE:
9514 /* FIXME: we can identify some individual RSE written resources, but RSE
9515 read resources have not yet been completely identified, so for now
9516 treat RSE as a single resource. */
9517 if (strncmp (idesc->name, "mov", 3) == 0)
9518 {
9519 if (rsrc_write)
9520 {
9521 if (idesc->operands[0] == IA64_OPND_AR3
9522 && CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE)
9523 {
9524 specs[count++] = tmpl;
9525 }
9526 }
9527 else
9528 {
9529 if (idesc->operands[0] == IA64_OPND_AR3)
9530 {
9531 if (CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE
9532 || CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_RNAT)
9533 {
9534 specs[count++] = tmpl;
9535 }
9536 }
9537 else if (idesc->operands[1] == IA64_OPND_AR3)
9538 {
9539 if (CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSP
9540 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSPSTORE
9541 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_RNAT)
9542 {
9543 specs[count++] = tmpl;
9544 }
9545 }
9546 }
9547 }
9548 else
9549 {
9550 specs[count++] = tmpl;
9551 }
9552 break;
9553
9554 case IA64_RS_ANY:
9555 /* FIXME -- do any of these need to be non-specific? */
9556 specs[count++] = tmpl;
9557 break;
9558
9559 default:
9560 as_bad (_("Unrecognized dependency specifier %d\n"), dep->specifier);
9561 break;
9562 }
9563
9564 return count;
9565 }
9566
9567 /* Clear branch flags on marked resources. This breaks the link between the
9568 QP of the marking instruction and a subsequent branch on the same QP. */
9569
9570 static void
9571 clear_qp_branch_flag (valueT mask)
9572 {
9573 int i;
9574 for (i = 0; i < regdepslen; i++)
9575 {
9576 valueT bit = ((valueT) 1 << regdeps[i].qp_regno);
9577 if ((bit & mask) != 0)
9578 {
9579 regdeps[i].link_to_qp_branch = 0;
9580 }
9581 }
9582 }
9583
9584 /* MASK contains 2 and only 2 PRs which are mutually exclusive. Remove
9585 any mutexes which contain one of the PRs and create new ones when
9586 needed. */
9587
9588 static int
9589 update_qp_mutex (valueT mask)
9590 {
9591 int i;
9592 int add = 0;
9593
9594 i = 0;
9595 while (i < qp_mutexeslen)
9596 {
9597 if ((qp_mutexes[i].prmask & mask) != 0)
9598 {
9599 /* If it destroys and creates the same mutex, do nothing. */
9600 if (qp_mutexes[i].prmask == mask
9601 && qp_mutexes[i].path == md.path)
9602 {
9603 i++;
9604 add = -1;
9605 }
9606 else
9607 {
9608 int keep = 0;
9609
9610 if (md.debug_dv)
9611 {
9612 fprintf (stderr, " Clearing mutex relation");
9613 print_prmask (qp_mutexes[i].prmask);
9614 fprintf (stderr, "\n");
9615 }
9616
9617 /* Deal with an old mutex containing 3 or more PRs only if
9618 the new mutex is on the same execution path as it.
9619
9620 FIXME: The 3+ mutex support is incomplete.
9621 dot_pred_rel () may be a better place to fix it. */
9622 if (qp_mutexes[i].path == md.path)
9623 {
9624 /* If it is a proper subset of the mutex, create a
9625 new mutex. */
9626 if (add == 0
9627 && (qp_mutexes[i].prmask & mask) == mask)
9628 add = 1;
9629
9630 qp_mutexes[i].prmask &= ~mask;
9631 if (qp_mutexes[i].prmask & (qp_mutexes[i].prmask - 1))
9632 {
9633 /* Keep the modified mutex if more than
9634 one PR is left. */
9635 keep = 1;
9636 i++;
9637 }
9638 }
9639
9640 if (keep == 0)
9641 /* Remove the mutex. */
9642 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9643 }
9644 }
9645 else
9646 ++i;
9647 }
9648
9649 if (add == 1)
9650 add_qp_mutex (mask);
9651
9652 return add;
9653 }
9654
9655 /* Remove any mutexes which contain any of the PRs indicated in the mask.
9656
9657 Any change to a PR clears the mutex relations which include that PR. */
9658
9659 static void
9660 clear_qp_mutex (valueT mask)
9661 {
9662 int i;
9663
9664 i = 0;
9665 while (i < qp_mutexeslen)
9666 {
9667 if ((qp_mutexes[i].prmask & mask) != 0)
9668 {
9669 if (md.debug_dv)
9670 {
9671 fprintf (stderr, " Clearing mutex relation");
9672 print_prmask (qp_mutexes[i].prmask);
9673 fprintf (stderr, "\n");
9674 }
9675 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9676 }
9677 else
9678 ++i;
9679 }
9680 }
9681
9682 /* Clear implies relations which contain PRs in the given masks.
9683 P1_MASK indicates the source of the implies relation, while P2_MASK
9684 indicates the implied PR. */
9685
9686 static void
9687 clear_qp_implies (valueT p1_mask, valueT p2_mask)
9688 {
9689 int i;
9690
9691 i = 0;
9692 while (i < qp_implieslen)
9693 {
9694 if ((((valueT) 1 << qp_implies[i].p1) & p1_mask) != 0
9695 || (((valueT) 1 << qp_implies[i].p2) & p2_mask) != 0)
9696 {
9697 if (md.debug_dv)
9698 fprintf (stderr, "Clearing implied relation PR%d->PR%d\n",
9699 qp_implies[i].p1, qp_implies[i].p2);
9700 qp_implies[i] = qp_implies[--qp_implieslen];
9701 }
9702 else
9703 ++i;
9704 }
9705 }
9706
9707 /* Add the PRs specified to the list of implied relations. */
9708
9709 static void
9710 add_qp_imply (int p1, int p2)
9711 {
9712 valueT mask;
9713 valueT bit;
9714 int i;
9715
9716 /* p0 is not meaningful here. */
9717 if (p1 == 0 || p2 == 0)
9718 abort ();
9719
9720 if (p1 == p2)
9721 return;
9722
9723 /* If it exists already, ignore it. */
9724 for (i = 0; i < qp_implieslen; i++)
9725 {
9726 if (qp_implies[i].p1 == p1
9727 && qp_implies[i].p2 == p2
9728 && qp_implies[i].path == md.path
9729 && !qp_implies[i].p2_branched)
9730 return;
9731 }
9732
9733 if (qp_implieslen == qp_impliestotlen)
9734 {
9735 qp_impliestotlen += 20;
9736 qp_implies = XRESIZEVEC (struct qp_imply, qp_implies, qp_impliestotlen);
9737 }
9738 if (md.debug_dv)
9739 fprintf (stderr, " Registering PR%d implies PR%d\n", p1, p2);
9740 qp_implies[qp_implieslen].p1 = p1;
9741 qp_implies[qp_implieslen].p2 = p2;
9742 qp_implies[qp_implieslen].path = md.path;
9743 qp_implies[qp_implieslen++].p2_branched = 0;
9744
9745 /* Add in the implied transitive relations; for everything that p2 implies,
9746 make p1 imply that, too; for everything that implies p1, make it imply p2
9747 as well. */
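/* Illustration (hypothetical predicates): if "p6 implies p7" is already
   recorded and {p6, p8} are known to be mutex, then registering
   "p5 implies p6" also records "p5 implies p7" here and adds the mutex
   {p5, p8} below.  */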
9748 for (i = 0; i < qp_implieslen; i++)
9749 {
9750 if (qp_implies[i].p1 == p2)
9751 add_qp_imply (p1, qp_implies[i].p2);
9752 if (qp_implies[i].p2 == p1)
9753 add_qp_imply (qp_implies[i].p1, p2);
9754 }
9755 /* Add in mutex relations implied by this implies relation; for each mutex
9756 relation containing p2, duplicate it and replace p2 with p1. */
9757 bit = (valueT) 1 << p1;
9758 mask = (valueT) 1 << p2;
9759 for (i = 0; i < qp_mutexeslen; i++)
9760 {
9761 if (qp_mutexes[i].prmask & mask)
9762 add_qp_mutex ((qp_mutexes[i].prmask & ~mask) | bit);
9763 }
9764 }
9765
9766 /* Add the PRs specified in the mask to the mutex list; this means that only
9767 one of the PRs can be true at any time. PR0 should never be included in
9768 the mask. */
9769
9770 static void
9771 add_qp_mutex (valueT mask)
9772 {
9773 if (mask & 0x1)
9774 abort ();
9775
9776 if (qp_mutexeslen == qp_mutexestotlen)
9777 {
9778 qp_mutexestotlen += 20;
9779 qp_mutexes = XRESIZEVEC (struct qpmutex, qp_mutexes, qp_mutexestotlen);
9780 }
9781 if (md.debug_dv)
9782 {
9783 fprintf (stderr, " Registering mutex on");
9784 print_prmask (mask);
9785 fprintf (stderr, "\n");
9786 }
9787 qp_mutexes[qp_mutexeslen].path = md.path;
9788 qp_mutexes[qp_mutexeslen++].prmask = mask;
9789 }
9790
9791 static int
9792 has_suffix_p (const char *name, const char *suffix)
9793 {
9794 size_t namelen = strlen (name);
9795 size_t sufflen = strlen (suffix);
9796
9797 if (namelen <= sufflen)
9798 return 0;
9799 return strcmp (name + namelen - sufflen, suffix) == 0;
9800 }
9801
9802 static void
9803 clear_register_values (void)
9804 {
9805 int i;
9806 if (md.debug_dv)
9807 fprintf (stderr, " Clearing register values\n");
9808 for (i = 1; i < NELEMS (gr_values); i++)
9809 gr_values[i].known = 0;
9810 }
9811
9812 /* Keep track of register values/changes which affect DV tracking.
9813
9814 Optimization note: we should add a flag to the classes of insns that
9815 otherwise have to be identified by examining a group of strings. */
9816
9817 static void
9818 note_register_values (struct ia64_opcode *idesc)
9819 {
9820 valueT qp_changemask = 0;
9821 int i;
9822
9823 /* Invalidate values for registers being written to. */
9824 for (i = 0; i < idesc->num_outputs; i++)
9825 {
9826 if (idesc->operands[i] == IA64_OPND_R1
9827 || idesc->operands[i] == IA64_OPND_R2
9828 || idesc->operands[i] == IA64_OPND_R3)
9829 {
9830 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9831 if (regno > 0 && regno < NELEMS (gr_values))
9832 gr_values[regno].known = 0;
9833 }
9834 else if (idesc->operands[i] == IA64_OPND_R3_2)
9835 {
9836 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9837 if (regno > 0 && regno < 4)
9838 gr_values[regno].known = 0;
9839 }
9840 else if (idesc->operands[i] == IA64_OPND_P1
9841 || idesc->operands[i] == IA64_OPND_P2)
9842 {
9843 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
9844 qp_changemask |= (valueT) 1 << regno;
9845 }
9846 else if (idesc->operands[i] == IA64_OPND_PR)
9847 {
9848 if (idesc->operands[2] & (valueT) 0x10000)
9849 qp_changemask = ~(valueT) 0x1FFFF | idesc->operands[2];
9850 else
9851 qp_changemask = idesc->operands[2];
9852 break;
9853 }
9854 else if (idesc->operands[i] == IA64_OPND_PR_ROT)
9855 {
9856 if (idesc->operands[1] & ((valueT) 1 << 43))
9857 qp_changemask = -((valueT) 1 << 44) | idesc->operands[1];
9858 else
9859 qp_changemask = idesc->operands[1];
9860 qp_changemask &= ~(valueT) 0xFFFF;
9861 break;
9862 }
9863 }
9864
9865 /* Always clear qp branch flags on any PR change. */
9866 /* FIXME there may be exceptions for certain compares. */
9867 clear_qp_branch_flag (qp_changemask);
9868
9869 /* Invalidate rotating registers on insns which affect RRBs in CFM. */
9870 if (idesc->flags & IA64_OPCODE_MOD_RRBS)
9871 {
9872 qp_changemask |= ~(valueT) 0xFFFF;
9873 if (strcmp (idesc->name, "clrrrb.pr") != 0)
9874 {
9875 for (i = 32; i < 32 + md.rot.num_regs; i++)
9876 gr_values[i].known = 0;
9877 }
9878 clear_qp_mutex (qp_changemask);
9879 clear_qp_implies (qp_changemask, qp_changemask);
9880 }
9881 /* After a call, all register values are undefined, except those marked
9882 as "safe". */
9883 else if (strncmp (idesc->name, "br.call", 6) == 0
9884 || strncmp (idesc->name, "brl.call", 7) == 0)
9885 {
9886 /* FIXME keep GR values which are marked as "safe_across_calls" */
9887 clear_register_values ();
9888 clear_qp_mutex (~qp_safe_across_calls);
9889 clear_qp_implies (~qp_safe_across_calls, ~qp_safe_across_calls);
9890 clear_qp_branch_flag (~qp_safe_across_calls);
9891 }
9892 else if (is_interruption_or_rfi (idesc)
9893 || is_taken_branch (idesc))
9894 {
9895 clear_register_values ();
9896 clear_qp_mutex (~(valueT) 0);
9897 clear_qp_implies (~(valueT) 0, ~(valueT) 0);
9898 }
9899 /* Look for mutex and implies relations. */
9900 else if ((idesc->operands[0] == IA64_OPND_P1
9901 || idesc->operands[0] == IA64_OPND_P2)
9902 && (idesc->operands[1] == IA64_OPND_P1
9903 || idesc->operands[1] == IA64_OPND_P2))
9904 {
9905 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9906 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9907 valueT p1mask = (p1 != 0) ? (valueT) 1 << p1 : 0;
9908 valueT p2mask = (p2 != 0) ? (valueT) 1 << p2 : 0;
9909
9910 /* If both PRs are PR0, we can't really do anything. */
9911 if (p1 == 0 && p2 == 0)
9912 {
9913 if (md.debug_dv)
9914 fprintf (stderr, " Ignoring PRs due to inclusion of p0\n");
9915 }
9916 /* In general, clear mutexes and implies which include P1 or P2,
9917 with the following exceptions. */
9918 else if (has_suffix_p (idesc->name, ".or.andcm")
9919 || has_suffix_p (idesc->name, ".and.orcm"))
9920 {
9921 clear_qp_implies (p2mask, p1mask);
9922 }
9923 else if (has_suffix_p (idesc->name, ".andcm")
9924 || has_suffix_p (idesc->name, ".and"))
9925 {
9926 clear_qp_implies (0, p1mask | p2mask);
9927 }
9928 else if (has_suffix_p (idesc->name, ".orcm")
9929 || has_suffix_p (idesc->name, ".or"))
9930 {
9931 clear_qp_mutex (p1mask | p2mask);
9932 clear_qp_implies (p1mask | p2mask, 0);
9933 }
9934 else
9935 {
9936 int added = 0;
9937
9938 clear_qp_implies (p1mask | p2mask, p1mask | p2mask);
9939
9940 /* If one of the PRs is PR0, we call clear_qp_mutex. */
9941 if (p1 == 0 || p2 == 0)
9942 clear_qp_mutex (p1mask | p2mask);
9943 else
9944 added = update_qp_mutex (p1mask | p2mask);
9945
9946 if (CURR_SLOT.qp_regno == 0
9947 || has_suffix_p (idesc->name, ".unc"))
9948 {
9949 if (added == 0 && p1 && p2)
9950 add_qp_mutex (p1mask | p2mask);
9951 if (CURR_SLOT.qp_regno != 0)
9952 {
9953 if (p1)
9954 add_qp_imply (p1, CURR_SLOT.qp_regno);
9955 if (p2)
9956 add_qp_imply (p2, CURR_SLOT.qp_regno);
9957 }
9958 }
9959 }
9960 }
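/* Tracking known GR values matters for the indirect register files; e.g.
   (hypothetical) after "movl r3 = 0x2000000000000008", a later
   "mov rr[r3] = r9" can be resolved to RR[1] in specify_resource instead
   of being treated as an unspecific access.  */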
9961 /* Look for mov imm insns into GRs. */
9962 else if (idesc->operands[0] == IA64_OPND_R1
9963 && (idesc->operands[1] == IA64_OPND_IMM22
9964 || idesc->operands[1] == IA64_OPND_IMMU64)
9965 && CURR_SLOT.opnd[1].X_op == O_constant
9966 && (strcmp (idesc->name, "mov") == 0
9967 || strcmp (idesc->name, "movl") == 0))
9968 {
9969 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
9970 if (regno > 0 && regno < NELEMS (gr_values))
9971 {
9972 gr_values[regno].known = 1;
9973 gr_values[regno].value = CURR_SLOT.opnd[1].X_add_number;
9974 gr_values[regno].path = md.path;
9975 if (md.debug_dv)
9976 {
9977 fprintf (stderr, " Know gr%d = ", regno);
9978 fprintf_vma (stderr, gr_values[regno].value);
9979 fputs ("\n", stderr);
9980 }
9981 }
9982 }
9983 /* Look for dep.z imm insns. */
9984 else if (idesc->operands[0] == IA64_OPND_R1
9985 && idesc->operands[1] == IA64_OPND_IMM8
9986 && strcmp (idesc->name, "dep.z") == 0)
9987 {
9988 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
9989 if (regno > 0 && regno < NELEMS (gr_values))
9990 {
9991 valueT value = CURR_SLOT.opnd[1].X_add_number;
9992
9993 if (CURR_SLOT.opnd[3].X_add_number < 64)
9994 value &= ((valueT)1 << CURR_SLOT.opnd[3].X_add_number) - 1;
9995 value <<= CURR_SLOT.opnd[2].X_add_number;
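/* Worked example (illustrative operands): for "dep.z r4 = 7, 4, 8" this
   computes (7 & ((1 << 8) - 1)) << 4 == 0x70.  */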
9996 gr_values[regno].known = 1;
9997 gr_values[regno].value = value;
9998 gr_values[regno].path = md.path;
9999 if (md.debug_dv)
10000 {
10001 fprintf (stderr, " Know gr%d = ", regno);
10002 fprintf_vma (stderr, gr_values[regno].value);
10003 fputs ("\n", stderr);
10004 }
10005 }
10006 }
10007 else
10008 {
10009 clear_qp_mutex (qp_changemask);
10010 clear_qp_implies (qp_changemask, qp_changemask);
10011 }
10012 }
10013
10014 /* Return whether the given predicate registers are currently mutex. */
10015
10016 static int
10017 qp_mutex (int p1, int p2, int path)
10018 {
10019 int i;
10020 valueT mask;
10021
10022 if (p1 != p2)
10023 {
10024 mask = ((valueT) 1 << p1) | (valueT) 1 << p2;
10025 for (i = 0; i < qp_mutexeslen; i++)
10026 {
10027 if (qp_mutexes[i].path >= path
10028 && (qp_mutexes[i].prmask & mask) == mask)
10029 return 1;
10030 }
10031 }
10032 return 0;
10033 }
10034
10035 /* Return whether the given resource conflicts with the given insn's list of
10036 chks.  Return 1 if the conflict is absolutely determined, 2 if it's a
10037 potential conflict, and 0 if there is no conflict. */
10038
10039 static int
10040 resources_match (struct rsrc *rs,
10041 struct ia64_opcode *idesc,
10042 int note,
10043 int qp_regno,
10044 int path)
10045 {
10046 struct rsrc specs[MAX_SPECS];
10047 int count;
10048
10049 /* If the marked resource's qp_regno and the given qp_regno are mutex,
10050 we don't need to check. One exception is note 11, which indicates that
10051 target predicates are written regardless of PR[qp]. */
10052 if (qp_mutex (rs->qp_regno, qp_regno, path)
10053 && note != 11)
10054 return 0;
10055
10056 count = specify_resource (rs->dependency, idesc, DV_CHK, specs, note, path);
10057 while (count-- > 0)
10058 {
10059 /* UNAT checking is a bit more specific than for other resources. */
10060 if (rs->dependency->specifier == IA64_RS_AR_UNAT
10061 && specs[count].mem_offset.hint
10062 && rs->mem_offset.hint)
10063 {
10064 if (rs->mem_offset.base == specs[count].mem_offset.base)
10065 {
10066 if (((rs->mem_offset.offset >> 3) & 0x3F) ==
10067 ((specs[count].mem_offset.offset >> 3) & 0x3F))
10068 return 1;
10069 else
10070 continue;
10071 }
10072 }
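/* Note that the 0x3F masking above means hinted offsets which differ by a
   multiple of 512 bytes from the same base map to the same UNAT bit; e.g.
   (illustrative) offsets 8 and 520 both map to bit 1 and are reported as a
   definite conflict.  */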
10073
10074 /* Skip apparent PR write conflicts where both writes are an AND or both
10075 writes are an OR. */
10076 if (rs->dependency->specifier == IA64_RS_PR
10077 || rs->dependency->specifier == IA64_RS_PRr
10078 || rs->dependency->specifier == IA64_RS_PR63)
10079 {
10080 if (specs[count].cmp_type != CMP_NONE
10081 && specs[count].cmp_type == rs->cmp_type)
10082 {
10083 if (md.debug_dv)
10084 fprintf (stderr, " %s on parallel compare allowed (PR%d)\n",
10085 dv_mode[rs->dependency->mode],
10086 rs->dependency->specifier != IA64_RS_PR63 ?
10087 specs[count].index : 63);
10088 continue;
10089 }
10090 if (md.debug_dv)
10091 fprintf (stderr,
10092 " %s on parallel compare conflict %s vs %s on PR%d\n",
10093 dv_mode[rs->dependency->mode],
10094 dv_cmp_type[rs->cmp_type],
10095 dv_cmp_type[specs[count].cmp_type],
10096 rs->dependency->specifier != IA64_RS_PR63 ?
10097 specs[count].index : 63);
10098
10099 }
10100
10101 /* If either resource is not specific, conservatively assume a
10102 conflict. */
10103 if (!specs[count].specific || !rs->specific)
10104 return 2;
10105 else if (specs[count].index == rs->index)
10106 return 1;
10107 }
10108
10109 return 0;
10110 }
10111
10112 /* Indicate an instruction group break; if INSERT_STOP is non-zero, then
10113 insert a stop to create the break. Update all resource dependencies
10114 appropriately. If QP_REGNO is non-zero, only apply the break to resources
10115 which use the same QP_REGNO and have the link_to_qp_branch flag set.
10116 If SAVE_CURRENT is non-zero, don't affect resources marked by the current
10117 instruction. */
10118
10119 static void
10120 insn_group_break (int insert_stop, int qp_regno, int save_current)
10121 {
10122 int i;
10123
10124 if (insert_stop && md.num_slots_in_use > 0)
10125 PREV_SLOT.end_of_insn_group = 1;
10126
10127 if (md.debug_dv)
10128 {
10129 fprintf (stderr, " Insn group break%s",
10130 (insert_stop ? " (w/stop)" : ""));
10131 if (qp_regno != 0)
10132 fprintf (stderr, " effective for QP=%d", qp_regno);
10133 fprintf (stderr, "\n");
10134 }
10135
10136 i = 0;
10137 while (i < regdepslen)
10138 {
10139 const struct ia64_dependency *dep = regdeps[i].dependency;
10140
10141 if (qp_regno != 0
10142 && regdeps[i].qp_regno != qp_regno)
10143 {
10144 ++i;
10145 continue;
10146 }
10147
10148 if (save_current
10149 && CURR_SLOT.src_file == regdeps[i].file
10150 && CURR_SLOT.src_line == regdeps[i].line)
10151 {
10152 ++i;
10153 continue;
10154 }
10155
10156 /* clear dependencies which are automatically cleared by a stop, or
10157 those that have reached the appropriate state of insn serialization */
10158 if (dep->semantics == IA64_DVS_IMPLIED
10159 || dep->semantics == IA64_DVS_IMPLIEDF
10160 || regdeps[i].insn_srlz == STATE_SRLZ)
10161 {
10162 print_dependency ("Removing", i);
10163 regdeps[i] = regdeps[--regdepslen];
10164 }
10165 else
10166 {
10167 if (dep->semantics == IA64_DVS_DATA
10168 || dep->semantics == IA64_DVS_INSTR
10169 || dep->semantics == IA64_DVS_SPECIFIC)
10170 {
10171 if (regdeps[i].insn_srlz == STATE_NONE)
10172 regdeps[i].insn_srlz = STATE_STOP;
10173 if (regdeps[i].data_srlz == STATE_NONE)
10174 regdeps[i].data_srlz = STATE_STOP;
10175 }
10176 ++i;
10177 }
10178 }
10179 }
10180
10181 /* Add the given resource usage spec to the list of active dependencies. */
10182
10183 static void
10184 mark_resource (struct ia64_opcode *idesc ATTRIBUTE_UNUSED,
10185 const struct ia64_dependency *dep ATTRIBUTE_UNUSED,
10186 struct rsrc *spec,
10187 int depind,
10188 int path)
10189 {
10190 if (regdepslen == regdepstotlen)
10191 {
10192 regdepstotlen += 20;
10193 regdeps = XRESIZEVEC (struct rsrc, regdeps, regdepstotlen);
10194 }
10195
10196 regdeps[regdepslen] = *spec;
10197 regdeps[regdepslen].depind = depind;
10198 regdeps[regdepslen].path = path;
10199 regdeps[regdepslen].file = CURR_SLOT.src_file;
10200 regdeps[regdepslen].line = CURR_SLOT.src_line;
10201
10202 print_dependency ("Adding", regdepslen);
10203
10204 ++regdepslen;
10205 }
10206
10207 static void
10208 print_dependency (const char *action, int depind)
10209 {
10210 if (md.debug_dv)
10211 {
10212 fprintf (stderr, " %s %s '%s'",
10213 action, dv_mode[(regdeps[depind].dependency)->mode],
10214 (regdeps[depind].dependency)->name);
10215 if (regdeps[depind].specific && regdeps[depind].index >= 0)
10216 fprintf (stderr, " (%d)", regdeps[depind].index);
10217 if (regdeps[depind].mem_offset.hint)
10218 {
10219 fputs (" ", stderr);
10220 fprintf_vma (stderr, regdeps[depind].mem_offset.base);
10221 fputs ("+", stderr);
10222 fprintf_vma (stderr, regdeps[depind].mem_offset.offset);
10223 }
10224 fprintf (stderr, "\n");
10225 }
10226 }
10227
10228 static void
10229 instruction_serialization (void)
10230 {
10231 int i;
10232 if (md.debug_dv)
10233 fprintf (stderr, " Instruction serialization\n");
10234 for (i = 0; i < regdepslen; i++)
10235 if (regdeps[i].insn_srlz == STATE_STOP)
10236 regdeps[i].insn_srlz = STATE_SRLZ;
10237 }
10238
10239 static void
10240 data_serialization (void)
10241 {
10242 int i = 0;
10243 if (md.debug_dv)
10244 fprintf (stderr, " Data serialization\n");
10245 while (i < regdepslen)
10246 {
10247 if (regdeps[i].data_srlz == STATE_STOP
10248 /* Note: as of 991210, all "other" dependencies are cleared by a
10249 data serialization.  This might change with new tables. */
10250 || (regdeps[i].dependency)->semantics == IA64_DVS_OTHER)
10251 {
10252 print_dependency ("Removing", i);
10253 regdeps[i] = regdeps[--regdepslen];
10254 }
10255 else
10256 ++i;
10257 }
10258 }
10259
10260 /* Insert stops and serializations as needed to avoid DVs. */
10261
10262 static void
10263 remove_marked_resource (struct rsrc *rs)
10264 {
10265 switch (rs->dependency->semantics)
10266 {
10267 case IA64_DVS_SPECIFIC:
10268 if (md.debug_dv)
10269 fprintf (stderr, "Implementation-specific, assume worst case...\n");
10270 /* Fall through. */
10271 case IA64_DVS_INSTR:
10272 if (md.debug_dv)
10273 fprintf (stderr, "Inserting instr serialization\n");
10274 if (rs->insn_srlz < STATE_STOP)
10275 insn_group_break (1, 0, 0);
10276 if (rs->insn_srlz < STATE_SRLZ)
10277 {
10278 struct slot oldslot = CURR_SLOT;
10279 /* Manually jam a srlz.i insn into the stream */
10280 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
10281 CURR_SLOT.user_template = -1;
10282 CURR_SLOT.idesc = ia64_find_opcode ("srlz.i");
10283 instruction_serialization ();
10284 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10285 if (++md.num_slots_in_use >= NUM_SLOTS)
10286 emit_one_bundle ();
10287 CURR_SLOT = oldslot;
10288 }
10289 insn_group_break (1, 0, 0);
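/* The net effect above is roughly to emit ";; srlz.i ;;" ahead of the
   conflicting instruction (the leading stop only when no stop or
   serialization is already pending).  */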
10290 break;
10291 case IA64_DVS_OTHER: /* as of rev2 (991220) of the DV tables, all
10292 "other" types of DV are eliminated
10293 by a data serialization */
10294 case IA64_DVS_DATA:
10295 if (md.debug_dv)
10296 fprintf (stderr, "Inserting data serialization\n");
10297 if (rs->data_srlz < STATE_STOP)
10298 insn_group_break (1, 0, 0);
10299 {
10300 struct slot oldslot = CURR_SLOT;
10301 /* Manually jam a srlz.d insn into the stream */
10302 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
10303 CURR_SLOT.user_template = -1;
10304 CURR_SLOT.idesc = ia64_find_opcode ("srlz.d");
10305 data_serialization ();
10306 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10307 if (++md.num_slots_in_use >= NUM_SLOTS)
10308 emit_one_bundle ();
10309 CURR_SLOT = oldslot;
10310 }
10311 break;
10312 case IA64_DVS_IMPLIED:
10313 case IA64_DVS_IMPLIEDF:
10314 if (md.debug_dv)
10315 fprintf (stderr, "Inserting stop\n");
10316 insn_group_break (1, 0, 0);
10317 break;
10318 default:
10319 break;
10320 }
10321 }
10322
10323 /* Check the resources used by the given opcode against the current dependency
10324 list.
10325
10326 The check is run once for each execution path encountered. In this case,
10327 a unique execution path is the sequence of instructions following a code
10328 entry point, e.g. the following has three execution paths, one starting
10329 at L0, one at L1, and one at L2.
10330
10331 L0: nop
10332 L1: add
10333 L2: add
10334 br.ret
10335 */
10336
10337 static void
10338 check_dependencies (struct ia64_opcode *idesc)
10339 {
10340 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
10341 int path;
10342 int i;
10343
10344 /* Note that the number of marked resources may change within the
10345 loop if in auto mode. */
10346 i = 0;
10347 while (i < regdepslen)
10348 {
10349 struct rsrc *rs = &regdeps[i];
10350 const struct ia64_dependency *dep = rs->dependency;
10351 int chkind;
10352 int note;
10353 int start_over = 0;
10354
10355 if (dep->semantics == IA64_DVS_NONE
10356 || (chkind = depends_on (rs->depind, idesc)) == -1)
10357 {
10358 ++i;
10359 continue;
10360 }
10361
10362 note = NOTE (opdeps->chks[chkind]);
10363
10364 /* Check this resource against each execution path seen thus far. */
10365 for (path = 0; path <= md.path; path++)
10366 {
10367 int matchtype;
10368
10369 /* If the dependency wasn't on the path being checked, ignore it. */
10370 if (rs->path < path)
10371 continue;
10372
10373 /* If the QP for this insn implies a QP which has branched, don't
10374 bother checking. Ed. NOTE: I don't think this check is terribly
10375 useful; what's the point of generating code which will only be
10376 reached if its QP is zero?
10377 This code was specifically inserted to handle the following code,
10378 based on notes from Intel's DV checking code, where p1 implies p2.
10379
10380 mov r4 = 2
10381 (p2) br.cond L
10382 (p1) mov r4 = 7
10383 */
10384 if (CURR_SLOT.qp_regno != 0)
10385 {
10386 int skip = 0;
10387 int implies;
10388 for (implies = 0; implies < qp_implieslen; implies++)
10389 {
10390 if (qp_implies[implies].path >= path
10391 && qp_implies[implies].p1 == CURR_SLOT.qp_regno
10392 && qp_implies[implies].p2_branched)
10393 {
10394 skip = 1;
10395 break;
10396 }
10397 }
10398 if (skip)
10399 continue;
10400 }
10401
10402 if ((matchtype = resources_match (rs, idesc, note,
10403 CURR_SLOT.qp_regno, path)) != 0)
10404 {
10405 char msg[1024];
10406 char pathmsg[256] = "";
10407 char indexmsg[256] = "";
10408 int certain = (matchtype == 1 && CURR_SLOT.qp_regno == 0);
10409
10410 if (path != 0)
10411 snprintf (pathmsg, sizeof (pathmsg),
10412 " when entry is at label '%s'",
10413 md.entry_labels[path - 1]);
10414 if (matchtype == 1 && rs->index >= 0)
10415 snprintf (indexmsg, sizeof (indexmsg),
10416 ", specific resource number is %d",
10417 rs->index);
10418 snprintf (msg, sizeof (msg),
10419 "Use of '%s' %s %s dependency '%s' (%s)%s%s",
10420 idesc->name,
10421 (certain ? "violates" : "may violate"),
10422 dv_mode[dep->mode], dep->name,
10423 dv_sem[dep->semantics],
10424 pathmsg, indexmsg);
10425
10426 if (md.explicit_mode)
10427 {
10428 as_warn ("%s", msg);
10429 if (path < md.path)
10430 as_warn (_("Only the first path encountering the conflict is reported"));
10431 as_warn_where (rs->file, rs->line,
10432 _("This is the location of the conflicting usage"));
10433 /* Don't bother checking other paths, to avoid duplicating
10434 the same warning */
10435 break;
10436 }
10437 else
10438 {
10439 if (md.debug_dv)
10440 fprintf (stderr, "%s @ %s:%d\n", msg, rs->file, rs->line);
10441
10442 remove_marked_resource (rs);
10443
10444 /* since the set of dependencies has changed, start over */
10445 /* FIXME -- since we're removing dvs as we go, we
10446 probably don't really need to start over... */
10447 start_over = 1;
10448 break;
10449 }
10450 }
10451 }
10452 if (start_over)
10453 i = 0;
10454 else
10455 ++i;
10456 }
10457 }
10458
10459 /* Register new dependencies based on the given opcode. */
10460
10461 static void
10462 mark_resources (struct ia64_opcode *idesc)
10463 {
10464 int i;
10465 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
10466 int add_only_qp_reads = 0;
10467
10468 /* A conditional branch only uses its resources if it is taken; if it is
10469 taken, we stop following that path. The other branch types effectively
10470 *always* write their resources. If it's not taken, register only QP
10471 reads. */
10472 if (is_conditional_branch (idesc) || is_interruption_or_rfi (idesc))
10473 {
10474 add_only_qp_reads = 1;
10475 }
10476
10477 if (md.debug_dv)
10478 fprintf (stderr, "Registering '%s' resource usage\n", idesc->name);
10479
10480 for (i = 0; i < opdeps->nregs; i++)
10481 {
10482 const struct ia64_dependency *dep;
10483 struct rsrc specs[MAX_SPECS];
10484 int note;
10485 int path;
10486 int count;
10487
10488 dep = ia64_find_dependency (opdeps->regs[i]);
10489 note = NOTE (opdeps->regs[i]);
10490
10491 if (add_only_qp_reads
10492 && !(dep->mode == IA64_DV_WAR
10493 && (dep->specifier == IA64_RS_PR
10494 || dep->specifier == IA64_RS_PRr
10495 || dep->specifier == IA64_RS_PR63)))
10496 continue;
10497
10498 count = specify_resource (dep, idesc, DV_REG, specs, note, md.path);
10499
10500 while (count-- > 0)
10501 {
10502 mark_resource (idesc, dep, &specs[count],
10503 DEP (opdeps->regs[i]), md.path);
10504 }
10505
10506 /* The execution path may affect register values, which may in turn
10507 affect which indirect-access resources are accessed. */
10508 switch (dep->specifier)
10509 {
10510 default:
10511 break;
10512 case IA64_RS_CPUID:
10513 case IA64_RS_DBR:
10514 case IA64_RS_IBR:
10515 case IA64_RS_MSR:
10516 case IA64_RS_PKR:
10517 case IA64_RS_PMC:
10518 case IA64_RS_PMD:
10519 case IA64_RS_RR:
10520 for (path = 0; path < md.path; path++)
10521 {
10522 count = specify_resource (dep, idesc, DV_REG, specs, note, path);
10523 while (count-- > 0)
10524 mark_resource (idesc, dep, &specs[count],
10525 DEP (opdeps->regs[i]), path);
10526 }
10527 break;
10528 }
10529 }
10530 }
10531
10532 /* Remove dependencies when they no longer apply. */
10533
10534 static void
10535 update_dependencies (struct ia64_opcode *idesc)
10536 {
10537 int i;
10538
10539 if (strcmp (idesc->name, "srlz.i") == 0)
10540 {
10541 instruction_serialization ();
10542 }
10543 else if (strcmp (idesc->name, "srlz.d") == 0)
10544 {
10545 data_serialization ();
10546 }
10547 else if (is_interruption_or_rfi (idesc)
10548 || is_taken_branch (idesc))
10549 {
10550 /* Although technically the taken branch doesn't clear dependencies
10551 which require a srlz.[id], we don't follow the branch; the next
10552 instruction is assumed to start with a clean slate. */
10553 regdepslen = 0;
10554 md.path = 0;
10555 }
10556 else if (is_conditional_branch (idesc)
10557 && CURR_SLOT.qp_regno != 0)
10558 {
10559 int is_call = strstr (idesc->name, ".call") != NULL;
10560
10561 for (i = 0; i < qp_implieslen; i++)
10562 {
10563 /* If the conditional branch's predicate is implied by the predicate
10564 in an existing dependency, remove that dependency. */
10565 if (qp_implies[i].p2 == CURR_SLOT.qp_regno)
10566 {
10567 int depind = 0;
10568 /* Note that this implied predicate takes a branch so that if
10569 a later insn generates a DV but its predicate implies this
10570 one, we can avoid the false DV warning. */
10571 qp_implies[i].p2_branched = 1;
10572 while (depind < regdepslen)
10573 {
10574 if (regdeps[depind].qp_regno == qp_implies[i].p1)
10575 {
10576 print_dependency ("Removing", depind);
10577 regdeps[depind] = regdeps[--regdepslen];
10578 }
10579 else
10580 ++depind;
10581 }
10582 }
10583 }
10584 /* Any marked resources which have this same predicate should be
10585 cleared, provided that the QP hasn't been modified between the
10586 marking instruction and the branch. */
10587 if (is_call)
10588 {
10589 insn_group_break (0, CURR_SLOT.qp_regno, 1);
10590 }
10591 else
10592 {
10593 i = 0;
10594 while (i < regdepslen)
10595 {
10596 if (regdeps[i].qp_regno == CURR_SLOT.qp_regno
10597 && regdeps[i].link_to_qp_branch
10598 && (regdeps[i].file != CURR_SLOT.src_file
10599 || regdeps[i].line != CURR_SLOT.src_line))
10600 {
10601 /* Treat like a taken branch */
10602 print_dependency ("Removing", i);
10603 regdeps[i] = regdeps[--regdepslen];
10604 }
10605 else
10606 ++i;
10607 }
10608 }
10609 }
10610 }
10611
10612 /* Examine the current instruction for dependency violations. */
10613
10614 static int
10615 check_dv (struct ia64_opcode *idesc)
10616 {
10617 if (md.debug_dv)
10618 {
10619 fprintf (stderr, "Checking %s for violations (line %d, %d/%d)\n",
10620 idesc->name, CURR_SLOT.src_line,
10621 idesc->dependencies->nchks,
10622 idesc->dependencies->nregs);
10623 }
10624
10625 /* Look through the list of currently marked resources; if the current
10626 instruction has the dependency in its chks list which uses that resource,
10627 check against the specific resources used. */
10628 check_dependencies (idesc);
10629
10630 /* Look up the instruction's regdeps (RAW writes, WAW writes, and WAR reads),
10631 then add them to the list of marked resources. */
10632 mark_resources (idesc);
10633
10634 /* There are several types of dependency semantics, and each has its own
10635 requirements for being cleared
10636
10637 Instruction serialization (insns separated by interruption, rfi, or
10638 writer + srlz.i + reader, all in separate groups) clears DVS_INSTR.
10639
10640 Data serialization (instruction serialization, or writer + srlz.d +
10641 reader, where writer and srlz.d are in separate groups) clears
10642 DVS_DATA. (This also clears DVS_OTHER, but that is not guaranteed to
10643 always be the case).
10644
10645 Instruction group break (groups separated by stop, taken branch,
10646 interruption or rfi) clears DVS_IMPLIED and DVS_IMPLIEDF.
10647 */
10648 update_dependencies (idesc);
10649
10650 /* Sometimes, knowing a register value allows us to avoid giving a false DV
10651 warning. Keep track of as many as possible that are useful. */
10652 note_register_values (idesc);
10653
10654 /* We don't need or want this anymore. */
10655 md.mem_offset.hint = 0;
10656
10657 return 0;
10658 }
10659
10660 /* Translate one line of assembly. Pseudo ops and labels do not show
10661 here. */
10662 void
10663 md_assemble (char *str)
10664 {
10665 char *saved_input_line_pointer, *temp;
10666 const char *mnemonic;
10667 const struct pseudo_opcode *pdesc;
10668 struct ia64_opcode *idesc;
10669 unsigned char qp_regno;
10670 unsigned int flags;
10671 int ch;
10672
10673 saved_input_line_pointer = input_line_pointer;
10674 input_line_pointer = str;
10675
10676 /* extract the opcode (mnemonic): */
10677
10678 ch = get_symbol_name (&temp);
10679 mnemonic = temp;
10680 pdesc = (struct pseudo_opcode *) hash_find (md.pseudo_hash, mnemonic);
10681 if (pdesc)
10682 {
10683 (void) restore_line_pointer (ch);
10684 (*pdesc->handler) (pdesc->arg);
10685 goto done;
10686 }
10687
10688 /* Find the instruction descriptor matching the arguments. */
10689
10690 idesc = ia64_find_opcode (mnemonic);
10691 (void) restore_line_pointer (ch);
10692 if (!idesc)
10693 {
10694 as_bad (_("Unknown opcode `%s'"), mnemonic);
10695 goto done;
10696 }
10697
10698 idesc = parse_operands (idesc);
10699 if (!idesc)
10700 goto done;
10701
10702 /* Handle the dynamic ops we can handle now: */
10703 if (idesc->type == IA64_TYPE_DYN)
10704 {
10705 if (strcmp (idesc->name, "add") == 0)
10706 {
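/* Informal note: the "add r1 = imm, r3" pseudo is resolved by looking at
the source register. "addl" (22-bit immediate) encodes its source GR in
a 2-bit field, so it is only usable when that register is r0-r3; any
other register forces the "adds" (14-bit immediate) form. For example,
"add r8 = 200000, r2" can become addl, while "add r8 = 100, r9" has to
become adds. */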
10707 if (CURR_SLOT.opnd[2].X_op == O_register
10708 && CURR_SLOT.opnd[2].X_add_number < 4)
10709 mnemonic = "addl";
10710 else
10711 mnemonic = "adds";
10712 ia64_free_opcode (idesc);
10713 idesc = ia64_find_opcode (mnemonic);
10714 }
10715 else if (strcmp (idesc->name, "mov") == 0)
10716 {
10717 enum ia64_opnd opnd1, opnd2;
10718 int rop;
10719
10720 opnd1 = idesc->operands[0];
10721 opnd2 = idesc->operands[1];
10722 if (opnd1 == IA64_OPND_AR3)
10723 rop = 0;
10724 else if (opnd2 == IA64_OPND_AR3)
10725 rop = 1;
10726 else
10727 abort ();
10728 if (CURR_SLOT.opnd[rop].X_op == O_register)
10729 {
10730 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10731 mnemonic = "mov.i";
10732 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10733 mnemonic = "mov.m";
10734 else
10735 rop = -1;
10736 }
10737 else
10738 abort ();
10739 if (rop >= 0)
10740 {
10741 ia64_free_opcode (idesc);
10742 idesc = ia64_find_opcode (mnemonic);
10743 while (idesc != NULL
10744 && (idesc->operands[0] != opnd1
10745 || idesc->operands[1] != opnd2))
10746 idesc = get_next_opcode (idesc);
10747 }
10748 }
10749 }
10750 else if (strcmp (idesc->name, "mov.i") == 0
10751 || strcmp (idesc->name, "mov.m") == 0)
10752 {
10753 enum ia64_opnd opnd1, opnd2;
10754 int rop;
10755
10756 opnd1 = idesc->operands[0];
10757 opnd2 = idesc->operands[1];
10758 if (opnd1 == IA64_OPND_AR3)
10759 rop = 0;
10760 else if (opnd2 == IA64_OPND_AR3)
10761 rop = 1;
10762 else
10763 abort ();
10764 if (CURR_SLOT.opnd[rop].X_op == O_register)
10765 {
10766 char unit = 'a';
10767 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10768 unit = 'i';
10769 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10770 unit = 'm';
10771 if (unit != 'a' && unit != idesc->name [4])
10772 as_bad (_("AR %d can only be accessed by %c-unit"),
10773 (int) (CURR_SLOT.opnd[rop].X_add_number - REG_AR),
10774 TOUPPER (unit));
10775 }
10776 }
10777 else if (strcmp (idesc->name, "hint.b") == 0)
10778 {
10779 switch (md.hint_b)
10780 {
10781 case hint_b_ok:
10782 break;
10783 case hint_b_warning:
10784 as_warn (_("hint.b may be treated as nop"));
10785 break;
10786 case hint_b_error:
10787 as_bad (_("hint.b shouldn't be used"));
10788 break;
10789 }
10790 }
10791
10792 qp_regno = 0;
10793 if (md.qp.X_op == O_register)
10794 {
10795 qp_regno = md.qp.X_add_number - REG_P;
10796 md.qp.X_op = O_absent;
10797 }
10798
10799 flags = idesc->flags;
10800
10801 if ((flags & IA64_OPCODE_FIRST) != 0)
10802 {
10803 /* The alignment frag has to end with a stop bit only if the
10804 next instruction after the alignment directive has to be
10805 the first instruction in an instruction group. */
10806 if (align_frag)
10807 {
10808 while (align_frag->fr_type != rs_align_code)
10809 {
10810 align_frag = align_frag->fr_next;
10811 if (!align_frag)
10812 break;
10813 }
10814 /* align_frag can be NULL if there are directives in
10815 between. */
10816 if (align_frag && align_frag->fr_next == frag_now)
10817 align_frag->tc_frag_data = 1;
10818 }
10819
10820 insn_group_break (1, 0, 0);
10821 }
10822 align_frag = NULL;
10823
10824 if ((flags & IA64_OPCODE_NO_PRED) != 0 && qp_regno != 0)
10825 {
10826 as_bad (_("`%s' cannot be predicated"), idesc->name);
10827 goto done;
10828 }
10829
10830 /* Build the instruction. */
10831 CURR_SLOT.qp_regno = qp_regno;
10832 CURR_SLOT.idesc = idesc;
10833 CURR_SLOT.src_file = as_where (&CURR_SLOT.src_line);
10834 dwarf2_where (&CURR_SLOT.debug_line);
10835 dwarf2_consume_line_info ();
10836
10837 /* Add unwind entries, if there are any. */
10838 if (unwind.current_entry)
10839 {
10840 CURR_SLOT.unwind_record = unwind.current_entry;
10841 unwind.current_entry = NULL;
10842 }
10843 if (unwind.pending_saves)
10844 {
10845 if (unwind.pending_saves->next)
10846 {
10847 /* Attach the next pending save to the next slot so that its
10848 slot number will get set correctly. */
10849 add_unwind_entry (unwind.pending_saves->next, NOT_A_CHAR);
10850 unwind.pending_saves = &unwind.pending_saves->next->r.record.p;
10851 }
10852 else
10853 unwind.pending_saves = NULL;
10854 }
10855 if (unwind.proc_pending.sym && S_IS_DEFINED (unwind.proc_pending.sym))
10856 unwind.insn = 1;
10857
10858 /* Check for dependency violations. */
10859 if (md.detect_dv)
10860 check_dv (idesc);
10861
10862 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10863 if (++md.num_slots_in_use >= NUM_SLOTS)
10864 emit_one_bundle ();
10865
10866 if ((flags & IA64_OPCODE_LAST) != 0)
10867 insn_group_break (1, 0, 0);
10868
10869 md.last_text_seg = now_seg;
10870
10871 done:
10872 input_line_pointer = saved_input_line_pointer;
10873 }
10874
10875 /* Called when symbol NAME cannot be found in the symbol table.
10876 Should be used for dynamic valued symbols only. */
10877
10878 symbolS *
10879 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
10880 {
10881 return 0;
10882 }
10883
10884 /* Called for any expression that cannot be recognized. When the
10885 function is called, `input_line_pointer' will point to the start of
10886 the expression. */
10887
10888 void
10889 md_operand (expressionS *e)
10890 {
10891 switch (*input_line_pointer)
10892 {
10893 case '[':
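/* A bracketed expression names the index of an indirect register file
access, e.g. "mov r16 = pmc[r3]" or "mov dbr[r3] = r16"; the index must
itself be a general register. */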
10894 ++input_line_pointer;
10895 expression_and_evaluate (e);
10896 if (*input_line_pointer != ']')
10897 {
10898 as_bad (_("Closing bracket missing"));
10899 goto err;
10900 }
10901 else
10902 {
10903 if (e->X_op != O_register
10904 || e->X_add_number < REG_GR
10905 || e->X_add_number > REG_GR + 127)
10906 {
10907 as_bad (_("Index must be a general register"));
10908 e->X_add_number = REG_GR;
10909 }
10910
10911 ++input_line_pointer;
10912 e->X_op = O_index;
10913 }
10914 break;
10915
10916 default:
10917 break;
10918 }
10919 return;
10920
10921 err:
10922 ignore_rest_of_line ();
10923 }
10924
10925 /* Return 1 if it's OK to adjust a reloc by replacing the symbol with
10926 a section symbol plus some offset. For relocs involving @fptr()
10927 directives, we don't want such adjustments since we need to have the
10928 original symbol's name in the reloc. */
10929 int
10930 ia64_fix_adjustable (fixS *fix)
10931 {
10932 /* Prevent all adjustments to global symbols */
10933 if (S_IS_EXTERNAL (fix->fx_addsy) || S_IS_WEAK (fix->fx_addsy))
10934 return 0;
10935
10936 switch (fix->fx_r_type)
10937 {
10938 case BFD_RELOC_IA64_FPTR64I:
10939 case BFD_RELOC_IA64_FPTR32MSB:
10940 case BFD_RELOC_IA64_FPTR32LSB:
10941 case BFD_RELOC_IA64_FPTR64MSB:
10942 case BFD_RELOC_IA64_FPTR64LSB:
10943 case BFD_RELOC_IA64_LTOFF_FPTR22:
10944 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10945 return 0;
10946 default:
10947 break;
10948 }
10949
10950 return 1;
10951 }
10952
10953 int
10954 ia64_force_relocation (fixS *fix)
10955 {
10956 switch (fix->fx_r_type)
10957 {
10958 case BFD_RELOC_IA64_FPTR64I:
10959 case BFD_RELOC_IA64_FPTR32MSB:
10960 case BFD_RELOC_IA64_FPTR32LSB:
10961 case BFD_RELOC_IA64_FPTR64MSB:
10962 case BFD_RELOC_IA64_FPTR64LSB:
10963
10964 case BFD_RELOC_IA64_LTOFF22:
10965 case BFD_RELOC_IA64_LTOFF64I:
10966 case BFD_RELOC_IA64_LTOFF_FPTR22:
10967 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10968 case BFD_RELOC_IA64_PLTOFF22:
10969 case BFD_RELOC_IA64_PLTOFF64I:
10970 case BFD_RELOC_IA64_PLTOFF64MSB:
10971 case BFD_RELOC_IA64_PLTOFF64LSB:
10972
10973 case BFD_RELOC_IA64_LTOFF22X:
10974 case BFD_RELOC_IA64_LDXMOV:
10975 return 1;
10976
10977 default:
10978 break;
10979 }
10980
10981 return generic_force_reloc (fix);
10982 }
10983
10984 /* Decide what point a pc-relative relocation is relative to,
10985 relative to the pc-relative fixup. Er, relatively speaking. */
10986 long
10987 ia64_pcrel_from_section (fixS *fix, segT sec)
10988 {
10989 unsigned long off = fix->fx_frag->fr_address + fix->fx_where;
10990
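/* Note: IA-64 IP-relative values are measured from the start of the
16-byte bundle containing the fixup, not from the instruction slot
itself, so in code sections the slot bits are masked off below. */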
10991 if (bfd_section_flags (sec) & SEC_CODE)
10992 off &= ~0xfUL;
10993
10994 return off;
10995 }
10996
10997
10998 /* Used to emit section-relative relocs for the dwarf2 debug data. */
10999 void
11000 ia64_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
11001 {
11002 expressionS exp;
11003
11004 exp.X_op = O_pseudo_fixup;
11005 exp.X_op_symbol = pseudo_func[FUNC_SEC_RELATIVE].u.sym;
11006 exp.X_add_number = 0;
11007 exp.X_add_symbol = symbol;
11008 emit_expr (&exp, size);
11009 }
11010
11011 /* This is called whenever some data item (not an instruction) needs a
11012 fixup. We pick the right reloc code depending on the byteorder
11013 currently in effect. */
11014 void
11015 ia64_cons_fix_new (fragS *f, int where, int nbytes, expressionS *exp,
11016 bfd_reloc_code_real_type code)
11017 {
11018 fixS *fix;
11019
11020 switch (nbytes)
11021 {
11022 /* There are no relocs for 8- and 16-bit quantities, but we allow
11023 them here since they will work fine as long as the expression
11024 is fully defined at the end of the pass over the source file. */
11025 case 1: code = BFD_RELOC_8; break;
11026 case 2: code = BFD_RELOC_16; break;
11027 case 4:
11028 if (target_big_endian)
11029 code = BFD_RELOC_IA64_DIR32MSB;
11030 else
11031 code = BFD_RELOC_IA64_DIR32LSB;
11032 break;
11033
11034 case 8:
11035 /* In 32-bit mode, data8 could mean function descriptors too. */
11036 if (exp->X_op == O_pseudo_fixup
11037 && exp->X_op_symbol
11038 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC
11039 && !(md.flags & EF_IA_64_ABI64))
11040 {
11041 if (target_big_endian)
11042 code = BFD_RELOC_IA64_IPLTMSB;
11043 else
11044 code = BFD_RELOC_IA64_IPLTLSB;
11045 exp->X_op = O_symbol;
11046 break;
11047 }
11048 else
11049 {
11050 if (target_big_endian)
11051 code = BFD_RELOC_IA64_DIR64MSB;
11052 else
11053 code = BFD_RELOC_IA64_DIR64LSB;
11054 break;
11055 }
11056
11057 case 16:
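/* Note: a 16-byte @iplt() datum is expected to describe a full function
descriptor (entry point plus gp), which is what the IPLT relocation
asks the linker to fill in. */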
11058 if (exp->X_op == O_pseudo_fixup
11059 && exp->X_op_symbol
11060 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC)
11061 {
11062 if (target_big_endian)
11063 code = BFD_RELOC_IA64_IPLTMSB;
11064 else
11065 code = BFD_RELOC_IA64_IPLTLSB;
11066 exp->X_op = O_symbol;
11067 break;
11068 }
11069 /* FALLTHRU */
11070
11071 default:
11072 as_bad (_("Unsupported fixup size %d"), nbytes);
11073 ignore_rest_of_line ();
11074 return;
11075 }
11076
11077 if (exp->X_op == O_pseudo_fixup)
11078 {
11079 exp->X_op = O_symbol;
11080 code = ia64_gen_real_reloc_type (exp->X_op_symbol, code);
11081 /* ??? If code unchanged, unsupported. */
11082 }
11083
11084 fix = fix_new_exp (f, where, nbytes, exp, 0, code);
11085 /* We need to store the byte order in effect in case we're going
11086 to fix an 8- or 16-bit relocation (for which there are no real
11087 relocs available). See md_apply_fix(). */
11088 fix->tc_fix_data.bigendian = target_big_endian;
11089 }
11090
11091 /* Return the actual relocation we wish to associate with the pseudo
11092 reloc described by SYM and R_TYPE. SYM should be one of the
11093 symbols in the pseudo_func array, or NULL. */
11094
11095 static bfd_reloc_code_real_type
11096 ia64_gen_real_reloc_type (struct symbol *sym, bfd_reloc_code_real_type r_type)
11097 {
11098 bfd_reloc_code_real_type newr = 0;
11099 const char *type = NULL, *suffix = "";
11100
11101 if (sym == NULL)
11102 {
11103 return r_type;
11104 }
11105
11106 switch (S_GET_VALUE (sym))
11107 {
11108 case FUNC_FPTR_RELATIVE:
11109 switch (r_type)
11110 {
11111 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_FPTR64I; break;
11112 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_FPTR32MSB; break;
11113 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_FPTR32LSB; break;
11114 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_FPTR64MSB; break;
11115 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_FPTR64LSB; break;
11116 default: type = "FPTR"; break;
11117 }
11118 break;
11119
11120 case FUNC_GP_RELATIVE:
11121 switch (r_type)
11122 {
11123 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_GPREL22; break;
11124 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_GPREL64I; break;
11125 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_GPREL32MSB; break;
11126 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_GPREL32LSB; break;
11127 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_GPREL64MSB; break;
11128 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_GPREL64LSB; break;
11129 default: type = "GPREL"; break;
11130 }
11131 break;
11132
11133 case FUNC_LT_RELATIVE:
11134 switch (r_type)
11135 {
11136 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_LTOFF22; break;
11137 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_LTOFF64I; break;
11138 default: type = "LTOFF"; break;
11139 }
11140 break;
11141
11142 case FUNC_LT_RELATIVE_X:
11143 switch (r_type)
11144 {
11145 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_LTOFF22X; break;
11146 default: type = "LTOFF"; suffix = "X"; break;
11147 }
11148 break;
11149
11150 case FUNC_PC_RELATIVE:
11151 switch (r_type)
11152 {
11153 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_PCREL22; break;
11154 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_PCREL64I; break;
11155 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_PCREL32MSB; break;
11156 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_PCREL32LSB; break;
11157 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_PCREL64MSB; break;
11158 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_PCREL64LSB; break;
11159 default: type = "PCREL"; break;
11160 }
11161 break;
11162
11163 case FUNC_PLT_RELATIVE:
11164 switch (r_type)
11165 {
11166 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_PLTOFF22; break;
11167 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_PLTOFF64I; break;
11168 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_PLTOFF64MSB;break;
11169 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_PLTOFF64LSB;break;
11170 default: type = "PLTOFF"; break;
11171 }
11172 break;
11173
11174 case FUNC_SEC_RELATIVE:
11175 switch (r_type)
11176 {
11177 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_SECREL32MSB;break;
11178 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_SECREL32LSB;break;
11179 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_SECREL64MSB;break;
11180 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_SECREL64LSB;break;
11181 default: type = "SECREL"; break;
11182 }
11183 break;
11184
11185 case FUNC_SEG_RELATIVE:
11186 switch (r_type)
11187 {
11188 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_SEGREL32MSB;break;
11189 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_SEGREL32LSB;break;
11190 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_SEGREL64MSB;break;
11191 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_SEGREL64LSB;break;
11192 default: type = "SEGREL"; break;
11193 }
11194 break;
11195
11196 case FUNC_LTV_RELATIVE:
11197 switch (r_type)
11198 {
11199 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_LTV32MSB; break;
11200 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_LTV32LSB; break;
11201 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_LTV64MSB; break;
11202 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_LTV64LSB; break;
11203 default: type = "LTV"; break;
11204 }
11205 break;
11206
11207 case FUNC_LT_FPTR_RELATIVE:
11208 switch (r_type)
11209 {
11210 case BFD_RELOC_IA64_IMM22:
11211 newr = BFD_RELOC_IA64_LTOFF_FPTR22; break;
11212 case BFD_RELOC_IA64_IMM64:
11213 newr = BFD_RELOC_IA64_LTOFF_FPTR64I; break;
11214 case BFD_RELOC_IA64_DIR32MSB:
11215 newr = BFD_RELOC_IA64_LTOFF_FPTR32MSB; break;
11216 case BFD_RELOC_IA64_DIR32LSB:
11217 newr = BFD_RELOC_IA64_LTOFF_FPTR32LSB; break;
11218 case BFD_RELOC_IA64_DIR64MSB:
11219 newr = BFD_RELOC_IA64_LTOFF_FPTR64MSB; break;
11220 case BFD_RELOC_IA64_DIR64LSB:
11221 newr = BFD_RELOC_IA64_LTOFF_FPTR64LSB; break;
11222 default:
11223 type = "LTOFF_FPTR"; break;
11224 }
11225 break;
11226
11227 case FUNC_TP_RELATIVE:
11228 switch (r_type)
11229 {
11230 case BFD_RELOC_IA64_IMM14: newr = BFD_RELOC_IA64_TPREL14; break;
11231 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_TPREL22; break;
11232 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_TPREL64I; break;
11233 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_TPREL64MSB; break;
11234 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_TPREL64LSB; break;
11235 default: type = "TPREL"; break;
11236 }
11237 break;
11238
11239 case FUNC_LT_TP_RELATIVE:
11240 switch (r_type)
11241 {
11242 case BFD_RELOC_IA64_IMM22:
11243 newr = BFD_RELOC_IA64_LTOFF_TPREL22; break;
11244 default:
11245 type = "LTOFF_TPREL"; break;
11246 }
11247 break;
11248
11249 case FUNC_DTP_MODULE:
11250 switch (r_type)
11251 {
11252 case BFD_RELOC_IA64_DIR64MSB:
11253 newr = BFD_RELOC_IA64_DTPMOD64MSB; break;
11254 case BFD_RELOC_IA64_DIR64LSB:
11255 newr = BFD_RELOC_IA64_DTPMOD64LSB; break;
11256 default:
11257 type = "DTPMOD"; break;
11258 }
11259 break;
11260
11261 case FUNC_LT_DTP_MODULE:
11262 switch (r_type)
11263 {
11264 case BFD_RELOC_IA64_IMM22:
11265 newr = BFD_RELOC_IA64_LTOFF_DTPMOD22; break;
11266 default:
11267 type = "LTOFF_DTPMOD"; break;
11268 }
11269 break;
11270
11271 case FUNC_DTP_RELATIVE:
11272 switch (r_type)
11273 {
11274 case BFD_RELOC_IA64_DIR32MSB:
11275 newr = BFD_RELOC_IA64_DTPREL32MSB; break;
11276 case BFD_RELOC_IA64_DIR32LSB:
11277 newr = BFD_RELOC_IA64_DTPREL32LSB; break;
11278 case BFD_RELOC_IA64_DIR64MSB:
11279 newr = BFD_RELOC_IA64_DTPREL64MSB; break;
11280 case BFD_RELOC_IA64_DIR64LSB:
11281 newr = BFD_RELOC_IA64_DTPREL64LSB; break;
11282 case BFD_RELOC_IA64_IMM14:
11283 newr = BFD_RELOC_IA64_DTPREL14; break;
11284 case BFD_RELOC_IA64_IMM22:
11285 newr = BFD_RELOC_IA64_DTPREL22; break;
11286 case BFD_RELOC_IA64_IMM64:
11287 newr = BFD_RELOC_IA64_DTPREL64I; break;
11288 default:
11289 type = "DTPREL"; break;
11290 }
11291 break;
11292
11293 case FUNC_LT_DTP_RELATIVE:
11294 switch (r_type)
11295 {
11296 case BFD_RELOC_IA64_IMM22:
11297 newr = BFD_RELOC_IA64_LTOFF_DTPREL22; break;
11298 default:
11299 type = "LTOFF_DTPREL"; break;
11300 }
11301 break;
11302
11303 case FUNC_IPLT_RELOC:
11304 switch (r_type)
11305 {
11306 case BFD_RELOC_IA64_IPLTMSB: return r_type;
11307 case BFD_RELOC_IA64_IPLTLSB: return r_type;
11308 default: type = "IPLT"; break;
11309 }
11310 break;
11311
11312 #ifdef TE_VMS
11313 case FUNC_SLOTCOUNT_RELOC:
11314 return DUMMY_RELOC_IA64_SLOTCOUNT;
11315 #endif
11316
11317 default:
11318 abort ();
11319 }
11320
11321 if (newr)
11322 return newr;
11323 else
11324 {
11325 int width;
11326
11327 if (!type)
11328 abort ();
11329 switch (r_type)
11330 {
11331 case BFD_RELOC_IA64_DIR32MSB: width = 32; suffix = "MSB"; break;
11332 case BFD_RELOC_IA64_DIR32LSB: width = 32; suffix = "LSB"; break;
11333 case BFD_RELOC_IA64_DIR64MSB: width = 64; suffix = "MSB"; break;
11334 case BFD_RELOC_IA64_DIR64LSB: width = 64; suffix = "LSB"; break;
11335 case BFD_RELOC_UNUSED: width = 13; break;
11336 case BFD_RELOC_IA64_IMM14: width = 14; break;
11337 case BFD_RELOC_IA64_IMM22: width = 22; break;
11338 case BFD_RELOC_IA64_IMM64: width = 64; suffix = "I"; break;
11339 default: abort ();
11340 }
11341
11342 /* This should be an error, but since previously there wasn't any
11343 diagnostic here, don't make it fail because of this for now. */
11344 as_warn (_("Cannot express %s%d%s relocation"), type, width, suffix);
11345 return r_type;
11346 }
11347 }
11348
11349 /* Here is where we generate the appropriate reloc for pseudo relocation
11350 functions. */
11351 void
11352 ia64_validate_fix (fixS *fix)
11353 {
11354 switch (fix->fx_r_type)
11355 {
11356 case BFD_RELOC_IA64_FPTR64I:
11357 case BFD_RELOC_IA64_FPTR32MSB:
11358 case BFD_RELOC_IA64_FPTR64LSB:
11359 case BFD_RELOC_IA64_LTOFF_FPTR22:
11360 case BFD_RELOC_IA64_LTOFF_FPTR64I:
11361 if (fix->fx_offset != 0)
11362 as_bad_where (fix->fx_file, fix->fx_line,
11363 _("No addend allowed in @fptr() relocation"));
11364 break;
11365 default:
11366 break;
11367 }
11368 }
11369
11370 static void
11371 fix_insn (fixS *fix, const struct ia64_operand *odesc, valueT value)
11372 {
11373 bfd_vma insn[3], t0, t1, control_bits;
11374 const char *err;
11375 char *fixpos;
11376 long slot;
11377
11378 slot = fix->fx_where & 0x3;
11379 fixpos = fix->fx_frag->fr_literal + (fix->fx_where - slot);
11380
11381 /* Bundles are always in little-endian byte order */
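/* A bundle is 128 bits: a 5-bit template in bits 0-4 followed by three
41-bit instruction slots in bits 5-45, 46-86 and 87-127. The two
64-bit words read below are split up accordingly. */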
11382 t0 = bfd_getl64 (fixpos);
11383 t1 = bfd_getl64 (fixpos + 8);
11384 control_bits = t0 & 0x1f;
11385 insn[0] = (t0 >> 5) & 0x1ffffffffffLL;
11386 insn[1] = ((t0 >> 46) & 0x3ffff) | ((t1 & 0x7fffff) << 18);
11387 insn[2] = (t1 >> 23) & 0x1ffffffffffLL;
11388
11389 err = NULL;
11390 if (odesc - elf64_ia64_operands == IA64_OPND_IMMU64)
11391 {
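/* This matches the movl (X2-format) immediate encoding: bits 22..62 of
the value live in the L slot, while the remaining pieces (imm7b,
imm9d, imm5c, ic and the sign bit) are scattered over the X-unit
instruction in slot 2, as assembled below. */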
11392 insn[1] = (value >> 22) & 0x1ffffffffffLL;
11393 insn[2] |= (((value & 0x7f) << 13)
11394 | (((value >> 7) & 0x1ff) << 27)
11395 | (((value >> 16) & 0x1f) << 22)
11396 | (((value >> 21) & 0x1) << 21)
11397 | (((value >> 63) & 0x1) << 36));
11398 }
11399 else if (odesc - elf64_ia64_operands == IA64_OPND_IMMU62)
11400 {
11401 if (value & ~0x3fffffffffffffffULL)
11402 err = _("integer operand out of range");
11403 insn[1] = (value >> 21) & 0x1ffffffffffLL;
11404 insn[2] |= (((value & 0xfffff) << 6) | (((value >> 20) & 0x1) << 36));
11405 }
11406 else if (odesc - elf64_ia64_operands == IA64_OPND_TGT64)
11407 {
11408 value >>= 4;
11409 insn[1] = ((value >> 20) & 0x7fffffffffLL) << 2;
11410 insn[2] |= ((((value >> 59) & 0x1) << 36)
11411 | (((value >> 0) & 0xfffff) << 13));
11412 }
11413 else
11414 err = (*odesc->insert) (odesc, value, insn + slot);
11415
11416 if (err)
11417 as_bad_where (fix->fx_file, fix->fx_line, "%s", err);
11418
11419 t0 = control_bits | (insn[0] << 5) | (insn[1] << 46);
11420 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
11421 number_to_chars_littleendian (fixpos + 0, t0, 8);
11422 number_to_chars_littleendian (fixpos + 8, t1, 8);
11423 }
11424
11425 /* Attempt to simplify or even eliminate a fixup. The return value is
11426 ignored; perhaps it was once meaningful, but now it is historical.
11427 To indicate that a fixup has been eliminated, set FIXP->FX_DONE.
11428
11429 If fixp->fx_addsy is non-NULL, we'll have to generate a reloc entry
11430 (if possible). */
11431
11432 void
11433 md_apply_fix (fixS *fix, valueT *valP, segT seg ATTRIBUTE_UNUSED)
11434 {
11435 char *fixpos;
11436 valueT value = *valP;
11437
11438 fixpos = fix->fx_frag->fr_literal + fix->fx_where;
11439
11440 if (fix->fx_pcrel)
11441 {
11442 switch (fix->fx_r_type)
11443 {
11444 case BFD_RELOC_IA64_PCREL21B: break;
11445 case BFD_RELOC_IA64_PCREL21BI: break;
11446 case BFD_RELOC_IA64_PCREL21F: break;
11447 case BFD_RELOC_IA64_PCREL21M: break;
11448 case BFD_RELOC_IA64_PCREL60B: break;
11449 case BFD_RELOC_IA64_PCREL22: break;
11450 case BFD_RELOC_IA64_PCREL64I: break;
11451 case BFD_RELOC_IA64_PCREL32MSB: break;
11452 case BFD_RELOC_IA64_PCREL32LSB: break;
11453 case BFD_RELOC_IA64_PCREL64MSB: break;
11454 case BFD_RELOC_IA64_PCREL64LSB: break;
11455 default:
11456 fix->fx_r_type = ia64_gen_real_reloc_type (pseudo_func[FUNC_PC_RELATIVE].u.sym,
11457 fix->fx_r_type);
11458 break;
11459 }
11460 }
11461 if (fix->fx_addsy)
11462 {
11463 switch ((unsigned) fix->fx_r_type)
11464 {
11465 case BFD_RELOC_UNUSED:
11466 /* This must be a TAG13 or TAG13b operand. There are no external
11467 relocs defined for them, so we must give an error. */
11468 as_bad_where (fix->fx_file, fix->fx_line,
11469 _("%s must have a constant value"),
11470 elf64_ia64_operands[fix->tc_fix_data.opnd].desc);
11471 fix->fx_done = 1;
11472 return;
11473
11474 case BFD_RELOC_IA64_TPREL14:
11475 case BFD_RELOC_IA64_TPREL22:
11476 case BFD_RELOC_IA64_TPREL64I:
11477 case BFD_RELOC_IA64_LTOFF_TPREL22:
11478 case BFD_RELOC_IA64_LTOFF_DTPMOD22:
11479 case BFD_RELOC_IA64_DTPREL14:
11480 case BFD_RELOC_IA64_DTPREL22:
11481 case BFD_RELOC_IA64_DTPREL64I:
11482 case BFD_RELOC_IA64_LTOFF_DTPREL22:
11483 S_SET_THREAD_LOCAL (fix->fx_addsy);
11484 break;
11485
11486 #ifdef TE_VMS
11487 case DUMMY_RELOC_IA64_SLOTCOUNT:
11488 as_bad_where (fix->fx_file, fix->fx_line,
11489 _("cannot resolve @slotcount parameter"));
11490 fix->fx_done = 1;
11491 return;
11492 #endif
11493
11494 default:
11495 break;
11496 }
11497 }
11498 else if (fix->tc_fix_data.opnd == IA64_OPND_NIL)
11499 {
11500 #ifdef TE_VMS
11501 if (fix->fx_r_type == DUMMY_RELOC_IA64_SLOTCOUNT)
11502 {
11503 /* For @slotcount, convert an address difference to a difference
11504 in instruction slots. */
11505 valueT v;
11506
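/* Each 16-byte bundle holds three instruction slots, so the whole
bundles contribute (value >> 4) * 3 slots; the switch below then
accounts for the slot number encoded in the low four bits of the
final, possibly partial, bundle. */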
11507 v = (value >> 4) * 3;
11508 switch (value & 0x0f)
11509 {
11510 case 0:
11511 case 1:
11512 case 2:
11513 v += value & 0x0f;
11514 break;
11515 case 0x0f:
11516 v += 2;
11517 break;
11518 case 0x0e:
11519 v += 1;
11520 break;
11521 default:
11522 as_bad (_("invalid @slotcount value"));
11523 }
11524 value = v;
11525 }
11526 #endif
11527
11528 if (fix->tc_fix_data.bigendian)
11529 number_to_chars_bigendian (fixpos, value, fix->fx_size);
11530 else
11531 number_to_chars_littleendian (fixpos, value, fix->fx_size);
11532 fix->fx_done = 1;
11533 }
11534 else
11535 {
11536 fix_insn (fix, elf64_ia64_operands + fix->tc_fix_data.opnd, value);
11537 fix->fx_done = 1;
11538 }
11539 }
11540
11541 /* Generate the BFD reloc to be stuck in the object file from the
11542 fixup used internally in the assembler. */
11543
11544 arelent *
11545 tc_gen_reloc (asection *sec ATTRIBUTE_UNUSED, fixS *fixp)
11546 {
11547 arelent *reloc;
11548
11549 reloc = XNEW (arelent);
11550 reloc->sym_ptr_ptr = XNEW (asymbol *);
11551 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
11552 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
11553 reloc->addend = fixp->fx_offset;
11554 reloc->howto = bfd_reloc_type_lookup (stdoutput, fixp->fx_r_type);
11555
11556 if (!reloc->howto)
11557 {
11558 as_bad_where (fixp->fx_file, fixp->fx_line,
11559 _("Cannot represent %s relocation in object file"),
11560 bfd_get_reloc_code_name (fixp->fx_r_type));
11561 free (reloc);
11562 return NULL;
11563 }
11564 return reloc;
11565 }
11566
11567 /* Turn a string in input_line_pointer into a floating point constant
11568 of type TYPE, and store the appropriate bytes in *LIT. The number
11569 of LITTLENUMS emitted is stored in *SIZE. An error message is
11570 returned, or NULL on OK. */
11571
11572 const char *
11573 md_atof (int type, char *lit, int *size)
11574 {
11575 LITTLENUM_TYPE words[MAX_LITTLENUMS];
11576 char *t;
11577 int prec;
11578
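/* prec below counts 16-bit LITTLENUMs: 2 for a 4-byte single, 4 for an
8-byte double, 5 for a 10-byte extended value. */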
11579 switch (type)
11580 {
11581 /* IEEE floats */
11582 case 'f':
11583 case 'F':
11584 case 's':
11585 case 'S':
11586 prec = 2;
11587 break;
11588
11589 case 'd':
11590 case 'D':
11591 case 'r':
11592 case 'R':
11593 prec = 4;
11594 break;
11595
11596 case 'x':
11597 case 'X':
11598 case 'p':
11599 case 'P':
11600 prec = 5;
11601 break;
11602
11603 default:
11604 *size = 0;
11605 return _("Unrecognized or unsupported floating point constant");
11606 }
11607 t = atof_ieee (input_line_pointer, type, words);
11608 if (t)
11609 input_line_pointer = t;
11610
11611 (*ia64_float_to_chars) (lit, words, prec);
11612
11613 if (type == 'X')
11614 {
11615 /* It is a 10-byte floating point value with 6 bytes of padding. */
11616 memset (&lit [10], 0, 6);
11617 *size = 8 * sizeof (LITTLENUM_TYPE);
11618 }
11619 else
11620 *size = prec * sizeof (LITTLENUM_TYPE);
11621
11622 return NULL;
11623 }
11624
11625 /* Handle ia64 specific semantics of the align directive. */
11626
11627 void
11628 ia64_md_do_align (int n ATTRIBUTE_UNUSED,
11629 const char *fill ATTRIBUTE_UNUSED,
11630 int len ATTRIBUTE_UNUSED,
11631 int max ATTRIBUTE_UNUSED)
11632 {
11633 if (subseg_text_p (now_seg))
11634 ia64_flush_insns ();
11635 }
11636
11637 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
11638 of an rs_align_code fragment. */
11639
11640 void
11641 ia64_handle_align (fragS *fragp)
11642 {
11643 int bytes;
11644 char *p;
11645 const unsigned char *nop_type;
11646
11647 if (fragp->fr_type != rs_align_code)
11648 return;
11649
11650 /* Check if this frag has to end with a stop bit. */
11651 nop_type = fragp->tc_frag_data ? le_nop_stop : le_nop;
11652
11653 bytes = fragp->fr_next->fr_address - fragp->fr_address - fragp->fr_fix;
11654 p = fragp->fr_literal + fragp->fr_fix;
11655
11656 /* If no padding is needed, check whether we still need a stop bit. */
11657 if (!bytes && fragp->tc_frag_data)
11658 {
11659 if (fragp->fr_fix < 16)
11660 #if 1
11661 /* FIXME: It won't work with
11662 .align 16
11663 alloc r32=ar.pfs,1,2,4,0
11664 */
11665 ;
11666 #else
11667 as_bad_where (fragp->fr_file, fragp->fr_line,
11668 _("Can't add stop bit to mark end of instruction group"));
11669 #endif
11670 else
11671 /* Bundles are always in little-endian byte order. Make sure
11672 the previous bundle has the stop bit. */
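/* As far as the defined templates go, the low bit of the 5-bit
template field (stored in the first, least significant, byte of the
little-endian bundle) selects the variant with a trailing stop,
which is why OR-ing in 1 is sufficient here. */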
11673 *(p - 16) |= 1;
11674 }
11675
11676 /* Make sure we are on a 16-byte boundary, in case someone has been
11677 putting data into a text section. */
11678 if (bytes & 15)
11679 {
11680 int fix = bytes & 15;
11681 memset (p, 0, fix);
11682 p += fix;
11683 bytes -= fix;
11684 fragp->fr_fix += fix;
11685 }
11686
11687 /* Instruction bundles are always little-endian. */
11688 memcpy (p, nop_type, 16);
11689 fragp->fr_var = 16;
11690 }
11691
11692 static void
11693 ia64_float_to_chars_bigendian (char *lit, LITTLENUM_TYPE *words,
11694 int prec)
11695 {
11696 while (prec--)
11697 {
11698 number_to_chars_bigendian (lit, (long) (*words++),
11699 sizeof (LITTLENUM_TYPE));
11700 lit += sizeof (LITTLENUM_TYPE);
11701 }
11702 }
11703
11704 static void
11705 ia64_float_to_chars_littleendian (char *lit, LITTLENUM_TYPE *words,
11706 int prec)
11707 {
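/* atof_ieee produces the LITTLENUMs most significant first, so for a
little-endian image we emit them in reverse order (words[prec] below
indexes from the end after the decrement). */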
11708 while (prec--)
11709 {
11710 number_to_chars_littleendian (lit, (long) (words[prec]),
11711 sizeof (LITTLENUM_TYPE));
11712 lit += sizeof (LITTLENUM_TYPE);
11713 }
11714 }
11715
11716 void
11717 ia64_elf_section_change_hook (void)
11718 {
11719 if (elf_section_type (now_seg) == SHT_IA_64_UNWIND
11720 && elf_linked_to_section (now_seg) == NULL)
11721 elf_linked_to_section (now_seg) = text_section;
11722 dot_byteorder (-1);
11723 }
11724
11725 /* Check if a label should be made global. */
11726 void
11727 ia64_check_label (symbolS *label)
11728 {
11729 if (*input_line_pointer == ':')
11730 {
11731 S_SET_EXTERNAL (label);
11732 input_line_pointer++;
11733 }
11734 }
11735
11736 /* Used to remember where .alias and .secalias directives are seen. We
11737 will rename symbol and section names when we are about to output
11738 the relocatable file. */
11739 struct alias
11740 {
11741 const char *file; /* The file where the directive is seen. */
11742 unsigned int line; /* The line number the directive is at. */
11743 const char *name; /* The original name of the symbol. */
11744 };
11745
11746 /* Called for .alias and .secalias directives. If SECTION is 1, it is
11747 .secalias. Otherwise, it is .alias. */
11748 static void
11749 dot_alias (int section)
11750 {
11751 char *name, *alias;
11752 char delim;
11753 char *end_name;
11754 int len;
11755 const char *error_string;
11756 struct alias *h;
11757 const char *a;
11758 struct hash_control *ahash, *nhash;
11759 const char *kind;
11760
11761 delim = get_symbol_name (&name);
11762 end_name = input_line_pointer;
11763 *end_name = delim;
11764
11765 if (name == end_name)
11766 {
11767 as_bad (_("expected symbol name"));
11768 ignore_rest_of_line ();
11769 return;
11770 }
11771
11772 SKIP_WHITESPACE_AFTER_NAME ();
11773
11774 if (*input_line_pointer != ',')
11775 {
11776 *end_name = 0;
11777 as_bad (_("expected comma after \"%s\""), name);
11778 *end_name = delim;
11779 ignore_rest_of_line ();
11780 return;
11781 }
11782
11783 input_line_pointer++;
11784 *end_name = 0;
11785 ia64_canonicalize_symbol_name (name);
11786
11787 /* We call demand_copy_C_string to check if the alias string is valid.
11788 There should be a closing `"' and no `\0' in the string. */
11789 alias = demand_copy_C_string (&len);
11790 if (alias == NULL)
11791 {
11792 ignore_rest_of_line ();
11793 return;
11794 }
11795
11796 /* Make a copy of name string. */
11797 len = strlen (name) + 1;
11798 obstack_grow (&notes, name, len);
11799 name = obstack_finish (&notes);
11800
11801 if (section)
11802 {
11803 kind = "section";
11804 ahash = secalias_hash;
11805 nhash = secalias_name_hash;
11806 }
11807 else
11808 {
11809 kind = "symbol";
11810 ahash = alias_hash;
11811 nhash = alias_name_hash;
11812 }
11813
11814 /* Check if alias has been used before. */
11815 h = (struct alias *) hash_find (ahash, alias);
11816 if (h)
11817 {
11818 if (strcmp (h->name, name))
11819 as_bad (_("`%s' is already the alias of %s `%s'"),
11820 alias, kind, h->name);
11821 goto out;
11822 }
11823
11824 /* Check if name already has an alias. */
11825 a = (const char *) hash_find (nhash, name);
11826 if (a)
11827 {
11828 if (strcmp (a, alias))
11829 as_bad (_("%s `%s' already has an alias `%s'"), kind, name, a);
11830 goto out;
11831 }
11832
11833 h = XNEW (struct alias);
11834 h->file = as_where (&h->line);
11835 h->name = name;
11836
11837 error_string = hash_jam (ahash, alias, (void *) h);
11838 if (error_string)
11839 {
11840 as_fatal (_("inserting \"%s\" into %s alias hash table failed: %s"),
11841 alias, kind, error_string);
11842 goto out;
11843 }
11844
11845 error_string = hash_jam (nhash, name, (void *) alias);
11846 if (error_string)
11847 {
11848 as_fatal (_("inserting \"%s\" into %s name hash table failed: %s"),
11849 alias, kind, error_string);
11850 out:
11851 obstack_free (&notes, name);
11852 obstack_free (&notes, alias);
11853 }
11854
11855 demand_empty_rest_of_line ();
11856 }
11857
11858 /* It renames the original symbol name to its alias. */
11859 static void
11860 do_alias (const char *alias, void *value)
11861 {
11862 struct alias *h = (struct alias *) value;
11863 symbolS *sym = symbol_find (h->name);
11864
11865 if (sym == NULL)
11866 {
11867 #ifdef TE_VMS
11868 /* VMS code uses .alias extensively to alias CRTL functions to the
11869 same names with a decc$ prefix. Sometimes the function gets
11870 optimized away and a warning results, which should be suppressed. */
11871 if (strncmp (alias, "decc$", 5) != 0)
11872 #endif
11873 as_warn_where (h->file, h->line,
11874 _("symbol `%s' aliased to `%s' is not used"),
11875 h->name, alias);
11876 }
11877 else
11878 S_SET_NAME (sym, (char *) alias);
11879 }
11880
11881 /* Called from write_object_file. */
11882 void
11883 ia64_adjust_symtab (void)
11884 {
11885 hash_traverse (alias_hash, do_alias);
11886 }
11887
11888 /* It renames the original section name to its alias. */
11889 static void
11890 do_secalias (const char *alias, void *value)
11891 {
11892 struct alias *h = (struct alias *) value;
11893 segT sec = bfd_get_section_by_name (stdoutput, h->name);
11894
11895 if (sec == NULL)
11896 as_warn_where (h->file, h->line,
11897 _("section `%s' aliased to `%s' is not used"),
11898 h->name, alias);
11899 else
11900 sec->name = alias;
11901 }
11902
11903 /* Called from write_object_file. */
11904 void
11905 ia64_frob_file (void)
11906 {
11907 hash_traverse (secalias_hash, do_secalias);
11908 }
11909
11910 #ifdef TE_VMS
11911 #define NT_VMS_MHD 1
11912 #define NT_VMS_LNM 2
11913
11914 /* Integrity VMS 8.x identifies its ELF modules with a standard ELF
11915 .note section. */
11916
11917 /* Manufacture a VMS-like time string. */
11918 static void
11919 get_vms_time (char *Now)
11920 {
11921 char *pnt;
11922 time_t timeb;
11923
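/* Rough sketch of what follows: ctime() yields the fixed-format string
"Www Mmm dd hh:mm:ss yyyy\n". The assignments below chop it into
fields (day at offset 8, month at 4, year at 20, "hh:mm" at 11) and
reassemble them as e.g. "30-Jun-2019 21:49". */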
11924 time (&timeb);
11925 pnt = ctime (&timeb);
11926 pnt[3] = 0;
11927 pnt[7] = 0;
11928 pnt[10] = 0;
11929 pnt[16] = 0;
11930 pnt[24] = 0;
11931 sprintf (Now, "%2s-%3s-%s %s", pnt + 8, pnt + 4, pnt + 20, pnt + 11);
11932 }
11933
11934 void
11935 ia64_vms_note (void)
11936 {
11937 char *p;
11938 asection *seg = now_seg;
11939 subsegT subseg = now_subseg;
11940 asection *secp = NULL;
11941 char *bname;
11942 char buf [256];
11943 symbolS *sym;
11944
11945 /* Create the .note section. */
11946
11947 secp = subseg_new (".note", 0);
11948 bfd_set_section_flags (secp, SEC_HAS_CONTENTS | SEC_READONLY);
11949
11950 /* Module header note (MHD). */
11951 bname = xstrdup (lbasename (out_file_name));
11952 if ((p = strrchr (bname, '.')))
11953 *p = '\0';
11954
11955 /* VMS note header is 24 bytes long. */
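/* Presumably the usual ELF note header triple -- name size, descriptor
size, type -- only with 8-byte fields on VMS. */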
11956 p = frag_more (8 + 8 + 8);
11957 number_to_chars_littleendian (p + 0, 8, 8);
11958 number_to_chars_littleendian (p + 8, 40 + strlen (bname), 8);
11959 number_to_chars_littleendian (p + 16, NT_VMS_MHD, 8);
11960
11961 p = frag_more (8);
11962 strcpy (p, "IPF/VMS");
11963
11964 p = frag_more (17 + 17 + strlen (bname) + 1 + 5);
11965 get_vms_time (p);
11966 strcpy (p + 17, "24-FEB-2005 15:00");
11967 p += 17 + 17;
11968 strcpy (p, bname);
11969 p += strlen (bname) + 1;
11970 free (bname);
11971 strcpy (p, "V1.0");
11972
11973 frag_align (3, 0, 0);
11974
11975 /* Language processor name note. */
11976 sprintf (buf, "GNU assembler version %s (%s) using BFD version %s",
11977 VERSION, TARGET_ALIAS, BFD_VERSION_STRING);
11978
11979 p = frag_more (8 + 8 + 8);
11980 number_to_chars_littleendian (p + 0, 8, 8);
11981 number_to_chars_littleendian (p + 8, strlen (buf) + 1, 8);
11982 number_to_chars_littleendian (p + 16, NT_VMS_LNM, 8);
11983
11984 p = frag_more (8);
11985 strcpy (p, "IPF/VMS");
11986
11987 p = frag_more (strlen (buf) + 1);
11988 strcpy (p, buf);
11989
11990 frag_align (3, 0, 0);
11991
11992 secp = subseg_new (".vms_display_name_info", 0);
11993 bfd_set_section_flags (secp, SEC_HAS_CONTENTS | SEC_READONLY);
11994
11995 /* This symbol should be passed on the command line and be variable
11996 according to language. */
11997 sym = symbol_new ("__gnat_vms_display_name@gnat_demangler_rtl",
11998 absolute_section, 0, &zero_address_frag);
11999 symbol_table_insert (sym);
12000 symbol_get_bfdsym (sym)->flags |= BSF_DEBUGGING | BSF_DYNAMIC;
12001
12002 p = frag_more (4);
12003 /* Format 3 of VMS demangler Spec. */
12004 number_to_chars_littleendian (p, 3, 4);
12005
12006 p = frag_more (4);
12007 /* Placeholder for the symbol table index of the above symbol. */
12008 number_to_chars_littleendian (p, -1, 4);
12009
12010 frag_align (3, 0, 0);
12011
12012 /* We probably can't restore the current segment, for there likely
12013 isn't one yet... */
12014 if (seg && subseg)
12015 subseg_set (seg, subseg);
12016 }
12017
12018 #endif /* TE_VMS */