1 /* tc-ia64.c -- Assembler for the HP/Intel IA-64 architecture.
2 Copyright (C) 1998-2020 Free Software Foundation, Inc.
3 Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
4
5 This file is part of GAS, the GNU Assembler.
6
7 GAS is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GAS is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GAS; see the file COPYING. If not, write to
19 the Free Software Foundation, 51 Franklin Street - Fifth Floor,
20 Boston, MA 02110-1301, USA. */
21
22 /*
23 TODO:
24
25 - optional operands
26 - directives:
27 .eb
28 .estate
29 .lb
30 .popsection
31 .previous
32 .psr
33 .pushsection
34 - labels are wrong if automatic alignment is introduced
35 	 (e.g., check out the second real10 definition in test-data.s)
36 - DV-related stuff:
37 <reg>.safe_across_calls and any other DV-related directives I don't
38 have documentation for.
39 verify mod-sched-brs reads/writes are checked/marked (and other
40 notes)
41
42 */
43
44 #include "as.h"
45 #include "safe-ctype.h"
46 #include "dwarf2dbg.h"
47 #include "subsegs.h"
48
49 #include "opcode/ia64.h"
50
51 #include "elf/ia64.h"
52 #include "bfdver.h"
53 #include <time.h>
54
55 #ifdef HAVE_LIMITS_H
56 #include <limits.h>
57 #endif
58
59 #define NELEMS(a) ((int) (sizeof (a)/sizeof ((a)[0])))
60
61 /* Some systems define MIN in, e.g., param.h. */
62 #undef MIN
63 #define MIN(a,b) ((a) < (b) ? (a) : (b))
64
65 #define NUM_SLOTS 4
66 #define PREV_SLOT md.slot[(md.curr_slot + NUM_SLOTS - 1) % NUM_SLOTS]
67 #define CURR_SLOT md.slot[md.curr_slot]
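/* Example of the ring-buffer indexing above: with NUM_SLOTS == 4 and
   md.curr_slot == 0, PREV_SLOT refers to md.slot[3]; adding NUM_SLOTS before
   taking the modulus keeps the index non-negative when wrapping around.  */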
68
69 #define O_pseudo_fixup (O_max + 1)
70
71 enum special_section
72 {
73 /* IA-64 ABI section pseudo-ops. */
74 SPECIAL_SECTION_BSS = 0,
75 SPECIAL_SECTION_SBSS,
76 SPECIAL_SECTION_SDATA,
77 SPECIAL_SECTION_RODATA,
78 SPECIAL_SECTION_COMMENT,
79 SPECIAL_SECTION_UNWIND,
80 SPECIAL_SECTION_UNWIND_INFO,
81 /* HPUX specific section pseudo-ops. */
82 SPECIAL_SECTION_INIT_ARRAY,
83 SPECIAL_SECTION_FINI_ARRAY,
84 };
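/* Note: the enumerators above are used as indices into
   special_section_name[] further down, so the two lists must be kept in the
   same order.  */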
85
86 enum reloc_func
87 {
88 FUNC_DTP_MODULE,
89 FUNC_DTP_RELATIVE,
90 FUNC_FPTR_RELATIVE,
91 FUNC_GP_RELATIVE,
92 FUNC_LT_RELATIVE,
93 FUNC_LT_RELATIVE_X,
94 FUNC_PC_RELATIVE,
95 FUNC_PLT_RELATIVE,
96 FUNC_SEC_RELATIVE,
97 FUNC_SEG_RELATIVE,
98 FUNC_TP_RELATIVE,
99 FUNC_LTV_RELATIVE,
100 FUNC_LT_FPTR_RELATIVE,
101 FUNC_LT_DTP_MODULE,
102 FUNC_LT_DTP_RELATIVE,
103 FUNC_LT_TP_RELATIVE,
104 FUNC_IPLT_RELOC,
105 #ifdef TE_VMS
106 FUNC_SLOTCOUNT_RELOC,
107 #endif
108 };
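/* Note: these FUNC_* values double as indices into the leading
   PSEUDO_FUNC_RELOC entries of pseudo_func[] below ("these must come
   first!"), so the two orders have to match.  */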
109
110 enum reg_symbol
111 {
112 REG_GR = 0,
113 REG_FR = (REG_GR + 128),
114 REG_AR = (REG_FR + 128),
115 REG_CR = (REG_AR + 128),
116 REG_DAHR = (REG_CR + 128),
117 REG_P = (REG_DAHR + 8),
118 REG_BR = (REG_P + 64),
119 REG_IP = (REG_BR + 8),
120 REG_CFM,
121 REG_PR,
122 REG_PR_ROT,
123 REG_PSR,
124 REG_PSR_L,
125 REG_PSR_UM,
126 /* The following are pseudo-registers for use by gas only. */
127 IND_CPUID,
128 IND_DBR,
129 IND_DTR,
130 IND_ITR,
131 IND_IBR,
132 IND_MSR,
133 IND_PKR,
134 IND_PMC,
135 IND_PMD,
136 IND_DAHR,
137 IND_RR,
138 /* The following pseudo-registers are used for unwind directives only: */
139 REG_PSP,
140 REG_PRIUNAT,
141 REG_NUM
142 };
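/* A register symbol carries one of the base values above plus the register
   number, e.g. r5 is represented as REG_GR + 5, f3 as REG_FR + 3 and p7 as
   REG_P + 7.  */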
143
144 enum dynreg_type
145 {
146 DYNREG_GR = 0, /* dynamic general purpose register */
147 DYNREG_FR, /* dynamic floating point register */
148 DYNREG_PR, /* dynamic predicate register */
149 DYNREG_NUM_TYPES
150 };
151
152 enum operand_match_result
153 {
154 OPERAND_MATCH,
155 OPERAND_OUT_OF_RANGE,
156 OPERAND_MISMATCH
157 };
158
159 /* On the ia64, we can't know the address of a text label until the
160 instructions are packed into a bundle. To handle this, we keep
161 track of the list of labels that appear in front of each
162 instruction. */
163 struct label_fix
164 {
165 struct label_fix *next;
166 struct symbol *sym;
167 bfd_boolean dw2_mark_labels;
168 };
169
170 #ifdef TE_VMS
171 /* An internally used relocation. */
172 #define DUMMY_RELOC_IA64_SLOTCOUNT (BFD_RELOC_UNUSED + 1)
173 #endif
174
175 /* This is the endianness of the current section. */
176 extern int target_big_endian;
177
178 /* This is the default endianness. */
179 static int default_big_endian = TARGET_BYTES_BIG_ENDIAN;
180
181 void (*ia64_number_to_chars) (char *, valueT, int);
182
183 static void ia64_float_to_chars_bigendian (char *, LITTLENUM_TYPE *, int);
184 static void ia64_float_to_chars_littleendian (char *, LITTLENUM_TYPE *, int);
185
186 static void (*ia64_float_to_chars) (char *, LITTLENUM_TYPE *, int);
187
188 static htab_t alias_hash;
189 static htab_t alias_name_hash;
190 static htab_t secalias_hash;
191 static htab_t secalias_name_hash;
192
193 /* List of chars besides those in app.c:symbol_chars that can start an
194 operand. Used to prevent the scrubber eating vital white-space. */
195 const char ia64_symbol_chars[] = "@?";
196
197 /* Characters which always start a comment. */
198 const char comment_chars[] = "";
199
200 /* Characters which start a comment at the beginning of a line. */
201 const char line_comment_chars[] = "#";
202
203 /* Characters which may be used to separate multiple commands on a
204 single line. */
205 const char line_separator_chars[] = ";{}";
206
207 /* Characters which are used to indicate an exponent in a floating
208 point number. */
209 const char EXP_CHARS[] = "eE";
210
211 /* Characters which mean that a number is a floating point constant,
212 as in 0d1.0. */
213 const char FLT_CHARS[] = "rRsSfFdDxXpP";
214
215 /* ia64-specific option processing: */
216
217 const char *md_shortopts = "m:N:x::";
218
219 struct option md_longopts[] =
220 {
221 #define OPTION_MCONSTANT_GP (OPTION_MD_BASE + 1)
222 {"mconstant-gp", no_argument, NULL, OPTION_MCONSTANT_GP},
223 #define OPTION_MAUTO_PIC (OPTION_MD_BASE + 2)
224 {"mauto-pic", no_argument, NULL, OPTION_MAUTO_PIC}
225 };
226
227 size_t md_longopts_size = sizeof (md_longopts);
228
229 static struct
230 {
231 htab_t pseudo_hash; /* pseudo opcode hash table */
232 htab_t reg_hash; /* register name hash table */
233 htab_t dynreg_hash; /* dynamic register hash table */
234 htab_t const_hash; /* constant hash table */
235 htab_t entry_hash; /* code entry hint hash table */
236
237 /* If X_op is != O_absent, the register name for the instruction's
238 qualifying predicate. If NULL, p0 is assumed for instructions
239 	 that are predicable.  */
240 expressionS qp;
241
242 /* Optimize for which CPU. */
243 enum
244 {
245 itanium1,
246 itanium2
247 } tune;
248
249 /* What to do when hint.b is used. */
250 enum
251 {
252 hint_b_error,
253 hint_b_warning,
254 hint_b_ok
255 } hint_b;
256
257 unsigned int
258 manual_bundling : 1,
259 debug_dv: 1,
260 detect_dv: 1,
261 explicit_mode : 1, /* which mode we're in */
262 default_explicit_mode : 1, /* which mode is the default */
263 mode_explicitly_set : 1, /* was the current mode explicitly set? */
264 auto_align : 1,
265 keep_pending_output : 1;
266
267 /* What to do when something is wrong with unwind directives. */
268 enum
269 {
270 unwind_check_warning,
271 unwind_check_error
272 } unwind_check;
273
274 /* Each bundle consists of up to three instructions. We keep
275 	 track of the four most recent instructions so we can correctly set
276 the end_of_insn_group for the last instruction in a bundle. */
277 int curr_slot;
278 int num_slots_in_use;
279 struct slot
280 {
281 unsigned int
282 end_of_insn_group : 1,
283 manual_bundling_on : 1,
284 manual_bundling_off : 1,
285 loc_directive_seen : 1;
286 signed char user_template; /* user-selected template, if any */
287 unsigned char qp_regno; /* qualifying predicate */
288 /* This duplicates a good fraction of "struct fix" but we
289 can't use a "struct fix" instead since we can't call
290 fix_new_exp() until we know the address of the instruction. */
291 int num_fixups;
292 struct insn_fix
293 {
294 bfd_reloc_code_real_type code;
295 enum ia64_opnd opnd; /* type of operand in need of fix */
296 unsigned int is_pcrel : 1; /* is operand pc-relative? */
297 expressionS expr; /* the value to be inserted */
298 }
299 fixup[2]; /* at most two fixups per insn */
300 struct ia64_opcode *idesc;
301 struct label_fix *label_fixups;
302 struct label_fix *tag_fixups;
303 struct unw_rec_list *unwind_record; /* Unwind directive. */
304 expressionS opnd[6];
305 const char *src_file;
306 unsigned int src_line;
307 struct dwarf2_line_info debug_line;
308 }
309 slot[NUM_SLOTS];
310
311 segT last_text_seg;
312
313 struct dynreg
314 {
315 struct dynreg *next; /* next dynamic register */
316 const char *name;
317 unsigned short base; /* the base register number */
318 unsigned short num_regs; /* # of registers in this set */
319 }
320 *dynreg[DYNREG_NUM_TYPES], in, loc, out, rot;
321
322 flagword flags; /* ELF-header flags */
323
324 struct mem_offset {
325 unsigned hint:1; /* is this hint currently valid? */
326 bfd_vma offset; /* mem.offset offset */
327 bfd_vma base; /* mem.offset base */
328 } mem_offset;
329
330 int path; /* number of alt. entry points seen */
331 const char **entry_labels; /* labels of all alternate paths in
332 the current DV-checking block. */
333 int maxpaths; /* size currently allocated for
334 entry_labels */
335
336 int pointer_size; /* size in bytes of a pointer */
337 int pointer_size_shift; /* shift size of a pointer for alignment */
338
339 symbolS *indregsym[IND_RR - IND_CPUID + 1];
340 }
341 md;
342
343 /* These are not const, because they are modified to MMI for non-itanium1
344 targets below. */
345 /* MFI bundle of nops. */
346 static unsigned char le_nop[16] =
347 {
348 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
349 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00
350 };
351 /* MFI bundle of nops with stop-bit. */
352 static unsigned char le_nop_stop[16] =
353 {
354 0x0d, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
355 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00
356 };
357
358 /* application registers: */
359
360 #define AR_K0 0
361 #define AR_K7 7
362 #define AR_RSC 16
363 #define AR_BSP 17
364 #define AR_BSPSTORE 18
365 #define AR_RNAT 19
366 #define AR_FCR 21
367 #define AR_EFLAG 24
368 #define AR_CSD 25
369 #define AR_SSD 26
370 #define AR_CFLG 27
371 #define AR_FSR 28
372 #define AR_FIR 29
373 #define AR_FDR 30
374 #define AR_CCV 32
375 #define AR_UNAT 36
376 #define AR_FPSR 40
377 #define AR_ITC 44
378 #define AR_RUC 45
379 #define AR_PFS 64
380 #define AR_LC 65
381 #define AR_EC 66
382
383 static const struct
384 {
385 const char *name;
386 unsigned int regnum;
387 }
388 ar[] =
389 {
390 {"ar.k0", AR_K0}, {"ar.k1", AR_K0 + 1},
391 {"ar.k2", AR_K0 + 2}, {"ar.k3", AR_K0 + 3},
392 {"ar.k4", AR_K0 + 4}, {"ar.k5", AR_K0 + 5},
393 {"ar.k6", AR_K0 + 6}, {"ar.k7", AR_K7},
394 {"ar.rsc", AR_RSC}, {"ar.bsp", AR_BSP},
395 {"ar.bspstore", AR_BSPSTORE}, {"ar.rnat", AR_RNAT},
396 {"ar.fcr", AR_FCR}, {"ar.eflag", AR_EFLAG},
397 {"ar.csd", AR_CSD}, {"ar.ssd", AR_SSD},
398 {"ar.cflg", AR_CFLG}, {"ar.fsr", AR_FSR},
399 {"ar.fir", AR_FIR}, {"ar.fdr", AR_FDR},
400 {"ar.ccv", AR_CCV}, {"ar.unat", AR_UNAT},
401 {"ar.fpsr", AR_FPSR}, {"ar.itc", AR_ITC},
402 {"ar.ruc", AR_RUC}, {"ar.pfs", AR_PFS},
403 {"ar.lc", AR_LC}, {"ar.ec", AR_EC},
404 };
405
406 /* control registers: */
407
408 #define CR_DCR 0
409 #define CR_ITM 1
410 #define CR_IVA 2
411 #define CR_PTA 8
412 #define CR_GPTA 9
413 #define CR_IPSR 16
414 #define CR_ISR 17
415 #define CR_IIP 19
416 #define CR_IFA 20
417 #define CR_ITIR 21
418 #define CR_IIPA 22
419 #define CR_IFS 23
420 #define CR_IIM 24
421 #define CR_IHA 25
422 #define CR_IIB0 26
423 #define CR_IIB1 27
424 #define CR_LID 64
425 #define CR_IVR 65
426 #define CR_TPR 66
427 #define CR_EOI 67
428 #define CR_IRR0 68
429 #define CR_IRR3 71
430 #define CR_ITV 72
431 #define CR_PMV 73
432 #define CR_CMCV 74
433 #define CR_LRR0 80
434 #define CR_LRR1 81
435
436 static const struct
437 {
438 const char *name;
439 unsigned int regnum;
440 }
441 cr[] =
442 {
443 {"cr.dcr", CR_DCR},
444 {"cr.itm", CR_ITM},
445 {"cr.iva", CR_IVA},
446 {"cr.pta", CR_PTA},
447 {"cr.gpta", CR_GPTA},
448 {"cr.ipsr", CR_IPSR},
449 {"cr.isr", CR_ISR},
450 {"cr.iip", CR_IIP},
451 {"cr.ifa", CR_IFA},
452 {"cr.itir", CR_ITIR},
453 {"cr.iipa", CR_IIPA},
454 {"cr.ifs", CR_IFS},
455 {"cr.iim", CR_IIM},
456 {"cr.iha", CR_IHA},
457 {"cr.iib0", CR_IIB0},
458 {"cr.iib1", CR_IIB1},
459 {"cr.lid", CR_LID},
460 {"cr.ivr", CR_IVR},
461 {"cr.tpr", CR_TPR},
462 {"cr.eoi", CR_EOI},
463 {"cr.irr0", CR_IRR0},
464 {"cr.irr1", CR_IRR0 + 1},
465 {"cr.irr2", CR_IRR0 + 2},
466 {"cr.irr3", CR_IRR3},
467 {"cr.itv", CR_ITV},
468 {"cr.pmv", CR_PMV},
469 {"cr.cmcv", CR_CMCV},
470 {"cr.lrr0", CR_LRR0},
471 {"cr.lrr1", CR_LRR1}
472 };
473
474 #define PSR_MFL 4
475 #define PSR_IC 13
476 #define PSR_DFL 18
477 #define PSR_CPL 32
478
479 static const struct const_desc
480 {
481 const char *name;
482 valueT value;
483 }
484 const_bits[] =
485 {
486 /* PSR constant masks: */
487
488 /* 0: reserved */
489 {"psr.be", ((valueT) 1) << 1},
490 {"psr.up", ((valueT) 1) << 2},
491 {"psr.ac", ((valueT) 1) << 3},
492 {"psr.mfl", ((valueT) 1) << 4},
493 {"psr.mfh", ((valueT) 1) << 5},
494 /* 6-12: reserved */
495 {"psr.ic", ((valueT) 1) << 13},
496 {"psr.i", ((valueT) 1) << 14},
497 {"psr.pk", ((valueT) 1) << 15},
498 /* 16: reserved */
499 {"psr.dt", ((valueT) 1) << 17},
500 {"psr.dfl", ((valueT) 1) << 18},
501 {"psr.dfh", ((valueT) 1) << 19},
502 {"psr.sp", ((valueT) 1) << 20},
503 {"psr.pp", ((valueT) 1) << 21},
504 {"psr.di", ((valueT) 1) << 22},
505 {"psr.si", ((valueT) 1) << 23},
506 {"psr.db", ((valueT) 1) << 24},
507 {"psr.lp", ((valueT) 1) << 25},
508 {"psr.tb", ((valueT) 1) << 26},
509 {"psr.rt", ((valueT) 1) << 27},
510 /* 28-31: reserved */
511 /* 32-33: cpl (current privilege level) */
512 {"psr.is", ((valueT) 1) << 34},
513 {"psr.mc", ((valueT) 1) << 35},
514 {"psr.it", ((valueT) 1) << 36},
515 {"psr.id", ((valueT) 1) << 37},
516 {"psr.da", ((valueT) 1) << 38},
517 {"psr.dd", ((valueT) 1) << 39},
518 {"psr.ss", ((valueT) 1) << 40},
519 /* 41-42: ri (restart instruction) */
520 {"psr.ed", ((valueT) 1) << 43},
521 {"psr.bn", ((valueT) 1) << 44},
522 };
523
524 /* indirect register-sets/memory: */
525
526 static const struct
527 {
528 const char *name;
529 unsigned int regnum;
530 }
531 indirect_reg[] =
532 {
533 { "CPUID", IND_CPUID },
534 { "cpuid", IND_CPUID },
535 { "dbr", IND_DBR },
536 { "dtr", IND_DTR },
537 { "itr", IND_ITR },
538 { "ibr", IND_IBR },
539 { "msr", IND_MSR },
540 { "pkr", IND_PKR },
541 { "pmc", IND_PMC },
542 { "pmd", IND_PMD },
543 { "dahr", IND_DAHR },
544 { "rr", IND_RR },
545 };
546
547 /* Pseudo functions used to indicate relocation types (these functions
548 	 start with an at sign (@)).  */
549 static struct
550 {
551 const char *name;
552 enum pseudo_type
553 {
554 PSEUDO_FUNC_NONE,
555 PSEUDO_FUNC_RELOC,
556 PSEUDO_FUNC_CONST,
557 PSEUDO_FUNC_REG,
558 PSEUDO_FUNC_FLOAT
559 }
560 type;
561 union
562 {
563 unsigned long ival;
564 symbolS *sym;
565 }
566 u;
567 }
568 pseudo_func[] =
569 {
570 /* reloc pseudo functions (these must come first!): */
571 { "dtpmod", PSEUDO_FUNC_RELOC, { 0 } },
572 { "dtprel", PSEUDO_FUNC_RELOC, { 0 } },
573 { "fptr", PSEUDO_FUNC_RELOC, { 0 } },
574 { "gprel", PSEUDO_FUNC_RELOC, { 0 } },
575 { "ltoff", PSEUDO_FUNC_RELOC, { 0 } },
576 { "ltoffx", PSEUDO_FUNC_RELOC, { 0 } },
577 { "pcrel", PSEUDO_FUNC_RELOC, { 0 } },
578 { "pltoff", PSEUDO_FUNC_RELOC, { 0 } },
579 { "secrel", PSEUDO_FUNC_RELOC, { 0 } },
580 { "segrel", PSEUDO_FUNC_RELOC, { 0 } },
581 { "tprel", PSEUDO_FUNC_RELOC, { 0 } },
582 { "ltv", PSEUDO_FUNC_RELOC, { 0 } },
583 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_FPTR_RELATIVE */
584 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_MODULE */
585 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_RELATIVE */
586 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_TP_RELATIVE */
587 { "iplt", PSEUDO_FUNC_RELOC, { 0 } },
588 #ifdef TE_VMS
589 { "slotcount", PSEUDO_FUNC_RELOC, { 0 } },
590 #endif
591
592 /* mbtype4 constants: */
593 { "alt", PSEUDO_FUNC_CONST, { 0xa } },
594 { "brcst", PSEUDO_FUNC_CONST, { 0x0 } },
595 { "mix", PSEUDO_FUNC_CONST, { 0x8 } },
596 { "rev", PSEUDO_FUNC_CONST, { 0xb } },
597 { "shuf", PSEUDO_FUNC_CONST, { 0x9 } },
598
599 /* fclass constants: */
600 { "nat", PSEUDO_FUNC_CONST, { 0x100 } },
601 { "qnan", PSEUDO_FUNC_CONST, { 0x080 } },
602 { "snan", PSEUDO_FUNC_CONST, { 0x040 } },
603 { "pos", PSEUDO_FUNC_CONST, { 0x001 } },
604 { "neg", PSEUDO_FUNC_CONST, { 0x002 } },
605 { "zero", PSEUDO_FUNC_CONST, { 0x004 } },
606 { "unorm", PSEUDO_FUNC_CONST, { 0x008 } },
607 { "norm", PSEUDO_FUNC_CONST, { 0x010 } },
608 { "inf", PSEUDO_FUNC_CONST, { 0x020 } },
609
610 { "natval", PSEUDO_FUNC_CONST, { 0x100 } }, /* old usage */
611
612 /* hint constants: */
613 { "pause", PSEUDO_FUNC_CONST, { 0x0 } },
614 { "priority", PSEUDO_FUNC_CONST, { 0x1 } },
615
616 /* tf constants: */
617 { "clz", PSEUDO_FUNC_CONST, { 32 } },
618 { "mpy", PSEUDO_FUNC_CONST, { 33 } },
619 { "datahints", PSEUDO_FUNC_CONST, { 34 } },
620
621 /* unwind-related constants: */
622 { "svr4", PSEUDO_FUNC_CONST, { ELFOSABI_NONE } },
623 { "hpux", PSEUDO_FUNC_CONST, { ELFOSABI_HPUX } },
624 { "nt", PSEUDO_FUNC_CONST, { 2 } }, /* conflicts w/ELFOSABI_NETBSD */
625 { "linux", PSEUDO_FUNC_CONST, { ELFOSABI_GNU } },
626 { "freebsd", PSEUDO_FUNC_CONST, { ELFOSABI_FREEBSD } },
627 { "openvms", PSEUDO_FUNC_CONST, { ELFOSABI_OPENVMS } },
628 { "nsk", PSEUDO_FUNC_CONST, { ELFOSABI_NSK } },
629
630 /* unwind-related registers: */
631 { "priunat",PSEUDO_FUNC_REG, { REG_PRIUNAT } }
632 };
633
634 /* 41-bit nop opcodes (one per unit): */
635 static const bfd_vma nop[IA64_NUM_UNITS] =
636 {
637 0x0000000000LL, /* NIL => break 0 */
638 0x0008000000LL, /* I-unit nop */
639 0x0008000000LL, /* M-unit nop */
640 0x4000000000LL, /* B-unit nop */
641 0x0008000000LL, /* F-unit nop */
642 0x0000000000LL, /* L-"unit" nop immediate */
643 0x0008000000LL, /* X-unit nop */
644 };
645
646 /* Can't be `const' as it's passed to input routines (which have the
647 	 habit of setting temporary sentinels).  */
648 static char special_section_name[][20] =
649 {
650 {".bss"}, {".sbss"}, {".sdata"}, {".rodata"}, {".comment"},
651 {".IA_64.unwind"}, {".IA_64.unwind_info"},
652 {".init_array"}, {".fini_array"}
653 };
654
655 /* The best template for a particular sequence of up to three
656 instructions: */
657 #define N IA64_NUM_TYPES
658 static unsigned char best_template[N][N][N];
659 #undef N
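/* best_template[t1][t2][t3] gives the preferred bundle template for three
   consecutive instructions of unit types t1, t2, t3 (IA64_TYPE_* values from
   opcode/ia64.h); the table is filled in once at start-up.  */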
660
661 /* Resource dependencies currently in effect */
662 static struct rsrc {
663 int depind; /* dependency index */
664 const struct ia64_dependency *dependency; /* actual dependency */
665 unsigned specific:1, /* is this a specific bit/regno? */
666 link_to_qp_branch:1; /* will a branch on the same QP clear it?*/
667 int index; /* specific regno/bit within dependency */
668 int note; /* optional qualifying note (0 if none) */
669 #define STATE_NONE 0
670 #define STATE_STOP 1
671 #define STATE_SRLZ 2
672 int insn_srlz; /* current insn serialization state */
673 int data_srlz; /* current data serialization state */
674 int qp_regno; /* qualifying predicate for this usage */
675 const char *file; /* what file marked this dependency */
676 unsigned int line; /* what line marked this dependency */
677 struct mem_offset mem_offset; /* optional memory offset hint */
678 enum { CMP_NONE, CMP_OR, CMP_AND } cmp_type; /* OR or AND compare? */
679 int path; /* corresponding code entry index */
680 } *regdeps = NULL;
681 static int regdepslen = 0;
682 static int regdepstotlen = 0;
683 static const char *dv_mode[] = { "RAW", "WAW", "WAR" };
684 static const char *dv_sem[] = { "none", "implied", "impliedf",
685 "data", "instr", "specific", "stop", "other" };
686 static const char *dv_cmp_type[] = { "none", "OR", "AND" };
687
688 /* Current state of PR mutexation */
689 static struct qpmutex {
690 valueT prmask;
691 int path;
692 } *qp_mutexes = NULL; /* QP mutex bitmasks */
693 static int qp_mutexeslen = 0;
694 static int qp_mutexestotlen = 0;
695 static valueT qp_safe_across_calls = 0;
696
697 /* Current state of PR implications */
698 static struct qp_imply {
699 unsigned p1:6;
700 unsigned p2:6;
701 unsigned p2_branched:1;
702 int path;
703 } *qp_implies = NULL;
704 static int qp_implieslen = 0;
705 static int qp_impliestotlen = 0;
706
707 /* Keep track of static GR values so that indirect register usage can
708 sometimes be tracked. */
709 static struct gr {
710 unsigned known:1;
711 int path;
712 valueT value;
713 } gr_values[128] = {
714 {
715 1,
716 #ifdef INT_MAX
717 INT_MAX,
718 #else
719 (((1 << (8 * sizeof(gr_values->path) - 2)) - 1) << 1) + 1,
720 #endif
721 0
722 }
723 };
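/* The initializer above describes r0, which is architecturally always zero:
   known (1), on every path (path is set to the largest representable int; the
   #else expression builds INT_MAX portably, e.g. for a 32-bit int
   ((1 << 30) - 1) * 2 + 1 == 0x7fffffff), with value 0.  */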
724
725 /* Remember the alignment frag. */
726 static fragS *align_frag;
727
728 /* These are the routines required to output the various types of
729 unwind records. */
730
731 /* A slot_number is a frag address plus the slot index (0-2). We use the
732 frag address here so that if there is a section switch in the middle of
733 a function, then instructions emitted to a different section are not
734 counted. Since there may be more than one frag for a function, this
735 means we also need to keep track of which frag this address belongs to
736 so we can compute inter-frag distances. This also nicely solves the
737 problem with nops emitted for align directives, which can't easily be
738 counted, but can easily be derived from frag sizes. */
739
740 typedef struct unw_rec_list {
741 unwind_record r;
742 unsigned long slot_number;
743 fragS *slot_frag;
744 struct unw_rec_list *next;
745 } unw_rec_list;
746
747 #define SLOT_NUM_NOT_SET (unsigned)-1
748
749 /* Linked list of saved prologue counts. A very poor
750 implementation of a map from label numbers to prologue counts. */
751 typedef struct label_prologue_count
752 {
753 struct label_prologue_count *next;
754 unsigned long label_number;
755 unsigned int prologue_count;
756 } label_prologue_count;
757
758 typedef struct proc_pending
759 {
760 symbolS *sym;
761 struct proc_pending *next;
762 } proc_pending;
763
764 static struct
765 {
766 /* Maintain a list of unwind entries for the current function. */
767 unw_rec_list *list;
768 unw_rec_list *tail;
769
770 /* Any unwind entries that should be attached to the current slot
771 that an insn is being constructed for. */
772 unw_rec_list *current_entry;
773
774 /* These are used to create the unwind table entry for this function. */
775 proc_pending proc_pending;
776 symbolS *info; /* pointer to unwind info */
777 symbolS *personality_routine;
778 segT saved_text_seg;
779 subsegT saved_text_subseg;
780 unsigned int force_unwind_entry : 1; /* force generation of unwind entry? */
781
782 /* TRUE if processing unwind directives in a prologue region. */
783 unsigned int prologue : 1;
784 unsigned int prologue_mask : 4;
785 unsigned int prologue_gr : 7;
786 unsigned int body : 1;
787 unsigned int insn : 1;
788 unsigned int prologue_count; /* number of .prologues seen so far */
789 /* Prologue counts at previous .label_state directives. */
790 struct label_prologue_count * saved_prologue_counts;
791
792 /* List of split up .save-s. */
793 unw_p_record *pending_saves;
794 } unwind;
795
796 /* The input value is a negated offset from psp, and specifies an address
797    psp - offset.  The unwind format encodes that address as (psp + 16 - address) / 4,
798    which equals (offset + 16) / 4: add 16 and divide by 4 to get the encoded value.  */
799
800 #define ENCODED_PSP_OFFSET(OFFSET) (((OFFSET) + 16) / 4)
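/* Worked example: an input OFFSET of 32 denotes the address psp - 32, which
   is encoded as (psp + 16 - (psp - 32)) / 4 == (32 + 16) / 4 == 12, i.e.
   twelve 4-byte units below psp + 16.  */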
801
802 typedef void (*vbyte_func) (int, char *, char *);
803
804 /* Forward declarations: */
805 static void dot_alias (int);
806 static int parse_operand_and_eval (expressionS *, int);
807 static void emit_one_bundle (void);
808 static bfd_reloc_code_real_type ia64_gen_real_reloc_type (struct symbol *,
809 bfd_reloc_code_real_type);
810 static void insn_group_break (int, int, int);
811 static void add_qp_mutex (valueT);
812 static void add_qp_imply (int, int);
813 static void clear_qp_mutex (valueT);
814 static void clear_qp_implies (valueT, valueT);
815 static void print_dependency (const char *, int);
816 static void instruction_serialization (void);
817 static void data_serialization (void);
818 static void output_R3_format (vbyte_func, unw_record_type, unsigned long);
819 static void output_B3_format (vbyte_func, unsigned long, unsigned long);
820 static void output_B4_format (vbyte_func, unw_record_type, unsigned long);
821 static void free_saved_prologue_counts (void);
822
823 /* Determine if application register REGNUM resides only in the integer
824 unit (as opposed to the memory unit). */
825 static int
826 ar_is_only_in_integer_unit (int reg)
827 {
828 reg -= REG_AR;
829 return reg >= 64 && reg <= 111;
830 }
831
832 /* Determine if application register REGNUM resides only in the memory
833 unit (as opposed to the integer unit). */
834 static int
835 ar_is_only_in_memory_unit (int reg)
836 {
837 reg -= REG_AR;
838 return reg >= 0 && reg <= 47;
839 }
840
841 /* Switch to section NAME and create section if necessary. It's
842 rather ugly that we have to manipulate input_line_pointer but I
843 don't see any other way to accomplish the same thing without
844 changing obj-elf.c (which may be the Right Thing, in the end). */
845 static void
846 set_section (char *name)
847 {
848 char *saved_input_line_pointer;
849
850 saved_input_line_pointer = input_line_pointer;
851 input_line_pointer = name;
852 obj_elf_section (0);
853 input_line_pointer = saved_input_line_pointer;
854 }
855
856 /* Map 's' to SHF_IA_64_SHORT. */
857
858 bfd_vma
859 ia64_elf_section_letter (int letter, const char **ptr_msg)
860 {
861 if (letter == 's')
862 return SHF_IA_64_SHORT;
863 else if (letter == 'o')
864 return SHF_LINK_ORDER;
865 #ifdef TE_VMS
866 else if (letter == 'O')
867 return SHF_IA_64_VMS_OVERLAID;
868 else if (letter == 'g')
869 return SHF_IA_64_VMS_GLOBAL;
870 #endif
871
872 *ptr_msg = _("bad .section directive: want a,o,s,w,x,M,S,G,T in string");
873 return -1;
874 }
875
876 /* Map SHF_IA_64_SHORT to SEC_SMALL_DATA. */
877
878 flagword
879 ia64_elf_section_flags (flagword flags,
880 bfd_vma attr,
881 int type ATTRIBUTE_UNUSED)
882 {
883 if (attr & SHF_IA_64_SHORT)
884 flags |= SEC_SMALL_DATA;
885 return flags;
886 }
887
888 int
889 ia64_elf_section_type (const char *str, size_t len)
890 {
891 #define STREQ(s) ((len == sizeof (s) - 1) && (strncmp (str, s, sizeof (s) - 1) == 0))
892
893 if (STREQ (ELF_STRING_ia64_unwind_info))
894 return SHT_PROGBITS;
895
896 if (STREQ (ELF_STRING_ia64_unwind_info_once))
897 return SHT_PROGBITS;
898
899 if (STREQ (ELF_STRING_ia64_unwind))
900 return SHT_IA_64_UNWIND;
901
902 if (STREQ (ELF_STRING_ia64_unwind_once))
903 return SHT_IA_64_UNWIND;
904
905 if (STREQ ("unwind"))
906 return SHT_IA_64_UNWIND;
907
908 return -1;
909 #undef STREQ
910 }
911
912 static unsigned int
913 set_regstack (unsigned int ins,
914 unsigned int locs,
915 unsigned int outs,
916 unsigned int rots)
917 {
918 /* Size of frame. */
919 unsigned int sof;
920
921 sof = ins + locs + outs;
922 if (sof > 96)
923 {
924 as_bad (_("Size of frame exceeds maximum of 96 registers"));
925 return 0;
926 }
927 if (rots > sof)
928 {
929 as_warn (_("Size of rotating registers exceeds frame size"));
930 return 0;
931 }
932 md.in.base = REG_GR + 32;
933 md.loc.base = md.in.base + ins;
934 md.out.base = md.loc.base + locs;
935
936 md.in.num_regs = ins;
937 md.loc.num_regs = locs;
938 md.out.num_regs = outs;
939 md.rot.num_regs = rots;
940 return sof;
941 }
942
943 void
944 ia64_flush_insns (void)
945 {
946 struct label_fix *lfix;
947 segT saved_seg;
948 subsegT saved_subseg;
949 unw_rec_list *ptr;
950 bfd_boolean mark;
951
952 if (!md.last_text_seg)
953 return;
954
955 saved_seg = now_seg;
956 saved_subseg = now_subseg;
957
958 subseg_set (md.last_text_seg, 0);
959
960 while (md.num_slots_in_use > 0)
961 emit_one_bundle (); /* force out queued instructions */
962
963 /* In case there are labels following the last instruction, resolve
964 those now. */
965 mark = FALSE;
966 for (lfix = CURR_SLOT.label_fixups; lfix; lfix = lfix->next)
967 {
968 symbol_set_value_now (lfix->sym);
969 mark |= lfix->dw2_mark_labels;
970 }
971 if (mark)
972 {
973 dwarf2_where (&CURR_SLOT.debug_line);
974 CURR_SLOT.debug_line.flags |= DWARF2_FLAG_BASIC_BLOCK;
975 dwarf2_gen_line_info (frag_now_fix (), &CURR_SLOT.debug_line);
976 dwarf2_consume_line_info ();
977 }
978 CURR_SLOT.label_fixups = 0;
979
980 for (lfix = CURR_SLOT.tag_fixups; lfix; lfix = lfix->next)
981 symbol_set_value_now (lfix->sym);
982 CURR_SLOT.tag_fixups = 0;
983
984 /* In case there are unwind directives following the last instruction,
985 resolve those now. We only handle prologue, body, and endp directives
986 here. Give an error for others. */
987 for (ptr = unwind.current_entry; ptr; ptr = ptr->next)
988 {
989 switch (ptr->r.type)
990 {
991 case prologue:
992 case prologue_gr:
993 case body:
994 case endp:
995 ptr->slot_number = (unsigned long) frag_more (0);
996 ptr->slot_frag = frag_now;
997 break;
998
999 /* Allow any record which doesn't have a "t" field (i.e.,
1000 doesn't relate to a particular instruction). */
1001 case unwabi:
1002 case br_gr:
1003 case copy_state:
1004 case fr_mem:
1005 case frgr_mem:
1006 case gr_gr:
1007 case gr_mem:
1008 case label_state:
1009 case rp_br:
1010 case spill_base:
1011 case spill_mask:
1012 /* nothing */
1013 break;
1014
1015 default:
1016 as_bad (_("Unwind directive not followed by an instruction."));
1017 break;
1018 }
1019 }
1020 unwind.current_entry = NULL;
1021
1022 subseg_set (saved_seg, saved_subseg);
1023
1024 if (md.qp.X_op == O_register)
1025 as_bad (_("qualifying predicate not followed by instruction"));
1026 }
1027
1028 void
1029 ia64_cons_align (int nbytes)
1030 {
1031 if (md.auto_align)
1032 {
1033 int log;
1034 for (log = 0; (nbytes & 1) != 1; nbytes >>= 1)
1035 log++;
1036
1037 do_align (log, NULL, 0, 0);
1038 }
1039 }
1040
1041 #ifdef TE_VMS
1042
1043 /* .vms_common section, symbol, size, alignment */
1044
1045 static void
1046 obj_elf_vms_common (int ignore ATTRIBUTE_UNUSED)
1047 {
1048 const char *sec_name;
1049 char *sym_name;
1050 char c;
1051 offsetT size;
1052 offsetT cur_size;
1053 offsetT temp;
1054 symbolS *symbolP;
1055 segT current_seg = now_seg;
1056 subsegT current_subseg = now_subseg;
1057 offsetT log_align;
1058
1059 /* Section name. */
1060 sec_name = obj_elf_section_name ();
1061 if (sec_name == NULL)
1062 return;
1063
1064 /* Symbol name. */
1065 SKIP_WHITESPACE ();
1066 if (*input_line_pointer == ',')
1067 {
1068 input_line_pointer++;
1069 SKIP_WHITESPACE ();
1070 }
1071 else
1072 {
1073 as_bad (_("expected ',' after section name"));
1074 ignore_rest_of_line ();
1075 return;
1076 }
1077
1078 c = get_symbol_name (&sym_name);
1079
1080 if (input_line_pointer == sym_name)
1081 {
1082 (void) restore_line_pointer (c);
1083 as_bad (_("expected symbol name"));
1084 ignore_rest_of_line ();
1085 return;
1086 }
1087
1088 symbolP = symbol_find_or_make (sym_name);
1089 (void) restore_line_pointer (c);
1090
1091 if ((S_IS_DEFINED (symbolP) || symbol_equated_p (symbolP))
1092 && !S_IS_COMMON (symbolP))
1093 {
1094 as_bad (_("Ignoring attempt to re-define symbol"));
1095 ignore_rest_of_line ();
1096 return;
1097 }
1098
1099 /* Symbol size. */
1100 SKIP_WHITESPACE ();
1101 if (*input_line_pointer == ',')
1102 {
1103 input_line_pointer++;
1104 SKIP_WHITESPACE ();
1105 }
1106 else
1107 {
1108 as_bad (_("expected ',' after symbol name"));
1109 ignore_rest_of_line ();
1110 return;
1111 }
1112
1113 temp = get_absolute_expression ();
1114 size = temp;
1115 size &= ((offsetT) 2 << (stdoutput->arch_info->bits_per_address - 1)) - 1;
1116 if (temp != size)
1117 {
1118 as_warn (_("size (%ld) out of range, ignored"), (long) temp);
1119 ignore_rest_of_line ();
1120 return;
1121 }
1122
1123 /* Alignment. */
1124 SKIP_WHITESPACE ();
1125 if (*input_line_pointer == ',')
1126 {
1127 input_line_pointer++;
1128 SKIP_WHITESPACE ();
1129 }
1130 else
1131 {
1132 as_bad (_("expected ',' after symbol size"));
1133 ignore_rest_of_line ();
1134 return;
1135 }
1136
1137 log_align = get_absolute_expression ();
1138
1139 demand_empty_rest_of_line ();
1140
1141 obj_elf_change_section
1142 (sec_name, SHT_NOBITS,
1143 SHF_ALLOC | SHF_WRITE | SHF_IA_64_VMS_OVERLAID | SHF_IA_64_VMS_GLOBAL,
1144 0, NULL, 1, 0);
1145
1146 S_SET_VALUE (symbolP, 0);
1147 S_SET_SIZE (symbolP, size);
1148 S_SET_EXTERNAL (symbolP);
1149 S_SET_SEGMENT (symbolP, now_seg);
1150
1151 symbol_get_bfdsym (symbolP)->flags |= BSF_OBJECT;
1152
1153 record_alignment (now_seg, log_align);
1154
1155 cur_size = bfd_section_size (now_seg);
1156 if ((int) size > cur_size)
1157 {
1158 char *pfrag
1159 = frag_var (rs_fill, 1, 1, (relax_substateT)0, NULL,
1160 (valueT)size - (valueT)cur_size, NULL);
1161 *pfrag = 0;
1162 bfd_set_section_size (now_seg, size);
1163 }
1164
1165 /* Switch back to current segment. */
1166 subseg_set (current_seg, current_subseg);
1167
1168 #ifdef md_elf_section_change_hook
1169 md_elf_section_change_hook ();
1170 #endif
1171 }
1172
1173 #endif /* TE_VMS */
1174
1175 /* Output COUNT bytes to a memory location. */
1176 static char *vbyte_mem_ptr = NULL;
1177
1178 static void
1179 output_vbyte_mem (int count, char *ptr, char *comment ATTRIBUTE_UNUSED)
1180 {
1181 int x;
1182 if (vbyte_mem_ptr == NULL)
1183 abort ();
1184
1185 if (count == 0)
1186 return;
1187 for (x = 0; x < count; x++)
1188 *(vbyte_mem_ptr++) = ptr[x];
1189 }
1190
1191 /* Count the number of bytes required for records. */
1192 static int vbyte_count = 0;
1193 static void
1194 count_output (int count,
1195 char *ptr ATTRIBUTE_UNUSED,
1196 char *comment ATTRIBUTE_UNUSED)
1197 {
1198 vbyte_count += count;
1199 }
1200
1201 static void
1202 output_R1_format (vbyte_func f, unw_record_type rtype, int rlen)
1203 {
1204 int r = 0;
1205 char byte;
1206 if (rlen > 0x1f)
1207 {
1208 output_R3_format (f, rtype, rlen);
1209 return;
1210 }
1211
1212 if (rtype == body)
1213 r = 1;
1214 else if (rtype != prologue)
1215 as_bad (_("record type is not valid"));
1216
1217 byte = UNW_R1 | (r << 5) | (rlen & 0x1f);
1218 (*f) (1, &byte, NULL);
1219 }
1220
1221 static void
1222 output_R2_format (vbyte_func f, int mask, int grsave, unsigned long rlen)
1223 {
1224 char bytes[20];
1225 int count = 2;
1226 mask = (mask & 0x0f);
1227 grsave = (grsave & 0x7f);
1228
1229 bytes[0] = (UNW_R2 | (mask >> 1));
1230 bytes[1] = (((mask & 0x01) << 7) | grsave);
1231 count += output_leb128 (bytes + 2, rlen, 0);
1232 (*f) (count, bytes, NULL);
1233 }
1234
1235 static void
1236 output_R3_format (vbyte_func f, unw_record_type rtype, unsigned long rlen)
1237 {
1238 int r = 0, count;
1239 char bytes[20];
1240 if (rlen <= 0x1f)
1241 {
1242 output_R1_format (f, rtype, rlen);
1243 return;
1244 }
1245
1246 if (rtype == body)
1247 r = 1;
1248 else if (rtype != prologue)
1249 as_bad (_("record type is not valid"));
1250 bytes[0] = (UNW_R3 | r);
1251 count = output_leb128 (bytes + 1, rlen, 0);
1252 (*f) (count + 1, bytes, NULL);
1253 }
1254
1255 static void
1256 output_P1_format (vbyte_func f, int brmask)
1257 {
1258 char byte;
1259 byte = UNW_P1 | (brmask & 0x1f);
1260 (*f) (1, &byte, NULL);
1261 }
1262
1263 static void
1264 output_P2_format (vbyte_func f, int brmask, int gr)
1265 {
1266 char bytes[2];
1267 brmask = (brmask & 0x1f);
1268 bytes[0] = UNW_P2 | (brmask >> 1);
1269 bytes[1] = (((brmask & 1) << 7) | gr);
1270 (*f) (2, bytes, NULL);
1271 }
1272
1273 static void
1274 output_P3_format (vbyte_func f, unw_record_type rtype, int reg)
1275 {
1276 char bytes[2];
1277 int r = 0;
1278 reg = (reg & 0x7f);
1279 switch (rtype)
1280 {
1281 case psp_gr:
1282 r = 0;
1283 break;
1284 case rp_gr:
1285 r = 1;
1286 break;
1287 case pfs_gr:
1288 r = 2;
1289 break;
1290 case preds_gr:
1291 r = 3;
1292 break;
1293 case unat_gr:
1294 r = 4;
1295 break;
1296 case lc_gr:
1297 r = 5;
1298 break;
1299 case rp_br:
1300 r = 6;
1301 break;
1302 case rnat_gr:
1303 r = 7;
1304 break;
1305 case bsp_gr:
1306 r = 8;
1307 break;
1308 case bspstore_gr:
1309 r = 9;
1310 break;
1311 case fpsr_gr:
1312 r = 10;
1313 break;
1314 case priunat_gr:
1315 r = 11;
1316 break;
1317 default:
1318 as_bad (_("Invalid record type for P3 format."));
1319 }
1320 bytes[0] = (UNW_P3 | (r >> 1));
1321 bytes[1] = (((r & 1) << 7) | reg);
1322 (*f) (2, bytes, NULL);
1323 }
1324
1325 static void
1326 output_P4_format (vbyte_func f, unsigned char *imask, unsigned long imask_size)
1327 {
1328 imask[0] = UNW_P4;
1329 (*f) (imask_size, (char *) imask, NULL);
1330 }
1331
1332 static void
1333 output_P5_format (vbyte_func f, int grmask, unsigned long frmask)
1334 {
1335 char bytes[4];
1336 grmask = (grmask & 0x0f);
1337
1338 bytes[0] = UNW_P5;
1339 bytes[1] = ((grmask << 4) | ((frmask & 0x000f0000) >> 16));
1340 bytes[2] = ((frmask & 0x0000ff00) >> 8);
1341 bytes[3] = (frmask & 0x000000ff);
1342 (*f) (4, bytes, NULL);
1343 }
1344
1345 static void
1346 output_P6_format (vbyte_func f, unw_record_type rtype, int rmask)
1347 {
1348 char byte;
1349 int r = 0;
1350
1351 if (rtype == gr_mem)
1352 r = 1;
1353 else if (rtype != fr_mem)
1354 as_bad (_("Invalid record type for format P6"));
1355 byte = (UNW_P6 | (r << 4) | (rmask & 0x0f));
1356 (*f) (1, &byte, NULL);
1357 }
1358
1359 static void
1360 output_P7_format (vbyte_func f,
1361 unw_record_type rtype,
1362 unsigned long w1,
1363 unsigned long w2)
1364 {
1365 char bytes[20];
1366 int count = 1;
1367 int r = 0;
1368 count += output_leb128 (bytes + 1, w1, 0);
1369 switch (rtype)
1370 {
1371 case mem_stack_f:
1372 r = 0;
1373 count += output_leb128 (bytes + count, w2 >> 4, 0);
1374 break;
1375 case mem_stack_v:
1376 r = 1;
1377 break;
1378 case spill_base:
1379 r = 2;
1380 break;
1381 case psp_sprel:
1382 r = 3;
1383 break;
1384 case rp_when:
1385 r = 4;
1386 break;
1387 case rp_psprel:
1388 r = 5;
1389 break;
1390 case pfs_when:
1391 r = 6;
1392 break;
1393 case pfs_psprel:
1394 r = 7;
1395 break;
1396 case preds_when:
1397 r = 8;
1398 break;
1399 case preds_psprel:
1400 r = 9;
1401 break;
1402 case lc_when:
1403 r = 10;
1404 break;
1405 case lc_psprel:
1406 r = 11;
1407 break;
1408 case unat_when:
1409 r = 12;
1410 break;
1411 case unat_psprel:
1412 r = 13;
1413 break;
1414 case fpsr_when:
1415 r = 14;
1416 break;
1417 case fpsr_psprel:
1418 r = 15;
1419 break;
1420 default:
1421 break;
1422 }
1423 bytes[0] = (UNW_P7 | r);
1424 (*f) (count, bytes, NULL);
1425 }
1426
1427 static void
1428 output_P8_format (vbyte_func f, unw_record_type rtype, unsigned long t)
1429 {
1430 char bytes[20];
1431 int r = 0;
1432 int count = 2;
1433 bytes[0] = UNW_P8;
1434 switch (rtype)
1435 {
1436 case rp_sprel:
1437 r = 1;
1438 break;
1439 case pfs_sprel:
1440 r = 2;
1441 break;
1442 case preds_sprel:
1443 r = 3;
1444 break;
1445 case lc_sprel:
1446 r = 4;
1447 break;
1448 case unat_sprel:
1449 r = 5;
1450 break;
1451 case fpsr_sprel:
1452 r = 6;
1453 break;
1454 case bsp_when:
1455 r = 7;
1456 break;
1457 case bsp_psprel:
1458 r = 8;
1459 break;
1460 case bsp_sprel:
1461 r = 9;
1462 break;
1463 case bspstore_when:
1464 r = 10;
1465 break;
1466 case bspstore_psprel:
1467 r = 11;
1468 break;
1469 case bspstore_sprel:
1470 r = 12;
1471 break;
1472 case rnat_when:
1473 r = 13;
1474 break;
1475 case rnat_psprel:
1476 r = 14;
1477 break;
1478 case rnat_sprel:
1479 r = 15;
1480 break;
1481 case priunat_when_gr:
1482 r = 16;
1483 break;
1484 case priunat_psprel:
1485 r = 17;
1486 break;
1487 case priunat_sprel:
1488 r = 18;
1489 break;
1490 case priunat_when_mem:
1491 r = 19;
1492 break;
1493 default:
1494 break;
1495 }
1496 bytes[1] = r;
1497 count += output_leb128 (bytes + 2, t, 0);
1498 (*f) (count, bytes, NULL);
1499 }
1500
1501 static void
1502 output_P9_format (vbyte_func f, int grmask, int gr)
1503 {
1504 char bytes[3];
1505 bytes[0] = UNW_P9;
1506 bytes[1] = (grmask & 0x0f);
1507 bytes[2] = (gr & 0x7f);
1508 (*f) (3, bytes, NULL);
1509 }
1510
1511 static void
1512 output_P10_format (vbyte_func f, int abi, int context)
1513 {
1514 char bytes[3];
1515 bytes[0] = UNW_P10;
1516 bytes[1] = (abi & 0xff);
1517 bytes[2] = (context & 0xff);
1518 (*f) (3, bytes, NULL);
1519 }
1520
1521 static void
1522 output_B1_format (vbyte_func f, unw_record_type rtype, unsigned long label)
1523 {
1524 char byte;
1525 int r = 0;
1526 if (label > 0x1f)
1527 {
1528 output_B4_format (f, rtype, label);
1529 return;
1530 }
1531 if (rtype == copy_state)
1532 r = 1;
1533 else if (rtype != label_state)
1534 as_bad (_("Invalid record type for format B1"));
1535
1536 byte = (UNW_B1 | (r << 5) | (label & 0x1f));
1537 (*f) (1, &byte, NULL);
1538 }
1539
1540 static void
1541 output_B2_format (vbyte_func f, unsigned long ecount, unsigned long t)
1542 {
1543 char bytes[20];
1544 int count = 1;
1545 if (ecount > 0x1f)
1546 {
1547 output_B3_format (f, ecount, t);
1548 return;
1549 }
1550 bytes[0] = (UNW_B2 | (ecount & 0x1f));
1551 count += output_leb128 (bytes + 1, t, 0);
1552 (*f) (count, bytes, NULL);
1553 }
1554
1555 static void
1556 output_B3_format (vbyte_func f, unsigned long ecount, unsigned long t)
1557 {
1558 char bytes[20];
1559 int count = 1;
1560 if (ecount <= 0x1f)
1561 {
1562 output_B2_format (f, ecount, t);
1563 return;
1564 }
1565 bytes[0] = UNW_B3;
1566 count += output_leb128 (bytes + 1, t, 0);
1567 count += output_leb128 (bytes + count, ecount, 0);
1568 (*f) (count, bytes, NULL);
1569 }
1570
1571 static void
1572 output_B4_format (vbyte_func f, unw_record_type rtype, unsigned long label)
1573 {
1574 char bytes[20];
1575 int r = 0;
1576 int count = 1;
1577 if (label <= 0x1f)
1578 {
1579 output_B1_format (f, rtype, label);
1580 return;
1581 }
1582
1583 if (rtype == copy_state)
1584 r = 1;
1585 else if (rtype != label_state)
1586 as_bad (_("Invalid record type for format B1"));
1587
1588 bytes[0] = (UNW_B4 | (r << 3));
1589 count += output_leb128 (bytes + 1, label, 0);
1590 (*f) (count, bytes, NULL);
1591 }
1592
1593 static char
1594 format_ab_reg (int ab, int reg)
1595 {
1596 int ret;
1597 ab = (ab & 3);
1598 reg = (reg & 0x1f);
1599 ret = (ab << 5) | reg;
1600 return ret;
1601 }
1602
1603 static void
1604 output_X1_format (vbyte_func f,
1605 unw_record_type rtype,
1606 int ab,
1607 int reg,
1608 unsigned long t,
1609 unsigned long w1)
1610 {
1611 char bytes[20];
1612 int r = 0;
1613 int count = 2;
1614 bytes[0] = UNW_X1;
1615
1616 if (rtype == spill_sprel)
1617 r = 1;
1618 else if (rtype != spill_psprel)
1619 as_bad (_("Invalid record type for format X1"));
1620 bytes[1] = ((r << 7) | format_ab_reg (ab, reg));
1621 count += output_leb128 (bytes + 2, t, 0);
1622 count += output_leb128 (bytes + count, w1, 0);
1623 (*f) (count, bytes, NULL);
1624 }
1625
1626 static void
1627 output_X2_format (vbyte_func f,
1628 int ab,
1629 int reg,
1630 int x,
1631 int y,
1632 int treg,
1633 unsigned long t)
1634 {
1635 char bytes[20];
1636 int count = 3;
1637 bytes[0] = UNW_X2;
1638 bytes[1] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1639 bytes[2] = (((y & 1) << 7) | (treg & 0x7f));
1640 count += output_leb128 (bytes + 3, t, 0);
1641 (*f) (count, bytes, NULL);
1642 }
1643
1644 static void
1645 output_X3_format (vbyte_func f,
1646 unw_record_type rtype,
1647 int qp,
1648 int ab,
1649 int reg,
1650 unsigned long t,
1651 unsigned long w1)
1652 {
1653 char bytes[20];
1654 int r = 0;
1655 int count = 3;
1656 bytes[0] = UNW_X3;
1657
1658 if (rtype == spill_sprel_p)
1659 r = 1;
1660 else if (rtype != spill_psprel_p)
1661 as_bad (_("Invalid record type for format X3"));
1662 bytes[1] = ((r << 7) | (qp & 0x3f));
1663 bytes[2] = format_ab_reg (ab, reg);
1664 count += output_leb128 (bytes + 3, t, 0);
1665 count += output_leb128 (bytes + count, w1, 0);
1666 (*f) (count, bytes, NULL);
1667 }
1668
1669 static void
1670 output_X4_format (vbyte_func f,
1671 int qp,
1672 int ab,
1673 int reg,
1674 int x,
1675 int y,
1676 int treg,
1677 unsigned long t)
1678 {
1679 char bytes[20];
1680 int count = 4;
1681 bytes[0] = UNW_X4;
1682 bytes[1] = (qp & 0x3f);
1683 bytes[2] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1684 bytes[3] = (((y & 1) << 7) | (treg & 0x7f));
1685 count += output_leb128 (bytes + 4, t, 0);
1686 (*f) (count, bytes, NULL);
1687 }
1688
1689 /* This function checks whether there are any outstanding .save-s and
1690 discards them if so. */
1691
1692 static void
1693 check_pending_save (void)
1694 {
1695 if (unwind.pending_saves)
1696 {
1697 unw_rec_list *cur, *prev;
1698
1699 as_warn (_("Previous .save incomplete"));
1700 for (cur = unwind.list, prev = NULL; cur; )
1701 if (&cur->r.record.p == unwind.pending_saves)
1702 {
1703 if (prev)
1704 prev->next = cur->next;
1705 else
1706 unwind.list = cur->next;
1707 if (cur == unwind.tail)
1708 unwind.tail = prev;
1709 if (cur == unwind.current_entry)
1710 unwind.current_entry = cur->next;
1711 /* Don't free the first discarded record, it's being used as
1712 terminator for (currently) br_gr and gr_gr processing, and
1713 also prevents leaving a dangling pointer to it in its
1714 predecessor. */
1715 cur->r.record.p.grmask = 0;
1716 cur->r.record.p.brmask = 0;
1717 cur->r.record.p.frmask = 0;
1718 prev = cur->r.record.p.next;
1719 cur->r.record.p.next = NULL;
1720 cur = prev;
1721 break;
1722 }
1723 else
1724 {
1725 prev = cur;
1726 cur = cur->next;
1727 }
1728 while (cur)
1729 {
1730 prev = cur;
1731 cur = cur->r.record.p.next;
1732 free (prev);
1733 }
1734 unwind.pending_saves = NULL;
1735 }
1736 }
1737
1738 /* This function allocates a record list structure, and initializes fields. */
1739
1740 static unw_rec_list *
1741 alloc_record (unw_record_type t)
1742 {
1743 unw_rec_list *ptr;
1744 ptr = XNEW (unw_rec_list);
1745 memset (ptr, 0, sizeof (*ptr));
1746 ptr->slot_number = SLOT_NUM_NOT_SET;
1747 ptr->r.type = t;
1748 return ptr;
1749 }
1750
1751 /* Dummy unwind record used for calculating the length of the last prologue or
1752 body region. */
1753
1754 static unw_rec_list *
1755 output_endp (void)
1756 {
1757 unw_rec_list *ptr = alloc_record (endp);
1758 return ptr;
1759 }
1760
1761 static unw_rec_list *
1762 output_prologue (void)
1763 {
1764 unw_rec_list *ptr = alloc_record (prologue);
1765 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1766 return ptr;
1767 }
1768
1769 static unw_rec_list *
1770 output_prologue_gr (unsigned int saved_mask, unsigned int reg)
1771 {
1772 unw_rec_list *ptr = alloc_record (prologue_gr);
1773 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1774 ptr->r.record.r.grmask = saved_mask;
1775 ptr->r.record.r.grsave = reg;
1776 return ptr;
1777 }
1778
1779 static unw_rec_list *
1780 output_body (void)
1781 {
1782 unw_rec_list *ptr = alloc_record (body);
1783 return ptr;
1784 }
1785
1786 static unw_rec_list *
1787 output_mem_stack_f (unsigned int size)
1788 {
1789 unw_rec_list *ptr = alloc_record (mem_stack_f);
1790 ptr->r.record.p.size = size;
1791 return ptr;
1792 }
1793
1794 static unw_rec_list *
1795 output_mem_stack_v (void)
1796 {
1797 unw_rec_list *ptr = alloc_record (mem_stack_v);
1798 return ptr;
1799 }
1800
1801 static unw_rec_list *
1802 output_psp_gr (unsigned int gr)
1803 {
1804 unw_rec_list *ptr = alloc_record (psp_gr);
1805 ptr->r.record.p.r.gr = gr;
1806 return ptr;
1807 }
1808
1809 static unw_rec_list *
1810 output_psp_sprel (unsigned int offset)
1811 {
1812 unw_rec_list *ptr = alloc_record (psp_sprel);
1813 ptr->r.record.p.off.sp = offset / 4;
1814 return ptr;
1815 }
1816
1817 static unw_rec_list *
1818 output_rp_when (void)
1819 {
1820 unw_rec_list *ptr = alloc_record (rp_when);
1821 return ptr;
1822 }
1823
1824 static unw_rec_list *
1825 output_rp_gr (unsigned int gr)
1826 {
1827 unw_rec_list *ptr = alloc_record (rp_gr);
1828 ptr->r.record.p.r.gr = gr;
1829 return ptr;
1830 }
1831
1832 static unw_rec_list *
1833 output_rp_br (unsigned int br)
1834 {
1835 unw_rec_list *ptr = alloc_record (rp_br);
1836 ptr->r.record.p.r.br = br;
1837 return ptr;
1838 }
1839
1840 static unw_rec_list *
1841 output_rp_psprel (unsigned int offset)
1842 {
1843 unw_rec_list *ptr = alloc_record (rp_psprel);
1844 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1845 return ptr;
1846 }
1847
1848 static unw_rec_list *
1849 output_rp_sprel (unsigned int offset)
1850 {
1851 unw_rec_list *ptr = alloc_record (rp_sprel);
1852 ptr->r.record.p.off.sp = offset / 4;
1853 return ptr;
1854 }
1855
1856 static unw_rec_list *
1857 output_pfs_when (void)
1858 {
1859 unw_rec_list *ptr = alloc_record (pfs_when);
1860 return ptr;
1861 }
1862
1863 static unw_rec_list *
1864 output_pfs_gr (unsigned int gr)
1865 {
1866 unw_rec_list *ptr = alloc_record (pfs_gr);
1867 ptr->r.record.p.r.gr = gr;
1868 return ptr;
1869 }
1870
1871 static unw_rec_list *
1872 output_pfs_psprel (unsigned int offset)
1873 {
1874 unw_rec_list *ptr = alloc_record (pfs_psprel);
1875 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1876 return ptr;
1877 }
1878
1879 static unw_rec_list *
1880 output_pfs_sprel (unsigned int offset)
1881 {
1882 unw_rec_list *ptr = alloc_record (pfs_sprel);
1883 ptr->r.record.p.off.sp = offset / 4;
1884 return ptr;
1885 }
1886
1887 static unw_rec_list *
1888 output_preds_when (void)
1889 {
1890 unw_rec_list *ptr = alloc_record (preds_when);
1891 return ptr;
1892 }
1893
1894 static unw_rec_list *
1895 output_preds_gr (unsigned int gr)
1896 {
1897 unw_rec_list *ptr = alloc_record (preds_gr);
1898 ptr->r.record.p.r.gr = gr;
1899 return ptr;
1900 }
1901
1902 static unw_rec_list *
1903 output_preds_psprel (unsigned int offset)
1904 {
1905 unw_rec_list *ptr = alloc_record (preds_psprel);
1906 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1907 return ptr;
1908 }
1909
1910 static unw_rec_list *
1911 output_preds_sprel (unsigned int offset)
1912 {
1913 unw_rec_list *ptr = alloc_record (preds_sprel);
1914 ptr->r.record.p.off.sp = offset / 4;
1915 return ptr;
1916 }
1917
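/* The mask-splitting helpers below (output_fr_mem, output_frgr_mem,
   output_gr_gr, output_gr_mem, output_br_mem, output_br_gr) turn a .save
   register mask into a chain of single-bit records linked through
   r.record.p.next, with unwind.pending_saves pointing at the head,
   apparently so that later directives can complete each individual save
   (see check_pending_save above).  The idiom
   "mask &= ~(mask & (~mask + 1))" clears the least significant set bit,
   since mask & (~mask + 1) == mask & -mask isolates it; e.g. a mask of 0x6
   produces one record keeping 0x2 followed by one keeping 0x4.  */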
1918 static unw_rec_list *
1919 output_fr_mem (unsigned int mask)
1920 {
1921 unw_rec_list *ptr = alloc_record (fr_mem);
1922 unw_rec_list *cur = ptr;
1923
1924 ptr->r.record.p.frmask = mask;
1925 unwind.pending_saves = &ptr->r.record.p;
1926 for (;;)
1927 {
1928 unw_rec_list *prev = cur;
1929
1930 /* Clear least significant set bit. */
1931 mask &= ~(mask & (~mask + 1));
1932 if (!mask)
1933 return ptr;
1934 cur = alloc_record (fr_mem);
1935 cur->r.record.p.frmask = mask;
1936 /* Retain only least significant bit. */
1937 prev->r.record.p.frmask ^= mask;
1938 prev->r.record.p.next = cur;
1939 }
1940 }
1941
1942 static unw_rec_list *
1943 output_frgr_mem (unsigned int gr_mask, unsigned int fr_mask)
1944 {
1945 unw_rec_list *ptr = alloc_record (frgr_mem);
1946 unw_rec_list *cur = ptr;
1947
1948 unwind.pending_saves = &cur->r.record.p;
1949 cur->r.record.p.frmask = fr_mask;
1950 while (fr_mask)
1951 {
1952 unw_rec_list *prev = cur;
1953
1954 /* Clear least significant set bit. */
1955 fr_mask &= ~(fr_mask & (~fr_mask + 1));
1956 if (!gr_mask && !fr_mask)
1957 return ptr;
1958 cur = alloc_record (frgr_mem);
1959 cur->r.record.p.frmask = fr_mask;
1960 /* Retain only least significant bit. */
1961 prev->r.record.p.frmask ^= fr_mask;
1962 prev->r.record.p.next = cur;
1963 }
1964 cur->r.record.p.grmask = gr_mask;
1965 for (;;)
1966 {
1967 unw_rec_list *prev = cur;
1968
1969 /* Clear least significant set bit. */
1970 gr_mask &= ~(gr_mask & (~gr_mask + 1));
1971 if (!gr_mask)
1972 return ptr;
1973 cur = alloc_record (frgr_mem);
1974 cur->r.record.p.grmask = gr_mask;
1975 /* Retain only least significant bit. */
1976 prev->r.record.p.grmask ^= gr_mask;
1977 prev->r.record.p.next = cur;
1978 }
1979 }
1980
1981 static unw_rec_list *
1982 output_gr_gr (unsigned int mask, unsigned int reg)
1983 {
1984 unw_rec_list *ptr = alloc_record (gr_gr);
1985 unw_rec_list *cur = ptr;
1986
1987 ptr->r.record.p.grmask = mask;
1988 ptr->r.record.p.r.gr = reg;
1989 unwind.pending_saves = &ptr->r.record.p;
1990 for (;;)
1991 {
1992 unw_rec_list *prev = cur;
1993
1994 /* Clear least significant set bit. */
1995 mask &= ~(mask & (~mask + 1));
1996 if (!mask)
1997 return ptr;
1998 cur = alloc_record (gr_gr);
1999 cur->r.record.p.grmask = mask;
2000 /* Indicate this record shouldn't be output. */
2001 cur->r.record.p.r.gr = REG_NUM;
2002 /* Retain only least significant bit. */
2003 prev->r.record.p.grmask ^= mask;
2004 prev->r.record.p.next = cur;
2005 }
2006 }
2007
2008 static unw_rec_list *
2009 output_gr_mem (unsigned int mask)
2010 {
2011 unw_rec_list *ptr = alloc_record (gr_mem);
2012 unw_rec_list *cur = ptr;
2013
2014 ptr->r.record.p.grmask = mask;
2015 unwind.pending_saves = &ptr->r.record.p;
2016 for (;;)
2017 {
2018 unw_rec_list *prev = cur;
2019
2020 /* Clear least significant set bit. */
2021 mask &= ~(mask & (~mask + 1));
2022 if (!mask)
2023 return ptr;
2024 cur = alloc_record (gr_mem);
2025 cur->r.record.p.grmask = mask;
2026 /* Retain only least significant bit. */
2027 prev->r.record.p.grmask ^= mask;
2028 prev->r.record.p.next = cur;
2029 }
2030 }
2031
2032 static unw_rec_list *
2033 output_br_mem (unsigned int mask)
2034 {
2035 unw_rec_list *ptr = alloc_record (br_mem);
2036 unw_rec_list *cur = ptr;
2037
2038 ptr->r.record.p.brmask = mask;
2039 unwind.pending_saves = &ptr->r.record.p;
2040 for (;;)
2041 {
2042 unw_rec_list *prev = cur;
2043
2044 /* Clear least significant set bit. */
2045 mask &= ~(mask & (~mask + 1));
2046 if (!mask)
2047 return ptr;
2048 cur = alloc_record (br_mem);
2049 cur->r.record.p.brmask = mask;
2050 /* Retain only least significant bit. */
2051 prev->r.record.p.brmask ^= mask;
2052 prev->r.record.p.next = cur;
2053 }
2054 }
2055
2056 static unw_rec_list *
2057 output_br_gr (unsigned int mask, unsigned int reg)
2058 {
2059 unw_rec_list *ptr = alloc_record (br_gr);
2060 unw_rec_list *cur = ptr;
2061
2062 ptr->r.record.p.brmask = mask;
2063 ptr->r.record.p.r.gr = reg;
2064 unwind.pending_saves = &ptr->r.record.p;
2065 for (;;)
2066 {
2067 unw_rec_list *prev = cur;
2068
2069 /* Clear least significant set bit. */
2070 mask &= ~(mask & (~mask + 1));
2071 if (!mask)
2072 return ptr;
2073 cur = alloc_record (br_gr);
2074 cur->r.record.p.brmask = mask;
2075 /* Indicate this record shouldn't be output. */
2076 cur->r.record.p.r.gr = REG_NUM;
2077 /* Retain only least significant bit. */
2078 prev->r.record.p.brmask ^= mask;
2079 prev->r.record.p.next = cur;
2080 }
2081 }
2082
2083 static unw_rec_list *
2084 output_spill_base (unsigned int offset)
2085 {
2086 unw_rec_list *ptr = alloc_record (spill_base);
2087 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2088 return ptr;
2089 }
2090
2091 static unw_rec_list *
2092 output_unat_when (void)
2093 {
2094 unw_rec_list *ptr = alloc_record (unat_when);
2095 return ptr;
2096 }
2097
2098 static unw_rec_list *
2099 output_unat_gr (unsigned int gr)
2100 {
2101 unw_rec_list *ptr = alloc_record (unat_gr);
2102 ptr->r.record.p.r.gr = gr;
2103 return ptr;
2104 }
2105
2106 static unw_rec_list *
2107 output_unat_psprel (unsigned int offset)
2108 {
2109 unw_rec_list *ptr = alloc_record (unat_psprel);
2110 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2111 return ptr;
2112 }
2113
2114 static unw_rec_list *
2115 output_unat_sprel (unsigned int offset)
2116 {
2117 unw_rec_list *ptr = alloc_record (unat_sprel);
2118 ptr->r.record.p.off.sp = offset / 4;
2119 return ptr;
2120 }
2121
2122 static unw_rec_list *
2123 output_lc_when (void)
2124 {
2125 unw_rec_list *ptr = alloc_record (lc_when);
2126 return ptr;
2127 }
2128
2129 static unw_rec_list *
2130 output_lc_gr (unsigned int gr)
2131 {
2132 unw_rec_list *ptr = alloc_record (lc_gr);
2133 ptr->r.record.p.r.gr = gr;
2134 return ptr;
2135 }
2136
2137 static unw_rec_list *
2138 output_lc_psprel (unsigned int offset)
2139 {
2140 unw_rec_list *ptr = alloc_record (lc_psprel);
2141 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2142 return ptr;
2143 }
2144
2145 static unw_rec_list *
2146 output_lc_sprel (unsigned int offset)
2147 {
2148 unw_rec_list *ptr = alloc_record (lc_sprel);
2149 ptr->r.record.p.off.sp = offset / 4;
2150 return ptr;
2151 }
2152
2153 static unw_rec_list *
2154 output_fpsr_when (void)
2155 {
2156 unw_rec_list *ptr = alloc_record (fpsr_when);
2157 return ptr;
2158 }
2159
2160 static unw_rec_list *
2161 output_fpsr_gr (unsigned int gr)
2162 {
2163 unw_rec_list *ptr = alloc_record (fpsr_gr);
2164 ptr->r.record.p.r.gr = gr;
2165 return ptr;
2166 }
2167
2168 static unw_rec_list *
2169 output_fpsr_psprel (unsigned int offset)
2170 {
2171 unw_rec_list *ptr = alloc_record (fpsr_psprel);
2172 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2173 return ptr;
2174 }
2175
2176 static unw_rec_list *
2177 output_fpsr_sprel (unsigned int offset)
2178 {
2179 unw_rec_list *ptr = alloc_record (fpsr_sprel);
2180 ptr->r.record.p.off.sp = offset / 4;
2181 return ptr;
2182 }
2183
2184 static unw_rec_list *
2185 output_priunat_when_gr (void)
2186 {
2187 unw_rec_list *ptr = alloc_record (priunat_when_gr);
2188 return ptr;
2189 }
2190
2191 static unw_rec_list *
2192 output_priunat_when_mem (void)
2193 {
2194 unw_rec_list *ptr = alloc_record (priunat_when_mem);
2195 return ptr;
2196 }
2197
2198 static unw_rec_list *
2199 output_priunat_gr (unsigned int gr)
2200 {
2201 unw_rec_list *ptr = alloc_record (priunat_gr);
2202 ptr->r.record.p.r.gr = gr;
2203 return ptr;
2204 }
2205
2206 static unw_rec_list *
2207 output_priunat_psprel (unsigned int offset)
2208 {
2209 unw_rec_list *ptr = alloc_record (priunat_psprel);
2210 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2211 return ptr;
2212 }
2213
2214 static unw_rec_list *
2215 output_priunat_sprel (unsigned int offset)
2216 {
2217 unw_rec_list *ptr = alloc_record (priunat_sprel);
2218 ptr->r.record.p.off.sp = offset / 4;
2219 return ptr;
2220 }
2221
2222 static unw_rec_list *
2223 output_bsp_when (void)
2224 {
2225 unw_rec_list *ptr = alloc_record (bsp_when);
2226 return ptr;
2227 }
2228
2229 static unw_rec_list *
2230 output_bsp_gr (unsigned int gr)
2231 {
2232 unw_rec_list *ptr = alloc_record (bsp_gr);
2233 ptr->r.record.p.r.gr = gr;
2234 return ptr;
2235 }
2236
2237 static unw_rec_list *
2238 output_bsp_psprel (unsigned int offset)
2239 {
2240 unw_rec_list *ptr = alloc_record (bsp_psprel);
2241 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2242 return ptr;
2243 }
2244
2245 static unw_rec_list *
2246 output_bsp_sprel (unsigned int offset)
2247 {
2248 unw_rec_list *ptr = alloc_record (bsp_sprel);
2249 ptr->r.record.p.off.sp = offset / 4;
2250 return ptr;
2251 }
2252
2253 static unw_rec_list *
2254 output_bspstore_when (void)
2255 {
2256 unw_rec_list *ptr = alloc_record (bspstore_when);
2257 return ptr;
2258 }
2259
2260 static unw_rec_list *
2261 output_bspstore_gr (unsigned int gr)
2262 {
2263 unw_rec_list *ptr = alloc_record (bspstore_gr);
2264 ptr->r.record.p.r.gr = gr;
2265 return ptr;
2266 }
2267
2268 static unw_rec_list *
2269 output_bspstore_psprel (unsigned int offset)
2270 {
2271 unw_rec_list *ptr = alloc_record (bspstore_psprel);
2272 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2273 return ptr;
2274 }
2275
2276 static unw_rec_list *
2277 output_bspstore_sprel (unsigned int offset)
2278 {
2279 unw_rec_list *ptr = alloc_record (bspstore_sprel);
2280 ptr->r.record.p.off.sp = offset / 4;
2281 return ptr;
2282 }
2283
2284 static unw_rec_list *
2285 output_rnat_when (void)
2286 {
2287 unw_rec_list *ptr = alloc_record (rnat_when);
2288 return ptr;
2289 }
2290
2291 static unw_rec_list *
2292 output_rnat_gr (unsigned int gr)
2293 {
2294 unw_rec_list *ptr = alloc_record (rnat_gr);
2295 ptr->r.record.p.r.gr = gr;
2296 return ptr;
2297 }
2298
2299 static unw_rec_list *
2300 output_rnat_psprel (unsigned int offset)
2301 {
2302 unw_rec_list *ptr = alloc_record (rnat_psprel);
2303 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2304 return ptr;
2305 }
2306
2307 static unw_rec_list *
2308 output_rnat_sprel (unsigned int offset)
2309 {
2310 unw_rec_list *ptr = alloc_record (rnat_sprel);
2311 ptr->r.record.p.off.sp = offset / 4;
2312 return ptr;
2313 }
2314
2315 static unw_rec_list *
2316 output_unwabi (unsigned long abi, unsigned long context)
2317 {
2318 unw_rec_list *ptr = alloc_record (unwabi);
2319 ptr->r.record.p.abi = abi;
2320 ptr->r.record.p.context = context;
2321 return ptr;
2322 }
2323
2324 static unw_rec_list *
2325 output_epilogue (unsigned long ecount)
2326 {
2327 unw_rec_list *ptr = alloc_record (epilogue);
2328 ptr->r.record.b.ecount = ecount;
2329 return ptr;
2330 }
2331
2332 static unw_rec_list *
2333 output_label_state (unsigned long label)
2334 {
2335 unw_rec_list *ptr = alloc_record (label_state);
2336 ptr->r.record.b.label = label;
2337 return ptr;
2338 }
2339
2340 static unw_rec_list *
2341 output_copy_state (unsigned long label)
2342 {
2343 unw_rec_list *ptr = alloc_record (copy_state);
2344 ptr->r.record.b.label = label;
2345 return ptr;
2346 }
2347
2348 static unw_rec_list *
2349 output_spill_psprel (unsigned int ab,
2350 unsigned int reg,
2351 unsigned int offset,
2352 unsigned int predicate)
2353 {
2354 unw_rec_list *ptr = alloc_record (predicate ? spill_psprel_p : spill_psprel);
2355 ptr->r.record.x.ab = ab;
2356 ptr->r.record.x.reg = reg;
2357 ptr->r.record.x.where.pspoff = ENCODED_PSP_OFFSET (offset);
2358 ptr->r.record.x.qp = predicate;
2359 return ptr;
2360 }
2361
2362 static unw_rec_list *
2363 output_spill_sprel (unsigned int ab,
2364 unsigned int reg,
2365 unsigned int offset,
2366 unsigned int predicate)
2367 {
2368 unw_rec_list *ptr = alloc_record (predicate ? spill_sprel_p : spill_sprel);
2369 ptr->r.record.x.ab = ab;
2370 ptr->r.record.x.reg = reg;
2371 ptr->r.record.x.where.spoff = offset / 4;
2372 ptr->r.record.x.qp = predicate;
2373 return ptr;
2374 }
2375
2376 static unw_rec_list *
2377 output_spill_reg (unsigned int ab,
2378 unsigned int reg,
2379 unsigned int targ_reg,
2380 unsigned int xy,
2381 unsigned int predicate)
2382 {
2383 unw_rec_list *ptr = alloc_record (predicate ? spill_reg_p : spill_reg);
2384 ptr->r.record.x.ab = ab;
2385 ptr->r.record.x.reg = reg;
2386 ptr->r.record.x.where.reg = targ_reg;
2387 ptr->r.record.x.xy = xy;
2388 ptr->r.record.x.qp = predicate;
2389 return ptr;
2390 }
2391
2392 /* Given a single unw_rec_list record, emit it in the correct
2393 descriptor format via the specified output function. */
2394
2395 static void
2396 process_one_record (unw_rec_list *ptr, vbyte_func f)
2397 {
2398 unsigned int fr_mask, gr_mask;
2399
2400 switch (ptr->r.type)
2401 {
2402 /* This is a dummy record that takes up no space in the output. */
2403 case endp:
2404 break;
2405
2406 case gr_mem:
2407 case fr_mem:
2408 case br_mem:
2409 case frgr_mem:
2410 /* These are taken care of by prologue/prologue_gr. */
2411 break;
2412
2413 case prologue_gr:
2414 case prologue:
2415 if (ptr->r.type == prologue_gr)
2416 output_R2_format (f, ptr->r.record.r.grmask,
2417 ptr->r.record.r.grsave, ptr->r.record.r.rlen);
2418 else
2419 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2420
2421 /* Output descriptor(s) for union of register spills (if any). */
2422 gr_mask = ptr->r.record.r.mask.gr_mem;
2423 fr_mask = ptr->r.record.r.mask.fr_mem;
2424 if (fr_mask)
2425 {
2426 if ((fr_mask & ~0xfUL) == 0)
2427 output_P6_format (f, fr_mem, fr_mask);
2428 else
2429 {
2430 output_P5_format (f, gr_mask, fr_mask);
2431 gr_mask = 0;
2432 }
2433 }
2434 if (gr_mask)
2435 output_P6_format (f, gr_mem, gr_mask);
2436 if (ptr->r.record.r.mask.br_mem)
2437 output_P1_format (f, ptr->r.record.r.mask.br_mem);
2438
2439 /* Output the imask descriptor if necessary: */
2440 if (ptr->r.record.r.mask.i)
2441 output_P4_format (f, ptr->r.record.r.mask.i,
2442 ptr->r.record.r.imask_size);
2443 break;
2444
2445 case body:
2446 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2447 break;
2448 case mem_stack_f:
2449 case mem_stack_v:
2450 output_P7_format (f, ptr->r.type, ptr->r.record.p.t,
2451 ptr->r.record.p.size);
2452 break;
2453 case psp_gr:
2454 case rp_gr:
2455 case pfs_gr:
2456 case preds_gr:
2457 case unat_gr:
2458 case lc_gr:
2459 case fpsr_gr:
2460 case priunat_gr:
2461 case bsp_gr:
2462 case bspstore_gr:
2463 case rnat_gr:
2464 output_P3_format (f, ptr->r.type, ptr->r.record.p.r.gr);
2465 break;
2466 case rp_br:
2467 output_P3_format (f, rp_br, ptr->r.record.p.r.br);
2468 break;
2469 case psp_sprel:
2470 output_P7_format (f, psp_sprel, ptr->r.record.p.off.sp, 0);
2471 break;
2472 case rp_when:
2473 case pfs_when:
2474 case preds_when:
2475 case unat_when:
2476 case lc_when:
2477 case fpsr_when:
2478 output_P7_format (f, ptr->r.type, ptr->r.record.p.t, 0);
2479 break;
2480 case rp_psprel:
2481 case pfs_psprel:
2482 case preds_psprel:
2483 case unat_psprel:
2484 case lc_psprel:
2485 case fpsr_psprel:
2486 case spill_base:
2487 output_P7_format (f, ptr->r.type, ptr->r.record.p.off.psp, 0);
2488 break;
2489 case rp_sprel:
2490 case pfs_sprel:
2491 case preds_sprel:
2492 case unat_sprel:
2493 case lc_sprel:
2494 case fpsr_sprel:
2495 case priunat_sprel:
2496 case bsp_sprel:
2497 case bspstore_sprel:
2498 case rnat_sprel:
2499 output_P8_format (f, ptr->r.type, ptr->r.record.p.off.sp);
2500 break;
2501 case gr_gr:
2502 if (ptr->r.record.p.r.gr < REG_NUM)
2503 {
2504 const unw_rec_list *cur = ptr;
2505
2506 gr_mask = cur->r.record.p.grmask;
2507 while ((cur = cur->r.record.p.next) != NULL)
2508 gr_mask |= cur->r.record.p.grmask;
2509 output_P9_format (f, gr_mask, ptr->r.record.p.r.gr);
2510 }
2511 break;
2512 case br_gr:
2513 if (ptr->r.record.p.r.gr < REG_NUM)
2514 {
2515 const unw_rec_list *cur = ptr;
2516
2517 gr_mask = cur->r.record.p.brmask;
2518 while ((cur = cur->r.record.p.next) != NULL)
2519 gr_mask |= cur->r.record.p.brmask;
2520 output_P2_format (f, gr_mask, ptr->r.record.p.r.gr);
2521 }
2522 break;
2523 case spill_mask:
2524 as_bad (_("spill_mask record unimplemented."));
2525 break;
2526 case priunat_when_gr:
2527 case priunat_when_mem:
2528 case bsp_when:
2529 case bspstore_when:
2530 case rnat_when:
2531 output_P8_format (f, ptr->r.type, ptr->r.record.p.t);
2532 break;
2533 case priunat_psprel:
2534 case bsp_psprel:
2535 case bspstore_psprel:
2536 case rnat_psprel:
2537 output_P8_format (f, ptr->r.type, ptr->r.record.p.off.psp);
2538 break;
2539 case unwabi:
2540 output_P10_format (f, ptr->r.record.p.abi, ptr->r.record.p.context);
2541 break;
2542 case epilogue:
2543 output_B3_format (f, ptr->r.record.b.ecount, ptr->r.record.b.t);
2544 break;
2545 case label_state:
2546 case copy_state:
2547 output_B4_format (f, ptr->r.type, ptr->r.record.b.label);
2548 break;
2549 case spill_psprel:
2550 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2551 ptr->r.record.x.reg, ptr->r.record.x.t,
2552 ptr->r.record.x.where.pspoff);
2553 break;
2554 case spill_sprel:
2555 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2556 ptr->r.record.x.reg, ptr->r.record.x.t,
2557 ptr->r.record.x.where.spoff);
2558 break;
2559 case spill_reg:
2560 output_X2_format (f, ptr->r.record.x.ab, ptr->r.record.x.reg,
2561 ptr->r.record.x.xy >> 1, ptr->r.record.x.xy,
2562 ptr->r.record.x.where.reg, ptr->r.record.x.t);
2563 break;
2564 case spill_psprel_p:
2565 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2566 ptr->r.record.x.ab, ptr->r.record.x.reg,
2567 ptr->r.record.x.t, ptr->r.record.x.where.pspoff);
2568 break;
2569 case spill_sprel_p:
2570 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2571 ptr->r.record.x.ab, ptr->r.record.x.reg,
2572 ptr->r.record.x.t, ptr->r.record.x.where.spoff);
2573 break;
2574 case spill_reg_p:
2575 output_X4_format (f, ptr->r.record.x.qp, ptr->r.record.x.ab,
2576 ptr->r.record.x.reg, ptr->r.record.x.xy >> 1,
2577 ptr->r.record.x.xy, ptr->r.record.x.where.reg,
2578 ptr->r.record.x.t);
2579 break;
2580 default:
2581 as_bad (_("record type not valid"));
2582 break;
2583 }
2584 }
2585
2586 /* Given a unw_rec_list chain, process all the records with
2587 the specified function. */
2588 static void
2589 process_unw_records (unw_rec_list *list, vbyte_func f)
2590 {
2591 unw_rec_list *ptr;
2592 for (ptr = list; ptr; ptr = ptr->next)
2593 process_one_record (ptr, f);
2594 }
2595
2596 /* Determine the size of a record list in bytes. */
2597 static int
2598 calc_record_size (unw_rec_list *list)
2599 {
2600 vbyte_count = 0;
2601 process_unw_records (list, count_output);
2602 return vbyte_count;
2603 }
2604
2605 /* Return the number of bits set in the input value.
2606 Perhaps this has a better place... */
2607 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
2608 # define popcount __builtin_popcount
2609 #else
2610 static int
2611 popcount (unsigned x)
2612 {
2613 static const unsigned char popcnt[16] =
2614 {
2615 0, 1, 1, 2,
2616 1, 2, 2, 3,
2617 1, 2, 2, 3,
2618 2, 3, 3, 4
2619 };
2620
2621 if (x < NELEMS (popcnt))
2622 return popcnt[x];
2623 return popcnt[x % NELEMS (popcnt)] + popcount (x / NELEMS (popcnt));
2624 }
2625 #endif
2626
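/* Worked example for the table-based fallback above (illustrative):
   popcount (0x2d) = popcnt[0x2d % 16] + popcount (0x2d / 16)
                   = popcnt[0xd] + popcnt[0x2]
                   = 3 + 1 = 4,
   which matches the four bits set in 0b101101.  */
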
2627 /* Update IMASK bitmask to reflect the fact that one or more registers
2628 of type TYPE are saved starting at instruction with index T. If N
2629 bits are set in REGMASK, it is assumed that instructions T through
2630 T+N-1 save these registers.
2631
2632 TYPE values:
2633 0: no save
2634 1: instruction saves next fp reg
2635 2: instruction saves next general reg
2636 3: instruction saves next branch reg */
2637 static void
2638 set_imask (unw_rec_list *region,
2639 unsigned long regmask,
2640 unsigned long t,
2641 unsigned int type)
2642 {
2643 unsigned char *imask;
2644 unsigned long imask_size;
2645 unsigned int i;
2646 int pos;
2647
2648 imask = region->r.record.r.mask.i;
2649 imask_size = region->r.record.r.imask_size;
2650 if (!imask)
2651 {
2652 imask_size = (region->r.record.r.rlen * 2 + 7) / 8 + 1;
2653 imask = XCNEWVEC (unsigned char, imask_size);
2654
2655 region->r.record.r.imask_size = imask_size;
2656 region->r.record.r.mask.i = imask;
2657 }
2658
2659 i = (t / 4) + 1;
2660 pos = 2 * (3 - t % 4);
2661 while (regmask)
2662 {
2663 if (i >= imask_size)
2664 {
2665 as_bad (_("Ignoring attempt to spill beyond end of region"));
2666 return;
2667 }
2668
2669 imask[i] |= (type & 0x3) << pos;
2670
2671 regmask &= (regmask - 1);
2672 pos -= 2;
2673 if (pos < 0)
2674 {
2675 pos = 0;
2676 ++i;
2677 }
2678 }
2679 }
2680
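/* Worked example (illustrative): recording a general-register save (TYPE 2)
   at slot t == 5 gives

     i   = t / 4 + 1       = 2
     pos = 2 * (3 - t % 4) = 4
     imask[2] |= 2 << 4, i.e. imask[2] |= 0x20.

   Each imask byte thus holds four two-bit slot entries, with the earliest
   slot of the group in the two most significant bits.  */
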
2681 /* Return the number of instruction slots from FIRST_ADDR to SLOT_ADDR.
2682 SLOT_FRAG is the frag containing SLOT_ADDR, and FIRST_FRAG is the frag
2683 containing FIRST_ADDR. If BEFORE_RELAX, then we use worst-case estimates
2684 for frag sizes. */
2685
2686 static unsigned long
2687 slot_index (unsigned long slot_addr,
2688 fragS *slot_frag,
2689 unsigned long first_addr,
2690 fragS *first_frag,
2691 int before_relax)
2692 {
2693 unsigned long s_index = 0;
2694
2695 /* First time we are called, the initial address and frag are invalid. */
2696 if (first_addr == 0)
2697 return 0;
2698
2699 /* If the two addresses are in different frags, then we need to add in
2700 the remaining size of this frag, and then the entire size of intermediate
2701 frags. */
2702 while (slot_frag != first_frag)
2703 {
2704 unsigned long start_addr = (unsigned long) &first_frag->fr_literal;
2705
2706 if (! before_relax)
2707 {
2708 /* We can get the final addresses only during and after
2709 relaxation. */
2710 if (first_frag->fr_next && first_frag->fr_next->fr_address)
2711 s_index += 3 * ((first_frag->fr_next->fr_address
2712 - first_frag->fr_address
2713 - first_frag->fr_fix) >> 4);
2714 }
2715 else
2716 /* We don't know what the final addresses will be. We try our
2717 best to estimate. */
2718 switch (first_frag->fr_type)
2719 {
2720 default:
2721 break;
2722
2723 case rs_space:
2724 as_fatal (_("Only constant space allocation is supported"));
2725 break;
2726
2727 case rs_align:
2728 case rs_align_code:
2729 case rs_align_test:
2730 /* Take alignment into account. Assume the worst case
2731 before relaxation. */
2732 s_index += 3 * ((1 << first_frag->fr_offset) >> 4);
2733 break;
2734
2735 case rs_org:
2736 if (first_frag->fr_symbol)
2737 {
2738 as_fatal (_("Only constant offsets are supported"));
2739 break;
2740 }
2741 /* Fall through. */
2742 case rs_fill:
2743 s_index += 3 * (first_frag->fr_offset >> 4);
2744 break;
2745 }
2746
2747 /* Add in the full size of the frag converted to instruction slots. */
2748 s_index += 3 * (first_frag->fr_fix >> 4);
2749 /* Subtract away the initial part before first_addr. */
2750 s_index -= (3 * ((first_addr >> 4) - (start_addr >> 4))
2751 + ((first_addr & 0x3) - (start_addr & 0x3)));
2752
2753 /* Move to the beginning of the next frag. */
2754 first_frag = first_frag->fr_next;
2755 first_addr = (unsigned long) &first_frag->fr_literal;
2756
2757 /* This can happen if there is section switching in the middle of a
2758 function, causing the frag chain for the function to be broken.
2759 It is too difficult to recover safely from this problem, so we just
2760 exit with an error. */
2761 if (first_frag == NULL)
2762 as_fatal (_("Section switching in code is not supported."));
2763 }
2764
2765 /* Add in the used part of the last frag. */
2766 s_index += (3 * ((slot_addr >> 4) - (first_addr >> 4))
2767 + ((slot_addr & 0x3) - (first_addr & 0x3)));
2768 return s_index;
2769 }
2770
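/* Worked example (illustrative): with both addresses in the same frag, a
   FIRST_ADDR in slot 0 of some bundle and a SLOT_ADDR in slot 1 of the
   bundle two 16-byte bundles later yield

     3 * ((slot_addr >> 4) - (first_addr >> 4)) + (1 - 0) = 3 * 2 + 1 = 7

   instruction slots, since every bundle contributes three slots and the
   low bits of an address encode the slot within its bundle.  */
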
2771 /* Optimize unwind record directives. */
2772
2773 static unw_rec_list *
2774 optimize_unw_records (unw_rec_list *list)
2775 {
2776 if (!list)
2777 return NULL;
2778
2779 /* If the only unwind record is ".prologue" or ".prologue" followed
2780 by ".body", then we can optimize the unwind directives away. */
2781 if (list->r.type == prologue
2782 && (list->next->r.type == endp
2783 || (list->next->r.type == body && list->next->next->r.type == endp)))
2784 return NULL;
2785
2786 return list;
2787 }
2788
2789 /* Given a complete record list, process any records which have
2790 unresolved fields (e.g., length counts for a prologue). After
2791 this has been run, all necessary information should be available
2792 within each record to generate an image. */
2793
2794 static void
2795 fixup_unw_records (unw_rec_list *list, int before_relax)
2796 {
2797 unw_rec_list *ptr, *region = 0;
2798 unsigned long first_addr = 0, rlen = 0, t;
2799 fragS *first_frag = 0;
2800
2801 for (ptr = list; ptr; ptr = ptr->next)
2802 {
2803 if (ptr->slot_number == SLOT_NUM_NOT_SET)
2804 as_bad (_("Insn slot not set in unwind record."));
2805 t = slot_index (ptr->slot_number, ptr->slot_frag,
2806 first_addr, first_frag, before_relax);
2807 switch (ptr->r.type)
2808 {
2809 case prologue:
2810 case prologue_gr:
2811 case body:
2812 {
2813 unw_rec_list *last;
2814 int size;
2815 unsigned long last_addr = 0;
2816 fragS *last_frag = NULL;
2817
2818 first_addr = ptr->slot_number;
2819 first_frag = ptr->slot_frag;
2820 /* Find either the next body/prologue start, or the end of
2821 the function, and determine the size of the region. */
2822 for (last = ptr->next; last != NULL; last = last->next)
2823 if (last->r.type == prologue || last->r.type == prologue_gr
2824 || last->r.type == body || last->r.type == endp)
2825 {
2826 last_addr = last->slot_number;
2827 last_frag = last->slot_frag;
2828 break;
2829 }
2830 size = slot_index (last_addr, last_frag, first_addr, first_frag,
2831 before_relax);
2832 rlen = ptr->r.record.r.rlen = size;
2833 if (ptr->r.type == body)
2834 /* End of region. */
2835 region = 0;
2836 else
2837 region = ptr;
2838 break;
2839 }
2840 case epilogue:
2841 if (t < rlen)
2842 ptr->r.record.b.t = rlen - 1 - t;
2843 else
2844 /* This happens when a memory-stack-less procedure uses a
2845 ".restore sp" directive at the end of a region to pop
2846 the frame state. */
2847 ptr->r.record.b.t = 0;
2848 break;
2849
2850 case mem_stack_f:
2851 case mem_stack_v:
2852 case rp_when:
2853 case pfs_when:
2854 case preds_when:
2855 case unat_when:
2856 case lc_when:
2857 case fpsr_when:
2858 case priunat_when_gr:
2859 case priunat_when_mem:
2860 case bsp_when:
2861 case bspstore_when:
2862 case rnat_when:
2863 ptr->r.record.p.t = t;
2864 break;
2865
2866 case spill_reg:
2867 case spill_sprel:
2868 case spill_psprel:
2869 case spill_reg_p:
2870 case spill_sprel_p:
2871 case spill_psprel_p:
2872 ptr->r.record.x.t = t;
2873 break;
2874
2875 case frgr_mem:
2876 if (!region)
2877 {
2878 as_bad (_("frgr_mem record before region record!"));
2879 return;
2880 }
2881 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2882 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2883 set_imask (region, ptr->r.record.p.frmask, t, 1);
2884 set_imask (region, ptr->r.record.p.grmask, t, 2);
2885 break;
2886 case fr_mem:
2887 if (!region)
2888 {
2889 as_bad (_("fr_mem record before region record!"));
2890 return;
2891 }
2892 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2893 set_imask (region, ptr->r.record.p.frmask, t, 1);
2894 break;
2895 case gr_mem:
2896 if (!region)
2897 {
2898 as_bad (_("gr_mem record before region record!"));
2899 return;
2900 }
2901 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2902 set_imask (region, ptr->r.record.p.grmask, t, 2);
2903 break;
2904 case br_mem:
2905 if (!region)
2906 {
2907 as_bad (_("br_mem record before region record!"));
2908 return;
2909 }
2910 region->r.record.r.mask.br_mem |= ptr->r.record.p.brmask;
2911 set_imask (region, ptr->r.record.p.brmask, t, 3);
2912 break;
2913
2914 case gr_gr:
2915 if (!region)
2916 {
2917 as_bad (_("gr_gr record before region record!"));
2918 return;
2919 }
2920 set_imask (region, ptr->r.record.p.grmask, t, 2);
2921 break;
2922 case br_gr:
2923 if (!region)
2924 {
2925 as_bad (_("br_gr record before region record!"));
2926 return;
2927 }
2928 set_imask (region, ptr->r.record.p.brmask, t, 3);
2929 break;
2930
2931 default:
2932 break;
2933 }
2934 }
2935 }
2936
2937 /* Estimate the size of a frag before relaxing. We only have one type of frag
2938 to handle here, which is the unwind info frag. */
2939
2940 int
2941 ia64_estimate_size_before_relax (fragS *frag,
2942 asection *segtype ATTRIBUTE_UNUSED)
2943 {
2944 unw_rec_list *list;
2945 int len, size, pad;
2946
2947 /* ??? This code is identical to the first part of ia64_convert_frag. */
2948 list = (unw_rec_list *) frag->fr_opcode;
2949 fixup_unw_records (list, 0);
2950
2951 len = calc_record_size (list);
2952 /* Pad to pointer-size boundary. */
2953 pad = len % md.pointer_size;
2954 if (pad != 0)
2955 len += md.pointer_size - pad;
2956 /* Add 8 for the header. */
2957 size = len + 8;
2958 /* Add a pointer for the personality offset. */
2959 if (frag->fr_offset)
2960 size += md.pointer_size;
2961
2962 /* fr_var carries the max_chars that we created the fragment with.
2963 We must, of course, have allocated enough memory earlier. */
2964 gas_assert (frag->fr_var >= size);
2965
2966 return frag->fr_fix + size;
2967 }
2968
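/* Size arithmetic, as a quick illustration of the computation above
   (assuming LP64, i.e. md.pointer_size == 8): a record list of 13 bytes
   is padded to 16, the 8-byte header brings it to 24, and a personality
   pointer (frag->fr_offset != 0) makes it 32.  ia64_convert_frag below
   must arrive at exactly the same number.  */
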
2969 /* This function converts a rs_machine_dependent variant frag into a
2970 normal fill frag with the unwind image from the record list. */
2971 void
2972 ia64_convert_frag (fragS *frag)
2973 {
2974 unw_rec_list *list;
2975 int len, size, pad;
2976 valueT flag_value;
2977
2978 /* ??? This code is identical to ia64_estimate_size_before_relax. */
2979 list = (unw_rec_list *) frag->fr_opcode;
2980 fixup_unw_records (list, 0);
2981
2982 len = calc_record_size (list);
2983 /* Pad to pointer-size boundary. */
2984 pad = len % md.pointer_size;
2985 if (pad != 0)
2986 len += md.pointer_size - pad;
2987 /* Add 8 for the header. */
2988 size = len + 8;
2989 /* Add a pointer for the personality offset. */
2990 if (frag->fr_offset)
2991 size += md.pointer_size;
2992
2993 /* fr_var carries the max_chars that we created the fragment with.
2994 We must, of course, have allocated enough memory earlier. */
2995 gas_assert (frag->fr_var >= size);
2996
2997 /* Initialize the header area. fr_offset is initialized with
2998 unwind.personality_routine. */
2999 if (frag->fr_offset)
3000 {
3001 if (md.flags & EF_IA_64_ABI64)
3002 flag_value = (bfd_vma) 3 << 32;
3003 else
3004 /* 32-bit unwind info block. */
3005 flag_value = (bfd_vma) 0x1003 << 32;
3006 }
3007 else
3008 flag_value = 0;
3009
3010 md_number_to_chars (frag->fr_literal,
3011 (((bfd_vma) 1 << 48) /* Version. */
3012 | flag_value /* U & E handler flags. */
3013 | (len / md.pointer_size)), /* Length. */
3014 8);
3015
3016 /* Skip the header. */
3017 vbyte_mem_ptr = frag->fr_literal + 8;
3018 process_unw_records (list, output_vbyte_mem);
3019
3020 /* Fill the padding bytes with zeros. */
3021 if (pad != 0)
3022 md_number_to_chars (frag->fr_literal + len + 8 - md.pointer_size + pad, 0,
3023 md.pointer_size - pad);
3024 /* Fill the unwind personality with zeros. */
3025 if (frag->fr_offset)
3026 md_number_to_chars (frag->fr_literal + size - md.pointer_size, 0,
3027 md.pointer_size);
3028
3029 frag->fr_fix += size;
3030 frag->fr_type = rs_fill;
3031 frag->fr_var = 0;
3032 frag->fr_offset = 0;
3033 }
3034
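/* Header layout, shown on a concrete value (illustrative, LP64 case):
   for len == 24 and a personality routine under EF_IA_64_ABI64 the
   8-byte header written above is

     ((bfd_vma) 1 << 48)    version 1
   | ((bfd_vma) 3 << 32)    U and E handler flags
   | (24 / 8)               length in 8-byte words

   i.e. 0x0001000300000003; without a personality routine the flag field
   is simply zero.  */
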
3035 static int
3036 parse_predicate_and_operand (expressionS *e, unsigned *qp, const char *po)
3037 {
3038 int sep = parse_operand_and_eval (e, ',');
3039
3040 *qp = e->X_add_number - REG_P;
3041 if (e->X_op != O_register || *qp > 63)
3042 {
3043 as_bad (_("First operand to .%s must be a predicate"), po);
3044 *qp = 0;
3045 }
3046 else if (*qp == 0)
3047 as_warn (_("Pointless use of p0 as first operand to .%s"), po);
3048 if (sep == ',')
3049 sep = parse_operand_and_eval (e, ',');
3050 else
3051 e->X_op = O_absent;
3052 return sep;
3053 }
3054
3055 static void
3056 convert_expr_to_ab_reg (const expressionS *e,
3057 unsigned int *ab,
3058 unsigned int *regp,
3059 const char *po,
3060 int n)
3061 {
3062 unsigned int reg = e->X_add_number;
3063
3064 *ab = *regp = 0; /* Anything valid is good here. */
3065
3066 if (e->X_op != O_register)
3067 reg = REG_GR; /* Anything invalid is good here. */
3068
3069 if (reg >= (REG_GR + 4) && reg <= (REG_GR + 7))
3070 {
3071 *ab = 0;
3072 *regp = reg - REG_GR;
3073 }
3074 else if ((reg >= (REG_FR + 2) && reg <= (REG_FR + 5))
3075 || (reg >= (REG_FR + 16) && reg <= (REG_FR + 31)))
3076 {
3077 *ab = 1;
3078 *regp = reg - REG_FR;
3079 }
3080 else if (reg >= (REG_BR + 1) && reg <= (REG_BR + 5))
3081 {
3082 *ab = 2;
3083 *regp = reg - REG_BR;
3084 }
3085 else
3086 {
3087 *ab = 3;
3088 switch (reg)
3089 {
3090 case REG_PR: *regp = 0; break;
3091 case REG_PSP: *regp = 1; break;
3092 case REG_PRIUNAT: *regp = 2; break;
3093 case REG_BR + 0: *regp = 3; break;
3094 case REG_AR + AR_BSP: *regp = 4; break;
3095 case REG_AR + AR_BSPSTORE: *regp = 5; break;
3096 case REG_AR + AR_RNAT: *regp = 6; break;
3097 case REG_AR + AR_UNAT: *regp = 7; break;
3098 case REG_AR + AR_FPSR: *regp = 8; break;
3099 case REG_AR + AR_PFS: *regp = 9; break;
3100 case REG_AR + AR_LC: *regp = 10; break;
3101
3102 default:
3103 as_bad (_("Operand %d to .%s must be a preserved register"), n, po);
3104 break;
3105 }
3106 }
3107 }
3108
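/* A few sample mappings produced by the routine above (illustrative):

     r5       ->  ab 0, reg 5      (preserved general registers r4-r7)
     f16      ->  ab 1, reg 16     (preserved floating-point registers)
     b1       ->  ab 2, reg 1      (preserved branch registers b1-b5)
     ar.unat  ->  ab 3, reg 7      (special registers, fixed encodings)

   Anything outside these sets draws the "must be a preserved register"
   diagnostic.  */
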
3109 static void
3110 convert_expr_to_xy_reg (const expressionS *e,
3111 unsigned int *xy,
3112 unsigned int *regp,
3113 const char *po,
3114 int n)
3115 {
3116 unsigned int reg = e->X_add_number;
3117
3118 *xy = *regp = 0; /* Anything valid is good here. */
3119
3120 if (e->X_op != O_register)
3121 reg = REG_GR; /* Anything invalid is good here. */
3122
3123 if (reg >= (REG_GR + 1) && reg <= (REG_GR + 127))
3124 {
3125 *xy = 0;
3126 *regp = reg - REG_GR;
3127 }
3128 else if (reg >= (REG_FR + 2) && reg <= (REG_FR + 127))
3129 {
3130 *xy = 1;
3131 *regp = reg - REG_FR;
3132 }
3133 else if (reg >= REG_BR && reg <= (REG_BR + 7))
3134 {
3135 *xy = 2;
3136 *regp = reg - REG_BR;
3137 }
3138 else
3139 as_bad (_("Operand %d to .%s must be a writable register"), n, po);
3140 }
3141
3142 static void
3143 dot_align (int arg)
3144 {
3145 /* The current frag is an alignment frag. */
3146 align_frag = frag_now;
3147 s_align_bytes (arg);
3148 }
3149
3150 static void
3151 dot_radix (int dummy ATTRIBUTE_UNUSED)
3152 {
3153 char *radix;
3154 int ch;
3155
3156 SKIP_WHITESPACE ();
3157
3158 if (is_it_end_of_statement ())
3159 return;
3160 ch = get_symbol_name (&radix);
3161 ia64_canonicalize_symbol_name (radix);
3162 if (strcasecmp (radix, "C"))
3163 as_bad (_("Radix `%s' unsupported or invalid"), radix);
3164 (void) restore_line_pointer (ch);
3165 demand_empty_rest_of_line ();
3166 }
3167
3168 /* Helper function for .loc directives. If the assembler is not generating
3169 line number info, then we need to remember which instructions have a .loc
3170 directive, and only call dwarf2_gen_line_info for those instructions. */
3171
3172 static void
3173 dot_loc (int x)
3174 {
3175 CURR_SLOT.loc_directive_seen = 1;
3176 dwarf2_directive_loc (x);
3177 }
3178
3179 /* .sbss, .bss etc. are macros that expand into ".section SECNAME". */
3180 static void
3181 dot_special_section (int which)
3182 {
3183 set_section ((char *) special_section_name[which]);
3184 }
3185
3186 /* Return -1 for warning and 0 for error. */
3187
3188 static int
3189 unwind_diagnostic (const char * region, const char *directive)
3190 {
3191 if (md.unwind_check == unwind_check_warning)
3192 {
3193 as_warn (_(".%s outside of %s"), directive, region);
3194 return -1;
3195 }
3196 else
3197 {
3198 as_bad (_(".%s outside of %s"), directive, region);
3199 ignore_rest_of_line ();
3200 return 0;
3201 }
3202 }
3203
3204 /* Return 1 if a directive is in a procedure, -1 if a directive isn't in
3205 a procedure but the unwind directive check is set to warning, 0 if
3206 a directive isn't in a procedure and the unwind directive check is set
3207 to error. */
3208
3209 static int
3210 in_procedure (const char *directive)
3211 {
3212 if (unwind.proc_pending.sym
3213 && (!unwind.saved_text_seg || strcmp (directive, "endp") == 0))
3214 return 1;
3215 return unwind_diagnostic ("procedure", directive);
3216 }
3217
3218 /* Return 1 if a directive is in a prologue, -1 if a directive isn't in
3219 a prologue but the unwind directive check is set to warning, 0 if
3220 a directive isn't in a prologue and the unwind directive check is set
3221 to error. */
3222
3223 static int
3224 in_prologue (const char *directive)
3225 {
3226 int in = in_procedure (directive);
3227
3228 if (in > 0 && !unwind.prologue)
3229 in = unwind_diagnostic ("prologue", directive);
3230 check_pending_save ();
3231 return in;
3232 }
3233
3234 /* Return 1 if a directive is in a body, -1 if a directive isn't in
3235 a body but the unwind directive check is set to warning, 0 if
3236 a directive isn't in a body and the unwind directive check is set
3237 to error. */
3238
3239 static int
3240 in_body (const char *directive)
3241 {
3242 int in = in_procedure (directive);
3243
3244 if (in > 0 && !unwind.body)
3245 in = unwind_diagnostic ("body region", directive);
3246 return in;
3247 }
3248
3249 static void
3250 add_unwind_entry (unw_rec_list *ptr, int sep)
3251 {
3252 if (ptr)
3253 {
3254 if (unwind.tail)
3255 unwind.tail->next = ptr;
3256 else
3257 unwind.list = ptr;
3258 unwind.tail = ptr;
3259
3260 /* The current entry can in fact be a chain of unwind entries. */
3261 if (unwind.current_entry == NULL)
3262 unwind.current_entry = ptr;
3263 }
3264
3265 /* The current entry can in fact be a chain of unwind entries. */
3266 if (unwind.current_entry == NULL)
3267 unwind.current_entry = ptr;
3268
3269 if (sep == ',')
3270 {
3271 char *name;
3272 /* Parse a tag permitted for the current directive. */
3273 int ch;
3274
3275 SKIP_WHITESPACE ();
3276 ch = get_symbol_name (&name);
3277 /* FIXME: For now, just issue a warning that this isn't implemented. */
3278 {
3279 static int warned;
3280
3281 if (!warned)
3282 {
3283 warned = 1;
3284 as_warn (_("Tags on unwind pseudo-ops aren't supported yet"));
3285 }
3286 }
3287 (void) restore_line_pointer (ch);
3288 }
3289 if (sep != NOT_A_CHAR)
3290 demand_empty_rest_of_line ();
3291 }
3292
3293 static void
3294 dot_fframe (int dummy ATTRIBUTE_UNUSED)
3295 {
3296 expressionS e;
3297 int sep;
3298
3299 if (!in_prologue ("fframe"))
3300 return;
3301
3302 sep = parse_operand_and_eval (&e, ',');
3303
3304 if (e.X_op != O_constant)
3305 {
3306 as_bad (_("First operand to .fframe must be a constant"));
3307 e.X_add_number = 0;
3308 }
3309 add_unwind_entry (output_mem_stack_f (e.X_add_number), sep);
3310 }
3311
3312 static void
3313 dot_vframe (int dummy ATTRIBUTE_UNUSED)
3314 {
3315 expressionS e;
3316 unsigned reg;
3317 int sep;
3318
3319 if (!in_prologue ("vframe"))
3320 return;
3321
3322 sep = parse_operand_and_eval (&e, ',');
3323 reg = e.X_add_number - REG_GR;
3324 if (e.X_op != O_register || reg > 127)
3325 {
3326 as_bad (_("First operand to .vframe must be a general register"));
3327 reg = 0;
3328 }
3329 add_unwind_entry (output_mem_stack_v (), sep);
3330 if (! (unwind.prologue_mask & 2))
3331 add_unwind_entry (output_psp_gr (reg), NOT_A_CHAR);
3332 else if (reg != unwind.prologue_gr
3333 + (unsigned) popcount (unwind.prologue_mask & -(2 << 1)))
3334 as_warn (_("Operand of .vframe contradicts .prologue"));
3335 }
3336
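/* Consistency check above, on an example (illustrative): after
   ".prologue 0xe, r32" the mask says rp, ar.pfs and psp are saved to
   consecutive GRs starting at r32, so psp lands in
   r32 + popcount (0xe & ~0x3) = r34.  A ".vframe r34" therefore matches;
   any other register triggers the "contradicts .prologue" warning.  */
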
3337 static void
3338 dot_vframesp (int psp)
3339 {
3340 expressionS e;
3341 int sep;
3342
3343 if (psp)
3344 as_warn (_(".vframepsp is meaningless, assuming .vframesp was meant"));
3345
3346 if (!in_prologue ("vframesp"))
3347 return;
3348
3349 sep = parse_operand_and_eval (&e, ',');
3350 if (e.X_op != O_constant)
3351 {
3352 as_bad (_("Operand to .vframesp must be a constant (sp-relative offset)"));
3353 e.X_add_number = 0;
3354 }
3355 add_unwind_entry (output_mem_stack_v (), sep);
3356 add_unwind_entry (output_psp_sprel (e.X_add_number), NOT_A_CHAR);
3357 }
3358
3359 static void
3360 dot_save (int dummy ATTRIBUTE_UNUSED)
3361 {
3362 expressionS e1, e2;
3363 unsigned reg1, reg2;
3364 int sep;
3365
3366 if (!in_prologue ("save"))
3367 return;
3368
3369 sep = parse_operand_and_eval (&e1, ',');
3370 if (sep == ',')
3371 sep = parse_operand_and_eval (&e2, ',');
3372 else
3373 e2.X_op = O_absent;
3374
3375 reg1 = e1.X_add_number;
3376 /* Make sure it's a valid ar.xxx reg, or it's br0, aka 'rp'. */
3377 if (e1.X_op != O_register)
3378 {
3379 as_bad (_("First operand to .save not a register"));
3380 reg1 = REG_PR; /* Anything valid is good here. */
3381 }
3382 reg2 = e2.X_add_number - REG_GR;
3383 if (e2.X_op != O_register || reg2 > 127)
3384 {
3385 as_bad (_("Second operand to .save not a valid register"));
3386 reg2 = 0;
3387 }
3388 switch (reg1)
3389 {
3390 case REG_AR + AR_BSP:
3391 add_unwind_entry (output_bsp_when (), sep);
3392 add_unwind_entry (output_bsp_gr (reg2), NOT_A_CHAR);
3393 break;
3394 case REG_AR + AR_BSPSTORE:
3395 add_unwind_entry (output_bspstore_when (), sep);
3396 add_unwind_entry (output_bspstore_gr (reg2), NOT_A_CHAR);
3397 break;
3398 case REG_AR + AR_RNAT:
3399 add_unwind_entry (output_rnat_when (), sep);
3400 add_unwind_entry (output_rnat_gr (reg2), NOT_A_CHAR);
3401 break;
3402 case REG_AR + AR_UNAT:
3403 add_unwind_entry (output_unat_when (), sep);
3404 add_unwind_entry (output_unat_gr (reg2), NOT_A_CHAR);
3405 break;
3406 case REG_AR + AR_FPSR:
3407 add_unwind_entry (output_fpsr_when (), sep);
3408 add_unwind_entry (output_fpsr_gr (reg2), NOT_A_CHAR);
3409 break;
3410 case REG_AR + AR_PFS:
3411 add_unwind_entry (output_pfs_when (), sep);
3412 if (! (unwind.prologue_mask & 4))
3413 add_unwind_entry (output_pfs_gr (reg2), NOT_A_CHAR);
3414 else if (reg2 != unwind.prologue_gr
3415 + (unsigned) popcount (unwind.prologue_mask & -(4 << 1)))
3416 as_warn (_("Second operand of .save contradicts .prologue"));
3417 break;
3418 case REG_AR + AR_LC:
3419 add_unwind_entry (output_lc_when (), sep);
3420 add_unwind_entry (output_lc_gr (reg2), NOT_A_CHAR);
3421 break;
3422 case REG_BR:
3423 add_unwind_entry (output_rp_when (), sep);
3424 if (! (unwind.prologue_mask & 8))
3425 add_unwind_entry (output_rp_gr (reg2), NOT_A_CHAR);
3426 else if (reg2 != unwind.prologue_gr)
3427 as_warn (_("Second operand of .save contradicts .prologue"));
3428 break;
3429 case REG_PR:
3430 add_unwind_entry (output_preds_when (), sep);
3431 if (! (unwind.prologue_mask & 1))
3432 add_unwind_entry (output_preds_gr (reg2), NOT_A_CHAR);
3433 else if (reg2 != unwind.prologue_gr
3434 + (unsigned) popcount (unwind.prologue_mask & -(1 << 1)))
3435 as_warn (_("Second operand of .save contradicts .prologue"));
3436 break;
3437 case REG_PRIUNAT:
3438 add_unwind_entry (output_priunat_when_gr (), sep);
3439 add_unwind_entry (output_priunat_gr (reg2), NOT_A_CHAR);
3440 break;
3441 default:
3442 as_bad (_("First operand to .save not a valid register"));
3443 add_unwind_entry (NULL, sep);
3444 break;
3445 }
3446 }
3447
3448 static void
3449 dot_restore (int dummy ATTRIBUTE_UNUSED)
3450 {
3451 expressionS e1;
3452 unsigned long ecount; /* # of _additional_ regions to pop */
3453 int sep;
3454
3455 if (!in_body ("restore"))
3456 return;
3457
3458 sep = parse_operand_and_eval (&e1, ',');
3459 if (e1.X_op != O_register || e1.X_add_number != REG_GR + 12)
3460 as_bad (_("First operand to .restore must be stack pointer (sp)"));
3461
3462 if (sep == ',')
3463 {
3464 expressionS e2;
3465
3466 sep = parse_operand_and_eval (&e2, ',');
3467 if (e2.X_op != O_constant || e2.X_add_number < 0)
3468 {
3469 as_bad (_("Second operand to .restore must be a constant >= 0"));
3470 e2.X_add_number = 0;
3471 }
3472 ecount = e2.X_add_number;
3473 }
3474 else
3475 ecount = unwind.prologue_count - 1;
3476
3477 if (ecount >= unwind.prologue_count)
3478 {
3479 as_bad (_("Epilogue count of %lu exceeds number of nested prologues (%u)"),
3480 ecount + 1, unwind.prologue_count);
3481 ecount = 0;
3482 }
3483
3484 add_unwind_entry (output_epilogue (ecount), sep);
3485
3486 if (ecount < unwind.prologue_count)
3487 unwind.prologue_count -= ecount + 1;
3488 else
3489 unwind.prologue_count = 0;
3490 }
3491
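/* Examples of the epilogue count handling above (illustrative): with two
   nested prologues pending, ".restore sp, 0" pops only the innermost one
   (unwind.prologue_count drops from 2 to 1), while a bare ".restore sp"
   defaults ecount to prologue_count - 1 and unwinds all of them at once
   (the count drops to 0).  */
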
3492 static void
3493 dot_restorereg (int pred)
3494 {
3495 unsigned int qp, ab, reg;
3496 expressionS e;
3497 int sep;
3498 const char * const po = pred ? "restorereg.p" : "restorereg";
3499
3500 if (!in_procedure (po))
3501 return;
3502
3503 if (pred)
3504 sep = parse_predicate_and_operand (&e, &qp, po);
3505 else
3506 {
3507 sep = parse_operand_and_eval (&e, ',');
3508 qp = 0;
3509 }
3510 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
3511
3512 add_unwind_entry (output_spill_reg (ab, reg, 0, 0, qp), sep);
3513 }
3514
3515 static const char *special_linkonce_name[] =
3516 {
3517 ".gnu.linkonce.ia64unw.", ".gnu.linkonce.ia64unwi."
3518 };
3519
3520 static void
3521 start_unwind_section (const segT text_seg, int sec_index)
3522 {
3523 /*
3524 Use a slightly ugly scheme to derive the unwind section names from
3525 the text section name:
3526
3527      text sect.           unwind table sect.
3528      name:                name:                       comments:
3529      -------------------  --------------------------  --------------------------------
3530      .text                .IA_64.unwind
3531      .text.foo            .IA_64.unwind.text.foo
3532      .foo                 .IA_64.unwind.foo
3533      .gnu.linkonce.t.foo
3534                           .gnu.linkonce.ia64unw.foo
3535      _info                .IA_64.unwind_info          gas issues error message (ditto)
3536      _infoFOO             .IA_64.unwind_infoFOO       gas issues error message (ditto)
3537
3538 This mapping is done so that:
3539
3540 (a) An object file with unwind info only in .text will use
3541 unwind section names .IA_64.unwind and .IA_64.unwind_info.
3542 This follows the letter of the ABI and also ensures backwards
3543 compatibility with older toolchains.
3544
3545 (b) An object file with unwind info in multiple text sections
3546 will use separate unwind sections for each text section.
3547 This allows us to properly set the "sh_info" and "sh_link"
3548 fields in SHT_IA_64_UNWIND as required by the ABI and also
3549 lets GNU ld support programs with multiple segments
3550 containing unwind info (as might be the case for certain
3551 embedded applications).
3552
3553 (c) An error is issued if there would be a name clash.
3554 */
3555
3556 const char *text_name, *sec_text_name;
3557 char *sec_name;
3558 const char *prefix = special_section_name [sec_index];
3559 const char *suffix;
3560
3561 sec_text_name = segment_name (text_seg);
3562 text_name = sec_text_name;
3563 if (strncmp (text_name, "_info", 5) == 0)
3564 {
3565 as_bad (_("Illegal section name `%s' (causes unwind section name clash)"),
3566 text_name);
3567 ignore_rest_of_line ();
3568 return;
3569 }
3570 if (strcmp (text_name, ".text") == 0)
3571 text_name = "";
3572
3573 /* Build the unwind section name by appending the (possibly stripped)
3574 text section name to the unwind prefix. */
3575 suffix = text_name;
3576 if (strncmp (text_name, ".gnu.linkonce.t.",
3577 sizeof (".gnu.linkonce.t.") - 1) == 0)
3578 {
3579 prefix = special_linkonce_name [sec_index - SPECIAL_SECTION_UNWIND];
3580 suffix += sizeof (".gnu.linkonce.t.") - 1;
3581 }
3582
3583 sec_name = concat (prefix, suffix, NULL);
3584
3585 /* Handle COMDAT group. */
3586 if ((text_seg->flags & SEC_LINK_ONCE) != 0
3587 && (elf_section_flags (text_seg) & SHF_GROUP) != 0)
3588 {
3589 char *section;
3590 const char *group_name = elf_group_name (text_seg);
3591
3592 if (group_name == NULL)
3593 {
3594 as_bad (_("Group section `%s' has no group signature"),
3595 sec_text_name);
3596 ignore_rest_of_line ();
3597 free (sec_name);
3598 return;
3599 }
3600
3601 /* We have to construct a fake section directive. */
3602 section = concat (sec_name, ",\"aG\",@progbits,", group_name, ",comdat", NULL);
3603 set_section (section);
3604 free (section);
3605 }
3606 else
3607 {
3608 set_section (sec_name);
3609 bfd_set_section_flags (now_seg, SEC_LOAD | SEC_ALLOC | SEC_READONLY);
3610 }
3611
3612 elf_linked_to_section (now_seg) = text_seg;
3613 free (sec_name);
3614 }
3615
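/* Example of the fake section directive built above (illustrative): for a
   text section ".gnu.linkonce.t.foo" that is a member of COMDAT group "foo",
   the unwind-info variant ends up as

     .gnu.linkonce.ia64unwi.foo,"aG",@progbits,foo,comdat

   which is handed to set_section () just as if it had appeared in the
   source file.  */
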
3616 static void
3617 generate_unwind_image (const segT text_seg)
3618 {
3619 int size, pad;
3620 unw_rec_list *list;
3621
3622 /* Mark the end of the unwind info, so that we can compute the size of the
3623 last unwind region. */
3624 add_unwind_entry (output_endp (), NOT_A_CHAR);
3625
3626 /* Force out pending instructions, to make sure all unwind records have
3627 a valid slot_number field. */
3628 ia64_flush_insns ();
3629
3630 /* Generate the unwind record. */
3631 list = optimize_unw_records (unwind.list);
3632 fixup_unw_records (list, 1);
3633 size = calc_record_size (list);
3634
3635 if (size > 0 || unwind.force_unwind_entry)
3636 {
3637 unwind.force_unwind_entry = 0;
3638 /* Pad to pointer-size boundary. */
3639 pad = size % md.pointer_size;
3640 if (pad != 0)
3641 size += md.pointer_size - pad;
3642 /* Add 8 for the header. */
3643 size += 8;
3644 /* Add a pointer for the personality offset. */
3645 if (unwind.personality_routine)
3646 size += md.pointer_size;
3647 }
3648
3649 /* If there are unwind records, switch sections, and output the info. */
3650 if (size != 0)
3651 {
3652 expressionS exp;
3653 bfd_reloc_code_real_type reloc;
3654
3655 start_unwind_section (text_seg, SPECIAL_SECTION_UNWIND_INFO);
3656
3657 /* Make sure the section has 4 byte alignment for ILP32 and
3658 8 byte alignment for LP64. */
3659 frag_align (md.pointer_size_shift, 0, 0);
3660 record_alignment (now_seg, md.pointer_size_shift);
3661
3662 /* Set expression which points to start of unwind descriptor area. */
3663 unwind.info = expr_build_dot ();
3664
3665 frag_var (rs_machine_dependent, size, size, 0, 0,
3666 (offsetT) (long) unwind.personality_routine,
3667 (char *) list);
3668
3669 /* Add the personality address to the image. */
3670 if (unwind.personality_routine != 0)
3671 {
3672 exp.X_op = O_symbol;
3673 exp.X_add_symbol = unwind.personality_routine;
3674 exp.X_add_number = 0;
3675
3676 if (md.flags & EF_IA_64_BE)
3677 {
3678 if (md.flags & EF_IA_64_ABI64)
3679 reloc = BFD_RELOC_IA64_LTOFF_FPTR64MSB;
3680 else
3681 reloc = BFD_RELOC_IA64_LTOFF_FPTR32MSB;
3682 }
3683 else
3684 {
3685 if (md.flags & EF_IA_64_ABI64)
3686 reloc = BFD_RELOC_IA64_LTOFF_FPTR64LSB;
3687 else
3688 reloc = BFD_RELOC_IA64_LTOFF_FPTR32LSB;
3689 }
3690
3691 fix_new_exp (frag_now, frag_now_fix () - md.pointer_size,
3692 md.pointer_size, &exp, 0, reloc);
3693 unwind.personality_routine = 0;
3694 }
3695 }
3696
3697 free_saved_prologue_counts ();
3698 unwind.list = unwind.tail = unwind.current_entry = NULL;
3699 }
3700
3701 static void
3702 dot_handlerdata (int dummy ATTRIBUTE_UNUSED)
3703 {
3704 if (!in_procedure ("handlerdata"))
3705 return;
3706 unwind.force_unwind_entry = 1;
3707
3708 /* Remember which segment we're in so we can switch back after .endp. */
3709 unwind.saved_text_seg = now_seg;
3710 unwind.saved_text_subseg = now_subseg;
3711
3712 /* Generate unwind info into unwind-info section and then leave that
3713 section as the currently active one so dataXX directives go into
3714 the language specific data area of the unwind info block. */
3715 generate_unwind_image (now_seg);
3716 demand_empty_rest_of_line ();
3717 }
3718
3719 static void
3720 dot_unwentry (int dummy ATTRIBUTE_UNUSED)
3721 {
3722 if (!in_procedure ("unwentry"))
3723 return;
3724 unwind.force_unwind_entry = 1;
3725 demand_empty_rest_of_line ();
3726 }
3727
3728 static void
3729 dot_altrp (int dummy ATTRIBUTE_UNUSED)
3730 {
3731 expressionS e;
3732 unsigned reg;
3733
3734 if (!in_prologue ("altrp"))
3735 return;
3736
3737 parse_operand_and_eval (&e, 0);
3738 reg = e.X_add_number - REG_BR;
3739 if (e.X_op != O_register || reg > 7)
3740 {
3741 as_bad (_("First operand to .altrp not a valid branch register"));
3742 reg = 0;
3743 }
3744 add_unwind_entry (output_rp_br (reg), 0);
3745 }
3746
3747 static void
3748 dot_savemem (int psprel)
3749 {
3750 expressionS e1, e2;
3751 int sep;
3752 int reg1, val;
3753 const char * const po = psprel ? "savepsp" : "savesp";
3754
3755 if (!in_prologue (po))
3756 return;
3757
3758 sep = parse_operand_and_eval (&e1, ',');
3759 if (sep == ',')
3760 sep = parse_operand_and_eval (&e2, ',');
3761 else
3762 e2.X_op = O_absent;
3763
3764 reg1 = e1.X_add_number;
3765 val = e2.X_add_number;
3766
3767 /* Make sure it's a valid ar.xxx reg, or it's br0, aka 'rp'. */
3768 if (e1.X_op != O_register)
3769 {
3770 as_bad (_("First operand to .%s not a register"), po);
3771 reg1 = REG_PR; /* Anything valid is good here. */
3772 }
3773 if (e2.X_op != O_constant)
3774 {
3775 as_bad (_("Second operand to .%s not a constant"), po);
3776 val = 0;
3777 }
3778
3779 switch (reg1)
3780 {
3781 case REG_AR + AR_BSP:
3782 add_unwind_entry (output_bsp_when (), sep);
3783 add_unwind_entry ((psprel
3784 ? output_bsp_psprel
3785 : output_bsp_sprel) (val), NOT_A_CHAR);
3786 break;
3787 case REG_AR + AR_BSPSTORE:
3788 add_unwind_entry (output_bspstore_when (), sep);
3789 add_unwind_entry ((psprel
3790 ? output_bspstore_psprel
3791 : output_bspstore_sprel) (val), NOT_A_CHAR);
3792 break;
3793 case REG_AR + AR_RNAT:
3794 add_unwind_entry (output_rnat_when (), sep);
3795 add_unwind_entry ((psprel
3796 ? output_rnat_psprel
3797 : output_rnat_sprel) (val), NOT_A_CHAR);
3798 break;
3799 case REG_AR + AR_UNAT:
3800 add_unwind_entry (output_unat_when (), sep);
3801 add_unwind_entry ((psprel
3802 ? output_unat_psprel
3803 : output_unat_sprel) (val), NOT_A_CHAR);
3804 break;
3805 case REG_AR + AR_FPSR:
3806 add_unwind_entry (output_fpsr_when (), sep);
3807 add_unwind_entry ((psprel
3808 ? output_fpsr_psprel
3809 : output_fpsr_sprel) (val), NOT_A_CHAR);
3810 break;
3811 case REG_AR + AR_PFS:
3812 add_unwind_entry (output_pfs_when (), sep);
3813 add_unwind_entry ((psprel
3814 ? output_pfs_psprel
3815 : output_pfs_sprel) (val), NOT_A_CHAR);
3816 break;
3817 case REG_AR + AR_LC:
3818 add_unwind_entry (output_lc_when (), sep);
3819 add_unwind_entry ((psprel
3820 ? output_lc_psprel
3821 : output_lc_sprel) (val), NOT_A_CHAR);
3822 break;
3823 case REG_BR:
3824 add_unwind_entry (output_rp_when (), sep);
3825 add_unwind_entry ((psprel
3826 ? output_rp_psprel
3827 : output_rp_sprel) (val), NOT_A_CHAR);
3828 break;
3829 case REG_PR:
3830 add_unwind_entry (output_preds_when (), sep);
3831 add_unwind_entry ((psprel
3832 ? output_preds_psprel
3833 : output_preds_sprel) (val), NOT_A_CHAR);
3834 break;
3835 case REG_PRIUNAT:
3836 add_unwind_entry (output_priunat_when_mem (), sep);
3837 add_unwind_entry ((psprel
3838 ? output_priunat_psprel
3839 : output_priunat_sprel) (val), NOT_A_CHAR);
3840 break;
3841 default:
3842 as_bad (_("First operand to .%s not a valid register"), po);
3843 add_unwind_entry (NULL, sep);
3844 break;
3845 }
3846 }
3847
3848 static void
3849 dot_saveg (int dummy ATTRIBUTE_UNUSED)
3850 {
3851 expressionS e;
3852 unsigned grmask;
3853 int sep;
3854
3855 if (!in_prologue ("save.g"))
3856 return;
3857
3858 sep = parse_operand_and_eval (&e, ',');
3859
3860 grmask = e.X_add_number;
3861 if (e.X_op != O_constant
3862 || e.X_add_number <= 0
3863 || e.X_add_number > 0xf)
3864 {
3865 as_bad (_("First operand to .save.g must be a positive 4-bit constant"));
3866 grmask = 0;
3867 }
3868
3869 if (sep == ',')
3870 {
3871 unsigned reg;
3872 int n = popcount (grmask);
3873
3874 parse_operand_and_eval (&e, 0);
3875 reg = e.X_add_number - REG_GR;
3876 if (e.X_op != O_register || reg > 127)
3877 {
3878 as_bad (_("Second operand to .save.g must be a general register"));
3879 reg = 0;
3880 }
3881 else if (reg > 128U - n)
3882 {
3883 as_bad (_("Second operand to .save.g must be the first of %d general registers"), n);
3884 reg = 0;
3885 }
3886 add_unwind_entry (output_gr_gr (grmask, reg), 0);
3887 }
3888 else
3889 add_unwind_entry (output_gr_mem (grmask), 0);
3890 }
3891
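/* Usage sketch (illustrative): ".save.g 0x3, r35" records that two of the
   preserved GRs (mask 0x3, i.e. r4 and r5 under the unwind ABI's r4-r7
   mask) live in r35 and r36, producing a gr_gr chain via output_gr_gr.
   Without the second operand, ".save.g 0x3" instead emits a gr_mem record,
   meaning the registers are spilled to memory.  */
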
3892 static void
3893 dot_savef (int dummy ATTRIBUTE_UNUSED)
3894 {
3895 expressionS e;
3896
3897 if (!in_prologue ("save.f"))
3898 return;
3899
3900 parse_operand_and_eval (&e, 0);
3901
3902 if (e.X_op != O_constant
3903 || e.X_add_number <= 0
3904 || e.X_add_number > 0xfffff)
3905 {
3906 as_bad (_("Operand to .save.f must be a positive 20-bit constant"));
3907 e.X_add_number = 0;
3908 }
3909 add_unwind_entry (output_fr_mem (e.X_add_number), 0);
3910 }
3911
3912 static void
3913 dot_saveb (int dummy ATTRIBUTE_UNUSED)
3914 {
3915 expressionS e;
3916 unsigned brmask;
3917 int sep;
3918
3919 if (!in_prologue ("save.b"))
3920 return;
3921
3922 sep = parse_operand_and_eval (&e, ',');
3923
3924 brmask = e.X_add_number;
3925 if (e.X_op != O_constant
3926 || e.X_add_number <= 0
3927 || e.X_add_number > 0x1f)
3928 {
3929 as_bad (_("First operand to .save.b must be a positive 5-bit constant"));
3930 brmask = 0;
3931 }
3932
3933 if (sep == ',')
3934 {
3935 unsigned reg;
3936 int n = popcount (brmask);
3937
3938 parse_operand_and_eval (&e, 0);
3939 reg = e.X_add_number - REG_GR;
3940 if (e.X_op != O_register || reg > 127)
3941 {
3942 as_bad (_("Second operand to .save.b must be a general register"));
3943 reg = 0;
3944 }
3945 else if (reg > 128U - n)
3946 {
3947 as_bad (_("Second operand to .save.b must be the first of %d general registers"), n);
3948 reg = 0;
3949 }
3950 add_unwind_entry (output_br_gr (brmask, reg), 0);
3951 }
3952 else
3953 add_unwind_entry (output_br_mem (brmask), 0);
3954 }
3955
3956 static void
3957 dot_savegf (int dummy ATTRIBUTE_UNUSED)
3958 {
3959 expressionS e1, e2;
3960
3961 if (!in_prologue ("save.gf"))
3962 return;
3963
3964 if (parse_operand_and_eval (&e1, ',') == ',')
3965 parse_operand_and_eval (&e2, 0);
3966 else
3967 e2.X_op = O_absent;
3968
3969 if (e1.X_op != O_constant
3970 || e1.X_add_number < 0
3971 || e1.X_add_number > 0xf)
3972 {
3973 as_bad (_("First operand to .save.gf must be a non-negative 4-bit constant"));
3974 e1.X_op = O_absent;
3975 e1.X_add_number = 0;
3976 }
3977 if (e2.X_op != O_constant
3978 || e2.X_add_number < 0
3979 || e2.X_add_number > 0xfffff)
3980 {
3981 as_bad (_("Second operand to .save.gf must be a non-negative 20-bit constant"));
3982 e2.X_op = O_absent;
3983 e2.X_add_number = 0;
3984 }
3985 if (e1.X_op == O_constant
3986 && e2.X_op == O_constant
3987 && e1.X_add_number == 0
3988 && e2.X_add_number == 0)
3989 as_bad (_("Operands to .save.gf may not both be zero"));
3990
3991 add_unwind_entry (output_frgr_mem (e1.X_add_number, e2.X_add_number), 0);
3992 }
3993
3994 static void
3995 dot_spill (int dummy ATTRIBUTE_UNUSED)
3996 {
3997 expressionS e;
3998
3999 if (!in_prologue ("spill"))
4000 return;
4001
4002 parse_operand_and_eval (&e, 0);
4003
4004 if (e.X_op != O_constant)
4005 {
4006 as_bad (_("Operand to .spill must be a constant"));
4007 e.X_add_number = 0;
4008 }
4009 add_unwind_entry (output_spill_base (e.X_add_number), 0);
4010 }
4011
4012 static void
4013 dot_spillreg (int pred)
4014 {
4015 int sep;
4016 unsigned int qp, ab, xy, reg, treg;
4017 expressionS e;
4018 const char * const po = pred ? "spillreg.p" : "spillreg";
4019
4020 if (!in_procedure (po))
4021 return;
4022
4023 if (pred)
4024 sep = parse_predicate_and_operand (&e, &qp, po);
4025 else
4026 {
4027 sep = parse_operand_and_eval (&e, ',');
4028 qp = 0;
4029 }
4030 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
4031
4032 if (sep == ',')
4033 sep = parse_operand_and_eval (&e, ',');
4034 else
4035 e.X_op = O_absent;
4036 convert_expr_to_xy_reg (&e, &xy, &treg, po, 2 + pred);
4037
4038 add_unwind_entry (output_spill_reg (ab, reg, treg, xy, qp), sep);
4039 }
4040
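/* Usage sketch (illustrative): ".spillreg b1, r8" yields
   output_spill_reg (ab 2, reg 1, treg 8, xy 0, qp 0), i.e. an unpredicated
   spill_reg record saying preserved b1 is kept in r8.  The predicated form
   ".spillreg.p p6, b1, r8" is identical except that qp becomes 6 and the
   record type switches to spill_reg_p.  */
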
4041 static void
4042 dot_spillmem (int psprel)
4043 {
4044 expressionS e;
4045 int pred = (psprel < 0), sep;
4046 unsigned int qp, ab, reg;
4047 const char * po;
4048
4049 if (pred)
4050 {
4051 psprel = ~psprel;
4052 po = psprel ? "spillpsp.p" : "spillsp.p";
4053 }
4054 else
4055 po = psprel ? "spillpsp" : "spillsp";
4056
4057 if (!in_procedure (po))
4058 return;
4059
4060 if (pred)
4061 sep = parse_predicate_and_operand (&e, &qp, po);
4062 else
4063 {
4064 sep = parse_operand_and_eval (&e, ',');
4065 qp = 0;
4066 }
4067 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
4068
4069 if (sep == ',')
4070 sep = parse_operand_and_eval (&e, ',');
4071 else
4072 e.X_op = O_absent;
4073 if (e.X_op != O_constant)
4074 {
4075 as_bad (_("Operand %d to .%s must be a constant"), 2 + pred, po);
4076 e.X_add_number = 0;
4077 }
4078
4079 if (psprel)
4080 add_unwind_entry (output_spill_psprel (ab, reg, e.X_add_number, qp), sep);
4081 else
4082 add_unwind_entry (output_spill_sprel (ab, reg, e.X_add_number, qp), sep);
4083 }
4084
4085 static unsigned int
4086 get_saved_prologue_count (unsigned long lbl)
4087 {
4088 label_prologue_count *lpc = unwind.saved_prologue_counts;
4089
4090 while (lpc != NULL && lpc->label_number != lbl)
4091 lpc = lpc->next;
4092
4093 if (lpc != NULL)
4094 return lpc->prologue_count;
4095
4096 as_bad (_("Missing .label_state %ld"), lbl);
4097 return 1;
4098 }
4099
4100 static void
4101 save_prologue_count (unsigned long lbl, unsigned int count)
4102 {
4103 label_prologue_count *lpc = unwind.saved_prologue_counts;
4104
4105 while (lpc != NULL && lpc->label_number != lbl)
4106 lpc = lpc->next;
4107
4108 if (lpc != NULL)
4109 lpc->prologue_count = count;
4110 else
4111 {
4112 label_prologue_count *new_lpc = XNEW (label_prologue_count);
4113
4114 new_lpc->next = unwind.saved_prologue_counts;
4115 new_lpc->label_number = lbl;
4116 new_lpc->prologue_count = count;
4117 unwind.saved_prologue_counts = new_lpc;
4118 }
4119 }
4120
4121 static void
4122 free_saved_prologue_counts (void)
4123 {
4124 label_prologue_count *lpc = unwind.saved_prologue_counts;
4125 label_prologue_count *next;
4126
4127 while (lpc != NULL)
4128 {
4129 next = lpc->next;
4130 free (lpc);
4131 lpc = next;
4132 }
4133
4134 unwind.saved_prologue_counts = NULL;
4135 }
4136
4137 static void
4138 dot_label_state (int dummy ATTRIBUTE_UNUSED)
4139 {
4140 expressionS e;
4141
4142 if (!in_body ("label_state"))
4143 return;
4144
4145 parse_operand_and_eval (&e, 0);
4146 if (e.X_op == O_constant)
4147 save_prologue_count (e.X_add_number, unwind.prologue_count);
4148 else
4149 {
4150 as_bad (_("Operand to .label_state must be a constant"));
4151 e.X_add_number = 0;
4152 }
4153 add_unwind_entry (output_label_state (e.X_add_number), 0);
4154 }
4155
4156 static void
4157 dot_copy_state (int dummy ATTRIBUTE_UNUSED)
4158 {
4159 expressionS e;
4160
4161 if (!in_body ("copy_state"))
4162 return;
4163
4164 parse_operand_and_eval (&e, 0);
4165 if (e.X_op == O_constant)
4166 unwind.prologue_count = get_saved_prologue_count (e.X_add_number);
4167 else
4168 {
4169 as_bad (_("Operand to .copy_state must be a constant"));
4170 e.X_add_number = 0;
4171 }
4172 add_unwind_entry (output_copy_state (e.X_add_number), 0);
4173 }
4174
4175 static void
4176 dot_unwabi (int dummy ATTRIBUTE_UNUSED)
4177 {
4178 expressionS e1, e2;
4179 unsigned char sep;
4180
4181 if (!in_prologue ("unwabi"))
4182 return;
4183
4184 sep = parse_operand_and_eval (&e1, ',');
4185 if (sep == ',')
4186 parse_operand_and_eval (&e2, 0);
4187 else
4188 e2.X_op = O_absent;
4189
4190 if (e1.X_op != O_constant)
4191 {
4192 as_bad (_("First operand to .unwabi must be a constant"));
4193 e1.X_add_number = 0;
4194 }
4195
4196 if (e2.X_op != O_constant)
4197 {
4198 as_bad (_("Second operand to .unwabi must be a constant"));
4199 e2.X_add_number = 0;
4200 }
4201
4202 add_unwind_entry (output_unwabi (e1.X_add_number, e2.X_add_number), 0);
4203 }
4204
4205 static void
4206 dot_personality (int dummy ATTRIBUTE_UNUSED)
4207 {
4208 char *name, *p, c;
4209
4210 if (!in_procedure ("personality"))
4211 return;
4212 SKIP_WHITESPACE ();
4213 c = get_symbol_name (&name);
4214 p = input_line_pointer;
4215 unwind.personality_routine = symbol_find_or_make (name);
4216 unwind.force_unwind_entry = 1;
4217 *p = c;
4218 SKIP_WHITESPACE_AFTER_NAME ();
4219 demand_empty_rest_of_line ();
4220 }
4221
4222 static void
4223 dot_proc (int dummy ATTRIBUTE_UNUSED)
4224 {
4225 char *name, *p, c;
4226 symbolS *sym;
4227 proc_pending *pending, *last_pending;
4228
4229 if (unwind.proc_pending.sym)
4230 {
4231 (md.unwind_check == unwind_check_warning
4232 ? as_warn
4233 : as_bad) (_("Missing .endp after previous .proc"));
4234 while (unwind.proc_pending.next)
4235 {
4236 pending = unwind.proc_pending.next;
4237 unwind.proc_pending.next = pending->next;
4238 free (pending);
4239 }
4240 }
4241 last_pending = NULL;
4242
4243 /* Parse names of main and alternate entry points and mark them as
4244 function symbols: */
4245 while (1)
4246 {
4247 SKIP_WHITESPACE ();
4248 c = get_symbol_name (&name);
4249 p = input_line_pointer;
4250 if (!*name)
4251 as_bad (_("Empty argument of .proc"));
4252 else
4253 {
4254 sym = symbol_find_or_make (name);
4255 if (S_IS_DEFINED (sym))
4256 as_bad (_("`%s' was already defined"), name);
4257 else if (!last_pending)
4258 {
4259 unwind.proc_pending.sym = sym;
4260 last_pending = &unwind.proc_pending;
4261 }
4262 else
4263 {
4264 pending = XNEW (proc_pending);
4265 pending->sym = sym;
4266 last_pending = last_pending->next = pending;
4267 }
4268 symbol_get_bfdsym (sym)->flags |= BSF_FUNCTION;
4269 }
4270 *p = c;
4271 SKIP_WHITESPACE_AFTER_NAME ();
4272 if (*input_line_pointer != ',')
4273 break;
4274 ++input_line_pointer;
4275 }
4276 if (!last_pending)
4277 {
4278 unwind.proc_pending.sym = expr_build_dot ();
4279 last_pending = &unwind.proc_pending;
4280 }
4281 last_pending->next = NULL;
4282 demand_empty_rest_of_line ();
4283 do_align (4, NULL, 0, 0);
4284
4285 unwind.prologue = 0;
4286 unwind.prologue_count = 0;
4287 unwind.body = 0;
4288 unwind.insn = 0;
4289 unwind.list = unwind.tail = unwind.current_entry = NULL;
4290 unwind.personality_routine = 0;
4291 }
4292
4293 static void
4294 dot_body (int dummy ATTRIBUTE_UNUSED)
4295 {
4296 if (!in_procedure ("body"))
4297 return;
4298 if (!unwind.prologue && !unwind.body && unwind.insn)
4299 as_warn (_("Initial .body should precede any instructions"));
4300 check_pending_save ();
4301
4302 unwind.prologue = 0;
4303 unwind.prologue_mask = 0;
4304 unwind.body = 1;
4305
4306 add_unwind_entry (output_body (), 0);
4307 }
4308
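/* Handle .prologue.  The optional operands are a 4-bit mask describing
   which special registers are saved and the first of the consecutive
   general registers that hold them, e.g. (illustrative operands)

	.prologue 0xc, r33

   With no operands, a plain prologue region is opened instead.  */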
4309 static void
4310 dot_prologue (int dummy ATTRIBUTE_UNUSED)
4311 {
4312 unsigned mask = 0, grsave = 0;
4313
4314 if (!in_procedure ("prologue"))
4315 return;
4316 if (unwind.prologue)
4317 {
4318 as_bad (_(".prologue within prologue"));
4319 ignore_rest_of_line ();
4320 return;
4321 }
4322 if (!unwind.body && unwind.insn)
4323 as_warn (_("Initial .prologue should precede any instructions"));
4324
4325 if (!is_it_end_of_statement ())
4326 {
4327 expressionS e;
4328 int n, sep = parse_operand_and_eval (&e, ',');
4329
4330 if (e.X_op != O_constant
4331 || e.X_add_number < 0
4332 || e.X_add_number > 0xf)
4333 as_bad (_("First operand to .prologue must be a positive 4-bit constant"));
4334 else if (e.X_add_number == 0)
4335 as_warn (_("Pointless use of zero first operand to .prologue"));
4336 else
4337 mask = e.X_add_number;
4338
4339 n = popcount (mask);
4340
4341 if (sep == ',')
4342 parse_operand_and_eval (&e, 0);
4343 else
4344 e.X_op = O_absent;
4345
4346 if (e.X_op == O_constant
4347 && e.X_add_number >= 0
4348 && e.X_add_number < 128)
4349 {
4350 if (md.unwind_check == unwind_check_error)
4351 as_warn (_("Using a constant as second operand to .prologue is deprecated"));
4352 grsave = e.X_add_number;
4353 }
4354 else if (e.X_op != O_register
4355 || (grsave = e.X_add_number - REG_GR) > 127)
4356 {
4357 as_bad (_("Second operand to .prologue must be a general register"));
4358 grsave = 0;
4359 }
4360 else if (grsave > 128U - n)
4361 {
4362 as_bad (_("Second operand to .prologue must be the first of %d general registers"), n);
4363 grsave = 0;
4364 }
4365 }
4366
4367 if (mask)
4368 add_unwind_entry (output_prologue_gr (mask, grsave), 0);
4369 else
4370 add_unwind_entry (output_prologue (), 0);
4371
4372 unwind.prologue = 1;
4373 unwind.prologue_mask = mask;
4374 unwind.prologue_gr = grsave;
4375 unwind.body = 0;
4376 ++unwind.prologue_count;
4377 }
4378
4379 static void
4380 dot_endp (int dummy ATTRIBUTE_UNUSED)
4381 {
4382 expressionS e;
4383 int bytes_per_address;
4384 long where;
4385 segT saved_seg;
4386 subsegT saved_subseg;
4387 proc_pending *pending;
4388 int unwind_check = md.unwind_check;
4389
4390 md.unwind_check = unwind_check_error;
4391 if (!in_procedure ("endp"))
4392 return;
4393 md.unwind_check = unwind_check;
4394
4395 if (unwind.saved_text_seg)
4396 {
4397 saved_seg = unwind.saved_text_seg;
4398 saved_subseg = unwind.saved_text_subseg;
4399 unwind.saved_text_seg = NULL;
4400 }
4401 else
4402 {
4403 saved_seg = now_seg;
4404 saved_subseg = now_subseg;
4405 }
4406
4407 insn_group_break (1, 0, 0);
4408
4409 /* If there wasn't a .handlerdata, we haven't generated an image yet. */
4410 if (!unwind.info)
4411 generate_unwind_image (saved_seg);
4412
4413 if (unwind.info || unwind.force_unwind_entry)
4414 {
4415 symbolS *proc_end;
4416
4417 subseg_set (md.last_text_seg, 0);
4418 proc_end = expr_build_dot ();
4419
4420 start_unwind_section (saved_seg, SPECIAL_SECTION_UNWIND);
4421
4422 /* Make sure the section has 4-byte alignment for ILP32 and
4423 8-byte alignment for LP64. */
4424 record_alignment (now_seg, md.pointer_size_shift);
4425
4426 /* Need space for 3 pointers for procedure start, procedure end,
4427 and unwind info. */
4428 memset (frag_more (3 * md.pointer_size), 0, 3 * md.pointer_size);
4429 where = frag_now_fix () - (3 * md.pointer_size);
4430 bytes_per_address = bfd_arch_bits_per_address (stdoutput) / 8;
4431
4432 /* Issue the values of a) Proc Begin, b) Proc End, c) Unwind Record. */
4433 e.X_op = O_pseudo_fixup;
4434 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4435 e.X_add_number = 0;
4436 if (!S_IS_LOCAL (unwind.proc_pending.sym)
4437 && S_IS_DEFINED (unwind.proc_pending.sym))
4438 e.X_add_symbol
4439 = symbol_temp_new (S_GET_SEGMENT (unwind.proc_pending.sym),
4440 symbol_get_frag (unwind.proc_pending.sym),
4441 S_GET_VALUE (unwind.proc_pending.sym));
4442 else
4443 e.X_add_symbol = unwind.proc_pending.sym;
4444 ia64_cons_fix_new (frag_now, where, bytes_per_address, &e,
4445 BFD_RELOC_NONE);
4446
4447 e.X_op = O_pseudo_fixup;
4448 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4449 e.X_add_number = 0;
4450 e.X_add_symbol = proc_end;
4451 ia64_cons_fix_new (frag_now, where + bytes_per_address,
4452 bytes_per_address, &e, BFD_RELOC_NONE);
4453
4454 if (unwind.info)
4455 {
4456 e.X_op = O_pseudo_fixup;
4457 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4458 e.X_add_number = 0;
4459 e.X_add_symbol = unwind.info;
4460 ia64_cons_fix_new (frag_now, where + (bytes_per_address * 2),
4461 bytes_per_address, &e, BFD_RELOC_NONE);
4462 }
4463 }
4464 subseg_set (saved_seg, saved_subseg);
4465
4466 /* Set symbol sizes. */
4467 pending = &unwind.proc_pending;
4468 if (S_GET_NAME (pending->sym))
4469 {
4470 do
4471 {
4472 symbolS *sym = pending->sym;
4473
4474 if (!S_IS_DEFINED (sym))
4475 as_bad (_("`%s' was not defined within procedure"), S_GET_NAME (sym));
4476 else if (S_GET_SIZE (sym) == 0
4477 && symbol_get_obj (sym)->size == NULL)
4478 {
4479 fragS *frag = symbol_get_frag (sym);
4480
4481 if (frag)
4482 {
4483 if (frag == frag_now && SEG_NORMAL (now_seg))
4484 S_SET_SIZE (sym, frag_now_fix () - S_GET_VALUE (sym));
4485 else
4486 {
4487 symbol_get_obj (sym)->size = XNEW (expressionS);
4488 symbol_get_obj (sym)->size->X_op = O_subtract;
4489 symbol_get_obj (sym)->size->X_add_symbol
4490 = symbol_new (FAKE_LABEL_NAME, now_seg,
4491 frag_now, frag_now_fix ());
4492 symbol_get_obj (sym)->size->X_op_symbol = sym;
4493 symbol_get_obj (sym)->size->X_add_number = 0;
4494 }
4495 }
4496 }
4497 } while ((pending = pending->next) != NULL);
4498 }
4499
4500 /* Parse names of main and alternate entry points. */
4501 while (1)
4502 {
4503 char *name, *p, c;
4504
4505 SKIP_WHITESPACE ();
4506 c = get_symbol_name (&name);
4507 p = input_line_pointer;
4508 if (!*name)
4509 (md.unwind_check == unwind_check_warning
4510 ? as_warn
4511 : as_bad) (_("Empty argument of .endp"));
4512 else
4513 {
4514 symbolS *sym = symbol_find (name);
4515
4516 for (pending = &unwind.proc_pending; pending; pending = pending->next)
4517 {
4518 if (sym == pending->sym)
4519 {
4520 pending->sym = NULL;
4521 break;
4522 }
4523 }
4524 if (!sym || !pending)
4525 as_warn (_("`%s' was not specified with previous .proc"), name);
4526 }
4527 *p = c;
4528 SKIP_WHITESPACE_AFTER_NAME ();
4529 if (*input_line_pointer != ',')
4530 break;
4531 ++input_line_pointer;
4532 }
4533 demand_empty_rest_of_line ();
4534
4535 /* Deliberately only checking for the main entry point here; the
4536 language spec even says all arguments to .endp are ignored. */
4537 if (unwind.proc_pending.sym
4538 && S_GET_NAME (unwind.proc_pending.sym)
4539 && strcmp (S_GET_NAME (unwind.proc_pending.sym), FAKE_LABEL_NAME))
4540 as_warn (_("`%s' should be an operand to this .endp"),
4541 S_GET_NAME (unwind.proc_pending.sym));
4542 while (unwind.proc_pending.next)
4543 {
4544 pending = unwind.proc_pending.next;
4545 unwind.proc_pending.next = pending->next;
4546 free (pending);
4547 }
4548 unwind.proc_pending.sym = unwind.info = NULL;
4549 }
4550
4551 static void
4552 dot_template (int template_val)
4553 {
4554 CURR_SLOT.user_template = template_val;
4555 }
4556
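/* Handle .regstk ins, locs, outs, rots (all four default to 0 when no
   operands are given), e.g. (illustrative values)

	.regstk 2, 3, 2, 0

   The values are handed to set_regstack, the same helper used when an
   `alloc' instruction is parsed.  */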
4557 static void
4558 dot_regstk (int dummy ATTRIBUTE_UNUSED)
4559 {
4560 int ins, locs, outs, rots;
4561
4562 if (is_it_end_of_statement ())
4563 ins = locs = outs = rots = 0;
4564 else
4565 {
4566 ins = get_absolute_expression ();
4567 if (*input_line_pointer++ != ',')
4568 goto err;
4569 locs = get_absolute_expression ();
4570 if (*input_line_pointer++ != ',')
4571 goto err;
4572 outs = get_absolute_expression ();
4573 if (*input_line_pointer++ != ',')
4574 goto err;
4575 rots = get_absolute_expression ();
4576 }
4577 set_regstack (ins, locs, outs, rots);
4578 return;
4579
4580 err:
4581 as_bad (_("Comma expected"));
4582 ignore_rest_of_line ();
4583 }
4584
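/* Handle .rotr/.rotf/.rotp: declare named groups of rotating general,
   floating-point, or predicate registers, e.g. (illustrative names)

	.rotr in[4], out[4]

   Each name[count] pair is entered into the dynamic register hash;
   groups are allocated upwards from r32, f32, or p16 respectively and
   checked against the rotating registers actually available.  */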
4585 static void
4586 dot_rot (int type)
4587 {
4588 offsetT num_regs;
4589 valueT num_alloced = 0;
4590 struct dynreg **drpp, *dr;
4591 int ch, base_reg = 0;
4592 char *name, *start;
4593 size_t len;
4594
4595 switch (type)
4596 {
4597 case DYNREG_GR: base_reg = REG_GR + 32; break;
4598 case DYNREG_FR: base_reg = REG_FR + 32; break;
4599 case DYNREG_PR: base_reg = REG_P + 16; break;
4600 default: break;
4601 }
4602
4603 /* First, remove existing names from hash table. */
4604 for (dr = md.dynreg[type]; dr && dr->num_regs; dr = dr->next)
4605 {
4606 str_hash_delete (md.dynreg_hash, dr->name);
4607 /* FIXME: Free dr->name. */
4608 dr->num_regs = 0;
4609 }
4610
4611 drpp = &md.dynreg[type];
4612 while (1)
4613 {
4614 ch = get_symbol_name (&start);
4615 len = strlen (ia64_canonicalize_symbol_name (start));
4616 *input_line_pointer = ch;
4617
4618 SKIP_WHITESPACE_AFTER_NAME ();
4619 if (*input_line_pointer != '[')
4620 {
4621 as_bad (_("Expected '['"));
4622 goto err;
4623 }
4624 ++input_line_pointer; /* skip '[' */
4625
4626 num_regs = get_absolute_expression ();
4627
4628 if (*input_line_pointer++ != ']')
4629 {
4630 as_bad (_("Expected ']'"));
4631 goto err;
4632 }
4633 if (num_regs <= 0)
4634 {
4635 as_bad (_("Number of elements must be positive"));
4636 goto err;
4637 }
4638 SKIP_WHITESPACE ();
4639
4640 num_alloced += num_regs;
4641 switch (type)
4642 {
4643 case DYNREG_GR:
4644 if (num_alloced > md.rot.num_regs)
4645 {
4646 as_bad (_("Used more than the declared %d rotating registers"),
4647 md.rot.num_regs);
4648 goto err;
4649 }
4650 break;
4651 case DYNREG_FR:
4652 if (num_alloced > 96)
4653 {
4654 as_bad (_("Used more than the available 96 rotating registers"));
4655 goto err;
4656 }
4657 break;
4658 case DYNREG_PR:
4659 if (num_alloced > 48)
4660 {
4661 as_bad (_("Used more than the available 48 rotating registers"));
4662 goto err;
4663 }
4664 break;
4665
4666 default:
4667 break;
4668 }
4669
4670 if (!*drpp)
4671 {
4672 *drpp = XOBNEW (&notes, struct dynreg);
4673 memset (*drpp, 0, sizeof (*dr));
4674 }
4675
4676 name = XOBNEWVEC (&notes, char, len + 1);
4677 memcpy (name, start, len);
4678 name[len] = '\0';
4679
4680 dr = *drpp;
4681 dr->name = name;
4682 dr->num_regs = num_regs;
4683 dr->base = base_reg;
4684 drpp = &dr->next;
4685 base_reg += num_regs;
4686
4687 str_hash_insert (md.dynreg_hash, name, dr);
4688
4689 if (*input_line_pointer != ',')
4690 break;
4691 ++input_line_pointer; /* skip comma */
4692 SKIP_WHITESPACE ();
4693 }
4694 demand_empty_rest_of_line ();
4695 return;
4696
4697 err:
4698 ignore_rest_of_line ();
4699 }
4700
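/* Handle .lsb/.msb: select the data byte order for the current section,
   remembering the choice in the per-section info and switching the
   number/float emission routines whenever the global target endianness
   changes.  */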
4701 static void
4702 dot_byteorder (int byteorder)
4703 {
4704 segment_info_type *seginfo = seg_info (now_seg);
4705
4706 if (byteorder == -1)
4707 {
4708 if (seginfo->tc_segment_info_data.endian == 0)
4709 seginfo->tc_segment_info_data.endian = default_big_endian ? 1 : 2;
4710 byteorder = seginfo->tc_segment_info_data.endian == 1;
4711 }
4712 else
4713 seginfo->tc_segment_info_data.endian = byteorder ? 1 : 2;
4714
4715 if (target_big_endian != byteorder)
4716 {
4717 target_big_endian = byteorder;
4718 if (target_big_endian)
4719 {
4720 ia64_number_to_chars = number_to_chars_bigendian;
4721 ia64_float_to_chars = ia64_float_to_chars_bigendian;
4722 }
4723 else
4724 {
4725 ia64_number_to_chars = number_to_chars_littleendian;
4726 ia64_float_to_chars = ia64_float_to_chars_littleendian;
4727 }
4728 }
4729 }
4730
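/* Handle .psr: accept a comma-separated list of the options lsb, msb,
   abi32 and abi64 and update the ELF header flags accordingly, e.g.
   (illustrative combination)

	.psr abi64, lsb  */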
4731 static void
4732 dot_psr (int dummy ATTRIBUTE_UNUSED)
4733 {
4734 char *option;
4735 int ch;
4736
4737 while (1)
4738 {
4739 ch = get_symbol_name (&option);
4740 if (strcmp (option, "lsb") == 0)
4741 md.flags &= ~EF_IA_64_BE;
4742 else if (strcmp (option, "msb") == 0)
4743 md.flags |= EF_IA_64_BE;
4744 else if (strcmp (option, "abi32") == 0)
4745 md.flags &= ~EF_IA_64_ABI64;
4746 else if (strcmp (option, "abi64") == 0)
4747 md.flags |= EF_IA_64_ABI64;
4748 else
4749 as_bad (_("Unknown psr option `%s'"), option);
4750 *input_line_pointer = ch;
4751
4752 SKIP_WHITESPACE_AFTER_NAME ();
4753 if (*input_line_pointer != ',')
4754 break;
4755
4756 ++input_line_pointer;
4757 SKIP_WHITESPACE ();
4758 }
4759 demand_empty_rest_of_line ();
4760 }
4761
4762 static void
4763 dot_ln (int dummy ATTRIBUTE_UNUSED)
4764 {
4765 new_logical_line (0, get_absolute_expression ());
4766 demand_empty_rest_of_line ();
4767 }
4768
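/* Emit data into a different section: parse the section name, switch to
   that section while keeping pending output, run the given data builder
   (with automatic alignment disabled for the .ua variants), and then
   return to the previous section.  */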
4769 static void
4770 cross_section (int ref, void (*builder) (int), int ua)
4771 {
4772 char *start, *end;
4773 int saved_auto_align;
4774 unsigned int section_count;
4775 char *name;
4776 char c;
4777
4778 SKIP_WHITESPACE ();
4779 start = input_line_pointer;
4780 c = get_symbol_name (&name);
4781 if (input_line_pointer == start)
4782 {
4783 as_bad (_("Missing section name"));
4784 ignore_rest_of_line ();
4785 return;
4786 }
4787 * input_line_pointer = c;
4788 SKIP_WHITESPACE_AFTER_NAME ();
4789 end = input_line_pointer;
4790 if (*input_line_pointer != ',')
4791 {
4792 as_bad (_("Comma expected after section name"));
4793 ignore_rest_of_line ();
4794 return;
4795 }
4796 *end = '\0';
4797 end = input_line_pointer + 1; /* skip comma */
4798 input_line_pointer = start;
4799 md.keep_pending_output = 1;
4800 section_count = bfd_count_sections (stdoutput);
4801 obj_elf_section (0);
4802 if (section_count != bfd_count_sections (stdoutput))
4803 as_warn (_("Creating sections with .xdataN/.xrealN/.xstringZ is deprecated."));
4804 input_line_pointer = end;
4805 saved_auto_align = md.auto_align;
4806 if (ua)
4807 md.auto_align = 0;
4808 (*builder) (ref);
4809 if (ua)
4810 md.auto_align = saved_auto_align;
4811 obj_elf_previous (0);
4812 md.keep_pending_output = 0;
4813 }
4814
4815 static void
4816 dot_xdata (int size)
4817 {
4818 cross_section (size, cons, 0);
4819 }
4820
4821 /* Why doesn't float_cons() call md_cons_align() the way cons() does? */
4822
4823 static void
4824 stmt_float_cons (int kind)
4825 {
4826 size_t alignment;
4827
4828 switch (kind)
4829 {
4830 case 'd':
4831 alignment = 3;
4832 break;
4833
4834 case 'x':
4835 case 'X':
4836 alignment = 4;
4837 break;
4838
4839 case 'f':
4840 default:
4841 alignment = 2;
4842 break;
4843 }
4844 do_align (alignment, NULL, 0, 0);
4845 float_cons (kind);
4846 }
4847
4848 static void
4849 stmt_cons_ua (int size)
4850 {
4851 int saved_auto_align = md.auto_align;
4852
4853 md.auto_align = 0;
4854 cons (size);
4855 md.auto_align = saved_auto_align;
4856 }
4857
4858 static void
4859 dot_xfloat_cons (int kind)
4860 {
4861 cross_section (kind, stmt_float_cons, 0);
4862 }
4863
4864 static void
4865 dot_xstringer (int zero)
4866 {
4867 cross_section (zero, stringer, 0);
4868 }
4869
4870 static void
4871 dot_xdata_ua (int size)
4872 {
4873 cross_section (size, cons, 1);
4874 }
4875
4876 static void
4877 dot_xfloat_cons_ua (int kind)
4878 {
4879 cross_section (kind, float_cons, 1);
4880 }
4881
4882 /* .reg.val <regname>,value */
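/* For instance (illustrative operands), `.reg.val r14, 0x1000' tells the
   DV checker that r14 currently holds 0x1000 on the current path.  Only
   annotations for r1-r127 are recorded; anything else is ignored with a
   warning.  */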
4883
4884 static void
4885 dot_reg_val (int dummy ATTRIBUTE_UNUSED)
4886 {
4887 expressionS reg;
4888
4889 expression_and_evaluate (&reg);
4890 if (reg.X_op != O_register)
4891 {
4892 as_bad (_("Register name expected"));
4893 ignore_rest_of_line ();
4894 }
4895 else if (*input_line_pointer++ != ',')
4896 {
4897 as_bad (_("Comma expected"));
4898 ignore_rest_of_line ();
4899 }
4900 else
4901 {
4902 valueT value = get_absolute_expression ();
4903 int regno = reg.X_add_number;
4904 if (regno <= REG_GR || regno > REG_GR + 127)
4905 as_warn (_("Register value annotation ignored"));
4906 else
4907 {
4908 gr_values[regno - REG_GR].known = 1;
4909 gr_values[regno - REG_GR].value = value;
4910 gr_values[regno - REG_GR].path = md.path;
4911 }
4912 }
4913 demand_empty_rest_of_line ();
4914 }
4915
4916 /*
4917 .serialize.data
4918 .serialize.instruction
4919 */
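/* Both directives only annotate the dependency-violation checker with a
   data or instruction serialization point (bracketed by instruction
   group breaks); they do not themselves emit srlz.d/srlz.i insns.  */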
4920 static void
4921 dot_serialize (int type)
4922 {
4923 insn_group_break (0, 0, 0);
4924 if (type)
4925 instruction_serialization ();
4926 else
4927 data_serialization ();
4928 insn_group_break (0, 0, 0);
4929 demand_empty_rest_of_line ();
4930 }
4931
4932 /* Select the DV (dependency violation) checking mode:
4933    .auto
4934    .explicit
4935    .default
4936
4937    A stop is inserted when changing modes.
4938 */
4939
4940 static void
4941 dot_dv_mode (int type)
4942 {
4943 if (md.manual_bundling)
4944 as_warn (_("Directive invalid within a bundle"));
4945
4946 if (type == 'E' || type == 'A')
4947 md.mode_explicitly_set = 0;
4948 else
4949 md.mode_explicitly_set = 1;
4950
4951 md.detect_dv = 1;
4952 switch (type)
4953 {
4954 case 'A':
4955 case 'a':
4956 if (md.explicit_mode)
4957 insn_group_break (1, 0, 0);
4958 md.explicit_mode = 0;
4959 break;
4960 case 'E':
4961 case 'e':
4962 if (!md.explicit_mode)
4963 insn_group_break (1, 0, 0);
4964 md.explicit_mode = 1;
4965 break;
4966 default:
4967 case 'd':
4968 if (md.explicit_mode != md.default_explicit_mode)
4969 insn_group_break (1, 0, 0);
4970 md.explicit_mode = md.default_explicit_mode;
4971 md.mode_explicitly_set = 0;
4972 break;
4973 }
4974 }
4975
4976 static void
4977 print_prmask (valueT mask)
4978 {
4979 int regno;
4980 const char *comma = "";
4981 for (regno = 0; regno < 64; regno++)
4982 {
4983 if (mask & ((valueT) 1 << regno))
4984 {
4985 fprintf (stderr, "%s p%d", comma, regno);
4986 comma = ",";
4987 }
4988 }
4989 }
4990
4991 /*
4992 .pred.rel.clear [p1 [,p2 [,...]]] (also .pred.rel "clear" or @clear)
4993 .pred.rel.imply p1, p2 (also .pred.rel "imply" or @imply)
4994 .pred.rel.mutex p1, p2 [,...] (also .pred.rel "mutex" or @mutex)
4995 .pred.safe_across_calls p1 [, p2 [,...]]
4996 */
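/* For instance (illustrative predicates), `.pred.rel.mutex p6, p7' tells
   the DV checker that p6 and p7 are never simultaneously true, while
   `.pred.rel.imply p6, p7' states that p6 being true implies p7 is true.
   Ranges written as p6-p9 are accepted by the clear, mutex and
   safe_across_calls forms.  */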
4997
4998 static void
4999 dot_pred_rel (int type)
5000 {
5001 valueT mask = 0;
5002 int count = 0;
5003 int p1 = -1, p2 = -1;
5004
5005 if (type == 0)
5006 {
5007 if (*input_line_pointer == '"')
5008 {
5009 int len;
5010 char *form = demand_copy_C_string (&len);
5011
5012 if (strcmp (form, "mutex") == 0)
5013 type = 'm';
5014 else if (strcmp (form, "clear") == 0)
5015 type = 'c';
5016 else if (strcmp (form, "imply") == 0)
5017 type = 'i';
5018 obstack_free (&notes, form);
5019 }
5020 else if (*input_line_pointer == '@')
5021 {
5022 char *form;
5023 char c;
5024
5025 ++input_line_pointer;
5026 c = get_symbol_name (&form);
5027
5028 if (strcmp (form, "mutex") == 0)
5029 type = 'm';
5030 else if (strcmp (form, "clear") == 0)
5031 type = 'c';
5032 else if (strcmp (form, "imply") == 0)
5033 type = 'i';
5034 (void) restore_line_pointer (c);
5035 }
5036 else
5037 {
5038 as_bad (_("Missing predicate relation type"));
5039 ignore_rest_of_line ();
5040 return;
5041 }
5042 if (type == 0)
5043 {
5044 as_bad (_("Unrecognized predicate relation type"));
5045 ignore_rest_of_line ();
5046 return;
5047 }
5048 if (*input_line_pointer == ',')
5049 ++input_line_pointer;
5050 SKIP_WHITESPACE ();
5051 }
5052
5053 while (1)
5054 {
5055 valueT bits = 1;
5056 int sep, regno;
5057 expressionS pr, *pr1, *pr2;
5058
5059 sep = parse_operand_and_eval (&pr, ',');
5060 if (pr.X_op == O_register
5061 && pr.X_add_number >= REG_P
5062 && pr.X_add_number <= REG_P + 63)
5063 {
5064 regno = pr.X_add_number - REG_P;
5065 bits <<= regno;
5066 count++;
5067 if (p1 == -1)
5068 p1 = regno;
5069 else if (p2 == -1)
5070 p2 = regno;
5071 }
5072 else if (type != 'i'
5073 && pr.X_op == O_subtract
5074 && (pr1 = symbol_get_value_expression (pr.X_add_symbol))
5075 && pr1->X_op == O_register
5076 && pr1->X_add_number >= REG_P
5077 && pr1->X_add_number <= REG_P + 63
5078 && (pr2 = symbol_get_value_expression (pr.X_op_symbol))
5079 && pr2->X_op == O_register
5080 && pr2->X_add_number >= REG_P
5081 && pr2->X_add_number <= REG_P + 63)
5082 {
5083 /* It's a range. */
5084 int stop;
5085
5086 regno = pr1->X_add_number - REG_P;
5087 stop = pr2->X_add_number - REG_P;
5088 if (regno >= stop)
5089 {
5090 as_bad (_("Bad register range"));
5091 ignore_rest_of_line ();
5092 return;
5093 }
5094 bits = ((bits << stop) << 1) - (bits << regno);
5095 count += stop - regno + 1;
5096 }
5097 else
5098 {
5099 as_bad (_("Predicate register expected"));
5100 ignore_rest_of_line ();
5101 return;
5102 }
5103 if (mask & bits)
5104 as_warn (_("Duplicate predicate register ignored"));
5105 mask |= bits;
5106 if (sep != ',')
5107 break;
5108 }
5109
5110 switch (type)
5111 {
5112 case 'c':
5113 if (count == 0)
5114 mask = ~(valueT) 0;
5115 clear_qp_mutex (mask);
5116 clear_qp_implies (mask, (valueT) 0);
5117 break;
5118 case 'i':
5119 if (count != 2 || p1 == -1 || p2 == -1)
5120 as_bad (_("Predicate source and target required"));
5121 else if (p1 == 0 || p2 == 0)
5122 as_bad (_("Use of p0 is not valid in this context"));
5123 else
5124 add_qp_imply (p1, p2);
5125 break;
5126 case 'm':
5127 if (count < 2)
5128 {
5129 as_bad (_("At least two PR arguments expected"));
5130 break;
5131 }
5132 else if (mask & 1)
5133 {
5134 as_bad (_("Use of p0 is not valid in this context"));
5135 break;
5136 }
5137 add_qp_mutex (mask);
5138 break;
5139 case 's':
5140 /* note that we don't override any existing relations */
5141 if (count == 0)
5142 {
5143 as_bad (_("At least one PR argument expected"));
5144 break;
5145 }
5146 if (md.debug_dv)
5147 {
5148 fprintf (stderr, "Safe across calls: ");
5149 print_prmask (mask);
5150 fprintf (stderr, "\n");
5151 }
5152 qp_safe_across_calls = mask;
5153 break;
5154 }
5155 demand_empty_rest_of_line ();
5156 }
5157
5158 /* .entry label [, label [, ...]]
5159 Hint to DV code that the given labels are to be considered entry points.
5160 Otherwise, only global labels are considered entry points. */
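/* For example (illustrative labels), `.entry foo, bar' marks foo and bar
   as entry points for the DV analysis even though they need not be
   global.  */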
5161
5162 static void
5163 dot_entry (int dummy ATTRIBUTE_UNUSED)
5164 {
5165 char *name;
5166 int c;
5167 symbolS *symbolP;
5168
5169 do
5170 {
5171 c = get_symbol_name (&name);
5172 symbolP = symbol_find_or_make (name);
5173
5174 str_hash_insert (md.entry_hash, S_GET_NAME (symbolP), (void *) symbolP);
5175
5176 *input_line_pointer = c;
5177 SKIP_WHITESPACE_AFTER_NAME ();
5178 c = *input_line_pointer;
5179 if (c == ',')
5180 {
5181 input_line_pointer++;
5182 SKIP_WHITESPACE ();
5183 if (*input_line_pointer == '\n')
5184 c = '\n';
5185 }
5186 }
5187 while (c == ',');
5188
5189 demand_empty_rest_of_line ();
5190 }
5191
5192 /* .mem.offset offset, base
5193 "base" is used to distinguish between offsets from a different base. */
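/* For example (illustrative operands), `.mem.offset 8, 0' records a hint
   of offset 8 relative to hint base 0 for the DV checker's use.  */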
5194
5195 static void
5196 dot_mem_offset (int dummy ATTRIBUTE_UNUSED)
5197 {
5198 md.mem_offset.hint = 1;
5199 md.mem_offset.offset = get_absolute_expression ();
5200 if (*input_line_pointer != ',')
5201 {
5202 as_bad (_("Comma expected"));
5203 ignore_rest_of_line ();
5204 return;
5205 }
5206 ++input_line_pointer;
5207 md.mem_offset.base = get_absolute_expression ();
5208 demand_empty_rest_of_line ();
5209 }
5210
5211 /* ia64-specific pseudo-ops: */
5212 const pseudo_typeS md_pseudo_table[] =
5213 {
5214 { "radix", dot_radix, 0 },
5215 { "lcomm", s_lcomm_bytes, 1 },
5216 { "loc", dot_loc, 0 },
5217 { "bss", dot_special_section, SPECIAL_SECTION_BSS },
5218 { "sbss", dot_special_section, SPECIAL_SECTION_SBSS },
5219 { "sdata", dot_special_section, SPECIAL_SECTION_SDATA },
5220 { "rodata", dot_special_section, SPECIAL_SECTION_RODATA },
5221 { "comment", dot_special_section, SPECIAL_SECTION_COMMENT },
5222 { "ia_64.unwind", dot_special_section, SPECIAL_SECTION_UNWIND },
5223 { "ia_64.unwind_info", dot_special_section, SPECIAL_SECTION_UNWIND_INFO },
5224 { "init_array", dot_special_section, SPECIAL_SECTION_INIT_ARRAY },
5225 { "fini_array", dot_special_section, SPECIAL_SECTION_FINI_ARRAY },
5226 { "proc", dot_proc, 0 },
5227 { "body", dot_body, 0 },
5228 { "prologue", dot_prologue, 0 },
5229 { "endp", dot_endp, 0 },
5230
5231 { "fframe", dot_fframe, 0 },
5232 { "vframe", dot_vframe, 0 },
5233 { "vframesp", dot_vframesp, 0 },
5234 { "vframepsp", dot_vframesp, 1 },
5235 { "save", dot_save, 0 },
5236 { "restore", dot_restore, 0 },
5237 { "restorereg", dot_restorereg, 0 },
5238 { "restorereg.p", dot_restorereg, 1 },
5239 { "handlerdata", dot_handlerdata, 0 },
5240 { "unwentry", dot_unwentry, 0 },
5241 { "altrp", dot_altrp, 0 },
5242 { "savesp", dot_savemem, 0 },
5243 { "savepsp", dot_savemem, 1 },
5244 { "save.g", dot_saveg, 0 },
5245 { "save.f", dot_savef, 0 },
5246 { "save.b", dot_saveb, 0 },
5247 { "save.gf", dot_savegf, 0 },
5248 { "spill", dot_spill, 0 },
5249 { "spillreg", dot_spillreg, 0 },
5250 { "spillsp", dot_spillmem, 0 },
5251 { "spillpsp", dot_spillmem, 1 },
5252 { "spillreg.p", dot_spillreg, 1 },
5253 { "spillsp.p", dot_spillmem, ~0 },
5254 { "spillpsp.p", dot_spillmem, ~1 },
5255 { "label_state", dot_label_state, 0 },
5256 { "copy_state", dot_copy_state, 0 },
5257 { "unwabi", dot_unwabi, 0 },
5258 { "personality", dot_personality, 0 },
5259 { "mii", dot_template, 0x0 },
5260 { "mli", dot_template, 0x2 }, /* old format, for compatibility */
5261 { "mlx", dot_template, 0x2 },
5262 { "mmi", dot_template, 0x4 },
5263 { "mfi", dot_template, 0x6 },
5264 { "mmf", dot_template, 0x7 },
5265 { "mib", dot_template, 0x8 },
5266 { "mbb", dot_template, 0x9 },
5267 { "bbb", dot_template, 0xb },
5268 { "mmb", dot_template, 0xc },
5269 { "mfb", dot_template, 0xe },
5270 { "align", dot_align, 0 },
5271 { "regstk", dot_regstk, 0 },
5272 { "rotr", dot_rot, DYNREG_GR },
5273 { "rotf", dot_rot, DYNREG_FR },
5274 { "rotp", dot_rot, DYNREG_PR },
5275 { "lsb", dot_byteorder, 0 },
5276 { "msb", dot_byteorder, 1 },
5277 { "psr", dot_psr, 0 },
5278 { "alias", dot_alias, 0 },
5279 { "secalias", dot_alias, 1 },
5280 { "ln", dot_ln, 0 }, /* source line info (for debugging) */
5281
5282 { "xdata1", dot_xdata, 1 },
5283 { "xdata2", dot_xdata, 2 },
5284 { "xdata4", dot_xdata, 4 },
5285 { "xdata8", dot_xdata, 8 },
5286 { "xdata16", dot_xdata, 16 },
5287 { "xreal4", dot_xfloat_cons, 'f' },
5288 { "xreal8", dot_xfloat_cons, 'd' },
5289 { "xreal10", dot_xfloat_cons, 'x' },
5290 { "xreal16", dot_xfloat_cons, 'X' },
5291 { "xstring", dot_xstringer, 8 + 0 },
5292 { "xstringz", dot_xstringer, 8 + 1 },
5293
5294 /* unaligned versions: */
5295 { "xdata2.ua", dot_xdata_ua, 2 },
5296 { "xdata4.ua", dot_xdata_ua, 4 },
5297 { "xdata8.ua", dot_xdata_ua, 8 },
5298 { "xdata16.ua", dot_xdata_ua, 16 },
5299 { "xreal4.ua", dot_xfloat_cons_ua, 'f' },
5300 { "xreal8.ua", dot_xfloat_cons_ua, 'd' },
5301 { "xreal10.ua", dot_xfloat_cons_ua, 'x' },
5302 { "xreal16.ua", dot_xfloat_cons_ua, 'X' },
5303
5304 /* annotations/DV checking support */
5305 { "entry", dot_entry, 0 },
5306 { "mem.offset", dot_mem_offset, 0 },
5307 { "pred.rel", dot_pred_rel, 0 },
5308 { "pred.rel.clear", dot_pred_rel, 'c' },
5309 { "pred.rel.imply", dot_pred_rel, 'i' },
5310 { "pred.rel.mutex", dot_pred_rel, 'm' },
5311 { "pred.safe_across_calls", dot_pred_rel, 's' },
5312 { "reg.val", dot_reg_val, 0 },
5313 { "serialize.data", dot_serialize, 0 },
5314 { "serialize.instruction", dot_serialize, 1 },
5315 { "auto", dot_dv_mode, 'a' },
5316 { "explicit", dot_dv_mode, 'e' },
5317 { "default", dot_dv_mode, 'd' },
5318
5319 /* ??? These are needed to make gas/testsuite/gas/elf/ehopt.s work.
5320 IA-64 aligns data allocation pseudo-ops by default, so we have to
5321 tell it that these ones are supposed to be unaligned. Long term,
5322 should rewrite so that only IA-64 specific data allocation pseudo-ops
5323 are aligned by default. */
5324 { "2byte", stmt_cons_ua, 2 },
5325 { "4byte", stmt_cons_ua, 4 },
5326 { "8byte", stmt_cons_ua, 8 },
5327
5328 #ifdef TE_VMS
5329 { "vms_common", obj_elf_vms_common, 0 },
5330 #endif
5331
5332 { NULL, 0, 0 }
5333 };
5334
5335 static const struct pseudo_opcode
5336 {
5337 const char *name;
5338 void (*handler) (int);
5339 int arg;
5340 }
5341 pseudo_opcode[] =
5342 {
5343 /* These are more like pseudo-ops, but they don't start with a dot.  */
5344 { "data1", cons, 1 },
5345 { "data2", cons, 2 },
5346 { "data4", cons, 4 },
5347 { "data8", cons, 8 },
5348 { "data16", cons, 16 },
5349 { "real4", stmt_float_cons, 'f' },
5350 { "real8", stmt_float_cons, 'd' },
5351 { "real10", stmt_float_cons, 'x' },
5352 { "real16", stmt_float_cons, 'X' },
5353 { "string", stringer, 8 + 0 },
5354 { "stringz", stringer, 8 + 1 },
5355
5356 /* unaligned versions: */
5357 { "data2.ua", stmt_cons_ua, 2 },
5358 { "data4.ua", stmt_cons_ua, 4 },
5359 { "data8.ua", stmt_cons_ua, 8 },
5360 { "data16.ua", stmt_cons_ua, 16 },
5361 { "real4.ua", float_cons, 'f' },
5362 { "real8.ua", float_cons, 'd' },
5363 { "real10.ua", float_cons, 'x' },
5364 { "real16.ua", float_cons, 'X' },
5365 };
5366
5367 /* Declare a register by creating a symbol for it and entering it in
5368 the symbol table. */
5369
5370 static symbolS *
5371 declare_register (const char *name, unsigned int regnum)
5372 {
5373 symbolS *sym;
5374
5375 sym = symbol_create (name, reg_section, &zero_address_frag, regnum);
5376
5377 str_hash_insert (md.reg_hash, S_GET_NAME (sym), (void *) sym);
5378
5379 return sym;
5380 }
5381
5382 static void
5383 declare_register_set (const char *prefix,
5384 unsigned int num_regs,
5385 unsigned int base_regnum)
5386 {
5387 char name[8];
5388 unsigned int i;
5389
5390 for (i = 0; i < num_regs; ++i)
5391 {
5392 snprintf (name, sizeof (name), "%s%u", prefix, i);
5393 declare_register (name, base_regnum + i);
5394 }
5395 }
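/* A sketch of how these helpers are expected to be used when the static
   register names are set up (the actual initialization lives elsewhere
   in this file; the calls shown are illustrative):

     declare_register_set ("r", 128, REG_GR);
     declare_register_set ("f", 128, REG_FR);
     declare_register ("sp", REG_GR + 12);

   The resulting symbols are created in reg_section and entered into
   md.reg_hash, so that register names can later be recognized when
   operands are parsed (operand_match expects them as O_register
   expressions).  */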
5396
5397 static unsigned int
5398 operand_width (enum ia64_opnd opnd)
5399 {
5400 const struct ia64_operand *odesc = &elf64_ia64_operands[opnd];
5401 unsigned int bits = 0;
5402 int i;
5403
5404 bits = 0;
5405 for (i = 0; i < NELEMS (odesc->field) && odesc->field[i].bits; ++i)
5406 bits += odesc->field[i].bits;
5407
5408 return bits;
5409 }
5410
5411 static enum operand_match_result
5412 operand_match (const struct ia64_opcode *idesc, int res_index, expressionS *e)
5413 {
5414 enum ia64_opnd opnd = idesc->operands[res_index];
5415 int bits, relocatable = 0;
5416 struct insn_fix *fix;
5417 bfd_signed_vma val;
5418
5419 switch (opnd)
5420 {
5421 /* constants: */
5422
5423 case IA64_OPND_AR_CCV:
5424 if (e->X_op == O_register && e->X_add_number == REG_AR + 32)
5425 return OPERAND_MATCH;
5426 break;
5427
5428 case IA64_OPND_AR_CSD:
5429 if (e->X_op == O_register && e->X_add_number == REG_AR + 25)
5430 return OPERAND_MATCH;
5431 break;
5432
5433 case IA64_OPND_AR_PFS:
5434 if (e->X_op == O_register && e->X_add_number == REG_AR + 64)
5435 return OPERAND_MATCH;
5436 break;
5437
5438 case IA64_OPND_GR0:
5439 if (e->X_op == O_register && e->X_add_number == REG_GR + 0)
5440 return OPERAND_MATCH;
5441 break;
5442
5443 case IA64_OPND_IP:
5444 if (e->X_op == O_register && e->X_add_number == REG_IP)
5445 return OPERAND_MATCH;
5446 break;
5447
5448 case IA64_OPND_PR:
5449 if (e->X_op == O_register && e->X_add_number == REG_PR)
5450 return OPERAND_MATCH;
5451 break;
5452
5453 case IA64_OPND_PR_ROT:
5454 if (e->X_op == O_register && e->X_add_number == REG_PR_ROT)
5455 return OPERAND_MATCH;
5456 break;
5457
5458 case IA64_OPND_PSR:
5459 if (e->X_op == O_register && e->X_add_number == REG_PSR)
5460 return OPERAND_MATCH;
5461 break;
5462
5463 case IA64_OPND_PSR_L:
5464 if (e->X_op == O_register && e->X_add_number == REG_PSR_L)
5465 return OPERAND_MATCH;
5466 break;
5467
5468 case IA64_OPND_PSR_UM:
5469 if (e->X_op == O_register && e->X_add_number == REG_PSR_UM)
5470 return OPERAND_MATCH;
5471 break;
5472
5473 case IA64_OPND_C1:
5474 if (e->X_op == O_constant)
5475 {
5476 if (e->X_add_number == 1)
5477 return OPERAND_MATCH;
5478 else
5479 return OPERAND_OUT_OF_RANGE;
5480 }
5481 break;
5482
5483 case IA64_OPND_C8:
5484 if (e->X_op == O_constant)
5485 {
5486 if (e->X_add_number == 8)
5487 return OPERAND_MATCH;
5488 else
5489 return OPERAND_OUT_OF_RANGE;
5490 }
5491 break;
5492
5493 case IA64_OPND_C16:
5494 if (e->X_op == O_constant)
5495 {
5496 if (e->X_add_number == 16)
5497 return OPERAND_MATCH;
5498 else
5499 return OPERAND_OUT_OF_RANGE;
5500 }
5501 break;
5502
5503 /* register operands: */
5504
5505 case IA64_OPND_AR3:
5506 if (e->X_op == O_register && e->X_add_number >= REG_AR
5507 && e->X_add_number < REG_AR + 128)
5508 return OPERAND_MATCH;
5509 break;
5510
5511 case IA64_OPND_B1:
5512 case IA64_OPND_B2:
5513 if (e->X_op == O_register && e->X_add_number >= REG_BR
5514 && e->X_add_number < REG_BR + 8)
5515 return OPERAND_MATCH;
5516 break;
5517
5518 case IA64_OPND_CR3:
5519 if (e->X_op == O_register && e->X_add_number >= REG_CR
5520 && e->X_add_number < REG_CR + 128)
5521 return OPERAND_MATCH;
5522 break;
5523
5524 case IA64_OPND_DAHR3:
5525 if (e->X_op == O_register && e->X_add_number >= REG_DAHR
5526 && e->X_add_number < REG_DAHR + 8)
5527 return OPERAND_MATCH;
5528 break;
5529
5530 case IA64_OPND_F1:
5531 case IA64_OPND_F2:
5532 case IA64_OPND_F3:
5533 case IA64_OPND_F4:
5534 if (e->X_op == O_register && e->X_add_number >= REG_FR
5535 && e->X_add_number < REG_FR + 128)
5536 return OPERAND_MATCH;
5537 break;
5538
5539 case IA64_OPND_P1:
5540 case IA64_OPND_P2:
5541 if (e->X_op == O_register && e->X_add_number >= REG_P
5542 && e->X_add_number < REG_P + 64)
5543 return OPERAND_MATCH;
5544 break;
5545
5546 case IA64_OPND_R1:
5547 case IA64_OPND_R2:
5548 case IA64_OPND_R3:
5549 if (e->X_op == O_register && e->X_add_number >= REG_GR
5550 && e->X_add_number < REG_GR + 128)
5551 return OPERAND_MATCH;
5552 break;
5553
5554 case IA64_OPND_R3_2:
5555 if (e->X_op == O_register && e->X_add_number >= REG_GR)
5556 {
5557 if (e->X_add_number < REG_GR + 4)
5558 return OPERAND_MATCH;
5559 else if (e->X_add_number < REG_GR + 128)
5560 return OPERAND_OUT_OF_RANGE;
5561 }
5562 break;
5563
5564 /* indirect operands: */
5565 case IA64_OPND_CPUID_R3:
5566 case IA64_OPND_DBR_R3:
5567 case IA64_OPND_DTR_R3:
5568 case IA64_OPND_ITR_R3:
5569 case IA64_OPND_IBR_R3:
5570 case IA64_OPND_MSR_R3:
5571 case IA64_OPND_PKR_R3:
5572 case IA64_OPND_PMC_R3:
5573 case IA64_OPND_PMD_R3:
5574 case IA64_OPND_DAHR_R3:
5575 case IA64_OPND_RR_R3:
5576 if (e->X_op == O_index && e->X_op_symbol
5577 && (S_GET_VALUE (e->X_op_symbol) - IND_CPUID
5578 == opnd - IA64_OPND_CPUID_R3))
5579 return OPERAND_MATCH;
5580 break;
5581
5582 case IA64_OPND_MR3:
5583 if (e->X_op == O_index && !e->X_op_symbol)
5584 return OPERAND_MATCH;
5585 break;
5586
5587 /* immediate operands: */
5588 case IA64_OPND_CNT2a:
5589 case IA64_OPND_LEN4:
5590 case IA64_OPND_LEN6:
5591 bits = operand_width (idesc->operands[res_index]);
5592 if (e->X_op == O_constant)
5593 {
5594 if ((bfd_vma) (e->X_add_number - 1) < ((bfd_vma) 1 << bits))
5595 return OPERAND_MATCH;
5596 else
5597 return OPERAND_OUT_OF_RANGE;
5598 }
5599 break;
5600
5601 case IA64_OPND_CNT2b:
5602 if (e->X_op == O_constant)
5603 {
5604 if ((bfd_vma) (e->X_add_number - 1) < 3)
5605 return OPERAND_MATCH;
5606 else
5607 return OPERAND_OUT_OF_RANGE;
5608 }
5609 break;
5610
5611 case IA64_OPND_CNT2c:
5612 val = e->X_add_number;
5613 if (e->X_op == O_constant)
5614 {
5615 if ((val == 0 || val == 7 || val == 15 || val == 16))
5616 return OPERAND_MATCH;
5617 else
5618 return OPERAND_OUT_OF_RANGE;
5619 }
5620 break;
5621
5622 case IA64_OPND_SOR:
5623 /* SOR must be an integer multiple of 8 */
5624 if (e->X_op == O_constant && e->X_add_number & 0x7)
5625 return OPERAND_OUT_OF_RANGE;
5626 /* Fall through. */
5627 case IA64_OPND_SOF:
5628 case IA64_OPND_SOL:
5629 if (e->X_op == O_constant)
5630 {
5631 if ((bfd_vma) e->X_add_number <= 96)
5632 return OPERAND_MATCH;
5633 else
5634 return OPERAND_OUT_OF_RANGE;
5635 }
5636 break;
5637
5638 case IA64_OPND_IMMU62:
5639 if (e->X_op == O_constant)
5640 {
5641 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << 62))
5642 return OPERAND_MATCH;
5643 else
5644 return OPERAND_OUT_OF_RANGE;
5645 }
5646 else
5647 {
5648 /* FIXME -- need 62-bit relocation type */
5649 as_bad (_("62-bit relocation not yet implemented"));
5650 }
5651 break;
5652
5653 case IA64_OPND_IMMU64:
5654 if (e->X_op == O_symbol || e->X_op == O_pseudo_fixup
5655 || e->X_op == O_subtract)
5656 {
5657 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5658 fix->code = BFD_RELOC_IA64_IMM64;
5659 if (e->X_op != O_subtract)
5660 {
5661 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5662 if (e->X_op == O_pseudo_fixup)
5663 e->X_op = O_symbol;
5664 }
5665
5666 fix->opnd = idesc->operands[res_index];
5667 fix->expr = *e;
5668 fix->is_pcrel = 0;
5669 ++CURR_SLOT.num_fixups;
5670 return OPERAND_MATCH;
5671 }
5672 else if (e->X_op == O_constant)
5673 return OPERAND_MATCH;
5674 break;
5675
5676 case IA64_OPND_IMMU5b:
5677 if (e->X_op == O_constant)
5678 {
5679 val = e->X_add_number;
5680 if (val >= 32 && val <= 63)
5681 return OPERAND_MATCH;
5682 else
5683 return OPERAND_OUT_OF_RANGE;
5684 }
5685 break;
5686
5687 case IA64_OPND_CCNT5:
5688 case IA64_OPND_CNT5:
5689 case IA64_OPND_CNT6:
5690 case IA64_OPND_CPOS6a:
5691 case IA64_OPND_CPOS6b:
5692 case IA64_OPND_CPOS6c:
5693 case IA64_OPND_IMMU2:
5694 case IA64_OPND_IMMU7a:
5695 case IA64_OPND_IMMU7b:
5696 case IA64_OPND_IMMU16:
5697 case IA64_OPND_IMMU19:
5698 case IA64_OPND_IMMU21:
5699 case IA64_OPND_IMMU24:
5700 case IA64_OPND_MBTYPE4:
5701 case IA64_OPND_MHTYPE8:
5702 case IA64_OPND_POS6:
5703 bits = operand_width (idesc->operands[res_index]);
5704 if (e->X_op == O_constant)
5705 {
5706 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5707 return OPERAND_MATCH;
5708 else
5709 return OPERAND_OUT_OF_RANGE;
5710 }
5711 break;
5712
5713 case IA64_OPND_IMMU9:
5714 bits = operand_width (idesc->operands[res_index]);
5715 if (e->X_op == O_constant)
5716 {
5717 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5718 {
5719 int lobits = e->X_add_number & 0x3;
5720 if (((bfd_vma) e->X_add_number & 0x3C) != 0 && lobits == 0)
5721 e->X_add_number |= (bfd_vma) 0x3;
5722 return OPERAND_MATCH;
5723 }
5724 else
5725 return OPERAND_OUT_OF_RANGE;
5726 }
5727 break;
5728
5729 case IA64_OPND_IMM44:
5730 /* The least significant 16 bits must be zero. */
5731 if ((e->X_add_number & 0xffff) != 0)
5732 /* XXX technically, this is wrong: we should not be issuing warning
5733 messages until we're sure this instruction pattern is going to
5734 be used! */
5735 as_warn (_("lower 16 bits of mask ignored"));
5736
5737 if (e->X_op == O_constant)
5738 {
5739 if (((e->X_add_number >= 0
5740 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 44))
5741 || (e->X_add_number < 0
5742 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 44))))
5743 {
5744 /* sign-extend */
5745 if (e->X_add_number >= 0
5746 && (e->X_add_number & ((bfd_vma) 1 << 43)) != 0)
5747 {
5748 e->X_add_number |= ~(((bfd_vma) 1 << 44) - 1);
5749 }
5750 return OPERAND_MATCH;
5751 }
5752 else
5753 return OPERAND_OUT_OF_RANGE;
5754 }
5755 break;
5756
5757 case IA64_OPND_IMM17:
5758 /* bit 0 is a don't care (pr0 is hardwired to 1) */
5759 if (e->X_op == O_constant)
5760 {
5761 if (((e->X_add_number >= 0
5762 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 17))
5763 || (e->X_add_number < 0
5764 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 17))))
5765 {
5766 /* sign-extend */
5767 if (e->X_add_number >= 0
5768 && (e->X_add_number & ((bfd_vma) 1 << 16)) != 0)
5769 {
5770 e->X_add_number |= ~(((bfd_vma) 1 << 17) - 1);
5771 }
5772 return OPERAND_MATCH;
5773 }
5774 else
5775 return OPERAND_OUT_OF_RANGE;
5776 }
5777 break;
5778
5779 case IA64_OPND_IMM14:
5780 case IA64_OPND_IMM22:
5781 relocatable = 1;
5782 /* Fall through. */
5783 case IA64_OPND_IMM1:
5784 case IA64_OPND_IMM8:
5785 case IA64_OPND_IMM8U4:
5786 case IA64_OPND_IMM8M1:
5787 case IA64_OPND_IMM8M1U4:
5788 case IA64_OPND_IMM8M1U8:
5789 case IA64_OPND_IMM9a:
5790 case IA64_OPND_IMM9b:
5791 bits = operand_width (idesc->operands[res_index]);
5792 if (relocatable && (e->X_op == O_symbol
5793 || e->X_op == O_subtract
5794 || e->X_op == O_pseudo_fixup))
5795 {
5796 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5797
5798 if (idesc->operands[res_index] == IA64_OPND_IMM14)
5799 fix->code = BFD_RELOC_IA64_IMM14;
5800 else
5801 fix->code = BFD_RELOC_IA64_IMM22;
5802
5803 if (e->X_op != O_subtract)
5804 {
5805 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5806 if (e->X_op == O_pseudo_fixup)
5807 e->X_op = O_symbol;
5808 }
5809
5810 fix->opnd = idesc->operands[res_index];
5811 fix->expr = *e;
5812 fix->is_pcrel = 0;
5813 ++CURR_SLOT.num_fixups;
5814 return OPERAND_MATCH;
5815 }
5816 else if (e->X_op != O_constant
5817 && ! (e->X_op == O_big && opnd == IA64_OPND_IMM8M1U8))
5818 return OPERAND_MISMATCH;
5819
5820 if (opnd == IA64_OPND_IMM8M1U4)
5821 {
5822 /* Zero is not valid for unsigned compares that take an adjusted
5823 constant immediate range. */
5824 if (e->X_add_number == 0)
5825 return OPERAND_OUT_OF_RANGE;
5826
5827 /* Sign-extend 32-bit unsigned numbers, so that the following range
5828 checks will work. */
5829 val = e->X_add_number;
5830 if ((val & (~(bfd_vma) 0 << 32)) == 0)
5831 val = (val ^ ((bfd_vma) 1 << 31)) - ((bfd_vma) 1 << 31);
5832
5833 /* Check for 0x100000000. This is valid because
5834 0x100000000-1 is the same as ((uint32_t) -1). */
5835 if (val == ((bfd_signed_vma) 1 << 32))
5836 return OPERAND_MATCH;
5837
5838 val = val - 1;
5839 }
5840 else if (opnd == IA64_OPND_IMM8M1U8)
5841 {
5842 /* Zero is not valid for unsigned compares that take an adjusted
5843 constant immediate range. */
5844 if (e->X_add_number == 0)
5845 return OPERAND_OUT_OF_RANGE;
5846
5847 /* Check for 0x10000000000000000. */
5848 if (e->X_op == O_big)
5849 {
5850 if (generic_bignum[0] == 0
5851 && generic_bignum[1] == 0
5852 && generic_bignum[2] == 0
5853 && generic_bignum[3] == 0
5854 && generic_bignum[4] == 1)
5855 return OPERAND_MATCH;
5856 else
5857 return OPERAND_OUT_OF_RANGE;
5858 }
5859 else
5860 val = e->X_add_number - 1;
5861 }
5862 else if (opnd == IA64_OPND_IMM8M1)
5863 val = e->X_add_number - 1;
5864 else if (opnd == IA64_OPND_IMM8U4)
5865 {
5866 /* Sign-extend 32-bit unsigned numbers, so that the following range
5867 checks will work. */
5868 val = e->X_add_number;
5869 if ((val & (~(bfd_vma) 0 << 32)) == 0)
5870 val = (val ^ ((bfd_vma) 1 << 31)) - ((bfd_vma) 1 << 31);
5871 }
5872 else
5873 val = e->X_add_number;
5874
5875 if ((val >= 0 && (bfd_vma) val < ((bfd_vma) 1 << (bits - 1)))
5876 || (val < 0 && (bfd_vma) -val <= ((bfd_vma) 1 << (bits - 1))))
5877 return OPERAND_MATCH;
5878 else
5879 return OPERAND_OUT_OF_RANGE;
5880
5881 case IA64_OPND_INC3:
5882 /* +/- 1, 4, 8, 16 */
5883 val = e->X_add_number;
5884 if (val < 0)
5885 val = -val;
5886 if (e->X_op == O_constant)
5887 {
5888 if ((val == 1 || val == 4 || val == 8 || val == 16))
5889 return OPERAND_MATCH;
5890 else
5891 return OPERAND_OUT_OF_RANGE;
5892 }
5893 break;
5894
5895 case IA64_OPND_TGT25:
5896 case IA64_OPND_TGT25b:
5897 case IA64_OPND_TGT25c:
5898 case IA64_OPND_TGT64:
5899 if (e->X_op == O_symbol)
5900 {
5901 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5902 if (opnd == IA64_OPND_TGT25)
5903 fix->code = BFD_RELOC_IA64_PCREL21F;
5904 else if (opnd == IA64_OPND_TGT25b)
5905 fix->code = BFD_RELOC_IA64_PCREL21M;
5906 else if (opnd == IA64_OPND_TGT25c)
5907 fix->code = BFD_RELOC_IA64_PCREL21B;
5908 else if (opnd == IA64_OPND_TGT64)
5909 fix->code = BFD_RELOC_IA64_PCREL60B;
5910 else
5911 abort ();
5912
5913 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5914 fix->opnd = idesc->operands[res_index];
5915 fix->expr = *e;
5916 fix->is_pcrel = 1;
5917 ++CURR_SLOT.num_fixups;
5918 return OPERAND_MATCH;
5919 }
5920 /* Fall through. */
5921 case IA64_OPND_TAG13:
5922 case IA64_OPND_TAG13b:
5923 switch (e->X_op)
5924 {
5925 case O_constant:
5926 return OPERAND_MATCH;
5927
5928 case O_symbol:
5929 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5930 /* There are no external relocs for TAG13/TAG13b fields, so we
5931 create a dummy reloc. This will not live past md_apply_fix. */
5932 fix->code = BFD_RELOC_UNUSED;
5933 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5934 fix->opnd = idesc->operands[res_index];
5935 fix->expr = *e;
5936 fix->is_pcrel = 1;
5937 ++CURR_SLOT.num_fixups;
5938 return OPERAND_MATCH;
5939
5940 default:
5941 break;
5942 }
5943 break;
5944
5945 case IA64_OPND_LDXMOV:
5946 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5947 fix->code = BFD_RELOC_IA64_LDXMOV;
5948 fix->opnd = idesc->operands[res_index];
5949 fix->expr = *e;
5950 fix->is_pcrel = 0;
5951 ++CURR_SLOT.num_fixups;
5952 return OPERAND_MATCH;
5953
5954 case IA64_OPND_STRD5b:
5955 if (e->X_op == O_constant)
5956 {
5957 /* 5-bit signed scaled by 64 */
5958 if ((e->X_add_number <= ( 0xf << 6 ))
5959 && (e->X_add_number >= -( 0x10 << 6 )))
5960 {
5961
5962 /* Must be a multiple of 64 */
5963 if ((e->X_add_number & 0x3f) != 0)
5964 as_warn (_("stride must be a multiple of 64; lower 6 bits ignored"));
5965
5966 e->X_add_number &= ~ 0x3f;
5967 return OPERAND_MATCH;
5968 }
5969 else
5970 return OPERAND_OUT_OF_RANGE;
5971 }
5972 break;
5973 case IA64_OPND_CNT6a:
5974 if (e->X_op == O_constant)
5975 {
5976 /* 6-bit unsigned biased by 1 -- count 0 is meaningless */
5977 if ((e->X_add_number <= 64)
5978 && (e->X_add_number > 0) )
5979 {
5980 return OPERAND_MATCH;
5981 }
5982 else
5983 return OPERAND_OUT_OF_RANGE;
5984 }
5985 break;
5986
5987 default:
5988 break;
5989 }
5990 return OPERAND_MISMATCH;
5991 }
5992
5993 static int
5994 parse_operand (expressionS *e, int more)
5995 {
5996 int sep = '\0';
5997
5998 memset (e, 0, sizeof (*e));
5999 e->X_op = O_absent;
6000 SKIP_WHITESPACE ();
6001 expression (e);
6002 sep = *input_line_pointer;
6003 if (more && (sep == ',' || sep == more))
6004 ++input_line_pointer;
6005 return sep;
6006 }
6007
6008 static int
6009 parse_operand_and_eval (expressionS *e, int more)
6010 {
6011 int sep = parse_operand (e, more);
6012 resolve_expression (e);
6013 return sep;
6014 }
6015
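/* Like parse_operand_and_eval, but leave the expression unresolved for
   operand kinds that may legitimately carry relocations (the immediate
   and branch-target forms listed below), so symbolic operands survive
   until fixups are generated in operand_match.  */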
6016 static int
6017 parse_operand_maybe_eval (expressionS *e, int more, enum ia64_opnd op)
6018 {
6019 int sep = parse_operand (e, more);
6020 switch (op)
6021 {
6022 case IA64_OPND_IMM14:
6023 case IA64_OPND_IMM22:
6024 case IA64_OPND_IMMU64:
6025 case IA64_OPND_TGT25:
6026 case IA64_OPND_TGT25b:
6027 case IA64_OPND_TGT25c:
6028 case IA64_OPND_TGT64:
6029 case IA64_OPND_TAG13:
6030 case IA64_OPND_TAG13b:
6031 case IA64_OPND_LDXMOV:
6032 break;
6033 default:
6034 resolve_expression (e);
6035 break;
6036 }
6037 return sep;
6038 }
6039
6040 /* Returns the next entry in the opcode table that matches the one in
6041 IDESC, and frees the entry in IDESC. If no matching entry is
6042 found, NULL is returned instead. */
6043
6044 static struct ia64_opcode *
6045 get_next_opcode (struct ia64_opcode *idesc)
6046 {
6047 struct ia64_opcode *next = ia64_find_next_opcode (idesc);
6048 ia64_free_opcode (idesc);
6049 return next;
6050 }
6051
6052 /* Parse the operands for the opcode and find the opcode variant that
6053 matches the specified operands, or NULL if no match is possible. */
6054
6055 static struct ia64_opcode *
6056 parse_operands (struct ia64_opcode *idesc)
6057 {
6058 int i = 0, highest_unmatched_operand, num_operands = 0, num_outputs = 0;
6059 int error_pos, out_of_range_pos, curr_out_of_range_pos, sep = 0;
6060 int reg1, reg2;
6061 char reg_class;
6062 enum ia64_opnd expected_operand = IA64_OPND_NIL;
6063 enum operand_match_result result;
6064 char mnemonic[129];
6065 char *first_arg = 0, *end, *saved_input_pointer;
6066 unsigned int sof;
6067
6068 gas_assert (strlen (idesc->name) <= 128);
6069
6070 strcpy (mnemonic, idesc->name);
6071 if (idesc->operands[2] == IA64_OPND_SOF
6072 || idesc->operands[1] == IA64_OPND_SOF)
6073 {
6074 /* To make the common idiom "alloc loc?=ar.pfs,0,1,0,0" work, we
6075 can't parse the first operand until we have parsed the
6076 remaining operands of the "alloc" instruction. */
6077 SKIP_WHITESPACE ();
6078 first_arg = input_line_pointer;
6079 end = strchr (input_line_pointer, '=');
6080 if (!end)
6081 {
6082 as_bad (_("Expected separator `='"));
6083 return 0;
6084 }
6085 input_line_pointer = end + 1;
6086 ++i;
6087 ++num_outputs;
6088 }
6089
6090 for (; ; ++i)
6091 {
6092 if (i < NELEMS (CURR_SLOT.opnd))
6093 {
6094 sep = parse_operand_maybe_eval (CURR_SLOT.opnd + i, '=',
6095 idesc->operands[i]);
6096 if (CURR_SLOT.opnd[i].X_op == O_absent)
6097 break;
6098 }
6099 else
6100 {
6101 expressionS dummy;
6102
6103 sep = parse_operand (&dummy, '=');
6104 if (dummy.X_op == O_absent)
6105 break;
6106 }
6107
6108 ++num_operands;
6109
6110 if (sep != '=' && sep != ',')
6111 break;
6112
6113 if (sep == '=')
6114 {
6115 if (num_outputs > 0)
6116 as_bad (_("Duplicate equal sign (=) in instruction"));
6117 else
6118 num_outputs = i + 1;
6119 }
6120 }
6121 if (sep != '\0')
6122 {
6123 as_bad (_("Illegal operand separator `%c'"), sep);
6124 return 0;
6125 }
6126
6127 if (idesc->operands[2] == IA64_OPND_SOF
6128 || idesc->operands[1] == IA64_OPND_SOF)
6129 {
6130 /* Map alloc r1=ar.pfs,i,l,o,r to alloc r1=ar.pfs,(i+l+o),(i+l),r.
6131 Note, however, that due to that mapping operand numbers in error
6132 messages for any of the constant operands will not be correct. */
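/* For instance, a hypothetical `alloc r32=ar.pfs,2,3,2,0' is handled as
   if it had been written `alloc r32=ar.pfs,7,5,0': sof = 2+3+2 = 7 and
   sol = 2+3 = 5, with the rotating count passed through unchanged.  */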
6133 know (strcmp (idesc->name, "alloc") == 0);
6134 /* The first operand hasn't been parsed/initialized yet (but
6135 num_operands intentionally doesn't account for that). */
6136 i = num_operands > 4 ? 2 : 1;
6137 #define FORCE_CONST(n) (CURR_SLOT.opnd[n].X_op == O_constant \
6138 ? CURR_SLOT.opnd[n].X_add_number \
6139 : 0)
6140 sof = set_regstack (FORCE_CONST(i),
6141 FORCE_CONST(i + 1),
6142 FORCE_CONST(i + 2),
6143 FORCE_CONST(i + 3));
6144 #undef FORCE_CONST
6145
6146 /* now we can parse the first arg: */
6147 saved_input_pointer = input_line_pointer;
6148 input_line_pointer = first_arg;
6149 sep = parse_operand_maybe_eval (CURR_SLOT.opnd + 0, '=',
6150 idesc->operands[0]);
6151 if (sep != '=')
6152 --num_outputs; /* force error */
6153 input_line_pointer = saved_input_pointer;
6154
6155 CURR_SLOT.opnd[i].X_add_number = sof;
6156 if (CURR_SLOT.opnd[i + 1].X_op == O_constant
6157 && CURR_SLOT.opnd[i + 2].X_op == O_constant)
6158 CURR_SLOT.opnd[i + 1].X_add_number
6159 = sof - CURR_SLOT.opnd[i + 2].X_add_number;
6160 else
6161 CURR_SLOT.opnd[i + 1].X_op = O_illegal;
6162 CURR_SLOT.opnd[i + 2] = CURR_SLOT.opnd[i + 3];
6163 }
6164
6165 highest_unmatched_operand = -4;
6166 curr_out_of_range_pos = -1;
6167 error_pos = 0;
6168 for (; idesc; idesc = get_next_opcode (idesc))
6169 {
6170 if (num_outputs != idesc->num_outputs)
6171 continue; /* mismatch in # of outputs */
6172 if (highest_unmatched_operand < 0)
6173 highest_unmatched_operand |= 1;
6174 if (num_operands > NELEMS (idesc->operands)
6175 || (num_operands < NELEMS (idesc->operands)
6176 && idesc->operands[num_operands])
6177 || (num_operands > 0 && !idesc->operands[num_operands - 1]))
6178 continue; /* mismatch in number of arguments */
6179 if (highest_unmatched_operand < 0)
6180 highest_unmatched_operand |= 2;
6181
6182 CURR_SLOT.num_fixups = 0;
6183
6184 /* Try to match all operands. If we see an out-of-range operand,
6185 then continue trying to match the rest of the operands, since if
6186 the rest match, then this idesc will give the best error message. */
6187
6188 out_of_range_pos = -1;
6189 for (i = 0; i < num_operands && idesc->operands[i]; ++i)
6190 {
6191 result = operand_match (idesc, i, CURR_SLOT.opnd + i);
6192 if (result != OPERAND_MATCH)
6193 {
6194 if (result != OPERAND_OUT_OF_RANGE)
6195 break;
6196 if (out_of_range_pos < 0)
6197 /* remember position of the first out-of-range operand: */
6198 out_of_range_pos = i;
6199 }
6200 }
6201
6202 /* If we did not match all operands, or if at least one operand was
6203 out-of-range, then this idesc does not match. Keep track of which
6204 idesc matched the most operands before failing. If we have two
6205 idescs that failed at the same position, and one had an out-of-range
6206 operand, then prefer the out-of-range operand. Thus if we have
6207 "add r0=0x1000000,r1" we get an error saying the constant is out
6208 of range instead of an error saying that the constant should have been
6209 a register. */
6210
6211 if (i != num_operands || out_of_range_pos >= 0)
6212 {
6213 if (i > highest_unmatched_operand
6214 || (i == highest_unmatched_operand
6215 && out_of_range_pos > curr_out_of_range_pos))
6216 {
6217 highest_unmatched_operand = i;
6218 if (out_of_range_pos >= 0)
6219 {
6220 expected_operand = idesc->operands[out_of_range_pos];
6221 error_pos = out_of_range_pos;
6222 }
6223 else
6224 {
6225 expected_operand = idesc->operands[i];
6226 error_pos = i;
6227 }
6228 curr_out_of_range_pos = out_of_range_pos;
6229 }
6230 continue;
6231 }
6232
6233 break;
6234 }
6235 if (!idesc)
6236 {
6237 if (expected_operand)
6238 as_bad (_("Operand %u of `%s' should be %s"),
6239 error_pos + 1, mnemonic,
6240 elf64_ia64_operands[expected_operand].desc);
6241 else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 1))
6242 as_bad (_("Wrong number of output operands"));
6243 else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 2))
6244 as_bad (_("Wrong number of input operands"));
6245 else
6246 as_bad (_("Operand mismatch"));
6247 return 0;
6248 }
6249
6250 /* Check that the instruction doesn't use
6251 - r0, f0, or f1 as output operands
6252 - the same predicate twice as output operands
6253 - r0 as address of a base update load or store
6254 - the same GR as output and address of a base update load
6255 - two even- or two odd-numbered FRs as output operands of a floating
6256 point parallel load.
6257 At most two (conflicting) output (or output-like) operands can exist,
6258 (floating point parallel loads have three outputs, but the base register,
6259 if updated, cannot conflict with the actual outputs). */
6260 reg2 = reg1 = -1;
6261 for (i = 0; i < num_operands; ++i)
6262 {
6263 int regno = 0;
6264
6265 reg_class = 0;
6266 switch (idesc->operands[i])
6267 {
6268 case IA64_OPND_R1:
6269 case IA64_OPND_R2:
6270 case IA64_OPND_R3:
6271 if (i < num_outputs)
6272 {
6273 if (CURR_SLOT.opnd[i].X_add_number == REG_GR)
6274 reg_class = 'r';
6275 else if (reg1 < 0)
6276 reg1 = CURR_SLOT.opnd[i].X_add_number;
6277 else if (reg2 < 0)
6278 reg2 = CURR_SLOT.opnd[i].X_add_number;
6279 }
6280 break;
6281 case IA64_OPND_P1:
6282 case IA64_OPND_P2:
6283 if (i < num_outputs)
6284 {
6285 if (reg1 < 0)
6286 reg1 = CURR_SLOT.opnd[i].X_add_number;
6287 else if (reg2 < 0)
6288 reg2 = CURR_SLOT.opnd[i].X_add_number;
6289 }
6290 break;
6291 case IA64_OPND_F1:
6292 case IA64_OPND_F2:
6293 case IA64_OPND_F3:
6294 case IA64_OPND_F4:
6295 if (i < num_outputs)
6296 {
6297 if (CURR_SLOT.opnd[i].X_add_number >= REG_FR
6298 && CURR_SLOT.opnd[i].X_add_number <= REG_FR + 1)
6299 {
6300 reg_class = 'f';
6301 regno = CURR_SLOT.opnd[i].X_add_number - REG_FR;
6302 }
6303 else if (reg1 < 0)
6304 reg1 = CURR_SLOT.opnd[i].X_add_number;
6305 else if (reg2 < 0)
6306 reg2 = CURR_SLOT.opnd[i].X_add_number;
6307 }
6308 break;
6309 case IA64_OPND_MR3:
6310 if (idesc->flags & IA64_OPCODE_POSTINC)
6311 {
6312 if (CURR_SLOT.opnd[i].X_add_number == REG_GR)
6313 reg_class = 'm';
6314 else if (reg1 < 0)
6315 reg1 = CURR_SLOT.opnd[i].X_add_number;
6316 else if (reg2 < 0)
6317 reg2 = CURR_SLOT.opnd[i].X_add_number;
6318 }
6319 break;
6320 default:
6321 break;
6322 }
6323 switch (reg_class)
6324 {
6325 case 0:
6326 break;
6327 default:
6328 as_warn (_("Invalid use of `%c%d' as output operand"), reg_class, regno);
6329 break;
6330 case 'm':
6331 as_warn (_("Invalid use of `r%d' as base update address operand"), regno);
6332 break;
6333 }
6334 }
6335 if (reg1 == reg2)
6336 {
6337 if (reg1 >= REG_GR && reg1 <= REG_GR + 127)
6338 {
6339 reg1 -= REG_GR;
6340 reg_class = 'r';
6341 }
6342 else if (reg1 >= REG_P && reg1 <= REG_P + 63)
6343 {
6344 reg1 -= REG_P;
6345 reg_class = 'p';
6346 }
6347 else if (reg1 >= REG_FR && reg1 <= REG_FR + 127)
6348 {
6349 reg1 -= REG_FR;
6350 reg_class = 'f';
6351 }
6352 else
6353 reg_class = 0;
6354 if (reg_class)
6355 as_warn (_("Invalid duplicate use of `%c%d'"), reg_class, reg1);
6356 }
6357 else if (((reg1 >= REG_FR && reg1 <= REG_FR + 31
6358 && reg2 >= REG_FR && reg2 <= REG_FR + 31)
6359 || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127
6360 && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127))
6361 && ! ((reg1 ^ reg2) & 1))
6362 as_warn (_("Invalid simultaneous use of `f%d' and `f%d'"),
6363 reg1 - REG_FR, reg2 - REG_FR);
6364 else if ((reg1 >= REG_FR && reg1 <= REG_FR + 31
6365 && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127)
6366 || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127
6367 && reg2 >= REG_FR && reg2 <= REG_FR + 31))
6368 as_warn (_("Dangerous simultaneous use of `f%d' and `f%d'"),
6369 reg1 - REG_FR, reg2 - REG_FR);
6370 return idesc;
6371 }
6372
6373 static void
6374 build_insn (struct slot *slot, bfd_vma *insnp)
6375 {
6376 const struct ia64_operand *odesc, *o2desc;
6377 struct ia64_opcode *idesc = slot->idesc;
6378 bfd_vma insn;
6379 bfd_signed_vma val;
6380 const char *err;
6381 int i;
6382
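  /* The qualifying-predicate field occupies the low six bits of the
     instruction encoding, so the predicate register number can be OR'ed
     directly into the opcode skeleton.  */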
6383 insn = idesc->opcode | slot->qp_regno;
6384
6385 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; ++i)
6386 {
6387 if (slot->opnd[i].X_op == O_register
6388 || slot->opnd[i].X_op == O_constant
6389 || slot->opnd[i].X_op == O_index)
6390 val = slot->opnd[i].X_add_number;
6391 else if (slot->opnd[i].X_op == O_big)
6392 {
6393 /* This must be the value 0x10000000000000000. */
6394 gas_assert (idesc->operands[i] == IA64_OPND_IMM8M1U8);
6395 val = 0;
6396 }
6397 else
6398 val = 0;
6399
6400 switch (idesc->operands[i])
6401 {
6402 case IA64_OPND_IMMU64:
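	  /* movl: the upper 41 bits of the immediate fill the neighboring
	     L slot (written through insnp); the remaining bits are scattered
	     over the imm7b, imm9d, imm5c, ic and i fields of the X-unit
	     instruction assembled below.  */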
6403 *insnp++ = (val >> 22) & 0x1ffffffffffLL;
6404 insn |= (((val & 0x7f) << 13) | (((val >> 7) & 0x1ff) << 27)
6405 | (((val >> 16) & 0x1f) << 22) | (((val >> 21) & 0x1) << 21)
6406 | (((val >> 63) & 0x1) << 36));
6407 continue;
6408
6409 case IA64_OPND_IMMU62:
6410 val &= 0x3fffffffffffffffULL;
6411 if (val != slot->opnd[i].X_add_number)
6412 as_warn (_("Value truncated to 62 bits"));
6413 *insnp++ = (val >> 21) & 0x1ffffffffffLL;
6414 insn |= (((val & 0xfffff) << 6) | (((val >> 20) & 0x1) << 36));
6415 continue;
6416
6417 case IA64_OPND_TGT64:
6418 val >>= 4;
6419 *insnp++ = ((val >> 20) & 0x7fffffffffLL) << 2;
6420 insn |= ((((val >> 59) & 0x1) << 36)
6421 | (((val >> 0) & 0xfffff) << 13));
6422 continue;
6423
6424 case IA64_OPND_AR3:
6425 val -= REG_AR;
6426 break;
6427
6428 case IA64_OPND_B1:
6429 case IA64_OPND_B2:
6430 val -= REG_BR;
6431 break;
6432
6433 case IA64_OPND_CR3:
6434 val -= REG_CR;
6435 break;
6436
6437 case IA64_OPND_DAHR3:
6438 val -= REG_DAHR;
6439 break;
6440
6441 case IA64_OPND_F1:
6442 case IA64_OPND_F2:
6443 case IA64_OPND_F3:
6444 case IA64_OPND_F4:
6445 val -= REG_FR;
6446 break;
6447
6448 case IA64_OPND_P1:
6449 case IA64_OPND_P2:
6450 val -= REG_P;
6451 break;
6452
6453 case IA64_OPND_R1:
6454 case IA64_OPND_R2:
6455 case IA64_OPND_R3:
6456 case IA64_OPND_R3_2:
6457 case IA64_OPND_CPUID_R3:
6458 case IA64_OPND_DBR_R3:
6459 case IA64_OPND_DTR_R3:
6460 case IA64_OPND_ITR_R3:
6461 case IA64_OPND_IBR_R3:
6462 case IA64_OPND_MR3:
6463 case IA64_OPND_MSR_R3:
6464 case IA64_OPND_PKR_R3:
6465 case IA64_OPND_PMC_R3:
6466 case IA64_OPND_PMD_R3:
6467 case IA64_OPND_DAHR_R3:
6468 case IA64_OPND_RR_R3:
6469 val -= REG_GR;
6470 break;
6471
6472 default:
6473 break;
6474 }
6475
6476 odesc = elf64_ia64_operands + idesc->operands[i];
6477 err = (*odesc->insert) (odesc, val, &insn);
6478 if (err)
6479 as_bad_where (slot->src_file, slot->src_line,
6480 _("Bad operand value: %s"), err);
6481 if (idesc->flags & IA64_OPCODE_PSEUDO)
6482 {
6483 if ((idesc->flags & IA64_OPCODE_F2_EQ_F3)
6484 && odesc == elf64_ia64_operands + IA64_OPND_F3)
6485 {
6486 o2desc = elf64_ia64_operands + IA64_OPND_F2;
6487 (*o2desc->insert) (o2desc, val, &insn);
6488 }
6489 if ((idesc->flags & IA64_OPCODE_LEN_EQ_64MCNT)
6490 && (odesc == elf64_ia64_operands + IA64_OPND_CPOS6a
6491 || odesc == elf64_ia64_operands + IA64_OPND_POS6))
6492 {
6493 o2desc = elf64_ia64_operands + IA64_OPND_LEN6;
6494 (*o2desc->insert) (o2desc, 64 - val, &insn);
6495 }
6496 }
6497 }
6498 *insnp = insn;
6499 }
6500
6501 static void
6502 emit_one_bundle (void)
6503 {
6504 int manual_bundling_off = 0, manual_bundling = 0;
6505 enum ia64_unit required_unit, insn_unit = 0;
6506 enum ia64_insn_type type[3], insn_type;
6507 unsigned int template_val, orig_template;
6508 bfd_vma insn[3] = { -1, -1, -1 };
6509 struct ia64_opcode *idesc;
6510 int end_of_insn_group = 0, user_template = -1;
6511 int n, i, j, first, curr, last_slot;
6512 bfd_vma t0 = 0, t1 = 0;
6513 struct label_fix *lfix;
6514 bfd_boolean mark_label;
6515 struct insn_fix *ifix;
6516 char mnemonic[16];
6517 fixS *fix;
6518 char *f;
6519 int addr_mod;
6520
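  /* The slot queue holds md.num_slots_in_use pending instructions ending
     just before md.curr_slot; "first" indexes the oldest of them.  */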
6521 first = (md.curr_slot + NUM_SLOTS - md.num_slots_in_use) % NUM_SLOTS;
6522 know (first >= 0 && first < NUM_SLOTS);
6523 n = MIN (3, md.num_slots_in_use);
6524
6525   /* Determine template: use user_template if specified, best match
6526 otherwise: */
6527
6528 if (md.slot[first].user_template >= 0)
6529 user_template = template_val = md.slot[first].user_template;
6530 else
6531 {
6532 /* Auto select appropriate template. */
6533 memset (type, 0, sizeof (type));
6534 curr = first;
6535 for (i = 0; i < n; ++i)
6536 {
6537 if (md.slot[curr].label_fixups && i != 0)
6538 break;
6539 type[i] = md.slot[curr].idesc->type;
6540 curr = (curr + 1) % NUM_SLOTS;
6541 }
6542 template_val = best_template[type[0]][type[1]][type[2]];
6543 }
6544
6545 /* initialize instructions with appropriate nops: */
6546 for (i = 0; i < 3; ++i)
6547 insn[i] = nop[ia64_templ_desc[template_val].exec_unit[i]];
6548
6549 f = frag_more (16);
6550
6551   /* Check to see if this bundle is at an offset that is a multiple of 16 bytes
6552 from the start of the frag. */
6553 addr_mod = frag_now_fix () & 15;
6554 if (frag_now->has_code && frag_now->insn_addr != addr_mod)
6555 as_bad (_("instruction address is not a multiple of 16"));
6556 frag_now->insn_addr = addr_mod;
6557 frag_now->has_code = 1;
6558
6559 /* now fill in slots with as many insns as possible: */
6560 curr = first;
6561 idesc = md.slot[curr].idesc;
6562 end_of_insn_group = 0;
6563 last_slot = -1;
6564 for (i = 0; i < 3 && md.num_slots_in_use > 0; ++i)
6565 {
6566 /* If we have unwind records, we may need to update some now. */
6567 unw_rec_list *ptr = md.slot[curr].unwind_record;
6568 unw_rec_list *end_ptr = NULL;
6569
6570 if (ptr)
6571 {
6572 /* Find the last prologue/body record in the list for the current
6573 insn, and set the slot number for all records up to that point.
6574 This needs to be done now, because prologue/body records refer to
6575 the current point, not the point after the instruction has been
6576 issued. This matters because there may have been nops emitted
6577 meanwhile. Any non-prologue non-body record followed by a
6578 prologue/body record must also refer to the current point. */
6579 unw_rec_list *last_ptr;
6580
6581 for (j = 1; end_ptr == NULL && j < md.num_slots_in_use; ++j)
6582 end_ptr = md.slot[(curr + j) % NUM_SLOTS].unwind_record;
6583 for (last_ptr = NULL; ptr != end_ptr; ptr = ptr->next)
6584 if (ptr->r.type == prologue || ptr->r.type == prologue_gr
6585 || ptr->r.type == body)
6586 last_ptr = ptr;
6587 if (last_ptr)
6588 {
6589 /* Make last_ptr point one after the last prologue/body
6590 record. */
6591 last_ptr = last_ptr->next;
6592 for (ptr = md.slot[curr].unwind_record; ptr != last_ptr;
6593 ptr = ptr->next)
6594 {
6595 ptr->slot_number = (unsigned long) f + i;
6596 ptr->slot_frag = frag_now;
6597 }
6598 /* Remove the initialized records, so that we won't accidentally
6599 update them again if we insert a nop and continue. */
6600 md.slot[curr].unwind_record = last_ptr;
6601 }
6602 }
6603
6604 manual_bundling_off = md.slot[curr].manual_bundling_off;
6605 if (md.slot[curr].manual_bundling_on)
6606 {
6607 if (curr == first)
6608 manual_bundling = 1;
6609 else
6610 break; /* Need to start a new bundle. */
6611 }
6612
6613 /* If this instruction specifies a template, then it must be the first
6614 instruction of a bundle. */
6615 if (curr != first && md.slot[curr].user_template >= 0)
6616 break;
6617
6618 if (idesc->flags & IA64_OPCODE_SLOT2)
6619 {
6620 if (manual_bundling && !manual_bundling_off)
6621 {
6622 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6623 _("`%s' must be last in bundle"), idesc->name);
6624 if (i < 2)
6625 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6626 }
6627 i = 2;
6628 }
6629 if (idesc->flags & IA64_OPCODE_LAST)
6630 {
6631 int required_slot;
6632 unsigned int required_template;
6633
6634 /* If we need a stop bit after an M slot, our only choice is
6635 template 5 (M;;MI). If we need a stop bit after a B
6636 slot, our only choice is to place it at the end of the
6637 bundle, because the only available templates are MIB,
6638 MBB, BBB, MMB, and MFB. We don't handle anything other
6639 than M and B slots because these are the only kind of
6640 instructions that can have the IA64_OPCODE_LAST bit set. */
6641 required_template = template_val;
6642 switch (idesc->type)
6643 {
6644 case IA64_TYPE_M:
6645 required_slot = 0;
6646 required_template = 5;
6647 break;
6648
6649 case IA64_TYPE_B:
6650 required_slot = 2;
6651 break;
6652
6653 default:
6654 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6655 _("Internal error: don't know how to force %s to end of instruction group"),
6656 idesc->name);
6657 required_slot = i;
6658 break;
6659 }
6660 if (manual_bundling
6661 && (i > required_slot
6662 || (required_slot == 2 && !manual_bundling_off)
6663 || (user_template >= 0
6664 /* Changing from MMI to M;MI is OK. */
6665 && (template_val ^ required_template) > 1)))
6666 {
6667 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6668 _("`%s' must be last in instruction group"),
6669 idesc->name);
6670 if (i < 2 && required_slot == 2 && !manual_bundling_off)
6671 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6672 }
6673 if (required_slot < i)
6674 /* Can't fit this instruction. */
6675 break;
6676
6677 i = required_slot;
6678 if (required_template != template_val)
6679 {
6680 /* If we switch the template, we need to reset the NOPs
6681 after slot i. The slot-types of the instructions ahead
6682 of i never change, so we don't need to worry about
6683 changing NOPs in front of this slot. */
6684 for (j = i; j < 3; ++j)
6685 insn[j] = nop[ia64_templ_desc[required_template].exec_unit[j]];
6686
6687 /* We just picked a template that includes the stop bit in the
6688 middle, so we don't need another one emitted later. */
6689 md.slot[curr].end_of_insn_group = 0;
6690 }
6691 template_val = required_template;
6692 }
6693 if (curr != first && md.slot[curr].label_fixups)
6694 {
6695 if (manual_bundling)
6696 {
6697 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6698 _("Label must be first in a bundle"));
6699 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6700 }
6701 /* This insn must go into the first slot of a bundle. */
6702 break;
6703 }
6704
6705 if (end_of_insn_group && md.num_slots_in_use >= 1)
6706 {
6707 /* We need an instruction group boundary in the middle of a
6708	     bundle.  See if we can switch to another template with
6709 an appropriate boundary. */
6710
6711 orig_template = template_val;
6712 if (i == 1 && (user_template == 4
6713 || (user_template < 0
6714 && (ia64_templ_desc[template_val].exec_unit[0]
6715 == IA64_UNIT_M))))
6716 {
6717 template_val = 5;
6718 end_of_insn_group = 0;
6719 }
6720 else if (i == 2 && (user_template == 0
6721 || (user_template < 0
6722 && (ia64_templ_desc[template_val].exec_unit[1]
6723 == IA64_UNIT_I)))
6724 /* This test makes sure we don't switch the template if
6725 the next instruction is one that needs to be first in
6726 an instruction group. Since all those instructions are
6727 in the M group, there is no way such an instruction can
6728 fit in this bundle even if we switch the template. The
6729 reason we have to check for this is that otherwise we
6730 may end up generating "MI;;I M.." which has the deadly
6731 effect that the second M instruction is no longer the
6732 first in the group! --davidm 99/12/16 */
6733 && (idesc->flags & IA64_OPCODE_FIRST) == 0)
6734 {
6735 template_val = 1;
6736 end_of_insn_group = 0;
6737 }
6738 else if (i == 1
6739 && user_template == 0
6740 && !(idesc->flags & IA64_OPCODE_FIRST))
6741 /* Use the next slot. */
6742 continue;
6743 else if (curr != first)
6744 /* can't fit this insn */
6745 break;
6746
6747 if (template_val != orig_template)
6748 /* if we switch the template, we need to reset the NOPs
6749 after slot i. The slot-types of the instructions ahead
6750 of i never change, so we don't need to worry about
6751 changing NOPs in front of this slot. */
6752 for (j = i; j < 3; ++j)
6753 insn[j] = nop[ia64_templ_desc[template_val].exec_unit[j]];
6754 }
6755 required_unit = ia64_templ_desc[template_val].exec_unit[i];
6756
6757 /* resolve dynamic opcodes such as "break", "hint", and "nop": */
6758 if (idesc->type == IA64_TYPE_DYN)
6759 {
6760 enum ia64_opnd opnd1, opnd2;
6761
6762 if ((strcmp (idesc->name, "nop") == 0)
6763 || (strcmp (idesc->name, "break") == 0))
6764 insn_unit = required_unit;
6765 else if (strcmp (idesc->name, "hint") == 0)
6766 {
6767 insn_unit = required_unit;
6768 if (required_unit == IA64_UNIT_B)
6769 {
6770 switch (md.hint_b)
6771 {
6772 case hint_b_ok:
6773 break;
6774 case hint_b_warning:
6775 as_warn (_("hint in B unit may be treated as nop"));
6776 break;
6777 case hint_b_error:
6778 /* When manual bundling is off and there is no
6779 user template, we choose a different unit so
6780 that hint won't go into the current slot. We
6781 will fill the current bundle with nops and
6782 try to put hint into the next bundle. */
6783 if (!manual_bundling && user_template < 0)
6784 insn_unit = IA64_UNIT_I;
6785 else
6786 as_bad (_("hint in B unit can't be used"));
6787 break;
6788 }
6789 }
6790 }
6791 else if (strcmp (idesc->name, "chk.s") == 0
6792 || strcmp (idesc->name, "mov") == 0)
6793 {
6794 insn_unit = IA64_UNIT_M;
6795 if (required_unit == IA64_UNIT_I
6796 || (required_unit == IA64_UNIT_F && template_val == 6))
6797 insn_unit = IA64_UNIT_I;
6798 }
6799 else
6800 as_fatal (_("emit_one_bundle: unexpected dynamic op"));
6801
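	  /* Select the unit-specific form of the mnemonic (e.g. nop.i,
	     nop.m) that matches the execution unit chosen above, and look
	     the opcode up again.  */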
6802 snprintf (mnemonic, sizeof (mnemonic), "%s.%c",
6803 idesc->name, "?imbfxx"[insn_unit]);
6804 opnd1 = idesc->operands[0];
6805 opnd2 = idesc->operands[1];
6806 ia64_free_opcode (idesc);
6807 idesc = ia64_find_opcode (mnemonic);
6808 /* moves to/from ARs have collisions */
6809 if (opnd1 == IA64_OPND_AR3 || opnd2 == IA64_OPND_AR3)
6810 {
6811 while (idesc != NULL
6812 && (idesc->operands[0] != opnd1
6813 || idesc->operands[1] != opnd2))
6814 idesc = get_next_opcode (idesc);
6815 }
6816 md.slot[curr].idesc = idesc;
6817 }
6818 else
6819 {
6820 insn_type = idesc->type;
6821 insn_unit = IA64_UNIT_NIL;
6822 switch (insn_type)
6823 {
6824 case IA64_TYPE_A:
6825 if (required_unit == IA64_UNIT_I || required_unit == IA64_UNIT_M)
6826 insn_unit = required_unit;
6827 break;
6828 case IA64_TYPE_X: insn_unit = IA64_UNIT_L; break;
6829 case IA64_TYPE_I: insn_unit = IA64_UNIT_I; break;
6830 case IA64_TYPE_M: insn_unit = IA64_UNIT_M; break;
6831 case IA64_TYPE_B: insn_unit = IA64_UNIT_B; break;
6832 case IA64_TYPE_F: insn_unit = IA64_UNIT_F; break;
6833 default: break;
6834 }
6835 }
6836
6837 if (insn_unit != required_unit)
6838 continue; /* Try next slot. */
6839
6840 /* Now is a good time to fix up the labels for this insn. */
6841 mark_label = FALSE;
6842 for (lfix = md.slot[curr].label_fixups; lfix; lfix = lfix->next)
6843 {
6844 S_SET_VALUE (lfix->sym, frag_now_fix () - 16);
6845 symbol_set_frag (lfix->sym, frag_now);
6846 mark_label |= lfix->dw2_mark_labels;
6847 }
6848 for (lfix = md.slot[curr].tag_fixups; lfix; lfix = lfix->next)
6849 {
6850 S_SET_VALUE (lfix->sym, frag_now_fix () - 16 + i);
6851 symbol_set_frag (lfix->sym, frag_now);
6852 }
6853
6854 if (debug_type == DEBUG_DWARF2
6855 || md.slot[curr].loc_directive_seen
6856 || mark_label)
6857 {
6858 bfd_vma addr = frag_now->fr_address + frag_now_fix () - 16 + i;
6859
6860 md.slot[curr].loc_directive_seen = 0;
6861 if (mark_label)
6862 md.slot[curr].debug_line.flags |= DWARF2_FLAG_BASIC_BLOCK;
6863
6864 dwarf2_gen_line_info (addr, &md.slot[curr].debug_line);
6865 }
6866
6867 build_insn (md.slot + curr, insn + i);
6868
6869 ptr = md.slot[curr].unwind_record;
6870 if (ptr)
6871 {
6872 /* Set slot numbers for all remaining unwind records belonging to the
6873 current insn. There can not be any prologue/body unwind records
6874 here. */
6875 for (; ptr != end_ptr; ptr = ptr->next)
6876 {
6877 ptr->slot_number = (unsigned long) f + i;
6878 ptr->slot_frag = frag_now;
6879 }
6880 md.slot[curr].unwind_record = NULL;
6881 }
6882
6883 for (j = 0; j < md.slot[curr].num_fixups; ++j)
6884 {
6885 ifix = md.slot[curr].fixup + j;
6886 fix = fix_new_exp (frag_now, frag_now_fix () - 16 + i, 8,
6887 &ifix->expr, ifix->is_pcrel, ifix->code);
6888 fix->tc_fix_data.opnd = ifix->opnd;
6889 fix->fx_file = md.slot[curr].src_file;
6890 fix->fx_line = md.slot[curr].src_line;
6891 }
6892
6893 end_of_insn_group = md.slot[curr].end_of_insn_group;
6894
6895 /* This adjustment to "i" must occur after the fix, otherwise the fix
6896 is assigned to the wrong slot, and the VMS linker complains. */
6897 if (required_unit == IA64_UNIT_L)
6898 {
6899 know (i == 1);
6900 /* skip one slot for long/X-unit instructions */
6901 ++i;
6902 }
6903 --md.num_slots_in_use;
6904 last_slot = i;
6905
6906 /* clear slot: */
6907 ia64_free_opcode (md.slot[curr].idesc);
6908 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6909 md.slot[curr].user_template = -1;
6910
6911 if (manual_bundling_off)
6912 {
6913 manual_bundling = 0;
6914 break;
6915 }
6916 curr = (curr + 1) % NUM_SLOTS;
6917 idesc = md.slot[curr].idesc;
6918 }
6919
6920 /* A user template was specified, but the first following instruction did
6921 not fit. This can happen with or without manual bundling. */
6922 if (md.num_slots_in_use > 0 && last_slot < 0)
6923 {
6924 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6925 _("`%s' does not fit into %s template"),
6926 idesc->name, ia64_templ_desc[template_val].name);
6927 /* Drop first insn so we don't livelock. */
6928 --md.num_slots_in_use;
6929 know (curr == first);
6930 ia64_free_opcode (md.slot[curr].idesc);
6931 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6932 md.slot[curr].user_template = -1;
6933 }
6934 else if (manual_bundling > 0)
6935 {
6936 if (md.num_slots_in_use > 0)
6937 {
6938 if (last_slot >= 2)
6939 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6940 _("`%s' does not fit into bundle"), idesc->name);
6941 else
6942 {
6943 const char *where;
6944
6945 if (template_val == 2)
6946 where = "X slot";
6947 else if (last_slot == 0)
6948 where = "slots 2 or 3";
6949 else
6950 where = "slot 3";
6951 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6952 _("`%s' can't go in %s of %s template"),
6953 idesc->name, where, ia64_templ_desc[template_val].name);
6954 }
6955 }
6956 else
6957 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6958 _("Missing '}' at end of file"));
6959 }
6960
6961 know (md.num_slots_in_use < NUM_SLOTS);
6962
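  /* An IA-64 bundle is 128 bits: a 5-bit template field (the low bit of
     which is used here for the trailing stop, end_of_insn_group) followed
     by three 41-bit instruction slots.  t0 holds bundle bits 0..63
     (template, slot 0, and the low 18 bits of slot 1); t1 holds bits
     64..127 (the remaining 23 bits of slot 1 and all of slot 2).  */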
6963 t0 = end_of_insn_group | (template_val << 1) | (insn[0] << 5) | (insn[1] << 46);
6964 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
6965
6966 number_to_chars_littleendian (f + 0, t0, 8);
6967 number_to_chars_littleendian (f + 8, t1, 8);
6968 }
6969
6970 int
6971 md_parse_option (int c, const char *arg)
6972 {
6973
6974 switch (c)
6975 {
6976 /* Switches from the Intel assembler. */
6977 case 'm':
6978 if (strcmp (arg, "ilp64") == 0
6979 || strcmp (arg, "lp64") == 0
6980 || strcmp (arg, "p64") == 0)
6981 {
6982 md.flags |= EF_IA_64_ABI64;
6983 }
6984 else if (strcmp (arg, "ilp32") == 0)
6985 {
6986 md.flags &= ~EF_IA_64_ABI64;
6987 }
6988 else if (strcmp (arg, "le") == 0)
6989 {
6990 md.flags &= ~EF_IA_64_BE;
6991 default_big_endian = 0;
6992 }
6993 else if (strcmp (arg, "be") == 0)
6994 {
6995 md.flags |= EF_IA_64_BE;
6996 default_big_endian = 1;
6997 }
6998 else if (strncmp (arg, "unwind-check=", 13) == 0)
6999 {
7000 arg += 13;
7001 if (strcmp (arg, "warning") == 0)
7002 md.unwind_check = unwind_check_warning;
7003 else if (strcmp (arg, "error") == 0)
7004 md.unwind_check = unwind_check_error;
7005 else
7006 return 0;
7007 }
7008 else if (strncmp (arg, "hint.b=", 7) == 0)
7009 {
7010 arg += 7;
7011 if (strcmp (arg, "ok") == 0)
7012 md.hint_b = hint_b_ok;
7013 else if (strcmp (arg, "warning") == 0)
7014 md.hint_b = hint_b_warning;
7015 else if (strcmp (arg, "error") == 0)
7016 md.hint_b = hint_b_error;
7017 else
7018 return 0;
7019 }
7020 else if (strncmp (arg, "tune=", 5) == 0)
7021 {
7022 arg += 5;
7023 if (strcmp (arg, "itanium1") == 0)
7024 md.tune = itanium1;
7025 else if (strcmp (arg, "itanium2") == 0)
7026 md.tune = itanium2;
7027 else
7028 return 0;
7029 }
7030 else
7031 return 0;
7032 break;
7033
7034 case 'N':
7035 if (strcmp (arg, "so") == 0)
7036 {
7037 /* Suppress signon message. */
7038 }
7039 else if (strcmp (arg, "pi") == 0)
7040 {
7041 /* Reject privileged instructions. FIXME */
7042 }
7043 else if (strcmp (arg, "us") == 0)
7044 {
7045 /* Allow union of signed and unsigned range. FIXME */
7046 }
7047 else if (strcmp (arg, "close_fcalls") == 0)
7048 {
7049 /* Do not resolve global function calls. */
7050 }
7051 else
7052 return 0;
7053 break;
7054
7055 case 'C':
7056 /* temp[="prefix"] Insert temporary labels into the object file
7057 symbol table prefixed by "prefix".
7058 Default prefix is ":temp:".
7059 */
7060 break;
7061
7062 case 'a':
7063      /* indirect=<tgt>	Assume unannotated indirect branches behave
7064			according to <tgt> --
7065 exit: branch out from the current context (default)
7066 labels: all labels in context may be branch targets
7067 */
7068 if (strncmp (arg, "indirect=", 9) != 0)
7069 return 0;
7070 break;
7071
7072 case 'x':
7073 /* -X conflicts with an ignored option, use -x instead */
7074 md.detect_dv = 1;
7075 if (!arg || strcmp (arg, "explicit") == 0)
7076 {
7077 /* set default mode to explicit */
7078 md.default_explicit_mode = 1;
7079 break;
7080 }
7081 else if (strcmp (arg, "auto") == 0)
7082 {
7083 md.default_explicit_mode = 0;
7084 }
7085 else if (strcmp (arg, "none") == 0)
7086 {
7087 md.detect_dv = 0;
7088 }
7089 else if (strcmp (arg, "debug") == 0)
7090 {
7091 md.debug_dv = 1;
7092 }
7093 else if (strcmp (arg, "debugx") == 0)
7094 {
7095 md.default_explicit_mode = 1;
7096 md.debug_dv = 1;
7097 }
7098 else if (strcmp (arg, "debugn") == 0)
7099 {
7100 md.debug_dv = 1;
7101 md.detect_dv = 0;
7102 }
7103 else
7104 {
7105 as_bad (_("Unrecognized option '-x%s'"), arg);
7106 }
7107 break;
7108
7109 case 'S':
7110 /* nops Print nops statistics. */
7111 break;
7112
7113 /* GNU specific switches for gcc. */
7114 case OPTION_MCONSTANT_GP:
7115 md.flags |= EF_IA_64_CONS_GP;
7116 break;
7117
7118 case OPTION_MAUTO_PIC:
7119 md.flags |= EF_IA_64_NOFUNCDESC_CONS_GP;
7120 break;
7121
7122 default:
7123 return 0;
7124 }
7125
7126 return 1;
7127 }
7128
7129 void
7130 md_show_usage (FILE *stream)
7131 {
7132 fputs (_("\
7133 IA-64 options:\n\
7134 --mconstant-gp mark output file as using the constant-GP model\n\
7135 (sets ELF header flag EF_IA_64_CONS_GP)\n\
7136 --mauto-pic mark output file as using the constant-GP model\n\
7137 without function descriptors (sets ELF header flag\n\
7138 EF_IA_64_NOFUNCDESC_CONS_GP)\n\
7139 -milp32|-milp64|-mlp64|-mp64 select data model (default -mlp64)\n\
7140 -mle | -mbe select little- or big-endian byte order (default -mle)\n\
7141 -mtune=[itanium1|itanium2]\n\
7142 tune for a specific CPU (default -mtune=itanium2)\n\
7143 -munwind-check=[warning|error]\n\
7144 unwind directive check (default -munwind-check=warning)\n\
7145 -mhint.b=[ok|warning|error]\n\
7146 hint.b check (default -mhint.b=error)\n\
7147 -x | -xexplicit turn on dependency violation checking\n"), stream);
7148 /* Note for translators: "automagically" can be translated as "automatically" here. */
7149 fputs (_("\
7150 -xauto automagically remove dependency violations (default)\n\
7151 -xnone turn off dependency violation checking\n\
7152 -xdebug debug dependency violation checker\n\
7153 -xdebugn debug dependency violation checker but turn off\n\
7154 dependency violation checking\n\
7155 -xdebugx debug dependency violation checker and turn on\n\
7156 dependency violation checking\n"),
7157 stream);
7158 }
7159
7160 void
7161 ia64_after_parse_args (void)
7162 {
7163 if (debug_type == DEBUG_STABS)
7164 as_fatal (_("--gstabs is not supported for ia64"));
7165 }
7166
7167 /* Return true if TYPE fits in TEMPL at SLOT. */
7168
7169 static int
7170 match (int templ, int type, int slot)
7171 {
7172 enum ia64_unit unit;
7173 int result;
7174
7175 unit = ia64_templ_desc[templ].exec_unit[slot];
7176 switch (type)
7177 {
7178 case IA64_TYPE_DYN: result = 1; break; /* for nop and break */
7179 case IA64_TYPE_A:
7180 result = (unit == IA64_UNIT_I || unit == IA64_UNIT_M);
7181 break;
7182 case IA64_TYPE_X: result = (unit == IA64_UNIT_L); break;
7183 case IA64_TYPE_I: result = (unit == IA64_UNIT_I); break;
7184 case IA64_TYPE_M: result = (unit == IA64_UNIT_M); break;
7185 case IA64_TYPE_B: result = (unit == IA64_UNIT_B); break;
7186 case IA64_TYPE_F: result = (unit == IA64_UNIT_F); break;
7187 default: result = 0; break;
7188 }
7189 return result;
7190 }
7191
7192 /* For Itanium 1, add a bit of extra goodness if a nop of type F or B would fit
7193 in TEMPL at SLOT. For Itanium 2, add a bit of extra goodness if a nop of
7194 type M or I would fit in TEMPL at SLOT. */
7195
7196 static inline int
7197 extra_goodness (int templ, int slot)
7198 {
7199 switch (md.tune)
7200 {
7201 case itanium1:
7202 if (slot == 1 && match (templ, IA64_TYPE_F, slot))
7203 return 2;
7204 else if (slot == 2 && match (templ, IA64_TYPE_B, slot))
7205 return 1;
7206 else
7207 return 0;
7208 break;
7209 case itanium2:
7210 if (match (templ, IA64_TYPE_M, slot)
7211 || match (templ, IA64_TYPE_I, slot))
7212	/* Favor M- and I-unit NOPs.  We definitely want to avoid F-unit
7213	   and B-unit NOPs, which may cause split-issue or less-than-optimal
7214	   branch prediction.  */
7215 return 2;
7216 else
7217 return 0;
7218 break;
7219 default:
7220 abort ();
7221 return 0;
7222 }
7223 }
7224
7225 /* This function is called once, at assembler startup time. It sets
7226 up all the tables, etc. that the MD part of the assembler will need
7227 that can be determined before arguments are parsed. */
7228 void
7229 md_begin (void)
7230 {
7231 int i, j, k, t, goodness, best, ok;
7232
7233 md.auto_align = 1;
7234 md.explicit_mode = md.default_explicit_mode;
7235
7236 bfd_set_section_alignment (text_section, 4);
7237
7238 /* Make sure function pointers get initialized. */
7239 target_big_endian = -1;
7240 dot_byteorder (default_big_endian);
7241
7242 alias_hash = str_htab_create ();
7243 alias_name_hash = str_htab_create ();
7244 secalias_hash = str_htab_create ();
7245 secalias_name_hash = str_htab_create ();
7246
7247 pseudo_func[FUNC_DTP_MODULE].u.sym =
7248 symbol_new (".<dtpmod>", undefined_section,
7249 &zero_address_frag, FUNC_DTP_MODULE);
7250
7251 pseudo_func[FUNC_DTP_RELATIVE].u.sym =
7252 symbol_new (".<dtprel>", undefined_section,
7253 &zero_address_frag, FUNC_DTP_RELATIVE);
7254
7255 pseudo_func[FUNC_FPTR_RELATIVE].u.sym =
7256 symbol_new (".<fptr>", undefined_section,
7257 &zero_address_frag, FUNC_FPTR_RELATIVE);
7258
7259 pseudo_func[FUNC_GP_RELATIVE].u.sym =
7260 symbol_new (".<gprel>", undefined_section,
7261 &zero_address_frag, FUNC_GP_RELATIVE);
7262
7263 pseudo_func[FUNC_LT_RELATIVE].u.sym =
7264 symbol_new (".<ltoff>", undefined_section,
7265 &zero_address_frag, FUNC_LT_RELATIVE);
7266
7267 pseudo_func[FUNC_LT_RELATIVE_X].u.sym =
7268 symbol_new (".<ltoffx>", undefined_section,
7269 &zero_address_frag, FUNC_LT_RELATIVE_X);
7270
7271 pseudo_func[FUNC_PC_RELATIVE].u.sym =
7272 symbol_new (".<pcrel>", undefined_section,
7273 &zero_address_frag, FUNC_PC_RELATIVE);
7274
7275 pseudo_func[FUNC_PLT_RELATIVE].u.sym =
7276 symbol_new (".<pltoff>", undefined_section,
7277 &zero_address_frag, FUNC_PLT_RELATIVE);
7278
7279 pseudo_func[FUNC_SEC_RELATIVE].u.sym =
7280 symbol_new (".<secrel>", undefined_section,
7281 &zero_address_frag, FUNC_SEC_RELATIVE);
7282
7283 pseudo_func[FUNC_SEG_RELATIVE].u.sym =
7284 symbol_new (".<segrel>", undefined_section,
7285 &zero_address_frag, FUNC_SEG_RELATIVE);
7286
7287 pseudo_func[FUNC_TP_RELATIVE].u.sym =
7288 symbol_new (".<tprel>", undefined_section,
7289 &zero_address_frag, FUNC_TP_RELATIVE);
7290
7291 pseudo_func[FUNC_LTV_RELATIVE].u.sym =
7292 symbol_new (".<ltv>", undefined_section,
7293 &zero_address_frag, FUNC_LTV_RELATIVE);
7294
7295 pseudo_func[FUNC_LT_FPTR_RELATIVE].u.sym =
7296 symbol_new (".<ltoff.fptr>", undefined_section,
7297 &zero_address_frag, FUNC_LT_FPTR_RELATIVE);
7298
7299 pseudo_func[FUNC_LT_DTP_MODULE].u.sym =
7300 symbol_new (".<ltoff.dtpmod>", undefined_section,
7301 &zero_address_frag, FUNC_LT_DTP_MODULE);
7302
7303 pseudo_func[FUNC_LT_DTP_RELATIVE].u.sym =
7304 symbol_new (".<ltoff.dptrel>", undefined_section,
7305 &zero_address_frag, FUNC_LT_DTP_RELATIVE);
7306
7307 pseudo_func[FUNC_LT_TP_RELATIVE].u.sym =
7308 symbol_new (".<ltoff.tprel>", undefined_section,
7309 &zero_address_frag, FUNC_LT_TP_RELATIVE);
7310
7311 pseudo_func[FUNC_IPLT_RELOC].u.sym =
7312 symbol_new (".<iplt>", undefined_section,
7313 &zero_address_frag, FUNC_IPLT_RELOC);
7314
7315 #ifdef TE_VMS
7316 pseudo_func[FUNC_SLOTCOUNT_RELOC].u.sym =
7317 symbol_new (".<slotcount>", undefined_section,
7318 &zero_address_frag, FUNC_SLOTCOUNT_RELOC);
7319 #endif
7320
7321 if (md.tune != itanium1)
7322 {
7323       /* Convert MFI NOP bundles into MMI NOP bundles.  */
7324 le_nop[0] = 0x8;
7325 le_nop_stop[0] = 0x9;
7326 }
7327
7328 /* Compute the table of best templates. We compute goodness as a
7329 base 4 value, in which each match counts for 3. Match-failures
7330 result in NOPs and we use extra_goodness() to pick the execution
7331 units that are best suited for issuing the NOP. */
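  /* A matched slot contributes 3 to the goodness value; extra_goodness ()
     adds at most 2 for a slot that will be filled with a NOP.  */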
7332 for (i = 0; i < IA64_NUM_TYPES; ++i)
7333 for (j = 0; j < IA64_NUM_TYPES; ++j)
7334 for (k = 0; k < IA64_NUM_TYPES; ++k)
7335 {
7336 best = 0;
7337 for (t = 0; t < NELEMS (ia64_templ_desc); ++t)
7338 {
7339 goodness = 0;
7340 if (match (t, i, 0))
7341 {
7342 if (match (t, j, 1))
7343 {
7344 if ((t == 2 && j == IA64_TYPE_X) || match (t, k, 2))
7345 goodness = 3 + 3 + 3;
7346 else
7347 goodness = 3 + 3 + extra_goodness (t, 2);
7348 }
7349 else if (match (t, j, 2))
7350 goodness = 3 + 3 + extra_goodness (t, 1);
7351 else
7352 {
7353 goodness = 3;
7354 goodness += extra_goodness (t, 1);
7355 goodness += extra_goodness (t, 2);
7356 }
7357 }
7358 else if (match (t, i, 1))
7359 {
7360 if ((t == 2 && i == IA64_TYPE_X) || match (t, j, 2))
7361 goodness = 3 + 3;
7362 else
7363 goodness = 3 + extra_goodness (t, 2);
7364 }
7365 else if (match (t, i, 2))
7366 goodness = 3 + extra_goodness (t, 1);
7367
7368 if (goodness > best)
7369 {
7370 best = goodness;
7371 best_template[i][j][k] = t;
7372 }
7373 }
7374 }
7375
7376 #ifdef DEBUG_TEMPLATES
7377 /* For debugging changes to the best_template calculations. We don't care
7378 about combinations with invalid instructions, so start the loops at 1. */
7379 for (i = 0; i < IA64_NUM_TYPES; ++i)
7380 for (j = 0; j < IA64_NUM_TYPES; ++j)
7381 for (k = 0; k < IA64_NUM_TYPES; ++k)
7382 {
7383 char type_letter[IA64_NUM_TYPES] = { 'n', 'a', 'i', 'm', 'b', 'f',
7384 'x', 'd' };
7385 fprintf (stderr, "%c%c%c %s\n", type_letter[i], type_letter[j],
7386 type_letter[k],
7387 ia64_templ_desc[best_template[i][j][k]].name);
7388 }
7389 #endif
7390
7391 for (i = 0; i < NUM_SLOTS; ++i)
7392 md.slot[i].user_template = -1;
7393
7394 md.pseudo_hash = str_htab_create ();
7395 for (i = 0; i < NELEMS (pseudo_opcode); ++i)
7396 str_hash_insert (md.pseudo_hash, pseudo_opcode[i].name,
7397 (void *) (pseudo_opcode + i));
7398
7399 md.reg_hash = str_htab_create ();
7400 md.dynreg_hash = str_htab_create ();
7401 md.const_hash = str_htab_create ();
7402 md.entry_hash = str_htab_create ();
7403
7404 /* general registers: */
7405 declare_register_set ("r", 128, REG_GR);
7406 declare_register ("gp", REG_GR + 1);
7407 declare_register ("sp", REG_GR + 12);
7408 declare_register ("tp", REG_GR + 13);
7409 declare_register_set ("ret", 4, REG_GR + 8);
7410
7411 /* floating point registers: */
7412 declare_register_set ("f", 128, REG_FR);
7413 declare_register_set ("farg", 8, REG_FR + 8);
7414 declare_register_set ("fret", 8, REG_FR + 8);
7415
7416 /* branch registers: */
7417 declare_register_set ("b", 8, REG_BR);
7418 declare_register ("rp", REG_BR + 0);
7419
7420 /* predicate registers: */
7421 declare_register_set ("p", 64, REG_P);
7422 declare_register ("pr", REG_PR);
7423 declare_register ("pr.rot", REG_PR_ROT);
7424
7425 /* application registers: */
7426 declare_register_set ("ar", 128, REG_AR);
7427 for (i = 0; i < NELEMS (ar); ++i)
7428 declare_register (ar[i].name, REG_AR + ar[i].regnum);
7429
7430 /* control registers: */
7431 declare_register_set ("cr", 128, REG_CR);
7432 for (i = 0; i < NELEMS (cr); ++i)
7433 declare_register (cr[i].name, REG_CR + cr[i].regnum);
7434
7435 /* dahr registers: */
7436 declare_register_set ("dahr", 8, REG_DAHR);
7437
7438 declare_register ("ip", REG_IP);
7439 declare_register ("cfm", REG_CFM);
7440 declare_register ("psr", REG_PSR);
7441 declare_register ("psr.l", REG_PSR_L);
7442 declare_register ("psr.um", REG_PSR_UM);
7443
7444 for (i = 0; i < NELEMS (indirect_reg); ++i)
7445 {
7446 unsigned int regnum = indirect_reg[i].regnum;
7447
7448 md.indregsym[regnum - IND_CPUID] = declare_register (indirect_reg[i].name, regnum);
7449 }
7450
7451 /* pseudo-registers used to specify unwind info: */
7452 declare_register ("psp", REG_PSP);
7453
7454 for (i = 0; i < NELEMS (const_bits); ++i)
7455 str_hash_insert (md.const_hash, const_bits[i].name,
7456 (void *) (const_bits + i));
7457
7458 /* Set the architecture and machine depending on defaults and command line
7459 options. */
7460 if (md.flags & EF_IA_64_ABI64)
7461 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf64);
7462 else
7463 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf32);
7464
7465 if (! ok)
7466 as_warn (_("Could not set architecture and machine"));
7467
7468 /* Set the pointer size and pointer shift size depending on md.flags */
7469
7470 if (md.flags & EF_IA_64_ABI64)
7471 {
7472 md.pointer_size = 8; /* pointers are 8 bytes */
7473       md.pointer_size_shift = 3; /* alignment is 8 bytes = 2^3 */
7474 }
7475 else
7476 {
7477 md.pointer_size = 4; /* pointers are 4 bytes */
7478 md.pointer_size_shift = 2; /* alignment is 4 bytes = 2^2 */
7479 }
7480
7481 md.mem_offset.hint = 0;
7482 md.path = 0;
7483 md.maxpaths = 0;
7484 md.entry_labels = NULL;
7485 }
7486
7487 /* Set the default options in md. Cannot do this in md_begin because
7488    that is called after md_parse_option, which is where we set the
7489 options in md based on command line options. */
7490
7491 void
7492 ia64_init (int argc ATTRIBUTE_UNUSED, char **argv ATTRIBUTE_UNUSED)
7493 {
7494 md.flags = MD_FLAGS_DEFAULT;
7495 #ifndef TE_VMS
7496   /* Don't turn on dependency checking for VMS; it doesn't work.  */
7497 md.detect_dv = 1;
7498 #endif
7499 /* FIXME: We should change it to unwind_check_error someday. */
7500 md.unwind_check = unwind_check_warning;
7501 md.hint_b = hint_b_error;
7502 md.tune = itanium2;
7503 }
7504
7505 /* Return a string for the target object file format. */
7506
7507 const char *
7508 ia64_target_format (void)
7509 {
7510 if (OUTPUT_FLAVOR == bfd_target_elf_flavour)
7511 {
7512 if (md.flags & EF_IA_64_BE)
7513 {
7514 if (md.flags & EF_IA_64_ABI64)
7515 #if defined(TE_AIX50)
7516 return "elf64-ia64-aix-big";
7517 #elif defined(TE_HPUX)
7518 return "elf64-ia64-hpux-big";
7519 #else
7520 return "elf64-ia64-big";
7521 #endif
7522 else
7523 #if defined(TE_AIX50)
7524 return "elf32-ia64-aix-big";
7525 #elif defined(TE_HPUX)
7526 return "elf32-ia64-hpux-big";
7527 #else
7528 return "elf32-ia64-big";
7529 #endif
7530 }
7531 else
7532 {
7533 if (md.flags & EF_IA_64_ABI64)
7534 #if defined (TE_AIX50)
7535 return "elf64-ia64-aix-little";
7536 #elif defined (TE_VMS)
7537 {
7538 md.flags |= EF_IA_64_ARCHVER_1;
7539 return "elf64-ia64-vms";
7540 }
7541 #else
7542 return "elf64-ia64-little";
7543 #endif
7544 else
7545 #ifdef TE_AIX50
7546 return "elf32-ia64-aix-little";
7547 #else
7548 return "elf32-ia64-little";
7549 #endif
7550 }
7551 }
7552 else
7553 return "unknown-format";
7554 }
7555
7556 void
7557 ia64_end_of_source (void)
7558 {
7559 /* terminate insn group upon reaching end of file: */
7560 insn_group_break (1, 0, 0);
7561
7562 /* emits slots we haven't written yet: */
7563 ia64_flush_insns ();
7564
7565 bfd_set_private_flags (stdoutput, md.flags);
7566
7567 md.mem_offset.hint = 0;
7568 }
7569
7570 void
7571 ia64_start_line (void)
7572 {
7573 static int first;
7574
7575 if (!first) {
7576 /* Make sure we don't reference input_line_pointer[-1] when that's
7577 not valid. */
7578 first = 1;
7579 return;
7580 }
7581
7582 if (md.qp.X_op == O_register)
7583 as_bad (_("qualifying predicate not followed by instruction"));
7584 md.qp.X_op = O_absent;
7585
7586 if (ignore_input ())
7587 return;
7588
7589 if (input_line_pointer[0] == ';' && input_line_pointer[-1] == ';')
7590 {
7591 if (md.detect_dv && !md.explicit_mode)
7592 {
7593 static int warned;
7594
7595 if (!warned)
7596 {
7597 warned = 1;
7598 as_warn (_("Explicit stops are ignored in auto mode"));
7599 }
7600 }
7601 else
7602 insn_group_break (1, 0, 0);
7603 }
7604 else if (input_line_pointer[-1] == '{')
7605 {
7606 if (md.manual_bundling)
7607 as_warn (_("Found '{' when manual bundling is already turned on"));
7608 else
7609 CURR_SLOT.manual_bundling_on = 1;
7610 md.manual_bundling = 1;
7611
7612 /* Bundling is only acceptable in explicit mode
7613 or when in default automatic mode. */
7614 if (md.detect_dv && !md.explicit_mode)
7615 {
7616 if (!md.mode_explicitly_set
7617 && !md.default_explicit_mode)
7618 dot_dv_mode ('E');
7619 else
7620 as_warn (_("Found '{' after explicit switch to automatic mode"));
7621 }
7622 }
7623 else if (input_line_pointer[-1] == '}')
7624 {
7625 if (!md.manual_bundling)
7626 as_warn (_("Found '}' when manual bundling is off"));
7627 else
7628 PREV_SLOT.manual_bundling_off = 1;
7629 md.manual_bundling = 0;
7630
7631 /* switch back to automatic mode, if applicable */
7632 if (md.detect_dv
7633 && md.explicit_mode
7634 && !md.mode_explicitly_set
7635 && !md.default_explicit_mode)
7636 dot_dv_mode ('A');
7637 }
7638 }
7639
7640 /* This is a hook for ia64_frob_label, so that it can distinguish tags from
7641 labels. */
7642 static int defining_tag = 0;
7643
7644 int
7645 ia64_unrecognized_line (int ch)
7646 {
7647 switch (ch)
7648 {
7649 case '(':
7650 expression_and_evaluate (&md.qp);
7651 if (*input_line_pointer++ != ')')
7652 {
7653 as_bad (_("Expected ')'"));
7654 return 0;
7655 }
7656 if (md.qp.X_op != O_register)
7657 {
7658 as_bad (_("Qualifying predicate expected"));
7659 return 0;
7660 }
7661 if (md.qp.X_add_number < REG_P || md.qp.X_add_number >= REG_P + 64)
7662 {
7663 as_bad (_("Predicate register expected"));
7664 return 0;
7665 }
7666 return 1;
7667
7668 case '[':
7669 {
7670 char *s;
7671 char c;
7672 symbolS *tag;
7673 int temp;
7674
7675 if (md.qp.X_op == O_register)
7676 {
7677 as_bad (_("Tag must come before qualifying predicate."));
7678 return 0;
7679 }
7680
7681 /* This implements just enough of read_a_source_file in read.c to
7682 recognize labels. */
7683 if (is_name_beginner (*input_line_pointer))
7684 {
7685 c = get_symbol_name (&s);
7686 }
7687 else if (LOCAL_LABELS_FB
7688 && ISDIGIT (*input_line_pointer))
7689 {
7690 temp = 0;
7691 while (ISDIGIT (*input_line_pointer))
7692 temp = (temp * 10) + *input_line_pointer++ - '0';
7693 fb_label_instance_inc (temp);
7694 s = fb_label_name (temp, 0);
7695 c = *input_line_pointer;
7696 }
7697 else
7698 {
7699 s = NULL;
7700 c = '\0';
7701 }
7702 if (c != ':')
7703 {
7704 /* Put ':' back for error messages' sake. */
7705 *input_line_pointer++ = ':';
7706 as_bad (_("Expected ':'"));
7707 return 0;
7708 }
7709
7710 defining_tag = 1;
7711 tag = colon (s);
7712 defining_tag = 0;
7713 /* Put ':' back for error messages' sake. */
7714 *input_line_pointer++ = ':';
7715 if (*input_line_pointer++ != ']')
7716 {
7717 as_bad (_("Expected ']'"));
7718 return 0;
7719 }
7720 if (! tag)
7721 {
7722 as_bad (_("Tag name expected"));
7723 return 0;
7724 }
7725 return 1;
7726 }
7727
7728 default:
7729 break;
7730 }
7731
7732 /* Not a valid line. */
7733 return 0;
7734 }
7735
7736 void
7737 ia64_frob_label (struct symbol *sym)
7738 {
7739 struct label_fix *fix;
7740
7741 /* Tags need special handling since they are not bundle breaks like
7742 labels. */
7743 if (defining_tag)
7744 {
7745 fix = XOBNEW (&notes, struct label_fix);
7746 fix->sym = sym;
7747 fix->next = CURR_SLOT.tag_fixups;
7748 fix->dw2_mark_labels = FALSE;
7749 CURR_SLOT.tag_fixups = fix;
7750
7751 return;
7752 }
7753
7754 if (bfd_section_flags (now_seg) & SEC_CODE)
7755 {
7756 md.last_text_seg = now_seg;
7757 fix = XOBNEW (&notes, struct label_fix);
7758 fix->sym = sym;
7759 fix->next = CURR_SLOT.label_fixups;
7760 fix->dw2_mark_labels = dwarf2_loc_mark_labels;
7761 CURR_SLOT.label_fixups = fix;
7762
7763 /* Keep track of how many code entry points we've seen. */
7764 if (md.path == md.maxpaths)
7765 {
7766 md.maxpaths += 20;
7767 md.entry_labels = XRESIZEVEC (const char *, md.entry_labels,
7768 md.maxpaths);
7769 }
7770 md.entry_labels[md.path++] = S_GET_NAME (sym);
7771 }
7772 }
7773
7774 #ifdef TE_HPUX
7775 /* The HP-UX linker will give unresolved symbol errors for symbols
7776 that are declared but unused. This routine removes declared,
7777 unused symbols from an object. */
7778 int
7779 ia64_frob_symbol (struct symbol *sym)
7780 {
7781 if ((S_GET_SEGMENT (sym) == bfd_und_section_ptr && ! symbol_used_p (sym) &&
7782 ELF_ST_VISIBILITY (S_GET_OTHER (sym)) == STV_DEFAULT)
7783 || (S_GET_SEGMENT (sym) == bfd_abs_section_ptr
7784 && ! S_IS_EXTERNAL (sym)))
7785 return 1;
7786 return 0;
7787 }
7788 #endif
7789
7790 void
7791 ia64_flush_pending_output (void)
7792 {
7793 if (!md.keep_pending_output
7794 && bfd_section_flags (now_seg) & SEC_CODE)
7795 {
7796 /* ??? This causes many unnecessary stop bits to be emitted.
7797 Unfortunately, it isn't clear if it is safe to remove this. */
7798 insn_group_break (1, 0, 0);
7799 ia64_flush_insns ();
7800 }
7801 }
7802
7803 /* Do ia64-specific expression optimization. All that's done here is
7804 to transform index expressions that are either due to the indexing
7805 of rotating registers or due to the indexing of indirect register
7806 sets. */
7807 int
7808 ia64_optimize_expr (expressionS *l, operatorT op, expressionS *r)
7809 {
7810 if (op != O_index)
7811 return 0;
7812 resolve_expression (l);
7813 if (l->X_op == O_register)
7814 {
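      /* ia64_parse_name encodes a .rotX-allocated register set as
	 base | (count << 16), so a nonzero upper half identifies a
	 rotating register set.  */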
7815 unsigned num_regs = l->X_add_number >> 16;
7816
7817 resolve_expression (r);
7818 if (num_regs)
7819 {
7820 /* Left side is a .rotX-allocated register. */
7821 if (r->X_op != O_constant)
7822 {
7823 as_bad (_("Rotating register index must be a non-negative constant"));
7824 r->X_add_number = 0;
7825 }
7826 else if ((valueT) r->X_add_number >= num_regs)
7827 {
7828 as_bad (_("Index out of range 0..%u"), num_regs - 1);
7829 r->X_add_number = 0;
7830 }
7831 l->X_add_number = (l->X_add_number & 0xffff) + r->X_add_number;
7832 return 1;
7833 }
7834 else if (l->X_add_number >= IND_CPUID && l->X_add_number <= IND_RR)
7835 {
7836 if (r->X_op != O_register
7837 || r->X_add_number < REG_GR
7838 || r->X_add_number > REG_GR + 127)
7839 {
7840 as_bad (_("Indirect register index must be a general register"));
7841 r->X_add_number = REG_GR;
7842 }
7843 l->X_op = O_index;
7844 l->X_op_symbol = md.indregsym[l->X_add_number - IND_CPUID];
7845 l->X_add_number = r->X_add_number;
7846 return 1;
7847 }
7848 }
7849 as_bad (_("Index can only be applied to rotating or indirect registers"));
7850   /* Fall back to a register whose use has as few side effects as
7851      possible, to minimize subsequent error messages.  */
7852 l->X_op = O_register;
7853 l->X_add_number = REG_GR + 3;
7854 return 1;
7855 }
7856
7857 int
7858 ia64_parse_name (char *name, expressionS *e, char *nextcharP)
7859 {
7860 struct const_desc *cdesc;
7861 struct dynreg *dr = 0;
7862 unsigned int idx;
7863 struct symbol *sym;
7864 char *end;
7865
7866 if (*name == '@')
7867 {
7868 enum pseudo_type pseudo_type = PSEUDO_FUNC_NONE;
7869
7870 /* Find what relocation pseudo-function we're dealing with. */
7871 for (idx = 0; idx < NELEMS (pseudo_func); ++idx)
7872 if (pseudo_func[idx].name
7873 && pseudo_func[idx].name[0] == name[1]
7874 && strcmp (pseudo_func[idx].name + 1, name + 2) == 0)
7875 {
7876 pseudo_type = pseudo_func[idx].type;
7877 break;
7878 }
7879 switch (pseudo_type)
7880 {
7881 case PSEUDO_FUNC_RELOC:
7882 end = input_line_pointer;
7883 if (*nextcharP != '(')
7884 {
7885 as_bad (_("Expected '('"));
7886 break;
7887 }
7888 /* Skip '('. */
7889 ++input_line_pointer;
7890 expression (e);
7891 if (*input_line_pointer != ')')
7892 {
7893 as_bad (_("Missing ')'"));
7894 goto done;
7895 }
7896 /* Skip ')'. */
7897 ++input_line_pointer;
7898 #ifdef TE_VMS
7899 if (idx == FUNC_SLOTCOUNT_RELOC)
7900 {
7901 /* @slotcount can accept any expression. Canonicalize. */
7902 e->X_add_symbol = make_expr_symbol (e);
7903 e->X_op = O_symbol;
7904 e->X_add_number = 0;
7905 }
7906 #endif
7907 if (e->X_op != O_symbol)
7908 {
7909 if (e->X_op != O_pseudo_fixup)
7910 {
7911 as_bad (_("Not a symbolic expression"));
7912 goto done;
7913 }
7914 if (idx != FUNC_LT_RELATIVE)
7915 {
7916 as_bad (_("Illegal combination of relocation functions"));
7917 goto done;
7918 }
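	      /* An @ltoff() wrapped around another relocation function
		 folds into the corresponding combined relocation, e.g.
		 @ltoff(@fptr(sym)).  */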
7919 switch (S_GET_VALUE (e->X_op_symbol))
7920 {
7921 case FUNC_FPTR_RELATIVE:
7922 idx = FUNC_LT_FPTR_RELATIVE; break;
7923 case FUNC_DTP_MODULE:
7924 idx = FUNC_LT_DTP_MODULE; break;
7925 case FUNC_DTP_RELATIVE:
7926 idx = FUNC_LT_DTP_RELATIVE; break;
7927 case FUNC_TP_RELATIVE:
7928 idx = FUNC_LT_TP_RELATIVE; break;
7929 default:
7930 as_bad (_("Illegal combination of relocation functions"));
7931 goto done;
7932 }
7933 }
7934 /* Make sure gas doesn't get rid of local symbols that are used
7935 in relocs. */
7936 e->X_op = O_pseudo_fixup;
7937 e->X_op_symbol = pseudo_func[idx].u.sym;
7938 done:
7939 *nextcharP = *input_line_pointer;
7940 break;
7941
7942 case PSEUDO_FUNC_CONST:
7943 e->X_op = O_constant;
7944 e->X_add_number = pseudo_func[idx].u.ival;
7945 break;
7946
7947 case PSEUDO_FUNC_REG:
7948 e->X_op = O_register;
7949 e->X_add_number = pseudo_func[idx].u.ival;
7950 break;
7951
7952 default:
7953 return 0;
7954 }
7955 return 1;
7956 }
7957
7958 /* first see if NAME is a known register name: */
7959 sym = str_hash_find (md.reg_hash, name);
7960 if (sym)
7961 {
7962 e->X_op = O_register;
7963 e->X_add_number = S_GET_VALUE (sym);
7964 return 1;
7965 }
7966
7967 cdesc = str_hash_find (md.const_hash, name);
7968 if (cdesc)
7969 {
7970 e->X_op = O_constant;
7971 e->X_add_number = cdesc->value;
7972 return 1;
7973 }
7974
7975 /* check for inN, locN, or outN: */
7976 idx = 0;
7977 switch (name[0])
7978 {
7979 case 'i':
7980 if (name[1] == 'n' && ISDIGIT (name[2]))
7981 {
7982 dr = &md.in;
7983 idx = 2;
7984 }
7985 break;
7986
7987 case 'l':
7988 if (name[1] == 'o' && name[2] == 'c' && ISDIGIT (name[3]))
7989 {
7990 dr = &md.loc;
7991 idx = 3;
7992 }
7993 break;
7994
7995 case 'o':
7996 if (name[1] == 'u' && name[2] == 't' && ISDIGIT (name[3]))
7997 {
7998 dr = &md.out;
7999 idx = 3;
8000 }
8001 break;
8002
8003 default:
8004 break;
8005 }
8006
8007 /* Ignore register numbers with leading zeroes, except zero itself. */
8008 if (dr && (name[idx] != '0' || name[idx + 1] == '\0'))
8009 {
8010 unsigned long regnum;
8011
8012 /* The name is inN, locN, or outN; parse the register number. */
8013 regnum = strtoul (name + idx, &end, 10);
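      /* The register stack provides at most 96 stacked registers
	 (r32..r127), hence the bound on regnum below.  */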
8014 if (end > name + idx && *end == '\0' && regnum < 96)
8015 {
8016 if (regnum >= dr->num_regs)
8017 {
8018 if (!dr->num_regs)
8019 as_bad (_("No current frame"));
8020 else
8021 as_bad (_("Register number out of range 0..%u"),
8022 dr->num_regs - 1);
8023 regnum = 0;
8024 }
8025 e->X_op = O_register;
8026 e->X_add_number = dr->base + regnum;
8027 return 1;
8028 }
8029 }
8030
8031 end = xstrdup (name);
8032 name = ia64_canonicalize_symbol_name (end);
8033 if ((dr = str_hash_find (md.dynreg_hash, name)))
8034 {
8035 /* We've got ourselves the name of a rotating register set.
8036 Store the base register number in the low 16 bits of
8037 X_add_number and the size of the register set in the top 16
8038 bits. */
8039 e->X_op = O_register;
8040 e->X_add_number = dr->base | (dr->num_regs << 16);
8041 free (end);
8042 return 1;
8043 }
8044 free (end);
8045 return 0;
8046 }
8047
8048 /* Remove the '#' suffix that indicates a symbol as opposed to a register. */
8049
8050 char *
8051 ia64_canonicalize_symbol_name (char *name)
8052 {
8053 size_t len = strlen (name), full = len;
8054
8055 while (len > 0 && name[len - 1] == '#')
8056 --len;
8057 if (len <= 0)
8058 {
8059 if (full > 0)
8060 as_bad (_("Standalone `#' is illegal"));
8061 }
8062 else if (len < full - 1)
8063 as_warn (_("Redundant `#' suffix operators"));
8064 name[len] = '\0';
8065 return name;
8066 }
8067
8068 /* Return true if idesc is a conditional branch instruction. This excludes
8069 the modulo scheduled branches, and br.ia. Mod-sched branches are excluded
8070 because they always read/write resources regardless of the value of the
8071 qualifying predicate. br.ia must always use p0, and hence is always
8072 taken. Thus this function returns true for branches which can fall
8073 through, and which use no resources if they do fall through. */
8074
8075 static int
8076 is_conditional_branch (struct ia64_opcode *idesc)
8077 {
8078 /* br is a conditional branch. Everything that starts with br. except
8079 br.ia, br.c{loop,top,exit}, and br.w{top,exit} is a conditional branch.
8080 Everything that starts with brl is a conditional branch. */
8081 return (idesc->name[0] == 'b' && idesc->name[1] == 'r'
8082 && (idesc->name[2] == '\0'
8083 || (idesc->name[2] == '.' && idesc->name[3] != 'i'
8084 && idesc->name[3] != 'c' && idesc->name[3] != 'w')
8085 || idesc->name[2] == 'l'
8086 /* br.cond, br.call, br.clr */
8087 || (idesc->name[2] == '.' && idesc->name[3] == 'c'
8088 && (idesc->name[4] == 'a' || idesc->name[4] == 'o'
8089 || (idesc->name[4] == 'l' && idesc->name[5] == 'r')))));
8090 }
8091
8092 /* Return whether the given opcode is a taken branch. If there's any doubt,
8093 returns zero. */
8094
8095 static int
8096 is_taken_branch (struct ia64_opcode *idesc)
8097 {
8098 return ((is_conditional_branch (idesc) && CURR_SLOT.qp_regno == 0)
8099 || strncmp (idesc->name, "br.ia", 5) == 0);
8100 }
8101
8102 /* Return whether the given opcode is an interruption or rfi. If there's any
8103 doubt, returns zero. */
8104
8105 static int
8106 is_interruption_or_rfi (struct ia64_opcode *idesc)
8107 {
8108 if (strcmp (idesc->name, "rfi") == 0)
8109 return 1;
8110 return 0;
8111 }
8112
8113 /* Returns the index of the given dependency in the opcode's list of chks, or
8114 -1 if there is no dependency. */
8115
8116 static int
8117 depends_on (int depind, struct ia64_opcode *idesc)
8118 {
8119 int i;
8120 const struct ia64_opcode_dependency *dep = idesc->dependencies;
8121 for (i = 0; i < dep->nchks; i++)
8122 {
8123 if (depind == DEP (dep->chks[i]))
8124 return i;
8125 }
8126 return -1;
8127 }
8128
8129 /* Determine a set of specific resources used for a particular resource
8130    class.  Returns the number of specific resources identified.  For those
8131 cases which are not determinable statically, the resource returned is
8132 marked nonspecific.
8133
8134 Meanings of value in 'NOTE':
8135 1) only read/write when the register number is explicitly encoded in the
8136 insn.
8137 2) only read CFM when accessing a rotating GR, FR, or PR. mov pr only
8138 accesses CFM when qualifying predicate is in the rotating region.
8139 3) general register value is used to specify an indirect register; not
8140 determinable statically.
8141 4) only read the given resource when bits 7:0 of the indirect index
8142    register value do not match the register number of the resource; not
8143 determinable statically.
8144 5) all rules are implementation specific.
8145 6) only when both the index specified by the reader and the index specified
8146 by the writer have the same value in bits 63:61; not determinable
8147 statically.
8148 7) only access the specified resource when the corresponding mask bit is
8149 set
8150 8) PSR.dfh is only read when these insns reference FR32-127. PSR.dfl is
8151 only read when these insns reference FR2-31
8152 9) PSR.mfl is only written when these insns write FR2-31. PSR.mfh is only
8153 written when these insns write FR32-127
8154 10) The PSR.bn bit is only accessed when one of GR16-31 is specified in the
8155 instruction
8156 11) The target predicates are written independently of PR[qp], but source
8157 registers are only read if PR[qp] is true. Since the state of PR[qp]
8158 cannot statically be determined, all source registers are marked used.
8159 12) This insn only reads the specified predicate register when that
8160 register is the PR[qp].
8161 13) This reference to ld-c only applies to the GR whose value is loaded
8162 with data returned from memory, not the post-incremented address register.
8163 14) The RSE resource includes the implementation-specific RSE internal
8164 state resources. At least one (and possibly more) of these resources are
8165 read by each instruction listed in IC:rse-readers. At least one (and
8166 possibly more) of these resources are written by each insn listed in
8167 IC:rse-writers.
8168 15+16) Represents reserved instructions, which the assembler does not
8169 generate.
8170 17) CR[TPR] has a RAW dependency only between mov-to-CR-TPR and
8171 mov-to-PSR-l or ssm instructions that set PSR.i, PSR.pp or PSR.up.
8172
8173 Memory resources (i.e. locations in memory) are *not* marked or tracked by
8174 this code; there are no dependency violations based on memory access.
8175 */
8176
8177 #define MAX_SPECS 256
8178 #define DV_CHK 1
8179 #define DV_REG 0
8180
8181 static int
8182 specify_resource (const struct ia64_dependency *dep,
8183 struct ia64_opcode *idesc,
8184 /* is this a DV chk or a DV reg? */
8185 int type,
8186 /* returned specific resources */
8187 struct rsrc specs[MAX_SPECS],
8188 /* resource note for this insn's usage */
8189 int note,
8190 /* which execution path to examine */
8191 int path)
8192 {
8193 int count = 0;
8194 int i;
8195 int rsrc_write = 0;
8196 struct rsrc tmpl;
8197
8198 if (dep->mode == IA64_DV_WAW
8199 || (dep->mode == IA64_DV_RAW && type == DV_REG)
8200 || (dep->mode == IA64_DV_WAR && type == DV_CHK))
8201 rsrc_write = 1;
8202
8203 /* template for any resources we identify */
8204 tmpl.dependency = dep;
8205 tmpl.note = note;
8206 tmpl.insn_srlz = tmpl.data_srlz = 0;
8207 tmpl.qp_regno = CURR_SLOT.qp_regno;
8208 tmpl.link_to_qp_branch = 1;
8209 tmpl.mem_offset.hint = 0;
8210 tmpl.mem_offset.offset = 0;
8211 tmpl.mem_offset.base = 0;
8212 tmpl.specific = 1;
8213 tmpl.index = -1;
8214 tmpl.cmp_type = CMP_NONE;
8215 tmpl.depind = 0;
8216 tmpl.file = NULL;
8217 tmpl.line = 0;
8218 tmpl.path = 0;
8219
8220 #define UNHANDLED \
8221 as_warn (_("Unhandled dependency %s for %s (%s), note %d"), \
8222 dep->name, idesc->name, (rsrc_write?"write":"read"), note)
8223 #define KNOWN(REG) (gr_values[REG].known && gr_values[REG].path >= path)
8224
8225 /* we don't need to track these */
8226 if (dep->semantics == IA64_DVS_NONE)
8227 return 0;
8228
8229 switch (dep->specifier)
8230 {
8231 case IA64_RS_AR_K:
8232 if (note == 1)
8233 {
8234 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8235 {
8236 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8237 if (regno >= 0 && regno <= 7)
8238 {
8239 specs[count] = tmpl;
8240 specs[count++].index = regno;
8241 }
8242 }
8243 }
8244 else if (note == 0)
8245 {
8246 for (i = 0; i < 8; i++)
8247 {
8248 specs[count] = tmpl;
8249 specs[count++].index = i;
8250 }
8251 }
8252 else
8253 {
8254 UNHANDLED;
8255 }
8256 break;
8257
8258 case IA64_RS_AR_UNAT:
8259 /* This is a mov =AR or mov AR= instruction. */
8260 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8261 {
8262 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8263 if (regno == AR_UNAT)
8264 {
8265 specs[count++] = tmpl;
8266 }
8267 }
8268 else
8269 {
8270 /* This is a spill/fill, or other instruction that modifies the
8271 unat register. */
8272
8273 /* Unless we can determine the specific bits used, mark the whole
8274 thing; bits 8:3 of the memory address indicate the bit used in
8275 UNAT. The .mem.offset hint may be used to eliminate a small
8276 subset of conflicts. */
8277 specs[count] = tmpl;
8278 if (md.mem_offset.hint)
8279 {
8280 if (md.debug_dv)
8281 fprintf (stderr, " Using hint for spill/fill\n");
8282 /* The index isn't actually used; just set it to something
8283 approximating the bit index. */
8284 specs[count].index = (md.mem_offset.offset >> 3) & 0x3F;
8285 specs[count].mem_offset.hint = 1;
8286 specs[count].mem_offset.offset = md.mem_offset.offset;
8287 specs[count++].mem_offset.base = md.mem_offset.base;
8288 }
8289 else
8290 {
8291 specs[count++].specific = 0;
8292 }
8293 }
8294 break;
8295
8296 case IA64_RS_AR:
8297 if (note == 1)
8298 {
8299 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8300 {
8301 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8302 if ((regno >= 8 && regno <= 15)
8303 || (regno >= 20 && regno <= 23)
8304 || (regno >= 31 && regno <= 39)
8305 || (regno >= 41 && regno <= 47)
8306 || (regno >= 67 && regno <= 111))
8307 {
8308 specs[count] = tmpl;
8309 specs[count++].index = regno;
8310 }
8311 }
8312 }
8313 else
8314 {
8315 UNHANDLED;
8316 }
8317 break;
8318
8319 case IA64_RS_ARb:
8320 if (note == 1)
8321 {
8322 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8323 {
8324 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8325 if ((regno >= 48 && regno <= 63)
8326 || (regno >= 112 && regno <= 127))
8327 {
8328 specs[count] = tmpl;
8329 specs[count++].index = regno;
8330 }
8331 }
8332 }
8333 else if (note == 0)
8334 {
8335 for (i = 48; i < 64; i++)
8336 {
8337 specs[count] = tmpl;
8338 specs[count++].index = i;
8339 }
8340 for (i = 112; i < 128; i++)
8341 {
8342 specs[count] = tmpl;
8343 specs[count++].index = i;
8344 }
8345 }
8346 else
8347 {
8348 UNHANDLED;
8349 }
8350 break;
8351
8352 case IA64_RS_BR:
8353 if (note != 1)
8354 {
8355 UNHANDLED;
8356 }
8357 else
8358 {
8359 if (rsrc_write)
8360 {
8361 for (i = 0; i < idesc->num_outputs; i++)
8362 if (idesc->operands[i] == IA64_OPND_B1
8363 || idesc->operands[i] == IA64_OPND_B2)
8364 {
8365 specs[count] = tmpl;
8366 specs[count++].index =
8367 CURR_SLOT.opnd[i].X_add_number - REG_BR;
8368 }
8369 }
8370 else
8371 {
8372 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8373 if (idesc->operands[i] == IA64_OPND_B1
8374 || idesc->operands[i] == IA64_OPND_B2)
8375 {
8376 specs[count] = tmpl;
8377 specs[count++].index =
8378 CURR_SLOT.opnd[i].X_add_number - REG_BR;
8379 }
8380 }
8381 }
8382 break;
8383
8384 case IA64_RS_CPUID: /* four or more registers */
8385 if (note == 3)
8386 {
8387 if (idesc->operands[!rsrc_write] == IA64_OPND_CPUID_R3)
8388 {
8389 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8390 if (regno >= 0 && regno < NELEMS (gr_values)
8391 && KNOWN (regno))
8392 {
8393 specs[count] = tmpl;
8394 specs[count++].index = gr_values[regno].value & 0xFF;
8395 }
8396 else
8397 {
8398 specs[count] = tmpl;
8399 specs[count++].specific = 0;
8400 }
8401 }
8402 }
8403 else
8404 {
8405 UNHANDLED;
8406 }
8407 break;
8408
8409 case IA64_RS_DBR: /* four or more registers */
8410 if (note == 3)
8411 {
8412 if (idesc->operands[!rsrc_write] == IA64_OPND_DBR_R3)
8413 {
8414 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8415 if (regno >= 0 && regno < NELEMS (gr_values)
8416 && KNOWN (regno))
8417 {
8418 specs[count] = tmpl;
8419 specs[count++].index = gr_values[regno].value & 0xFF;
8420 }
8421 else
8422 {
8423 specs[count] = tmpl;
8424 specs[count++].specific = 0;
8425 }
8426 }
8427 }
8428 else if (note == 0 && !rsrc_write)
8429 {
8430 specs[count] = tmpl;
8431 specs[count++].specific = 0;
8432 }
8433 else
8434 {
8435 UNHANDLED;
8436 }
8437 break;
8438
8439 case IA64_RS_IBR: /* four or more registers */
8440 if (note == 3)
8441 {
8442 if (idesc->operands[!rsrc_write] == IA64_OPND_IBR_R3)
8443 {
8444 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8445 if (regno >= 0 && regno < NELEMS (gr_values)
8446 && KNOWN (regno))
8447 {
8448 specs[count] = tmpl;
8449 specs[count++].index = gr_values[regno].value & 0xFF;
8450 }
8451 else
8452 {
8453 specs[count] = tmpl;
8454 specs[count++].specific = 0;
8455 }
8456 }
8457 }
8458 else
8459 {
8460 UNHANDLED;
8461 }
8462 break;
8463
8464 case IA64_RS_MSR:
8465 if (note == 5)
8466 {
8467 /* These are implementation specific. Force all references to
8468 conflict with all other references. */
8469 specs[count] = tmpl;
8470 specs[count++].specific = 0;
8471 }
8472 else
8473 {
8474 UNHANDLED;
8475 }
8476 break;
8477
8478 case IA64_RS_PKR: /* 16 or more registers */
8479 if (note == 3 || note == 4)
8480 {
8481 if (idesc->operands[!rsrc_write] == IA64_OPND_PKR_R3)
8482 {
8483 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8484 if (regno >= 0 && regno < NELEMS (gr_values)
8485 && KNOWN (regno))
8486 {
8487 if (note == 3)
8488 {
8489 specs[count] = tmpl;
8490 specs[count++].index = gr_values[regno].value & 0xFF;
8491 }
8492 else
8493 for (i = 0; i < NELEMS (gr_values); i++)
8494 {
8495 /* Uses all registers *except* the one in R3. */
8496 if ((unsigned)i != (gr_values[regno].value & 0xFF))
8497 {
8498 specs[count] = tmpl;
8499 specs[count++].index = i;
8500 }
8501 }
8502 }
8503 else
8504 {
8505 specs[count] = tmpl;
8506 specs[count++].specific = 0;
8507 }
8508 }
8509 }
8510 else if (note == 0)
8511 {
8512 /* probe et al. */
8513 specs[count] = tmpl;
8514 specs[count++].specific = 0;
8515 }
8516 break;
8517
8518 case IA64_RS_PMC: /* four or more registers */
8519 if (note == 3)
8520 {
8521 if (idesc->operands[!rsrc_write] == IA64_OPND_PMC_R3
8522 || (!rsrc_write && idesc->operands[1] == IA64_OPND_PMD_R3))
8523
8524 {
8525 int reg_index = ((idesc->operands[1] == IA64_OPND_R3 && !rsrc_write)
8526 ? 1 : !rsrc_write);
8527 int regno = CURR_SLOT.opnd[reg_index].X_add_number - REG_GR;
8528 if (regno >= 0 && regno < NELEMS (gr_values)
8529 && KNOWN (regno))
8530 {
8531 specs[count] = tmpl;
8532 specs[count++].index = gr_values[regno].value & 0xFF;
8533 }
8534 else
8535 {
8536 specs[count] = tmpl;
8537 specs[count++].specific = 0;
8538 }
8539 }
8540 }
8541 else
8542 {
8543 UNHANDLED;
8544 }
8545 break;
8546
8547 case IA64_RS_PMD: /* four or more registers */
8548 if (note == 3)
8549 {
8550 if (idesc->operands[!rsrc_write] == IA64_OPND_PMD_R3)
8551 {
8552 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8553 if (regno >= 0 && regno < NELEMS (gr_values)
8554 && KNOWN (regno))
8555 {
8556 specs[count] = tmpl;
8557 specs[count++].index = gr_values[regno].value & 0xFF;
8558 }
8559 else
8560 {
8561 specs[count] = tmpl;
8562 specs[count++].specific = 0;
8563 }
8564 }
8565 }
8566 else
8567 {
8568 UNHANDLED;
8569 }
8570 break;
8571
8572 case IA64_RS_RR: /* eight registers */
8573 if (note == 6)
8574 {
8575 if (idesc->operands[!rsrc_write] == IA64_OPND_RR_R3)
8576 {
8577 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8578 if (regno >= 0 && regno < NELEMS (gr_values)
8579 && KNOWN (regno))
8580 {
8581 specs[count] = tmpl;
8582 specs[count++].index = (gr_values[regno].value >> 61) & 0x7;
8583 }
8584 else
8585 {
8586 specs[count] = tmpl;
8587 specs[count++].specific = 0;
8588 }
8589 }
8590 }
8591 else if (note == 0 && !rsrc_write)
8592 {
8593 specs[count] = tmpl;
8594 specs[count++].specific = 0;
8595 }
8596 else
8597 {
8598 UNHANDLED;
8599 }
8600 break;
8601
8602 case IA64_RS_CR_IRR:
8603 if (note == 0)
8604 {
8605 /* handle mov-from-CR-IVR; it's a read that writes CR[IRR] */
8606 int regno = CURR_SLOT.opnd[1].X_add_number - REG_CR;
8607 if (rsrc_write
8608 && idesc->operands[1] == IA64_OPND_CR3
8609 && regno == CR_IVR)
8610 {
8611 for (i = 0; i < 4; i++)
8612 {
8613 specs[count] = tmpl;
8614 specs[count++].index = CR_IRR0 + i;
8615 }
8616 }
8617 }
8618 else if (note == 1)
8619 {
8620 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8621 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8622 && regno >= CR_IRR0
8623 && regno <= CR_IRR3)
8624 {
8625 specs[count] = tmpl;
8626 specs[count++].index = regno;
8627 }
8628 }
8629 else
8630 {
8631 UNHANDLED;
8632 }
8633 break;
8634
8635 case IA64_RS_CR_IIB:
8636 if (note != 0)
8637 {
8638 UNHANDLED;
8639 }
8640 else
8641 {
8642 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8643 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8644 && (regno == CR_IIB0 || regno == CR_IIB1))
8645 {
8646 specs[count] = tmpl;
8647 specs[count++].index = regno;
8648 }
8649 }
8650 break;
8651
8652 case IA64_RS_CR_LRR:
8653 if (note != 1)
8654 {
8655 UNHANDLED;
8656 }
8657 else
8658 {
8659 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8660 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8661 && (regno == CR_LRR0 || regno == CR_LRR1))
8662 {
8663 specs[count] = tmpl;
8664 specs[count++].index = regno;
8665 }
8666 }
8667 break;
8668
8669 case IA64_RS_CR:
8670 if (note == 1)
8671 {
8672 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
8673 {
8674 specs[count] = tmpl;
8675 specs[count++].index =
8676 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8677 }
8678 }
8679 else
8680 {
8681 UNHANDLED;
8682 }
8683 break;
8684
8685 case IA64_RS_DAHR:
8686 if (note == 0)
8687 {
8688 if (idesc->operands[!rsrc_write] == IA64_OPND_DAHR3)
8689 {
8690 specs[count] = tmpl;
8691 specs[count++].index =
8692 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_DAHR;
8693 }
8694 }
8695 else
8696 {
8697 UNHANDLED;
8698 }
8699 break;
8700
8701 case IA64_RS_FR:
8702 case IA64_RS_FRb:
8703 if (note != 1)
8704 {
8705 UNHANDLED;
8706 }
8707 else if (rsrc_write)
8708 {
8709 if (dep->specifier == IA64_RS_FRb
8710 && idesc->operands[0] == IA64_OPND_F1)
8711 {
8712 specs[count] = tmpl;
8713 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_FR;
8714 }
8715 }
8716 else
8717 {
8718 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8719 {
8720 if (idesc->operands[i] == IA64_OPND_F2
8721 || idesc->operands[i] == IA64_OPND_F3
8722 || idesc->operands[i] == IA64_OPND_F4)
8723 {
8724 specs[count] = tmpl;
8725 specs[count++].index =
8726 CURR_SLOT.opnd[i].X_add_number - REG_FR;
8727 }
8728 }
8729 }
8730 break;
8731
8732 case IA64_RS_GR:
8733 if (note == 13)
8734 {
8735 /* This reference applies only to the GR whose value is loaded with
8736 data returned from memory. */
8737 specs[count] = tmpl;
8738 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_GR;
8739 }
8740 else if (note == 1)
8741 {
8742 if (rsrc_write)
8743 {
8744 for (i = 0; i < idesc->num_outputs; i++)
8745 if (idesc->operands[i] == IA64_OPND_R1
8746 || idesc->operands[i] == IA64_OPND_R2
8747 || idesc->operands[i] == IA64_OPND_R3)
8748 {
8749 specs[count] = tmpl;
8750 specs[count++].index =
8751 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8752 }
8753 if (idesc->flags & IA64_OPCODE_POSTINC)
8754 for (i = 0; i < NELEMS (idesc->operands); i++)
8755 if (idesc->operands[i] == IA64_OPND_MR3)
8756 {
8757 specs[count] = tmpl;
8758 specs[count++].index =
8759 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8760 }
8761 }
8762 else
8763 {
8764 /* Look for anything that reads a GR. */
8765 for (i = 0; i < NELEMS (idesc->operands); i++)
8766 {
8767 if (idesc->operands[i] == IA64_OPND_MR3
8768 || idesc->operands[i] == IA64_OPND_CPUID_R3
8769 || idesc->operands[i] == IA64_OPND_DBR_R3
8770 || idesc->operands[i] == IA64_OPND_IBR_R3
8771 || idesc->operands[i] == IA64_OPND_MSR_R3
8772 || idesc->operands[i] == IA64_OPND_PKR_R3
8773 || idesc->operands[i] == IA64_OPND_PMC_R3
8774 || idesc->operands[i] == IA64_OPND_PMD_R3
8775 || idesc->operands[i] == IA64_OPND_DAHR_R3
8776 || idesc->operands[i] == IA64_OPND_RR_R3
8777 || ((i >= idesc->num_outputs)
8778 && (idesc->operands[i] == IA64_OPND_R1
8779 || idesc->operands[i] == IA64_OPND_R2
8780 || idesc->operands[i] == IA64_OPND_R3
8781 /* addl source register. */
8782 || idesc->operands[i] == IA64_OPND_R3_2)))
8783 {
8784 specs[count] = tmpl;
8785 specs[count++].index =
8786 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8787 }
8788 }
8789 }
8790 }
8791 else
8792 {
8793 UNHANDLED;
8794 }
8795 break;
8796
8797 /* This is the same as IA64_RS_PRr, except that the register range is
8798 from 1 - 15, and there are no rotating register reads/writes here. */
8799 case IA64_RS_PR:
8800 if (note == 0)
8801 {
8802 for (i = 1; i < 16; i++)
8803 {
8804 specs[count] = tmpl;
8805 specs[count++].index = i;
8806 }
8807 }
8808 else if (note == 7)
8809 {
8810 valueT mask = 0;
8811 /* Mark only those registers indicated by the mask. */
8812 if (rsrc_write)
8813 {
8814 mask = CURR_SLOT.opnd[2].X_add_number;
8815 for (i = 1; i < 16; i++)
8816 if (mask & ((valueT) 1 << i))
8817 {
8818 specs[count] = tmpl;
8819 specs[count++].index = i;
8820 }
8821 }
8822 else
8823 {
8824 UNHANDLED;
8825 }
8826 }
8827 else if (note == 11) /* note 11 implies note 1 as well */
8828 {
8829 if (rsrc_write)
8830 {
8831 for (i = 0; i < idesc->num_outputs; i++)
8832 {
8833 if (idesc->operands[i] == IA64_OPND_P1
8834 || idesc->operands[i] == IA64_OPND_P2)
8835 {
8836 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8837 if (regno >= 1 && regno < 16)
8838 {
8839 specs[count] = tmpl;
8840 specs[count++].index = regno;
8841 }
8842 }
8843 }
8844 }
8845 else
8846 {
8847 UNHANDLED;
8848 }
8849 }
8850 else if (note == 12)
8851 {
8852 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8853 {
8854 specs[count] = tmpl;
8855 specs[count++].index = CURR_SLOT.qp_regno;
8856 }
8857 }
8858 else if (note == 1)
8859 {
8860 if (rsrc_write)
8861 {
8862 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8863 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8864 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8865 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8866
8867 if ((idesc->operands[0] == IA64_OPND_P1
8868 || idesc->operands[0] == IA64_OPND_P2)
8869 && p1 >= 1 && p1 < 16)
8870 {
8871 specs[count] = tmpl;
8872 specs[count].cmp_type =
8873 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8874 specs[count++].index = p1;
8875 }
8876 if ((idesc->operands[1] == IA64_OPND_P1
8877 || idesc->operands[1] == IA64_OPND_P2)
8878 && p2 >= 1 && p2 < 16)
8879 {
8880 specs[count] = tmpl;
8881 specs[count].cmp_type =
8882 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8883 specs[count++].index = p2;
8884 }
8885 }
8886 else
8887 {
8888 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8889 {
8890 specs[count] = tmpl;
8891 specs[count++].index = CURR_SLOT.qp_regno;
8892 }
8893 if (idesc->operands[1] == IA64_OPND_PR)
8894 {
8895 for (i = 1; i < 16; i++)
8896 {
8897 specs[count] = tmpl;
8898 specs[count++].index = i;
8899 }
8900 }
8901 }
8902 }
8903 else
8904 {
8905 UNHANDLED;
8906 }
8907 break;
8908
8909 /* This is the general case for PRs. IA64_RS_PR and IA64_RS_PR63 are
8910 simplified cases of this. */
8911 case IA64_RS_PRr:
8912 if (note == 0)
8913 {
8914 for (i = 16; i < 63; i++)
8915 {
8916 specs[count] = tmpl;
8917 specs[count++].index = i;
8918 }
8919 }
8920 else if (note == 7)
8921 {
8922 valueT mask = 0;
8923 /* Mark only those registers indicated by the mask. */
8924 if (rsrc_write
8925 && idesc->operands[0] == IA64_OPND_PR)
8926 {
8927 mask = CURR_SLOT.opnd[2].X_add_number;
8928 if (mask & ((valueT) 1 << 16))
8929 for (i = 16; i < 63; i++)
8930 {
8931 specs[count] = tmpl;
8932 specs[count++].index = i;
8933 }
8934 }
8935 else if (rsrc_write
8936 && idesc->operands[0] == IA64_OPND_PR_ROT)
8937 {
8938 for (i = 16; i < 63; i++)
8939 {
8940 specs[count] = tmpl;
8941 specs[count++].index = i;
8942 }
8943 }
8944 else
8945 {
8946 UNHANDLED;
8947 }
8948 }
8949 else if (note == 11) /* note 11 implies note 1 as well */
8950 {
8951 if (rsrc_write)
8952 {
8953 for (i = 0; i < idesc->num_outputs; i++)
8954 {
8955 if (idesc->operands[i] == IA64_OPND_P1
8956 || idesc->operands[i] == IA64_OPND_P2)
8957 {
8958 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8959 if (regno >= 16 && regno < 63)
8960 {
8961 specs[count] = tmpl;
8962 specs[count++].index = regno;
8963 }
8964 }
8965 }
8966 }
8967 else
8968 {
8969 UNHANDLED;
8970 }
8971 }
8972 else if (note == 12)
8973 {
8974 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
8975 {
8976 specs[count] = tmpl;
8977 specs[count++].index = CURR_SLOT.qp_regno;
8978 }
8979 }
8980 else if (note == 1)
8981 {
8982 if (rsrc_write)
8983 {
8984 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8985 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8986 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8987 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8988
8989 if ((idesc->operands[0] == IA64_OPND_P1
8990 || idesc->operands[0] == IA64_OPND_P2)
8991 && p1 >= 16 && p1 < 63)
8992 {
8993 specs[count] = tmpl;
8994 specs[count].cmp_type =
8995 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8996 specs[count++].index = p1;
8997 }
8998 if ((idesc->operands[1] == IA64_OPND_P1
8999 || idesc->operands[1] == IA64_OPND_P2)
9000 && p2 >= 16 && p2 < 63)
9001 {
9002 specs[count] = tmpl;
9003 specs[count].cmp_type =
9004 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
9005 specs[count++].index = p2;
9006 }
9007 }
9008 else
9009 {
9010 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
9011 {
9012 specs[count] = tmpl;
9013 specs[count++].index = CURR_SLOT.qp_regno;
9014 }
9015 if (idesc->operands[1] == IA64_OPND_PR)
9016 {
9017 for (i = 16; i < 63; i++)
9018 {
9019 specs[count] = tmpl;
9020 specs[count++].index = i;
9021 }
9022 }
9023 }
9024 }
9025 else
9026 {
9027 UNHANDLED;
9028 }
9029 break;
9030
9031 case IA64_RS_PSR:
9032 /* Verify that the instruction is using the PSR bit indicated in
9033 dep->regindex. */
9034 if (note == 0)
9035 {
9036 if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_UM)
9037 {
9038 if (dep->regindex < 6)
9039 {
9040 specs[count++] = tmpl;
9041 }
9042 }
9043 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR)
9044 {
9045 if (dep->regindex < 32
9046 || dep->regindex == 35
9047 || dep->regindex == 36
9048 || (!rsrc_write && dep->regindex == PSR_CPL))
9049 {
9050 specs[count++] = tmpl;
9051 }
9052 }
9053 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_L)
9054 {
9055 if (dep->regindex < 32
9056 || dep->regindex == 35
9057 || dep->regindex == 36
9058 || (rsrc_write && dep->regindex == PSR_CPL))
9059 {
9060 specs[count++] = tmpl;
9061 }
9062 }
9063 else
9064 {
9065 /* Several PSR bits have very specific dependencies. */
9066 switch (dep->regindex)
9067 {
9068 default:
9069 specs[count++] = tmpl;
9070 break;
9071 case PSR_IC:
9072 if (rsrc_write)
9073 {
9074 specs[count++] = tmpl;
9075 }
9076 else
9077 {
9078 /* Only certain CR accesses use PSR.ic */
9079 if (idesc->operands[0] == IA64_OPND_CR3
9080 || idesc->operands[1] == IA64_OPND_CR3)
9081 {
9082 int reg_index =
9083 ((idesc->operands[0] == IA64_OPND_CR3)
9084 ? 0 : 1);
9085 int regno =
9086 CURR_SLOT.opnd[reg_index].X_add_number - REG_CR;
9087
9088 switch (regno)
9089 {
9090 default:
9091 break;
9092 case CR_ITIR:
9093 case CR_IFS:
9094 case CR_IIM:
9095 case CR_IIP:
9096 case CR_IPSR:
9097 case CR_ISR:
9098 case CR_IFA:
9099 case CR_IHA:
9100 case CR_IIB0:
9101 case CR_IIB1:
9102 case CR_IIPA:
9103 specs[count++] = tmpl;
9104 break;
9105 }
9106 }
9107 }
9108 break;
9109 case PSR_CPL:
9110 if (rsrc_write)
9111 {
9112 specs[count++] = tmpl;
9113 }
9114 else
9115 {
9116 /* Only some AR accesses use cpl */
9117 if (idesc->operands[0] == IA64_OPND_AR3
9118 || idesc->operands[1] == IA64_OPND_AR3)
9119 {
9120 int reg_index =
9121 ((idesc->operands[0] == IA64_OPND_AR3)
9122 ? 0 : 1);
9123 int regno =
9124 CURR_SLOT.opnd[reg_index].X_add_number - REG_AR;
9125
9126 if (regno == AR_ITC
9127 || regno == AR_RUC
9128 || (reg_index == 0
9129 && (regno == AR_RSC
9130 || (regno >= AR_K0
9131 && regno <= AR_K7))))
9132 {
9133 specs[count++] = tmpl;
9134 }
9135 }
9136 else
9137 {
9138 specs[count++] = tmpl;
9139 }
9140 break;
9141 }
9142 }
9143 }
9144 }
9145 else if (note == 7)
9146 {
9147 valueT mask = 0;
9148 if (idesc->operands[0] == IA64_OPND_IMMU24)
9149 {
9150 mask = CURR_SLOT.opnd[0].X_add_number;
9151 }
9152 else
9153 {
9154 UNHANDLED;
9155 }
9156 if (mask & ((valueT) 1 << dep->regindex))
9157 {
9158 specs[count++] = tmpl;
9159 }
9160 }
9161 else if (note == 8)
9162 {
9163 int min = dep->regindex == PSR_DFL ? 2 : 32;
9164 int max = dep->regindex == PSR_DFL ? 31 : 127;
9165 /* dfh is read on FR32-127; dfl is read on FR2-31 */
9166 for (i = 0; i < NELEMS (idesc->operands); i++)
9167 {
9168 if (idesc->operands[i] == IA64_OPND_F1
9169 || idesc->operands[i] == IA64_OPND_F2
9170 || idesc->operands[i] == IA64_OPND_F3
9171 || idesc->operands[i] == IA64_OPND_F4)
9172 {
9173 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9174 if (reg >= min && reg <= max)
9175 {
9176 specs[count++] = tmpl;
9177 }
9178 }
9179 }
9180 }
9181 else if (note == 9)
9182 {
9183 int min = dep->regindex == PSR_MFL ? 2 : 32;
9184 int max = dep->regindex == PSR_MFL ? 31 : 127;
9185 /* mfh is read on writes to FR32-127; mfl is read on writes to
9186 FR2-31 */
9187 for (i = 0; i < idesc->num_outputs; i++)
9188 {
9189 if (idesc->operands[i] == IA64_OPND_F1)
9190 {
9191 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9192 if (reg >= min && reg <= max)
9193 {
9194 specs[count++] = tmpl;
9195 }
9196 }
9197 }
9198 }
9199 else if (note == 10)
9200 {
9201 for (i = 0; i < NELEMS (idesc->operands); i++)
9202 {
9203 if (idesc->operands[i] == IA64_OPND_R1
9204 || idesc->operands[i] == IA64_OPND_R2
9205 || idesc->operands[i] == IA64_OPND_R3)
9206 {
9207 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9208 if (regno >= 16 && regno <= 31)
9209 {
9210 specs[count++] = tmpl;
9211 }
9212 }
9213 }
9214 }
9215 else
9216 {
9217 UNHANDLED;
9218 }
9219 break;
9220
9221 case IA64_RS_AR_FPSR:
9222 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
9223 {
9224 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
9225 if (regno == AR_FPSR)
9226 {
9227 specs[count++] = tmpl;
9228 }
9229 }
9230 else
9231 {
9232 specs[count++] = tmpl;
9233 }
9234 break;
9235
9236 case IA64_RS_ARX:
9237 /* Handle all AR[REG] resources */
9238 if (note == 0 || note == 1)
9239 {
9240 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
9241 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3
9242 && regno == dep->regindex)
9243 {
9244 specs[count++] = tmpl;
9245 }
9246 /* other AR[REG] resources may be affected by AR accesses */
9247 else if (idesc->operands[0] == IA64_OPND_AR3)
9248 {
9249 /* AR[] writes */
9250 regno = CURR_SLOT.opnd[0].X_add_number - REG_AR;
9251 switch (dep->regindex)
9252 {
9253 default:
9254 break;
9255 case AR_BSP:
9256 case AR_RNAT:
9257 if (regno == AR_BSPSTORE)
9258 {
9259 specs[count++] = tmpl;
9260 }
9261 /* Fall through. */
9262 case AR_RSC:
9263 if (!rsrc_write &&
9264 (regno == AR_BSPSTORE
9265 || regno == AR_RNAT))
9266 {
9267 specs[count++] = tmpl;
9268 }
9269 break;
9270 }
9271 }
9272 else if (idesc->operands[1] == IA64_OPND_AR3)
9273 {
9274 /* AR[] reads */
9275 regno = CURR_SLOT.opnd[1].X_add_number - REG_AR;
9276 switch (dep->regindex)
9277 {
9278 default:
9279 break;
9280 case AR_RSC:
9281 if (regno == AR_BSPSTORE || regno == AR_RNAT)
9282 {
9283 specs[count++] = tmpl;
9284 }
9285 break;
9286 }
9287 }
9288 else
9289 {
9290 specs[count++] = tmpl;
9291 }
9292 }
9293 else
9294 {
9295 UNHANDLED;
9296 }
9297 break;
9298
9299 case IA64_RS_CRX:
9300 /* Handle all CR[REG] resources.
9301 ??? FIXME: Rule 17 isn't really handled correctly. */
9302 if (note == 0 || note == 1 || note == 17)
9303 {
9304 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
9305 {
9306 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
9307 if (regno == dep->regindex)
9308 {
9309 specs[count++] = tmpl;
9310 }
9311 else if (!rsrc_write)
9312 {
9313 /* Reads from CR[IVR] affect other resources. */
9314 if (regno == CR_IVR)
9315 {
9316 if ((dep->regindex >= CR_IRR0
9317 && dep->regindex <= CR_IRR3)
9318 || dep->regindex == CR_TPR)
9319 {
9320 specs[count++] = tmpl;
9321 }
9322 }
9323 }
9324 }
9325 else
9326 {
9327 specs[count++] = tmpl;
9328 }
9329 }
9330 else
9331 {
9332 UNHANDLED;
9333 }
9334 break;
9335
9336 case IA64_RS_INSERVICE:
9337 /* look for write of EOI (67) or read of IVR (65) */
9338 if ((idesc->operands[0] == IA64_OPND_CR3
9339 && CURR_SLOT.opnd[0].X_add_number - REG_CR == CR_EOI)
9340 || (idesc->operands[1] == IA64_OPND_CR3
9341 && CURR_SLOT.opnd[1].X_add_number - REG_CR == CR_IVR))
9342 {
9343 specs[count++] = tmpl;
9344 }
9345 break;
9346
9347 case IA64_RS_GR0:
9348 if (note == 1)
9349 {
9350 specs[count++] = tmpl;
9351 }
9352 else
9353 {
9354 UNHANDLED;
9355 }
9356 break;
9357
9358 case IA64_RS_CFM:
9359 if (note != 2)
9360 {
9361 specs[count++] = tmpl;
9362 }
9363 else
9364 {
9365 /* Check if any of the registers accessed are in the rotating region.
9366 A mov to/from pr accesses CFM only when qp_regno is in the rotating
9367 region. */
9368 for (i = 0; i < NELEMS (idesc->operands); i++)
9369 {
9370 if (idesc->operands[i] == IA64_OPND_R1
9371 || idesc->operands[i] == IA64_OPND_R2
9372 || idesc->operands[i] == IA64_OPND_R3)
9373 {
9374 int num = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9375 /* Assumes that md.rot.num_regs is always valid */
9376 if (md.rot.num_regs > 0
9377 && num > 31
9378 && num < 31 + md.rot.num_regs)
9379 {
9380 specs[count] = tmpl;
9381 specs[count++].specific = 0;
9382 }
9383 }
9384 else if (idesc->operands[i] == IA64_OPND_F1
9385 || idesc->operands[i] == IA64_OPND_F2
9386 || idesc->operands[i] == IA64_OPND_F3
9387 || idesc->operands[i] == IA64_OPND_F4)
9388 {
9389 int num = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9390 if (num > 31)
9391 {
9392 specs[count] = tmpl;
9393 specs[count++].specific = 0;
9394 }
9395 }
9396 else if (idesc->operands[i] == IA64_OPND_P1
9397 || idesc->operands[i] == IA64_OPND_P2)
9398 {
9399 int num = CURR_SLOT.opnd[i].X_add_number - REG_P;
9400 if (num > 15)
9401 {
9402 specs[count] = tmpl;
9403 specs[count++].specific = 0;
9404 }
9405 }
9406 }
9407 if (CURR_SLOT.qp_regno > 15)
9408 {
9409 specs[count] = tmpl;
9410 specs[count++].specific = 0;
9411 }
9412 }
9413 break;
9414
9415 /* This is the same as IA64_RS_PRr, except simplified to account for
9416 the fact that there is only one register. */
9417 case IA64_RS_PR63:
9418 if (note == 0)
9419 {
9420 specs[count++] = tmpl;
9421 }
9422 else if (note == 7)
9423 {
9424 valueT mask = 0;
9425 if (idesc->operands[2] == IA64_OPND_IMM17)
9426 mask = CURR_SLOT.opnd[2].X_add_number;
9427 if (mask & ((valueT) 1 << 63))
9428 specs[count++] = tmpl;
9429 }
9430 else if (note == 11)
9431 {
9432 if ((idesc->operands[0] == IA64_OPND_P1
9433 && CURR_SLOT.opnd[0].X_add_number - REG_P == 63)
9434 || (idesc->operands[1] == IA64_OPND_P2
9435 && CURR_SLOT.opnd[1].X_add_number - REG_P == 63))
9436 {
9437 specs[count++] = tmpl;
9438 }
9439 }
9440 else if (note == 12)
9441 {
9442 if (CURR_SLOT.qp_regno == 63)
9443 {
9444 specs[count++] = tmpl;
9445 }
9446 }
9447 else if (note == 1)
9448 {
9449 if (rsrc_write)
9450 {
9451 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9452 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9453 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
9454 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
9455
9456 if (p1 == 63
9457 && (idesc->operands[0] == IA64_OPND_P1
9458 || idesc->operands[0] == IA64_OPND_P2))
9459 {
9460 specs[count] = tmpl;
9461 specs[count++].cmp_type =
9462 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
9463 }
9464 if (p2 == 63
9465 && (idesc->operands[1] == IA64_OPND_P1
9466 || idesc->operands[1] == IA64_OPND_P2))
9467 {
9468 specs[count] = tmpl;
9469 specs[count++].cmp_type =
9470 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
9471 }
9472 }
9473 else
9474 {
9475 if (CURR_SLOT.qp_regno == 63)
9476 {
9477 specs[count++] = tmpl;
9478 }
9479 }
9480 }
9481 else
9482 {
9483 UNHANDLED;
9484 }
9485 break;
9486
9487 case IA64_RS_RSE:
9488 /* FIXME: we can identify some individual RSE-written resources, but the
9489 RSE-read resources have not yet been completely identified, so for now
9490 treat the RSE as a single resource. */
9491 if (strncmp (idesc->name, "mov", 3) == 0)
9492 {
9493 if (rsrc_write)
9494 {
9495 if (idesc->operands[0] == IA64_OPND_AR3
9496 && CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE)
9497 {
9498 specs[count++] = tmpl;
9499 }
9500 }
9501 else
9502 {
9503 if (idesc->operands[0] == IA64_OPND_AR3)
9504 {
9505 if (CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE
9506 || CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_RNAT)
9507 {
9508 specs[count++] = tmpl;
9509 }
9510 }
9511 else if (idesc->operands[1] == IA64_OPND_AR3)
9512 {
9513 if (CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSP
9514 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSPSTORE
9515 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_RNAT)
9516 {
9517 specs[count++] = tmpl;
9518 }
9519 }
9520 }
9521 }
9522 else
9523 {
9524 specs[count++] = tmpl;
9525 }
9526 break;
9527
9528 case IA64_RS_ANY:
9529 /* FIXME -- do any of these need to be non-specific? */
9530 specs[count++] = tmpl;
9531 break;
9532
9533 default:
9534 as_bad (_("Unrecognized dependency specifier %d\n"), dep->specifier);
9535 break;
9536 }
9537
9538 return count;
9539 }
9540
9541 /* Clear branch flags on marked resources. This breaks the link between the
9542 QP of the marking instruction and a subsequent branch on the same QP. */
9543
9544 static void
9545 clear_qp_branch_flag (valueT mask)
9546 {
9547 int i;
9548 for (i = 0; i < regdepslen; i++)
9549 {
9550 valueT bit = ((valueT) 1 << regdeps[i].qp_regno);
9551 if ((bit & mask) != 0)
9552 {
9553 regdeps[i].link_to_qp_branch = 0;
9554 }
9555 }
9556 }
9557
9558 /* MASK contains 2 and only 2 PRs which are mutually exclusive. Remove
9559 any mutexes which contain one of the PRs and create new ones when
9560 needed. */
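/* Worked example (illustrative, not from the original comment): suppose a
   mutex {p6,p7,p8} is already recorded on the current path and MASK covers
   {p6,p7}.  MASK is a proper subset, so ADD becomes 1, the old entry is
   reduced to {p8} and, having only one PR left, is removed; finally a new
   mutex {p6,p7} is added.  If MASK were instead identical to an existing
   mutex on the same path, that entry would be left alone and -1 returned.  */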
9561
9562 static int
9563 update_qp_mutex (valueT mask)
9564 {
9565 int i;
9566 int add = 0;
9567
9568 i = 0;
9569 while (i < qp_mutexeslen)
9570 {
9571 if ((qp_mutexes[i].prmask & mask) != 0)
9572 {
9573 /* If it destroys and creates the same mutex, do nothing. */
9574 if (qp_mutexes[i].prmask == mask
9575 && qp_mutexes[i].path == md.path)
9576 {
9577 i++;
9578 add = -1;
9579 }
9580 else
9581 {
9582 int keep = 0;
9583
9584 if (md.debug_dv)
9585 {
9586 fprintf (stderr, " Clearing mutex relation");
9587 print_prmask (qp_mutexes[i].prmask);
9588 fprintf (stderr, "\n");
9589 }
9590
9591 /* Deal with an old mutex containing three or more PRs only if
9592 the new mutex is on the same execution path as it.
9593
9594 FIXME: The 3+ mutex support is incomplete.
9595 dot_pred_rel () may be a better place to fix it. */
9596 if (qp_mutexes[i].path == md.path)
9597 {
9598 /* If the mask is a proper subset of this mutex, create a
9599 new mutex for the mask. */
9600 if (add == 0
9601 && (qp_mutexes[i].prmask & mask) == mask)
9602 add = 1;
9603
9604 qp_mutexes[i].prmask &= ~mask;
9605 if (qp_mutexes[i].prmask & (qp_mutexes[i].prmask - 1))
9606 {
9607 /* Keep the modified mutex if more than one
9608 PR is left. */
9609 keep = 1;
9610 i++;
9611 }
9612 }
9613
9614 if (keep == 0)
9615 /* Remove the mutex. */
9616 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9617 }
9618 }
9619 else
9620 ++i;
9621 }
9622
9623 if (add == 1)
9624 add_qp_mutex (mask);
9625
9626 return add;
9627 }
9628
9629 /* Remove any mutexes which contain any of the PRs indicated in the mask.
9630
9631 Any change to a PR clears the mutex relations which include that PR. */
9632
9633 static void
9634 clear_qp_mutex (valueT mask)
9635 {
9636 int i;
9637
9638 i = 0;
9639 while (i < qp_mutexeslen)
9640 {
9641 if ((qp_mutexes[i].prmask & mask) != 0)
9642 {
9643 if (md.debug_dv)
9644 {
9645 fprintf (stderr, " Clearing mutex relation");
9646 print_prmask (qp_mutexes[i].prmask);
9647 fprintf (stderr, "\n");
9648 }
9649 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9650 }
9651 else
9652 ++i;
9653 }
9654 }
9655
9656 /* Clear implies relations which contain PRs in the given masks.
9657 P1_MASK indicates the source of the implies relation, while P2_MASK
9658 indicates the implied PR. */
9659
9660 static void
9661 clear_qp_implies (valueT p1_mask, valueT p2_mask)
9662 {
9663 int i;
9664
9665 i = 0;
9666 while (i < qp_implieslen)
9667 {
9668 if ((((valueT) 1 << qp_implies[i].p1) & p1_mask) != 0
9669 || (((valueT) 1 << qp_implies[i].p2) & p2_mask) != 0)
9670 {
9671 if (md.debug_dv)
9672 fprintf (stderr, "Clearing implied relation PR%d->PR%d\n",
9673 qp_implies[i].p1, qp_implies[i].p2);
9674 qp_implies[i] = qp_implies[--qp_implieslen];
9675 }
9676 else
9677 ++i;
9678 }
9679 }
9680
9681 /* Add the PRs specified to the list of implied relations. */
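/* For example (illustrative): for "(p3) cmp.eq.unc p1, p2 = r4, r5",
   note_register_values () below ends up calling add_qp_imply (1, 3) and
   add_qp_imply (2, 3), since an unconditional compare can only set a target
   predicate to 1 when its qualifying predicate is 1, so p1 (or p2) being
   true implies that p3 is true.  */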
9682
9683 static void
9684 add_qp_imply (int p1, int p2)
9685 {
9686 valueT mask;
9687 valueT bit;
9688 int i;
9689
9690 /* p0 is not meaningful here. */
9691 if (p1 == 0 || p2 == 0)
9692 abort ();
9693
9694 if (p1 == p2)
9695 return;
9696
9697 /* If it exists already, ignore it. */
9698 for (i = 0; i < qp_implieslen; i++)
9699 {
9700 if (qp_implies[i].p1 == p1
9701 && qp_implies[i].p2 == p2
9702 && qp_implies[i].path == md.path
9703 && !qp_implies[i].p2_branched)
9704 return;
9705 }
9706
9707 if (qp_implieslen == qp_impliestotlen)
9708 {
9709 qp_impliestotlen += 20;
9710 qp_implies = XRESIZEVEC (struct qp_imply, qp_implies, qp_impliestotlen);
9711 }
9712 if (md.debug_dv)
9713 fprintf (stderr, " Registering PR%d implies PR%d\n", p1, p2);
9714 qp_implies[qp_implieslen].p1 = p1;
9715 qp_implies[qp_implieslen].p2 = p2;
9716 qp_implies[qp_implieslen].path = md.path;
9717 qp_implies[qp_implieslen++].p2_branched = 0;
9718
9719 /* Add in the implied transitive relations; for everything that p2 implies,
9720 make p1 imply that, too; for everything that implies p1, make it imply p2
9721 as well. */
9722 for (i = 0; i < qp_implieslen; i++)
9723 {
9724 if (qp_implies[i].p1 == p2)
9725 add_qp_imply (p1, qp_implies[i].p2);
9726 if (qp_implies[i].p2 == p1)
9727 add_qp_imply (qp_implies[i].p1, p2);
9728 }
9729 /* Add in mutex relations implied by this implies relation; for each mutex
9730 relation containing p2, duplicate it and replace p2 with p1. */
9731 bit = (valueT) 1 << p1;
9732 mask = (valueT) 1 << p2;
9733 for (i = 0; i < qp_mutexeslen; i++)
9734 {
9735 if (qp_mutexes[i].prmask & mask)
9736 add_qp_mutex ((qp_mutexes[i].prmask & ~mask) | bit);
9737 }
9738 }
9739
9740 /* Add the PRs specified in the mask to the mutex list; this means that only
9741 one of the PRs can be true at any time. PR0 should never be included in
9742 the mask. */
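/* E.g. (illustrative): a mutex between p6 and p7 would be registered with
   add_qp_mutex (((valueT) 1 << 6) | ((valueT) 1 << 7)); PR0 (bit 0) must
   never be part of the mask, hence the abort () below.  */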
9743
9744 static void
9745 add_qp_mutex (valueT mask)
9746 {
9747 if (mask & 0x1)
9748 abort ();
9749
9750 if (qp_mutexeslen == qp_mutexestotlen)
9751 {
9752 qp_mutexestotlen += 20;
9753 qp_mutexes = XRESIZEVEC (struct qpmutex, qp_mutexes, qp_mutexestotlen);
9754 }
9755 if (md.debug_dv)
9756 {
9757 fprintf (stderr, " Registering mutex on");
9758 print_prmask (mask);
9759 fprintf (stderr, "\n");
9760 }
9761 qp_mutexes[qp_mutexeslen].path = md.path;
9762 qp_mutexes[qp_mutexeslen++].prmask = mask;
9763 }
9764
9765 static int
9766 has_suffix_p (const char *name, const char *suffix)
9767 {
9768 size_t namelen = strlen (name);
9769 size_t sufflen = strlen (suffix);
9770
9771 if (namelen <= sufflen)
9772 return 0;
9773 return strcmp (name + namelen - sufflen, suffix) == 0;
9774 }
9775
9776 static void
9777 clear_register_values (void)
9778 {
9779 int i;
9780 if (md.debug_dv)
9781 fprintf (stderr, " Clearing register values\n");
9782 for (i = 1; i < NELEMS (gr_values); i++)
9783 gr_values[i].known = 0;
9784 }
9785
9786 /* Keep track of register values/changes which affect DV tracking.
9787
9788 Optimization note: we should add a flag to classes of insns where we
9789 otherwise have to examine a group of strings to identify them. */
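/* For example (illustrative): after

     movl r3 = 0x2000000000000000 ;;
     mov rr[r3] = r9

   the recorded value of r3 lets specify_resource () above narrow the
   region-register access to RR[1] (bits 63:61 of the address) rather than
   treating it as a write to an unspecified RR.  */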
9790
9791 static void
9792 note_register_values (struct ia64_opcode *idesc)
9793 {
9794 valueT qp_changemask = 0;
9795 int i;
9796
9797 /* Invalidate values for registers being written to. */
9798 for (i = 0; i < idesc->num_outputs; i++)
9799 {
9800 if (idesc->operands[i] == IA64_OPND_R1
9801 || idesc->operands[i] == IA64_OPND_R2
9802 || idesc->operands[i] == IA64_OPND_R3)
9803 {
9804 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9805 if (regno > 0 && regno < NELEMS (gr_values))
9806 gr_values[regno].known = 0;
9807 }
9808 else if (idesc->operands[i] == IA64_OPND_R3_2)
9809 {
9810 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9811 if (regno > 0 && regno < 4)
9812 gr_values[regno].known = 0;
9813 }
9814 else if (idesc->operands[i] == IA64_OPND_P1
9815 || idesc->operands[i] == IA64_OPND_P2)
9816 {
9817 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
9818 qp_changemask |= (valueT) 1 << regno;
9819 }
9820 else if (idesc->operands[i] == IA64_OPND_PR)
9821 {
9822 if (idesc->operands[2] & (valueT) 0x10000)
9823 qp_changemask = ~(valueT) 0x1FFFF | idesc->operands[2];
9824 else
9825 qp_changemask = idesc->operands[2];
9826 break;
9827 }
9828 else if (idesc->operands[i] == IA64_OPND_PR_ROT)
9829 {
9830 if (idesc->operands[1] & ((valueT) 1 << 43))
9831 qp_changemask = -((valueT) 1 << 44) | idesc->operands[1];
9832 else
9833 qp_changemask = idesc->operands[1];
9834 qp_changemask &= ~(valueT) 0xFFFF;
9835 break;
9836 }
9837 }
9838
9839 /* Always clear qp branch flags on any PR change. */
9840 /* FIXME there may be exceptions for certain compares. */
9841 clear_qp_branch_flag (qp_changemask);
9842
9843 /* Invalidate rotating registers on insns which affect RRBs in CFM. */
9844 if (idesc->flags & IA64_OPCODE_MOD_RRBS)
9845 {
9846 qp_changemask |= ~(valueT) 0xFFFF;
9847 if (strcmp (idesc->name, "clrrrb.pr") != 0)
9848 {
9849 for (i = 32; i < 32 + md.rot.num_regs; i++)
9850 gr_values[i].known = 0;
9851 }
9852 clear_qp_mutex (qp_changemask);
9853 clear_qp_implies (qp_changemask, qp_changemask);
9854 }
9855 /* After a call, all register values are undefined, except those marked
9856 as "safe". */
9857 else if (strncmp (idesc->name, "br.call", 6) == 0
9858 || strncmp (idesc->name, "brl.call", 7) == 0)
9859 {
9860 /* FIXME keep GR values which are marked as "safe_across_calls" */
9861 clear_register_values ();
9862 clear_qp_mutex (~qp_safe_across_calls);
9863 clear_qp_implies (~qp_safe_across_calls, ~qp_safe_across_calls);
9864 clear_qp_branch_flag (~qp_safe_across_calls);
9865 }
9866 else if (is_interruption_or_rfi (idesc)
9867 || is_taken_branch (idesc))
9868 {
9869 clear_register_values ();
9870 clear_qp_mutex (~(valueT) 0);
9871 clear_qp_implies (~(valueT) 0, ~(valueT) 0);
9872 }
9873 /* Look for mutex and implies relations. */
9874 else if ((idesc->operands[0] == IA64_OPND_P1
9875 || idesc->operands[0] == IA64_OPND_P2)
9876 && (idesc->operands[1] == IA64_OPND_P1
9877 || idesc->operands[1] == IA64_OPND_P2))
9878 {
9879 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9880 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9881 valueT p1mask = (p1 != 0) ? (valueT) 1 << p1 : 0;
9882 valueT p2mask = (p2 != 0) ? (valueT) 1 << p2 : 0;
9883
9884 /* If both PRs are PR0, we can't really do anything. */
9885 if (p1 == 0 && p2 == 0)
9886 {
9887 if (md.debug_dv)
9888 fprintf (stderr, " Ignoring PRs due to inclusion of p0\n");
9889 }
9890 /* In general, clear mutexes and implies which include P1 or P2,
9891 with the following exceptions. */
9892 else if (has_suffix_p (idesc->name, ".or.andcm")
9893 || has_suffix_p (idesc->name, ".and.orcm"))
9894 {
9895 clear_qp_implies (p2mask, p1mask);
9896 }
9897 else if (has_suffix_p (idesc->name, ".andcm")
9898 || has_suffix_p (idesc->name, ".and"))
9899 {
9900 clear_qp_implies (0, p1mask | p2mask);
9901 }
9902 else if (has_suffix_p (idesc->name, ".orcm")
9903 || has_suffix_p (idesc->name, ".or"))
9904 {
9905 clear_qp_mutex (p1mask | p2mask);
9906 clear_qp_implies (p1mask | p2mask, 0);
9907 }
9908 else
9909 {
9910 int added = 0;
9911
9912 clear_qp_implies (p1mask | p2mask, p1mask | p2mask);
9913
9914 /* If one of the PRs is PR0, we call clear_qp_mutex. */
9915 if (p1 == 0 || p2 == 0)
9916 clear_qp_mutex (p1mask | p2mask);
9917 else
9918 added = update_qp_mutex (p1mask | p2mask);
9919
9920 if (CURR_SLOT.qp_regno == 0
9921 || has_suffix_p (idesc->name, ".unc"))
9922 {
9923 if (added == 0 && p1 && p2)
9924 add_qp_mutex (p1mask | p2mask);
9925 if (CURR_SLOT.qp_regno != 0)
9926 {
9927 if (p1)
9928 add_qp_imply (p1, CURR_SLOT.qp_regno);
9929 if (p2)
9930 add_qp_imply (p2, CURR_SLOT.qp_regno);
9931 }
9932 }
9933 }
9934 }
9935 /* Look for mov imm insns into GRs. */
9936 else if (idesc->operands[0] == IA64_OPND_R1
9937 && (idesc->operands[1] == IA64_OPND_IMM22
9938 || idesc->operands[1] == IA64_OPND_IMMU64)
9939 && CURR_SLOT.opnd[1].X_op == O_constant
9940 && (strcmp (idesc->name, "mov") == 0
9941 || strcmp (idesc->name, "movl") == 0))
9942 {
9943 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
9944 if (regno > 0 && regno < NELEMS (gr_values))
9945 {
9946 gr_values[regno].known = 1;
9947 gr_values[regno].value = CURR_SLOT.opnd[1].X_add_number;
9948 gr_values[regno].path = md.path;
9949 if (md.debug_dv)
9950 {
9951 fprintf (stderr, " Know gr%d = ", regno);
9952 fprintf_vma (stderr, gr_values[regno].value);
9953 fputs ("\n", stderr);
9954 }
9955 }
9956 }
9957 /* Look for dep.z imm insns. */
9958 else if (idesc->operands[0] == IA64_OPND_R1
9959 && idesc->operands[1] == IA64_OPND_IMM8
9960 && strcmp (idesc->name, "dep.z") == 0)
9961 {
9962 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
9963 if (regno > 0 && regno < NELEMS (gr_values))
9964 {
9965 valueT value = CURR_SLOT.opnd[1].X_add_number;
9966
9967 if (CURR_SLOT.opnd[3].X_add_number < 64)
9968 value &= ((valueT)1 << CURR_SLOT.opnd[3].X_add_number) - 1;
9969 value <<= CURR_SLOT.opnd[2].X_add_number;
9970 gr_values[regno].known = 1;
9971 gr_values[regno].value = value;
9972 gr_values[regno].path = md.path;
9973 if (md.debug_dv)
9974 {
9975 fprintf (stderr, " Know gr%d = ", regno);
9976 fprintf_vma (stderr, gr_values[regno].value);
9977 fputs ("\n", stderr);
9978 }
9979 }
9980 }
9981 else
9982 {
9983 clear_qp_mutex (qp_changemask);
9984 clear_qp_implies (qp_changemask, qp_changemask);
9985 }
9986 }
9987
9988 /* Return whether the given predicate registers are currently mutex. */
9989
9990 static int
9991 qp_mutex (int p1, int p2, int path)
9992 {
9993 int i;
9994 valueT mask;
9995
9996 if (p1 != p2)
9997 {
9998 mask = ((valueT) 1 << p1) | (valueT) 1 << p2;
9999 for (i = 0; i < qp_mutexeslen; i++)
10000 {
10001 if (qp_mutexes[i].path >= path
10002 && (qp_mutexes[i].prmask & mask) == mask)
10003 return 1;
10004 }
10005 }
10006 return 0;
10007 }
10008
10009 /* Return whether the given resource is in the given insn's list of chks.
10010 Return 1 if the conflict is absolutely determined, 2 if it's a potential
10011 conflict. */
10012
10013 static int
10014 resources_match (struct rsrc *rs,
10015 struct ia64_opcode *idesc,
10016 int note,
10017 int qp_regno,
10018 int path)
10019 {
10020 struct rsrc specs[MAX_SPECS];
10021 int count;
10022
10023 /* If the marked resource's qp_regno and the given qp_regno are mutex,
10024 we don't need to check. One exception is note 11, which indicates that
10025 target predicates are written regardless of PR[qp]. */
10026 if (qp_mutex (rs->qp_regno, qp_regno, path)
10027 && note != 11)
10028 return 0;
10029
10030 count = specify_resource (rs->dependency, idesc, DV_CHK, specs, note, path);
10031 while (count-- > 0)
10032 {
10033 /* UNAT checking is a bit more specific than for other resources. */
10034 if (rs->dependency->specifier == IA64_RS_AR_UNAT
10035 && specs[count].mem_offset.hint
10036 && rs->mem_offset.hint)
10037 {
10038 if (rs->mem_offset.base == specs[count].mem_offset.base)
10039 {
10040 if (((rs->mem_offset.offset >> 3) & 0x3F) ==
10041 ((specs[count].mem_offset.offset >> 3) & 0x3F))
10042 return 1;
10043 else
10044 continue;
10045 }
10046 }
10047
10048 /* Skip apparent PR write conflicts where both writes are an AND or both
10049 writes are an OR. */
10050 if (rs->dependency->specifier == IA64_RS_PR
10051 || rs->dependency->specifier == IA64_RS_PRr
10052 || rs->dependency->specifier == IA64_RS_PR63)
10053 {
10054 if (specs[count].cmp_type != CMP_NONE
10055 && specs[count].cmp_type == rs->cmp_type)
10056 {
10057 if (md.debug_dv)
10058 fprintf (stderr, " %s on parallel compare allowed (PR%d)\n",
10059 dv_mode[rs->dependency->mode],
10060 rs->dependency->specifier != IA64_RS_PR63 ?
10061 specs[count].index : 63);
10062 continue;
10063 }
10064 if (md.debug_dv)
10065 fprintf (stderr,
10066 " %s on parallel compare conflict %s vs %s on PR%d\n",
10067 dv_mode[rs->dependency->mode],
10068 dv_cmp_type[rs->cmp_type],
10069 dv_cmp_type[specs[count].cmp_type],
10070 rs->dependency->specifier != IA64_RS_PR63 ?
10071 specs[count].index : 63);
10072
10073 }
10074
10075 /* If either resource is not specific, conservatively assume a conflict
10076 */
10077 if (!specs[count].specific || !rs->specific)
10078 return 2;
10079 else if (specs[count].index == rs->index)
10080 return 1;
10081 }
10082
10083 return 0;
10084 }
10085
10086 /* Indicate an instruction group break; if INSERT_STOP is non-zero, then
10087 insert a stop to create the break. Update all resource dependencies
10088 appropriately. If QP_REGNO is non-zero, only apply the break to resources
10089 which use the same QP_REGNO and have the link_to_qp_branch flag set.
10090 If SAVE_CURRENT is non-zero, don't affect resources marked by the current
10091 instruction. */
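/* In practice (summarizing the code below): IA64_DVS_IMPLIED and
   IA64_DVS_IMPLIEDF dependencies are satisfied by the stop itself and are
   dropped here, while DVS_DATA, DVS_INSTR and DVS_SPECIFIC entries only
   advance to STATE_STOP and still need an srlz.d or srlz.i (see
   remove_marked_resource ()).  */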
10092
10093 static void
10094 insn_group_break (int insert_stop, int qp_regno, int save_current)
10095 {
10096 int i;
10097
10098 if (insert_stop && md.num_slots_in_use > 0)
10099 PREV_SLOT.end_of_insn_group = 1;
10100
10101 if (md.debug_dv)
10102 {
10103 fprintf (stderr, " Insn group break%s",
10104 (insert_stop ? " (w/stop)" : ""));
10105 if (qp_regno != 0)
10106 fprintf (stderr, " effective for QP=%d", qp_regno);
10107 fprintf (stderr, "\n");
10108 }
10109
10110 i = 0;
10111 while (i < regdepslen)
10112 {
10113 const struct ia64_dependency *dep = regdeps[i].dependency;
10114
10115 if (qp_regno != 0
10116 && regdeps[i].qp_regno != qp_regno)
10117 {
10118 ++i;
10119 continue;
10120 }
10121
10122 if (save_current
10123 && CURR_SLOT.src_file == regdeps[i].file
10124 && CURR_SLOT.src_line == regdeps[i].line)
10125 {
10126 ++i;
10127 continue;
10128 }
10129
10130 /* Clear dependencies which are automatically cleared by a stop, or
10131 those that have reached the appropriate state of insn serialization. */
10132 if (dep->semantics == IA64_DVS_IMPLIED
10133 || dep->semantics == IA64_DVS_IMPLIEDF
10134 || regdeps[i].insn_srlz == STATE_SRLZ)
10135 {
10136 print_dependency ("Removing", i);
10137 regdeps[i] = regdeps[--regdepslen];
10138 }
10139 else
10140 {
10141 if (dep->semantics == IA64_DVS_DATA
10142 || dep->semantics == IA64_DVS_INSTR
10143 || dep->semantics == IA64_DVS_SPECIFIC)
10144 {
10145 if (regdeps[i].insn_srlz == STATE_NONE)
10146 regdeps[i].insn_srlz = STATE_STOP;
10147 if (regdeps[i].data_srlz == STATE_NONE)
10148 regdeps[i].data_srlz = STATE_STOP;
10149 }
10150 ++i;
10151 }
10152 }
10153 }
10154
10155 /* Add the given resource usage spec to the list of active dependencies. */
10156
10157 static void
10158 mark_resource (struct ia64_opcode *idesc ATTRIBUTE_UNUSED,
10159 const struct ia64_dependency *dep ATTRIBUTE_UNUSED,
10160 struct rsrc *spec,
10161 int depind,
10162 int path)
10163 {
10164 if (regdepslen == regdepstotlen)
10165 {
10166 regdepstotlen += 20;
10167 regdeps = XRESIZEVEC (struct rsrc, regdeps, regdepstotlen);
10168 }
10169
10170 regdeps[regdepslen] = *spec;
10171 regdeps[regdepslen].depind = depind;
10172 regdeps[regdepslen].path = path;
10173 regdeps[regdepslen].file = CURR_SLOT.src_file;
10174 regdeps[regdepslen].line = CURR_SLOT.src_line;
10175
10176 print_dependency ("Adding", regdepslen);
10177
10178 ++regdepslen;
10179 }
10180
10181 static void
10182 print_dependency (const char *action, int depind)
10183 {
10184 if (md.debug_dv)
10185 {
10186 fprintf (stderr, " %s %s '%s'",
10187 action, dv_mode[(regdeps[depind].dependency)->mode],
10188 (regdeps[depind].dependency)->name);
10189 if (regdeps[depind].specific && regdeps[depind].index >= 0)
10190 fprintf (stderr, " (%d)", regdeps[depind].index);
10191 if (regdeps[depind].mem_offset.hint)
10192 {
10193 fputs (" ", stderr);
10194 fprintf_vma (stderr, regdeps[depind].mem_offset.base);
10195 fputs ("+", stderr);
10196 fprintf_vma (stderr, regdeps[depind].mem_offset.offset);
10197 }
10198 fprintf (stderr, "\n");
10199 }
10200 }
10201
10202 static void
10203 instruction_serialization (void)
10204 {
10205 int i;
10206 if (md.debug_dv)
10207 fprintf (stderr, " Instruction serialization\n");
10208 for (i = 0; i < regdepslen; i++)
10209 if (regdeps[i].insn_srlz == STATE_STOP)
10210 regdeps[i].insn_srlz = STATE_SRLZ;
10211 }
10212
10213 static void
10214 data_serialization (void)
10215 {
10216 int i = 0;
10217 if (md.debug_dv)
10218 fprintf (stderr, " Data serialization\n");
10219 while (i < regdepslen)
10220 {
10221 if (regdeps[i].data_srlz == STATE_STOP
10222 /* Note: as of 991210, all "other" dependencies are cleared by a
10223 data serialization. This might change with new tables */
10224 || (regdeps[i].dependency)->semantics == IA64_DVS_OTHER)
10225 {
10226 print_dependency ("Removing", i);
10227 regdeps[i] = regdeps[--regdepslen];
10228 }
10229 else
10230 ++i;
10231 }
10232 }
10233
10234 /* Insert stops and serializations as needed to avoid DVs. */
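/* For a DVS_INSTR violation handled in automatic mode, the effect on the
   instruction stream is roughly (illustrative):

     writer ;;
     srlz.i ;;
     reader

   i.e. a stop, a manually inserted srlz.i slot, another stop, and then the
   instruction that triggered the check; the IA64_DVS_DATA case similarly
   inserts an srlz.d.  */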
10235
10236 static void
10237 remove_marked_resource (struct rsrc *rs)
10238 {
10239 switch (rs->dependency->semantics)
10240 {
10241 case IA64_DVS_SPECIFIC:
10242 if (md.debug_dv)
10243 fprintf (stderr, "Implementation-specific, assume worst case...\n");
10244 /* Fall through. */
10245 case IA64_DVS_INSTR:
10246 if (md.debug_dv)
10247 fprintf (stderr, "Inserting instr serialization\n");
10248 if (rs->insn_srlz < STATE_STOP)
10249 insn_group_break (1, 0, 0);
10250 if (rs->insn_srlz < STATE_SRLZ)
10251 {
10252 struct slot oldslot = CURR_SLOT;
10253 /* Manually jam a srlz.i insn into the stream */
10254 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
10255 CURR_SLOT.user_template = -1;
10256 CURR_SLOT.idesc = ia64_find_opcode ("srlz.i");
10257 instruction_serialization ();
10258 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10259 if (++md.num_slots_in_use >= NUM_SLOTS)
10260 emit_one_bundle ();
10261 CURR_SLOT = oldslot;
10262 }
10263 insn_group_break (1, 0, 0);
10264 break;
10265 case IA64_DVS_OTHER: /* as of rev2 (991220) of the DV tables, all
10266 "other" types of DV are eliminated
10267 by a data serialization */
10268 case IA64_DVS_DATA:
10269 if (md.debug_dv)
10270 fprintf (stderr, "Inserting data serialization\n");
10271 if (rs->data_srlz < STATE_STOP)
10272 insn_group_break (1, 0, 0);
10273 {
10274 struct slot oldslot = CURR_SLOT;
10275 /* Manually jam a srlz.d insn into the stream */
10276 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
10277 CURR_SLOT.user_template = -1;
10278 CURR_SLOT.idesc = ia64_find_opcode ("srlz.d");
10279 data_serialization ();
10280 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10281 if (++md.num_slots_in_use >= NUM_SLOTS)
10282 emit_one_bundle ();
10283 CURR_SLOT = oldslot;
10284 }
10285 break;
10286 case IA64_DVS_IMPLIED:
10287 case IA64_DVS_IMPLIEDF:
10288 if (md.debug_dv)
10289 fprintf (stderr, "Inserting stop\n");
10290 insn_group_break (1, 0, 0);
10291 break;
10292 default:
10293 break;
10294 }
10295 }
10296
10297 /* Check the resources used by the given opcode against the current dependency
10298 list.
10299
10300 The check is run once for each execution path encountered. In this case,
10301 a unique execution path is the sequence of instructions following a code
10302 entry point, e.g. the following has three execution paths, one starting
10303 at L0, one at L1, and one at L2.
10304
10305 L0: nop
10306 L1: add
10307 L2: add
10308 br.ret
10309 */
10310
10311 static void
10312 check_dependencies (struct ia64_opcode *idesc)
10313 {
10314 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
10315 int path;
10316 int i;
10317
10318 /* Note that the number of marked resources may change within the
10319 loop if in auto mode. */
10320 i = 0;
10321 while (i < regdepslen)
10322 {
10323 struct rsrc *rs = &regdeps[i];
10324 const struct ia64_dependency *dep = rs->dependency;
10325 int chkind;
10326 int note;
10327 int start_over = 0;
10328
10329 if (dep->semantics == IA64_DVS_NONE
10330 || (chkind = depends_on (rs->depind, idesc)) == -1)
10331 {
10332 ++i;
10333 continue;
10334 }
10335
10336 note = NOTE (opdeps->chks[chkind]);
10337
10338 /* Check this resource against each execution path seen thus far. */
10339 for (path = 0; path <= md.path; path++)
10340 {
10341 int matchtype;
10342
10343 /* If the dependency wasn't on the path being checked, ignore it. */
10344 if (rs->path < path)
10345 continue;
10346
10347 /* If the QP for this insn implies a QP which has branched, don't
10348 bother checking. Ed. NOTE: I don't think this check is terribly
10349 useful; what's the point of generating code which will only be
10350 reached if its QP is zero?
10351 This code was specifically inserted to handle the following code,
10352 based on notes from Intel's DV checking code, where p1 implies p2.
10353
10354 mov r4 = 2
10355 (p2) br.cond L
10356 (p1) mov r4 = 7
10357 */
10358 if (CURR_SLOT.qp_regno != 0)
10359 {
10360 int skip = 0;
10361 int implies;
10362 for (implies = 0; implies < qp_implieslen; implies++)
10363 {
10364 if (qp_implies[implies].path >= path
10365 && qp_implies[implies].p1 == CURR_SLOT.qp_regno
10366 && qp_implies[implies].p2_branched)
10367 {
10368 skip = 1;
10369 break;
10370 }
10371 }
10372 if (skip)
10373 continue;
10374 }
10375
10376 if ((matchtype = resources_match (rs, idesc, note,
10377 CURR_SLOT.qp_regno, path)) != 0)
10378 {
10379 char msg[1024];
10380 char pathmsg[256] = "";
10381 char indexmsg[256] = "";
10382 int certain = (matchtype == 1 && CURR_SLOT.qp_regno == 0);
10383
10384 if (path != 0)
10385 snprintf (pathmsg, sizeof (pathmsg),
10386 " when entry is at label '%s'",
10387 md.entry_labels[path - 1]);
10388 if (matchtype == 1 && rs->index >= 0)
10389 snprintf (indexmsg, sizeof (indexmsg),
10390 ", specific resource number is %d",
10391 rs->index);
10392 snprintf (msg, sizeof (msg),
10393 "Use of '%s' %s %s dependency '%s' (%s)%s%s",
10394 idesc->name,
10395 (certain ? "violates" : "may violate"),
10396 dv_mode[dep->mode], dep->name,
10397 dv_sem[dep->semantics],
10398 pathmsg, indexmsg);
10399
10400 if (md.explicit_mode)
10401 {
10402 as_warn ("%s", msg);
10403 if (path < md.path)
10404 as_warn (_("Only the first path encountering the conflict is reported"));
10405 as_warn_where (rs->file, rs->line,
10406 _("This is the location of the conflicting usage"));
10407 /* Don't bother checking other paths, to avoid duplicating
10408 the same warning */
10409 break;
10410 }
10411 else
10412 {
10413 if (md.debug_dv)
10414 fprintf (stderr, "%s @ %s:%d\n", msg, rs->file, rs->line);
10415
10416 remove_marked_resource (rs);
10417
10418 /* since the set of dependencies has changed, start over */
10419 /* FIXME -- since we're removing dvs as we go, we
10420 probably don't really need to start over... */
10421 start_over = 1;
10422 break;
10423 }
10424 }
10425 }
10426 if (start_over)
10427 i = 0;
10428 else
10429 ++i;
10430 }
10431 }
10432
10433 /* Register new dependencies based on the given opcode. */
10434
10435 static void
10436 mark_resources (struct ia64_opcode *idesc)
10437 {
10438 int i;
10439 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
10440 int add_only_qp_reads = 0;
10441
10442 /* A conditional branch only uses its resources if it is taken; if it is
10443 taken, we stop following that path, and if it is not taken, we register
10444 only its QP reads. The other branch types effectively *always* write
10445 their resources. */
10446 if (is_conditional_branch (idesc) || is_interruption_or_rfi (idesc))
10447 {
10448 add_only_qp_reads = 1;
10449 }
10450
10451 if (md.debug_dv)
10452 fprintf (stderr, "Registering '%s' resource usage\n", idesc->name);
10453
10454 for (i = 0; i < opdeps->nregs; i++)
10455 {
10456 const struct ia64_dependency *dep;
10457 struct rsrc specs[MAX_SPECS];
10458 int note;
10459 int path;
10460 int count;
10461
10462 dep = ia64_find_dependency (opdeps->regs[i]);
10463 note = NOTE (opdeps->regs[i]);
10464
10465 if (add_only_qp_reads
10466 && !(dep->mode == IA64_DV_WAR
10467 && (dep->specifier == IA64_RS_PR
10468 || dep->specifier == IA64_RS_PRr
10469 || dep->specifier == IA64_RS_PR63)))
10470 continue;
10471
10472 count = specify_resource (dep, idesc, DV_REG, specs, note, md.path);
10473
10474 while (count-- > 0)
10475 {
10476 mark_resource (idesc, dep, &specs[count],
10477 DEP (opdeps->regs[i]), md.path);
10478 }
10479
10480 /* The execution path may affect register values, which may in turn
10481 affect which indirect-access resources are accessed. */
10482 switch (dep->specifier)
10483 {
10484 default:
10485 break;
10486 case IA64_RS_CPUID:
10487 case IA64_RS_DBR:
10488 case IA64_RS_IBR:
10489 case IA64_RS_MSR:
10490 case IA64_RS_PKR:
10491 case IA64_RS_PMC:
10492 case IA64_RS_PMD:
10493 case IA64_RS_RR:
10494 for (path = 0; path < md.path; path++)
10495 {
10496 count = specify_resource (dep, idesc, DV_REG, specs, note, path);
10497 while (count-- > 0)
10498 mark_resource (idesc, dep, &specs[count],
10499 DEP (opdeps->regs[i]), path);
10500 }
10501 break;
10502 }
10503 }
10504 }
10505
10506 /* Remove dependencies when they no longer apply. */
10507
10508 static void
10509 update_dependencies (struct ia64_opcode *idesc)
10510 {
10511 int i;
10512
10513 if (strcmp (idesc->name, "srlz.i") == 0)
10514 {
10515 instruction_serialization ();
10516 }
10517 else if (strcmp (idesc->name, "srlz.d") == 0)
10518 {
10519 data_serialization ();
10520 }
10521 else if (is_interruption_or_rfi (idesc)
10522 || is_taken_branch (idesc))
10523 {
10524 /* Although technically the taken branch doesn't clear dependencies
10525 which require a srlz.[id], we don't follow the branch; the next
10526 instruction is assumed to start with a clean slate. */
10527 regdepslen = 0;
10528 md.path = 0;
10529 }
10530 else if (is_conditional_branch (idesc)
10531 && CURR_SLOT.qp_regno != 0)
10532 {
10533 int is_call = strstr (idesc->name, ".call") != NULL;
10534
10535 for (i = 0; i < qp_implieslen; i++)
10536 {
10537 /* If the conditional branch's predicate is implied by the predicate
10538 in an existing dependency, remove that dependency. */
10539 if (qp_implies[i].p2 == CURR_SLOT.qp_regno)
10540 {
10541 int depind = 0;
10542 /* Note that this implied predicate takes a branch so that if
10543 a later insn generates a DV but its predicate implies this
10544 one, we can avoid the false DV warning. */
10545 qp_implies[i].p2_branched = 1;
10546 while (depind < regdepslen)
10547 {
10548 if (regdeps[depind].qp_regno == qp_implies[i].p1)
10549 {
10550 print_dependency ("Removing", depind);
10551 regdeps[depind] = regdeps[--regdepslen];
10552 }
10553 else
10554 ++depind;
10555 }
10556 }
10557 }
10558 /* Any marked resources which have this same predicate should be
10559 cleared, provided that the QP hasn't been modified between the
10560 marking instruction and the branch. */
10561 if (is_call)
10562 {
10563 insn_group_break (0, CURR_SLOT.qp_regno, 1);
10564 }
10565 else
10566 {
10567 i = 0;
10568 while (i < regdepslen)
10569 {
10570 if (regdeps[i].qp_regno == CURR_SLOT.qp_regno
10571 && regdeps[i].link_to_qp_branch
10572 && (regdeps[i].file != CURR_SLOT.src_file
10573 || regdeps[i].line != CURR_SLOT.src_line))
10574 {
10575 /* Treat like a taken branch */
10576 print_dependency ("Removing", i);
10577 regdeps[i] = regdeps[--regdepslen];
10578 }
10579 else
10580 ++i;
10581 }
10582 }
10583 }
10584 }
10585
10586 /* Examine the current instruction for dependency violations. */
10587
10588 static int
10589 check_dv (struct ia64_opcode *idesc)
10590 {
10591 if (md.debug_dv)
10592 {
10593 fprintf (stderr, "Checking %s for violations (line %d, %d/%d)\n",
10594 idesc->name, CURR_SLOT.src_line,
10595 idesc->dependencies->nchks,
10596 idesc->dependencies->nregs);
10597 }
10598
10599 /* Look through the list of currently marked resources; if the current
10600 instruction has a dependency in its chks list which uses that resource,
10601 check against the specific resources used. */
10602 check_dependencies (idesc);
10603
10604 /* Look up the instruction's regdeps (RAW writes, WAW writes, and WAR reads),
10605 then add them to the list of marked resources. */
10606 mark_resources (idesc);
10607
10608 /* There are several types of dependency semantics, and each has its own
10609 requirements for being cleared
10610
10611 Instruction serialization (insns separated by interruption, rfi, or
10612 writer + srlz.i + reader, all in separate groups) clears DVS_INSTR.
10613
10614 Data serialization (instruction serialization, or writer + srlz.d +
10615 reader, where writer and srlz.d are in separate groups) clears
10616 DVS_DATA. (This also clears DVS_OTHER, but that is not guaranteed to
10617 always be the case).
10618
10619 Instruction group break (groups separated by stop, taken branch,
10620 interruption or rfi) clears DVS_IMPLIED and DVS_IMPLIEDF.
10621 */
10622 update_dependencies (idesc);
10623
10624 /* Sometimes, knowing a register value allows us to avoid giving a false DV
10625 warning. Keep track of as many as possible that are useful. */
10626 note_register_values (idesc);
10627
10628 /* We don't need or want this anymore. */
10629 md.mem_offset.hint = 0;
10630
10631 return 0;
10632 }
10633
10634 /* Translate one line of assembly. Pseudo ops and labels do not appear
10635 here. */
10636 void
10637 md_assemble (char *str)
10638 {
10639 char *saved_input_line_pointer, *temp;
10640 const char *mnemonic;
10641 const struct pseudo_opcode *pdesc;
10642 struct ia64_opcode *idesc;
10643 unsigned char qp_regno;
10644 unsigned int flags;
10645 int ch;
10646
10647 saved_input_line_pointer = input_line_pointer;
10648 input_line_pointer = str;
10649
10650 /* extract the opcode (mnemonic): */
10651
10652 ch = get_symbol_name (&temp);
10653 mnemonic = temp;
10654 pdesc = (struct pseudo_opcode *) str_hash_find (md.pseudo_hash, mnemonic);
10655 if (pdesc)
10656 {
10657 (void) restore_line_pointer (ch);
10658 (*pdesc->handler) (pdesc->arg);
10659 goto done;
10660 }
10661
10662 /* Find the instruction descriptor matching the arguments. */
10663
10664 idesc = ia64_find_opcode (mnemonic);
10665 (void) restore_line_pointer (ch);
10666 if (!idesc)
10667 {
10668 as_bad (_("Unknown opcode `%s'"), mnemonic);
10669 goto done;
10670 }
10671
10672 idesc = parse_operands (idesc);
10673 if (!idesc)
10674 goto done;
10675
10676 /* Handle the dynamic ops we can handle now: */
10677 if (idesc->type == IA64_TYPE_DYN)
10678 {
10679 if (strcmp (idesc->name, "add") == 0)
10680 {
10681 if (CURR_SLOT.opnd[2].X_op == O_register
10682 && CURR_SLOT.opnd[2].X_add_number < 4)
10683 mnemonic = "addl";
10684 else
10685 mnemonic = "adds";
10686 ia64_free_opcode (idesc);
10687 idesc = ia64_find_opcode (mnemonic);
10688 }
10689 else if (strcmp (idesc->name, "mov") == 0)
10690 {
10691 enum ia64_opnd opnd1, opnd2;
10692 int rop;
10693
10694 opnd1 = idesc->operands[0];
10695 opnd2 = idesc->operands[1];
10696 if (opnd1 == IA64_OPND_AR3)
10697 rop = 0;
10698 else if (opnd2 == IA64_OPND_AR3)
10699 rop = 1;
10700 else
10701 abort ();
10702 if (CURR_SLOT.opnd[rop].X_op == O_register)
10703 {
10704 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10705 mnemonic = "mov.i";
10706 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10707 mnemonic = "mov.m";
10708 else
10709 rop = -1;
10710 }
10711 else
10712 abort ();
10713 if (rop >= 0)
10714 {
10715 ia64_free_opcode (idesc);
10716 idesc = ia64_find_opcode (mnemonic);
10717 while (idesc != NULL
10718 && (idesc->operands[0] != opnd1
10719 || idesc->operands[1] != opnd2))
10720 idesc = get_next_opcode (idesc);
10721 }
10722 }
10723 }
10724 else if (strcmp (idesc->name, "mov.i") == 0
10725 || strcmp (idesc->name, "mov.m") == 0)
10726 {
10727 enum ia64_opnd opnd1, opnd2;
10728 int rop;
10729
10730 opnd1 = idesc->operands[0];
10731 opnd2 = idesc->operands[1];
10732 if (opnd1 == IA64_OPND_AR3)
10733 rop = 0;
10734 else if (opnd2 == IA64_OPND_AR3)
10735 rop = 1;
10736 else
10737 abort ();
10738 if (CURR_SLOT.opnd[rop].X_op == O_register)
10739 {
10740 char unit = 'a';
10741 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10742 unit = 'i';
10743 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10744 unit = 'm';
10745 if (unit != 'a' && unit != idesc->name [4])
10746 as_bad (_("AR %d can only be accessed by %c-unit"),
10747 (int) (CURR_SLOT.opnd[rop].X_add_number - REG_AR),
10748 TOUPPER (unit));
10749 }
10750 }
10751 else if (strcmp (idesc->name, "hint.b") == 0)
10752 {
10753 switch (md.hint_b)
10754 {
10755 case hint_b_ok:
10756 break;
10757 case hint_b_warning:
10758 as_warn (_("hint.b may be treated as nop"));
10759 break;
10760 case hint_b_error:
10761 as_bad (_("hint.b shouldn't be used"));
10762 break;
10763 }
10764 }
10765
10766 qp_regno = 0;
10767 if (md.qp.X_op == O_register)
10768 {
10769 qp_regno = md.qp.X_add_number - REG_P;
10770 md.qp.X_op = O_absent;
10771 }
10772
10773 flags = idesc->flags;
10774
10775 if ((flags & IA64_OPCODE_FIRST) != 0)
10776 {
10777 /* The alignment frag has to end with a stop bit only if the
10778 next instruction after the alignment directive has to be
10779 the first instruction in an instruction group. */
10780 if (align_frag)
10781 {
10782 while (align_frag->fr_type != rs_align_code)
10783 {
10784 align_frag = align_frag->fr_next;
10785 if (!align_frag)
10786 break;
10787 }
10788 /* align_frag can be NULL if there are directives in
10789 between. */
10790 if (align_frag && align_frag->fr_next == frag_now)
10791 align_frag->tc_frag_data = 1;
10792 }
10793
10794 insn_group_break (1, 0, 0);
10795 }
10796 align_frag = NULL;
10797
10798 if ((flags & IA64_OPCODE_NO_PRED) != 0 && qp_regno != 0)
10799 {
10800 as_bad (_("`%s' cannot be predicated"), idesc->name);
10801 goto done;
10802 }
10803
10804 /* Build the instruction. */
10805 CURR_SLOT.qp_regno = qp_regno;
10806 CURR_SLOT.idesc = idesc;
10807 CURR_SLOT.src_file = as_where (&CURR_SLOT.src_line);
10808 dwarf2_where (&CURR_SLOT.debug_line);
10809 dwarf2_consume_line_info ();
10810
10811 /* Add unwind entries, if there are any. */
10812 if (unwind.current_entry)
10813 {
10814 CURR_SLOT.unwind_record = unwind.current_entry;
10815 unwind.current_entry = NULL;
10816 }
10817 if (unwind.pending_saves)
10818 {
10819 if (unwind.pending_saves->next)
10820 {
10821 /* Attach the next pending save to the next slot so that its
10822 slot number will get set correctly. */
10823 add_unwind_entry (unwind.pending_saves->next, NOT_A_CHAR);
10824 unwind.pending_saves = &unwind.pending_saves->next->r.record.p;
10825 }
10826 else
10827 unwind.pending_saves = NULL;
10828 }
10829 if (unwind.proc_pending.sym && S_IS_DEFINED (unwind.proc_pending.sym))
10830 unwind.insn = 1;
10831
10832 /* Check for dependency violations. */
10833 if (md.detect_dv)
10834 check_dv (idesc);
10835
10836 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10837 if (++md.num_slots_in_use >= NUM_SLOTS)
10838 emit_one_bundle ();
10839
10840 if ((flags & IA64_OPCODE_LAST) != 0)
10841 insn_group_break (1, 0, 0);
10842
10843 md.last_text_seg = now_seg;
10844
10845 done:
10846 input_line_pointer = saved_input_line_pointer;
10847 }
10848
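/* Added illustration (standalone sketch, not part of the assembler; the
   helper name is invented): the rule md_assemble applies above when it
   resolves the generic "add rd = imm, rs" pseudo-op.  The addl form
   carries a 22-bit immediate but can only encode r0..r3 as its source
   register, so any other source register falls back to the 14-bit adds
   form.  */
static const char *
ia64_pick_add_form_sketch (int src_gr_number)
{
  /* Mirrors the X_add_number < 4 test in md_assemble.  */
  return src_gr_number < 4 ? "addl" : "adds";
}
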
10849 /* Called when symbol NAME cannot be found in the symbol table.
10850 Should be used for dynamically valued symbols only. */
10851
10852 symbolS *
10853 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
10854 {
10855 return 0;
10856 }
10857
10858 /* Called for any expression that cannot be recognized. When the
10859 function is called, `input_line_pointer' will point to the start of
10860 the expression. */
10861
10862 void
10863 md_operand (expressionS *e)
10864 {
10865 switch (*input_line_pointer)
10866 {
10867 case '[':
10868 ++input_line_pointer;
10869 expression_and_evaluate (e);
10870 if (*input_line_pointer != ']')
10871 {
10872 as_bad (_("Closing bracket missing"));
10873 goto err;
10874 }
10875 else
10876 {
10877 if (e->X_op != O_register
10878 || e->X_add_number < REG_GR
10879 || e->X_add_number > REG_GR + 127)
10880 {
10881 as_bad (_("Index must be a general register"));
10882 e->X_add_number = REG_GR;
10883 }
10884
10885 ++input_line_pointer;
10886 e->X_op = O_index;
10887 }
10888 break;
10889
10890 default:
10891 break;
10892 }
10893 return;
10894
10895 err:
10896 ignore_rest_of_line ();
10897 }
10898
10899 /* Return 1 if it's OK to adjust a reloc by replacing the symbol with
10900 a section symbol plus some offset. For relocs involving @fptr()
10901 directives, we don't want such adjustments since we need to have the
10902 original symbol's name in the reloc. */
10903 int
10904 ia64_fix_adjustable (fixS *fix)
10905 {
10906 /* Prevent all adjustments to global symbols */
10907 if (S_IS_EXTERNAL (fix->fx_addsy) || S_IS_WEAK (fix->fx_addsy))
10908 return 0;
10909
10910 switch (fix->fx_r_type)
10911 {
10912 case BFD_RELOC_IA64_FPTR64I:
10913 case BFD_RELOC_IA64_FPTR32MSB:
10914 case BFD_RELOC_IA64_FPTR32LSB:
10915 case BFD_RELOC_IA64_FPTR64MSB:
10916 case BFD_RELOC_IA64_FPTR64LSB:
10917 case BFD_RELOC_IA64_LTOFF_FPTR22:
10918 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10919 return 0;
10920 default:
10921 break;
10922 }
10923
10924 return 1;
10925 }
10926
10927 int
10928 ia64_force_relocation (fixS *fix)
10929 {
10930 switch (fix->fx_r_type)
10931 {
10932 case BFD_RELOC_IA64_FPTR64I:
10933 case BFD_RELOC_IA64_FPTR32MSB:
10934 case BFD_RELOC_IA64_FPTR32LSB:
10935 case BFD_RELOC_IA64_FPTR64MSB:
10936 case BFD_RELOC_IA64_FPTR64LSB:
10937
10938 case BFD_RELOC_IA64_LTOFF22:
10939 case BFD_RELOC_IA64_LTOFF64I:
10940 case BFD_RELOC_IA64_LTOFF_FPTR22:
10941 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10942 case BFD_RELOC_IA64_PLTOFF22:
10943 case BFD_RELOC_IA64_PLTOFF64I:
10944 case BFD_RELOC_IA64_PLTOFF64MSB:
10945 case BFD_RELOC_IA64_PLTOFF64LSB:
10946
10947 case BFD_RELOC_IA64_LTOFF22X:
10948 case BFD_RELOC_IA64_LDXMOV:
10949 return 1;
10950
10951 default:
10952 break;
10953 }
10954
10955 return generic_force_reloc (fix);
10956 }
10957
10958 /* Decide what point a pc-relative relocation is relative to,
10959 relative to the pc-relative fixup. Er, relatively speaking. */
10960 long
10961 ia64_pcrel_from_section (fixS *fix, segT sec)
10962 {
10963 unsigned long off = fix->fx_frag->fr_address + fix->fx_where;
10964
10965 if (bfd_section_flags (sec) & SEC_CODE)
10966 off &= ~0xfUL;
10967
10968 return off;
10969 }
10970
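/* Added illustration (standalone sketch; the function name is invented):
   the masking in ia64_pcrel_from_section above means that in code
   sections pc-relative values are measured from the start of the
   containing 16-byte bundle rather than from the fixup address itself.  */
static unsigned long
ia64_pcrel_base_sketch (unsigned long frag_address, unsigned long where,
                        int is_code_section)
{
  unsigned long off = frag_address + where;

  if (is_code_section)
    off &= ~0xfUL;      /* e.g. a fixup at 0x40012 is based at 0x40010.  */
  return off;
}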
10971
10972 /* Used to emit section-relative relocs for the dwarf2 debug data. */
10973 void
10974 ia64_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
10975 {
10976 expressionS exp;
10977
10978 exp.X_op = O_pseudo_fixup;
10979 exp.X_op_symbol = pseudo_func[FUNC_SEC_RELATIVE].u.sym;
10980 exp.X_add_number = 0;
10981 exp.X_add_symbol = symbol;
10982 emit_expr (&exp, size);
10983 }
10984
10985 /* This is called whenever some data item (not an instruction) needs a
10986 fixup. We pick the right reloc code depending on the byteorder
10987 currently in effect. */
10988 void
10989 ia64_cons_fix_new (fragS *f, int where, int nbytes, expressionS *exp,
10990 bfd_reloc_code_real_type code)
10991 {
10992 fixS *fix;
10993
10994 switch (nbytes)
10995 {
10996 /* There are no relocs for 8 and 16 bit quantities, but we allow
10997 them here since they will work fine as long as the expression
10998 is fully defined at the end of the pass over the source file. */
10999 case 1: code = BFD_RELOC_8; break;
11000 case 2: code = BFD_RELOC_16; break;
11001 case 4:
11002 if (target_big_endian)
11003 code = BFD_RELOC_IA64_DIR32MSB;
11004 else
11005 code = BFD_RELOC_IA64_DIR32LSB;
11006 break;
11007
11008 case 8:
11009 /* In 32-bit mode, data8 could mean function descriptors too. */
11010 if (exp->X_op == O_pseudo_fixup
11011 && exp->X_op_symbol
11012 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC
11013 && !(md.flags & EF_IA_64_ABI64))
11014 {
11015 if (target_big_endian)
11016 code = BFD_RELOC_IA64_IPLTMSB;
11017 else
11018 code = BFD_RELOC_IA64_IPLTLSB;
11019 exp->X_op = O_symbol;
11020 break;
11021 }
11022 else
11023 {
11024 if (target_big_endian)
11025 code = BFD_RELOC_IA64_DIR64MSB;
11026 else
11027 code = BFD_RELOC_IA64_DIR64LSB;
11028 break;
11029 }
11030
11031 case 16:
11032 if (exp->X_op == O_pseudo_fixup
11033 && exp->X_op_symbol
11034 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC)
11035 {
11036 if (target_big_endian)
11037 code = BFD_RELOC_IA64_IPLTMSB;
11038 else
11039 code = BFD_RELOC_IA64_IPLTLSB;
11040 exp->X_op = O_symbol;
11041 break;
11042 }
11043 /* FALLTHRU */
11044
11045 default:
11046 as_bad (_("Unsupported fixup size %d"), nbytes);
11047 ignore_rest_of_line ();
11048 return;
11049 }
11050
11051 if (exp->X_op == O_pseudo_fixup)
11052 {
11053 exp->X_op = O_symbol;
11054 code = ia64_gen_real_reloc_type (exp->X_op_symbol, code);
11055 /* ??? If code unchanged, unsupported. */
11056 }
11057
11058 fix = fix_new_exp (f, where, nbytes, exp, 0, code);
11059 /* We need to store the byte order in effect in case we're going
11060 to fix an 8 or 16 bit relocation (for which there are no real
11061 relocs available). See md_apply_fix(). */
11062 fix->tc_fix_data.bigendian = target_big_endian;
11063 }
11064
11065 /* Return the actual relocation we wish to associate with the pseudo
11066 reloc described by SYM and R_TYPE. SYM should be one of the
11067 symbols in the pseudo_func array, or NULL. */
11068
11069 static bfd_reloc_code_real_type
11070 ia64_gen_real_reloc_type (struct symbol *sym, bfd_reloc_code_real_type r_type)
11071 {
11072 bfd_reloc_code_real_type newr = 0;
11073 const char *type = NULL, *suffix = "";
11074
11075 if (sym == NULL)
11076 {
11077 return r_type;
11078 }
11079
11080 switch (S_GET_VALUE (sym))
11081 {
11082 case FUNC_FPTR_RELATIVE:
11083 switch (r_type)
11084 {
11085 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_FPTR64I; break;
11086 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_FPTR32MSB; break;
11087 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_FPTR32LSB; break;
11088 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_FPTR64MSB; break;
11089 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_FPTR64LSB; break;
11090 default: type = "FPTR"; break;
11091 }
11092 break;
11093
11094 case FUNC_GP_RELATIVE:
11095 switch (r_type)
11096 {
11097 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_GPREL22; break;
11098 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_GPREL64I; break;
11099 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_GPREL32MSB; break;
11100 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_GPREL32LSB; break;
11101 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_GPREL64MSB; break;
11102 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_GPREL64LSB; break;
11103 default: type = "GPREL"; break;
11104 }
11105 break;
11106
11107 case FUNC_LT_RELATIVE:
11108 switch (r_type)
11109 {
11110 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_LTOFF22; break;
11111 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_LTOFF64I; break;
11112 default: type = "LTOFF"; break;
11113 }
11114 break;
11115
11116 case FUNC_LT_RELATIVE_X:
11117 switch (r_type)
11118 {
11119 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_LTOFF22X; break;
11120 default: type = "LTOFF"; suffix = "X"; break;
11121 }
11122 break;
11123
11124 case FUNC_PC_RELATIVE:
11125 switch (r_type)
11126 {
11127 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_PCREL22; break;
11128 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_PCREL64I; break;
11129 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_PCREL32MSB; break;
11130 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_PCREL32LSB; break;
11131 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_PCREL64MSB; break;
11132 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_PCREL64LSB; break;
11133 default: type = "PCREL"; break;
11134 }
11135 break;
11136
11137 case FUNC_PLT_RELATIVE:
11138 switch (r_type)
11139 {
11140 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_PLTOFF22; break;
11141 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_PLTOFF64I; break;
11142 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_PLTOFF64MSB;break;
11143 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_PLTOFF64LSB;break;
11144 default: type = "PLTOFF"; break;
11145 }
11146 break;
11147
11148 case FUNC_SEC_RELATIVE:
11149 switch (r_type)
11150 {
11151 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_SECREL32MSB;break;
11152 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_SECREL32LSB;break;
11153 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_SECREL64MSB;break;
11154 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_SECREL64LSB;break;
11155 default: type = "SECREL"; break;
11156 }
11157 break;
11158
11159 case FUNC_SEG_RELATIVE:
11160 switch (r_type)
11161 {
11162 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_SEGREL32MSB;break;
11163 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_SEGREL32LSB;break;
11164 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_SEGREL64MSB;break;
11165 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_SEGREL64LSB;break;
11166 default: type = "SEGREL"; break;
11167 }
11168 break;
11169
11170 case FUNC_LTV_RELATIVE:
11171 switch (r_type)
11172 {
11173 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_LTV32MSB; break;
11174 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_LTV32LSB; break;
11175 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_LTV64MSB; break;
11176 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_LTV64LSB; break;
11177 default: type = "LTV"; break;
11178 }
11179 break;
11180
11181 case FUNC_LT_FPTR_RELATIVE:
11182 switch (r_type)
11183 {
11184 case BFD_RELOC_IA64_IMM22:
11185 newr = BFD_RELOC_IA64_LTOFF_FPTR22; break;
11186 case BFD_RELOC_IA64_IMM64:
11187 newr = BFD_RELOC_IA64_LTOFF_FPTR64I; break;
11188 case BFD_RELOC_IA64_DIR32MSB:
11189 newr = BFD_RELOC_IA64_LTOFF_FPTR32MSB; break;
11190 case BFD_RELOC_IA64_DIR32LSB:
11191 newr = BFD_RELOC_IA64_LTOFF_FPTR32LSB; break;
11192 case BFD_RELOC_IA64_DIR64MSB:
11193 newr = BFD_RELOC_IA64_LTOFF_FPTR64MSB; break;
11194 case BFD_RELOC_IA64_DIR64LSB:
11195 newr = BFD_RELOC_IA64_LTOFF_FPTR64LSB; break;
11196 default:
11197 type = "LTOFF_FPTR"; break;
11198 }
11199 break;
11200
11201 case FUNC_TP_RELATIVE:
11202 switch (r_type)
11203 {
11204 case BFD_RELOC_IA64_IMM14: newr = BFD_RELOC_IA64_TPREL14; break;
11205 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_TPREL22; break;
11206 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_TPREL64I; break;
11207 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_TPREL64MSB; break;
11208 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_TPREL64LSB; break;
11209 default: type = "TPREL"; break;
11210 }
11211 break;
11212
11213 case FUNC_LT_TP_RELATIVE:
11214 switch (r_type)
11215 {
11216 case BFD_RELOC_IA64_IMM22:
11217 newr = BFD_RELOC_IA64_LTOFF_TPREL22; break;
11218 default:
11219 type = "LTOFF_TPREL"; break;
11220 }
11221 break;
11222
11223 case FUNC_DTP_MODULE:
11224 switch (r_type)
11225 {
11226 case BFD_RELOC_IA64_DIR64MSB:
11227 newr = BFD_RELOC_IA64_DTPMOD64MSB; break;
11228 case BFD_RELOC_IA64_DIR64LSB:
11229 newr = BFD_RELOC_IA64_DTPMOD64LSB; break;
11230 default:
11231 type = "DTPMOD"; break;
11232 }
11233 break;
11234
11235 case FUNC_LT_DTP_MODULE:
11236 switch (r_type)
11237 {
11238 case BFD_RELOC_IA64_IMM22:
11239 newr = BFD_RELOC_IA64_LTOFF_DTPMOD22; break;
11240 default:
11241 type = "LTOFF_DTPMOD"; break;
11242 }
11243 break;
11244
11245 case FUNC_DTP_RELATIVE:
11246 switch (r_type)
11247 {
11248 case BFD_RELOC_IA64_DIR32MSB:
11249 newr = BFD_RELOC_IA64_DTPREL32MSB; break;
11250 case BFD_RELOC_IA64_DIR32LSB:
11251 newr = BFD_RELOC_IA64_DTPREL32LSB; break;
11252 case BFD_RELOC_IA64_DIR64MSB:
11253 newr = BFD_RELOC_IA64_DTPREL64MSB; break;
11254 case BFD_RELOC_IA64_DIR64LSB:
11255 newr = BFD_RELOC_IA64_DTPREL64LSB; break;
11256 case BFD_RELOC_IA64_IMM14:
11257 newr = BFD_RELOC_IA64_DTPREL14; break;
11258 case BFD_RELOC_IA64_IMM22:
11259 newr = BFD_RELOC_IA64_DTPREL22; break;
11260 case BFD_RELOC_IA64_IMM64:
11261 newr = BFD_RELOC_IA64_DTPREL64I; break;
11262 default:
11263 type = "DTPREL"; break;
11264 }
11265 break;
11266
11267 case FUNC_LT_DTP_RELATIVE:
11268 switch (r_type)
11269 {
11270 case BFD_RELOC_IA64_IMM22:
11271 newr = BFD_RELOC_IA64_LTOFF_DTPREL22; break;
11272 default:
11273 type = "LTOFF_DTPREL"; break;
11274 }
11275 break;
11276
11277 case FUNC_IPLT_RELOC:
11278 switch (r_type)
11279 {
11280 case BFD_RELOC_IA64_IPLTMSB: return r_type;
11281 case BFD_RELOC_IA64_IPLTLSB: return r_type;
11282 default: type = "IPLT"; break;
11283 }
11284 break;
11285
11286 #ifdef TE_VMS
11287 case FUNC_SLOTCOUNT_RELOC:
11288 return DUMMY_RELOC_IA64_SLOTCOUNT;
11289 #endif
11290
11291 default:
11292 abort ();
11293 }
11294
11295 if (newr)
11296 return newr;
11297 else
11298 {
11299 int width;
11300
11301 if (!type)
11302 abort ();
11303 switch (r_type)
11304 {
11305 case BFD_RELOC_IA64_DIR32MSB: width = 32; suffix = "MSB"; break;
11306 case BFD_RELOC_IA64_DIR32LSB: width = 32; suffix = "LSB"; break;
11307 case BFD_RELOC_IA64_DIR64MSB: width = 64; suffix = "MSB"; break;
11308 case BFD_RELOC_IA64_DIR64LSB: width = 64; suffix = "LSB"; break;
11309 case BFD_RELOC_UNUSED: width = 13; break;
11310 case BFD_RELOC_IA64_IMM14: width = 14; break;
11311 case BFD_RELOC_IA64_IMM22: width = 22; break;
11312 case BFD_RELOC_IA64_IMM64: width = 64; suffix = "I"; break;
11313 default: abort ();
11314 }
11315
11316 /* This should be an error, but since previously there wasn't any
11317 diagnostic here, don't make it fail because of this for now. */
11318 as_warn (_("Cannot express %s%d%s relocation"), type, width, suffix);
11319 return r_type;
11320 }
11321 }
11322
11323 /* Here is where we generate the appropriate reloc for pseudo relocation
11324 functions. */
11325 void
11326 ia64_validate_fix (fixS *fix)
11327 {
11328 switch (fix->fx_r_type)
11329 {
11330 case BFD_RELOC_IA64_FPTR64I:
11331 case BFD_RELOC_IA64_FPTR32MSB:
11332 case BFD_RELOC_IA64_FPTR64LSB:
11333 case BFD_RELOC_IA64_LTOFF_FPTR22:
11334 case BFD_RELOC_IA64_LTOFF_FPTR64I:
11335 if (fix->fx_offset != 0)
11336 as_bad_where (fix->fx_file, fix->fx_line,
11337 _("No addend allowed in @fptr() relocation"));
11338 break;
11339 default:
11340 break;
11341 }
11342 }
11343
11344 static void
11345 fix_insn (fixS *fix, const struct ia64_operand *odesc, valueT value)
11346 {
11347 bfd_vma insn[3], t0, t1, control_bits;
11348 const char *err;
11349 char *fixpos;
11350 long slot;
11351
11352 slot = fix->fx_where & 0x3;
11353 fixpos = fix->fx_frag->fr_literal + (fix->fx_where - slot);
11354
11355 /* Bundles are always in little-endian byte order */
11356 t0 = bfd_getl64 (fixpos);
11357 t1 = bfd_getl64 (fixpos + 8);
11358 control_bits = t0 & 0x1f;
11359 insn[0] = (t0 >> 5) & 0x1ffffffffffLL;
11360 insn[1] = ((t0 >> 46) & 0x3ffff) | ((t1 & 0x7fffff) << 18);
11361 insn[2] = (t1 >> 23) & 0x1ffffffffffLL;
11362
11363 err = NULL;
11364 if (odesc - elf64_ia64_operands == IA64_OPND_IMMU64)
11365 {
11366 insn[1] = (value >> 22) & 0x1ffffffffffLL;
11367 insn[2] |= (((value & 0x7f) << 13)
11368 | (((value >> 7) & 0x1ff) << 27)
11369 | (((value >> 16) & 0x1f) << 22)
11370 | (((value >> 21) & 0x1) << 21)
11371 | (((value >> 63) & 0x1) << 36));
11372 }
11373 else if (odesc - elf64_ia64_operands == IA64_OPND_IMMU62)
11374 {
11375 if (value & ~0x3fffffffffffffffULL)
11376 err = _("integer operand out of range");
11377 insn[1] = (value >> 21) & 0x1ffffffffffLL;
11378 insn[2] |= (((value & 0xfffff) << 6) | (((value >> 20) & 0x1) << 36));
11379 }
11380 else if (odesc - elf64_ia64_operands == IA64_OPND_TGT64)
11381 {
11382 value >>= 4;
11383 insn[1] = ((value >> 20) & 0x7fffffffffLL) << 2;
11384 insn[2] |= ((((value >> 59) & 0x1) << 36)
11385 | (((value >> 0) & 0xfffff) << 13));
11386 }
11387 else
11388 err = (*odesc->insert) (odesc, value, insn + slot);
11389
11390 if (err)
11391 as_bad_where (fix->fx_file, fix->fx_line, "%s", err);
11392
11393 t0 = control_bits | (insn[0] << 5) | (insn[1] << 46);
11394 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
11395 number_to_chars_littleendian (fixpos + 0, t0, 8);
11396 number_to_chars_littleendian (fixpos + 8, t1, 8);
11397 }
11398
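/* Added illustration (standalone sketch, not used by the assembler; the
   function name is invented): the shifts in fix_insn above become clearer
   when written as a plain bundle decoder.  A 16-byte IA-64 bundle, always
   stored little-endian, holds a 5-bit template followed by three 41-bit
   instruction slots.  */
static void
ia64_unpack_bundle_sketch (const unsigned char b[16],
                           unsigned long long *template_bits,
                           unsigned long long slot[3])
{
  unsigned long long t0 = 0, t1 = 0;
  int i;

  /* Little-endian 64-bit loads, like bfd_getl64 above.  */
  for (i = 7; i >= 0; i--)
    {
      t0 = (t0 << 8) | b[i];
      t1 = (t1 << 8) | b[i + 8];
    }

  *template_bits = t0 & 0x1f;                    /* bits 0..4 */
  slot[0] = (t0 >> 5) & 0x1ffffffffffULL;        /* bits 5..45 */
  slot[1] = ((t0 >> 46) & 0x3ffff)               /* bits 46..63 ... */
            | ((t1 & 0x7fffff) << 18);           /* ... plus 64..86 */
  slot[2] = (t1 >> 23) & 0x1ffffffffffULL;       /* bits 87..127 */
}
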
11399 /* Attempt to simplify or even eliminate a fixup. The return value is
11400 ignored; perhaps it was once meaningful, but now it is historical.
11401 To indicate that a fixup has been eliminated, set FIXP->FX_DONE.
11402
11403 If fixp->fx_addsy is non-NULL, we'll have to generate a reloc entry
11404 (if possible). */
11405
11406 void
11407 md_apply_fix (fixS *fix, valueT *valP, segT seg ATTRIBUTE_UNUSED)
11408 {
11409 char *fixpos;
11410 valueT value = *valP;
11411
11412 fixpos = fix->fx_frag->fr_literal + fix->fx_where;
11413
11414 if (fix->fx_pcrel)
11415 {
11416 switch (fix->fx_r_type)
11417 {
11418 case BFD_RELOC_IA64_PCREL21B: break;
11419 case BFD_RELOC_IA64_PCREL21BI: break;
11420 case BFD_RELOC_IA64_PCREL21F: break;
11421 case BFD_RELOC_IA64_PCREL21M: break;
11422 case BFD_RELOC_IA64_PCREL60B: break;
11423 case BFD_RELOC_IA64_PCREL22: break;
11424 case BFD_RELOC_IA64_PCREL64I: break;
11425 case BFD_RELOC_IA64_PCREL32MSB: break;
11426 case BFD_RELOC_IA64_PCREL32LSB: break;
11427 case BFD_RELOC_IA64_PCREL64MSB: break;
11428 case BFD_RELOC_IA64_PCREL64LSB: break;
11429 default:
11430 fix->fx_r_type = ia64_gen_real_reloc_type (pseudo_func[FUNC_PC_RELATIVE].u.sym,
11431 fix->fx_r_type);
11432 break;
11433 }
11434 }
11435 if (fix->fx_addsy)
11436 {
11437 switch ((unsigned) fix->fx_r_type)
11438 {
11439 case BFD_RELOC_UNUSED:
11440 /* This must be a TAG13 or TAG13b operand. There are no external
11441 relocs defined for them, so we must give an error. */
11442 as_bad_where (fix->fx_file, fix->fx_line,
11443 _("%s must have a constant value"),
11444 elf64_ia64_operands[fix->tc_fix_data.opnd].desc);
11445 fix->fx_done = 1;
11446 return;
11447
11448 case BFD_RELOC_IA64_TPREL14:
11449 case BFD_RELOC_IA64_TPREL22:
11450 case BFD_RELOC_IA64_TPREL64I:
11451 case BFD_RELOC_IA64_LTOFF_TPREL22:
11452 case BFD_RELOC_IA64_LTOFF_DTPMOD22:
11453 case BFD_RELOC_IA64_DTPREL14:
11454 case BFD_RELOC_IA64_DTPREL22:
11455 case BFD_RELOC_IA64_DTPREL64I:
11456 case BFD_RELOC_IA64_LTOFF_DTPREL22:
11457 S_SET_THREAD_LOCAL (fix->fx_addsy);
11458 break;
11459
11460 #ifdef TE_VMS
11461 case DUMMY_RELOC_IA64_SLOTCOUNT:
11462 as_bad_where (fix->fx_file, fix->fx_line,
11463 _("cannot resolve @slotcount parameter"));
11464 fix->fx_done = 1;
11465 return;
11466 #endif
11467
11468 default:
11469 break;
11470 }
11471 }
11472 else if (fix->tc_fix_data.opnd == IA64_OPND_NIL)
11473 {
11474 #ifdef TE_VMS
11475 if (fix->fx_r_type == DUMMY_RELOC_IA64_SLOTCOUNT)
11476 {
11477 /* For @slotcount, convert an address difference into a slot
11478 difference. */
11479 valueT v;
11480
11481 v = (value >> 4) * 3;
11482 switch (value & 0x0f)
11483 {
11484 case 0:
11485 case 1:
11486 case 2:
11487 v += value & 0x0f;
11488 break;
11489 case 0x0f:
11490 v += 2;
11491 break;
11492 case 0x0e:
11493 v += 1;
11494 break;
11495 default:
11496 as_bad (_("invalid @slotcount value"));
11497 }
11498 value = v;
11499 }
11500 #endif
11501
11502 if (fix->tc_fix_data.bigendian)
11503 number_to_chars_bigendian (fixpos, value, fix->fx_size);
11504 else
11505 number_to_chars_littleendian (fixpos, value, fix->fx_size);
11506 fix->fx_done = 1;
11507 }
11508 else
11509 {
11510 fix_insn (fix, elf64_ia64_operands + fix->tc_fix_data.opnd, value);
11511 fix->fx_done = 1;
11512 }
11513 }
11514
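/* Added illustration (standalone sketch; the helper name is invented):
   the @slotcount conversion in md_apply_fix above, spelled out.  Each
   16-byte bundle holds three slots at byte offsets 0, 1 and 2, so a low
   nibble of 0xf or 0xe in the byte difference means the subtraction
   borrowed one or two slots from the bundle part.  */
static long
ia64_slotcount_sketch (unsigned long byte_diff, int *valid)
{
  long slots = (long) (byte_diff >> 4) * 3;

  *valid = 1;
  switch (byte_diff & 0x0f)
    {
    case 0: case 1: case 2:
      slots += byte_diff & 0x0f;
      break;
    case 0x0f:          /* e.g. 0x1f -> 3 + 2 = 5 slots */
      slots += 2;
      break;
    case 0x0e:          /* e.g. 0x1e -> 3 + 1 = 4 slots */
      slots += 1;
      break;
    default:
      *valid = 0;       /* not a difference of two slot addresses */
      break;
    }
  return slots;
}
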
11515 /* Generate the BFD reloc to be stuck in the object file from the
11516 fixup used internally in the assembler. */
11517
11518 arelent *
11519 tc_gen_reloc (asection *sec ATTRIBUTE_UNUSED, fixS *fixp)
11520 {
11521 arelent *reloc;
11522
11523 reloc = XNEW (arelent);
11524 reloc->sym_ptr_ptr = XNEW (asymbol *);
11525 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
11526 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
11527 reloc->addend = fixp->fx_offset;
11528 reloc->howto = bfd_reloc_type_lookup (stdoutput, fixp->fx_r_type);
11529
11530 if (!reloc->howto)
11531 {
11532 as_bad_where (fixp->fx_file, fixp->fx_line,
11533 _("Cannot represent %s relocation in object file"),
11534 bfd_get_reloc_code_name (fixp->fx_r_type));
11535 free (reloc);
11536 return NULL;
11537 }
11538 return reloc;
11539 }
11540
11541 /* Turn a string in input_line_pointer into a floating point constant
11542 of type TYPE, and store the appropriate bytes in *LIT. The number
11543 of LITTLENUMS emitted is stored in *SIZE. An error message is
11544 returned, or NULL on OK. */
11545
11546 const char *
11547 md_atof (int type, char *lit, int *size)
11548 {
11549 LITTLENUM_TYPE words[MAX_LITTLENUMS];
11550 char *t;
11551 int prec;
11552
11553 switch (type)
11554 {
11555 /* IEEE floats */
11556 case 'f':
11557 case 'F':
11558 case 's':
11559 case 'S':
11560 prec = 2;
11561 break;
11562
11563 case 'd':
11564 case 'D':
11565 case 'r':
11566 case 'R':
11567 prec = 4;
11568 break;
11569
11570 case 'x':
11571 case 'X':
11572 case 'p':
11573 case 'P':
11574 prec = 5;
11575 break;
11576
11577 default:
11578 *size = 0;
11579 return _("Unrecognized or unsupported floating point constant");
11580 }
11581 t = atof_ieee (input_line_pointer, type, words);
11582 if (t)
11583 input_line_pointer = t;
11584
11585 (*ia64_float_to_chars) (lit, words, prec);
11586
11587 if (type == 'X')
11588 {
11589 /* It is 10 byte floating point with 6 byte padding. */
11590 memset (&lit [10], 0, 6);
11591 *size = 8 * sizeof (LITTLENUM_TYPE);
11592 }
11593 else
11594 *size = prec * sizeof (LITTLENUM_TYPE);
11595
11596 return NULL;
11597 }
11598
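/* Added illustration (standalone sketch; the helper name is invented):
   the number of bytes md_atof above emits for each floating-point type
   letter, assuming the usual 2-byte LITTLENUM_TYPE.  */
static int
ia64_float_size_sketch (int type)
{
  switch (type)
    {
    case 'f': case 'F': case 's': case 'S':
      return 2 * 2;     /* single precision */
    case 'd': case 'D': case 'r': case 'R':
      return 4 * 2;     /* double precision */
    case 'x': case 'p': case 'P':
      return 5 * 2;     /* 80-bit extended precision */
    case 'X':
      return 8 * 2;     /* extended, padded out to 16 bytes */
    default:
      return 0;         /* unsupported type letter */
    }
}
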
11599 /* Handle ia64 specific semantics of the align directive. */
11600
11601 void
11602 ia64_md_do_align (int n ATTRIBUTE_UNUSED,
11603 const char *fill ATTRIBUTE_UNUSED,
11604 int len ATTRIBUTE_UNUSED,
11605 int max ATTRIBUTE_UNUSED)
11606 {
11607 if (subseg_text_p (now_seg))
11608 ia64_flush_insns ();
11609 }
11610
11611 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
11612 of an rs_align_code fragment. */
11613
11614 void
11615 ia64_handle_align (fragS *fragp)
11616 {
11617 int bytes;
11618 char *p;
11619 const unsigned char *nop_type;
11620
11621 if (fragp->fr_type != rs_align_code)
11622 return;
11623
11624 /* Check if this frag has to end with a stop bit. */
11625 nop_type = fragp->tc_frag_data ? le_nop_stop : le_nop;
11626
11627 bytes = fragp->fr_next->fr_address - fragp->fr_address - fragp->fr_fix;
11628 p = fragp->fr_literal + fragp->fr_fix;
11629
11630 /* If no padding is needed, check whether we need a stop bit. */
11631 if (!bytes && fragp->tc_frag_data)
11632 {
11633 if (fragp->fr_fix < 16)
11634 #if 1
11635 /* FIXME: It won't work with
11636 .align 16
11637 alloc r32=ar.pfs,1,2,4,0
11638 */
11639 ;
11640 #else
11641 as_bad_where (fragp->fr_file, fragp->fr_line,
11642 _("Can't add stop bit to mark end of instruction group"));
11643 #endif
11644 else
11645 /* Bundles are always in little-endian byte order. Make sure
11646 the previous bundle has the stop bit. */
11647 *(p - 16) |= 1;
11648 }
11649
11650 /* Make sure we are on a 16-byte boundary, in case someone has been
11651 putting data into a text section. */
11652 if (bytes & 15)
11653 {
11654 int fix = bytes & 15;
11655 memset (p, 0, fix);
11656 p += fix;
11657 bytes -= fix;
11658 fragp->fr_fix += fix;
11659 }
11660
11661 /* Instruction bundles are always little-endian. */
11662 memcpy (p, nop_type, 16);
11663 fragp->fr_var = 16;
11664 }
11665
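/* Added illustration (standalone sketch; the helper name is invented):
   how ia64_handle_align above splits an alignment gap.  Bytes up to the
   next 16-byte boundary are zero filled as part of the fixed frag; the
   remainder is covered by repeating the single 16-byte nop bundle left
   as the variable part (fr_var == 16).  */
static void
ia64_align_split_sketch (int gap, int *zero_fill, int *nop_bundles)
{
  *zero_fill = gap & 15;
  *nop_bundles = (gap - *zero_fill) / 16;
}
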
11666 static void
11667 ia64_float_to_chars_bigendian (char *lit, LITTLENUM_TYPE *words,
11668 int prec)
11669 {
11670 while (prec--)
11671 {
11672 number_to_chars_bigendian (lit, (long) (*words++),
11673 sizeof (LITTLENUM_TYPE));
11674 lit += sizeof (LITTLENUM_TYPE);
11675 }
11676 }
11677
11678 static void
11679 ia64_float_to_chars_littleendian (char *lit, LITTLENUM_TYPE *words,
11680 int prec)
11681 {
11682 while (prec--)
11683 {
11684 number_to_chars_littleendian (lit, (long) (words[prec]),
11685 sizeof (LITTLENUM_TYPE));
11686 lit += sizeof (LITTLENUM_TYPE);
11687 }
11688 }
11689
11690 void
11691 ia64_elf_section_change_hook (void)
11692 {
11693 if (elf_section_type (now_seg) == SHT_IA_64_UNWIND
11694 && elf_linked_to_section (now_seg) == NULL)
11695 elf_linked_to_section (now_seg) = text_section;
11696 dot_byteorder (-1);
11697 }
11698
11699 /* Check if a label should be made global. */
11700 void
11701 ia64_check_label (symbolS *label)
11702 {
11703 if (*input_line_pointer == ':')
11704 {
11705 S_SET_EXTERNAL (label);
11706 input_line_pointer++;
11707 }
11708 }
11709
11710 /* Used to remember where .alias and .secalias directives are seen. We
11711 will rename symbol and section names when we are about to output
11712 the relocatable file. */
11713 struct alias
11714 {
11715 const char *file; /* The file where the directive is seen. */
11716 unsigned int line; /* The line number the directive is at. */
11717 const char *name; /* The original name of the symbol. */
11718 };
11719
11720 /* Called for .alias and .secalias directives. If SECTION is 1, it is
11721 .secalias. Otherwise, it is .alias. */
11722 static void
11723 dot_alias (int section)
11724 {
11725 char *name, *alias;
11726 char delim;
11727 char *end_name;
11728 int len;
11729 struct alias *h;
11730 const char *a;
11731 htab_t ahash, nhash;
11732 const char *kind;
11733
11734 delim = get_symbol_name (&name);
11735 end_name = input_line_pointer;
11736 *end_name = delim;
11737
11738 if (name == end_name)
11739 {
11740 as_bad (_("expected symbol name"));
11741 ignore_rest_of_line ();
11742 return;
11743 }
11744
11745 SKIP_WHITESPACE_AFTER_NAME ();
11746
11747 if (*input_line_pointer != ',')
11748 {
11749 *end_name = 0;
11750 as_bad (_("expected comma after \"%s\""), name);
11751 *end_name = delim;
11752 ignore_rest_of_line ();
11753 return;
11754 }
11755
11756 input_line_pointer++;
11757 *end_name = 0;
11758 ia64_canonicalize_symbol_name (name);
11759
11760 /* We call demand_copy_C_string to check if the alias string is valid.
11761 There should be a closing `"' and no `\0' in the string. */
11762 alias = demand_copy_C_string (&len);
11763 if (alias == NULL)
11764 {
11765 ignore_rest_of_line ();
11766 return;
11767 }
11768
11769 /* Make a copy of name string. */
11770 len = strlen (name) + 1;
11771 obstack_grow (&notes, name, len);
11772 name = obstack_finish (&notes);
11773
11774 if (section)
11775 {
11776 kind = "section";
11777 ahash = secalias_hash;
11778 nhash = secalias_name_hash;
11779 }
11780 else
11781 {
11782 kind = "symbol";
11783 ahash = alias_hash;
11784 nhash = alias_name_hash;
11785 }
11786
11787 /* Check if alias has been used before. */
11788
11789 h = (struct alias *) str_hash_find (ahash, alias);
11790 if (h)
11791 {
11792 if (strcmp (h->name, name))
11793 as_bad (_("`%s' is already the alias of %s `%s'"),
11794 alias, kind, h->name);
11795 obstack_free (&notes, name);
11796 obstack_free (&notes, alias);
11797 goto out;
11798 }
11799
11800 /* Check if name already has an alias. */
11801 a = (const char *) str_hash_find (nhash, name);
11802 if (a)
11803 {
11804 if (strcmp (a, alias))
11805 as_bad (_("%s `%s' already has an alias `%s'"), kind, name, a);
11806 obstack_free (&notes, name);
11807 obstack_free (&notes, alias);
11808 goto out;
11809 }
11810
11811 h = XNEW (struct alias);
11812 h->file = as_where (&h->line);
11813 h->name = name;
11814
11815 str_hash_insert (ahash, alias, (void *) h);
11816 str_hash_insert (nhash, name, (void *) alias);
11817
11818 out:
11819 demand_empty_rest_of_line ();
11820 }
11821
11822 /* Rename the original symbol to its alias. */
11823 static int
11824 do_alias (void **slot, void *arg ATTRIBUTE_UNUSED)
11825 {
11826 string_tuple_t *tuple = *((string_tuple_t **) slot);
11827 struct alias *h = (struct alias *) tuple->value;
11828 symbolS *sym = symbol_find (h->name);
11829
11830 if (sym == NULL)
11831 {
11832 #ifdef TE_VMS
11833 /* VMS uses .alias extensively to alias CRTL functions to the same
11834 names with a decc$ prefix. Sometimes the function gets optimized
11835 away and a warning results, which should be suppressed. */
11836 if (strncmp (tuple->key, "decc$", 5) != 0)
11837 #endif
11838 as_warn_where (h->file, h->line,
11839 _("symbol `%s' aliased to `%s' is not used"),
11840 h->name, tuple->key);
11841 }
11842 else
11843 S_SET_NAME (sym, (char *) tuple->key);
11844
11845 return 1;
11846 }
11847
11848 /* Called from write_object_file. */
11849 void
11850 ia64_adjust_symtab (void)
11851 {
11852 htab_traverse (alias_hash, do_alias, NULL);
11853 }
11854
11855 /* Rename the original section to its alias. */
11856 static int
11857 do_secalias (void **slot, void *arg ATTRIBUTE_UNUSED)
11858 {
11859 string_tuple_t *tuple = *((string_tuple_t **) slot);
11860 struct alias *h = (struct alias *) tuple->value;
11861 segT sec = bfd_get_section_by_name (stdoutput, h->name);
11862
11863 if (sec == NULL)
11864 as_warn_where (h->file, h->line,
11865 _("section `%s' aliased to `%s' is not used"),
11866 h->name, tuple->key);
11867 else
11868 sec->name = tuple->key;
11869
11870 return 1;
11871 }
11872
11873 /* Called from write_object_file. */
11874 void
11875 ia64_frob_file (void)
11876 {
11877 htab_traverse (secalias_hash, do_secalias, NULL);
11878 }
11879
11880 #ifdef TE_VMS
11881 #define NT_VMS_MHD 1
11882 #define NT_VMS_LNM 2
11883
11884 /* Integrity VMS 8.x identifies its ELF modules with a standard ELF
11885 .note section. */
11886
11887 /* Manufacture a VMS-like time string. */
11888 static void
11889 get_vms_time (char *Now)
11890 {
11891 char *pnt;
11892 time_t timeb;
11893
11894 time (&timeb);
11895 pnt = ctime (&timeb);
11896 pnt[3] = 0;
11897 pnt[7] = 0;
11898 pnt[10] = 0;
11899 pnt[16] = 0;
11900 pnt[24] = 0;
11901 sprintf (Now, "%2s-%3s-%s %s", pnt + 8, pnt + 4, pnt + 20, pnt + 11);
11902 }
11903
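/* Added illustration (standalone demo; the helper name is invented):
   what get_vms_time's slicing does to a ctime() string of the form
   "Www Mmm dd hh:mm:ss yyyy\n".  */
#include <stdio.h>

static void
vms_time_sketch (char *out)
{
  char buf[] = "Thu Feb 24 15:00:00 2005\n";

  /* Plant NULs exactly where get_vms_time does, then reassemble the
     day, month, year and hh:mm pieces: "24-Feb-2005 15:00".  */
  buf[3] = buf[7] = buf[10] = buf[16] = buf[24] = 0;
  sprintf (out, "%2s-%3s-%s %s", buf + 8, buf + 4, buf + 20, buf + 11);
}
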
11904 void
11905 ia64_vms_note (void)
11906 {
11907 char *p;
11908 asection *seg = now_seg;
11909 subsegT subseg = now_subseg;
11910 asection *secp = NULL;
11911 char *bname;
11912 char buf [256];
11913 symbolS *sym;
11914
11915 /* Create the .note section. */
11916
11917 secp = subseg_new (".note", 0);
11918 bfd_set_section_flags (secp, SEC_HAS_CONTENTS | SEC_READONLY);
11919
11920 /* Module header note (MHD). */
11921 bname = xstrdup (lbasename (out_file_name));
11922 if ((p = strrchr (bname, '.')))
11923 *p = '\0';
11924
11925 /* VMS note header is 24 bytes long. */
11926 p = frag_more (8 + 8 + 8);
11927 number_to_chars_littleendian (p + 0, 8, 8);
11928 number_to_chars_littleendian (p + 8, 40 + strlen (bname), 8);
11929 number_to_chars_littleendian (p + 16, NT_VMS_MHD, 8);
11930
11931 p = frag_more (8);
11932 strcpy (p, "IPF/VMS");
11933
11934 p = frag_more (17 + 17 + strlen (bname) + 1 + 5);
11935 get_vms_time (p);
11936 strcpy (p + 17, "24-FEB-2005 15:00");
11937 p += 17 + 17;
11938 strcpy (p, bname);
11939 p += strlen (bname) + 1;
11940 free (bname);
11941 strcpy (p, "V1.0");
11942
11943 frag_align (3, 0, 0);
11944
11945 /* Language processor name note. */
11946 sprintf (buf, "GNU assembler version %s (%s) using BFD version %s",
11947 VERSION, TARGET_ALIAS, BFD_VERSION_STRING);
11948
11949 p = frag_more (8 + 8 + 8);
11950 number_to_chars_littleendian (p + 0, 8, 8);
11951 number_to_chars_littleendian (p + 8, strlen (buf) + 1, 8);
11952 number_to_chars_littleendian (p + 16, NT_VMS_LNM, 8);
11953
11954 p = frag_more (8);
11955 strcpy (p, "IPF/VMS");
11956
11957 p = frag_more (strlen (buf) + 1);
11958 strcpy (p, buf);
11959
11960 frag_align (3, 0, 0);
11961
11962 secp = subseg_new (".vms_display_name_info", 0);
11963 bfd_set_section_flags (secp, SEC_HAS_CONTENTS | SEC_READONLY);
11964
11965 /* This symbol should be passed on the command line and should vary
11966 according to the language. */
11967 sym = symbol_new ("__gnat_vms_display_name@gnat_demangler_rtl",
11968 absolute_section, &zero_address_frag, 0);
11969 symbol_table_insert (sym);
11970 symbol_get_bfdsym (sym)->flags |= BSF_DEBUGGING | BSF_DYNAMIC;
11971
11972 p = frag_more (4);
11973 /* Format 3 of VMS demangler Spec. */
11974 number_to_chars_littleendian (p, 3, 4);
11975
11976 p = frag_more (4);
11977 /* Placeholder for the symbol table index of the above symbol. */
11978 number_to_chars_littleendian (p, -1, 4);
11979
11980 frag_align (3, 0, 0);
11981
11982 /* We probably can't restore the current segment, for there likely
11983 isn't one yet... */
11984 if (seg && subseg)
11985 subseg_set (seg, subseg);
11986 }
11987
11988 #endif /* TE_VMS */