Fix compilation error in ia64-tdep.c with libunwind-ia64
1 /* Target-dependent code for the IA-64 for GDB, the GNU debugger.
2
3 Copyright (C) 1999-2018 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "inferior.h"
22 #include "gdbcore.h"
23 #include "arch-utils.h"
24 #include "floatformat.h"
25 #include "gdbtypes.h"
26 #include "regcache.h"
27 #include "reggroups.h"
28 #include "frame.h"
29 #include "frame-base.h"
30 #include "frame-unwind.h"
31 #include "target-float.h"
32 #include "value.h"
33 #include "objfiles.h"
34 #include "elf/common.h" /* for DT_PLTGOT value */
35 #include "elf-bfd.h"
36 #include "dis-asm.h"
37 #include "infcall.h"
38 #include "osabi.h"
39 #include "ia64-tdep.h"
40 #include "cp-abi.h"
41
42 #ifdef HAVE_LIBUNWIND_IA64_H
43 #include "elf/ia64.h" /* for PT_IA_64_UNWIND value */
44 #include "ia64-libunwind-tdep.h"
45
46 /* Note: KERNEL_START is supposed to be an address which is not going
47 to ever contain any valid unwind info. For ia64 linux, the choice
48 of 0xc000000000000000 is fairly safe since that's uncached space.
49
50 We use KERNEL_START as follows: after obtaining the kernel's
51 unwind table via getunwind(), we project its unwind data into
52 address-range KERNEL_START-(KERNEL_START+ktab_size) and then
53 when ia64_access_mem() sees a memory access to this
54 address-range, we redirect it to ktab instead.
55
56 None of this hackery is needed with a modern kernel/libc
57 which uses the kernel virtual DSO to provide access to the
58 kernel's unwind info. In that case, ktab_size remains 0 and
59 hence the value of KERNEL_START doesn't matter. */
60
61 #define KERNEL_START 0xc000000000000000ULL
62
63 static size_t ktab_size = 0;
64 struct ia64_table_entry
65 {
66 uint64_t start_offset;
67 uint64_t end_offset;
68 uint64_t info_offset;
69 };
70
71 static struct ia64_table_entry *ktab = NULL;
72 static gdb::optional<gdb::byte_vector> ktab_buf;
73
74 #endif
75
76 /* An enumeration of the different IA-64 instruction types. */
77
78 typedef enum instruction_type
79 {
80 A, /* Integer ALU ; I-unit or M-unit */
81 I, /* Non-ALU integer; I-unit */
82 M, /* Memory ; M-unit */
83 F, /* Floating-point ; F-unit */
84 B, /* Branch ; B-unit */
85 L, /* Extended (L+X) ; I-unit */
86 X, /* Extended (L+X) ; I-unit */
87 undefined /* undefined or reserved */
88 } instruction_type;
89
90 /* We represent IA-64 PC addresses as the value of the instruction
91 pointer or'd with some bit combination in the low nibble which
92 represents the slot number in the bundle addressed by the
93 instruction pointer. The problem is that the Linux kernel
94 multiplies its slot numbers (for exceptions) by one while the
95 disassembler multiplies its slot numbers by 6. In addition, I've
96 heard it said that the simulator uses 1 as the multiplier.
97
98 I've fixed the disassembler so that the bytes_per_line field will
99 be the slot multiplier. If bytes_per_line comes in as zero, it
100 is set to six (which is how it was set up initially). -- objdump
101 displays pretty disassembly dumps with this value. For our purposes,
102 we'll set bytes_per_line to SLOT_MULTIPLIER. This is okay since we
103 never want to also display the raw bytes the way objdump does. */
104
105 #define SLOT_MULTIPLIER 1
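/* For illustration, with SLOT_MULTIPLIER of 1 a (hypothetical) stop
   address such as 0x4000000000001e02 decodes as bundle address
   0x4000000000001e00 (addr & ~0x0f) and slot 2 ((addr & 0x0f) /
   SLOT_MULTIPLIER); a disassembler-style multiplier of 6 would have
   encoded the same slot as 0x4000000000001e0c instead.  */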
106
107 /* Length in bytes of an instruction bundle. */
108
109 #define BUNDLE_LEN 16
110
111 /* See the saved memory layout comment for ia64_memory_insert_breakpoint. */
112
113 #if BREAKPOINT_MAX < BUNDLE_LEN - 2
114 # error "BREAKPOINT_MAX < BUNDLE_LEN - 2"
115 #endif
116
117 static gdbarch_init_ftype ia64_gdbarch_init;
118
119 static gdbarch_register_name_ftype ia64_register_name;
120 static gdbarch_register_type_ftype ia64_register_type;
121 static gdbarch_breakpoint_from_pc_ftype ia64_breakpoint_from_pc;
122 static gdbarch_skip_prologue_ftype ia64_skip_prologue;
123 static struct type *is_float_or_hfa_type (struct type *t);
124 static CORE_ADDR ia64_find_global_pointer (struct gdbarch *gdbarch,
125 CORE_ADDR faddr);
126
127 #define NUM_IA64_RAW_REGS 462
128
129 /* Big enough to hold a FP register in bytes. */
130 #define IA64_FP_REGISTER_SIZE 16
131
132 static int sp_regnum = IA64_GR12_REGNUM;
133
134 /* NOTE: we treat the register stack registers r32-r127 as
135 pseudo-registers because they may not be accessible via the ptrace
136 register get/set interfaces. */
137
138 enum pseudo_regs { FIRST_PSEUDO_REGNUM = NUM_IA64_RAW_REGS,
139 VBOF_REGNUM = IA64_NAT127_REGNUM + 1, V32_REGNUM,
140 V127_REGNUM = V32_REGNUM + 95,
141 VP0_REGNUM, VP16_REGNUM = VP0_REGNUM + 16,
142 VP63_REGNUM = VP0_REGNUM + 63, LAST_PSEUDO_REGNUM };
143
144 /* Array of register names; there should be ia64_num_regs strings in
145 the initializer. */
146
147 static const char *ia64_register_names[] =
148 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
149 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
150 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
151 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
152 "", "", "", "", "", "", "", "",
153 "", "", "", "", "", "", "", "",
154 "", "", "", "", "", "", "", "",
155 "", "", "", "", "", "", "", "",
156 "", "", "", "", "", "", "", "",
157 "", "", "", "", "", "", "", "",
158 "", "", "", "", "", "", "", "",
159 "", "", "", "", "", "", "", "",
160 "", "", "", "", "", "", "", "",
161 "", "", "", "", "", "", "", "",
162 "", "", "", "", "", "", "", "",
163 "", "", "", "", "", "", "", "",
164
165 "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
166 "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
167 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
168 "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
169 "f32", "f33", "f34", "f35", "f36", "f37", "f38", "f39",
170 "f40", "f41", "f42", "f43", "f44", "f45", "f46", "f47",
171 "f48", "f49", "f50", "f51", "f52", "f53", "f54", "f55",
172 "f56", "f57", "f58", "f59", "f60", "f61", "f62", "f63",
173 "f64", "f65", "f66", "f67", "f68", "f69", "f70", "f71",
174 "f72", "f73", "f74", "f75", "f76", "f77", "f78", "f79",
175 "f80", "f81", "f82", "f83", "f84", "f85", "f86", "f87",
176 "f88", "f89", "f90", "f91", "f92", "f93", "f94", "f95",
177 "f96", "f97", "f98", "f99", "f100", "f101", "f102", "f103",
178 "f104", "f105", "f106", "f107", "f108", "f109", "f110", "f111",
179 "f112", "f113", "f114", "f115", "f116", "f117", "f118", "f119",
180 "f120", "f121", "f122", "f123", "f124", "f125", "f126", "f127",
181
182 "", "", "", "", "", "", "", "",
183 "", "", "", "", "", "", "", "",
184 "", "", "", "", "", "", "", "",
185 "", "", "", "", "", "", "", "",
186 "", "", "", "", "", "", "", "",
187 "", "", "", "", "", "", "", "",
188 "", "", "", "", "", "", "", "",
189 "", "", "", "", "", "", "", "",
190
191 "b0", "b1", "b2", "b3", "b4", "b5", "b6", "b7",
192
193 "vfp", "vrap",
194
195 "pr", "ip", "psr", "cfm",
196
197 "kr0", "kr1", "kr2", "kr3", "kr4", "kr5", "kr6", "kr7",
198 "", "", "", "", "", "", "", "",
199 "rsc", "bsp", "bspstore", "rnat",
200 "", "fcr", "", "",
201 "eflag", "csd", "ssd", "cflg", "fsr", "fir", "fdr", "",
202 "ccv", "", "", "", "unat", "", "", "",
203 "fpsr", "", "", "", "itc",
204 "", "", "", "", "", "", "", "", "", "",
205 "", "", "", "", "", "", "", "", "",
206 "pfs", "lc", "ec",
207 "", "", "", "", "", "", "", "", "", "",
208 "", "", "", "", "", "", "", "", "", "",
209 "", "", "", "", "", "", "", "", "", "",
210 "", "", "", "", "", "", "", "", "", "",
211 "", "", "", "", "", "", "", "", "", "",
212 "", "", "", "", "", "", "", "", "", "",
213 "",
214 "nat0", "nat1", "nat2", "nat3", "nat4", "nat5", "nat6", "nat7",
215 "nat8", "nat9", "nat10", "nat11", "nat12", "nat13", "nat14", "nat15",
216 "nat16", "nat17", "nat18", "nat19", "nat20", "nat21", "nat22", "nat23",
217 "nat24", "nat25", "nat26", "nat27", "nat28", "nat29", "nat30", "nat31",
218 "nat32", "nat33", "nat34", "nat35", "nat36", "nat37", "nat38", "nat39",
219 "nat40", "nat41", "nat42", "nat43", "nat44", "nat45", "nat46", "nat47",
220 "nat48", "nat49", "nat50", "nat51", "nat52", "nat53", "nat54", "nat55",
221 "nat56", "nat57", "nat58", "nat59", "nat60", "nat61", "nat62", "nat63",
222 "nat64", "nat65", "nat66", "nat67", "nat68", "nat69", "nat70", "nat71",
223 "nat72", "nat73", "nat74", "nat75", "nat76", "nat77", "nat78", "nat79",
224 "nat80", "nat81", "nat82", "nat83", "nat84", "nat85", "nat86", "nat87",
225 "nat88", "nat89", "nat90", "nat91", "nat92", "nat93", "nat94", "nat95",
226 "nat96", "nat97", "nat98", "nat99", "nat100","nat101","nat102","nat103",
227 "nat104","nat105","nat106","nat107","nat108","nat109","nat110","nat111",
228 "nat112","nat113","nat114","nat115","nat116","nat117","nat118","nat119",
229 "nat120","nat121","nat122","nat123","nat124","nat125","nat126","nat127",
230
231 "bof",
232
233 "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
234 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
235 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
236 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
237 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
238 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
239 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
240 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
241 "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
242 "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
243 "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
244 "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
245
246 "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7",
247 "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15",
248 "p16", "p17", "p18", "p19", "p20", "p21", "p22", "p23",
249 "p24", "p25", "p26", "p27", "p28", "p29", "p30", "p31",
250 "p32", "p33", "p34", "p35", "p36", "p37", "p38", "p39",
251 "p40", "p41", "p42", "p43", "p44", "p45", "p46", "p47",
252 "p48", "p49", "p50", "p51", "p52", "p53", "p54", "p55",
253 "p56", "p57", "p58", "p59", "p60", "p61", "p62", "p63",
254 };
255
256 struct ia64_frame_cache
257 {
258 CORE_ADDR base; /* frame pointer base for frame */
259 CORE_ADDR pc; /* function start pc for frame */
260 CORE_ADDR saved_sp; /* stack pointer for frame */
261 CORE_ADDR bsp; /* points at r32 for the current frame */
262 CORE_ADDR cfm; /* cfm value for current frame */
263 CORE_ADDR prev_cfm; /* cfm value for previous frame */
264 int frameless;
265 int sof; /* Size of frame (decoded from cfm value). */
266 int sol; /* Size of locals (decoded from cfm value). */
267 int sor; /* Number of rotating registers (decoded from
268 cfm value). */
269 CORE_ADDR after_prologue;
270 /* Address of first instruction after the last
271 prologue instruction; Note that there may
272 be instructions from the function's body
273 intermingled with the prologue. */
274 int mem_stack_frame_size;
275 /* Size of the memory stack frame (may be zero),
276 or -1 if it has not been determined yet. */
277 int fp_reg; /* Register number (if any) used as a frame pointer
278 for this frame. 0 if no register is being used
279 as the frame pointer. */
280
281 /* Saved registers. */
282 CORE_ADDR saved_regs[NUM_IA64_RAW_REGS];
283
284 };
285
286 static int
287 floatformat_valid (const struct floatformat *fmt, const void *from)
288 {
289 return 1;
290 }
291
292 static const struct floatformat floatformat_ia64_ext_little =
293 {
294 floatformat_little, 82, 0, 1, 17, 65535, 0x1ffff, 18, 64,
295 floatformat_intbit_yes, "floatformat_ia64_ext_little", floatformat_valid, NULL
296 };
297
298 static const struct floatformat floatformat_ia64_ext_big =
299 {
300 floatformat_big, 82, 46, 47, 17, 65535, 0x1ffff, 64, 64,
301 floatformat_intbit_yes, "floatformat_ia64_ext_big", floatformat_valid
302 };
303
304 static const struct floatformat *floatformats_ia64_ext[2] =
305 {
306 &floatformat_ia64_ext_big,
307 &floatformat_ia64_ext_little
308 };
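/* Decoding the descriptors above: the ia64 "register format" is 82 bits
   wide, split into a 1-bit sign, a 17-bit exponent biased by 65535
   (0x1ffff being the NaN exponent), and a 64-bit significand whose
   integer bit is stored explicitly (floatformat_intbit_yes).  */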
309
310 static struct type *
311 ia64_ext_type (struct gdbarch *gdbarch)
312 {
313 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
314
315 if (!tdep->ia64_ext_type)
316 tdep->ia64_ext_type
317 = arch_float_type (gdbarch, 128, "builtin_type_ia64_ext",
318 floatformats_ia64_ext);
319
320 return tdep->ia64_ext_type;
321 }
322
323 static int
324 ia64_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
325 struct reggroup *group)
326 {
327 int vector_p;
328 int float_p;
329 int raw_p;
330 if (group == all_reggroup)
331 return 1;
332 vector_p = TYPE_VECTOR (register_type (gdbarch, regnum));
333 float_p = TYPE_CODE (register_type (gdbarch, regnum)) == TYPE_CODE_FLT;
334 raw_p = regnum < NUM_IA64_RAW_REGS;
335 if (group == float_reggroup)
336 return float_p;
337 if (group == vector_reggroup)
338 return vector_p;
339 if (group == general_reggroup)
340 return (!vector_p && !float_p);
341 if (group == save_reggroup || group == restore_reggroup)
342 return raw_p;
343 return 0;
344 }
345
346 static const char *
347 ia64_register_name (struct gdbarch *gdbarch, int reg)
348 {
349 return ia64_register_names[reg];
350 }
351
352 struct type *
353 ia64_register_type (struct gdbarch *arch, int reg)
354 {
355 if (reg >= IA64_FR0_REGNUM && reg <= IA64_FR127_REGNUM)
356 return ia64_ext_type (arch);
357 else
358 return builtin_type (arch)->builtin_long;
359 }
360
361 static int
362 ia64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
363 {
364 if (reg >= IA64_GR32_REGNUM && reg <= IA64_GR127_REGNUM)
365 return V32_REGNUM + (reg - IA64_GR32_REGNUM);
366 return reg;
367 }
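/* In other words, a DWARF register number in the stacked range
   IA64_GR32_REGNUM..IA64_GR127_REGNUM is remapped onto the
   corresponding V32_REGNUM..V127_REGNUM pseudo register, while all
   other DWARF numbers (static general registers, floats, branch
   registers and so on) pass through unchanged.  */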
368
369
370 /* Extract ``len'' bits from an instruction bundle starting at
371 bit ``from''. */
372
373 static long long
374 extract_bit_field (const gdb_byte *bundle, int from, int len)
375 {
376 long long result = 0LL;
377 int to = from + len;
378 int from_byte = from / 8;
379 int to_byte = to / 8;
380 unsigned char *b = (unsigned char *) bundle;
381 unsigned char c;
382 int lshift;
383 int i;
384
385 c = b[from_byte];
386 if (from_byte == to_byte)
387 c = ((unsigned char) (c << (8 - to % 8))) >> (8 - to % 8);
388 result = c >> (from % 8);
389 lshift = 8 - (from % 8);
390
391 for (i = from_byte+1; i < to_byte; i++)
392 {
393 result |= ((long long) b[i]) << lshift;
394 lshift += 8;
395 }
396
397 if (from_byte < to_byte && (to % 8 != 0))
398 {
399 c = b[to_byte];
400 c = ((unsigned char) (c << (8 - to % 8))) >> (8 - to % 8);
401 result |= ((long long) c) << lshift;
402 }
403
404 return result;
405 }
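/* As a worked example, extract_bit_field (bundle, 0, 5) isolates the
   5-bit template: FROM_BYTE and TO_BYTE are both 0, so byte 0 is simply
   masked down to its low five bits.  extract_bit_field (bundle, 46, 41)
   fetches slot 1: the result starts with the top two bits of byte 5,
   takes bytes 6 through 9 whole, and ends with the low seven bits of
   byte 10 (2 + 32 + 7 = 41 bits).  */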
406
407 /* Replace the specified bits in an instruction bundle. */
408
409 static void
410 replace_bit_field (gdb_byte *bundle, long long val, int from, int len)
411 {
412 int to = from + len;
413 int from_byte = from / 8;
414 int to_byte = to / 8;
415 unsigned char *b = (unsigned char *) bundle;
416 unsigned char c;
417
418 if (from_byte == to_byte)
419 {
420 unsigned char left, right;
421 c = b[from_byte];
422 left = (c >> (to % 8)) << (to % 8);
423 right = ((unsigned char) (c << (8 - from % 8))) >> (8 - from % 8);
424 c = (unsigned char) (val & 0xff);
425 c = (unsigned char) (c << (from % 8 + 8 - to % 8)) >> (8 - to % 8);
426 c |= right | left;
427 b[from_byte] = c;
428 }
429 else
430 {
431 int i;
432 c = b[from_byte];
433 c = ((unsigned char) (c << (8 - from % 8))) >> (8 - from % 8);
434 c = c | (val << (from % 8));
435 b[from_byte] = c;
436 val >>= 8 - from % 8;
437
438 for (i = from_byte+1; i < to_byte; i++)
439 {
440 c = val & 0xff;
441 val >>= 8;
442 b[i] = c;
443 }
444
445 if (to % 8 != 0)
446 {
447 unsigned char cv = (unsigned char) val;
448 c = b[to_byte];
449 c = c >> (to % 8) << (to % 8);
450 c |= ((unsigned char) (cv << (8 - to % 8))) >> (8 - to % 8);
451 b[to_byte] = c;
452 }
453 }
454 }
455
456 /* Return the contents of slot N (for N = 0, 1, or 2) in
457 an instruction bundle. */
458
459 static long long
460 slotN_contents (gdb_byte *bundle, int slotnum)
461 {
462 return extract_bit_field (bundle, 5+41*slotnum, 41);
463 }
464
465 /* Store an instruction in an instruction bundle. */
466
467 static void
468 replace_slotN_contents (gdb_byte *bundle, long long instr, int slotnum)
469 {
470 replace_bit_field (bundle, instr, 5+41*slotnum, 41);
471 }
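/* Slot N therefore occupies bits 5 + 41*N of the 128-bit bundle:
   slot 0 sits in bits 5..45, slot 1 in bits 46..86, slot 2 in bits
   87..127, and the 5-bit template in bits 0..4.  */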
472
473 static const enum instruction_type template_encoding_table[32][3] =
474 {
475 { M, I, I }, /* 00 */
476 { M, I, I }, /* 01 */
477 { M, I, I }, /* 02 */
478 { M, I, I }, /* 03 */
479 { M, L, X }, /* 04 */
480 { M, L, X }, /* 05 */
481 { undefined, undefined, undefined }, /* 06 */
482 { undefined, undefined, undefined }, /* 07 */
483 { M, M, I }, /* 08 */
484 { M, M, I }, /* 09 */
485 { M, M, I }, /* 0A */
486 { M, M, I }, /* 0B */
487 { M, F, I }, /* 0C */
488 { M, F, I }, /* 0D */
489 { M, M, F }, /* 0E */
490 { M, M, F }, /* 0F */
491 { M, I, B }, /* 10 */
492 { M, I, B }, /* 11 */
493 { M, B, B }, /* 12 */
494 { M, B, B }, /* 13 */
495 { undefined, undefined, undefined }, /* 14 */
496 { undefined, undefined, undefined }, /* 15 */
497 { B, B, B }, /* 16 */
498 { B, B, B }, /* 17 */
499 { M, M, B }, /* 18 */
500 { M, M, B }, /* 19 */
501 { undefined, undefined, undefined }, /* 1A */
502 { undefined, undefined, undefined }, /* 1B */
503 { M, F, B }, /* 1C */
504 { M, F, B }, /* 1D */
505 { undefined, undefined, undefined }, /* 1E */
506 { undefined, undefined, undefined }, /* 1F */
507 };
508
509 /* Fetch and (partially) decode an instruction at ADDR and return the
510 address of the next instruction to fetch. */
511
512 static CORE_ADDR
513 fetch_instruction (CORE_ADDR addr, instruction_type *it, long long *instr)
514 {
515 gdb_byte bundle[BUNDLE_LEN];
516 int slotnum = (int) (addr & 0x0f) / SLOT_MULTIPLIER;
517 long long templ;
518 int val;
519
520 /* Warn about slot numbers greater than 2. We used to generate
521 an error here on the assumption that the user entered an invalid
522 address. But, sometimes GDB itself requests an invalid address.
523 This can (easily) happen when execution stops in a function for
524 which there are no symbols. The prologue scanner will attempt to
525 find the beginning of the function - if the nearest symbol
526 happens to not be aligned on a bundle boundary (16 bytes), the
527 resulting starting address will cause GDB to think that the slot
528 number is too large.
529
530 So we warn about it and set the slot number to zero. It is
531 not necessarily a fatal condition, particularly if debugging
532 at the assembly language level. */
533 if (slotnum > 2)
534 {
535 warning (_("Can't fetch instructions for slot numbers greater than 2.\n"
536 "Using slot 0 instead"));
537 slotnum = 0;
538 }
539
540 addr &= ~0x0f;
541
542 val = target_read_memory (addr, bundle, BUNDLE_LEN);
543
544 if (val != 0)
545 return 0;
546
547 *instr = slotN_contents (bundle, slotnum);
548 templ = extract_bit_field (bundle, 0, 5);
549 *it = template_encoding_table[(int)templ][slotnum];
550
551 if (slotnum == 2 || (slotnum == 1 && *it == L))
552 addr += 16;
553 else
554 addr += (slotnum + 1) * SLOT_MULTIPLIER;
555
556 return addr;
557 }
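/* A typical (hypothetical) caller walks a bundle slot by slot:

     instruction_type it;
     long long insn;
     CORE_ADDR next_pc = fetch_instruction (pc, &it, &insn);

   NEXT_PC then addresses the following slot of the same bundle, or the
   next 16-byte bundle once slot 2 has been fetched (or slot 1 of an
   L+X pair, since the X half merely completes that instruction).  */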
558
559 /* There are 5 different break instructions (break.i, break.b,
560 break.m, break.f, and break.x), but they all have the same
561 encoding. (The five bit template in the low five bits of the
562 instruction bundle distinguishes one from another.)
563
564 The runtime architecture manual specifies that break instructions
565 used for debugging purposes must have the upper two bits of the 21
566 bit immediate set to a 0 and a 1 respectively. A breakpoint
567 instruction encodes the most significant bit of its 21 bit
568 immediate at bit 36 of the 41 bit instruction. The penultimate msb
569 is at bit 25 which leads to the pattern below.
570
571 Originally, I had this set up to do, e.g., a "break.i 0x80000". But
572 it turns out that 0x80000 was used as the syscall break in the early
573 simulators. So I changed the pattern slightly to do "break.i 0x080001"
574 instead. But that didn't work either (I later found out that this
575 pattern was used by the simulator that I was using.) So I ended up
576 using the pattern seen below.
577
578 SHADOW_CONTENTS has byte-based addressing (PLACED_ADDRESS and SHADOW_LEN)
579 while we need bit-based addressing, as an instruction's length is 41 bits and
580 we must not modify/corrupt the adjacent slots in the same bundle.
581 Fortunately we may store larger memory incl. the adjacent bits with the
582 original memory content (not the possibly already stored breakpoints there).
583 We need to be careful in ia64_memory_remove_breakpoint to always restore
584 only the specific bits of this instruction ignoring any adjacent stored
585 bits.
586
587 We use the original addressing with the low nibble in the range <0..2> which
588 gets incorrectly interpreted by generic non-ia64 breakpoint_restore_shadows
589 as the direct byte offset of SHADOW_CONTENTS. We store the whole BUNDLE_LEN
590 bytes minus the (up to two) bytes skipped this way, so as not to run into
591 the next bundle.
592
593 If we wanted to store the whole bundle to SHADOW_CONTENTS we would have
594 to store the base address (`address & ~0x0f') into PLACED_ADDRESS.
595 In that case there would be no place left in which to store
596 SLOTNUM (`address & 0x0f', a value in the range <0..2>), and we need to know
597 SLOTNUM in ia64_memory_remove_breakpoint.
598
599 There is one special case where we need to be extra careful:
600 L-X instructions, which are instructions that occupy 2 slots
601 (The L part is always in slot 1, and the X part is always in
602 slot 2). We must refuse to insert breakpoints for an address
603 that points at slot 2 of a bundle where an L-X instruction is
604 present, since there is logically no instruction at that address.
605 However, to make things more interesting, the opcode of L-X
606 instructions is located in slot 2. This means that, to insert
607 a breakpoint at an address that points to slot 1, we actually
608 need to write the breakpoint in slot 2! Slot 1 is actually
609 the extended operand, so writing the breakpoint there would not
610 have the desired effect. Another side-effect of this issue
611 is that we need to make sure that the shadow contents buffer
612 does save byte 15 of our instruction bundle (this is the tail
613 end of slot 2, which wouldn't be saved if we were to insert
614 the breakpoint in slot 1).
615
616 ia64 16-byte bundle layout:
617 | 5 bits | slot 0 with 41 bits | slot 1 with 41 bits | slot 2 with 41 bits |
618
619 The current addressing used by the code below:
620 original PC placed_address placed_size required covered
621 == bp_tgt->shadow_len reqd \subset covered
622 0xABCDE0 0xABCDE0 0x10 <0x0...0x5> <0x0..0xF>
623 0xABCDE1 0xABCDE1 0xF <0x5...0xA> <0x1..0xF>
624 0xABCDE2 0xABCDE2 0xE <0xA...0xF> <0x2..0xF>
625
626 L-X instructions are treated a little specially, as explained above:
627 0xABCDE1 0xABCDE1 0xF <0xA...0xF> <0x1..0xF>
628
629 `objdump -d' and some other tools show slightly different (seemingly unjustified) offsets:
630 original PC byte where starts the instruction objdump offset
631 0xABCDE0 0xABCDE0 0xABCDE0
632 0xABCDE1 0xABCDE5 0xABCDE6
633 0xABCDE2 0xABCDEA 0xABCDEC
634 */
635
636 #define IA64_BREAKPOINT 0x00003333300LL
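/* Matching IA64_BREAKPOINT against the layout described above: bit 36
   (the immediate's msb) is 0, bit 25 (the penultimate msb) is 1, and
   the remaining immediate bits sit in bits 6..24, so the pattern
   appears to encode a "break" whose 21-bit parameter is 0x0ccccc with
   every other opcode field zero.  */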
637
638 static int
639 ia64_memory_insert_breakpoint (struct gdbarch *gdbarch,
640 struct bp_target_info *bp_tgt)
641 {
642 CORE_ADDR addr = bp_tgt->placed_address = bp_tgt->reqstd_address;
643 gdb_byte bundle[BUNDLE_LEN];
644 int slotnum = (int) (addr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
645 long long instr_breakpoint;
646 int val;
647 int templ;
648
649 if (slotnum > 2)
650 error (_("Can't insert breakpoint for slot numbers greater than 2."));
651
652 addr &= ~0x0f;
653
654 /* Enable the automatic memory restoration from breakpoints while
655 we read our instruction bundle for the purpose of SHADOW_CONTENTS.
656 Otherwise, we could possibly store into the shadow parts of the adjacent
657 placed breakpoints. It is due to our SHADOW_CONTENTS overlapping the real
658 breakpoint instruction bits region. */
659 scoped_restore restore_memory_0
660 = make_scoped_restore_show_memory_breakpoints (0);
661 val = target_read_memory (addr, bundle, BUNDLE_LEN);
662 if (val != 0)
663 return val;
664
665 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
666 for addressing the SHADOW_CONTENTS placement. */
667 shadow_slotnum = slotnum;
668
669 /* Always cover the last byte of the bundle in case we are inserting
670 a breakpoint on an L-X instruction. */
671 bp_tgt->shadow_len = BUNDLE_LEN - shadow_slotnum;
672
673 templ = extract_bit_field (bundle, 0, 5);
674 if (template_encoding_table[templ][slotnum] == X)
675 {
676 /* X unit types can only be used in slot 2, and are actually
677 part of a 2-slot L-X instruction. We cannot break at this
678 address, as this is the second half of an instruction that
679 lives in slot 1 of that bundle. */
680 gdb_assert (slotnum == 2);
681 error (_("Can't insert breakpoint for non-existing slot X"));
682 }
683 if (template_encoding_table[templ][slotnum] == L)
684 {
685 /* L unit types can only be used in slot 1. But the associated
686 opcode for that instruction is in slot 2, so bump the slot number
687 accordingly. */
688 gdb_assert (slotnum == 1);
689 slotnum = 2;
690 }
691
692 /* Store the whole bundle, except for the initial bytes skipped according to
693 the slot number, which is interpreted as a byte offset in PLACED_ADDRESS. */
694 memcpy (bp_tgt->shadow_contents, bundle + shadow_slotnum,
695 bp_tgt->shadow_len);
696
697 /* Re-read the same bundle as above except that, this time, read it in order
698 to compute the new bundle inside which we will be inserting the
699 breakpoint. Therefore, disable the automatic memory restoration from
700 breakpoints while we read our instruction bundle. Otherwise, the general
701 restoration mechanism kicks in and we would possibly remove parts of the
702 adjacent placed breakpoints. It is due to our SHADOW_CONTENTS overlapping
703 the real breakpoint instruction bits region. */
704 scoped_restore restore_memory_1
705 = make_scoped_restore_show_memory_breakpoints (1);
706 val = target_read_memory (addr, bundle, BUNDLE_LEN);
707 if (val != 0)
708 return val;
709
710 /* Breakpoints already present in the code will get detected and not get
711 reinserted by bp_loc_is_permanent. Multiple breakpoints at the same
712 location cannot induce the internal error as they are optimized into
713 a single instance by update_global_location_list. */
714 instr_breakpoint = slotN_contents (bundle, slotnum);
715 if (instr_breakpoint == IA64_BREAKPOINT)
716 internal_error (__FILE__, __LINE__,
717 _("Address %s already contains a breakpoint."),
718 paddress (gdbarch, bp_tgt->placed_address));
719 replace_slotN_contents (bundle, IA64_BREAKPOINT, slotnum);
720
721 val = target_write_memory (addr + shadow_slotnum, bundle + shadow_slotnum,
722 bp_tgt->shadow_len);
723
724 return val;
725 }
726
727 static int
728 ia64_memory_remove_breakpoint (struct gdbarch *gdbarch,
729 struct bp_target_info *bp_tgt)
730 {
731 CORE_ADDR addr = bp_tgt->placed_address;
732 gdb_byte bundle_mem[BUNDLE_LEN], bundle_saved[BUNDLE_LEN];
733 int slotnum = (addr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
734 long long instr_breakpoint, instr_saved;
735 int val;
736 int templ;
737
738 addr &= ~0x0f;
739
740 /* Disable the automatic memory restoration from breakpoints while
741 we read our instruction bundle. Otherwise, the general restoration
742 mechanism kicks in and we would possibly remove parts of the adjacent
743 placed breakpoints. It is due to our SHADOW_CONTENTS overlapping the real
744 breakpoint instruction bits region. */
745 scoped_restore restore_memory_1
746 = make_scoped_restore_show_memory_breakpoints (1);
747 val = target_read_memory (addr, bundle_mem, BUNDLE_LEN);
748 if (val != 0)
749 return val;
750
751 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
752 for addressing the SHADOW_CONTENTS placement. */
753 shadow_slotnum = slotnum;
754
755 templ = extract_bit_field (bundle_mem, 0, 5);
756 if (template_encoding_table[templ][slotnum] == X)
757 {
758 /* X unit types can only be used in slot 2, and are actually
759 part of a 2-slot L-X instruction. We refuse to insert
760 breakpoints at this address, so there should be no reason
761 for us attempting to remove one there, except if the program's
762 code somehow got modified in memory. */
763 gdb_assert (slotnum == 2);
764 warning (_("Cannot remove breakpoint at address %s from non-existing "
765 "X-type slot, memory has changed underneath"),
766 paddress (gdbarch, bp_tgt->placed_address));
767 return -1;
768 }
769 if (template_encoding_table[templ][slotnum] == L)
770 {
771 /* L unit types can only be used in slot 1. But the breakpoint
772 was actually saved using slot 2, so update the slot number
773 accordingly. */
774 gdb_assert (slotnum == 1);
775 slotnum = 2;
776 }
777
778 gdb_assert (bp_tgt->shadow_len == BUNDLE_LEN - shadow_slotnum);
779
780 instr_breakpoint = slotN_contents (bundle_mem, slotnum);
781 if (instr_breakpoint != IA64_BREAKPOINT)
782 {
783 warning (_("Cannot remove breakpoint at address %s, "
784 "no break instruction at such address."),
785 paddress (gdbarch, bp_tgt->placed_address));
786 return -1;
787 }
788
789 /* Extract the original saved instruction from SLOTNUM normalizing its
790 bit-shift for INSTR_SAVED. */
791 memcpy (bundle_saved, bundle_mem, BUNDLE_LEN);
792 memcpy (bundle_saved + shadow_slotnum, bp_tgt->shadow_contents,
793 bp_tgt->shadow_len);
794 instr_saved = slotN_contents (bundle_saved, slotnum);
795
796 /* In BUNDLE_MEM, be careful to modify only the bits belonging to SLOTNUM
797 and not any of the other ones that are stored in SHADOW_CONTENTS. */
798 replace_slotN_contents (bundle_mem, instr_saved, slotnum);
799 val = target_write_raw_memory (addr, bundle_mem, BUNDLE_LEN);
800
801 return val;
802 }
803
804 /* Implement the breakpoint_kind_from_pc gdbarch method. */
805
806 static int
807 ia64_breakpoint_kind_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr)
808 {
809 /* A place holder of gdbarch method breakpoint_kind_from_pc. */
810 return 0;
811 }
812
813 /* As gdbarch_breakpoint_from_pc ranges have byte granularity and ia64
814 instruction slots ranges are bit-granular (41 bits) we have to provide an
815 extended range as described for ia64_memory_insert_breakpoint. We also take
816 care of preserving the `break' instruction 21-bit (or 62-bit) parameter to
817 make a match for permanent breakpoints. */
818
819 static const gdb_byte *
820 ia64_breakpoint_from_pc (struct gdbarch *gdbarch,
821 CORE_ADDR *pcptr, int *lenptr)
822 {
823 CORE_ADDR addr = *pcptr;
824 static gdb_byte bundle[BUNDLE_LEN];
825 int slotnum = (int) (*pcptr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
826 long long instr_fetched;
827 int val;
828 int templ;
829
830 if (slotnum > 2)
831 error (_("Can't insert breakpoint for slot numbers greater than 2."));
832
833 addr &= ~0x0f;
834
835 /* Enable the automatic memory restoration from breakpoints while
836 we read our instruction bundle to match bp_loc_is_permanent. */
837 {
838 scoped_restore restore_memory_0
839 = make_scoped_restore_show_memory_breakpoints (0);
840 val = target_read_memory (addr, bundle, BUNDLE_LEN);
841 }
842
843 /* The memory might be unreachable. This can happen, for instance,
844 when the user inserts a breakpoint at an invalid address. */
845 if (val != 0)
846 return NULL;
847
848 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
849 for addressing the SHADOW_CONTENTS placement. */
850 shadow_slotnum = slotnum;
851
852 /* Always cover the last byte of the bundle for the L-X slot case. */
853 *lenptr = BUNDLE_LEN - shadow_slotnum;
854
855 /* Check for an L-type instruction in slot 1; if present, bump the slot
856 number up to slot 2. */
857 templ = extract_bit_field (bundle, 0, 5);
858 if (template_encoding_table[templ][slotnum] == X)
859 {
860 gdb_assert (slotnum == 2);
861 error (_("Can't insert breakpoint for non-existing slot X"));
862 }
863 if (template_encoding_table[templ][slotnum] == L)
864 {
865 gdb_assert (slotnum == 1);
866 slotnum = 2;
867 }
868
869 /* A break instruction has all its opcode bits cleared except for
870 the parameter value. For an L+X slot pair we are at the X slot (slot 2),
871 so we should not touch the L slot - the upper 41 bits of the parameter. */
872 instr_fetched = slotN_contents (bundle, slotnum);
873 instr_fetched &= 0x1003ffffc0LL;
874 replace_slotN_contents (bundle, instr_fetched, slotnum);
875
876 return bundle + shadow_slotnum;
877 }
878
879 static CORE_ADDR
880 ia64_read_pc (readable_regcache *regcache)
881 {
882 ULONGEST psr_value, pc_value;
883 int slot_num;
884
885 regcache->cooked_read (IA64_PSR_REGNUM, &psr_value);
886 regcache->cooked_read (IA64_IP_REGNUM, &pc_value);
887 slot_num = (psr_value >> 41) & 3;
888
889 return pc_value | (slot_num * SLOT_MULTIPLIER);
890 }
891
892 void
893 ia64_write_pc (struct regcache *regcache, CORE_ADDR new_pc)
894 {
895 int slot_num = (int) (new_pc & 0xf) / SLOT_MULTIPLIER;
896 ULONGEST psr_value;
897
898 regcache_cooked_read_unsigned (regcache, IA64_PSR_REGNUM, &psr_value);
899 psr_value &= ~(3LL << 41);
900 psr_value |= (ULONGEST)(slot_num & 0x3) << 41;
901
902 new_pc &= ~0xfLL;
903
904 regcache_cooked_write_unsigned (regcache, IA64_PSR_REGNUM, psr_value);
905 regcache_cooked_write_unsigned (regcache, IA64_IP_REGNUM, new_pc);
906 }
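/* The slot number travels in psr.ri (bits 41-42 of the processor
   status register) in both directions.  For a (hypothetical) new_pc of
   0x4000000000001e02, ia64_write_pc stores ip = 0x4000000000001e00 and
   psr.ri = 2, and ia64_read_pc reassembles the same value as
   ip | (psr.ri * SLOT_MULTIPLIER).  */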
907
908 #define IS_NaT_COLLECTION_ADDR(addr) ((((addr) >> 3) & 0x3f) == 0x3f)
909
910 /* Returns the address of the slot that's NSLOTS slots away from
911 the address ADDR. NSLOTS may be positive or negative. */
912 static CORE_ADDR
913 rse_address_add(CORE_ADDR addr, int nslots)
914 {
915 CORE_ADDR new_addr;
916 int mandatory_nat_slots = nslots / 63;
917 int direction = nslots < 0 ? -1 : 1;
918
919 new_addr = addr + 8 * (nslots + mandatory_nat_slots);
920
921 if ((new_addr >> 9) != ((addr + 8 * 64 * mandatory_nat_slots) >> 9))
922 new_addr += 8 * direction;
923
924 if (IS_NaT_COLLECTION_ADDR(new_addr))
925 new_addr += 8 * direction;
926
927 return new_addr;
928 }
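/* Two illustrative cases, using a hypothetical backing-store address of
   0x6000000000000000: adding 62 slots simply yields
   0x6000000000000000 + 62 * 8 = 0x60000000000001f0, whereas adding 63
   slots must also step over the NaT collection slot at
   0x60000000000001f8 (an address whose bits 3-8 are all ones) and
   therefore lands on 0x6000000000000200.  */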
929
930 static enum register_status
931 ia64_pseudo_register_read (struct gdbarch *gdbarch, readable_regcache *regcache,
932 int regnum, gdb_byte *buf)
933 {
934 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
935 enum register_status status;
936
937 if (regnum >= V32_REGNUM && regnum <= V127_REGNUM)
938 {
939 #ifdef HAVE_LIBUNWIND_IA64_H
940 /* First try and use the libunwind special reg accessor,
941 otherwise fallback to standard logic. */
942 if (!libunwind_is_initialized ()
943 || libunwind_get_reg_special (gdbarch, regcache, regnum, buf) != 0)
944 #endif
945 {
946 /* The fallback position is to assume that r32-r127 are
947 found sequentially in memory starting at $bof. This
948 isn't always true, but without libunwind, this is the
949 best we can do. */
950 enum register_status status;
951 ULONGEST cfm;
952 ULONGEST bsp;
953 CORE_ADDR reg;
954
955 status = regcache->cooked_read (IA64_BSP_REGNUM, &bsp);
956 if (status != REG_VALID)
957 return status;
958
959 status = regcache->cooked_read (IA64_CFM_REGNUM, &cfm);
960 if (status != REG_VALID)
961 return status;
962
963 /* The bsp points at the end of the register frame so we
964 subtract the size of frame from it to get start of
965 register frame. */
966 bsp = rse_address_add (bsp, -(cfm & 0x7f));
967
968 if ((cfm & 0x7f) > regnum - V32_REGNUM)
969 {
970 ULONGEST reg_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
971 reg = read_memory_integer ((CORE_ADDR)reg_addr, 8, byte_order);
972 store_unsigned_integer (buf, register_size (gdbarch, regnum),
973 byte_order, reg);
974 }
975 else
976 store_unsigned_integer (buf, register_size (gdbarch, regnum),
977 byte_order, 0);
978 }
979 }
980 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
981 {
982 ULONGEST unatN_val;
983 ULONGEST unat;
984
985 status = regcache->cooked_read (IA64_UNAT_REGNUM, &unat);
986 if (status != REG_VALID)
987 return status;
988 unatN_val = (unat & (1LL << (regnum - IA64_NAT0_REGNUM))) != 0;
989 store_unsigned_integer (buf, register_size (gdbarch, regnum),
990 byte_order, unatN_val);
991 }
992 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
993 {
994 ULONGEST natN_val = 0;
995 ULONGEST bsp;
996 ULONGEST cfm;
997 CORE_ADDR gr_addr = 0;
998
999 status = regcache->cooked_read (IA64_BSP_REGNUM, &bsp);
1000 if (status != REG_VALID)
1001 return status;
1002
1003 status = regcache->cooked_read (IA64_CFM_REGNUM, &cfm);
1004 if (status != REG_VALID)
1005 return status;
1006
1007 /* The bsp points at the end of the register frame so we
1008 subtract the size of frame from it to get start of register frame. */
1009 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1010
1011 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1012 gr_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1013
1014 if (gr_addr != 0)
1015 {
1016 /* Compute address of nat collection bits. */
1017 CORE_ADDR nat_addr = gr_addr | 0x1f8;
1018 ULONGEST nat_collection;
1019 int nat_bit;
1020 /* If our nat collection address is bigger than bsp, we have to get
1021 the nat collection from rnat. Otherwise, we fetch the nat
1022 collection from the computed address. */
1023 if (nat_addr >= bsp)
1024 regcache->cooked_read (IA64_RNAT_REGNUM, &nat_collection);
1025 else
1026 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
1027 nat_bit = (gr_addr >> 3) & 0x3f;
1028 natN_val = (nat_collection >> nat_bit) & 1;
1029 }
1030
1031 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1032 byte_order, natN_val);
1033 }
1034 else if (regnum == VBOF_REGNUM)
1035 {
1036 /* A virtual register frame start is provided for user convenience.
1037 It can be calculated as the bsp - sof (sizeof frame). */
1038 ULONGEST bsp, vbsp;
1039 ULONGEST cfm;
1040
1041 status = regcache->cooked_read (IA64_BSP_REGNUM, &bsp);
1042 if (status != REG_VALID)
1043 return status;
1044 status = regcache->cooked_read (IA64_CFM_REGNUM, &cfm);
1045 if (status != REG_VALID)
1046 return status;
1047
1048 /* The bsp points at the end of the register frame so we
1049 subtract the size of frame from it to get beginning of frame. */
1050 vbsp = rse_address_add (bsp, -(cfm & 0x7f));
1051 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1052 byte_order, vbsp);
1053 }
1054 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1055 {
1056 ULONGEST pr;
1057 ULONGEST cfm;
1058 ULONGEST prN_val;
1059
1060 status = regcache->cooked_read (IA64_PR_REGNUM, &pr);
1061 if (status != REG_VALID)
1062 return status;
1063 status = regcache->cooked_read (IA64_CFM_REGNUM, &cfm);
1064 if (status != REG_VALID)
1065 return status;
1066
1067 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1068 {
1069 /* Fetch predicate register rename base from current frame
1070 marker for this frame. */
1071 int rrb_pr = (cfm >> 32) & 0x3f;
1072
1073 /* Adjust the register number to account for register rotation. */
1074 regnum = VP16_REGNUM
1075 + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1076 }
1077 prN_val = (pr & (1LL << (regnum - VP0_REGNUM))) != 0;
1078 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1079 byte_order, prN_val);
1080 }
1081 else
1082 memset (buf, 0, register_size (gdbarch, regnum));
1083
1084 return REG_VALID;
1085 }
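/* To make the NaT lookup above concrete: for a stacked register whose
   backing-store slot sits at a (hypothetical) gr_addr of
   0x600000000000f010, the collection word lives at gr_addr | 0x1f8 =
   0x600000000000f1f8 and the register's bit within it is
   (gr_addr >> 3) & 0x3f = 2; if that collection address is at or above
   bsp (i.e. not yet written out by the RSE), the bits are read from
   ar.rnat instead.  */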
1086
1087 static void
1088 ia64_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
1089 int regnum, const gdb_byte *buf)
1090 {
1091 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1092
1093 if (regnum >= V32_REGNUM && regnum <= V127_REGNUM)
1094 {
1095 ULONGEST bsp;
1096 ULONGEST cfm;
1097 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1098 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1099
1100 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1101
1102 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1103 {
1104 ULONGEST reg_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1105 write_memory (reg_addr, buf, 8);
1106 }
1107 }
1108 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
1109 {
1110 ULONGEST unatN_val, unat, unatN_mask;
1111 regcache_cooked_read_unsigned (regcache, IA64_UNAT_REGNUM, &unat);
1112 unatN_val = extract_unsigned_integer (buf, register_size (gdbarch,
1113 regnum),
1114 byte_order);
1115 unatN_mask = (1LL << (regnum - IA64_NAT0_REGNUM));
1116 if (unatN_val == 0)
1117 unat &= ~unatN_mask;
1118 else if (unatN_val == 1)
1119 unat |= unatN_mask;
1120 regcache_cooked_write_unsigned (regcache, IA64_UNAT_REGNUM, unat);
1121 }
1122 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
1123 {
1124 ULONGEST natN_val;
1125 ULONGEST bsp;
1126 ULONGEST cfm;
1127 CORE_ADDR gr_addr = 0;
1128 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1129 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1130
1131 /* The bsp points at the end of the register frame so we
1132 subtract the size of frame from it to get start of register frame. */
1133 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1134
1135 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1136 gr_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1137
1138 natN_val = extract_unsigned_integer (buf, register_size (gdbarch,
1139 regnum),
1140 byte_order);
1141
1142 if (gr_addr != 0 && (natN_val == 0 || natN_val == 1))
1143 {
1144 /* Compute address of nat collection bits. */
1145 CORE_ADDR nat_addr = gr_addr | 0x1f8;
1146 CORE_ADDR nat_collection;
1147 int natN_bit = (gr_addr >> 3) & 0x3f;
1148 ULONGEST natN_mask = (1LL << natN_bit);
1149 /* If our nat collection address is bigger than bsp, we have to get
1150 the nat collection from rnat. Otherwise, we fetch the nat
1151 collection from the computed address. */
1152 if (nat_addr >= bsp)
1153 {
1154 regcache_cooked_read_unsigned (regcache,
1155 IA64_RNAT_REGNUM,
1156 &nat_collection);
1157 if (natN_val)
1158 nat_collection |= natN_mask;
1159 else
1160 nat_collection &= ~natN_mask;
1161 regcache_cooked_write_unsigned (regcache, IA64_RNAT_REGNUM,
1162 nat_collection);
1163 }
1164 else
1165 {
1166 gdb_byte nat_buf[8];
1167 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
1168 if (natN_val)
1169 nat_collection |= natN_mask;
1170 else
1171 nat_collection &= ~natN_mask;
1172 store_unsigned_integer (nat_buf, register_size (gdbarch, regnum),
1173 byte_order, nat_collection);
1174 write_memory (nat_addr, nat_buf, 8);
1175 }
1176 }
1177 }
1178 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1179 {
1180 ULONGEST pr;
1181 ULONGEST cfm;
1182 ULONGEST prN_val;
1183 ULONGEST prN_mask;
1184
1185 regcache_cooked_read_unsigned (regcache, IA64_PR_REGNUM, &pr);
1186 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1187
1188 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1189 {
1190 /* Fetch predicate register rename base from current frame
1191 marker for this frame. */
1192 int rrb_pr = (cfm >> 32) & 0x3f;
1193
1194 /* Adjust the register number to account for register rotation. */
1195 regnum = VP16_REGNUM
1196 + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1197 }
1198 prN_val = extract_unsigned_integer (buf, register_size (gdbarch, regnum),
1199 byte_order);
1200 prN_mask = (1LL << (regnum - VP0_REGNUM));
1201 if (prN_val == 0)
1202 pr &= ~prN_mask;
1203 else if (prN_val == 1)
1204 pr |= prN_mask;
1205 regcache_cooked_write_unsigned (regcache, IA64_PR_REGNUM, pr);
1206 }
1207 }
1208
1209 /* The ia64 needs to convert between various ieee floating-point formats
1210 and the special ia64 floating point register format. */
1211
1212 static int
1213 ia64_convert_register_p (struct gdbarch *gdbarch, int regno, struct type *type)
1214 {
1215 return (regno >= IA64_FR0_REGNUM && regno <= IA64_FR127_REGNUM
1216 && TYPE_CODE (type) == TYPE_CODE_FLT
1217 && type != ia64_ext_type (gdbarch));
1218 }
1219
1220 static int
1221 ia64_register_to_value (struct frame_info *frame, int regnum,
1222 struct type *valtype, gdb_byte *out,
1223 int *optimizedp, int *unavailablep)
1224 {
1225 struct gdbarch *gdbarch = get_frame_arch (frame);
1226 gdb_byte in[IA64_FP_REGISTER_SIZE];
1227
1228 /* Convert to TYPE. */
1229 if (!get_frame_register_bytes (frame, regnum, 0,
1230 register_size (gdbarch, regnum),
1231 in, optimizedp, unavailablep))
1232 return 0;
1233
1234 target_float_convert (in, ia64_ext_type (gdbarch), out, valtype);
1235 *optimizedp = *unavailablep = 0;
1236 return 1;
1237 }
1238
1239 static void
1240 ia64_value_to_register (struct frame_info *frame, int regnum,
1241 struct type *valtype, const gdb_byte *in)
1242 {
1243 struct gdbarch *gdbarch = get_frame_arch (frame);
1244 gdb_byte out[IA64_FP_REGISTER_SIZE];
1245 target_float_convert (in, valtype, out, ia64_ext_type (gdbarch));
1246 put_frame_register (frame, regnum, out);
1247 }
1248
1249
1250 /* Limit the number of skipped non-prologue instructions, since examining
1251 the prologue is expensive. */
1252 static int max_skip_non_prologue_insns = 40;
1253
1254 /* Given PC representing the starting address of a function, and
1255 LIM_PC which is the (sloppy) limit to which to scan when looking
1256 for a prologue, attempt to further refine this limit by using
1257 the line data in the symbol table. If successful, a better guess
1258 on where the prologue ends is returned, otherwise the previous
1259 value of lim_pc is returned. TRUST_LIMIT is a pointer to a flag
1260 which will be set to indicate whether the returned limit may be
1261 used with no further scanning in the event that the function is
1262 frameless. */
1263
1264 /* FIXME: cagney/2004-02-14: This function and logic have largely been
1265 superseded by skip_prologue_using_sal. */
1266
1267 static CORE_ADDR
1268 refine_prologue_limit (CORE_ADDR pc, CORE_ADDR lim_pc, int *trust_limit)
1269 {
1270 struct symtab_and_line prologue_sal;
1271 CORE_ADDR start_pc = pc;
1272 CORE_ADDR end_pc;
1273
1274 /* The prologue can not possibly go past the function end itself,
1275 so we can already adjust LIM_PC accordingly. */
1276 if (find_pc_partial_function (pc, NULL, NULL, &end_pc) && end_pc < lim_pc)
1277 lim_pc = end_pc;
1278
1279 /* Start off not trusting the limit. */
1280 *trust_limit = 0;
1281
1282 prologue_sal = find_pc_line (pc, 0);
1283 if (prologue_sal.line != 0)
1284 {
1285 int i;
1286 CORE_ADDR addr = prologue_sal.end;
1287
1288 /* Handle the case in which the compiler's optimizer/scheduler
1289 has moved instructions into the prologue. We scan ahead
1290 in the function looking for address ranges whose corresponding
1291 line number is less than or equal to the first one that we
1292 found for the function. (It can be less than when the
1293 scheduler puts a body instruction before the first prologue
1294 instruction.) */
1295 for (i = 2 * max_skip_non_prologue_insns;
1296 i > 0 && (lim_pc == 0 || addr < lim_pc);
1297 i--)
1298 {
1299 struct symtab_and_line sal;
1300
1301 sal = find_pc_line (addr, 0);
1302 if (sal.line == 0)
1303 break;
1304 if (sal.line <= prologue_sal.line
1305 && sal.symtab == prologue_sal.symtab)
1306 {
1307 prologue_sal = sal;
1308 }
1309 addr = sal.end;
1310 }
1311
1312 if (lim_pc == 0 || prologue_sal.end < lim_pc)
1313 {
1314 lim_pc = prologue_sal.end;
1315 if (start_pc == get_pc_function_start (lim_pc))
1316 *trust_limit = 1;
1317 }
1318 }
1319 return lim_pc;
1320 }
1321
1322 #define isScratch(_regnum_) ((_regnum_) == 2 || (_regnum_) == 3 \
1323 || (8 <= (_regnum_) && (_regnum_) <= 11) \
1324 || (14 <= (_regnum_) && (_regnum_) <= 31))
1325 #define imm9(_instr_) \
1326 ( ((((_instr_) & 0x01000000000LL) ? -1 : 0) << 8) \
1327 | (((_instr_) & 0x00008000000LL) >> 20) \
1328 | (((_instr_) & 0x00000001fc0LL) >> 6))
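/* imm9 reassembles the signed 9-bit displacement of the post-increment
   store forms from its scattered fields: the sign is taken from
   instruction bit 36, bit 7 of the result from instruction bit 27, and
   bits 0-6 from instruction bits 6-12.  For a hypothetical encoding
   with bit 36 set, bit 27 clear and the 7-bit field holding 0x70, the
   macro yields (-1 << 8) | 0 | 0x70 = -144.  */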
1329
1330 /* Allocate and initialize a frame cache. */
1331
1332 static struct ia64_frame_cache *
1333 ia64_alloc_frame_cache (void)
1334 {
1335 struct ia64_frame_cache *cache;
1336 int i;
1337
1338 cache = FRAME_OBSTACK_ZALLOC (struct ia64_frame_cache);
1339
1340 /* Base address. */
1341 cache->base = 0;
1342 cache->pc = 0;
1343 cache->cfm = 0;
1344 cache->prev_cfm = 0;
1345 cache->sof = 0;
1346 cache->sol = 0;
1347 cache->sor = 0;
1348 cache->bsp = 0;
1349 cache->fp_reg = 0;
1350 cache->frameless = 1;
1351
1352 for (i = 0; i < NUM_IA64_RAW_REGS; i++)
1353 cache->saved_regs[i] = 0;
1354
1355 return cache;
1356 }
1357
1358 static CORE_ADDR
1359 examine_prologue (CORE_ADDR pc, CORE_ADDR lim_pc,
1360 struct frame_info *this_frame,
1361 struct ia64_frame_cache *cache)
1362 {
1363 CORE_ADDR next_pc;
1364 CORE_ADDR last_prologue_pc = pc;
1365 instruction_type it;
1366 long long instr;
1367 int cfm_reg = 0;
1368 int ret_reg = 0;
1369 int fp_reg = 0;
1370 int unat_save_reg = 0;
1371 int pr_save_reg = 0;
1372 int mem_stack_frame_size = 0;
1373 int spill_reg = 0;
1374 CORE_ADDR spill_addr = 0;
1375 char instores[8];
1376 char infpstores[8];
1377 char reg_contents[256];
1378 int trust_limit;
1379 int frameless = 1;
1380 int i;
1381 CORE_ADDR addr;
1382 gdb_byte buf[8];
1383 CORE_ADDR bof, sor, sol, sof, cfm, rrb_gr;
1384
1385 memset (instores, 0, sizeof instores);
1386 memset (infpstores, 0, sizeof infpstores);
1387 memset (reg_contents, 0, sizeof reg_contents);
1388
1389 if (cache->after_prologue != 0
1390 && cache->after_prologue <= lim_pc)
1391 return cache->after_prologue;
1392
1393 lim_pc = refine_prologue_limit (pc, lim_pc, &trust_limit);
1394 next_pc = fetch_instruction (pc, &it, &instr);
1395
1396 /* We want to check if we have a recognizable function start before we
1397 look ahead for a prologue. */
1398 if (pc < lim_pc && next_pc
1399 && it == M && ((instr & 0x1ee0000003fLL) == 0x02c00000000LL))
1400 {
1401 /* alloc - start of a regular function. */
1402 int sol = (int) ((instr & 0x00007f00000LL) >> 20);
1403 int sof = (int) ((instr & 0x000000fe000LL) >> 13);
1404 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1405
1406 /* Verify that the current cfm matches what we think is the
1407 function start. If we have somehow jumped within a function,
1408 we do not want to interpret the prologue and calculate the
1409 addresses of various registers such as the return address.
1410 We will instead treat the frame as frameless. */
1411 if (!this_frame ||
1412 (sof == (cache->cfm & 0x7f) &&
1413 sol == ((cache->cfm >> 7) & 0x7f)))
1414 frameless = 0;
1415
1416 cfm_reg = rN;
1417 last_prologue_pc = next_pc;
1418 pc = next_pc;
1419 }
1420 else
1421 {
1422 /* Look for a leaf routine. */
1423 if (pc < lim_pc && next_pc
1424 && (it == I || it == M)
1425 && ((instr & 0x1ee00000000LL) == 0x10800000000LL))
1426 {
1427 /* adds rN = imm14, rM (or mov rN, rM when imm14 is 0) */
1428 int imm = (int) ((((instr & 0x01000000000LL) ? -1 : 0) << 13)
1429 | ((instr & 0x001f8000000LL) >> 20)
1430 | ((instr & 0x000000fe000LL) >> 13));
1431 int rM = (int) ((instr & 0x00007f00000LL) >> 20);
1432 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1433 int qp = (int) (instr & 0x0000000003fLL);
1434 if (qp == 0 && rN == 2 && imm == 0 && rM == 12 && fp_reg == 0)
1435 {
1436 /* mov r2, r12 - beginning of leaf routine. */
1437 fp_reg = rN;
1438 last_prologue_pc = next_pc;
1439 }
1440 }
1441
1442 /* If we don't recognize a regular function or leaf routine, we are
1443 done. */
1444 if (!fp_reg)
1445 {
1446 pc = lim_pc;
1447 if (trust_limit)
1448 last_prologue_pc = lim_pc;
1449 }
1450 }
1451
1452 /* Loop, looking for prologue instructions, keeping track of
1453 where preserved registers were spilled. */
1454 while (pc < lim_pc)
1455 {
1456 next_pc = fetch_instruction (pc, &it, &instr);
1457 if (next_pc == 0)
1458 break;
1459
1460 if (it == B && ((instr & 0x1e1f800003fLL) != 0x04000000000LL))
1461 {
1462 /* Exit loop upon hitting a non-nop branch instruction. */
1463 if (trust_limit)
1464 lim_pc = pc;
1465 break;
1466 }
1467 else if (((instr & 0x3fLL) != 0LL) &&
1468 (frameless || ret_reg != 0))
1469 {
1470 /* Exit loop upon hitting a predicated instruction if
1471 we already have the return register or if we are frameless. */
1472 if (trust_limit)
1473 lim_pc = pc;
1474 break;
1475 }
1476 else if (it == I && ((instr & 0x1eff8000000LL) == 0x00188000000LL))
1477 {
1478 /* Move from BR */
1479 int b2 = (int) ((instr & 0x0000000e000LL) >> 13);
1480 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1481 int qp = (int) (instr & 0x0000000003f);
1482
1483 if (qp == 0 && b2 == 0 && rN >= 32 && ret_reg == 0)
1484 {
1485 ret_reg = rN;
1486 last_prologue_pc = next_pc;
1487 }
1488 }
1489 else if ((it == I || it == M)
1490 && ((instr & 0x1ee00000000LL) == 0x10800000000LL))
1491 {
1492 /* adds rN = imm14, rM (or mov rN, rM when imm14 is 0) */
1493 int imm = (int) ((((instr & 0x01000000000LL) ? -1 : 0) << 13)
1494 | ((instr & 0x001f8000000LL) >> 20)
1495 | ((instr & 0x000000fe000LL) >> 13));
1496 int rM = (int) ((instr & 0x00007f00000LL) >> 20);
1497 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1498 int qp = (int) (instr & 0x0000000003fLL);
1499
1500 if (qp == 0 && rN >= 32 && imm == 0 && rM == 12 && fp_reg == 0)
1501 {
1502 /* mov rN, r12 */
1503 fp_reg = rN;
1504 last_prologue_pc = next_pc;
1505 }
1506 else if (qp == 0 && rN == 12 && rM == 12)
1507 {
1508 /* adds r12, -mem_stack_frame_size, r12 */
1509 mem_stack_frame_size -= imm;
1510 last_prologue_pc = next_pc;
1511 }
1512 else if (qp == 0 && rN == 2
1513 && ((rM == fp_reg && fp_reg != 0) || rM == 12))
1514 {
1515 CORE_ADDR saved_sp = 0;
1516 /* adds r2, spilloffset, rFramePointer
1517 or
1518 adds r2, spilloffset, r12
1519
1520 Get ready for stf.spill or st8.spill instructions.
1521 The address to start spilling at is loaded into r2.
1522 FIXME: Why r2? That's what gcc currently uses; it
1523 could well be different for other compilers. */
1524
1525 /* Hmm... whether or not this will work will depend on
1526 where the pc is. If it's still early in the prologue
1527 this'll be wrong. FIXME */
1528 if (this_frame)
1529 {
1530 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1531 saved_sp = get_frame_register_unsigned (this_frame,
1532 sp_regnum);
1533 }
1534 spill_addr = saved_sp
1535 + (rM == 12 ? 0 : mem_stack_frame_size)
1536 + imm;
1537 spill_reg = rN;
1538 last_prologue_pc = next_pc;
1539 }
1540 else if (qp == 0 && rM >= 32 && rM < 40 && !instores[rM-32] &&
1541 rN < 256 && imm == 0)
1542 {
1543 /* mov rN, rM where rM is an input register. */
1544 reg_contents[rN] = rM;
1545 last_prologue_pc = next_pc;
1546 }
1547 else if (frameless && qp == 0 && rN == fp_reg && imm == 0 &&
1548 rM == 2)
1549 {
1550 /* mov r12, r2 */
1551 last_prologue_pc = next_pc;
1552 break;
1553 }
1554 }
1555 else if (it == M
1556 && ( ((instr & 0x1efc0000000LL) == 0x0eec0000000LL)
1557 || ((instr & 0x1ffc8000000LL) == 0x0cec0000000LL) ))
1558 {
1559 /* stf.spill [rN] = fM, imm9
1560 or
1561 stf.spill [rN] = fM */
1562
1563 int imm = imm9(instr);
1564 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1565 int fM = (int) ((instr & 0x000000fe000LL) >> 13);
1566 int qp = (int) (instr & 0x0000000003fLL);
1567 if (qp == 0 && rN == spill_reg && spill_addr != 0
1568 && ((2 <= fM && fM <= 5) || (16 <= fM && fM <= 31)))
1569 {
1570 cache->saved_regs[IA64_FR0_REGNUM + fM] = spill_addr;
1571
1572 if ((instr & 0x1efc0000000LL) == 0x0eec0000000LL)
1573 spill_addr += imm;
1574 else
1575 spill_addr = 0; /* last one; must be done. */
1576 last_prologue_pc = next_pc;
1577 }
1578 }
1579 else if ((it == M && ((instr & 0x1eff8000000LL) == 0x02110000000LL))
1580 || (it == I && ((instr & 0x1eff8000000LL) == 0x00050000000LL)) )
1581 {
1582 /* mov.m rN = arM
1583 or
1584 mov.i rN = arM */
1585
1586 int arM = (int) ((instr & 0x00007f00000LL) >> 20);
1587 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1588 int qp = (int) (instr & 0x0000000003fLL);
1589 if (qp == 0 && isScratch (rN) && arM == 36 /* ar.unat */)
1590 {
1591 /* We have something like "mov.m r3 = ar.unat". Remember the
1592 r3 (or whatever) and watch for a store of this register... */
1593 unat_save_reg = rN;
1594 last_prologue_pc = next_pc;
1595 }
1596 }
1597 else if (it == I && ((instr & 0x1eff8000000LL) == 0x00198000000LL))
1598 {
1599 /* mov rN = pr */
1600 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1601 int qp = (int) (instr & 0x0000000003fLL);
1602 if (qp == 0 && isScratch (rN))
1603 {
1604 pr_save_reg = rN;
1605 last_prologue_pc = next_pc;
1606 }
1607 }
1608 else if (it == M
1609 && ( ((instr & 0x1ffc8000000LL) == 0x08cc0000000LL)
1610 || ((instr & 0x1efc0000000LL) == 0x0acc0000000LL)))
1611 {
1612 /* st8 [rN] = rM
1613 or
1614 st8 [rN] = rM, imm9 */
1615 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1616 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1617 int qp = (int) (instr & 0x0000000003fLL);
1618 int indirect = rM < 256 ? reg_contents[rM] : 0;
1619 if (qp == 0 && rN == spill_reg && spill_addr != 0
1620 && (rM == unat_save_reg || rM == pr_save_reg))
1621 {
1622 /* We've found a spill of either the UNAT register or the PR
1623 register. (Well, not exactly; what we've actually found is
1624 a spill of the register that UNAT or PR was moved to).
1625 Record that fact and move on... */
1626 if (rM == unat_save_reg)
1627 {
1628 /* Track UNAT register. */
1629 cache->saved_regs[IA64_UNAT_REGNUM] = spill_addr;
1630 unat_save_reg = 0;
1631 }
1632 else
1633 {
1634 /* Track PR register. */
1635 cache->saved_regs[IA64_PR_REGNUM] = spill_addr;
1636 pr_save_reg = 0;
1637 }
1638 if ((instr & 0x1efc0000000LL) == 0x0acc0000000LL)
1639 /* st8 [rN] = rM, imm9 */
1640 spill_addr += imm9(instr);
1641 else
1642 spill_addr = 0; /* Must be done spilling. */
1643 last_prologue_pc = next_pc;
1644 }
1645 else if (qp == 0 && 32 <= rM && rM < 40 && !instores[rM-32])
1646 {
1647 /* Allow up to one store of each input register. */
1648 instores[rM-32] = 1;
1649 last_prologue_pc = next_pc;
1650 }
1651 else if (qp == 0 && 32 <= indirect && indirect < 40 &&
1652 !instores[indirect-32])
1653 {
1654 /* Allow an indirect store of an input register. */
1655 instores[indirect-32] = 1;
1656 last_prologue_pc = next_pc;
1657 }
1658 }
1659 else if (it == M && ((instr & 0x1ff08000000LL) == 0x08c00000000LL))
1660 {
1661 /* One of
1662 st1 [rN] = rM
1663 st2 [rN] = rM
1664 st4 [rN] = rM
1665 st8 [rN] = rM
1666 Note that the st8 case is handled in the clause above.
1667
1668 Advance over stores of input registers. One store per input
1669 register is permitted. */
1670 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1671 int qp = (int) (instr & 0x0000000003fLL);
1672 int indirect = rM < 256 ? reg_contents[rM] : 0;
1673 if (qp == 0 && 32 <= rM && rM < 40 && !instores[rM-32])
1674 {
1675 instores[rM-32] = 1;
1676 last_prologue_pc = next_pc;
1677 }
1678 else if (qp == 0 && 32 <= indirect && indirect < 40 &&
1679 !instores[indirect-32])
1680 {
1681 /* Allow an indirect store of an input register. */
1682 instores[indirect-32] = 1;
1683 last_prologue_pc = next_pc;
1684 }
1685 }
1686 else if (it == M && ((instr & 0x1ff88000000LL) == 0x0cc80000000LL))
1687 {
1688 /* Either
1689 stfs [rN] = fM
1690 or
1691 stfd [rN] = fM
1692
1693 Advance over stores of floating point input registers. Again
1694 one store per register is permitted. */
1695 int fM = (int) ((instr & 0x000000fe000LL) >> 13);
1696 int qp = (int) (instr & 0x0000000003fLL);
1697 if (qp == 0 && 8 <= fM && fM < 16 && !infpstores[fM - 8])
1698 {
1699 infpstores[fM-8] = 1;
1700 last_prologue_pc = next_pc;
1701 }
1702 }
1703 else if (it == M
1704 && ( ((instr & 0x1ffc8000000LL) == 0x08ec0000000LL)
1705 || ((instr & 0x1efc0000000LL) == 0x0aec0000000LL)))
1706 {
1707 /* st8.spill [rN] = rM
1708 or
1709 st8.spill [rN] = rM, imm9 */
1710 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1711 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1712 int qp = (int) (instr & 0x0000000003fLL);
1713 if (qp == 0 && rN == spill_reg && 4 <= rM && rM <= 7)
1714 {
1715 /* We've found a spill of one of the preserved general purpose
1716 regs. Record the spill address and advance the spill
1717 register if appropriate. */
1718 cache->saved_regs[IA64_GR0_REGNUM + rM] = spill_addr;
1719 if ((instr & 0x1efc0000000LL) == 0x0aec0000000LL)
1720 /* st8.spill [rN] = rM, imm9 */
1721 spill_addr += imm9(instr);
1722 else
1723 spill_addr = 0; /* Done spilling. */
1724 last_prologue_pc = next_pc;
1725 }
1726 }
1727
1728 pc = next_pc;
1729 }
1730
1731 /* If not frameless and we aren't called by skip_prologue, then we need
1732 to calculate registers for the previous frame which will be needed
1733 later. */
1734
1735 if (!frameless && this_frame)
1736 {
1737 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1738 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1739
1740 /* Extract the size of the rotating portion of the stack
1741 frame and the register rename base from the current
1742 frame marker. */
1743 cfm = cache->cfm;
1744 sor = cache->sor;
1745 sof = cache->sof;
1746 sol = cache->sol;
1747 rrb_gr = (cfm >> 18) & 0x7f;
1748
1749 /* Find the bof (beginning of frame). */
1750 bof = rse_address_add (cache->bsp, -sof);
1751
1752 for (i = 0, addr = bof;
1753 i < sof;
1754 i++, addr += 8)
1755 {
1756 if (IS_NaT_COLLECTION_ADDR (addr))
1757 {
1758 addr += 8;
1759 }
1760 if (i+32 == cfm_reg)
1761 cache->saved_regs[IA64_CFM_REGNUM] = addr;
1762 if (i+32 == ret_reg)
1763 cache->saved_regs[IA64_VRAP_REGNUM] = addr;
1764 if (i+32 == fp_reg)
1765 cache->saved_regs[IA64_VFP_REGNUM] = addr;
1766 }
1767
1768 /* For the previous argument registers we require the previous bof.
1769 If we can't find the previous cfm, then we can do nothing. */
1770 cfm = 0;
1771 if (cache->saved_regs[IA64_CFM_REGNUM] != 0)
1772 {
1773 cfm = read_memory_integer (cache->saved_regs[IA64_CFM_REGNUM],
1774 8, byte_order);
1775 }
1776 else if (cfm_reg != 0)
1777 {
1778 get_frame_register (this_frame, cfm_reg, buf);
1779 cfm = extract_unsigned_integer (buf, 8, byte_order);
1780 }
1781 cache->prev_cfm = cfm;
1782
1783 if (cfm != 0)
1784 {
1785 sor = ((cfm >> 14) & 0xf) * 8;
1786 sof = (cfm & 0x7f);
1787 sol = (cfm >> 7) & 0x7f;
1788 rrb_gr = (cfm >> 18) & 0x7f;
1789
1790 /* The previous bof only requires subtraction of the sol (size of
1791 locals) due to the overlap between output and input of
1792 subsequent frames. */
1793 bof = rse_address_add (bof, -sol);
1794
1795 for (i = 0, addr = bof;
1796 i < sof;
1797 i++, addr += 8)
1798 {
1799 if (IS_NaT_COLLECTION_ADDR (addr))
1800 {
1801 addr += 8;
1802 }
1803 if (i < sor)
1804 cache->saved_regs[IA64_GR32_REGNUM
1805 + ((i + (sor - rrb_gr)) % sor)]
1806 = addr;
1807 else
1808 cache->saved_regs[IA64_GR32_REGNUM + i] = addr;
1809 }
1810
1811 }
1812 }
1813
1814 /* Try to trust the lim_pc value whenever possible. */
1815 if (trust_limit && lim_pc >= last_prologue_pc)
1816 last_prologue_pc = lim_pc;
1817
1818 cache->frameless = frameless;
1819 cache->after_prologue = last_prologue_pc;
1820 cache->mem_stack_frame_size = mem_stack_frame_size;
1821 cache->fp_reg = fp_reg;
1822
1823 return last_prologue_pc;
1824 }
1825
1826 CORE_ADDR
1827 ia64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1828 {
1829 struct ia64_frame_cache cache;
1830 cache.base = 0;
1831 cache.after_prologue = 0;
1832 cache.cfm = 0;
1833 cache.bsp = 0;
1834
1835 /* Call examine_prologue with 0 as the third argument since we don't
1836 have a frame to pass. */
1837 return examine_prologue (pc, pc+1024, 0, &cache);
1838 }
1839
1840
1841 /* Normal frames. */
1842
1843 static struct ia64_frame_cache *
1844 ia64_frame_cache (struct frame_info *this_frame, void **this_cache)
1845 {
1846 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1847 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1848 struct ia64_frame_cache *cache;
1849 gdb_byte buf[8];
1850 CORE_ADDR cfm;
1851
1852 if (*this_cache)
1853 return (struct ia64_frame_cache *) *this_cache;
1854
1855 cache = ia64_alloc_frame_cache ();
1856 *this_cache = cache;
1857
1858 get_frame_register (this_frame, sp_regnum, buf);
1859 cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
1860
1861 /* We always want the bsp to point to the end of frame.
1862 This way, we can always get the beginning of frame (bof)
1863 by subtracting frame size. */
1864 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
1865 cache->bsp = extract_unsigned_integer (buf, 8, byte_order);
1866
1867 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
1868
1869 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
1870 cfm = extract_unsigned_integer (buf, 8, byte_order);
1871
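/* A sketch of the CFM layout assumed by the extractions below, per the
   IA-64 architecture definition of the current frame marker: bits 0-6
   hold sof (size of frame), bits 7-13 sol (size of locals), bits 14-17
   sor (size of the rotating region, in units of 8 registers), bits
   18-24 rrb.gr, bits 25-31 rrb.fr and bits 32-37 rrb.pr.  */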
1872 cache->sof = (cfm & 0x7f);
1873 cache->sol = (cfm >> 7) & 0x7f;
1874 cache->sor = ((cfm >> 14) & 0xf) * 8;
1875
1876 cache->cfm = cfm;
1877
1878 cache->pc = get_frame_func (this_frame);
1879
1880 if (cache->pc != 0)
1881 examine_prologue (cache->pc, get_frame_pc (this_frame), this_frame, cache);
1882
1883 cache->base = cache->saved_sp + cache->mem_stack_frame_size;
1884
1885 return cache;
1886 }
1887
1888 static void
1889 ia64_frame_this_id (struct frame_info *this_frame, void **this_cache,
1890 struct frame_id *this_id)
1891 {
1892 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1893 struct ia64_frame_cache *cache =
1894 ia64_frame_cache (this_frame, this_cache);
1895
1896 /* Unless this is the outermost frame (base == 0), build a frame id;
the outermost frame keeps the null frame id. */
1897 if (cache->base != 0)
1898 (*this_id) = frame_id_build_special (cache->base, cache->pc, cache->bsp);
1899 if (gdbarch_debug >= 1)
1900 fprintf_unfiltered (gdb_stdlog,
1901 "regular frame id: code %s, stack %s, "
1902 "special %s, this_frame %s\n",
1903 paddress (gdbarch, this_id->code_addr),
1904 paddress (gdbarch, this_id->stack_addr),
1905 paddress (gdbarch, cache->bsp),
1906 host_address_to_string (this_frame));
1907 }
1908
1909 static struct value *
1910 ia64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
1911 int regnum)
1912 {
1913 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1914 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1915 struct ia64_frame_cache *cache = ia64_frame_cache (this_frame, this_cache);
1916 gdb_byte buf[8];
1917
1918 gdb_assert (regnum >= 0);
1919
1920 if (!target_has_registers)
1921 error (_("No registers."));
1922
1923 if (regnum == gdbarch_sp_regnum (gdbarch))
1924 return frame_unwind_got_constant (this_frame, regnum, cache->base);
1925
1926 else if (regnum == IA64_BSP_REGNUM)
1927 {
1928 struct value *val;
1929 CORE_ADDR prev_cfm, bsp, prev_bsp;
1930
1931 /* We want to calculate the previous bsp as the end of the previous
1932 register stack frame. This corresponds to what the hardware bsp
1933 register will be if we pop the frame back which is why we might
1934 have been called. We know the beginning of the current frame is
1935 cache->bsp - cache->sof. This value in the previous frame points
1936 to the start of the output registers. We can calculate the end of
1937 that frame by adding the size of output:
1938 (sof (size of frame) - sol (size of locals)). */
1939 val = ia64_frame_prev_register (this_frame, this_cache, IA64_CFM_REGNUM);
1940 prev_cfm = extract_unsigned_integer (value_contents_all (val),
1941 8, byte_order);
1942 bsp = rse_address_add (cache->bsp, -(cache->sof));
1943 prev_bsp =
1944 rse_address_add (bsp, (prev_cfm & 0x7f) - ((prev_cfm >> 7) & 0x7f));
1945
1946 return frame_unwind_got_constant (this_frame, regnum, prev_bsp);
1947 }
1948
1949 else if (regnum == IA64_CFM_REGNUM)
1950 {
1951 CORE_ADDR addr = cache->saved_regs[IA64_CFM_REGNUM];
1952
1953 if (addr != 0)
1954 return frame_unwind_got_memory (this_frame, regnum, addr);
1955
1956 if (cache->prev_cfm)
1957 return frame_unwind_got_constant (this_frame, regnum, cache->prev_cfm);
1958
1959 if (cache->frameless)
1960 return frame_unwind_got_register (this_frame, IA64_PFS_REGNUM,
1961 IA64_PFS_REGNUM);
1962 return frame_unwind_got_register (this_frame, regnum, 0);
1963 }
1964
1965 else if (regnum == IA64_VFP_REGNUM)
1966 {
1967 /* If the function in question uses an automatic register (r32-r127)
1968 for the frame pointer, it'll be found by ia64_find_saved_register()
1969 above. If the function lacks one of these frame pointers, we can
1970 still provide a value since we know the size of the frame. */
1971 return frame_unwind_got_constant (this_frame, regnum, cache->base);
1972 }
1973
1974 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1975 {
1976 struct value *pr_val;
1977 ULONGEST prN;
1978
1979 pr_val = ia64_frame_prev_register (this_frame, this_cache,
1980 IA64_PR_REGNUM);
1981 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1982 {
1983 /* Fetch predicate register rename base from current frame
1984 marker for this frame. */
1985 int rrb_pr = (cache->cfm >> 32) & 0x3f;
1986
1987 /* Adjust the register number to account for register rotation. */
1988 regnum = VP16_REGNUM + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1989 }
1990 prN = extract_bit_field (value_contents_all (pr_val),
1991 regnum - VP0_REGNUM, 1);
1992 return frame_unwind_got_constant (this_frame, regnum, prN);
1993 }
1994
1995 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
1996 {
1997 struct value *unat_val;
1998 ULONGEST unatN;
1999 unat_val = ia64_frame_prev_register (this_frame, this_cache,
2000 IA64_UNAT_REGNUM);
2001 unatN = extract_bit_field (value_contents_all (unat_val),
2002 regnum - IA64_NAT0_REGNUM, 1);
2003 return frame_unwind_got_constant (this_frame, regnum, unatN);
2004 }
2005
2006 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2007 {
2008 int natval = 0;
2009 /* Find address of general register corresponding to nat bit we're
2010 interested in. */
2011 CORE_ADDR gr_addr;
2012
2013 gr_addr = cache->saved_regs[regnum - IA64_NAT0_REGNUM + IA64_GR0_REGNUM];
2014
2015 if (gr_addr != 0)
2016 {
2017 /* Compute address of nat collection bits. */
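/* A sketch of the RSE layout assumed here: every group of 63 stacked
   registers shares one NaT collection word, stored at the address in
   the group whose slot bits (address bits 3-8) are all ones.  Oring
   0x1f8 into the register's backing-store address therefore yields
   that collection word.  */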
2018 CORE_ADDR nat_addr = gr_addr | 0x1f8;
2019 CORE_ADDR bsp;
2020 CORE_ADDR nat_collection;
2021 int nat_bit;
2022
2023 /* If our nat collection address is bigger than bsp, we have to get
2024 the nat collection from rnat. Otherwise, we fetch the nat
2025 collection from the computed address. */
2026 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2027 bsp = extract_unsigned_integer (buf, 8, byte_order);
2028 if (nat_addr >= bsp)
2029 {
2030 get_frame_register (this_frame, IA64_RNAT_REGNUM, buf);
2031 nat_collection = extract_unsigned_integer (buf, 8, byte_order);
2032 }
2033 else
2034 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
2035 nat_bit = (gr_addr >> 3) & 0x3f;
2036 natval = (nat_collection >> nat_bit) & 1;
2037 }
2038
2039 return frame_unwind_got_constant (this_frame, regnum, natval);
2040 }
2041
2042 else if (regnum == IA64_IP_REGNUM)
2043 {
2044 CORE_ADDR pc = 0;
2045 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2046
2047 if (addr != 0)
2048 {
2049 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2050 pc = extract_unsigned_integer (buf, 8, byte_order);
2051 }
2052 else if (cache->frameless)
2053 {
2054 get_frame_register (this_frame, IA64_BR0_REGNUM, buf);
2055 pc = extract_unsigned_integer (buf, 8, byte_order);
2056 }
2057 pc &= ~0xf;
2058 return frame_unwind_got_constant (this_frame, regnum, pc);
2059 }
2060
2061 else if (regnum == IA64_PSR_REGNUM)
2062 {
2063 /* We don't know how to get the complete previous PSR, but we need it
2064 for the slot information when we unwind the pc (pc is formed of IP
2065 register plus slot information from PSR). To get the previous
2066 slot information, we extract it from the low bits of the return address. */
2067 ULONGEST slot_num = 0;
2068 CORE_ADDR pc = 0;
2069 CORE_ADDR psr = 0;
2070 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2071
2072 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
2073 psr = extract_unsigned_integer (buf, 8, byte_order);
2074
2075 if (addr != 0)
2076 {
2077 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2078 pc = extract_unsigned_integer (buf, 8, byte_order);
2079 }
2080 else if (cache->frameless)
2081 {
2082 get_frame_register (this_frame, IA64_BR0_REGNUM, buf);
2083 pc = extract_unsigned_integer (buf, 8, byte_order);
2084 }
2085 psr &= ~(3LL << 41);
2086 slot_num = pc & 0x3LL;
2087 psr |= (CORE_ADDR)slot_num << 41;
2088 return frame_unwind_got_constant (this_frame, regnum, psr);
2089 }
2090
2091 else if (regnum == IA64_BR0_REGNUM)
2092 {
2093 CORE_ADDR addr = cache->saved_regs[IA64_BR0_REGNUM];
2094
2095 if (addr != 0)
2096 return frame_unwind_got_memory (this_frame, regnum, addr);
2097
2098 return frame_unwind_got_constant (this_frame, regnum, 0);
2099 }
2100
2101 else if ((regnum >= IA64_GR32_REGNUM && regnum <= IA64_GR127_REGNUM)
2102 || (regnum >= V32_REGNUM && regnum <= V127_REGNUM))
2103 {
2104 CORE_ADDR addr = 0;
2105
2106 if (regnum >= V32_REGNUM)
2107 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2108 addr = cache->saved_regs[regnum];
2109 if (addr != 0)
2110 return frame_unwind_got_memory (this_frame, regnum, addr);
2111
2112 if (cache->frameless)
2113 {
2114 struct value *reg_val;
2115 CORE_ADDR prev_cfm, prev_bsp, prev_bof;
2116
2117 /* FIXME: brobecker/2008-05-01: Doesn't this seem redundant
2118 with the same code above? */
2119 if (regnum >= V32_REGNUM)
2120 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2121 reg_val = ia64_frame_prev_register (this_frame, this_cache,
2122 IA64_CFM_REGNUM);
2123 prev_cfm = extract_unsigned_integer (value_contents_all (reg_val),
2124 8, byte_order);
2125 reg_val = ia64_frame_prev_register (this_frame, this_cache,
2126 IA64_BSP_REGNUM);
2127 prev_bsp = extract_unsigned_integer (value_contents_all (reg_val),
2128 8, byte_order);
2129 prev_bof = rse_address_add (prev_bsp, -(prev_cfm & 0x7f));
2130
2131 addr = rse_address_add (prev_bof, (regnum - IA64_GR32_REGNUM));
2132 return frame_unwind_got_memory (this_frame, regnum, addr);
2133 }
2134
2135 return frame_unwind_got_constant (this_frame, regnum, 0);
2136 }
2137
2138 else /* All other registers. */
2139 {
2140 CORE_ADDR addr = 0;
2141
2142 if (IA64_FR32_REGNUM <= regnum && regnum <= IA64_FR127_REGNUM)
2143 {
2144 /* Fetch floating point register rename base from current
2145 frame marker for this frame. */
2146 int rrb_fr = (cache->cfm >> 25) & 0x7f;
2147
2148 /* Adjust the floating point register number to account for
2149 register rotation. */
2150 regnum = IA64_FR32_REGNUM
2151 + ((regnum - IA64_FR32_REGNUM) + rrb_fr) % 96;
2152 }
2153
2154 /* If we have stored a memory address, access the register. */
2155 addr = cache->saved_regs[regnum];
2156 if (addr != 0)
2157 return frame_unwind_got_memory (this_frame, regnum, addr);
2158 /* Otherwise, punt and get the current value of the register. */
2159 else
2160 return frame_unwind_got_register (this_frame, regnum, regnum);
2161 }
2162 }
2163
2164 static const struct frame_unwind ia64_frame_unwind =
2165 {
2166 NORMAL_FRAME,
2167 default_frame_unwind_stop_reason,
2168 &ia64_frame_this_id,
2169 &ia64_frame_prev_register,
2170 NULL,
2171 default_frame_sniffer
2172 };
2173
2174 /* Signal trampolines. */
2175
2176 static void
2177 ia64_sigtramp_frame_init_saved_regs (struct frame_info *this_frame,
2178 struct ia64_frame_cache *cache)
2179 {
2180 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2181 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2182
2183 if (tdep->sigcontext_register_address)
2184 {
2185 int regno;
2186
2187 cache->saved_regs[IA64_VRAP_REGNUM]
2188 = tdep->sigcontext_register_address (gdbarch, cache->base,
2189 IA64_IP_REGNUM);
2190 cache->saved_regs[IA64_CFM_REGNUM]
2191 = tdep->sigcontext_register_address (gdbarch, cache->base,
2192 IA64_CFM_REGNUM);
2193 cache->saved_regs[IA64_PSR_REGNUM]
2194 = tdep->sigcontext_register_address (gdbarch, cache->base,
2195 IA64_PSR_REGNUM);
2196 cache->saved_regs[IA64_BSP_REGNUM]
2197 = tdep->sigcontext_register_address (gdbarch, cache->base,
2198 IA64_BSP_REGNUM);
2199 cache->saved_regs[IA64_RNAT_REGNUM]
2200 = tdep->sigcontext_register_address (gdbarch, cache->base,
2201 IA64_RNAT_REGNUM);
2202 cache->saved_regs[IA64_CCV_REGNUM]
2203 = tdep->sigcontext_register_address (gdbarch, cache->base,
2204 IA64_CCV_REGNUM);
2205 cache->saved_regs[IA64_UNAT_REGNUM]
2206 = tdep->sigcontext_register_address (gdbarch, cache->base,
2207 IA64_UNAT_REGNUM);
2208 cache->saved_regs[IA64_FPSR_REGNUM]
2209 = tdep->sigcontext_register_address (gdbarch, cache->base,
2210 IA64_FPSR_REGNUM);
2211 cache->saved_regs[IA64_PFS_REGNUM]
2212 = tdep->sigcontext_register_address (gdbarch, cache->base,
2213 IA64_PFS_REGNUM);
2214 cache->saved_regs[IA64_LC_REGNUM]
2215 = tdep->sigcontext_register_address (gdbarch, cache->base,
2216 IA64_LC_REGNUM);
2217
2218 for (regno = IA64_GR1_REGNUM; regno <= IA64_GR31_REGNUM; regno++)
2219 cache->saved_regs[regno] =
2220 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2221 for (regno = IA64_BR0_REGNUM; regno <= IA64_BR7_REGNUM; regno++)
2222 cache->saved_regs[regno] =
2223 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2224 for (regno = IA64_FR2_REGNUM; regno <= IA64_FR31_REGNUM; regno++)
2225 cache->saved_regs[regno] =
2226 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2227 }
2228 }
2229
2230 static struct ia64_frame_cache *
2231 ia64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
2232 {
2233 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2234 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2235 struct ia64_frame_cache *cache;
2236 gdb_byte buf[8];
2237
2238 if (*this_cache)
2239 return (struct ia64_frame_cache *) *this_cache;
2240
2241 cache = ia64_alloc_frame_cache ();
2242
2243 get_frame_register (this_frame, sp_regnum, buf);
2244 /* Note that frame size is hard-coded below. We cannot calculate it
2245 via prologue examination. */
2246 cache->base = extract_unsigned_integer (buf, 8, byte_order) + 16;
2247
2248 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2249 cache->bsp = extract_unsigned_integer (buf, 8, byte_order);
2250
2251 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
2252 cache->cfm = extract_unsigned_integer (buf, 8, byte_order);
2253 cache->sof = cache->cfm & 0x7f;
2254
2255 ia64_sigtramp_frame_init_saved_regs (this_frame, cache);
2256
2257 *this_cache = cache;
2258 return cache;
2259 }
2260
2261 static void
2262 ia64_sigtramp_frame_this_id (struct frame_info *this_frame,
2263 void **this_cache, struct frame_id *this_id)
2264 {
2265 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2266 struct ia64_frame_cache *cache =
2267 ia64_sigtramp_frame_cache (this_frame, this_cache);
2268
2269 (*this_id) = frame_id_build_special (cache->base,
2270 get_frame_pc (this_frame),
2271 cache->bsp);
2272 if (gdbarch_debug >= 1)
2273 fprintf_unfiltered (gdb_stdlog,
2274 "sigtramp frame id: code %s, stack %s, "
2275 "special %s, this_frame %s\n",
2276 paddress (gdbarch, this_id->code_addr),
2277 paddress (gdbarch, this_id->stack_addr),
2278 paddress (gdbarch, cache->bsp),
2279 host_address_to_string (this_frame));
2280 }
2281
2282 static struct value *
2283 ia64_sigtramp_frame_prev_register (struct frame_info *this_frame,
2284 void **this_cache, int regnum)
2285 {
2286 struct ia64_frame_cache *cache =
2287 ia64_sigtramp_frame_cache (this_frame, this_cache);
2288
2289 gdb_assert (regnum >= 0);
2290
2291 if (!target_has_registers)
2292 error (_("No registers."));
2293
2294 if (regnum == IA64_IP_REGNUM)
2295 {
2296 CORE_ADDR pc = 0;
2297 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2298
2299 if (addr != 0)
2300 {
2301 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2302 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2303 pc = read_memory_unsigned_integer (addr, 8, byte_order);
2304 }
2305 pc &= ~0xf;
2306 return frame_unwind_got_constant (this_frame, regnum, pc);
2307 }
2308
2309 else if ((regnum >= IA64_GR32_REGNUM && regnum <= IA64_GR127_REGNUM)
2310 || (regnum >= V32_REGNUM && regnum <= V127_REGNUM))
2311 {
2312 CORE_ADDR addr = 0;
2313
2314 if (regnum >= V32_REGNUM)
2315 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2316 addr = cache->saved_regs[regnum];
2317 if (addr != 0)
2318 return frame_unwind_got_memory (this_frame, regnum, addr);
2319
2320 return frame_unwind_got_constant (this_frame, regnum, 0);
2321 }
2322
2323 else /* All other registers not listed above. */
2324 {
2325 CORE_ADDR addr = cache->saved_regs[regnum];
2326
2327 if (addr != 0)
2328 return frame_unwind_got_memory (this_frame, regnum, addr);
2329
2330 return frame_unwind_got_constant (this_frame, regnum, 0);
2331 }
2332 }
2333
2334 static int
2335 ia64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2336 struct frame_info *this_frame,
2337 void **this_cache)
2338 {
2339 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
2340 if (tdep->pc_in_sigtramp)
2341 {
2342 CORE_ADDR pc = get_frame_pc (this_frame);
2343
2344 if (tdep->pc_in_sigtramp (pc))
2345 return 1;
2346 }
2347
2348 return 0;
2349 }
2350
2351 static const struct frame_unwind ia64_sigtramp_frame_unwind =
2352 {
2353 SIGTRAMP_FRAME,
2354 default_frame_unwind_stop_reason,
2355 ia64_sigtramp_frame_this_id,
2356 ia64_sigtramp_frame_prev_register,
2357 NULL,
2358 ia64_sigtramp_frame_sniffer
2359 };
2360
2361 \f
2362
2363 static CORE_ADDR
2364 ia64_frame_base_address (struct frame_info *this_frame, void **this_cache)
2365 {
2366 struct ia64_frame_cache *cache = ia64_frame_cache (this_frame, this_cache);
2367
2368 return cache->base;
2369 }
2370
2371 static const struct frame_base ia64_frame_base =
2372 {
2373 &ia64_frame_unwind,
2374 ia64_frame_base_address,
2375 ia64_frame_base_address,
2376 ia64_frame_base_address
2377 };
2378
2379 #ifdef HAVE_LIBUNWIND_IA64_H
2380
2381 struct ia64_unwind_table_entry
2382 {
2383 unw_word_t start_offset;
2384 unw_word_t end_offset;
2385 unw_word_t info_offset;
2386 };
2387
2388 static __inline__ uint64_t
2389 ia64_rse_slot_num (uint64_t addr)
2390 {
2391 return (addr >> 3) & 0x3f;
2392 }
2393
2394 /* Skip over a designated number of registers in the backing
2395 store, remembering that every 64th slot holds a NaT collection. */
2396 static __inline__ uint64_t
2397 ia64_rse_skip_regs (uint64_t addr, long num_regs)
2398 {
2399 long delta = ia64_rse_slot_num(addr) + num_regs;
2400
2401 if (num_regs < 0)
2402 delta -= 0x3e;
2403 return addr + ((num_regs + delta/0x3f) << 3);
2404 }
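/* A worked example of the arithmetic above: starting from an address in
   slot 62, skipping forward 3 registers crosses the NaT collection word
   in slot 63, so the address advances by 4 slots (32 bytes).  Likewise,
   skipping backward 3 registers from slot 1 crosses the previous group's
   collection word and moves back 32 bytes.  */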
2405
2406 /* Gdb ia64-libunwind-tdep callback function to convert from an ia64 gdb
2407 register number to a libunwind register number. */
2408 static int
2409 ia64_gdb2uw_regnum (int regnum)
2410 {
2411 if (regnum == sp_regnum)
2412 return UNW_IA64_SP;
2413 else if (regnum == IA64_BSP_REGNUM)
2414 return UNW_IA64_BSP;
2415 else if ((unsigned) (regnum - IA64_GR0_REGNUM) < 128)
2416 return UNW_IA64_GR + (regnum - IA64_GR0_REGNUM);
2417 else if ((unsigned) (regnum - V32_REGNUM) < 95)
2418 return UNW_IA64_GR + 32 + (regnum - V32_REGNUM);
2419 else if ((unsigned) (regnum - IA64_FR0_REGNUM) < 128)
2420 return UNW_IA64_FR + (regnum - IA64_FR0_REGNUM);
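/* The 64 individual predicate registers have no separate libunwind
   register numbers; only the combined PR register (mapped below) does,
   so report them as unmappable.  */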
2421 else if ((unsigned) (regnum - IA64_PR0_REGNUM) < 64)
2422 return -1;
2423 else if ((unsigned) (regnum - IA64_BR0_REGNUM) < 8)
2424 return UNW_IA64_BR + (regnum - IA64_BR0_REGNUM);
2425 else if (regnum == IA64_PR_REGNUM)
2426 return UNW_IA64_PR;
2427 else if (regnum == IA64_IP_REGNUM)
2428 return UNW_REG_IP;
2429 else if (regnum == IA64_CFM_REGNUM)
2430 return UNW_IA64_CFM;
2431 else if ((unsigned) (regnum - IA64_AR0_REGNUM) < 128)
2432 return UNW_IA64_AR + (regnum - IA64_AR0_REGNUM);
2433 else if ((unsigned) (regnum - IA64_NAT0_REGNUM) < 128)
2434 return UNW_IA64_NAT + (regnum - IA64_NAT0_REGNUM);
2435 else
2436 return -1;
2437 }
2438
2439 /* Gdb ia64-libunwind-tdep callback function to convert from a libunwind
2440 register number to an ia64 gdb register number. */
2441 static int
2442 ia64_uw2gdb_regnum (int uw_regnum)
2443 {
2444 if (uw_regnum == UNW_IA64_SP)
2445 return sp_regnum;
2446 else if (uw_regnum == UNW_IA64_BSP)
2447 return IA64_BSP_REGNUM;
2448 else if ((unsigned) (uw_regnum - UNW_IA64_GR) < 32)
2449 return IA64_GR0_REGNUM + (uw_regnum - UNW_IA64_GR);
2450 else if ((unsigned) (uw_regnum - UNW_IA64_GR) < 128)
2451 return V32_REGNUM + (uw_regnum - (IA64_GR0_REGNUM + 32));
2452 else if ((unsigned) (uw_regnum - UNW_IA64_FR) < 128)
2453 return IA64_FR0_REGNUM + (uw_regnum - UNW_IA64_FR);
2454 else if ((unsigned) (uw_regnum - UNW_IA64_BR) < 8)
2455 return IA64_BR0_REGNUM + (uw_regnum - UNW_IA64_BR);
2456 else if (uw_regnum == UNW_IA64_PR)
2457 return IA64_PR_REGNUM;
2458 else if (uw_regnum == UNW_REG_IP)
2459 return IA64_IP_REGNUM;
2460 else if (uw_regnum == UNW_IA64_CFM)
2461 return IA64_CFM_REGNUM;
2462 else if ((unsigned) (uw_regnum - UNW_IA64_AR) < 128)
2463 return IA64_AR0_REGNUM + (uw_regnum - UNW_IA64_AR);
2464 else if ((unsigned) (uw_regnum - UNW_IA64_NAT) < 128)
2465 return IA64_NAT0_REGNUM + (uw_regnum - UNW_IA64_NAT);
2466 else
2467 return -1;
2468 }
2469
2470 /* Gdb ia64-libunwind-tdep callback function to report whether a register
2471 is a floating-point register. */
2472 static int
2473 ia64_is_fpreg (int uw_regnum)
2474 {
2475 return unw_is_fpreg (uw_regnum);
2476 }
2477
2478 /* Libunwind callback accessor function for general registers. */
2479 static int
2480 ia64_access_reg (unw_addr_space_t as, unw_regnum_t uw_regnum, unw_word_t *val,
2481 int write, void *arg)
2482 {
2483 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2484 unw_word_t bsp, sof, cfm, psr, ip;
2485 struct frame_info *this_frame = (struct frame_info *) arg;
2486 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2487 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2488 long new_sof, old_sof;
2489
2490 /* We never call any libunwind routines that need to write registers. */
2491 gdb_assert (!write);
2492
2493 switch (uw_regnum)
2494 {
2495 case UNW_REG_IP:
2496 /* Libunwind expects to see the pc value which means the slot number
2497 from the psr must be merged with the ip word address. */
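/* PSR bits 41-42 hold the ri (restart instruction) slot number, assuming
   the standard IA-64 PSR layout; oring them into the bundle-aligned ip
   reconstructs the pc value libunwind expects.  */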
2498 ip = get_frame_register_unsigned (this_frame, IA64_IP_REGNUM);
2499 psr = get_frame_register_unsigned (this_frame, IA64_PSR_REGNUM);
2500 *val = ip | ((psr >> 41) & 0x3);
2501 break;
2502
2503 case UNW_IA64_AR_BSP:
2504 /* Libunwind expects to see the beginning of the current
2505 register frame so we must account for the fact that
2506 ptrace() will return a value for bsp that points *after*
2507 the current register frame. */
2508 bsp = get_frame_register_unsigned (this_frame, IA64_BSP_REGNUM);
2509 cfm = get_frame_register_unsigned (this_frame, IA64_CFM_REGNUM);
2510 sof = gdbarch_tdep (gdbarch)->size_of_register_frame (this_frame, cfm);
2511 *val = ia64_rse_skip_regs (bsp, -sof);
2512 break;
2513
2514 case UNW_IA64_AR_BSPSTORE:
2515 /* Libunwind wants bspstore to be after the current register frame.
2516 This is what ptrace() and gdb treat as the regular bsp value. */
2517 *val = get_frame_register_unsigned (this_frame, IA64_BSP_REGNUM);
2518 break;
2519
2520 default:
2521 /* For all other registers, just unwind the value directly. */
2522 *val = get_frame_register_unsigned (this_frame, regnum);
2523 break;
2524 }
2525
2526 if (gdbarch_debug >= 1)
2527 fprintf_unfiltered (gdb_stdlog,
2528 " access_reg: from cache: %4s=%s\n",
2529 (((unsigned) regnum <= IA64_NAT127_REGNUM)
2530 ? ia64_register_names[regnum] : "r??"),
2531 paddress (gdbarch, *val));
2532 return 0;
2533 }
2534
2535 /* Libunwind callback accessor function for floating-point registers. */
2536 static int
2537 ia64_access_fpreg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2538 unw_fpreg_t *val, int write, void *arg)
2539 {
2540 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2541 struct frame_info *this_frame = (struct frame_info *) arg;
2542
2543 /* We never call any libunwind routines that need to write registers. */
2544 gdb_assert (!write);
2545
2546 get_frame_register (this_frame, regnum, (gdb_byte *) val);
2547
2548 return 0;
2549 }
2550
2551 /* Libunwind callback accessor function for top-level rse registers. */
2552 static int
2553 ia64_access_rse_reg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2554 unw_word_t *val, int write, void *arg)
2555 {
2556 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2557 unw_word_t bsp, sof, cfm, psr, ip;
2558 struct regcache *regcache = (struct regcache *) arg;
2559 struct gdbarch *gdbarch = regcache->arch ();
2560 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2561 long new_sof, old_sof;
2562
2563 /* We never call any libunwind routines that need to write registers. */
2564 gdb_assert (!write);
2565
2566 switch (uw_regnum)
2567 {
2568 case UNW_REG_IP:
2569 /* Libunwind expects to see the pc value which means the slot number
2570 from the psr must be merged with the ip word address. */
2571 regcache_cooked_read_unsigned (regcache, IA64_IP_REGNUM, &ip);
2572 regcache_cooked_read_unsigned (regcache, IA64_PSR_REGNUM, &psr);
2573 *val = ip | ((psr >> 41) & 0x3);
2574 break;
2575
2576 case UNW_IA64_AR_BSP:
2577 /* Libunwind expects to see the beginning of the current
2578 register frame so we must account for the fact that
2579 ptrace() will return a value for bsp that points *after*
2580 the current register frame. */
2581 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
2582 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
2583 sof = (cfm & 0x7f);
2584 *val = ia64_rse_skip_regs (bsp, -sof);
2585 break;
2586
2587 case UNW_IA64_AR_BSPSTORE:
2588 /* Libunwind wants bspstore to be after the current register frame.
2589 This is what ptrace() and gdb treat as the regular bsp value. */
2590 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, val);
2591 break;
2592
2593 default:
2594 /* For all other registers, just unwind the value directly. */
2595 regcache_cooked_read_unsigned (regcache, regnum, val);
2596 break;
2597 }
2598
2599 if (gdbarch_debug >= 1)
2600 fprintf_unfiltered (gdb_stdlog,
2601 " access_rse_reg: from cache: %4s=%s\n",
2602 (((unsigned) regnum <= IA64_NAT127_REGNUM)
2603 ? ia64_register_names[regnum] : "r??"),
2604 paddress (gdbarch, *val));
2605
2606 return 0;
2607 }
2608
2609 /* Libunwind callback accessor function for top-level fp registers. */
2610 static int
2611 ia64_access_rse_fpreg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2612 unw_fpreg_t *val, int write, void *arg)
2613 {
2614 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2615 struct regcache *regcache = (struct regcache *) arg;
2616
2617 /* We never call any libunwind routines that need to write registers. */
2618 gdb_assert (!write);
2619
2620 regcache->cooked_read (regnum, (gdb_byte *) val);
2621
2622 return 0;
2623 }
2624
2625 /* Libunwind callback accessor function for accessing memory. */
2626 static int
2627 ia64_access_mem (unw_addr_space_t as,
2628 unw_word_t addr, unw_word_t *val,
2629 int write, void *arg)
2630 {
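/* Addresses inside the synthetic KERNEL_START..KERNEL_START+ktab_size
   window refer to the locally cached copy of the kernel unwind table,
   so serve them from ktab instead of target memory.  */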
2631 if (addr - KERNEL_START < ktab_size)
2632 {
2633 unw_word_t *laddr = (unw_word_t*) ((char *) ktab
2634 + (addr - KERNEL_START));
2635
2636 if (write)
2637 *laddr = *val;
2638 else
2639 *val = *laddr;
2640 return 0;
2641 }
2642
2643 /* XXX do we need to normalize byte-order here? */
2644 if (write)
2645 return target_write_memory (addr, (gdb_byte *) val, sizeof (unw_word_t));
2646 else
2647 return target_read_memory (addr, (gdb_byte *) val, sizeof (unw_word_t));
2648 }
2649
2650 /* Call low-level function to access the kernel unwind table. */
2651 static gdb::optional<gdb::byte_vector>
2652 getunwind_table ()
2653 {
2654 /* FIXME drow/2005-09-10: This code used to call
2655 ia64_linux_xfer_unwind_table directly to fetch the unwind table
2656 for the currently running ia64-linux kernel. That data should
2657 come from the core file and be accessed via the auxv vector; if
2658 we want to preserve the fallback to the running kernel's table, then
2659 we should find a way to override the corefile layer's
2660 xfer_partial method. */
2661
2662 return target_read_alloc (current_top_target (), TARGET_OBJECT_UNWIND_TABLE,
2663 NULL);
2664 }
2665
2666 /* Get the kernel unwind table. */
2667 static int
2668 get_kernel_table (unw_word_t ip, unw_dyn_info_t *di)
2669 {
2670 static struct ia64_table_entry *etab;
2671
2672 if (!ktab)
2673 {
2674 ktab_buf = getunwind_table ();
2675 if (!ktab_buf)
2676 return -UNW_ENOINFO;
2677
2678 ktab = (struct ia64_table_entry *) ktab_buf->data ();
2679 ktab_size = ktab_buf->size ();
2680
2681 for (etab = ktab; etab->start_offset; ++etab)
2682 etab->info_offset += KERNEL_START;
2683 }
2684
2685 if (ip < ktab[0].start_offset || ip >= etab[-1].end_offset)
2686 return -UNW_ENOINFO;
2687
2688 di->format = UNW_INFO_FORMAT_TABLE;
2689 di->gp = 0;
2690 di->start_ip = ktab[0].start_offset;
2691 di->end_ip = etab[-1].end_offset;
2692 di->u.ti.name_ptr = (unw_word_t) "<kernel>";
2693 di->u.ti.segbase = 0;
2694 di->u.ti.table_len = ((char *) etab - (char *) ktab) / sizeof (unw_word_t);
2695 di->u.ti.table_data = (unw_word_t *) ktab;
2696
2697 if (gdbarch_debug >= 1)
2698 fprintf_unfiltered (gdb_stdlog, "get_kernel_table: found table `%s': "
2699 "segbase=%s, length=%s, gp=%s\n",
2700 (char *) di->u.ti.name_ptr,
2701 hex_string (di->u.ti.segbase),
2702 pulongest (di->u.ti.table_len),
2703 hex_string (di->gp));
2704 return 0;
2705 }
2706
2707 /* Find the unwind table entry for a specified address. */
2708 static int
2709 ia64_find_unwind_table (struct objfile *objfile, unw_word_t ip,
2710 unw_dyn_info_t *dip, void **buf)
2711 {
2712 Elf_Internal_Phdr *phdr, *p_text = NULL, *p_unwind = NULL;
2713 Elf_Internal_Ehdr *ehdr;
2714 unw_word_t segbase = 0;
2715 CORE_ADDR load_base;
2716 bfd *bfd;
2717 int i;
2718
2719 bfd = objfile->obfd;
2720
2721 ehdr = elf_tdata (bfd)->elf_header;
2722 phdr = elf_tdata (bfd)->phdr;
2723
2724 load_base = ANOFFSET (objfile->section_offsets, SECT_OFF_TEXT (objfile));
2725
2726 for (i = 0; i < ehdr->e_phnum; ++i)
2727 {
2728 switch (phdr[i].p_type)
2729 {
2730 case PT_LOAD:
2731 if ((unw_word_t) (ip - load_base - phdr[i].p_vaddr)
2732 < phdr[i].p_memsz)
2733 p_text = phdr + i;
2734 break;
2735
2736 case PT_IA_64_UNWIND:
2737 p_unwind = phdr + i;
2738 break;
2739
2740 default:
2741 break;
2742 }
2743 }
2744
2745 if (!p_text || !p_unwind)
2746 return -UNW_ENOINFO;
2747
2748 /* Verify that the segment that contains the IP also contains
2749 the static unwind table. If not, we may be in the Linux kernel's
2750 DSO gate page, in which case the unwind table is in another segment.
2751 Otherwise, we are dealing with runtime-generated code, for which we
2752 have no info here. */
2753 segbase = p_text->p_vaddr + load_base;
2754
2755 if ((p_unwind->p_vaddr - p_text->p_vaddr) >= p_text->p_memsz)
2756 {
2757 int ok = 0;
2758 for (i = 0; i < ehdr->e_phnum; ++i)
2759 {
2760 if (phdr[i].p_type == PT_LOAD
2761 && (p_unwind->p_vaddr - phdr[i].p_vaddr) < phdr[i].p_memsz)
2762 {
2763 ok = 1;
2764 /* Get the segbase from the section containing the
2765 libunwind table. */
2766 segbase = phdr[i].p_vaddr + load_base;
2767 }
2768 }
2769 if (!ok)
2770 return -UNW_ENOINFO;
2771 }
2772
2773 dip->start_ip = p_text->p_vaddr + load_base;
2774 dip->end_ip = dip->start_ip + p_text->p_memsz;
2775 dip->gp = ia64_find_global_pointer (get_objfile_arch (objfile), ip);
2776 dip->format = UNW_INFO_FORMAT_REMOTE_TABLE;
2777 dip->u.rti.name_ptr = (unw_word_t) bfd_get_filename (bfd);
2778 dip->u.rti.segbase = segbase;
2779 dip->u.rti.table_len = p_unwind->p_memsz / sizeof (unw_word_t);
2780 dip->u.rti.table_data = p_unwind->p_vaddr + load_base;
2781
2782 return 0;
2783 }
2784
2785 /* Libunwind callback accessor function to acquire procedure unwind-info. */
2786 static int
2787 ia64_find_proc_info_x (unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
2788 int need_unwind_info, void *arg)
2789 {
2790 struct obj_section *sec = find_pc_section (ip);
2791 unw_dyn_info_t di;
2792 int ret;
2793 void *buf = NULL;
2794
2795 if (!sec)
2796 {
2797 /* XXX This only works if the host and the target architecture are
2798 both ia64 and if they have (more or less) the same kernel
2799 version. */
2800 if (get_kernel_table (ip, &di) < 0)
2801 return -UNW_ENOINFO;
2802
2803 if (gdbarch_debug >= 1)
2804 fprintf_unfiltered (gdb_stdlog, "ia64_find_proc_info_x: %s -> "
2805 "(name=`%s',segbase=%s,start=%s,end=%s,gp=%s,"
2806 "length=%s,data=%s)\n",
2807 hex_string (ip), (char *)di.u.ti.name_ptr,
2808 hex_string (di.u.ti.segbase),
2809 hex_string (di.start_ip), hex_string (di.end_ip),
2810 hex_string (di.gp),
2811 pulongest (di.u.ti.table_len),
2812 hex_string ((CORE_ADDR)di.u.ti.table_data));
2813 }
2814 else
2815 {
2816 ret = ia64_find_unwind_table (sec->objfile, ip, &di, &buf);
2817 if (ret < 0)
2818 return ret;
2819
2820 if (gdbarch_debug >= 1)
2821 fprintf_unfiltered (gdb_stdlog, "ia64_find_proc_info_x: %s -> "
2822 "(name=`%s',segbase=%s,start=%s,end=%s,gp=%s,"
2823 "length=%s,data=%s)\n",
2824 hex_string (ip), (char *)di.u.rti.name_ptr,
2825 hex_string (di.u.rti.segbase),
2826 hex_string (di.start_ip), hex_string (di.end_ip),
2827 hex_string (di.gp),
2828 pulongest (di.u.rti.table_len),
2829 hex_string (di.u.rti.table_data));
2830 }
2831
2832 ret = libunwind_search_unwind_table (&as, ip, &di, pi, need_unwind_info,
2833 arg);
2834
2835 /* We no longer need the dyn info storage so free it. */
2836 xfree (buf);
2837
2838 return ret;
2839 }
2840
2841 /* Libunwind callback accessor function for cleanup. */
2842 static void
2843 ia64_put_unwind_info (unw_addr_space_t as,
2844 unw_proc_info_t *pip, void *arg)
2845 {
2846 /* Nothing required for now. */
2847 }
2848
2849 /* Libunwind callback accessor function to get head of the dynamic
2850 unwind-info registration list. */
2851 static int
2852 ia64_get_dyn_info_list (unw_addr_space_t as,
2853 unw_word_t *dilap, void *arg)
2854 {
2855 struct obj_section *text_sec;
2856 struct objfile *objfile;
2857 unw_word_t ip, addr;
2858 unw_dyn_info_t di;
2859 int ret;
2860
2861 if (!libunwind_is_initialized ())
2862 return -UNW_ENOINFO;
2863
2864 for (objfile = object_files; objfile; objfile = objfile->next)
2865 {
2866 void *buf = NULL;
2867
2868 text_sec = objfile->sections + SECT_OFF_TEXT (objfile);
2869 ip = obj_section_addr (text_sec);
2870 ret = ia64_find_unwind_table (objfile, ip, &di, &buf);
2871 if (ret >= 0)
2872 {
2873 addr = libunwind_find_dyn_list (as, &di, arg);
2874 /* We no longer need the dyn info storage so free it. */
2875 xfree (buf);
2876
2877 if (addr)
2878 {
2879 if (gdbarch_debug >= 1)
2880 fprintf_unfiltered (gdb_stdlog,
2881 "dynamic unwind table in objfile %s "
2882 "at %s (gp=%s)\n",
2883 bfd_get_filename (objfile->obfd),
2884 hex_string (addr), hex_string (di.gp));
2885 *dilap = addr;
2886 return 0;
2887 }
2888 }
2889 }
2890 return -UNW_ENOINFO;
2891 }
2892
2893
2894 /* Frame interface functions for libunwind. */
2895
2896 static void
2897 ia64_libunwind_frame_this_id (struct frame_info *this_frame, void **this_cache,
2898 struct frame_id *this_id)
2899 {
2900 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2901 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2902 struct frame_id id = outer_frame_id;
2903 gdb_byte buf[8];
2904 CORE_ADDR bsp;
2905
2906 libunwind_frame_this_id (this_frame, this_cache, &id);
2907 if (frame_id_eq (id, outer_frame_id))
2908 {
2909 (*this_id) = outer_frame_id;
2910 return;
2911 }
2912
2913 /* We must add the bsp as the special address for frame comparison
2914 purposes. */
2915 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2916 bsp = extract_unsigned_integer (buf, 8, byte_order);
2917
2918 (*this_id) = frame_id_build_special (id.stack_addr, id.code_addr, bsp);
2919
2920 if (gdbarch_debug >= 1)
2921 fprintf_unfiltered (gdb_stdlog,
2922 "libunwind frame id: code %s, stack %s, "
2923 "special %s, this_frame %s\n",
2924 paddress (gdbarch, id.code_addr),
2925 paddress (gdbarch, id.stack_addr),
2926 paddress (gdbarch, bsp),
2927 host_address_to_string (this_frame));
2928 }
2929
2930 static struct value *
2931 ia64_libunwind_frame_prev_register (struct frame_info *this_frame,
2932 void **this_cache, int regnum)
2933 {
2934 int reg = regnum;
2935 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2936 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2937 struct value *val;
2938
2939 if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
2940 reg = IA64_PR_REGNUM;
2941 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2942 reg = IA64_UNAT_REGNUM;
2943
2944 /* Let libunwind do most of the work. */
2945 val = libunwind_frame_prev_register (this_frame, this_cache, reg);
2946
2947 if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
2948 {
2949 ULONGEST prN_val;
2950
2951 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
2952 {
2953 int rrb_pr = 0;
2954 ULONGEST cfm;
2955
2956 /* Fetch predicate register rename base from current frame
2957 marker for this frame. */
2958 cfm = get_frame_register_unsigned (this_frame, IA64_CFM_REGNUM);
2959 rrb_pr = (cfm >> 32) & 0x3f;
2960
2961 /* Adjust the register number to account for register rotation. */
2962 regnum = VP16_REGNUM + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
2963 }
2964 prN_val = extract_bit_field (value_contents_all (val),
2965 regnum - VP0_REGNUM, 1);
2966 return frame_unwind_got_constant (this_frame, regnum, prN_val);
2967 }
2968
2969 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2970 {
2971 ULONGEST unatN_val;
2972
2973 unatN_val = extract_bit_field (value_contents_all (val),
2974 regnum - IA64_NAT0_REGNUM, 1);
2975 return frame_unwind_got_constant (this_frame, regnum, unatN_val);
2976 }
2977
2978 else if (regnum == IA64_BSP_REGNUM)
2979 {
2980 struct value *cfm_val;
2981 CORE_ADDR prev_bsp, prev_cfm;
2982
2983 /* We want to calculate the previous bsp as the end of the previous
2984 register stack frame. This corresponds to what the hardware bsp
2985 register will be if we pop the frame back which is why we might
2986 have been called. We know that libunwind will pass us back the
2987 beginning of the current frame so we should just add sof to it. */
2988 prev_bsp = extract_unsigned_integer (value_contents_all (val),
2989 8, byte_order);
2990 cfm_val = libunwind_frame_prev_register (this_frame, this_cache,
2991 IA64_CFM_REGNUM);
2992 prev_cfm = extract_unsigned_integer (value_contents_all (cfm_val),
2993 8, byte_order);
2994 prev_bsp = rse_address_add (prev_bsp, (prev_cfm & 0x7f));
2995
2996 return frame_unwind_got_constant (this_frame, regnum, prev_bsp);
2997 }
2998 else
2999 return val;
3000 }
3001
3002 static int
3003 ia64_libunwind_frame_sniffer (const struct frame_unwind *self,
3004 struct frame_info *this_frame,
3005 void **this_cache)
3006 {
3007 if (libunwind_is_initialized ()
3008 && libunwind_frame_sniffer (self, this_frame, this_cache))
3009 return 1;
3010
3011 return 0;
3012 }
3013
3014 static const struct frame_unwind ia64_libunwind_frame_unwind =
3015 {
3016 NORMAL_FRAME,
3017 default_frame_unwind_stop_reason,
3018 ia64_libunwind_frame_this_id,
3019 ia64_libunwind_frame_prev_register,
3020 NULL,
3021 ia64_libunwind_frame_sniffer,
3022 libunwind_frame_dealloc_cache
3023 };
3024
3025 static void
3026 ia64_libunwind_sigtramp_frame_this_id (struct frame_info *this_frame,
3027 void **this_cache,
3028 struct frame_id *this_id)
3029 {
3030 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3031 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3032 gdb_byte buf[8];
3033 CORE_ADDR bsp;
3034 struct frame_id id = outer_frame_id;
3035 CORE_ADDR prev_ip;
3036
3037 libunwind_frame_this_id (this_frame, this_cache, &id);
3038 if (frame_id_eq (id, outer_frame_id))
3039 {
3040 (*this_id) = outer_frame_id;
3041 return;
3042 }
3043
3044 /* We must add the bsp as the special address for frame comparison
3045 purposes. */
3046 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
3047 bsp = extract_unsigned_integer (buf, 8, byte_order);
3048
3049 /* For a sigtramp frame, we don't make the check for previous ip being 0. */
3050 (*this_id) = frame_id_build_special (id.stack_addr, id.code_addr, bsp);
3051
3052 if (gdbarch_debug >= 1)
3053 fprintf_unfiltered (gdb_stdlog,
3054 "libunwind sigtramp frame id: code %s, "
3055 "stack %s, special %s, this_frame %s\n",
3056 paddress (gdbarch, id.code_addr),
3057 paddress (gdbarch, id.stack_addr),
3058 paddress (gdbarch, bsp),
3059 host_address_to_string (this_frame));
3060 }
3061
3062 static struct value *
3063 ia64_libunwind_sigtramp_frame_prev_register (struct frame_info *this_frame,
3064 void **this_cache, int regnum)
3065 {
3066 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3067 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3068 struct value *prev_ip_val;
3069 CORE_ADDR prev_ip;
3070
3071 /* If the previous frame pc value is 0, then we want to use the SIGCONTEXT
3072 method of getting previous registers. */
3073 prev_ip_val = libunwind_frame_prev_register (this_frame, this_cache,
3074 IA64_IP_REGNUM);
3075 prev_ip = extract_unsigned_integer (value_contents_all (prev_ip_val),
3076 8, byte_order);
3077
3078 if (prev_ip == 0)
3079 {
3080 void *tmp_cache = NULL;
3081 return ia64_sigtramp_frame_prev_register (this_frame, &tmp_cache,
3082 regnum);
3083 }
3084 else
3085 return ia64_libunwind_frame_prev_register (this_frame, this_cache, regnum);
3086 }
3087
3088 static int
3089 ia64_libunwind_sigtramp_frame_sniffer (const struct frame_unwind *self,
3090 struct frame_info *this_frame,
3091 void **this_cache)
3092 {
3093 if (libunwind_is_initialized ())
3094 {
3095 if (libunwind_sigtramp_frame_sniffer (self, this_frame, this_cache))
3096 return 1;
3097 return 0;
3098 }
3099 else
3100 return ia64_sigtramp_frame_sniffer (self, this_frame, this_cache);
3101 }
3102
3103 static const struct frame_unwind ia64_libunwind_sigtramp_frame_unwind =
3104 {
3105 SIGTRAMP_FRAME,
3106 default_frame_unwind_stop_reason,
3107 ia64_libunwind_sigtramp_frame_this_id,
3108 ia64_libunwind_sigtramp_frame_prev_register,
3109 NULL,
3110 ia64_libunwind_sigtramp_frame_sniffer
3111 };
3112
3113 /* Set of libunwind callback accessor functions. */
3114 unw_accessors_t ia64_unw_accessors =
3115 {
3116 ia64_find_proc_info_x,
3117 ia64_put_unwind_info,
3118 ia64_get_dyn_info_list,
3119 ia64_access_mem,
3120 ia64_access_reg,
3121 ia64_access_fpreg,
3122 /* resume */
3123 /* get_proc_name */
3124 };
3125
3126 /* Set of special libunwind callback accessor functions specific to accessing
3127 the rse registers. At the top of the stack, we want libunwind to figure out
3128 how to read r32 - r127. Though usually they are found sequentially in
3129 memory starting from $bof, this is not always true. */
3130 unw_accessors_t ia64_unw_rse_accessors =
3131 {
3132 ia64_find_proc_info_x,
3133 ia64_put_unwind_info,
3134 ia64_get_dyn_info_list,
3135 ia64_access_mem,
3136 ia64_access_rse_reg,
3137 ia64_access_rse_fpreg,
3138 /* resume */
3139 /* get_proc_name */
3140 };
3141
3142 /* Set of ia64-libunwind-tdep gdb callbacks and data for generic
3143 ia64-libunwind-tdep code to use. */
3144 struct libunwind_descr ia64_libunwind_descr =
3145 {
3146 ia64_gdb2uw_regnum,
3147 ia64_uw2gdb_regnum,
3148 ia64_is_fpreg,
3149 &ia64_unw_accessors,
3150 &ia64_unw_rse_accessors,
3151 };
3152
3153 #endif /* HAVE_LIBUNWIND_IA64_H */
3154
3155 static int
3156 ia64_use_struct_convention (struct type *type)
3157 {
3158 struct type *float_elt_type;
3159
3160 /* Don't use the struct convention for anything but structure,
3161 union, or array types. */
3162 if (!(TYPE_CODE (type) == TYPE_CODE_STRUCT
3163 || TYPE_CODE (type) == TYPE_CODE_UNION
3164 || TYPE_CODE (type) == TYPE_CODE_ARRAY))
3165 return 0;
3166
3167 /* HFAs are structures (or arrays) consisting entirely of floating
3168 point values of the same length. Up to 8 of these are returned
3169 in registers. Don't use the struct convention when this is the
3170 case. */
3171 float_elt_type = is_float_or_hfa_type (type);
3172 if (float_elt_type != NULL
3173 && TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type) <= 8)
3174 return 0;
3175
3176 /* Other structs of length 32 or less are returned in r8-r11.
3177 Don't use the struct convention for those either. */
3178 return TYPE_LENGTH (type) > 32;
3179 }
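/* For example, under the convention sketched above: a struct of three
   doubles is an HFA and is returned in f8-f10, a 16-byte struct of
   integers is returned in r8-r9, while a 40-byte non-HFA struct uses
   the struct convention.  */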
3180
3181 /* Return non-zero if TYPE is a structure or union type. */
3182
3183 static int
3184 ia64_struct_type_p (const struct type *type)
3185 {
3186 return (TYPE_CODE (type) == TYPE_CODE_STRUCT
3187 || TYPE_CODE (type) == TYPE_CODE_UNION);
3188 }
3189
3190 static void
3191 ia64_extract_return_value (struct type *type, struct regcache *regcache,
3192 gdb_byte *valbuf)
3193 {
3194 struct gdbarch *gdbarch = regcache->arch ();
3195 struct type *float_elt_type;
3196
3197 float_elt_type = is_float_or_hfa_type (type);
3198 if (float_elt_type != NULL)
3199 {
3200 gdb_byte from[IA64_FP_REGISTER_SIZE];
3201 int offset = 0;
3202 int regnum = IA64_FR8_REGNUM;
3203 int n = TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type);
3204
3205 while (n-- > 0)
3206 {
3207 regcache->cooked_read (regnum, from);
3208 target_float_convert (from, ia64_ext_type (gdbarch),
3209 valbuf + offset, float_elt_type);
3210 offset += TYPE_LENGTH (float_elt_type);
3211 regnum++;
3212 }
3213 }
3214 else if (!ia64_struct_type_p (type) && TYPE_LENGTH (type) < 8)
3215 {
3216 /* This is an integral value, and its size is less than 8 bytes.
3217 These values are LSB-aligned, so extract the relevant bytes,
3218 and copy them into VALBUF. */
3219 /* brobecker/2005-12-30: Actually, all integral values are LSB aligned,
3220 so I suppose we should also add handling here for integral values
3221 whose size is greater than 8. But I wasn't able to create such
3222 a type, either in C or in Ada, so not worrying about these yet. */
3223 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3224 ULONGEST val;
3225
3226 regcache_cooked_read_unsigned (regcache, IA64_GR8_REGNUM, &val);
3227 store_unsigned_integer (valbuf, TYPE_LENGTH (type), byte_order, val);
3228 }
3229 else
3230 {
3231 ULONGEST val;
3232 int offset = 0;
3233 int regnum = IA64_GR8_REGNUM;
3234 int reglen = TYPE_LENGTH (register_type (gdbarch, IA64_GR8_REGNUM));
3235 int n = TYPE_LENGTH (type) / reglen;
3236 int m = TYPE_LENGTH (type) % reglen;
3237
3238 while (n-- > 0)
3239 {
3240 ULONGEST val;
3241 regcache_cooked_read_unsigned (regcache, regnum, &val);
3242 memcpy ((char *)valbuf + offset, &val, reglen);
3243 offset += reglen;
3244 regnum++;
3245 }
3246
3247 if (m)
3248 {
3249 regcache_cooked_read_unsigned (regcache, regnum, &val);
3250 memcpy ((char *)valbuf + offset, &val, m);
3251 }
3252 }
3253 }
3254
3255 static void
3256 ia64_store_return_value (struct type *type, struct regcache *regcache,
3257 const gdb_byte *valbuf)
3258 {
3259 struct gdbarch *gdbarch = regcache->arch ();
3260 struct type *float_elt_type;
3261
3262 float_elt_type = is_float_or_hfa_type (type);
3263 if (float_elt_type != NULL)
3264 {
3265 gdb_byte to[IA64_FP_REGISTER_SIZE];
3266 int offset = 0;
3267 int regnum = IA64_FR8_REGNUM;
3268 int n = TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type);
3269
3270 while (n-- > 0)
3271 {
3272 target_float_convert (valbuf + offset, float_elt_type,
3273 to, ia64_ext_type (gdbarch));
3274 regcache->cooked_write (regnum, to);
3275 offset += TYPE_LENGTH (float_elt_type);
3276 regnum++;
3277 }
3278 }
3279 else
3280 {
3281 ULONGEST val;
3282 int offset = 0;
3283 int regnum = IA64_GR8_REGNUM;
3284 int reglen = TYPE_LENGTH (register_type (gdbarch, IA64_GR8_REGNUM));
3285 int n = TYPE_LENGTH (type) / reglen;
3286 int m = TYPE_LENGTH (type) % reglen;
3287
3288 while (n-- > 0)
3289 {
3290 ULONGEST val;
3291 memcpy (&val, (char *)valbuf + offset, reglen);
3292 regcache_cooked_write_unsigned (regcache, regnum, val);
3293 offset += reglen;
3294 regnum++;
3295 }
3296
3297 if (m)
3298 {
3299 memcpy (&val, (char *)valbuf + offset, m);
3300 regcache_cooked_write_unsigned (regcache, regnum, val);
3301 }
3302 }
3303 }
3304
3305 static enum return_value_convention
3306 ia64_return_value (struct gdbarch *gdbarch, struct value *function,
3307 struct type *valtype, struct regcache *regcache,
3308 gdb_byte *readbuf, const gdb_byte *writebuf)
3309 {
3310 int struct_return = ia64_use_struct_convention (valtype);
3311
3312 if (writebuf != NULL)
3313 {
3314 gdb_assert (!struct_return);
3315 ia64_store_return_value (valtype, regcache, writebuf);
3316 }
3317
3318 if (readbuf != NULL)
3319 {
3320 gdb_assert (!struct_return);
3321 ia64_extract_return_value (valtype, regcache, readbuf);
3322 }
3323
3324 if (struct_return)
3325 return RETURN_VALUE_STRUCT_CONVENTION;
3326 else
3327 return RETURN_VALUE_REGISTER_CONVENTION;
3328 }
3329
3330 static int
3331 is_float_or_hfa_type_recurse (struct type *t, struct type **etp)
3332 {
3333 switch (TYPE_CODE (t))
3334 {
3335 case TYPE_CODE_FLT:
3336 if (*etp)
3337 return TYPE_LENGTH (*etp) == TYPE_LENGTH (t);
3338 else
3339 {
3340 *etp = t;
3341 return 1;
3342 }
3343 break;
3344 case TYPE_CODE_ARRAY:
3345 return
3346 is_float_or_hfa_type_recurse (check_typedef (TYPE_TARGET_TYPE (t)),
3347 etp);
3348 break;
3349 case TYPE_CODE_STRUCT:
3350 {
3351 int i;
3352
3353 for (i = 0; i < TYPE_NFIELDS (t); i++)
3354 if (!is_float_or_hfa_type_recurse
3355 (check_typedef (TYPE_FIELD_TYPE (t, i)), etp))
3356 return 0;
3357 return 1;
3358 }
3359 break;
3360 default:
3361 return 0;
3362 break;
3363 }
3364 }
3365
3366 /* Determine if the given type is one of the floating point types or
3367 an HFA (which is a struct, array, or combination thereof whose
3368 bottom-most elements are all of the same floating point type). */
3369
3370 static struct type *
3371 is_float_or_hfa_type (struct type *t)
3372 {
3373 struct type *et = 0;
3374
3375 return is_float_or_hfa_type_recurse (t, &et) ? et : 0;
3376 }
3377
3378
3379 /* Return 1 if the alignment of T is such that the next even slot
3380 should be used. Return 0 if the next available slot should
3381 be used. (See section 8.5.1 of the IA-64 Software Conventions
3382 and Runtime manual). */
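/* For example, a quad-precision float or 128-bit integer argument that
   would otherwise land in an odd-numbered slot is pushed to the next
   even slot, so that it ends up 16-byte aligned in the argument area.  */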
3383
3384 static int
3385 slot_alignment_is_next_even (struct type *t)
3386 {
3387 switch (TYPE_CODE (t))
3388 {
3389 case TYPE_CODE_INT:
3390 case TYPE_CODE_FLT:
3391 if (TYPE_LENGTH (t) > 8)
3392 return 1;
3393 else
3394 return 0;
3395 case TYPE_CODE_ARRAY:
3396 return
3397 slot_alignment_is_next_even (check_typedef (TYPE_TARGET_TYPE (t)));
3398 case TYPE_CODE_STRUCT:
3399 {
3400 int i;
3401
3402 for (i = 0; i < TYPE_NFIELDS (t); i++)
3403 if (slot_alignment_is_next_even
3404 (check_typedef (TYPE_FIELD_TYPE (t, i))))
3405 return 1;
3406 return 0;
3407 }
3408 default:
3409 return 0;
3410 }
3411 }
3412
3413 /* Attempt to find (and return) the global pointer for the given
3414 function.
3415
3416 This is a rather nasty bit of code that searches for the .dynamic section
3417 in the objfile corresponding to the pc of the function we're trying
3418 to call. Once it finds the addresses at which the .dynamic section
3419 lives in the child process, it scans the Elf64_Dyn entries for a
3420 DT_PLTGOT tag. If it finds one of these, the corresponding
3421 d_un.d_ptr value is the global pointer. */
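/* For reference, each Elf64_Dyn entry scanned below is 16 bytes,
   laid out roughly as:

       d_tag        bytes 0..7   (e.g. DT_PLTGOT or DT_NULL)
       d_un.d_ptr   bytes 8..15  (the tag's value)

   which is why the loop reads the tag at ADDR, the value at ADDR + 8,
   and then steps ADDR by 16.  */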
3422
3423 static CORE_ADDR
3424 ia64_find_global_pointer_from_dynamic_section (struct gdbarch *gdbarch,
3425 CORE_ADDR faddr)
3426 {
3427 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3428 struct obj_section *faddr_sect;
3429
3430 faddr_sect = find_pc_section (faddr);
3431 if (faddr_sect != NULL)
3432 {
3433 struct obj_section *osect;
3434
3435 ALL_OBJFILE_OSECTIONS (faddr_sect->objfile, osect)
3436 {
3437 if (strcmp (osect->the_bfd_section->name, ".dynamic") == 0)
3438 break;
3439 }
3440
3441 if (osect < faddr_sect->objfile->sections_end)
3442 {
3443 CORE_ADDR addr, endaddr;
3444
3445 addr = obj_section_addr (osect);
3446 endaddr = obj_section_endaddr (osect);
3447
3448 while (addr < endaddr)
3449 {
3450 int status;
3451 LONGEST tag;
3452 gdb_byte buf[8];
3453
3454 status = target_read_memory (addr, buf, sizeof (buf));
3455 if (status != 0)
3456 break;
3457 tag = extract_signed_integer (buf, sizeof (buf), byte_order);
3458
3459 if (tag == DT_PLTGOT)
3460 {
3461 CORE_ADDR global_pointer;
3462
3463 status = target_read_memory (addr + 8, buf, sizeof (buf));
3464 if (status != 0)
3465 break;
3466 global_pointer = extract_unsigned_integer (buf, sizeof (buf),
3467 byte_order);
3468
3469 /* The payoff... */
3470 return global_pointer;
3471 }
3472
3473 if (tag == DT_NULL)
3474 break;
3475
3476 addr += 16;
3477 }
3478 }
3479 }
3480 return 0;
3481 }
3482
3483 /* Attempt to find (and return) the global pointer for the given
3484 function. We first try the find_global_pointer_from_solib routine
3485 from the gdbarch tdep vector, if provided. If that does not
3486 work, we fall back to ia64_find_global_pointer_from_dynamic_section. */
3487
3488 static CORE_ADDR
3489 ia64_find_global_pointer (struct gdbarch *gdbarch, CORE_ADDR faddr)
3490 {
3491 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3492 CORE_ADDR addr = 0;
3493
3494 if (tdep->find_global_pointer_from_solib)
3495 addr = tdep->find_global_pointer_from_solib (gdbarch, faddr);
3496 if (addr == 0)
3497 addr = ia64_find_global_pointer_from_dynamic_section (gdbarch, faddr);
3498 return addr;
3499 }
3500
3501 /* Given a function's address, attempt to find (and return) the
3502 corresponding (canonical) function descriptor. Return 0 if
3503 not found. */
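/* The ".opd" section scanned below holds the "official" function
   descriptors: 16-byte entries whose first doubleword is the code entry
   point and whose second is the global pointer.  Matching FADDR against
   the first doubleword of each entry is therefore enough to recognize an
   existing descriptor.  */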
3504 static CORE_ADDR
3505 find_extant_func_descr (struct gdbarch *gdbarch, CORE_ADDR faddr)
3506 {
3507 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3508 struct obj_section *faddr_sect;
3509
3510 /* Return early if faddr is already a function descriptor. */
3511 faddr_sect = find_pc_section (faddr);
3512 if (faddr_sect && strcmp (faddr_sect->the_bfd_section->name, ".opd") == 0)
3513 return faddr;
3514
3515 if (faddr_sect != NULL)
3516 {
3517 struct obj_section *osect;
3518 ALL_OBJFILE_OSECTIONS (faddr_sect->objfile, osect)
3519 {
3520 if (strcmp (osect->the_bfd_section->name, ".opd") == 0)
3521 break;
3522 }
3523
3524 if (osect < faddr_sect->objfile->sections_end)
3525 {
3526 CORE_ADDR addr, endaddr;
3527
3528 addr = obj_section_addr (osect);
3529 endaddr = obj_section_endaddr (osect);
3530
3531 while (addr < endaddr)
3532 {
3533 int status;
3534 LONGEST faddr2;
3535 gdb_byte buf[8];
3536
3537 status = target_read_memory (addr, buf, sizeof (buf));
3538 if (status != 0)
3539 break;
3540 faddr2 = extract_signed_integer (buf, sizeof (buf), byte_order);
3541
3542 if (faddr == faddr2)
3543 return addr;
3544
3545 addr += 16;
3546 }
3547 }
3548 }
3549 return 0;
3550 }
3551
3552 /* Attempt to find a function descriptor corresponding to the
3553 given address. If none is found, construct one on the
3554 stack using the address at fdaptr. */
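/* A descriptor constructed here mirrors the ".opd" layout sketched
   above:

       offset 0:  8-byte function entry point (FADDR)
       offset 8:  8-byte global pointer (gp)

   FDAPTR is bumped by 16 so that successive calls carve out distinct
   slots in the reserved stack area.  */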
3555
3556 static CORE_ADDR
3557 find_func_descr (struct regcache *regcache, CORE_ADDR faddr, CORE_ADDR *fdaptr)
3558 {
3559 struct gdbarch *gdbarch = regcache->arch ();
3560 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3561 CORE_ADDR fdesc;
3562
3563 fdesc = find_extant_func_descr (gdbarch, faddr);
3564
3565 if (fdesc == 0)
3566 {
3567 ULONGEST global_pointer;
3568 gdb_byte buf[16];
3569
3570 fdesc = *fdaptr;
3571 *fdaptr += 16;
3572
3573 global_pointer = ia64_find_global_pointer (gdbarch, faddr);
3574
3575 if (global_pointer == 0)
3576 regcache_cooked_read_unsigned (regcache,
3577 IA64_GR1_REGNUM, &global_pointer);
3578
3579 store_unsigned_integer (buf, 8, byte_order, faddr);
3580 store_unsigned_integer (buf + 8, 8, byte_order, global_pointer);
3581
3582 write_memory (fdesc, buf, 16);
3583 }
3584
3585 return fdesc;
3586 }
3587
3588 /* Use the following routine when printing out function pointers
3589 so the user can see the function address rather than just the
3590 function descriptor. */
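/* Roughly, the heuristics below resolve ADDR in this order: a descriptor
   found in ".opd" is dereferenced directly; an address in a non-code
   section whose first doubleword points into code is treated as a
   descriptor; and descriptors embedded in vtables are recognized via
   their minimal symbol name.  Anything else is returned unchanged.  */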
3591 static CORE_ADDR
3592 ia64_convert_from_func_ptr_addr (struct gdbarch *gdbarch, CORE_ADDR addr,
3593 struct target_ops *targ)
3594 {
3595 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3596 struct obj_section *s;
3597 gdb_byte buf[8];
3598
3599 s = find_pc_section (addr);
3600
3601 /* Check if ADDR points to a function descriptor.  */
3602 if (s && strcmp (s->the_bfd_section->name, ".opd") == 0)
3603 return read_memory_unsigned_integer (addr, 8, byte_order);
3604
3605 /* Normally, functions live inside a section that is executable.
3606 So, if ADDR points to a non-executable section, then treat it
3607 as a function descriptor and return the target address iff
3608 the target address itself points to a section that is executable.
3609 First check that the whole 8 bytes of memory are readable. */
3610 if (s && (s->the_bfd_section->flags & SEC_CODE) == 0
3611 && target_read_memory (addr, buf, 8) == 0)
3612 {
3613 CORE_ADDR pc = extract_unsigned_integer (buf, 8, byte_order);
3614 struct obj_section *pc_section = find_pc_section (pc);
3615
3616 if (pc_section && (pc_section->the_bfd_section->flags & SEC_CODE))
3617 return pc;
3618 }
3619
3620 /* There are also descriptors embedded in vtables. */
3621 if (s)
3622 {
3623 struct bound_minimal_symbol minsym;
3624
3625 minsym = lookup_minimal_symbol_by_pc (addr);
3626
3627 if (minsym.minsym
3628 && is_vtable_name (MSYMBOL_LINKAGE_NAME (minsym.minsym)))
3629 return read_memory_unsigned_integer (addr, 8, byte_order);
3630 }
3631
3632 return addr;
3633 }
3634
3635 static CORE_ADDR
3636 ia64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3637 {
3638 return sp & ~0xfLL;
3639 }
3640
3641 /* The default "allocate_new_rse_frame" ia64_infcall_ops routine for ia64. */
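/* Roughly, this mimics the register-stack bookkeeping of a call: BSP is
   advanced so the backing store has room for SOF new stacked registers,
   the old CFM is saved in the frame-marker bits of AR.PFS (whose top
   bits are preserved), and the new CFM describes a frame of SOF
   registers with no locals and no rotation.  */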
3642
3643 static void
3644 ia64_allocate_new_rse_frame (struct regcache *regcache, ULONGEST bsp, int sof)
3645 {
3646 ULONGEST cfm, pfs, new_bsp;
3647
3648 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
3649
3650 new_bsp = rse_address_add (bsp, sof);
3651 regcache_cooked_write_unsigned (regcache, IA64_BSP_REGNUM, new_bsp);
3652
3653 regcache_cooked_read_unsigned (regcache, IA64_PFS_REGNUM, &pfs);
3654 pfs &= 0xc000000000000000LL;
3655 pfs |= (cfm & 0xffffffffffffLL);
3656 regcache_cooked_write_unsigned (regcache, IA64_PFS_REGNUM, pfs);
3657
3658 cfm &= 0xc000000000000000LL;
3659 cfm |= sof;
3660 regcache_cooked_write_unsigned (regcache, IA64_CFM_REGNUM, cfm);
3661 }
3662
3663 /* The default "store_argument_in_slot" ia64_infcall_ops routine for
3664 ia64. */
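/* Note that rse_address_add, defined earlier in this file, does the slot
   arithmetic: it advances BSP by 8 bytes per slot and accounts for the
   NaT-collection slots the RSE interleaves in the backing store, so
   SLOTNUM here is a register slot index, not a byte offset.  */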
3665
3666 static void
3667 ia64_store_argument_in_slot (struct regcache *regcache, CORE_ADDR bsp,
3668 int slotnum, gdb_byte *buf)
3669 {
3670 write_memory (rse_address_add (bsp, slotnum), buf, 8);
3671 }
3672
3673 /* The default "set_function_addr" ia64_infcall_ops routine for ia64. */
3674
3675 static void
3676 ia64_set_function_addr (struct regcache *regcache, CORE_ADDR func_addr)
3677 {
3678 /* Nothing needed. */
3679 }
3680
3681 static CORE_ADDR
3682 ia64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
3683 struct regcache *regcache, CORE_ADDR bp_addr,
3684 int nargs, struct value **args, CORE_ADDR sp,
3685 int struct_return, CORE_ADDR struct_addr)
3686 {
3687 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3688 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3689 int argno;
3690 struct value *arg;
3691 struct type *type;
3692 int len, argoffset;
3693 int nslots, rseslots, memslots, slotnum, nfuncargs;
3694 int floatreg;
3695 ULONGEST bsp;
3696 CORE_ADDR funcdescaddr, global_pointer;
3697 CORE_ADDR func_addr = find_function_addr (function, NULL);
3698
3699 nslots = 0;
3700 nfuncargs = 0;
3701 /* Count the number of slots needed for the arguments. */
3702 for (argno = 0; argno < nargs; argno++)
3703 {
3704 arg = args[argno];
3705 type = check_typedef (value_type (arg));
3706 len = TYPE_LENGTH (type);
3707
3708 if ((nslots & 1) && slot_alignment_is_next_even (type))
3709 nslots++;
3710
3711 if (TYPE_CODE (type) == TYPE_CODE_FUNC)
3712 nfuncargs++;
3713
3714 nslots += (len + 7) / 8;
3715 }
3716
3717 /* Divvy up the slots between the RSE and the memory stack. */
3718 rseslots = (nslots > 8) ? 8 : nslots;
3719 memslots = nslots - rseslots;
3720
3721 /* Allocate a new RSE frame. */
3722 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
3723 tdep->infcall_ops.allocate_new_rse_frame (regcache, bsp, rseslots);
3724
3725 /* We will attempt to find function descriptors in the .opd segment,
3726 but if we can't we'll construct them ourselves. That being the
3727 case, we'll need to reserve space on the stack for them. */
3728 funcdescaddr = sp - nfuncargs * 16;
3729 funcdescaddr &= ~0xfLL;
3730
3731 /* Adjust the stack pointer to its new value. The calling conventions
3732 require us to have 16 bytes of scratch, plus whatever space is
3733 necessary for the memory slots and our function descriptors. */
3734 sp = sp - 16 - (memslots + nfuncargs) * 8;
3735 sp &= ~0xfLL; /* Maintain 16 byte alignment. */
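/* At this point the memory stack, from low to high addresses, looks
   roughly like this:

       sp       : 16-byte scratch area required by the conventions
       sp + 16  : memory argument slots, 8 bytes each
       ...
       just below the incoming SP: any function descriptors we construct
       via find_func_descr, 16 bytes apiece.  */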
3736
3737 /* Place the arguments where they belong. The arguments will be
3738 either placed in the RSE backing store or on the memory stack.
3739 In addition, floating point arguments or HFAs are placed in
3740 floating point registers. */
3741 slotnum = 0;
3742 floatreg = IA64_FR8_REGNUM;
3743 for (argno = 0; argno < nargs; argno++)
3744 {
3745 struct type *float_elt_type;
3746
3747 arg = args[argno];
3748 type = check_typedef (value_type (arg));
3749 len = TYPE_LENGTH (type);
3750
3751 /* Special handling for function parameters. */
3752 if (len == 8
3753 && TYPE_CODE (type) == TYPE_CODE_PTR
3754 && TYPE_CODE (TYPE_TARGET_TYPE (type)) == TYPE_CODE_FUNC)
3755 {
3756 gdb_byte val_buf[8];
3757 ULONGEST faddr = extract_unsigned_integer (value_contents (arg),
3758 8, byte_order);
3759 store_unsigned_integer (val_buf, 8, byte_order,
3760 find_func_descr (regcache, faddr,
3761 &funcdescaddr));
3762 if (slotnum < rseslots)
3763 tdep->infcall_ops.store_argument_in_slot (regcache, bsp,
3764 slotnum, val_buf);
3765 else
3766 write_memory (sp + 16 + 8 * (slotnum - rseslots), val_buf, 8);
3767 slotnum++;
3768 continue;
3769 }
3770
3771 /* Normal slots. */
3772
3773 /* Skip odd slot if necessary... */
3774 if ((slotnum & 1) && slot_alignment_is_next_even (type))
3775 slotnum++;
3776
3777 argoffset = 0;
3778 while (len > 0)
3779 {
3780 gdb_byte val_buf[8];
3781
3782 memset (val_buf, 0, 8);
3783 if (!ia64_struct_type_p (type) && len < 8)
3784 {
3785 /* Integral types are LSB-aligned, so we have to be careful
3786 to insert the argument on the correct side of the buffer.
3787 This is why we use store_unsigned_integer. */
3788 store_unsigned_integer
3789 (val_buf, 8, byte_order,
3790 extract_unsigned_integer (value_contents (arg), len,
3791 byte_order));
3792 }
3793 else
3794 {
3795 /* This is either an 8bit integral type, or an aggregate.
3796 For 8bit integral type, there is no problem, we just
3797 copy the value over.
3798
3799 For aggregates, the only potentially tricky portion
3800 is to write the last one if it is less than 8 bytes.
3801 In this case, the data is Byte0-aligned. Happy news,
3802 this means that we don't need to differentiate the
3803 handling of 8byte blocks and less-than-8bytes blocks. */
3804 memcpy (val_buf, value_contents (arg) + argoffset,
3805 (len > 8) ? 8 : len);
3806 }
3807
3808 if (slotnum < rseslots)
3809 tdep->infcall_ops.store_argument_in_slot (regcache, bsp,
3810 slotnum, val_buf);
3811 else
3812 write_memory (sp + 16 + 8 * (slotnum - rseslots), val_buf, 8);
3813
3814 argoffset += 8;
3815 len -= 8;
3816 slotnum++;
3817 }
3818
3819 /* Handle floating point types (including HFAs). */
3820 float_elt_type = is_float_or_hfa_type (type);
3821 if (float_elt_type != NULL)
3822 {
3823 argoffset = 0;
3824 len = TYPE_LENGTH (type);
3825 while (len > 0 && floatreg < IA64_FR16_REGNUM)
3826 {
3827 gdb_byte to[IA64_FP_REGISTER_SIZE];
3828 target_float_convert (value_contents (arg) + argoffset,
3829 float_elt_type, to,
3830 ia64_ext_type (gdbarch));
3831 regcache->cooked_write (floatreg, to);
3832 floatreg++;
3833 argoffset += TYPE_LENGTH (float_elt_type);
3834 len -= TYPE_LENGTH (float_elt_type);
3835 }
3836 }
3837 }
3838
3839 /* Store the struct return value in r8 if necessary. */
3840 if (struct_return)
3841 {
3842 regcache_cooked_write_unsigned (regcache, IA64_GR8_REGNUM,
3843 (ULONGEST) struct_addr);
3844 }
3845
3846 global_pointer = ia64_find_global_pointer (gdbarch, func_addr);
3847
3848 if (global_pointer != 0)
3849 regcache_cooked_write_unsigned (regcache, IA64_GR1_REGNUM, global_pointer);
3850
3851 /* The following is not necessary on HP-UX, because we're using
3852 a dummy code sequence pushed on the stack to make the call, and
3853 this sequence doesn't need b0 to be set in order for our dummy
3854 breakpoint to be hit. Nonetheless, this doesn't interfere, and
3855 it's needed for other OSes, so we do this unconditionally. */
3856 regcache_cooked_write_unsigned (regcache, IA64_BR0_REGNUM, bp_addr);
3857
3858 regcache_cooked_write_unsigned (regcache, sp_regnum, sp);
3859
3860 tdep->infcall_ops.set_function_addr (regcache, func_addr);
3861
3862 return sp;
3863 }
3864
3865 static const struct ia64_infcall_ops ia64_infcall_ops =
3866 {
3867 ia64_allocate_new_rse_frame,
3868 ia64_store_argument_in_slot,
3869 ia64_set_function_addr
3870 };
3871
3872 static struct frame_id
3873 ia64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
3874 {
3875 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3876 gdb_byte buf[8];
3877 CORE_ADDR sp, bsp;
3878
3879 get_frame_register (this_frame, sp_regnum, buf);
3880 sp = extract_unsigned_integer (buf, 8, byte_order);
3881
3882 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
3883 bsp = extract_unsigned_integer (buf, 8, byte_order);
3884
3885 if (gdbarch_debug >= 1)
3886 fprintf_unfiltered (gdb_stdlog,
3887 "dummy frame id: code %s, stack %s, special %s\n",
3888 paddress (gdbarch, get_frame_pc (this_frame)),
3889 paddress (gdbarch, sp), paddress (gdbarch, bsp));
3890
3891 return frame_id_build_special (sp, get_frame_pc (this_frame), bsp);
3892 }
3893
3894 static CORE_ADDR
3895 ia64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
3896 {
3897 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3898 gdb_byte buf[8];
3899 CORE_ADDR ip, psr, pc;
3900
3901 frame_unwind_register (next_frame, IA64_IP_REGNUM, buf);
3902 ip = extract_unsigned_integer (buf, 8, byte_order);
3903 frame_unwind_register (next_frame, IA64_PSR_REGNUM, buf);
3904 psr = extract_unsigned_integer (buf, 8, byte_order);
3905
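/* PSR.ri (bits 41-42) gives the slot number of the instruction within
   its 16-byte bundle, which is what gets or'd into the low bits of IP
   below.  */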
3906 pc = (ip & ~0xf) | ((psr >> 41) & 3);
3907 return pc;
3908 }
3909
3910 static int
3911 ia64_print_insn (bfd_vma memaddr, struct disassemble_info *info)
3912 {
3913 info->bytes_per_line = SLOT_MULTIPLIER;
3914 return default_print_insn (memaddr, info);
3915 }
3916
3917 /* The default "size_of_register_frame" gdbarch_tdep routine for ia64. */
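/* SOF (size of frame) lives in bits 0-6 of CFM, hence the 0x7f mask in
   the default implementation below.  */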
3918
3919 static int
3920 ia64_size_of_register_frame (struct frame_info *this_frame, ULONGEST cfm)
3921 {
3922 return (cfm & 0x7f);
3923 }
3924
3925 static struct gdbarch *
3926 ia64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3927 {
3928 struct gdbarch *gdbarch;
3929 struct gdbarch_tdep *tdep;
3930
3931 /* If there is already a candidate, use it. */
3932 arches = gdbarch_list_lookup_by_info (arches, &info);
3933 if (arches != NULL)
3934 return arches->gdbarch;
3935
3936 tdep = XCNEW (struct gdbarch_tdep);
3937 gdbarch = gdbarch_alloc (&info, tdep);
3938
3939 tdep->size_of_register_frame = ia64_size_of_register_frame;
3940
3941 /* According to the ia64 specs, instructions that store long double
3942 floats in memory use a long-double format different from that
3943 used in the floating registers. The memory format matches the
3944 x86 extended float format which is 80 bits. An OS may choose to
3945 use this format (e.g. GNU/Linux) or choose to use a different
3946 format for storing long doubles (e.g. HP-UX). In the latter case,
3947 the setting of the format may be moved/overridden in an
3948 OS-specific tdep file. */
3949 set_gdbarch_long_double_format (gdbarch, floatformats_i387_ext);
3950
3951 set_gdbarch_short_bit (gdbarch, 16);
3952 set_gdbarch_int_bit (gdbarch, 32);
3953 set_gdbarch_long_bit (gdbarch, 64);
3954 set_gdbarch_long_long_bit (gdbarch, 64);
3955 set_gdbarch_float_bit (gdbarch, 32);
3956 set_gdbarch_double_bit (gdbarch, 64);
3957 set_gdbarch_long_double_bit (gdbarch, 128);
3958 set_gdbarch_ptr_bit (gdbarch, 64);
3959
3960 set_gdbarch_num_regs (gdbarch, NUM_IA64_RAW_REGS);
3961 set_gdbarch_num_pseudo_regs (gdbarch,
3962 LAST_PSEUDO_REGNUM - FIRST_PSEUDO_REGNUM);
3963 set_gdbarch_sp_regnum (gdbarch, sp_regnum);
3964 set_gdbarch_fp0_regnum (gdbarch, IA64_FR0_REGNUM);
3965
3966 set_gdbarch_register_name (gdbarch, ia64_register_name);
3967 set_gdbarch_register_type (gdbarch, ia64_register_type);
3968
3969 set_gdbarch_pseudo_register_read (gdbarch, ia64_pseudo_register_read);
3970 set_gdbarch_pseudo_register_write (gdbarch, ia64_pseudo_register_write);
3971 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, ia64_dwarf_reg_to_regnum);
3972 set_gdbarch_register_reggroup_p (gdbarch, ia64_register_reggroup_p);
3973 set_gdbarch_convert_register_p (gdbarch, ia64_convert_register_p);
3974 set_gdbarch_register_to_value (gdbarch, ia64_register_to_value);
3975 set_gdbarch_value_to_register (gdbarch, ia64_value_to_register);
3976
3977 set_gdbarch_skip_prologue (gdbarch, ia64_skip_prologue);
3978
3979 set_gdbarch_return_value (gdbarch, ia64_return_value);
3980
3981 set_gdbarch_memory_insert_breakpoint (gdbarch,
3982 ia64_memory_insert_breakpoint);
3983 set_gdbarch_memory_remove_breakpoint (gdbarch,
3984 ia64_memory_remove_breakpoint);
3985 set_gdbarch_breakpoint_from_pc (gdbarch, ia64_breakpoint_from_pc);
3986 set_gdbarch_breakpoint_kind_from_pc (gdbarch, ia64_breakpoint_kind_from_pc);
3987 set_gdbarch_read_pc (gdbarch, ia64_read_pc);
3988 set_gdbarch_write_pc (gdbarch, ia64_write_pc);
3989
3990 /* Settings for calling functions in the inferior. */
3991 set_gdbarch_push_dummy_call (gdbarch, ia64_push_dummy_call);
3992 tdep->infcall_ops = ia64_infcall_ops;
3993 set_gdbarch_frame_align (gdbarch, ia64_frame_align);
3994 set_gdbarch_dummy_id (gdbarch, ia64_dummy_id);
3995
3996 set_gdbarch_unwind_pc (gdbarch, ia64_unwind_pc);
3997 #ifdef HAVE_LIBUNWIND_IA64_H
3998 frame_unwind_append_unwinder (gdbarch,
3999 &ia64_libunwind_sigtramp_frame_unwind);
4000 frame_unwind_append_unwinder (gdbarch, &ia64_libunwind_frame_unwind);
4001 frame_unwind_append_unwinder (gdbarch, &ia64_sigtramp_frame_unwind);
4002 libunwind_frame_set_descr (gdbarch, &ia64_libunwind_descr);
4003 #else
4004 frame_unwind_append_unwinder (gdbarch, &ia64_sigtramp_frame_unwind);
4005 #endif
4006 frame_unwind_append_unwinder (gdbarch, &ia64_frame_unwind);
4007 frame_base_set_default (gdbarch, &ia64_frame_base);
4008
4009 /* Settings that should be unnecessary. */
4010 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
4011
4012 set_gdbarch_print_insn (gdbarch, ia64_print_insn);
4013 set_gdbarch_convert_from_func_ptr_addr (gdbarch,
4014 ia64_convert_from_func_ptr_addr);
4015
4016 /* The virtual table contains 16-byte descriptors, not pointers to
4017 descriptors. */
4018 set_gdbarch_vtable_function_descriptors (gdbarch, 1);
4019
4020 /* Hook in ABI-specific overrides, if they have been registered. */
4021 gdbarch_init_osabi (info, gdbarch);
4022
4023 return gdbarch;
4024 }
4025
4026 void
4027 _initialize_ia64_tdep (void)
4028 {
4029 gdbarch_register (bfd_arch_ia64, ia64_gdbarch_init, NULL);
4030 }