1 /* Target-dependent code for the IA-64 for GDB, the GNU debugger.
2
3 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008,
4 2009, 2010, 2011 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "inferior.h"
23 #include "gdbcore.h"
24 #include "arch-utils.h"
25 #include "floatformat.h"
26 #include "gdbtypes.h"
27 #include "regcache.h"
28 #include "reggroups.h"
29 #include "frame.h"
30 #include "frame-base.h"
31 #include "frame-unwind.h"
32 #include "doublest.h"
33 #include "value.h"
34 #include "gdb_assert.h"
35 #include "objfiles.h"
36 #include "elf/common.h" /* for DT_PLTGOT value */
37 #include "elf-bfd.h"
38 #include "dis-asm.h"
39 #include "infcall.h"
40 #include "osabi.h"
41 #include "ia64-tdep.h"
42 #include "cp-abi.h"
43
44 #ifdef HAVE_LIBUNWIND_IA64_H
45 #include "elf/ia64.h" /* for PT_IA_64_UNWIND value */
46 #include "libunwind-frame.h"
47 #include "libunwind-ia64.h"
48
49 /* Note: KERNEL_START is supposed to be an address which is not going
50 to ever contain any valid unwind info. For ia64 linux, the choice
51 of 0xc000000000000000 is fairly safe since that's uncached space.
52
53 We use KERNEL_START as follows: after obtaining the kernel's
54 unwind table via getunwind(), we project its unwind data into
55 address-range KERNEL_START-(KERNEL_START+ktab_size) and then
56 when ia64_access_mem() sees a memory access to this
57 address-range, we redirect it to ktab instead.
58
59 None of this hackery is needed with a modern kernel/libcs
60 which uses the kernel virtual DSO to provide access to the
61 kernel's unwind info. In that case, ktab_size remains 0 and
62 hence the value of KERNEL_START doesn't matter. */
63
64 #define KERNEL_START 0xc000000000000000ULL
65
66 static size_t ktab_size = 0;
67 struct ia64_table_entry
68 {
69 uint64_t start_offset;
70 uint64_t end_offset;
71 uint64_t info_offset;
72 };
73
74 static struct ia64_table_entry *ktab = NULL;
75
76 #endif
77
78 /* An enumeration of the different IA-64 instruction types. */
79
80 typedef enum instruction_type
81 {
82 A, /* Integer ALU ; I-unit or M-unit */
83 I, /* Non-ALU integer; I-unit */
84 M, /* Memory ; M-unit */
85 F, /* Floating-point ; F-unit */
86 B, /* Branch ; B-unit */
87 L, /* Extended (L+X) ; I-unit */
88 X, /* Extended (L+X) ; I-unit */
89 undefined /* undefined or reserved */
90 } instruction_type;
91
92 /* We represent IA-64 PC addresses as the value of the instruction
93 pointer or'd with some bit combination in the low nibble which
94 represents the slot number in the bundle addressed by the
95 instruction pointer. The problem is that the Linux kernel
96 multiplies its slot numbers (for exceptions) by one while the
97 disassembler multiplies its slot numbers by 6. In addition, I've
98 heard it said that the simulator uses 1 as the multiplier.
99
100 I've fixed the disassembler so that the bytes_per_line field will
101 be the slot multiplier. If bytes_per_line comes in as zero, it
102 is set to six (which is how it was set up initially). -- objdump
103 displays pretty disassembly dumps with this value. For our purposes,
104 we'll set bytes_per_line to SLOT_MULTIPLIER. This is okay since we
105 never want to also display the raw bytes the way objdump does. */
106
107 #define SLOT_MULTIPLIER 1
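/* An illustrative sketch, not part of GDB and kept out of the build with
   "#if 0": with SLOT_MULTIPLIER set to 1 as above, a GDB PC value splits
   into a 16-byte aligned bundle address and a slot number in the range
   0..2, which is exactly the split performed later by fetch_instruction
   and ia64_write_pc.  The function name is made up for the example.  */
#if 0
static void
example_split_pc (CORE_ADDR pc)
{
  CORE_ADDR bundle_addr = pc & ~(CORE_ADDR) 0x0f;     /* Start of the bundle.  */
  int slotnum = (int) (pc & 0x0f) / SLOT_MULTIPLIER;  /* Slot 0, 1 or 2.  */

  /* Recombining the two (for a valid slot number) yields the original
     PC representation again.  */
  gdb_assert (pc == (bundle_addr | (CORE_ADDR) (slotnum * SLOT_MULTIPLIER)));
}
#endif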
108
109 /* Length in bytes of an instruction bundle. */
110
111 #define BUNDLE_LEN 16
112
113 /* See the saved memory layout comment for ia64_memory_insert_breakpoint. */
114
115 #if BREAKPOINT_MAX < BUNDLE_LEN - 2
116 # error "BREAKPOINT_MAX < BUNDLE_LEN - 2"
117 #endif
118
119 static gdbarch_init_ftype ia64_gdbarch_init;
120
121 static gdbarch_register_name_ftype ia64_register_name;
122 static gdbarch_register_type_ftype ia64_register_type;
123 static gdbarch_breakpoint_from_pc_ftype ia64_breakpoint_from_pc;
124 static gdbarch_skip_prologue_ftype ia64_skip_prologue;
125 static struct type *is_float_or_hfa_type (struct type *t);
126 static CORE_ADDR ia64_find_global_pointer (struct gdbarch *gdbarch,
127 CORE_ADDR faddr);
128
129 #define NUM_IA64_RAW_REGS 462
130
131 static int sp_regnum = IA64_GR12_REGNUM;
132 static int fp_regnum = IA64_VFP_REGNUM;
133 static int lr_regnum = IA64_VRAP_REGNUM;
134
135 /* NOTE: we treat the register stack registers r32-r127 as
136 pseudo-registers because they may not be accessible via the ptrace
137 register get/set interfaces. */
138
139 enum pseudo_regs { FIRST_PSEUDO_REGNUM = NUM_IA64_RAW_REGS,
140 VBOF_REGNUM = IA64_NAT127_REGNUM + 1, V32_REGNUM,
141 V127_REGNUM = V32_REGNUM + 95,
142 VP0_REGNUM, VP16_REGNUM = VP0_REGNUM + 16,
143 VP63_REGNUM = VP0_REGNUM + 63, LAST_PSEUDO_REGNUM };
144
145 /* Array of register names; There should be ia64_num_regs strings in
146 the initializer. */
147
148 static char *ia64_register_names[] =
149 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
150 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
151 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
152 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
153 "", "", "", "", "", "", "", "",
154 "", "", "", "", "", "", "", "",
155 "", "", "", "", "", "", "", "",
156 "", "", "", "", "", "", "", "",
157 "", "", "", "", "", "", "", "",
158 "", "", "", "", "", "", "", "",
159 "", "", "", "", "", "", "", "",
160 "", "", "", "", "", "", "", "",
161 "", "", "", "", "", "", "", "",
162 "", "", "", "", "", "", "", "",
163 "", "", "", "", "", "", "", "",
164 "", "", "", "", "", "", "", "",
165
166 "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
167 "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
168 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
169 "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
170 "f32", "f33", "f34", "f35", "f36", "f37", "f38", "f39",
171 "f40", "f41", "f42", "f43", "f44", "f45", "f46", "f47",
172 "f48", "f49", "f50", "f51", "f52", "f53", "f54", "f55",
173 "f56", "f57", "f58", "f59", "f60", "f61", "f62", "f63",
174 "f64", "f65", "f66", "f67", "f68", "f69", "f70", "f71",
175 "f72", "f73", "f74", "f75", "f76", "f77", "f78", "f79",
176 "f80", "f81", "f82", "f83", "f84", "f85", "f86", "f87",
177 "f88", "f89", "f90", "f91", "f92", "f93", "f94", "f95",
178 "f96", "f97", "f98", "f99", "f100", "f101", "f102", "f103",
179 "f104", "f105", "f106", "f107", "f108", "f109", "f110", "f111",
180 "f112", "f113", "f114", "f115", "f116", "f117", "f118", "f119",
181 "f120", "f121", "f122", "f123", "f124", "f125", "f126", "f127",
182
183 "", "", "", "", "", "", "", "",
184 "", "", "", "", "", "", "", "",
185 "", "", "", "", "", "", "", "",
186 "", "", "", "", "", "", "", "",
187 "", "", "", "", "", "", "", "",
188 "", "", "", "", "", "", "", "",
189 "", "", "", "", "", "", "", "",
190 "", "", "", "", "", "", "", "",
191
192 "b0", "b1", "b2", "b3", "b4", "b5", "b6", "b7",
193
194 "vfp", "vrap",
195
196 "pr", "ip", "psr", "cfm",
197
198 "kr0", "kr1", "kr2", "kr3", "kr4", "kr5", "kr6", "kr7",
199 "", "", "", "", "", "", "", "",
200 "rsc", "bsp", "bspstore", "rnat",
201 "", "fcr", "", "",
202 "eflag", "csd", "ssd", "cflg", "fsr", "fir", "fdr", "",
203 "ccv", "", "", "", "unat", "", "", "",
204 "fpsr", "", "", "", "itc",
205 "", "", "", "", "", "", "", "", "", "",
206 "", "", "", "", "", "", "", "", "",
207 "pfs", "lc", "ec",
208 "", "", "", "", "", "", "", "", "", "",
209 "", "", "", "", "", "", "", "", "", "",
210 "", "", "", "", "", "", "", "", "", "",
211 "", "", "", "", "", "", "", "", "", "",
212 "", "", "", "", "", "", "", "", "", "",
213 "", "", "", "", "", "", "", "", "", "",
214 "",
215 "nat0", "nat1", "nat2", "nat3", "nat4", "nat5", "nat6", "nat7",
216 "nat8", "nat9", "nat10", "nat11", "nat12", "nat13", "nat14", "nat15",
217 "nat16", "nat17", "nat18", "nat19", "nat20", "nat21", "nat22", "nat23",
218 "nat24", "nat25", "nat26", "nat27", "nat28", "nat29", "nat30", "nat31",
219 "nat32", "nat33", "nat34", "nat35", "nat36", "nat37", "nat38", "nat39",
220 "nat40", "nat41", "nat42", "nat43", "nat44", "nat45", "nat46", "nat47",
221 "nat48", "nat49", "nat50", "nat51", "nat52", "nat53", "nat54", "nat55",
222 "nat56", "nat57", "nat58", "nat59", "nat60", "nat61", "nat62", "nat63",
223 "nat64", "nat65", "nat66", "nat67", "nat68", "nat69", "nat70", "nat71",
224 "nat72", "nat73", "nat74", "nat75", "nat76", "nat77", "nat78", "nat79",
225 "nat80", "nat81", "nat82", "nat83", "nat84", "nat85", "nat86", "nat87",
226 "nat88", "nat89", "nat90", "nat91", "nat92", "nat93", "nat94", "nat95",
227 "nat96", "nat97", "nat98", "nat99", "nat100","nat101","nat102","nat103",
228 "nat104","nat105","nat106","nat107","nat108","nat109","nat110","nat111",
229 "nat112","nat113","nat114","nat115","nat116","nat117","nat118","nat119",
230 "nat120","nat121","nat122","nat123","nat124","nat125","nat126","nat127",
231
232 "bof",
233
234 "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
235 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
236 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
237 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
238 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
239 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
240 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
241 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
242 "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
243 "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
244 "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
245 "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
246
247 "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7",
248 "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15",
249 "p16", "p17", "p18", "p19", "p20", "p21", "p22", "p23",
250 "p24", "p25", "p26", "p27", "p28", "p29", "p30", "p31",
251 "p32", "p33", "p34", "p35", "p36", "p37", "p38", "p39",
252 "p40", "p41", "p42", "p43", "p44", "p45", "p46", "p47",
253 "p48", "p49", "p50", "p51", "p52", "p53", "p54", "p55",
254 "p56", "p57", "p58", "p59", "p60", "p61", "p62", "p63",
255 };
256
257 struct ia64_frame_cache
258 {
259 CORE_ADDR base; /* frame pointer base for frame */
260 CORE_ADDR pc; /* function start pc for frame */
261 CORE_ADDR saved_sp; /* stack pointer for frame */
262 CORE_ADDR bsp; /* points at r32 for the current frame */
263 CORE_ADDR cfm; /* cfm value for current frame */
264 CORE_ADDR prev_cfm; /* cfm value for previous frame */
265 int frameless;
266 int sof; /* Size of frame (decoded from cfm value). */
267 int sol; /* Size of locals (decoded from cfm value). */
268 int sor; /* Number of rotating registers (decoded from
269 cfm value). */
270 CORE_ADDR after_prologue;
271 /* Address of first instruction after the last
272 prologue instruction; Note that there may
273 be instructions from the function's body
274 intermingled with the prologue. */
275 int mem_stack_frame_size;
276 /* Size of the memory stack frame (may be zero),
277 or -1 if it has not been determined yet. */
278 int fp_reg; /* Register number (if any) used as a frame pointer
279 for this frame. 0 if no register is being used
280 as the frame pointer. */
281
282 /* Saved registers. */
283 CORE_ADDR saved_regs[NUM_IA64_RAW_REGS];
284
285 };
286
287 static int
288 floatformat_valid (const struct floatformat *fmt, const void *from)
289 {
290 return 1;
291 }
292
293 static const struct floatformat floatformat_ia64_ext_little =
294 {
295 floatformat_little, 82, 0, 1, 17, 65535, 0x1ffff, 18, 64,
296 floatformat_intbit_yes, "floatformat_ia64_ext_little", floatformat_valid, NULL
297 };
298
299 static const struct floatformat floatformat_ia64_ext_big =
300 {
301 floatformat_big, 82, 46, 47, 17, 65535, 0x1ffff, 64, 64,
302 floatformat_intbit_yes, "floatformat_ia64_ext_big", floatformat_valid
303 };
304
305 static const struct floatformat *floatformats_ia64_ext[2] =
306 {
307 &floatformat_ia64_ext_big,
308 &floatformat_ia64_ext_little
309 };
310
311 static struct type *
312 ia64_ext_type (struct gdbarch *gdbarch)
313 {
314 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
315
316 if (!tdep->ia64_ext_type)
317 tdep->ia64_ext_type
318 = arch_float_type (gdbarch, 128, "builtin_type_ia64_ext",
319 floatformats_ia64_ext);
320
321 return tdep->ia64_ext_type;
322 }
323
324 static int
325 ia64_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
326 struct reggroup *group)
327 {
328 int vector_p;
329 int float_p;
330 int raw_p;
331 if (group == all_reggroup)
332 return 1;
333 vector_p = TYPE_VECTOR (register_type (gdbarch, regnum));
334 float_p = TYPE_CODE (register_type (gdbarch, regnum)) == TYPE_CODE_FLT;
335 raw_p = regnum < NUM_IA64_RAW_REGS;
336 if (group == float_reggroup)
337 return float_p;
338 if (group == vector_reggroup)
339 return vector_p;
340 if (group == general_reggroup)
341 return (!vector_p && !float_p);
342 if (group == save_reggroup || group == restore_reggroup)
343 return raw_p;
344 return 0;
345 }
346
347 static const char *
348 ia64_register_name (struct gdbarch *gdbarch, int reg)
349 {
350 return ia64_register_names[reg];
351 }
352
353 struct type *
354 ia64_register_type (struct gdbarch *arch, int reg)
355 {
356 if (reg >= IA64_FR0_REGNUM && reg <= IA64_FR127_REGNUM)
357 return ia64_ext_type (arch);
358 else
359 return builtin_type (arch)->builtin_long;
360 }
361
362 static int
363 ia64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
364 {
365 if (reg >= IA64_GR32_REGNUM && reg <= IA64_GR127_REGNUM)
366 return V32_REGNUM + (reg - IA64_GR32_REGNUM);
367 return reg;
368 }
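/* An illustrative sketch, not part of GDB and kept out of the build with
   "#if 0": the mapping above leaves the static registers alone and
   relocates the stacked registers r32-r127 into the pseudo-register space,
   so for example DWARF register IA64_GR32_REGNUM + 8 (r40) maps to
   V32_REGNUM + 8, while IA64_GR12_REGNUM (the stack pointer) is passed
   through unchanged.  The function name is made up for the example.  */
#if 0
static void
example_dwarf_mapping (struct gdbarch *gdbarch)
{
  gdb_assert (ia64_dwarf_reg_to_regnum (gdbarch, IA64_GR32_REGNUM + 8)
              == V32_REGNUM + 8);
  gdb_assert (ia64_dwarf_reg_to_regnum (gdbarch, IA64_GR12_REGNUM)
              == IA64_GR12_REGNUM);
}
#endif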
369
370
371 /* Extract ``len'' bits from an instruction bundle starting at
372 bit ``from''. */
373
374 static long long
375 extract_bit_field (const char *bundle, int from, int len)
376 {
377 long long result = 0LL;
378 int to = from + len;
379 int from_byte = from / 8;
380 int to_byte = to / 8;
381 unsigned char *b = (unsigned char *) bundle;
382 unsigned char c;
383 int lshift;
384 int i;
385
386 c = b[from_byte];
387 if (from_byte == to_byte)
388 c = ((unsigned char) (c << (8 - to % 8))) >> (8 - to % 8);
389 result = c >> (from % 8);
390 lshift = 8 - (from % 8);
391
392 for (i = from_byte+1; i < to_byte; i++)
393 {
394 result |= ((long long) b[i]) << lshift;
395 lshift += 8;
396 }
397
398 if (from_byte < to_byte && (to % 8 != 0))
399 {
400 c = b[to_byte];
401 c = ((unsigned char) (c << (8 - to % 8))) >> (8 - to % 8);
402 result |= ((long long) c) << lshift;
403 }
404
405 return result;
406 }
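/* An illustrative sketch, not compiled into GDB ("#if 0"): the rest of this
   file uses extract_bit_field to pull the 5-bit template out of bits 0..4
   of a bundle and a 41-bit instruction out of one of the three slots (see
   slotN_contents just below).  The function name is made up for the
   example.  */
#if 0
static void
example_decode_bundle (const char *bundle)
{
  long long template = extract_bit_field (bundle, 0, 5);         /* Bits 0..4.  */
  long long slot1 = extract_bit_field (bundle, 5 + 41 * 1, 41);  /* Bits 46..86.  */
}
#endif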
407
408 /* Replace the specified bits in an instruction bundle. */
409
410 static void
411 replace_bit_field (char *bundle, long long val, int from, int len)
412 {
413 int to = from + len;
414 int from_byte = from / 8;
415 int to_byte = to / 8;
416 unsigned char *b = (unsigned char *) bundle;
417 unsigned char c;
418
419 if (from_byte == to_byte)
420 {
421 unsigned char left, right;
422 c = b[from_byte];
423 left = (c >> (to % 8)) << (to % 8);
424 right = ((unsigned char) (c << (8 - from % 8))) >> (8 - from % 8);
425 c = (unsigned char) (val & 0xff);
426 c = (unsigned char) (c << (from % 8 + 8 - to % 8)) >> (8 - to % 8);
427 c |= right | left;
428 b[from_byte] = c;
429 }
430 else
431 {
432 int i;
433 c = b[from_byte];
434 c = ((unsigned char) (c << (8 - from % 8))) >> (8 - from % 8);
435 c = c | (val << (from % 8));
436 b[from_byte] = c;
437 val >>= 8 - from % 8;
438
439 for (i = from_byte+1; i < to_byte; i++)
440 {
441 c = val & 0xff;
442 val >>= 8;
443 b[i] = c;
444 }
445
446 if (to % 8 != 0)
447 {
448 unsigned char cv = (unsigned char) val;
449 c = b[to_byte];
450 c = c >> (to % 8) << (to % 8);
451 c |= ((unsigned char) (cv << (8 - to % 8))) >> (8 - to % 8);
452 b[to_byte] = c;
453 }
454 }
455 }
456
457 /* Return the contents of slot N (for N = 0, 1, or 2) in
458 an instruction bundle. */
459
460 static long long
461 slotN_contents (char *bundle, int slotnum)
462 {
463 return extract_bit_field (bundle, 5+41*slotnum, 41);
464 }
465
466 /* Store an instruction in an instruction bundle. */
467
468 static void
469 replace_slotN_contents (char *bundle, long long instr, int slotnum)
470 {
471 replace_bit_field (bundle, instr, 5+41*slotnum, 41);
472 }
473
474 static const enum instruction_type template_encoding_table[32][3] =
475 {
476 { M, I, I }, /* 00 */
477 { M, I, I }, /* 01 */
478 { M, I, I }, /* 02 */
479 { M, I, I }, /* 03 */
480 { M, L, X }, /* 04 */
481 { M, L, X }, /* 05 */
482 { undefined, undefined, undefined }, /* 06 */
483 { undefined, undefined, undefined }, /* 07 */
484 { M, M, I }, /* 08 */
485 { M, M, I }, /* 09 */
486 { M, M, I }, /* 0A */
487 { M, M, I }, /* 0B */
488 { M, F, I }, /* 0C */
489 { M, F, I }, /* 0D */
490 { M, M, F }, /* 0E */
491 { M, M, F }, /* 0F */
492 { M, I, B }, /* 10 */
493 { M, I, B }, /* 11 */
494 { M, B, B }, /* 12 */
495 { M, B, B }, /* 13 */
496 { undefined, undefined, undefined }, /* 14 */
497 { undefined, undefined, undefined }, /* 15 */
498 { B, B, B }, /* 16 */
499 { B, B, B }, /* 17 */
500 { M, M, B }, /* 18 */
501 { M, M, B }, /* 19 */
502 { undefined, undefined, undefined }, /* 1A */
503 { undefined, undefined, undefined }, /* 1B */
504 { M, F, B }, /* 1C */
505 { M, F, B }, /* 1D */
506 { undefined, undefined, undefined }, /* 1E */
507 { undefined, undefined, undefined }, /* 1F */
508 };
509
510 /* Fetch and (partially) decode an instruction at ADDR and return the
511 address of the next instruction to fetch. */
512
513 static CORE_ADDR
514 fetch_instruction (CORE_ADDR addr, instruction_type *it, long long *instr)
515 {
516 char bundle[BUNDLE_LEN];
517 int slotnum = (int) (addr & 0x0f) / SLOT_MULTIPLIER;
518 long long template;
519 int val;
520
521 /* Warn about slot numbers greater than 2. We used to generate
522 an error here on the assumption that the user entered an invalid
523 address. But, sometimes GDB itself requests an invalid address.
524 This can (easily) happen when execution stops in a function for
525 which there are no symbols. The prologue scanner will attempt to
526 find the beginning of the function - if the nearest symbol
527 happens to not be aligned on a bundle boundary (16 bytes), the
528 resulting starting address will cause GDB to think that the slot
529 number is too large.
530
531 So we warn about it and set the slot number to zero. It is
532 not necessarily a fatal condition, particularly if debugging
533 at the assembly language level. */
534 if (slotnum > 2)
535 {
536 warning (_("Can't fetch instructions for slot numbers greater than 2.\n"
537 "Using slot 0 instead"));
538 slotnum = 0;
539 }
540
541 addr &= ~0x0f;
542
543 val = target_read_memory (addr, bundle, BUNDLE_LEN);
544
545 if (val != 0)
546 return 0;
547
548 *instr = slotN_contents (bundle, slotnum);
549 template = extract_bit_field (bundle, 0, 5);
550 *it = template_encoding_table[(int)template][slotnum];
551
552 if (slotnum == 2 || (slotnum == 1 && *it == L))
553 addr += 16;
554 else
555 addr += (slotnum + 1) * SLOT_MULTIPLIER;
556
557 return addr;
558 }
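/* A worked example of the addresses returned above, assuming
   SLOT_MULTIPLIER == 1: for a bundle at 0x4000 that does not contain an
   L+X pair, successive fetches advance through
       0x4000 (slot 0) -> 0x4001 (slot 1) -> 0x4002 (slot 2) -> 0x4010,
   i.e. one slot at a time and then on to the next 16-byte bundle.  If
   slot 1 holds the L half of an L+X instruction, slot 2 is skipped and
   the address returned for 0x4001 is already 0x4010.  */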
559
560 /* There are 5 different break instructions (break.i, break.b,
561 break.m, break.f, and break.x), but they all have the same
562 encoding. (The five bit template in the low five bits of the
563 instruction bundle distinguishes one from another.)
564
565 The runtime architecture manual specifies that break instructions
566 used for debugging purposes must have the upper two bits of the 21
567 bit immediate set to a 0 and a 1 respectively. A breakpoint
568 instruction encodes the most significant bit of its 21 bit
569 immediate at bit 36 of the 41 bit instruction. The penultimate msb
570 is at bit 25 which leads to the pattern below.
571
572 Originally, I had this set up to do, e.g, a "break.i 0x80000" But
573 it turns out that 0x80000 was used as the syscall break in the early
574 simulators. So I changed the pattern slightly to do "break.i 0x080001"
575 instead. But that didn't work either (I later found out that this
576 pattern was used by the simulator that I was using.) So I ended up
577 using the pattern seen below.
578
579 SHADOW_CONTENTS has byte-based addressing (PLACED_ADDRESS and SHADOW_LEN)
580 while we need bit-based addressing as the instruction length is 41 bits and
581 we must not modify/corrupt the adjacent slots in the same bundle.
582 Fortunately we may store a larger region, including the adjacent bits, with
583 the original memory content (not the breakpoints possibly already stored there).
584 We need to be careful in ia64_memory_remove_breakpoint to always restore
585 only the specific bits of this instruction ignoring any adjacent stored
586 bits.
587
588 We use the original addressing with the low nibble in the range <0..2> which
589 gets incorrectly interpreted by generic non-ia64 breakpoint_restore_shadows
590 as the direct byte offset of SHADOW_CONTENTS. We store whole BUNDLE_LEN
591 bytes, just without these up to two skipped bytes, so as not to run into the
592 next bundle.
593
594 If we wanted to store the whole bundle to SHADOW_CONTENTS we would have
595 to store the base address (`address & ~0x0f') into PLACED_ADDRESS.
596 In that case there would be no place left to store
597 SLOTNUM (`address & 0x0f', a value in the range <0..2>), which we need
598 to know in ia64_memory_remove_breakpoint.
599
600 There is one special case where we need to be extra careful:
601 L-X instructions, which are instructions that occupy 2 slots
602 (The L part is always in slot 1, and the X part is always in
603 slot 2). We must refuse to insert breakpoints for an address
604 that points at slot 2 of a bundle where an L-X instruction is
605 present, since there is logically no instruction at that address.
606 However, to make things more interesting, the opcode of L-X
607 instructions is located in slot 2. This means that, to insert
608 a breakpoint at an address that points to slot 1, we actually
609 need to write the breakpoint in slot 2! Slot 1 is actually
610 the extended operand, so writing the breakpoint there would not
611 have the desired effect. Another side-effect of this issue
612 is that we need to make sure that the shadow contents buffer
613 does save byte 15 of our instruction bundle (this is the tail
614 end of slot 2, which wouldn't be saved if we were to insert
615 the breakpoint in slot 1).
616
617 ia64 16-byte bundle layout:
618 | 5 bits | slot 0 with 41 bits | slot 1 with 41 bits | slot 2 with 41 bits |
619
620 The current addressing used by the code below:
621 original PC placed_address placed_size required covered
622 == bp_tgt->shadow_len reqd \subset covered
623 0xABCDE0 0xABCDE0 0x10 <0x0...0x5> <0x0..0xF>
624 0xABCDE1 0xABCDE1 0xF <0x5...0xA> <0x1..0xF>
625 0xABCDE2 0xABCDE2 0xE <0xA...0xF> <0x2..0xF>
626
627 L-X instructions are treated a little specially, as explained above:
628 0xABCDE1 0xABCDE1 0xF <0xA...0xF> <0x1..0xF>
629
630 `objdump -d' and some other tools show somewhat unjustified offsets:
631 original PC byte where starts the instruction objdump offset
632 0xABCDE0 0xABCDE0 0xABCDE0
633 0xABCDE1 0xABCDE5 0xABCDE6
634 0xABCDE2 0xABCDEA 0xABCDEC
635 */
636
637 #define IA64_BREAKPOINT 0x00003333300LL
638
639 static int
640 ia64_memory_insert_breakpoint (struct gdbarch *gdbarch,
641 struct bp_target_info *bp_tgt)
642 {
643 CORE_ADDR addr = bp_tgt->placed_address;
644 gdb_byte bundle[BUNDLE_LEN];
645 int slotnum = (int) (addr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
646 long long instr_breakpoint;
647 int val;
648 int template;
649 struct cleanup *cleanup;
650
651 if (slotnum > 2)
652 error (_("Can't insert breakpoint for slot numbers greater than 2."));
653
654 addr &= ~0x0f;
655
656 /* Enable the automatic memory restoration from breakpoints while
657 we read our instruction bundle for the purpose of SHADOW_CONTENTS.
658 Otherwise, we could possibly store into the shadow parts of the adjacent
659 placed breakpoints. It is due to our SHADOW_CONTENTS overlapping the real
660 breakpoint instruction bits region. */
661 cleanup = make_show_memory_breakpoints_cleanup (0);
662 val = target_read_memory (addr, bundle, BUNDLE_LEN);
663 if (val != 0)
664 {
665 do_cleanups (cleanup);
666 return val;
667 }
668
669 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
670 for addressing the SHADOW_CONTENTS placement. */
671 shadow_slotnum = slotnum;
672
673 /* Always cover the last byte of the bundle in case we are inserting
674 a breakpoint on an L-X instruction. */
675 bp_tgt->shadow_len = BUNDLE_LEN - shadow_slotnum;
676
677 template = extract_bit_field (bundle, 0, 5);
678 if (template_encoding_table[template][slotnum] == X)
679 {
680 /* X unit types can only be used in slot 2, and are actually
681 part of a 2-slot L-X instruction. We cannot break at this
682 address, as this is the second half of an instruction that
683 lives in slot 1 of that bundle. */
684 gdb_assert (slotnum == 2);
685 error (_("Can't insert breakpoint for non-existing slot X"));
686 }
687 if (template_encoding_table[template][slotnum] == L)
688 {
689 /* L unit types can only be used in slot 1. But the associated
690 opcode for that instruction is in slot 2, so bump the slot number
691 accordingly. */
692 gdb_assert (slotnum == 1);
693 slotnum = 2;
694 }
695
696 /* Store the whole bundle, except for the initial bytes skipped according to
697 the slot number interpreted as a byte offset in PLACED_ADDRESS. */
698 memcpy (bp_tgt->shadow_contents, bundle + shadow_slotnum,
699 bp_tgt->shadow_len);
700
701 /* Re-read the same bundle as above except that, this time, read it in order
702 to compute the new bundle inside which we will be inserting the
703 breakpoint. Therefore, disable the automatic memory restoration from
704 breakpoints while we read our instruction bundle. Otherwise, the general
705 restoration mechanism kicks in and we would possibly remove parts of the
706 adjacent placed breakpoints. It is due to our SHADOW_CONTENTS overlapping
707 the real breakpoint instruction bits region. */
708 make_show_memory_breakpoints_cleanup (1);
709 val = target_read_memory (addr, bundle, BUNDLE_LEN);
710 if (val != 0)
711 {
712 do_cleanups (cleanup);
713 return val;
714 }
715
716 /* Breakpoints already present in the code will get detected and not get
717 reinserted by bp_loc_is_permanent. Multiple breakpoints at the same
718 location cannot induce the internal error as they are optimized into
719 a single instance by update_global_location_list. */
720 instr_breakpoint = slotN_contents (bundle, slotnum);
721 if (instr_breakpoint == IA64_BREAKPOINT)
722 internal_error (__FILE__, __LINE__,
723 _("Address %s already contains a breakpoint."),
724 paddress (gdbarch, bp_tgt->placed_address));
725 replace_slotN_contents (bundle, IA64_BREAKPOINT, slotnum);
726
727 bp_tgt->placed_size = bp_tgt->shadow_len;
728
729 val = target_write_memory (addr + shadow_slotnum, bundle + shadow_slotnum,
730 bp_tgt->shadow_len);
731
732 do_cleanups (cleanup);
733 return val;
734 }
735
736 static int
737 ia64_memory_remove_breakpoint (struct gdbarch *gdbarch,
738 struct bp_target_info *bp_tgt)
739 {
740 CORE_ADDR addr = bp_tgt->placed_address;
741 gdb_byte bundle_mem[BUNDLE_LEN], bundle_saved[BUNDLE_LEN];
742 int slotnum = (addr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
743 long long instr_breakpoint, instr_saved;
744 int val;
745 int template;
746 struct cleanup *cleanup;
747
748 addr &= ~0x0f;
749
750 /* Disable the automatic memory restoration from breakpoints while
751 we read our instruction bundle. Otherwise, the general restoration
752 mechanism kicks in and we would possibly remove parts of the adjacent
753 placed breakpoints. It is due to our SHADOW_CONTENTS overlapping the real
754 breakpoint instruction bits region. */
755 cleanup = make_show_memory_breakpoints_cleanup (1);
756 val = target_read_memory (addr, bundle_mem, BUNDLE_LEN);
757 if (val != 0)
758 {
759 do_cleanups (cleanup);
760 return val;
761 }
762
763 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
764 for addressing the SHADOW_CONTENTS placement. */
765 shadow_slotnum = slotnum;
766
767 template = extract_bit_field (bundle_mem, 0, 5);
768 if (template_encoding_table[template][slotnum] == X)
769 {
770 /* X unit types can only be used in slot 2, and are actually
771 part of a 2-slot L-X instruction. We refuse to insert
772 breakpoints at this address, so there should be no reason
773 for us attempting to remove one there, except if the program's
774 code somehow got modified in memory. */
775 gdb_assert (slotnum == 2);
776 warning (_("Cannot remove breakpoint at address %s from non-existing "
777 "X-type slot, memory has changed underneath"),
778 paddress (gdbarch, bp_tgt->placed_address));
779 do_cleanups (cleanup);
780 return -1;
781 }
782 if (template_encoding_table[template][slotnum] == L)
783 {
784 /* L unit types can only be used in slot 1. But the breakpoint
785 was actually saved using slot 2, so update the slot number
786 accordingly. */
787 gdb_assert (slotnum == 1);
788 slotnum = 2;
789 }
790
791 gdb_assert (bp_tgt->placed_size == BUNDLE_LEN - shadow_slotnum);
792 gdb_assert (bp_tgt->placed_size == bp_tgt->shadow_len);
793
794 instr_breakpoint = slotN_contents (bundle_mem, slotnum);
795 if (instr_breakpoint != IA64_BREAKPOINT)
796 {
797 warning (_("Cannot remove breakpoint at address %s, "
798 "no break instruction at such address."),
799 paddress (gdbarch, bp_tgt->placed_address));
800 do_cleanups (cleanup);
801 return -1;
802 }
803
804 /* Extract the original saved instruction from SLOTNUM normalizing its
805 bit-shift for INSTR_SAVED. */
806 memcpy (bundle_saved, bundle_mem, BUNDLE_LEN);
807 memcpy (bundle_saved + shadow_slotnum, bp_tgt->shadow_contents,
808 bp_tgt->shadow_len);
809 instr_saved = slotN_contents (bundle_saved, slotnum);
810
811 /* In BUNDLE_MEM, be careful to modify only the bits belonging to SLOTNUM
812 and not any of the other ones that are stored in SHADOW_CONTENTS. */
813 replace_slotN_contents (bundle_mem, instr_saved, slotnum);
814 val = target_write_memory (addr, bundle_mem, BUNDLE_LEN);
815
816 do_cleanups (cleanup);
817 return val;
818 }
819
820 /* As gdbarch_breakpoint_from_pc ranges have byte granularity and ia64
821 instruction slot ranges are bit-granular (41 bits), we have to provide an
822 extended range as described for ia64_memory_insert_breakpoint. We also take
823 care of preserving the `break' instruction 21-bit (or 62-bit) parameter to
824 make a match for permanent breakpoints. */
825
826 static const gdb_byte *
827 ia64_breakpoint_from_pc (struct gdbarch *gdbarch,
828 CORE_ADDR *pcptr, int *lenptr)
829 {
830 CORE_ADDR addr = *pcptr;
831 static gdb_byte bundle[BUNDLE_LEN];
832 int slotnum = (int) (*pcptr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
833 long long instr_fetched;
834 int val;
835 int template;
836 struct cleanup *cleanup;
837
838 if (slotnum > 2)
839 error (_("Can't insert breakpoint for slot numbers greater than 2."));
840
841 addr &= ~0x0f;
842
843 /* Enable the automatic memory restoration from breakpoints while
844 we read our instruction bundle to match bp_loc_is_permanent. */
845 cleanup = make_show_memory_breakpoints_cleanup (0);
846 val = target_read_memory (addr, bundle, BUNDLE_LEN);
847 do_cleanups (cleanup);
848
849 /* The memory might be unreachable. This can happen, for instance,
850 when the user inserts a breakpoint at an invalid address. */
851 if (val != 0)
852 return NULL;
853
854 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
855 for addressing the SHADOW_CONTENTS placement. */
856 shadow_slotnum = slotnum;
857
858 /* Always cover the last byte of the bundle for the L-X slot case. */
859 *lenptr = BUNDLE_LEN - shadow_slotnum;
860
861 /* Check for an L-type instruction in slot 1; if present, bump the slot
862 number up to slot 2. */
863 template = extract_bit_field (bundle, 0, 5);
864 if (template_encoding_table[template][slotnum] == X)
865 {
866 gdb_assert (slotnum == 2);
867 error (_("Can't insert breakpoint for non-existing slot X"));
868 }
869 if (template_encoding_table[template][slotnum] == L)
870 {
871 gdb_assert (slotnum == 1);
872 slotnum = 2;
873 }
874
875 /* A break instruction has all its opcode bits cleared except for
876 the parameter value. For an L+X slot pair we are at the X slot (slot 2), so
877 we should not touch the L slot - the upper 41 bits of the parameter. */
878 instr_fetched = slotN_contents (bundle, slotnum);
879 instr_fetched &= 0x1003ffffc0LL;
880 replace_slotN_contents (bundle, instr_fetched, slotnum);
881
882 return bundle + shadow_slotnum;
883 }
884
885 static CORE_ADDR
886 ia64_read_pc (struct regcache *regcache)
887 {
888 ULONGEST psr_value, pc_value;
889 int slot_num;
890
891 regcache_cooked_read_unsigned (regcache, IA64_PSR_REGNUM, &psr_value);
892 regcache_cooked_read_unsigned (regcache, IA64_IP_REGNUM, &pc_value);
893 slot_num = (psr_value >> 41) & 3;
894
895 return pc_value | (slot_num * SLOT_MULTIPLIER);
896 }
897
898 void
899 ia64_write_pc (struct regcache *regcache, CORE_ADDR new_pc)
900 {
901 int slot_num = (int) (new_pc & 0xf) / SLOT_MULTIPLIER;
902 ULONGEST psr_value;
903
904 regcache_cooked_read_unsigned (regcache, IA64_PSR_REGNUM, &psr_value);
905 psr_value &= ~(3LL << 41);
906 psr_value |= (ULONGEST)(slot_num & 0x3) << 41;
907
908 new_pc &= ~0xfLL;
909
910 regcache_cooked_write_unsigned (regcache, IA64_PSR_REGNUM, psr_value);
911 regcache_cooked_write_unsigned (regcache, IA64_IP_REGNUM, new_pc);
912 }
913
914 #define IS_NaT_COLLECTION_ADDR(addr) ((((addr) >> 3) & 0x3f) == 0x3f)
915
916 /* Returns the address of the slot that's NSLOTS slots away from
917 the address ADDR. NSLOTS may be positive or negative. */
918 static CORE_ADDR
919 rse_address_add(CORE_ADDR addr, int nslots)
920 {
921 CORE_ADDR new_addr;
922 int mandatory_nat_slots = nslots / 63;
923 int direction = nslots < 0 ? -1 : 1;
924
925 new_addr = addr + 8 * (nslots + mandatory_nat_slots);
926
927 if ((new_addr >> 9) != ((addr + 8 * 64 * mandatory_nat_slots) >> 9))
928 new_addr += 8 * direction;
929
930 if (IS_NaT_COLLECTION_ADDR(new_addr))
931 new_addr += 8 * direction;
932
933 return new_addr;
934 }
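/* An illustrative sketch, not compiled into GDB ("#if 0"): every 64th
   8-byte slot of the register backing store (the one whose address ends in
   0x1f8 within each 512-byte block) holds a NaT collection rather than a
   stacked register, which is why stepping NSLOTS register slots is done
   with rse_address_add rather than plain pointer arithmetic.  For
   instance, one slot past 0x1f0 is 0x200, because 0x1f8 is skipped.  The
   function name is made up for the example.  */
#if 0
static void
example_rse_step (void)
{
  gdb_assert (IS_NaT_COLLECTION_ADDR (0x1f8));
  gdb_assert (rse_address_add (0x1f0, 1) == 0x200);
}
#endif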
935
936 static enum register_status
937 ia64_pseudo_register_read (struct gdbarch *gdbarch, struct regcache *regcache,
938 int regnum, gdb_byte *buf)
939 {
940 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
941 enum register_status status;
942
943 if (regnum >= V32_REGNUM && regnum <= V127_REGNUM)
944 {
945 #ifdef HAVE_LIBUNWIND_IA64_H
946 /* First try to use the libunwind special reg accessor;
947 otherwise fall back to the standard logic. */
948 if (!libunwind_is_initialized ()
949 || libunwind_get_reg_special (gdbarch, regcache, regnum, buf) != 0)
950 #endif
951 {
952 /* The fallback position is to assume that r32-r127 are
953 found sequentially in memory starting at $bof. This
954 isn't always true, but without libunwind, this is the
955 best we can do. */
956 enum register_status status;
957 ULONGEST cfm;
958 ULONGEST bsp;
959 CORE_ADDR reg;
960
961 status = regcache_cooked_read_unsigned (regcache,
962 IA64_BSP_REGNUM, &bsp);
963 if (status != REG_VALID)
964 return status;
965
966 status = regcache_cooked_read_unsigned (regcache,
967 IA64_CFM_REGNUM, &cfm);
968 if (status != REG_VALID)
969 return status;
970
971 /* The bsp points at the end of the register frame so we
972 subtract the size of frame from it to get start of
973 register frame. */
974 bsp = rse_address_add (bsp, -(cfm & 0x7f));
975
976 if ((cfm & 0x7f) > regnum - V32_REGNUM)
977 {
978 ULONGEST reg_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
979 reg = read_memory_integer ((CORE_ADDR)reg_addr, 8, byte_order);
980 store_unsigned_integer (buf, register_size (gdbarch, regnum),
981 byte_order, reg);
982 }
983 else
984 store_unsigned_integer (buf, register_size (gdbarch, regnum),
985 byte_order, 0);
986 }
987 }
988 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
989 {
990 ULONGEST unatN_val;
991 ULONGEST unat;
992 status = regcache_cooked_read_unsigned (regcache, IA64_UNAT_REGNUM, &unat);
993 if (status != REG_VALID)
994 return status;
995 unatN_val = (unat & (1LL << (regnum - IA64_NAT0_REGNUM))) != 0;
996 store_unsigned_integer (buf, register_size (gdbarch, regnum),
997 byte_order, unatN_val);
998 }
999 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
1000 {
1001 ULONGEST natN_val = 0;
1002 ULONGEST bsp;
1003 ULONGEST cfm;
1004 CORE_ADDR gr_addr = 0;
1005 status = regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1006 if (status != REG_VALID)
1007 return status;
1008 status = regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1009 if (status != REG_VALID)
1010 return status;
1011
1012 /* The bsp points at the end of the register frame so we
1013 subtract the size of frame from it to get start of register frame. */
1014 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1015
1016 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1017 gr_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1018
1019 if (gr_addr != 0)
1020 {
1021 /* Compute address of nat collection bits. */
1022 CORE_ADDR nat_addr = gr_addr | 0x1f8;
1023 CORE_ADDR nat_collection;
1024 int nat_bit;
1025 /* If our nat collection address is bigger than bsp, we have to get
1026 the nat collection from rnat. Otherwise, we fetch the nat
1027 collection from the computed address. */
1028 if (nat_addr >= bsp)
1029 regcache_cooked_read_unsigned (regcache, IA64_RNAT_REGNUM,
1030 &nat_collection);
1031 else
1032 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
1033 nat_bit = (gr_addr >> 3) & 0x3f;
1034 natN_val = (nat_collection >> nat_bit) & 1;
1035 }
1036
1037 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1038 byte_order, natN_val);
1039 }
1040 else if (regnum == VBOF_REGNUM)
1041 {
1042 /* A virtual register frame start is provided for user convenience.
1043 It can be calculated as the bsp - sof (sizeof frame). */
1044 ULONGEST bsp, vbsp;
1045 ULONGEST cfm;
1046 CORE_ADDR reg;
1047 status = regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1048 if (status != REG_VALID)
1049 return status;
1050 status = regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1051 if (status != REG_VALID)
1052 return status;
1053
1054 /* The bsp points at the end of the register frame so we
1055 subtract the size of frame from it to get beginning of frame. */
1056 vbsp = rse_address_add (bsp, -(cfm & 0x7f));
1057 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1058 byte_order, vbsp);
1059 }
1060 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1061 {
1062 ULONGEST pr;
1063 ULONGEST cfm;
1064 ULONGEST prN_val;
1065 CORE_ADDR reg;
1066 status = regcache_cooked_read_unsigned (regcache, IA64_PR_REGNUM, &pr);
1067 if (status != REG_VALID)
1068 return status;
1069 status = regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1070 if (status != REG_VALID)
1071 return status;
1072
1073 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1074 {
1075 /* Fetch predicate register rename base from current frame
1076 marker for this frame. */
1077 int rrb_pr = (cfm >> 32) & 0x3f;
1078
1079 /* Adjust the register number to account for register rotation. */
1080 regnum = VP16_REGNUM
1081 + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1082 }
1083 prN_val = (pr & (1LL << (regnum - VP0_REGNUM))) != 0;
1084 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1085 byte_order, prN_val);
1086 }
1087 else
1088 memset (buf, 0, register_size (gdbarch, regnum));
1089
1090 return REG_VALID;
1091 }
1092
1093 static void
1094 ia64_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
1095 int regnum, const gdb_byte *buf)
1096 {
1097 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1098
1099 if (regnum >= V32_REGNUM && regnum <= V127_REGNUM)
1100 {
1101 ULONGEST bsp;
1102 ULONGEST cfm;
1103 CORE_ADDR reg;
1104 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1105 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1106
1107 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1108
1109 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1110 {
1111 ULONGEST reg_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1112 write_memory (reg_addr, (void *) buf, 8);
1113 }
1114 }
1115 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
1116 {
1117 ULONGEST unatN_val, unat, unatN_mask;
1118 regcache_cooked_read_unsigned (regcache, IA64_UNAT_REGNUM, &unat);
1119 unatN_val = extract_unsigned_integer (buf, register_size (gdbarch,
1120 regnum),
1121 byte_order);
1122 unatN_mask = (1LL << (regnum - IA64_NAT0_REGNUM));
1123 if (unatN_val == 0)
1124 unat &= ~unatN_mask;
1125 else if (unatN_val == 1)
1126 unat |= unatN_mask;
1127 regcache_cooked_write_unsigned (regcache, IA64_UNAT_REGNUM, unat);
1128 }
1129 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
1130 {
1131 ULONGEST natN_val;
1132 ULONGEST bsp;
1133 ULONGEST cfm;
1134 CORE_ADDR gr_addr = 0;
1135 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1136 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1137
1138 /* The bsp points at the end of the register frame so we
1139 subtract the size of frame from it to get start of register frame. */
1140 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1141
1142 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1143 gr_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1144
1145 natN_val = extract_unsigned_integer (buf, register_size (gdbarch,
1146 regnum),
1147 byte_order);
1148
1149 if (gr_addr != 0 && (natN_val == 0 || natN_val == 1))
1150 {
1151 /* Compute address of nat collection bits. */
1152 CORE_ADDR nat_addr = gr_addr | 0x1f8;
1153 CORE_ADDR nat_collection;
1154 int natN_bit = (gr_addr >> 3) & 0x3f;
1155 ULONGEST natN_mask = (1LL << natN_bit);
1156 /* If our nat collection address is bigger than bsp, we have to get
1157 the nat collection from rnat. Otherwise, we fetch the nat
1158 collection from the computed address. */
1159 if (nat_addr >= bsp)
1160 {
1161 regcache_cooked_read_unsigned (regcache,
1162 IA64_RNAT_REGNUM,
1163 &nat_collection);
1164 if (natN_val)
1165 nat_collection |= natN_mask;
1166 else
1167 nat_collection &= ~natN_mask;
1168 regcache_cooked_write_unsigned (regcache, IA64_RNAT_REGNUM,
1169 nat_collection);
1170 }
1171 else
1172 {
1173 char nat_buf[8];
1174 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
1175 if (natN_val)
1176 nat_collection |= natN_mask;
1177 else
1178 nat_collection &= ~natN_mask;
1179 store_unsigned_integer (nat_buf, register_size (gdbarch, regnum),
1180 byte_order, nat_collection);
1181 write_memory (nat_addr, nat_buf, 8);
1182 }
1183 }
1184 }
1185 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1186 {
1187 ULONGEST pr;
1188 ULONGEST cfm;
1189 ULONGEST prN_val;
1190 ULONGEST prN_mask;
1191
1192 regcache_cooked_read_unsigned (regcache, IA64_PR_REGNUM, &pr);
1193 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1194
1195 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1196 {
1197 /* Fetch predicate register rename base from current frame
1198 marker for this frame. */
1199 int rrb_pr = (cfm >> 32) & 0x3f;
1200
1201 /* Adjust the register number to account for register rotation. */
1202 regnum = VP16_REGNUM
1203 + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1204 }
1205 prN_val = extract_unsigned_integer (buf, register_size (gdbarch, regnum),
1206 byte_order);
1207 prN_mask = (1LL << (regnum - VP0_REGNUM));
1208 if (prN_val == 0)
1209 pr &= ~prN_mask;
1210 else if (prN_val == 1)
1211 pr |= prN_mask;
1212 regcache_cooked_write_unsigned (regcache, IA64_PR_REGNUM, pr);
1213 }
1214 }
1215
1216 /* The ia64 needs to convert between various ieee floating-point formats
1217 and the special ia64 floating point register format. */
1218
1219 static int
1220 ia64_convert_register_p (struct gdbarch *gdbarch, int regno, struct type *type)
1221 {
1222 return (regno >= IA64_FR0_REGNUM && regno <= IA64_FR127_REGNUM
1223 && type != ia64_ext_type (gdbarch));
1224 }
1225
1226 static void
1227 ia64_register_to_value (struct frame_info *frame, int regnum,
1228 struct type *valtype, gdb_byte *out)
1229 {
1230 struct gdbarch *gdbarch = get_frame_arch (frame);
1231 char in[MAX_REGISTER_SIZE];
1232 frame_register_read (frame, regnum, in);
1233 convert_typed_floating (in, ia64_ext_type (gdbarch), out, valtype);
1234 }
1235
1236 static void
1237 ia64_value_to_register (struct frame_info *frame, int regnum,
1238 struct type *valtype, const gdb_byte *in)
1239 {
1240 struct gdbarch *gdbarch = get_frame_arch (frame);
1241 char out[MAX_REGISTER_SIZE];
1242 convert_typed_floating (in, valtype, out, ia64_ext_type (gdbarch));
1243 put_frame_register (frame, regnum, out);
1244 }
1245
1246
1247 /* Limit the number of skipped non-prologue instructions since examining
1248 the prologue is expensive. */
1249 static int max_skip_non_prologue_insns = 40;
1250
1251 /* Given PC representing the starting address of a function, and
1252 LIM_PC which is the (sloppy) limit to which to scan when looking
1253 for a prologue, attempt to further refine this limit by using
1254 the line data in the symbol table. If successful, a better guess
1255 on where the prologue ends is returned, otherwise the previous
1256 value of lim_pc is returned. TRUST_LIMIT is a pointer to a flag
1257 which will be set to indicate whether the returned limit may be
1258 used with no further scanning in the event that the function is
1259 frameless. */
1260
1261 /* FIXME: cagney/2004-02-14: This function and logic have largely been
1262 superseded by skip_prologue_using_sal. */
1263
1264 static CORE_ADDR
1265 refine_prologue_limit (CORE_ADDR pc, CORE_ADDR lim_pc, int *trust_limit)
1266 {
1267 struct symtab_and_line prologue_sal;
1268 CORE_ADDR start_pc = pc;
1269 CORE_ADDR end_pc;
1270
1271 /* The prologue can not possibly go past the function end itself,
1272 so we can already adjust LIM_PC accordingly. */
1273 if (find_pc_partial_function (pc, NULL, NULL, &end_pc) && end_pc < lim_pc)
1274 lim_pc = end_pc;
1275
1276 /* Start off not trusting the limit. */
1277 *trust_limit = 0;
1278
1279 prologue_sal = find_pc_line (pc, 0);
1280 if (prologue_sal.line != 0)
1281 {
1282 int i;
1283 CORE_ADDR addr = prologue_sal.end;
1284
1285 /* Handle the case in which compiler's optimizer/scheduler
1286 has moved instructions into the prologue. We scan ahead
1287 in the function looking for address ranges whose corresponding
1288 line number is less than or equal to the first one that we
1289 found for the function. (It can be less than when the
1290 scheduler puts a body instruction before the first prologue
1291 instruction.) */
1292 for (i = 2 * max_skip_non_prologue_insns;
1293 i > 0 && (lim_pc == 0 || addr < lim_pc);
1294 i--)
1295 {
1296 struct symtab_and_line sal;
1297
1298 sal = find_pc_line (addr, 0);
1299 if (sal.line == 0)
1300 break;
1301 if (sal.line <= prologue_sal.line
1302 && sal.symtab == prologue_sal.symtab)
1303 {
1304 prologue_sal = sal;
1305 }
1306 addr = sal.end;
1307 }
1308
1309 if (lim_pc == 0 || prologue_sal.end < lim_pc)
1310 {
1311 lim_pc = prologue_sal.end;
1312 if (start_pc == get_pc_function_start (lim_pc))
1313 *trust_limit = 1;
1314 }
1315 }
1316 return lim_pc;
1317 }
1318
1319 #define isScratch(_regnum_) ((_regnum_) == 2 || (_regnum_) == 3 \
1320 || (8 <= (_regnum_) && (_regnum_) <= 11) \
1321 || (14 <= (_regnum_) && (_regnum_) <= 31))
1322 #define imm9(_instr_) \
1323 ( ((((_instr_) & 0x01000000000LL) ? -1 : 0) << 8) \
1324 | (((_instr_) & 0x00008000000LL) >> 20) \
1325 | (((_instr_) & 0x00000001fc0LL) >> 6))
1326
1327 /* Allocate and initialize a frame cache. */
1328
1329 static struct ia64_frame_cache *
1330 ia64_alloc_frame_cache (void)
1331 {
1332 struct ia64_frame_cache *cache;
1333 int i;
1334
1335 cache = FRAME_OBSTACK_ZALLOC (struct ia64_frame_cache);
1336
1337 /* Base address. */
1338 cache->base = 0;
1339 cache->pc = 0;
1340 cache->cfm = 0;
1341 cache->prev_cfm = 0;
1342 cache->sof = 0;
1343 cache->sol = 0;
1344 cache->sor = 0;
1345 cache->bsp = 0;
1346 cache->fp_reg = 0;
1347 cache->frameless = 1;
1348
1349 for (i = 0; i < NUM_IA64_RAW_REGS; i++)
1350 cache->saved_regs[i] = 0;
1351
1352 return cache;
1353 }
1354
1355 static CORE_ADDR
1356 examine_prologue (CORE_ADDR pc, CORE_ADDR lim_pc,
1357 struct frame_info *this_frame,
1358 struct ia64_frame_cache *cache)
1359 {
1360 CORE_ADDR next_pc;
1361 CORE_ADDR last_prologue_pc = pc;
1362 instruction_type it;
1363 long long instr;
1364 int cfm_reg = 0;
1365 int ret_reg = 0;
1366 int fp_reg = 0;
1367 int unat_save_reg = 0;
1368 int pr_save_reg = 0;
1369 int mem_stack_frame_size = 0;
1370 int spill_reg = 0;
1371 CORE_ADDR spill_addr = 0;
1372 char instores[8];
1373 char infpstores[8];
1374 char reg_contents[256];
1375 int trust_limit;
1376 int frameless = 1;
1377 int i;
1378 CORE_ADDR addr;
1379 char buf[8];
1380 CORE_ADDR bof, sor, sol, sof, cfm, rrb_gr;
1381
1382 memset (instores, 0, sizeof instores);
1383 memset (infpstores, 0, sizeof infpstores);
1384 memset (reg_contents, 0, sizeof reg_contents);
1385
1386 if (cache->after_prologue != 0
1387 && cache->after_prologue <= lim_pc)
1388 return cache->after_prologue;
1389
1390 lim_pc = refine_prologue_limit (pc, lim_pc, &trust_limit);
1391 next_pc = fetch_instruction (pc, &it, &instr);
1392
1393 /* We want to check if we have a recognizable function start before we
1394 look ahead for a prologue. */
1395 if (pc < lim_pc && next_pc
1396 && it == M && ((instr & 0x1ee0000003fLL) == 0x02c00000000LL))
1397 {
1398 /* alloc - start of a regular function. */
1399 int sor = (int) ((instr & 0x00078000000LL) >> 27);
1400 int sol = (int) ((instr & 0x00007f00000LL) >> 20);
1401 int sof = (int) ((instr & 0x000000fe000LL) >> 13);
1402 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1403
1404 /* Verify that the current cfm matches what we think is the
1405 function start. If we have somehow jumped within a function,
1406 we do not want to interpret the prologue and calculate the
1407 addresses of various registers such as the return address.
1408 We will instead treat the frame as frameless. */
1409 if (!this_frame ||
1410 (sof == (cache->cfm & 0x7f) &&
1411 sol == ((cache->cfm >> 7) & 0x7f)))
1412 frameless = 0;
1413
1414 cfm_reg = rN;
1415 last_prologue_pc = next_pc;
1416 pc = next_pc;
1417 }
1418 else
1419 {
1420 /* Look for a leaf routine. */
1421 if (pc < lim_pc && next_pc
1422 && (it == I || it == M)
1423 && ((instr & 0x1ee00000000LL) == 0x10800000000LL))
1424 {
1425 /* adds rN = imm14, rM (or mov rN, rM when imm14 is 0) */
1426 int imm = (int) ((((instr & 0x01000000000LL) ? -1 : 0) << 13)
1427 | ((instr & 0x001f8000000LL) >> 20)
1428 | ((instr & 0x000000fe000LL) >> 13));
1429 int rM = (int) ((instr & 0x00007f00000LL) >> 20);
1430 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1431 int qp = (int) (instr & 0x0000000003fLL);
1432 if (qp == 0 && rN == 2 && imm == 0 && rM == 12 && fp_reg == 0)
1433 {
1434 /* mov r2, r12 - beginning of leaf routine. */
1435 fp_reg = rN;
1436 last_prologue_pc = next_pc;
1437 }
1438 }
1439
1440 /* If we don't recognize a regular function or leaf routine, we are
1441 done. */
1442 if (!fp_reg)
1443 {
1444 pc = lim_pc;
1445 if (trust_limit)
1446 last_prologue_pc = lim_pc;
1447 }
1448 }
1449
1450 /* Loop, looking for prologue instructions, keeping track of
1451 where preserved registers were spilled. */
1452 while (pc < lim_pc)
1453 {
1454 next_pc = fetch_instruction (pc, &it, &instr);
1455 if (next_pc == 0)
1456 break;
1457
1458 if (it == B && ((instr & 0x1e1f800003fLL) != 0x04000000000LL))
1459 {
1460 /* Exit loop upon hitting a non-nop branch instruction. */
1461 if (trust_limit)
1462 lim_pc = pc;
1463 break;
1464 }
1465 else if (((instr & 0x3fLL) != 0LL) &&
1466 (frameless || ret_reg != 0))
1467 {
1468 /* Exit loop upon hitting a predicated instruction if
1469 we already have the return register or if we are frameless. */
1470 if (trust_limit)
1471 lim_pc = pc;
1472 break;
1473 }
1474 else if (it == I && ((instr & 0x1eff8000000LL) == 0x00188000000LL))
1475 {
1476 /* Move from BR */
1477 int b2 = (int) ((instr & 0x0000000e000LL) >> 13);
1478 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1479 int qp = (int) (instr & 0x0000000003f);
1480
1481 if (qp == 0 && b2 == 0 && rN >= 32 && ret_reg == 0)
1482 {
1483 ret_reg = rN;
1484 last_prologue_pc = next_pc;
1485 }
1486 }
1487 else if ((it == I || it == M)
1488 && ((instr & 0x1ee00000000LL) == 0x10800000000LL))
1489 {
1490 /* adds rN = imm14, rM (or mov rN, rM when imm14 is 0) */
1491 int imm = (int) ((((instr & 0x01000000000LL) ? -1 : 0) << 13)
1492 | ((instr & 0x001f8000000LL) >> 20)
1493 | ((instr & 0x000000fe000LL) >> 13));
1494 int rM = (int) ((instr & 0x00007f00000LL) >> 20);
1495 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1496 int qp = (int) (instr & 0x0000000003fLL);
1497
1498 if (qp == 0 && rN >= 32 && imm == 0 && rM == 12 && fp_reg == 0)
1499 {
1500 /* mov rN, r12 */
1501 fp_reg = rN;
1502 last_prologue_pc = next_pc;
1503 }
1504 else if (qp == 0 && rN == 12 && rM == 12)
1505 {
1506 /* adds r12, -mem_stack_frame_size, r12 */
1507 mem_stack_frame_size -= imm;
1508 last_prologue_pc = next_pc;
1509 }
1510 else if (qp == 0 && rN == 2
1511 && ((rM == fp_reg && fp_reg != 0) || rM == 12))
1512 {
1513 char buf[MAX_REGISTER_SIZE];
1514 CORE_ADDR saved_sp = 0;
1515 /* adds r2, spilloffset, rFramePointer
1516 or
1517 adds r2, spilloffset, r12
1518
1519 Get ready for stf.spill or st8.spill instructions.
1520 The address to start spilling at is loaded into r2.
1521 FIXME: Why r2? That's what gcc currently uses; it
1522 could well be different for other compilers. */
1523
1524 /* Hmm... whether or not this will work will depend on
1525 where the pc is. If it's still early in the prologue
1526 this'll be wrong. FIXME */
1527 if (this_frame)
1528 {
1529 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1530 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1531 get_frame_register (this_frame, sp_regnum, buf);
1532 saved_sp = extract_unsigned_integer (buf, 8, byte_order);
1533 }
1534 spill_addr = saved_sp
1535 + (rM == 12 ? 0 : mem_stack_frame_size)
1536 + imm;
1537 spill_reg = rN;
1538 last_prologue_pc = next_pc;
1539 }
1540 else if (qp == 0 && rM >= 32 && rM < 40 && !instores[rM-32] &&
1541 rN < 256 && imm == 0)
1542 {
1543 /* mov rN, rM where rM is an input register. */
1544 reg_contents[rN] = rM;
1545 last_prologue_pc = next_pc;
1546 }
1547 else if (frameless && qp == 0 && rN == fp_reg && imm == 0 &&
1548 rM == 2)
1549 {
1550 /* mov r12, r2 */
1551 last_prologue_pc = next_pc;
1552 break;
1553 }
1554 }
1555 else if (it == M
1556 && ( ((instr & 0x1efc0000000LL) == 0x0eec0000000LL)
1557 || ((instr & 0x1ffc8000000LL) == 0x0cec0000000LL) ))
1558 {
1559 /* stf.spill [rN] = fM, imm9
1560 or
1561 stf.spill [rN] = fM */
1562
1563 int imm = imm9(instr);
1564 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1565 int fM = (int) ((instr & 0x000000fe000LL) >> 13);
1566 int qp = (int) (instr & 0x0000000003fLL);
1567 if (qp == 0 && rN == spill_reg && spill_addr != 0
1568 && ((2 <= fM && fM <= 5) || (16 <= fM && fM <= 31)))
1569 {
1570 cache->saved_regs[IA64_FR0_REGNUM + fM] = spill_addr;
1571
1572 if ((instr & 0x1efc0000000LL) == 0x0eec0000000LL)
1573 spill_addr += imm;
1574 else
1575 spill_addr = 0; /* last one; must be done. */
1576 last_prologue_pc = next_pc;
1577 }
1578 }
1579 else if ((it == M && ((instr & 0x1eff8000000LL) == 0x02110000000LL))
1580 || (it == I && ((instr & 0x1eff8000000LL) == 0x00050000000LL)) )
1581 {
1582 /* mov.m rN = arM
1583 or
1584 mov.i rN = arM */
1585
1586 int arM = (int) ((instr & 0x00007f00000LL) >> 20);
1587 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1588 int qp = (int) (instr & 0x0000000003fLL);
1589 if (qp == 0 && isScratch (rN) && arM == 36 /* ar.unat */)
1590 {
1591 /* We have something like "mov.m r3 = ar.unat". Remember the
1592 r3 (or whatever) and watch for a store of this register... */
1593 unat_save_reg = rN;
1594 last_prologue_pc = next_pc;
1595 }
1596 }
1597 else if (it == I && ((instr & 0x1eff8000000LL) == 0x00198000000LL))
1598 {
1599 /* mov rN = pr */
1600 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1601 int qp = (int) (instr & 0x0000000003fLL);
1602 if (qp == 0 && isScratch (rN))
1603 {
1604 pr_save_reg = rN;
1605 last_prologue_pc = next_pc;
1606 }
1607 }
1608 else if (it == M
1609 && ( ((instr & 0x1ffc8000000LL) == 0x08cc0000000LL)
1610 || ((instr & 0x1efc0000000LL) == 0x0acc0000000LL)))
1611 {
1612 /* st8 [rN] = rM
1613 or
1614 st8 [rN] = rM, imm9 */
1615 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1616 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1617 int qp = (int) (instr & 0x0000000003fLL);
1618 int indirect = rM < 256 ? reg_contents[rM] : 0;
1619 if (qp == 0 && rN == spill_reg && spill_addr != 0
1620 && (rM == unat_save_reg || rM == pr_save_reg))
1621 {
1622 /* We've found a spill of either the UNAT register or the PR
1623 register. (Well, not exactly; what we've actually found is
1624 a spill of the register that UNAT or PR was moved to).
1625 Record that fact and move on... */
1626 if (rM == unat_save_reg)
1627 {
1628 /* Track UNAT register. */
1629 cache->saved_regs[IA64_UNAT_REGNUM] = spill_addr;
1630 unat_save_reg = 0;
1631 }
1632 else
1633 {
1634 /* Track PR register. */
1635 cache->saved_regs[IA64_PR_REGNUM] = spill_addr;
1636 pr_save_reg = 0;
1637 }
1638 if ((instr & 0x1efc0000000LL) == 0x0acc0000000LL)
1639 /* st8 [rN] = rM, imm9 */
1640 spill_addr += imm9(instr);
1641 else
1642 spill_addr = 0; /* Must be done spilling. */
1643 last_prologue_pc = next_pc;
1644 }
1645 else if (qp == 0 && 32 <= rM && rM < 40 && !instores[rM-32])
1646 {
1647 /* Allow up to one store of each input register. */
1648 instores[rM-32] = 1;
1649 last_prologue_pc = next_pc;
1650 }
1651 else if (qp == 0 && 32 <= indirect && indirect < 40 &&
1652 !instores[indirect-32])
1653 {
1654 /* Allow an indirect store of an input register. */
1655 instores[indirect-32] = 1;
1656 last_prologue_pc = next_pc;
1657 }
1658 }
1659 else if (it == M && ((instr & 0x1ff08000000LL) == 0x08c00000000LL))
1660 {
1661 /* One of
1662 st1 [rN] = rM
1663 st2 [rN] = rM
1664 st4 [rN] = rM
1665 st8 [rN] = rM
1666 Note that the st8 case is handled in the clause above.
1667
1668 Advance over stores of input registers. One store per input
1669 register is permitted. */
1670 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1671 int qp = (int) (instr & 0x0000000003fLL);
1672 int indirect = rM < 256 ? reg_contents[rM] : 0;
1673 if (qp == 0 && 32 <= rM && rM < 40 && !instores[rM-32])
1674 {
1675 instores[rM-32] = 1;
1676 last_prologue_pc = next_pc;
1677 }
1678 else if (qp == 0 && 32 <= indirect && indirect < 40 &&
1679 !instores[indirect-32])
1680 {
1681 /* Allow an indirect store of an input register. */
1682 instores[indirect-32] = 1;
1683 last_prologue_pc = next_pc;
1684 }
1685 }
1686 else if (it == M && ((instr & 0x1ff88000000LL) == 0x0cc80000000LL))
1687 {
1688 /* Either
1689 stfs [rN] = fM
1690 or
1691 stfd [rN] = fM
1692
1693 Advance over stores of floating point input registers. Again
1694 one store per register is permitted. */
1695 int fM = (int) ((instr & 0x000000fe000LL) >> 13);
1696 int qp = (int) (instr & 0x0000000003fLL);
1697 if (qp == 0 && 8 <= fM && fM < 16 && !infpstores[fM - 8])
1698 {
1699 infpstores[fM-8] = 1;
1700 last_prologue_pc = next_pc;
1701 }
1702 }
1703 else if (it == M
1704 && ( ((instr & 0x1ffc8000000LL) == 0x08ec0000000LL)
1705 || ((instr & 0x1efc0000000LL) == 0x0aec0000000LL)))
1706 {
1707 /* st8.spill [rN] = rM
1708 or
1709 st8.spill [rN] = rM, imm9 */
1710 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1711 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1712 int qp = (int) (instr & 0x0000000003fLL);
1713 if (qp == 0 && rN == spill_reg && 4 <= rM && rM <= 7)
1714 {
1715 /* We've found a spill of one of the preserved general purpose
1716 regs. Record the spill address and advance the spill
1717 register if appropriate. */
1718 cache->saved_regs[IA64_GR0_REGNUM + rM] = spill_addr;
1719 if ((instr & 0x1efc0000000LL) == 0x0aec0000000LL)
1720 /* st8.spill [rN] = rM, imm9 */
1721 spill_addr += imm9(instr);
1722 else
1723 spill_addr = 0; /* Done spilling. */
1724 last_prologue_pc = next_pc;
1725 }
1726 }
1727
1728 pc = next_pc;
1729 }
1730
1731 /* If not frameless and we aren't called by skip_prologue, then we need
1732 to calculate registers for the previous frame which will be needed
1733 later. */
1734
1735 if (!frameless && this_frame)
1736 {
1737 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1738 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1739
1740 /* Extract the size of the rotating portion of the stack
1741 frame and the register rename base from the current
1742 frame marker. */
1743 cfm = cache->cfm;
1744 sor = cache->sor;
1745 sof = cache->sof;
1746 sol = cache->sol;
1747 rrb_gr = (cfm >> 18) & 0x7f;
1748
1749 /* Find the bof (beginning of frame). */
1750 bof = rse_address_add (cache->bsp, -sof);
1751
1752 for (i = 0, addr = bof;
1753 i < sof;
1754 i++, addr += 8)
1755 {
1756 if (IS_NaT_COLLECTION_ADDR (addr))
1757 {
1758 addr += 8;
1759 }
1760 if (i+32 == cfm_reg)
1761 cache->saved_regs[IA64_CFM_REGNUM] = addr;
1762 if (i+32 == ret_reg)
1763 cache->saved_regs[IA64_VRAP_REGNUM] = addr;
1764 if (i+32 == fp_reg)
1765 cache->saved_regs[IA64_VFP_REGNUM] = addr;
1766 }
1767
1768 /* For the previous argument registers we require the previous bof.
1769 If we can't find the previous cfm, then we can do nothing. */
1770 cfm = 0;
1771 if (cache->saved_regs[IA64_CFM_REGNUM] != 0)
1772 {
1773 cfm = read_memory_integer (cache->saved_regs[IA64_CFM_REGNUM],
1774 8, byte_order);
1775 }
1776 else if (cfm_reg != 0)
1777 {
1778 get_frame_register (this_frame, cfm_reg, buf);
1779 cfm = extract_unsigned_integer (buf, 8, byte_order);
1780 }
1781 cache->prev_cfm = cfm;
1782
1783 if (cfm != 0)
1784 {
1785 sor = ((cfm >> 14) & 0xf) * 8;
1786 sof = (cfm & 0x7f);
1787 sol = (cfm >> 7) & 0x7f;
1788 rrb_gr = (cfm >> 18) & 0x7f;
1789
1790 /* The previous bof only requires subtraction of the sol (size of
1791 locals) due to the overlap between output and input of
1792 subsequent frames. */
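/* For example, if the caller's CFM gives sol == 5, its frame begins
   five register slots below the current bof; rse_address_add is used
   rather than plain subtraction so that any intervening NaT
   collection slot in the backing store is stepped over.  */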
1793 bof = rse_address_add (bof, -sol);
1794
1795 for (i = 0, addr = bof;
1796 i < sof;
1797 i++, addr += 8)
1798 {
1799 if (IS_NaT_COLLECTION_ADDR (addr))
1800 {
1801 addr += 8;
1802 }
1803 if (i < sor)
1804 cache->saved_regs[IA64_GR32_REGNUM
1805 + ((i + (sor - rrb_gr)) % sor)]
1806 = addr;
1807 else
1808 cache->saved_regs[IA64_GR32_REGNUM + i] = addr;
1809 }
1810
1811 }
1812 }
1813
1814 /* Try and trust the lim_pc value whenever possible. */
1815 if (trust_limit && lim_pc >= last_prologue_pc)
1816 last_prologue_pc = lim_pc;
1817
1818 cache->frameless = frameless;
1819 cache->after_prologue = last_prologue_pc;
1820 cache->mem_stack_frame_size = mem_stack_frame_size;
1821 cache->fp_reg = fp_reg;
1822
1823 return last_prologue_pc;
1824 }
1825
1826 CORE_ADDR
1827 ia64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1828 {
1829 struct ia64_frame_cache cache;
1830 cache.base = 0;
1831 cache.after_prologue = 0;
1832 cache.cfm = 0;
1833 cache.bsp = 0;
1834
1835 /* Call examine_prologue with a null third argument, since we don't
1836 have a frame to pass along as this_frame. */
1837 return examine_prologue (pc, pc+1024, 0, &cache);
1838 }
1839
1840
1841 /* Normal frames. */
1842
1843 static struct ia64_frame_cache *
1844 ia64_frame_cache (struct frame_info *this_frame, void **this_cache)
1845 {
1846 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1847 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1848 struct ia64_frame_cache *cache;
1849 char buf[8];
1850 CORE_ADDR cfm, sof, sol, bsp, psr;
1851 int i;
1852
1853 if (*this_cache)
1854 return *this_cache;
1855
1856 cache = ia64_alloc_frame_cache ();
1857 *this_cache = cache;
1858
1859 get_frame_register (this_frame, sp_regnum, buf);
1860 cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
1861
1862 /* We always want the bsp to point to the end of frame.
1863 This way, we can always get the beginning of frame (bof)
1864 by subtracting frame size. */
1865 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
1866 cache->bsp = extract_unsigned_integer (buf, 8, byte_order);
1867
1868 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
1869 psr = extract_unsigned_integer (buf, 8, byte_order);
1870
1871 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
1872 cfm = extract_unsigned_integer (buf, 8, byte_order);
1873
1874 cache->sof = (cfm & 0x7f);
1875 cache->sol = (cfm >> 7) & 0x7f;
1876 cache->sor = ((cfm >> 14) & 0xf) * 8;
1877
1878 cache->cfm = cfm;
1879
1880 cache->pc = get_frame_func (this_frame);
1881
1882 if (cache->pc != 0)
1883 examine_prologue (cache->pc, get_frame_pc (this_frame), this_frame, cache);
1884
1885 cache->base = cache->saved_sp + cache->mem_stack_frame_size;
1886
1887 return cache;
1888 }
1889
1890 static void
1891 ia64_frame_this_id (struct frame_info *this_frame, void **this_cache,
1892 struct frame_id *this_id)
1893 {
1894 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1895 struct ia64_frame_cache *cache =
1896 ia64_frame_cache (this_frame, this_cache);
1897
1898 /* If this is the outermost frame, leave the default (null) frame id. */
1899 if (cache->base != 0)
1900 (*this_id) = frame_id_build_special (cache->base, cache->pc, cache->bsp);
1901 if (gdbarch_debug >= 1)
1902 fprintf_unfiltered (gdb_stdlog,
1903 "regular frame id: code %s, stack %s, "
1904 "special %s, this_frame %s\n",
1905 paddress (gdbarch, this_id->code_addr),
1906 paddress (gdbarch, this_id->stack_addr),
1907 paddress (gdbarch, cache->bsp),
1908 host_address_to_string (this_frame));
1909 }
1910
1911 static struct value *
1912 ia64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
1913 int regnum)
1914 {
1915 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1916 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1917 struct ia64_frame_cache *cache = ia64_frame_cache (this_frame, this_cache);
1918 char buf[8];
1919
1920 gdb_assert (regnum >= 0);
1921
1922 if (!target_has_registers)
1923 error (_("No registers."));
1924
1925 if (regnum == gdbarch_sp_regnum (gdbarch))
1926 return frame_unwind_got_constant (this_frame, regnum, cache->base);
1927
1928 else if (regnum == IA64_BSP_REGNUM)
1929 {
1930 struct value *val;
1931 CORE_ADDR prev_cfm, bsp, prev_bsp;
1932
1933 /* We want to calculate the previous bsp as the end of the previous
1934 register stack frame. This corresponds to what the hardware bsp
1935 register will be if we pop the frame back which is why we might
1936 have been called. We know the beginning of the current frame is
1937 cache->bsp - cache->sof. This value in the previous frame points
1938 to the start of the output registers. We can calculate the end of
1939 that frame by adding the size of output:
1940 (sof (size of frame) - sol (size of locals)). */
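/* For example, a previous CFM with sof == 12 and sol == 8 describes
   four output registers, so the previous bsp lies four register slots
   (stepped with rse_address_add) above the current beginning of frame
   computed below.  */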
1941 val = ia64_frame_prev_register (this_frame, this_cache, IA64_CFM_REGNUM);
1942 prev_cfm = extract_unsigned_integer (value_contents_all (val),
1943 8, byte_order);
1944 bsp = rse_address_add (cache->bsp, -(cache->sof));
1945 prev_bsp =
1946 rse_address_add (bsp, (prev_cfm & 0x7f) - ((prev_cfm >> 7) & 0x7f));
1947
1948 return frame_unwind_got_constant (this_frame, regnum, prev_bsp);
1949 }
1950
1951 else if (regnum == IA64_CFM_REGNUM)
1952 {
1953 CORE_ADDR addr = cache->saved_regs[IA64_CFM_REGNUM];
1954
1955 if (addr != 0)
1956 return frame_unwind_got_memory (this_frame, regnum, addr);
1957
1958 if (cache->prev_cfm)
1959 return frame_unwind_got_constant (this_frame, regnum, cache->prev_cfm);
1960
1961 if (cache->frameless)
1962 return frame_unwind_got_register (this_frame, IA64_PFS_REGNUM,
1963 IA64_PFS_REGNUM);
1964 return frame_unwind_got_register (this_frame, regnum, 0);
1965 }
1966
1967 else if (regnum == IA64_VFP_REGNUM)
1968 {
1969 /* If the function in question uses an automatic register (r32-r127)
1970 for the frame pointer, it'll be found by ia64_find_saved_register()
1971 above. If the function lacks one of these frame pointers, we can
1972 still provide a value since we know the size of the frame. */
1973 return frame_unwind_got_constant (this_frame, regnum, cache->base);
1974 }
1975
1976 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1977 {
1978 struct value *pr_val;
1979 ULONGEST prN;
1980
1981 pr_val = ia64_frame_prev_register (this_frame, this_cache,
1982 IA64_PR_REGNUM);
1983 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1984 {
1985 /* Fetch predicate register rename base from current frame
1986 marker for this frame. */
1987 int rrb_pr = (cache->cfm >> 32) & 0x3f;
1988
1989 /* Adjust the register number to account for register rotation. */
1990 regnum = VP16_REGNUM + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1991 }
1992 prN = extract_bit_field (value_contents_all (pr_val),
1993 regnum - VP0_REGNUM, 1);
1994 return frame_unwind_got_constant (this_frame, regnum, prN);
1995 }
1996
1997 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
1998 {
1999 struct value *unat_val;
2000 ULONGEST unatN;
2001 unat_val = ia64_frame_prev_register (this_frame, this_cache,
2002 IA64_UNAT_REGNUM);
2003 unatN = extract_bit_field (value_contents_all (unat_val),
2004 regnum - IA64_NAT0_REGNUM, 1);
2005 return frame_unwind_got_constant (this_frame, regnum, unatN);
2006 }
2007
2008 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2009 {
2010 int natval = 0;
2011 /* Find address of general register corresponding to nat bit we're
2012 interested in. */
2013 CORE_ADDR gr_addr;
2014
2015 gr_addr = cache->saved_regs[regnum - IA64_NAT0_REGNUM + IA64_GR0_REGNUM];
2016
2017 if (gr_addr != 0)
2018 {
2019 /* Compute address of nat collection bits. */
2020 CORE_ADDR nat_addr = gr_addr | 0x1f8;
2021 CORE_ADDR bsp;
2022 CORE_ADDR nat_collection;
2023 int nat_bit;
2024
2025 /* If our nat collection address is bigger than bsp, we have to get
2026 the nat collection from rnat. Otherwise, we fetch the nat
2027 collection from the computed address. */
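/* The backing store reserves an 8-byte NaT collection slot whenever
   the low address bits reach 0x1f8, which is why NAT_ADDR was formed
   as (gr_addr | 0x1f8) above and why the register's bit within that
   collection is computed below as ((gr_addr >> 3) & 0x3f).  */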
2028 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2029 bsp = extract_unsigned_integer (buf, 8, byte_order);
2030 if (nat_addr >= bsp)
2031 {
2032 get_frame_register (this_frame, IA64_RNAT_REGNUM, buf);
2033 nat_collection = extract_unsigned_integer (buf, 8, byte_order);
2034 }
2035 else
2036 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
2037 nat_bit = (gr_addr >> 3) & 0x3f;
2038 natval = (nat_collection >> nat_bit) & 1;
2039 }
2040
2041 return frame_unwind_got_constant (this_frame, regnum, natval);
2042 }
2043
2044 else if (regnum == IA64_IP_REGNUM)
2045 {
2046 CORE_ADDR pc = 0;
2047 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2048
2049 if (addr != 0)
2050 {
2051 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2052 pc = extract_unsigned_integer (buf, 8, byte_order);
2053 }
2054 else if (cache->frameless)
2055 {
2056 get_frame_register (this_frame, IA64_BR0_REGNUM, buf);
2057 pc = extract_unsigned_integer (buf, 8, byte_order);
2058 }
2059 pc &= ~0xf;
2060 return frame_unwind_got_constant (this_frame, regnum, pc);
2061 }
2062
2063 else if (regnum == IA64_PSR_REGNUM)
2064 {
2065 /* We don't know how to get the complete previous PSR, but we need it
2066 for the slot information when we unwind the pc (pc is formed of IP
2067 register plus slot information from PSR). To get the previous
2068 slot information, we mask it off the return address. */
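/* psr.ri (the restart instruction, i.e. slot, field) occupies bits
   41-42 of the psr, so the code below clears those two bits and
   refills them from the low two bits of the saved return address; for
   example, a return address ending in ...1 yields slot 1.  */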
2069 ULONGEST slot_num = 0;
2070 CORE_ADDR pc = 0;
2071 CORE_ADDR psr = 0;
2072 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2073
2074 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
2075 psr = extract_unsigned_integer (buf, 8, byte_order);
2076
2077 if (addr != 0)
2078 {
2079 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2080 pc = extract_unsigned_integer (buf, 8, byte_order);
2081 }
2082 else if (cache->frameless)
2083 {
2084 get_frame_register (this_frame, IA64_BR0_REGNUM, buf);
2085 pc = extract_unsigned_integer (buf, 8, byte_order);
2086 }
2087 psr &= ~(3LL << 41);
2088 slot_num = pc & 0x3LL;
2089 psr |= (CORE_ADDR)slot_num << 41;
2090 return frame_unwind_got_constant (this_frame, regnum, psr);
2091 }
2092
2093 else if (regnum == IA64_BR0_REGNUM)
2094 {
2095 CORE_ADDR addr = cache->saved_regs[IA64_BR0_REGNUM];
2096
2097 if (addr != 0)
2098 return frame_unwind_got_memory (this_frame, regnum, addr);
2099
2100 return frame_unwind_got_constant (this_frame, regnum, 0);
2101 }
2102
2103 else if ((regnum >= IA64_GR32_REGNUM && regnum <= IA64_GR127_REGNUM)
2104 || (regnum >= V32_REGNUM && regnum <= V127_REGNUM))
2105 {
2106 CORE_ADDR addr = 0;
2107
2108 if (regnum >= V32_REGNUM)
2109 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2110 addr = cache->saved_regs[regnum];
2111 if (addr != 0)
2112 return frame_unwind_got_memory (this_frame, regnum, addr);
2113
2114 if (cache->frameless)
2115 {
2116 struct value *reg_val;
2117 CORE_ADDR prev_cfm, prev_bsp, prev_bof;
2118
2119 /* FIXME: brobecker/2008-05-01: Doesn't this seem redundant
2120 with the same code above? */
2121 if (regnum >= V32_REGNUM)
2122 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2123 reg_val = ia64_frame_prev_register (this_frame, this_cache,
2124 IA64_CFM_REGNUM);
2125 prev_cfm = extract_unsigned_integer (value_contents_all (reg_val),
2126 8, byte_order);
2127 reg_val = ia64_frame_prev_register (this_frame, this_cache,
2128 IA64_BSP_REGNUM);
2129 prev_bsp = extract_unsigned_integer (value_contents_all (reg_val),
2130 8, byte_order);
2131 prev_bof = rse_address_add (prev_bsp, -(prev_cfm & 0x7f));
2132
2133 addr = rse_address_add (prev_bof, (regnum - IA64_GR32_REGNUM));
2134 return frame_unwind_got_memory (this_frame, regnum, addr);
2135 }
2136
2137 return frame_unwind_got_constant (this_frame, regnum, 0);
2138 }
2139
2140 else /* All other registers. */
2141 {
2142 CORE_ADDR addr = 0;
2143
2144 if (IA64_FR32_REGNUM <= regnum && regnum <= IA64_FR127_REGNUM)
2145 {
2146 /* Fetch floating point register rename base from current
2147 frame marker for this frame. */
2148 int rrb_fr = (cache->cfm >> 25) & 0x7f;
2149
2150 /* Adjust the floating point register number to account for
2151 register rotation. */
2152 regnum = IA64_FR32_REGNUM
2153 + ((regnum - IA64_FR32_REGNUM) + rrb_fr) % 96;
2154 }
2155
2156 /* If we have stored a memory address, access the register. */
2157 addr = cache->saved_regs[regnum];
2158 if (addr != 0)
2159 return frame_unwind_got_memory (this_frame, regnum, addr);
2160 /* Otherwise, punt and get the current value of the register. */
2161 else
2162 return frame_unwind_got_register (this_frame, regnum, regnum);
2163 }
2164 }
2165
2166 static const struct frame_unwind ia64_frame_unwind =
2167 {
2168 NORMAL_FRAME,
2169 &ia64_frame_this_id,
2170 &ia64_frame_prev_register,
2171 NULL,
2172 default_frame_sniffer
2173 };
2174
2175 /* Signal trampolines. */
2176
2177 static void
2178 ia64_sigtramp_frame_init_saved_regs (struct frame_info *this_frame,
2179 struct ia64_frame_cache *cache)
2180 {
2181 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2182 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2183
2184 if (tdep->sigcontext_register_address)
2185 {
2186 int regno;
2187
2188 cache->saved_regs[IA64_VRAP_REGNUM]
2189 = tdep->sigcontext_register_address (gdbarch, cache->base,
2190 IA64_IP_REGNUM);
2191 cache->saved_regs[IA64_CFM_REGNUM]
2192 = tdep->sigcontext_register_address (gdbarch, cache->base,
2193 IA64_CFM_REGNUM);
2194 cache->saved_regs[IA64_PSR_REGNUM]
2195 = tdep->sigcontext_register_address (gdbarch, cache->base,
2196 IA64_PSR_REGNUM);
2197 cache->saved_regs[IA64_BSP_REGNUM]
2198 = tdep->sigcontext_register_address (gdbarch, cache->base,
2199 IA64_BSP_REGNUM);
2200 cache->saved_regs[IA64_RNAT_REGNUM]
2201 = tdep->sigcontext_register_address (gdbarch, cache->base,
2202 IA64_RNAT_REGNUM);
2203 cache->saved_regs[IA64_CCV_REGNUM]
2204 = tdep->sigcontext_register_address (gdbarch, cache->base,
2205 IA64_CCV_REGNUM);
2206 cache->saved_regs[IA64_UNAT_REGNUM]
2207 = tdep->sigcontext_register_address (gdbarch, cache->base,
2208 IA64_UNAT_REGNUM);
2209 cache->saved_regs[IA64_FPSR_REGNUM]
2210 = tdep->sigcontext_register_address (gdbarch, cache->base,
2211 IA64_FPSR_REGNUM);
2212 cache->saved_regs[IA64_PFS_REGNUM]
2213 = tdep->sigcontext_register_address (gdbarch, cache->base,
2214 IA64_PFS_REGNUM);
2215 cache->saved_regs[IA64_LC_REGNUM]
2216 = tdep->sigcontext_register_address (gdbarch, cache->base,
2217 IA64_LC_REGNUM);
2218
2219 for (regno = IA64_GR1_REGNUM; regno <= IA64_GR31_REGNUM; regno++)
2220 cache->saved_regs[regno] =
2221 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2222 for (regno = IA64_BR0_REGNUM; regno <= IA64_BR7_REGNUM; regno++)
2223 cache->saved_regs[regno] =
2224 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2225 for (regno = IA64_FR2_REGNUM; regno <= IA64_FR31_REGNUM; regno++)
2226 cache->saved_regs[regno] =
2227 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2228 }
2229 }
2230
2231 static struct ia64_frame_cache *
2232 ia64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
2233 {
2234 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2235 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2236 struct ia64_frame_cache *cache;
2237 CORE_ADDR addr;
2238 char buf[8];
2239 int i;
2240
2241 if (*this_cache)
2242 return *this_cache;
2243
2244 cache = ia64_alloc_frame_cache ();
2245
2246 get_frame_register (this_frame, sp_regnum, buf);
2247 /* Note that frame size is hard-coded below. We cannot calculate it
2248 via prologue examination. */
2249 cache->base = extract_unsigned_integer (buf, 8, byte_order) + 16;
2250
2251 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2252 cache->bsp = extract_unsigned_integer (buf, 8, byte_order);
2253
2254 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
2255 cache->cfm = extract_unsigned_integer (buf, 8, byte_order);
2256 cache->sof = cache->cfm & 0x7f;
2257
2258 ia64_sigtramp_frame_init_saved_regs (this_frame, cache);
2259
2260 *this_cache = cache;
2261 return cache;
2262 }
2263
2264 static void
2265 ia64_sigtramp_frame_this_id (struct frame_info *this_frame,
2266 void **this_cache, struct frame_id *this_id)
2267 {
2268 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2269 struct ia64_frame_cache *cache =
2270 ia64_sigtramp_frame_cache (this_frame, this_cache);
2271
2272 (*this_id) = frame_id_build_special (cache->base,
2273 get_frame_pc (this_frame),
2274 cache->bsp);
2275 if (gdbarch_debug >= 1)
2276 fprintf_unfiltered (gdb_stdlog,
2277 "sigtramp frame id: code %s, stack %s, "
2278 "special %s, this_frame %s\n",
2279 paddress (gdbarch, this_id->code_addr),
2280 paddress (gdbarch, this_id->stack_addr),
2281 paddress (gdbarch, cache->bsp),
2282 host_address_to_string (this_frame));
2283 }
2284
2285 static struct value *
2286 ia64_sigtramp_frame_prev_register (struct frame_info *this_frame,
2287 void **this_cache, int regnum)
2288 {
2289 char buf[MAX_REGISTER_SIZE];
2290
2291 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2292 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2293 struct ia64_frame_cache *cache =
2294 ia64_sigtramp_frame_cache (this_frame, this_cache);
2295
2296 gdb_assert (regnum >= 0);
2297
2298 if (!target_has_registers)
2299 error (_("No registers."));
2300
2301 if (regnum == IA64_IP_REGNUM)
2302 {
2303 CORE_ADDR pc = 0;
2304 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2305
2306 if (addr != 0)
2307 {
2308 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2309 pc = extract_unsigned_integer (buf, 8, byte_order);
2310 }
2311 pc &= ~0xf;
2312 return frame_unwind_got_constant (this_frame, regnum, pc);
2313 }
2314
2315 else if ((regnum >= IA64_GR32_REGNUM && regnum <= IA64_GR127_REGNUM)
2316 || (regnum >= V32_REGNUM && regnum <= V127_REGNUM))
2317 {
2318 CORE_ADDR addr = 0;
2319
2320 if (regnum >= V32_REGNUM)
2321 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2322 addr = cache->saved_regs[regnum];
2323 if (addr != 0)
2324 return frame_unwind_got_memory (this_frame, regnum, addr);
2325
2326 return frame_unwind_got_constant (this_frame, regnum, 0);
2327 }
2328
2329 else /* All other registers not listed above. */
2330 {
2331 CORE_ADDR addr = cache->saved_regs[regnum];
2332
2333 if (addr != 0)
2334 return frame_unwind_got_memory (this_frame, regnum, addr);
2335
2336 return frame_unwind_got_constant (this_frame, regnum, 0);
2337 }
2338 }
2339
2340 static int
2341 ia64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2342 struct frame_info *this_frame,
2343 void **this_cache)
2344 {
2345 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
2346 if (tdep->pc_in_sigtramp)
2347 {
2348 CORE_ADDR pc = get_frame_pc (this_frame);
2349
2350 if (tdep->pc_in_sigtramp (pc))
2351 return 1;
2352 }
2353
2354 return 0;
2355 }
2356
2357 static const struct frame_unwind ia64_sigtramp_frame_unwind =
2358 {
2359 SIGTRAMP_FRAME,
2360 ia64_sigtramp_frame_this_id,
2361 ia64_sigtramp_frame_prev_register,
2362 NULL,
2363 ia64_sigtramp_frame_sniffer
2364 };
2365
2366 \f
2367
2368 static CORE_ADDR
2369 ia64_frame_base_address (struct frame_info *this_frame, void **this_cache)
2370 {
2371 struct ia64_frame_cache *cache = ia64_frame_cache (this_frame, this_cache);
2372
2373 return cache->base;
2374 }
2375
2376 static const struct frame_base ia64_frame_base =
2377 {
2378 &ia64_frame_unwind,
2379 ia64_frame_base_address,
2380 ia64_frame_base_address,
2381 ia64_frame_base_address
2382 };
2383
2384 #ifdef HAVE_LIBUNWIND_IA64_H
2385
2386 struct ia64_unwind_table_entry
2387 {
2388 unw_word_t start_offset;
2389 unw_word_t end_offset;
2390 unw_word_t info_offset;
2391 };
2392
2393 static __inline__ uint64_t
2394 ia64_rse_slot_num (uint64_t addr)
2395 {
2396 return (addr >> 3) & 0x3f;
2397 }
2398
2399 /* Skip over a designated number of registers in the backing
2400 store, remembering that every 64th slot holds a NaT collection. */
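/* For example, skipping forward 70 registers from slot 0 crosses one
   NaT collection slot, so the address advances by (70 + 1) * 8 bytes.  */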
2401 static __inline__ uint64_t
2402 ia64_rse_skip_regs (uint64_t addr, long num_regs)
2403 {
2404 long delta = ia64_rse_slot_num(addr) + num_regs;
2405
2406 if (num_regs < 0)
2407 delta -= 0x3e;
2408 return addr + ((num_regs + delta/0x3f) << 3);
2409 }
2410
2411 /* Gdb libunwind-frame callback function to convert from an ia64 gdb register
2412 number to a libunwind register number. */
2413 static int
2414 ia64_gdb2uw_regnum (int regnum)
2415 {
2416 if (regnum == sp_regnum)
2417 return UNW_IA64_SP;
2418 else if (regnum == IA64_BSP_REGNUM)
2419 return UNW_IA64_BSP;
2420 else if ((unsigned) (regnum - IA64_GR0_REGNUM) < 128)
2421 return UNW_IA64_GR + (regnum - IA64_GR0_REGNUM);
2422 else if ((unsigned) (regnum - V32_REGNUM) < 95)
2423 return UNW_IA64_GR + 32 + (regnum - V32_REGNUM);
2424 else if ((unsigned) (regnum - IA64_FR0_REGNUM) < 128)
2425 return UNW_IA64_FR + (regnum - IA64_FR0_REGNUM);
2426 else if ((unsigned) (regnum - IA64_PR0_REGNUM) < 64)
2427 return -1;
2428 else if ((unsigned) (regnum - IA64_BR0_REGNUM) < 8)
2429 return UNW_IA64_BR + (regnum - IA64_BR0_REGNUM);
2430 else if (regnum == IA64_PR_REGNUM)
2431 return UNW_IA64_PR;
2432 else if (regnum == IA64_IP_REGNUM)
2433 return UNW_REG_IP;
2434 else if (regnum == IA64_CFM_REGNUM)
2435 return UNW_IA64_CFM;
2436 else if ((unsigned) (regnum - IA64_AR0_REGNUM) < 128)
2437 return UNW_IA64_AR + (regnum - IA64_AR0_REGNUM);
2438 else if ((unsigned) (regnum - IA64_NAT0_REGNUM) < 128)
2439 return UNW_IA64_NAT + (regnum - IA64_NAT0_REGNUM);
2440 else
2441 return -1;
2442 }
2443
2444 /* Gdb libunwind-frame callback function to convert from a libunwind register
2445 number to a ia64 gdb register number. */
2446 static int
2447 ia64_uw2gdb_regnum (int uw_regnum)
2448 {
2449 if (uw_regnum == UNW_IA64_SP)
2450 return sp_regnum;
2451 else if (uw_regnum == UNW_IA64_BSP)
2452 return IA64_BSP_REGNUM;
2453 else if ((unsigned) (uw_regnum - UNW_IA64_GR) < 32)
2454 return IA64_GR0_REGNUM + (uw_regnum - UNW_IA64_GR);
2455 else if ((unsigned) (uw_regnum - UNW_IA64_GR) < 128)
2456 return V32_REGNUM + (uw_regnum - (IA64_GR0_REGNUM + 32));
2457 else if ((unsigned) (uw_regnum - UNW_IA64_FR) < 128)
2458 return IA64_FR0_REGNUM + (uw_regnum - UNW_IA64_FR);
2459 else if ((unsigned) (uw_regnum - UNW_IA64_BR) < 8)
2460 return IA64_BR0_REGNUM + (uw_regnum - UNW_IA64_BR);
2461 else if (uw_regnum == UNW_IA64_PR)
2462 return IA64_PR_REGNUM;
2463 else if (uw_regnum == UNW_REG_IP)
2464 return IA64_IP_REGNUM;
2465 else if (uw_regnum == UNW_IA64_CFM)
2466 return IA64_CFM_REGNUM;
2467 else if ((unsigned) (uw_regnum - UNW_IA64_AR) < 128)
2468 return IA64_AR0_REGNUM + (uw_regnum - UNW_IA64_AR);
2469 else if ((unsigned) (uw_regnum - UNW_IA64_NAT) < 128)
2470 return IA64_NAT0_REGNUM + (uw_regnum - UNW_IA64_NAT);
2471 else
2472 return -1;
2473 }
2474
2475 /* Gdb libunwind-frame callback function to report whether the given
2476 register is a floating-point register or not. */
2477 static int
2478 ia64_is_fpreg (int uw_regnum)
2479 {
2480 return unw_is_fpreg (uw_regnum);
2481 }
2482
2483 /* Libunwind callback accessor function for general registers. */
2484 static int
2485 ia64_access_reg (unw_addr_space_t as, unw_regnum_t uw_regnum, unw_word_t *val,
2486 int write, void *arg)
2487 {
2488 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2489 unw_word_t bsp, sof, sol, cfm, psr, ip;
2490 struct frame_info *this_frame = arg;
2491 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2492 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2493 long new_sof, old_sof;
2494 char buf[MAX_REGISTER_SIZE];
2495
2496 /* We never call any libunwind routines that need to write registers. */
2497 gdb_assert (!write);
2498
2499 switch (uw_regnum)
2500 {
2501 case UNW_REG_IP:
2502 /* Libunwind expects to see the pc value which means the slot number
2503 from the psr must be merged with the ip word address. */
2504 get_frame_register (this_frame, IA64_IP_REGNUM, buf);
2505 ip = extract_unsigned_integer (buf, 8, byte_order);
2506 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
2507 psr = extract_unsigned_integer (buf, 8, byte_order);
2508 *val = ip | ((psr >> 41) & 0x3);
2509 break;
2510
2511 case UNW_IA64_AR_BSP:
2512 /* Libunwind expects to see the beginning of the current
2513 register frame so we must account for the fact that
2514 ptrace() will return a value for bsp that points *after*
2515 the current register frame. */
2516 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2517 bsp = extract_unsigned_integer (buf, 8, byte_order);
2518 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
2519 cfm = extract_unsigned_integer (buf, 8, byte_order);
2520 sof = gdbarch_tdep (gdbarch)->size_of_register_frame (this_frame, cfm);
2521 *val = ia64_rse_skip_regs (bsp, -sof);
2522 break;
2523
2524 case UNW_IA64_AR_BSPSTORE:
2525 /* Libunwind wants bspstore to be after the current register frame.
2526 This is what ptrace() and gdb treat as the regular bsp value. */
2527 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2528 *val = extract_unsigned_integer (buf, 8, byte_order);
2529 break;
2530
2531 default:
2532 /* For all other registers, just unwind the value directly. */
2533 get_frame_register (this_frame, regnum, buf);
2534 *val = extract_unsigned_integer (buf, 8, byte_order);
2535 break;
2536 }
2537
2538 if (gdbarch_debug >= 1)
2539 fprintf_unfiltered (gdb_stdlog,
2540 " access_reg: from cache: %4s=%s\n",
2541 (((unsigned) regnum <= IA64_NAT127_REGNUM)
2542 ? ia64_register_names[regnum] : "r??"),
2543 paddress (gdbarch, *val));
2544 return 0;
2545 }
2546
2547 /* Libunwind callback accessor function for floating-point registers. */
2548 static int
2549 ia64_access_fpreg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2550 unw_fpreg_t *val, int write, void *arg)
2551 {
2552 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2553 struct frame_info *this_frame = arg;
2554
2555 /* We never call any libunwind routines that need to write registers. */
2556 gdb_assert (!write);
2557
2558 get_frame_register (this_frame, regnum, (char *) val);
2559
2560 return 0;
2561 }
2562
2563 /* Libunwind callback accessor function for top-level rse registers. */
2564 static int
2565 ia64_access_rse_reg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2566 unw_word_t *val, int write, void *arg)
2567 {
2568 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2569 unw_word_t bsp, sof, sol, cfm, psr, ip;
2570 struct regcache *regcache = arg;
2571 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2572 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2573 long new_sof, old_sof;
2574 char buf[MAX_REGISTER_SIZE];
2575
2576 /* We never call any libunwind routines that need to write registers. */
2577 gdb_assert (!write);
2578
2579 switch (uw_regnum)
2580 {
2581 case UNW_REG_IP:
2582 /* Libunwind expects to see the pc value which means the slot number
2583 from the psr must be merged with the ip word address. */
2584 regcache_cooked_read (regcache, IA64_IP_REGNUM, buf);
2585 ip = extract_unsigned_integer (buf, 8, byte_order);
2586 regcache_cooked_read (regcache, IA64_PSR_REGNUM, buf);
2587 psr = extract_unsigned_integer (buf, 8, byte_order);
2588 *val = ip | ((psr >> 41) & 0x3);
2589 break;
2590
2591 case UNW_IA64_AR_BSP:
2592 /* Libunwind expects to see the beginning of the current
2593 register frame so we must account for the fact that
2594 ptrace() will return a value for bsp that points *after*
2595 the current register frame. */
2596 regcache_cooked_read (regcache, IA64_BSP_REGNUM, buf);
2597 bsp = extract_unsigned_integer (buf, 8, byte_order);
2598 regcache_cooked_read (regcache, IA64_CFM_REGNUM, buf);
2599 cfm = extract_unsigned_integer (buf, 8, byte_order);
2600 sof = (cfm & 0x7f);
2601 *val = ia64_rse_skip_regs (bsp, -sof);
2602 break;
2603
2604 case UNW_IA64_AR_BSPSTORE:
2605 /* Libunwind wants bspstore to be after the current register frame.
2606 This is what ptrace() and gdb treat as the regular bsp value. */
2607 regcache_cooked_read (regcache, IA64_BSP_REGNUM, buf);
2608 *val = extract_unsigned_integer (buf, 8, byte_order);
2609 break;
2610
2611 default:
2612 /* For all other registers, just unwind the value directly. */
2613 regcache_cooked_read (regcache, regnum, buf);
2614 *val = extract_unsigned_integer (buf, 8, byte_order);
2615 break;
2616 }
2617
2618 if (gdbarch_debug >= 1)
2619 fprintf_unfiltered (gdb_stdlog,
2620 " access_rse_reg: from cache: %4s=%s\n",
2621 (((unsigned) regnum <= IA64_NAT127_REGNUM)
2622 ? ia64_register_names[regnum] : "r??"),
2623 paddress (gdbarch, *val));
2624
2625 return 0;
2626 }
2627
2628 /* Libunwind callback accessor function for top-level fp registers. */
2629 static int
2630 ia64_access_rse_fpreg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2631 unw_fpreg_t *val, int write, void *arg)
2632 {
2633 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2634 struct regcache *regcache = arg;
2635
2636 /* We never call any libunwind routines that need to write registers. */
2637 gdb_assert (!write);
2638
2639 regcache_cooked_read (regcache, regnum, (char *) val);
2640
2641 return 0;
2642 }
2643
2644 /* Libunwind callback accessor function for accessing memory. */
2645 static int
2646 ia64_access_mem (unw_addr_space_t as,
2647 unw_word_t addr, unw_word_t *val,
2648 int write, void *arg)
2649 {
2650 if (addr - KERNEL_START < ktab_size)
2651 {
2652 unw_word_t *laddr = (unw_word_t*) ((char *) ktab
2653 + (addr - KERNEL_START));
2654
2655 if (write)
2656 *laddr = *val;
2657 else
2658 *val = *laddr;
2659 return 0;
2660 }
2661
2662 /* XXX do we need to normalize byte-order here? */
2663 if (write)
2664 return target_write_memory (addr, (char *) val, sizeof (unw_word_t));
2665 else
2666 return target_read_memory (addr, (char *) val, sizeof (unw_word_t));
2667 }
2668
2669 /* Call low-level function to access the kernel unwind table. */
2670 static LONGEST
2671 getunwind_table (gdb_byte **buf_p)
2672 {
2673 LONGEST x;
2674
2675 /* FIXME drow/2005-09-10: This code used to call
2676 ia64_linux_xfer_unwind_table directly to fetch the unwind table
2677 for the currently running ia64-linux kernel. That data should
2678 come from the core file and be accessed via the auxv vector; if
2679 we want to preserve the fallback to the running kernel's table, then
2680 we should find a way to override the corefile layer's
2681 xfer_partial method. */
2682
2683 x = target_read_alloc (&current_target, TARGET_OBJECT_UNWIND_TABLE,
2684 NULL, buf_p);
2685
2686 return x;
2687 }
2688
2689 /* Get the kernel unwind table. */
2690 static int
2691 get_kernel_table (unw_word_t ip, unw_dyn_info_t *di)
2692 {
2693 static struct ia64_table_entry *etab;
2694
2695 if (!ktab)
2696 {
2697 gdb_byte *ktab_buf;
2698 LONGEST size;
2699
2700 size = getunwind_table (&ktab_buf);
2701 if (size <= 0)
2702 return -UNW_ENOINFO;
2703
2704 ktab = (struct ia64_table_entry *) ktab_buf;
2705 ktab_size = size;
2706
2707 for (etab = ktab; etab->start_offset; ++etab)
2708 etab->info_offset += KERNEL_START;
2709 }
2710
2711 if (ip < ktab[0].start_offset || ip >= etab[-1].end_offset)
2712 return -UNW_ENOINFO;
2713
2714 di->format = UNW_INFO_FORMAT_TABLE;
2715 di->gp = 0;
2716 di->start_ip = ktab[0].start_offset;
2717 di->end_ip = etab[-1].end_offset;
2718 di->u.ti.name_ptr = (unw_word_t) "<kernel>";
2719 di->u.ti.segbase = 0;
2720 di->u.ti.table_len = ((char *) etab - (char *) ktab) / sizeof (unw_word_t);
2721 di->u.ti.table_data = (unw_word_t *) ktab;
2722
2723 if (gdbarch_debug >= 1)
2724 fprintf_unfiltered (gdb_stdlog, "get_kernel_table: found table `%s': "
2725 "segbase=%s, length=%s, gp=%s\n",
2726 (char *) di->u.ti.name_ptr,
2727 hex_string (di->u.ti.segbase),
2728 pulongest (di->u.ti.table_len),
2729 hex_string (di->gp));
2730 return 0;
2731 }
2732
2733 /* Find the unwind table entry for a specified address. */
2734 static int
2735 ia64_find_unwind_table (struct objfile *objfile, unw_word_t ip,
2736 unw_dyn_info_t *dip, void **buf)
2737 {
2738 Elf_Internal_Phdr *phdr, *p_text = NULL, *p_unwind = NULL;
2739 Elf_Internal_Ehdr *ehdr;
2740 unw_word_t segbase = 0;
2741 CORE_ADDR load_base;
2742 bfd *bfd;
2743 int i;
2744
2745 bfd = objfile->obfd;
2746
2747 ehdr = elf_tdata (bfd)->elf_header;
2748 phdr = elf_tdata (bfd)->phdr;
2749
2750 load_base = ANOFFSET (objfile->section_offsets, SECT_OFF_TEXT (objfile));
2751
2752 for (i = 0; i < ehdr->e_phnum; ++i)
2753 {
2754 switch (phdr[i].p_type)
2755 {
2756 case PT_LOAD:
2757 if ((unw_word_t) (ip - load_base - phdr[i].p_vaddr)
2758 < phdr[i].p_memsz)
2759 p_text = phdr + i;
2760 break;
2761
2762 case PT_IA_64_UNWIND:
2763 p_unwind = phdr + i;
2764 break;
2765
2766 default:
2767 break;
2768 }
2769 }
2770
2771 if (!p_text || !p_unwind)
2772 return -UNW_ENOINFO;
2773
2774 /* Verify that the segment that contains the IP also contains
2775 the static unwind table. If not, we may be in the Linux kernel's
2776 DSO gate page, in which case the unwind table is in another segment.
2777 Otherwise, we are dealing with runtime-generated code, for which we
2778 have no info here. */
2779 segbase = p_text->p_vaddr + load_base;
2780
2781 if ((p_unwind->p_vaddr - p_text->p_vaddr) >= p_text->p_memsz)
2782 {
2783 int ok = 0;
2784 for (i = 0; i < ehdr->e_phnum; ++i)
2785 {
2786 if (phdr[i].p_type == PT_LOAD
2787 && (p_unwind->p_vaddr - phdr[i].p_vaddr) < phdr[i].p_memsz)
2788 {
2789 ok = 1;
2790 /* Get the segbase from the section containing the
2791 libunwind table. */
2792 segbase = phdr[i].p_vaddr + load_base;
2793 }
2794 }
2795 if (!ok)
2796 return -UNW_ENOINFO;
2797 }
2798
2799 dip->start_ip = p_text->p_vaddr + load_base;
2800 dip->end_ip = dip->start_ip + p_text->p_memsz;
2801 dip->gp = ia64_find_global_pointer (get_objfile_arch (objfile), ip);
2802 dip->format = UNW_INFO_FORMAT_REMOTE_TABLE;
2803 dip->u.rti.name_ptr = (unw_word_t) bfd_get_filename (bfd);
2804 dip->u.rti.segbase = segbase;
2805 dip->u.rti.table_len = p_unwind->p_memsz / sizeof (unw_word_t);
2806 dip->u.rti.table_data = p_unwind->p_vaddr + load_base;
2807
2808 return 0;
2809 }
2810
2811 /* Libunwind callback accessor function to acquire procedure unwind-info. */
2812 static int
2813 ia64_find_proc_info_x (unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
2814 int need_unwind_info, void *arg)
2815 {
2816 struct obj_section *sec = find_pc_section (ip);
2817 unw_dyn_info_t di;
2818 int ret;
2819 void *buf = NULL;
2820
2821 if (!sec)
2822 {
2823 /* XXX This only works if the host and the target architecture are
2824 both ia64 and if they have (more or less) the same kernel
2825 version. */
2826 if (get_kernel_table (ip, &di) < 0)
2827 return -UNW_ENOINFO;
2828
2829 if (gdbarch_debug >= 1)
2830 fprintf_unfiltered (gdb_stdlog, "ia64_find_proc_info_x: %s -> "
2831 "(name=`%s',segbase=%s,start=%s,end=%s,gp=%s,"
2832 "length=%s,data=%s)\n",
2833 hex_string (ip), (char *)di.u.ti.name_ptr,
2834 hex_string (di.u.ti.segbase),
2835 hex_string (di.start_ip), hex_string (di.end_ip),
2836 hex_string (di.gp),
2837 pulongest (di.u.ti.table_len),
2838 hex_string ((CORE_ADDR)di.u.ti.table_data));
2839 }
2840 else
2841 {
2842 ret = ia64_find_unwind_table (sec->objfile, ip, &di, &buf);
2843 if (ret < 0)
2844 return ret;
2845
2846 if (gdbarch_debug >= 1)
2847 fprintf_unfiltered (gdb_stdlog, "ia64_find_proc_info_x: %s -> "
2848 "(name=`%s',segbase=%s,start=%s,end=%s,gp=%s,"
2849 "length=%s,data=%s)\n",
2850 hex_string (ip), (char *)di.u.rti.name_ptr,
2851 hex_string (di.u.rti.segbase),
2852 hex_string (di.start_ip), hex_string (di.end_ip),
2853 hex_string (di.gp),
2854 pulongest (di.u.rti.table_len),
2855 hex_string (di.u.rti.table_data));
2856 }
2857
2858 ret = libunwind_search_unwind_table (&as, ip, &di, pi, need_unwind_info,
2859 arg);
2860
2861 /* We no longer need the dyn info storage so free it. */
2862 xfree (buf);
2863
2864 return ret;
2865 }
2866
2867 /* Libunwind callback accessor function for cleanup. */
2868 static void
2869 ia64_put_unwind_info (unw_addr_space_t as,
2870 unw_proc_info_t *pip, void *arg)
2871 {
2872 /* Nothing required for now. */
2873 }
2874
2875 /* Libunwind callback accessor function to get head of the dynamic
2876 unwind-info registration list. */
2877 static int
2878 ia64_get_dyn_info_list (unw_addr_space_t as,
2879 unw_word_t *dilap, void *arg)
2880 {
2881 struct obj_section *text_sec;
2882 struct objfile *objfile;
2883 unw_word_t ip, addr;
2884 unw_dyn_info_t di;
2885 int ret;
2886
2887 if (!libunwind_is_initialized ())
2888 return -UNW_ENOINFO;
2889
2890 for (objfile = object_files; objfile; objfile = objfile->next)
2891 {
2892 void *buf = NULL;
2893
2894 text_sec = objfile->sections + SECT_OFF_TEXT (objfile);
2895 ip = obj_section_addr (text_sec);
2896 ret = ia64_find_unwind_table (objfile, ip, &di, &buf);
2897 if (ret >= 0)
2898 {
2899 addr = libunwind_find_dyn_list (as, &di, arg);
2900 /* We no longer need the dyn info storage so free it. */
2901 xfree (buf);
2902
2903 if (addr)
2904 {
2905 if (gdbarch_debug >= 1)
2906 fprintf_unfiltered (gdb_stdlog,
2907 "dynamic unwind table in objfile %s "
2908 "at %s (gp=%s)\n",
2909 bfd_get_filename (objfile->obfd),
2910 hex_string (addr), hex_string (di.gp));
2911 *dilap = addr;
2912 return 0;
2913 }
2914 }
2915 }
2916 return -UNW_ENOINFO;
2917 }
2918
2919
2920 /* Frame interface functions for libunwind. */
2921
2922 static void
2923 ia64_libunwind_frame_this_id (struct frame_info *this_frame, void **this_cache,
2924 struct frame_id *this_id)
2925 {
2926 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2927 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2928 struct frame_id id = outer_frame_id;
2929 char buf[8];
2930 CORE_ADDR bsp;
2931
2932 libunwind_frame_this_id (this_frame, this_cache, &id);
2933 if (frame_id_eq (id, outer_frame_id))
2934 {
2935 (*this_id) = outer_frame_id;
2936 return;
2937 }
2938
2939 /* We must add the bsp as the special address for frame comparison
2940 purposes. */
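/* (Presumably because two distinct frames can share the same memory
   stack pointer while their register-stack frames, and hence their
   bsp values, still differ.)  */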
2941 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2942 bsp = extract_unsigned_integer (buf, 8, byte_order);
2943
2944 (*this_id) = frame_id_build_special (id.stack_addr, id.code_addr, bsp);
2945
2946 if (gdbarch_debug >= 1)
2947 fprintf_unfiltered (gdb_stdlog,
2948 "libunwind frame id: code %s, stack %s, "
2949 "special %s, this_frame %s\n",
2950 paddress (gdbarch, id.code_addr),
2951 paddress (gdbarch, id.stack_addr),
2952 paddress (gdbarch, bsp),
2953 host_address_to_string (this_frame));
2954 }
2955
2956 static struct value *
2957 ia64_libunwind_frame_prev_register (struct frame_info *this_frame,
2958 void **this_cache, int regnum)
2959 {
2960 int reg = regnum;
2961 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2962 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2963 struct value *val;
2964
2965 if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
2966 reg = IA64_PR_REGNUM;
2967 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2968 reg = IA64_UNAT_REGNUM;
2969
2970 /* Let libunwind do most of the work. */
2971 val = libunwind_frame_prev_register (this_frame, this_cache, reg);
2972
2973 if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
2974 {
2975 ULONGEST prN_val;
2976
2977 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
2978 {
2979 int rrb_pr = 0;
2980 ULONGEST cfm;
2981 unsigned char buf[MAX_REGISTER_SIZE];
2982
2983 /* Fetch predicate register rename base from current frame
2984 marker for this frame. */
2985 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
2986 cfm = extract_unsigned_integer (buf, 8, byte_order);
2987 rrb_pr = (cfm >> 32) & 0x3f;
2988
2989 /* Adjust the register number to account for register rotation. */
2990 regnum = VP16_REGNUM + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
2991 }
2992 prN_val = extract_bit_field (value_contents_all (val),
2993 regnum - VP0_REGNUM, 1);
2994 return frame_unwind_got_constant (this_frame, regnum, prN_val);
2995 }
2996
2997 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2998 {
2999 ULONGEST unatN_val;
3000
3001 unatN_val = extract_bit_field (value_contents_all (val),
3002 regnum - IA64_NAT0_REGNUM, 1);
3003 return frame_unwind_got_constant (this_frame, regnum, unatN_val);
3004 }
3005
3006 else if (regnum == IA64_BSP_REGNUM)
3007 {
3008 struct value *cfm_val;
3009 CORE_ADDR prev_bsp, prev_cfm;
3010
3011 /* We want to calculate the previous bsp as the end of the previous
3012 register stack frame. This corresponds to what the hardware bsp
3013 register will be if we pop the frame back which is why we might
3014 have been called. We know that libunwind will pass us back the
3015 beginning of the current frame so we should just add sof to it. */
3016 prev_bsp = extract_unsigned_integer (value_contents_all (val),
3017 8, byte_order);
3018 cfm_val = libunwind_frame_prev_register (this_frame, this_cache,
3019 IA64_CFM_REGNUM);
3020 prev_cfm = extract_unsigned_integer (value_contents_all (cfm_val),
3021 8, byte_order);
3022 prev_bsp = rse_address_add (prev_bsp, (prev_cfm & 0x7f));
3023
3024 return frame_unwind_got_constant (this_frame, regnum, prev_bsp);
3025 }
3026 else
3027 return val;
3028 }
3029
3030 static int
3031 ia64_libunwind_frame_sniffer (const struct frame_unwind *self,
3032 struct frame_info *this_frame,
3033 void **this_cache)
3034 {
3035 if (libunwind_is_initialized ()
3036 && libunwind_frame_sniffer (self, this_frame, this_cache))
3037 return 1;
3038
3039 return 0;
3040 }
3041
3042 static const struct frame_unwind ia64_libunwind_frame_unwind =
3043 {
3044 NORMAL_FRAME,
3045 ia64_libunwind_frame_this_id,
3046 ia64_libunwind_frame_prev_register,
3047 NULL,
3048 ia64_libunwind_frame_sniffer,
3049 libunwind_frame_dealloc_cache
3050 };
3051
3052 static void
3053 ia64_libunwind_sigtramp_frame_this_id (struct frame_info *this_frame,
3054 void **this_cache,
3055 struct frame_id *this_id)
3056 {
3057 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3058 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3059 char buf[8];
3060 CORE_ADDR bsp;
3061 struct frame_id id = outer_frame_id;
3062 CORE_ADDR prev_ip;
3063
3064 libunwind_frame_this_id (this_frame, this_cache, &id);
3065 if (frame_id_eq (id, outer_frame_id))
3066 {
3067 (*this_id) = outer_frame_id;
3068 return;
3069 }
3070
3071 /* We must add the bsp as the special address for frame comparison
3072 purposes. */
3073 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
3074 bsp = extract_unsigned_integer (buf, 8, byte_order);
3075
3076 /* For a sigtramp frame, we don't check whether the previous ip is 0. */
3077 (*this_id) = frame_id_build_special (id.stack_addr, id.code_addr, bsp);
3078
3079 if (gdbarch_debug >= 1)
3080 fprintf_unfiltered (gdb_stdlog,
3081 "libunwind sigtramp frame id: code %s, "
3082 "stack %s, special %s, this_frame %s\n",
3083 paddress (gdbarch, id.code_addr),
3084 paddress (gdbarch, id.stack_addr),
3085 paddress (gdbarch, bsp),
3086 host_address_to_string (this_frame));
3087 }
3088
3089 static struct value *
3090 ia64_libunwind_sigtramp_frame_prev_register (struct frame_info *this_frame,
3091 void **this_cache, int regnum)
3092 {
3093 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3094 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3095 struct value *prev_ip_val;
3096 CORE_ADDR prev_ip;
3097
3098 /* If the previous frame pc value is 0, then we want to use the SIGCONTEXT
3099 method of getting previous registers. */
3100 prev_ip_val = libunwind_frame_prev_register (this_frame, this_cache,
3101 IA64_IP_REGNUM);
3102 prev_ip = extract_unsigned_integer (value_contents_all (prev_ip_val),
3103 8, byte_order);
3104
3105 if (prev_ip == 0)
3106 {
3107 void *tmp_cache = NULL;
3108 return ia64_sigtramp_frame_prev_register (this_frame, &tmp_cache,
3109 regnum);
3110 }
3111 else
3112 return ia64_libunwind_frame_prev_register (this_frame, this_cache, regnum);
3113 }
3114
3115 static int
3116 ia64_libunwind_sigtramp_frame_sniffer (const struct frame_unwind *self,
3117 struct frame_info *this_frame,
3118 void **this_cache)
3119 {
3120 if (libunwind_is_initialized ())
3121 {
3122 if (libunwind_sigtramp_frame_sniffer (self, this_frame, this_cache))
3123 return 1;
3124 return 0;
3125 }
3126 else
3127 return ia64_sigtramp_frame_sniffer (self, this_frame, this_cache);
3128 }
3129
3130 static const struct frame_unwind ia64_libunwind_sigtramp_frame_unwind =
3131 {
3132 SIGTRAMP_FRAME,
3133 ia64_libunwind_sigtramp_frame_this_id,
3134 ia64_libunwind_sigtramp_frame_prev_register,
3135 NULL,
3136 ia64_libunwind_sigtramp_frame_sniffer
3137 };
3138
3139 /* Set of libunwind callback accessor functions. */
3140 static unw_accessors_t ia64_unw_accessors =
3141 {
3142 ia64_find_proc_info_x,
3143 ia64_put_unwind_info,
3144 ia64_get_dyn_info_list,
3145 ia64_access_mem,
3146 ia64_access_reg,
3147 ia64_access_fpreg,
3148 /* resume */
3149 /* get_proc_name */
3150 };
3151
3152 /* Set of special libunwind callback accessor functions specific to accessing
3153 the rse registers. At the top of the stack, we want libunwind to figure out
3154 how to read r32 - r127. Though usually they are found sequentially in
3155 memory starting from $bof, this is not always true. */
3156 static unw_accessors_t ia64_unw_rse_accessors =
3157 {
3158 ia64_find_proc_info_x,
3159 ia64_put_unwind_info,
3160 ia64_get_dyn_info_list,
3161 ia64_access_mem,
3162 ia64_access_rse_reg,
3163 ia64_access_rse_fpreg,
3164 /* resume */
3165 /* get_proc_name */
3166 };
3167
3168 /* Set of ia64 gdb libunwind-frame callbacks and data for generic
3169 libunwind-frame code to use. */
3170 static struct libunwind_descr ia64_libunwind_descr =
3171 {
3172 ia64_gdb2uw_regnum,
3173 ia64_uw2gdb_regnum,
3174 ia64_is_fpreg,
3175 &ia64_unw_accessors,
3176 &ia64_unw_rse_accessors,
3177 };
3178
3179 #endif /* HAVE_LIBUNWIND_IA64_H */
3180
3181 static int
3182 ia64_use_struct_convention (struct type *type)
3183 {
3184 struct type *float_elt_type;
3185
3186 /* Don't use the struct convention for anything but structure,
3187 union, or array types. */
3188 if (!(TYPE_CODE (type) == TYPE_CODE_STRUCT
3189 || TYPE_CODE (type) == TYPE_CODE_UNION
3190 || TYPE_CODE (type) == TYPE_CODE_ARRAY))
3191 return 0;
3192
3193 /* HFAs are structures (or arrays) consisting entirely of floating
3194 point values of the same length. Up to 8 of these are returned
3195 in registers. Don't use the struct convention when this is the
3196 case. */
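/* For example, a struct of three doubles is an HFA and comes back in
   f8-f10 via ia64_extract_return_value below, so it must not be
   flagged for the struct convention here.  */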
3197 float_elt_type = is_float_or_hfa_type (type);
3198 if (float_elt_type != NULL
3199 && TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type) <= 8)
3200 return 0;
3201
3202 /* Other structs of length 32 or less are returned in r8-r11.
3203 Don't use the struct convention for those either. */
3204 return TYPE_LENGTH (type) > 32;
3205 }
3206
3207 /* Return non-zero if TYPE is a structure or union type. */
3208
3209 static int
3210 ia64_struct_type_p (const struct type *type)
3211 {
3212 return (TYPE_CODE (type) == TYPE_CODE_STRUCT
3213 || TYPE_CODE (type) == TYPE_CODE_UNION);
3214 }
3215
3216 static void
3217 ia64_extract_return_value (struct type *type, struct regcache *regcache,
3218 gdb_byte *valbuf)
3219 {
3220 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3221 struct type *float_elt_type;
3222
3223 float_elt_type = is_float_or_hfa_type (type);
3224 if (float_elt_type != NULL)
3225 {
3226 char from[MAX_REGISTER_SIZE];
3227 int offset = 0;
3228 int regnum = IA64_FR8_REGNUM;
3229 int n = TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type);
3230
3231 while (n-- > 0)
3232 {
3233 regcache_cooked_read (regcache, regnum, from);
3234 convert_typed_floating (from, ia64_ext_type (gdbarch),
3235 (char *)valbuf + offset, float_elt_type);
3236 offset += TYPE_LENGTH (float_elt_type);
3237 regnum++;
3238 }
3239 }
3240 else if (!ia64_struct_type_p (type) && TYPE_LENGTH (type) < 8)
3241 {
3242 /* This is an integral value, and its size is less than 8 bytes.
3243 These values are LSB-aligned, so extract the relevant bytes,
3244 and copy them into VALBUF. */
3245 /* brobecker/2005-12-30: Actually, all integral values are LSB aligned,
3246 so I suppose we should also add handling here for integral values
3247 whose size is greater than 8. But I wasn't able to create such
3248 a type, neither in C nor in Ada, so not worrying about these yet. */
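/* E.g. a 2-byte integral result arrives in the low-order bits of r8;
   the read below fetches the whole register and store_unsigned_integer
   then re-truncates it to TYPE_LENGTH bytes in target byte order.  */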
3249 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3250 ULONGEST val;
3251
3252 regcache_cooked_read_unsigned (regcache, IA64_GR8_REGNUM, &val);
3253 store_unsigned_integer (valbuf, TYPE_LENGTH (type), byte_order, val);
3254 }
3255 else
3256 {
3257 ULONGEST val;
3258 int offset = 0;
3259 int regnum = IA64_GR8_REGNUM;
3260 int reglen = TYPE_LENGTH (register_type (gdbarch, IA64_GR8_REGNUM));
3261 int n = TYPE_LENGTH (type) / reglen;
3262 int m = TYPE_LENGTH (type) % reglen;
3263
3264 while (n-- > 0)
3265 {
3266 ULONGEST val;
3267 regcache_cooked_read_unsigned (regcache, regnum, &val);
3268 memcpy ((char *)valbuf + offset, &val, reglen);
3269 offset += reglen;
3270 regnum++;
3271 }
3272
3273 if (m)
3274 {
3275 regcache_cooked_read_unsigned (regcache, regnum, &val);
3276 memcpy ((char *)valbuf + offset, &val, m);
3277 }
3278 }
3279 }
3280
3281 static void
3282 ia64_store_return_value (struct type *type, struct regcache *regcache,
3283 const gdb_byte *valbuf)
3284 {
3285 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3286 struct type *float_elt_type;
3287
3288 float_elt_type = is_float_or_hfa_type (type);
3289 if (float_elt_type != NULL)
3290 {
3291 char to[MAX_REGISTER_SIZE];
3292 int offset = 0;
3293 int regnum = IA64_FR8_REGNUM;
3294 int n = TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type);
3295
3296 while (n-- > 0)
3297 {
3298 convert_typed_floating ((char *)valbuf + offset, float_elt_type,
3299 to, ia64_ext_type (gdbarch));
3300 regcache_cooked_write (regcache, regnum, to);
3301 offset += TYPE_LENGTH (float_elt_type);
3302 regnum++;
3303 }
3304 }
3305 else
3306 {
3307 ULONGEST val;
3308 int offset = 0;
3309 int regnum = IA64_GR8_REGNUM;
3310 int reglen = TYPE_LENGTH (register_type (gdbarch, IA64_GR8_REGNUM));
3311 int n = TYPE_LENGTH (type) / reglen;
3312 int m = TYPE_LENGTH (type) % reglen;
3313
3314 while (n-- > 0)
3315 {
3316 ULONGEST val;
3317 memcpy (&val, (char *)valbuf + offset, reglen);
3318 regcache_cooked_write_unsigned (regcache, regnum, val);
3319 offset += reglen;
3320 regnum++;
3321 }
3322
3323 if (m)
3324 {
3325 memcpy (&val, (char *)valbuf + offset, m);
3326 regcache_cooked_write_unsigned (regcache, regnum, val);
3327 }
3328 }
3329 }
3330
3331 static enum return_value_convention
3332 ia64_return_value (struct gdbarch *gdbarch, struct type *func_type,
3333 struct type *valtype, struct regcache *regcache,
3334 gdb_byte *readbuf, const gdb_byte *writebuf)
3335 {
3336 int struct_return = ia64_use_struct_convention (valtype);
3337
3338 if (writebuf != NULL)
3339 {
3340 gdb_assert (!struct_return);
3341 ia64_store_return_value (valtype, regcache, writebuf);
3342 }
3343
3344 if (readbuf != NULL)
3345 {
3346 gdb_assert (!struct_return);
3347 ia64_extract_return_value (valtype, regcache, readbuf);
3348 }
3349
3350 if (struct_return)
3351 return RETURN_VALUE_STRUCT_CONVENTION;
3352 else
3353 return RETURN_VALUE_REGISTER_CONVENTION;
3354 }
3355
3356 static int
3357 is_float_or_hfa_type_recurse (struct type *t, struct type **etp)
3358 {
3359 switch (TYPE_CODE (t))
3360 {
3361 case TYPE_CODE_FLT:
3362 if (*etp)
3363 return TYPE_LENGTH (*etp) == TYPE_LENGTH (t);
3364 else
3365 {
3366 *etp = t;
3367 return 1;
3368 }
3369 break;
3370 case TYPE_CODE_ARRAY:
3371 return
3372 is_float_or_hfa_type_recurse (check_typedef (TYPE_TARGET_TYPE (t)),
3373 etp);
3374 break;
3375 case TYPE_CODE_STRUCT:
3376 {
3377 int i;
3378
3379 for (i = 0; i < TYPE_NFIELDS (t); i++)
3380 if (!is_float_or_hfa_type_recurse
3381 (check_typedef (TYPE_FIELD_TYPE (t, i)), etp))
3382 return 0;
3383 return 1;
3384 }
3385 break;
3386 default:
3387 return 0;
3388 break;
3389 }
3390 }
3391
3392 /* Determine if the given type is one of the floating point types or
3393 an HFA (which is a struct, array, or combination thereof whose
3394 bottom-most elements are all of the same floating point type). */
3395
3396 static struct type *
3397 is_float_or_hfa_type (struct type *t)
3398 {
3399 struct type *et = 0;
3400
3401 return is_float_or_hfa_type_recurse (t, &et) ? et : 0;
3402 }
3403
3404
3405 /* Return 1 if the alignment of T is such that the next even slot
3406 should be used. Return 0 if the next available slot should
3407 be used. (See section 8.5.1 of the IA-64 Software Conventions
3408 and Runtime manual). */
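/* For illustration (a sketch, not taken from the manual): with the
   128-bit long double used here, a call such as f (int, long double)
   puts the int in argument slot 0, leaves slot 1 unused, and places
   the long double in slots 2-3, since slot_alignment_is_next_even
   returns 1 for any integer or float wider than 8 bytes and for any
   array or struct containing one.  */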
3409
3410 static int
3411 slot_alignment_is_next_even (struct type *t)
3412 {
3413 switch (TYPE_CODE (t))
3414 {
3415 case TYPE_CODE_INT:
3416 case TYPE_CODE_FLT:
3417 if (TYPE_LENGTH (t) > 8)
3418 return 1;
3419 else
3420 return 0;
3421 case TYPE_CODE_ARRAY:
3422 return
3423 slot_alignment_is_next_even (check_typedef (TYPE_TARGET_TYPE (t)));
3424 case TYPE_CODE_STRUCT:
3425 {
3426 int i;
3427
3428 for (i = 0; i < TYPE_NFIELDS (t); i++)
3429 if (slot_alignment_is_next_even
3430 (check_typedef (TYPE_FIELD_TYPE (t, i))))
3431 return 1;
3432 return 0;
3433 }
3434 default:
3435 return 0;
3436 }
3437 }
3438
3439 /* Attempt to find (and return) the global pointer for the given
3440 function.
3441
3442 This is a rather nasty bit of code that searches for the .dynamic section
3443 in the objfile corresponding to the pc of the function we're trying
3444 to call. Once it finds the addresses at which the .dynamic section
3445 lives in the child process, it scans the Elf64_Dyn entries for a
3446 DT_PLTGOT tag. If it finds one of these, the corresponding
3447 d_un.d_ptr value is the global pointer. */
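/* A sketch of the 16-byte Elf64_Dyn layout the scan below relies on
   (two 8-byte words per entry, the table being terminated by a
   DT_NULL tag):

       offset 0:  d_tag             e.g. DT_PLTGOT
       offset 8:  d_un.d_ptr/d_val  the global pointer for DT_PLTGOT

   Each iteration reads the tag word, reads the value word 8 bytes
   later on a DT_PLTGOT match, and otherwise advances by 16 bytes.  */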
3448
3449 static CORE_ADDR
3450 ia64_find_global_pointer_from_dynamic_section (struct gdbarch *gdbarch,
3451 CORE_ADDR faddr)
3452 {
3453 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3454 struct obj_section *faddr_sect;
3455
3456 faddr_sect = find_pc_section (faddr);
3457 if (faddr_sect != NULL)
3458 {
3459 struct obj_section *osect;
3460
3461 ALL_OBJFILE_OSECTIONS (faddr_sect->objfile, osect)
3462 {
3463 if (strcmp (osect->the_bfd_section->name, ".dynamic") == 0)
3464 break;
3465 }
3466
3467 if (osect < faddr_sect->objfile->sections_end)
3468 {
3469 CORE_ADDR addr, endaddr;
3470
3471 addr = obj_section_addr (osect);
3472 endaddr = obj_section_endaddr (osect);
3473
3474 while (addr < endaddr)
3475 {
3476 int status;
3477 LONGEST tag;
3478 char buf[8];
3479
3480 status = target_read_memory (addr, buf, sizeof (buf));
3481 if (status != 0)
3482 break;
3483 tag = extract_signed_integer (buf, sizeof (buf), byte_order);
3484
3485 if (tag == DT_PLTGOT)
3486 {
3487 CORE_ADDR global_pointer;
3488
3489 status = target_read_memory (addr + 8, buf, sizeof (buf));
3490 if (status != 0)
3491 break;
3492 global_pointer = extract_unsigned_integer (buf, sizeof (buf),
3493 byte_order);
3494
3495 /* The payoff... */
3496 return global_pointer;
3497 }
3498
3499 if (tag == DT_NULL)
3500 break;
3501
3502 addr += 16;
3503 }
3504 }
3505 }
3506 return 0;
3507 }
3508
3509 /* Attempt to find (and return) the global pointer for the given
3510 function. We first try the find_global_pointer_from_solib routine
3511 from the gdbarch tdep vector, if provided. If that does not
3512 work, we fall back to ia64_find_global_pointer_from_dynamic_section. */
3513
3514 static CORE_ADDR
3515 ia64_find_global_pointer (struct gdbarch *gdbarch, CORE_ADDR faddr)
3516 {
3517 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3518 CORE_ADDR addr = 0;
3519
3520 if (tdep->find_global_pointer_from_solib)
3521 addr = tdep->find_global_pointer_from_solib (gdbarch, faddr);
3522 if (addr == 0)
3523 addr = ia64_find_global_pointer_from_dynamic_section (gdbarch, faddr);
3524 return addr;
3525 }
3526
3527 /* Given a function's address, attempt to find (and return) the
3528 corresponding (canonical) function descriptor. Return 0 if
3529 not found. */
3530 static CORE_ADDR
3531 find_extant_func_descr (struct gdbarch *gdbarch, CORE_ADDR faddr)
3532 {
3533 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3534 struct obj_section *faddr_sect;
3535
3536 /* Return early if faddr is already a function descriptor. */
3537 faddr_sect = find_pc_section (faddr);
3538 if (faddr_sect && strcmp (faddr_sect->the_bfd_section->name, ".opd") == 0)
3539 return faddr;
3540
3541 if (faddr_sect != NULL)
3542 {
3543 struct obj_section *osect;
3544 ALL_OBJFILE_OSECTIONS (faddr_sect->objfile, osect)
3545 {
3546 if (strcmp (osect->the_bfd_section->name, ".opd") == 0)
3547 break;
3548 }
3549
3550 if (osect < faddr_sect->objfile->sections_end)
3551 {
3552 CORE_ADDR addr, endaddr;
3553
3554 addr = obj_section_addr (osect);
3555 endaddr = obj_section_endaddr (osect);
3556
3557 while (addr < endaddr)
3558 {
3559 int status;
3560 LONGEST faddr2;
3561 char buf[8];
3562
3563 status = target_read_memory (addr, buf, sizeof (buf));
3564 if (status != 0)
3565 break;
3566 faddr2 = extract_signed_integer (buf, sizeof (buf), byte_order);
3567
3568 if (faddr == faddr2)
3569 return addr;
3570
3571 addr += 16;
3572 }
3573 }
3574 }
3575 return 0;
3576 }
3577
3578 /* Attempt to find a function descriptor corresponding to the
3579 given address. If none is found, construct one on the
3580 stack using the address at fdaptr. */
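/* A descriptor constructed here uses the native 16-byte layout
   written by the store_unsigned_integer pair below: 8 bytes of
   function entry address followed by 8 bytes of global pointer,
   falling back to the caller's gp in r1 when no global pointer can
   be found.  */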
3581
3582 static CORE_ADDR
3583 find_func_descr (struct regcache *regcache, CORE_ADDR faddr, CORE_ADDR *fdaptr)
3584 {
3585 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3586 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3587 CORE_ADDR fdesc;
3588
3589 fdesc = find_extant_func_descr (gdbarch, faddr);
3590
3591 if (fdesc == 0)
3592 {
3593 ULONGEST global_pointer;
3594 char buf[16];
3595
3596 fdesc = *fdaptr;
3597 *fdaptr += 16;
3598
3599 global_pointer = ia64_find_global_pointer (gdbarch, faddr);
3600
3601 if (global_pointer == 0)
3602 regcache_cooked_read_unsigned (regcache,
3603 IA64_GR1_REGNUM, &global_pointer);
3604
3605 store_unsigned_integer (buf, 8, byte_order, faddr);
3606 store_unsigned_integer (buf + 8, 8, byte_order, global_pointer);
3607
3608 write_memory (fdesc, buf, 16);
3609 }
3610
3611 return fdesc;
3612 }
3613
3614 /* Use the following routine when printing out function pointers
3615 so the user can see the function address rather than just the
3616 function descriptor. */
3617 static CORE_ADDR
3618 ia64_convert_from_func_ptr_addr (struct gdbarch *gdbarch, CORE_ADDR addr,
3619 struct target_ops *targ)
3620 {
3621 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3622 struct obj_section *s;
3623 gdb_byte buf[8];
3624
3625 s = find_pc_section (addr);
3626
3627 /* Check whether ADDR points to a function descriptor. */
3628 if (s && strcmp (s->the_bfd_section->name, ".opd") == 0)
3629 return read_memory_unsigned_integer (addr, 8, byte_order);
3630
3631 /* Normally, functions live inside a section that is executable.
3632 So, if ADDR points to a non-executable section, then treat it
3633 as a function descriptor and return the target address iff
3634 the target address itself points to a section that is executable.
3635 Check first that the whole 8 bytes at ADDR are readable. */
3636 if (s && (s->the_bfd_section->flags & SEC_CODE) == 0
3637 && target_read_memory (addr, buf, 8) == 0)
3638 {
3639 CORE_ADDR pc = extract_unsigned_integer (buf, 8, byte_order);
3640 struct obj_section *pc_section = find_pc_section (pc);
3641
3642 if (pc_section && (pc_section->the_bfd_section->flags & SEC_CODE))
3643 return pc;
3644 }
3645
3646 /* There are also descriptors embedded in vtables. */
3647 if (s)
3648 {
3649 struct minimal_symbol *minsym;
3650
3651 minsym = lookup_minimal_symbol_by_pc (addr);
3652
3653 if (minsym && is_vtable_name (SYMBOL_LINKAGE_NAME (minsym)))
3654 return read_memory_unsigned_integer (addr, 8, byte_order);
3655 }
3656
3657 return addr;
3658 }
3659
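/* Implement the "frame_align" gdbarch method: keep the memory stack
   16-byte aligned, as the calling conventions require.  */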
3660 static CORE_ADDR
3661 ia64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3662 {
3663 return sp & ~0xfLL;
3664 }
3665
3666 /* The default "allocate_new_rse_frame" ia64_infcall_ops routine for ia64. */
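/* On entry BSP is the current backing store pointer.  Advance it
   past the SOF new output slots, copy the current frame marker (the
   low bits of CFM) into AR.PFS, and install a fresh CFM describing a
   frame of SOF registers with no locals or rotation.  */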
3667
3668 static void
3669 ia64_allocate_new_rse_frame (struct regcache *regcache, ULONGEST bsp, int sof)
3670 {
3671 ULONGEST cfm, pfs, new_bsp;
3672
3673 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
3674
3675 new_bsp = rse_address_add (bsp, sof);
3676 regcache_cooked_write_unsigned (regcache, IA64_BSP_REGNUM, new_bsp);
3677
3678 regcache_cooked_read_unsigned (regcache, IA64_PFS_REGNUM, &pfs);
3679 pfs &= 0xc000000000000000LL;
3680 pfs |= (cfm & 0xffffffffffffLL);
3681 regcache_cooked_write_unsigned (regcache, IA64_PFS_REGNUM, pfs);
3682
3683 cfm &= 0xc000000000000000LL;
3684 cfm |= sof;
3685 regcache_cooked_write_unsigned (regcache, IA64_CFM_REGNUM, cfm);
3686 }
3687
3688 /* The default "store_argument_in_slot" ia64_infcall_ops routine for
3689 ia64. */
3690
3691 static void
3692 ia64_store_argument_in_slot (struct regcache *regcache, CORE_ADDR bsp,
3693 int slotnum, gdb_byte *buf)
3694 {
3695 write_memory (rse_address_add (bsp, slotnum), buf, 8);
3696 }
3697
3698 /* The default "set_function_addr" ia64_infcall_ops routine for ia64. */
3699
3700 static void
3701 ia64_set_function_addr (struct regcache *regcache, CORE_ADDR func_addr)
3702 {
3703 /* Nothing needed. */
3704 }
3705
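/* Implement the "push_dummy_call" gdbarch method.  Arguments are
   laid out following the ia64 software conventions: the first eight
   8-byte argument slots go into the new RSE frame, the rest onto the
   memory stack above a 16-byte scratch area; floating-point and HFA
   arguments are additionally copied into f8-f15; r8 receives the
   struct-return address, r1 the callee's global pointer, and b0 the
   return breakpoint address.  */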
3706 static CORE_ADDR
3707 ia64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
3708 struct regcache *regcache, CORE_ADDR bp_addr,
3709 int nargs, struct value **args, CORE_ADDR sp,
3710 int struct_return, CORE_ADDR struct_addr)
3711 {
3712 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3713 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3714 int argno;
3715 struct value *arg;
3716 struct type *type;
3717 int len, argoffset;
3718 int nslots, rseslots, memslots, slotnum, nfuncargs;
3719 int floatreg;
3720 ULONGEST bsp;
3721 CORE_ADDR funcdescaddr, pc, global_pointer;
3722 CORE_ADDR func_addr = find_function_addr (function, NULL);
3723
3724 nslots = 0;
3725 nfuncargs = 0;
3726 /* Count the number of slots needed for the arguments. */
3727 for (argno = 0; argno < nargs; argno++)
3728 {
3729 arg = args[argno];
3730 type = check_typedef (value_type (arg));
3731 len = TYPE_LENGTH (type);
3732
3733 if ((nslots & 1) && slot_alignment_is_next_even (type))
3734 nslots++;
3735
3736 if (TYPE_CODE (type) == TYPE_CODE_FUNC)
3737 nfuncargs++;
3738
3739 nslots += (len + 7) / 8;
3740 }
3741
3742 /* Divvy up the slots between the RSE and the memory stack. */
3743 rseslots = (nslots > 8) ? 8 : nslots;
3744 memslots = nslots - rseslots;
3745
3746 /* Allocate a new RSE frame. */
3747 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
3748 tdep->infcall_ops.allocate_new_rse_frame (regcache, bsp, rseslots);
3749
3750 /* We will attempt to find function descriptors in the .opd segment,
3751 but if we can't we'll construct them ourselves. That being the
3752 case, we'll need to reserve space on the stack for them. */
3753 funcdescaddr = sp - nfuncargs * 16;
3754 funcdescaddr &= ~0xfLL;
3755
3756 /* Adjust the stack pointer to its new value. The calling conventions
3757 require us to have 16 bytes of scratch, plus whatever space is
3758 necessary for the memory slots and our function descriptors. */
3759 sp = sp - 16 - (memslots + nfuncargs) * 8;
3760 sp &= ~0xfLL; /* Maintain 16 byte alignment. */
3761
3762 /* Place the arguments where they belong. The arguments will be
3763 either placed in the RSE backing store or on the memory stack.
3764 In addition, floating point arguments or HFAs are placed in
3765 floating point registers. */
3766 slotnum = 0;
3767 floatreg = IA64_FR8_REGNUM;
3768 for (argno = 0; argno < nargs; argno++)
3769 {
3770 struct type *float_elt_type;
3771
3772 arg = args[argno];
3773 type = check_typedef (value_type (arg));
3774 len = TYPE_LENGTH (type);
3775
3776 /* Special handling for function parameters. */
3777 if (len == 8
3778 && TYPE_CODE (type) == TYPE_CODE_PTR
3779 && TYPE_CODE (TYPE_TARGET_TYPE (type)) == TYPE_CODE_FUNC)
3780 {
3781 char val_buf[8];
3782 ULONGEST faddr = extract_unsigned_integer (value_contents (arg),
3783 8, byte_order);
3784 store_unsigned_integer (val_buf, 8, byte_order,
3785 find_func_descr (regcache, faddr,
3786 &funcdescaddr));
3787 if (slotnum < rseslots)
3788 tdep->infcall_ops.store_argument_in_slot (regcache, bsp,
3789 slotnum, val_buf);
3790 else
3791 write_memory (sp + 16 + 8 * (slotnum - rseslots), val_buf, 8);
3792 slotnum++;
3793 continue;
3794 }
3795
3796 /* Normal slots. */
3797
3798 /* Skip odd slot if necessary... */
3799 if ((slotnum & 1) && slot_alignment_is_next_even (type))
3800 slotnum++;
3801
3802 argoffset = 0;
3803 while (len > 0)
3804 {
3805 char val_buf[8];
3806
3807 memset (val_buf, 0, 8);
3808 if (!ia64_struct_type_p (type) && len < 8)
3809 {
3810 /* Integral types are LSB-aligned, so we have to be careful
3811 to insert the argument on the correct side of the buffer.
3812 This is why we use store_unsigned_integer. */
3813 store_unsigned_integer
3814 (val_buf, 8, byte_order,
3815 extract_unsigned_integer (value_contents (arg), len,
3816 byte_order));
3817 }
3818 else
3819 {
3820 /* This is either an 8-byte (or larger) integral type, or an
3821 aggregate. For the integral type, there is no problem: we
3822 just copy the value over.
3823
3824 For aggregates, the only potentially tricky portion
3825 is to write the last chunk if it is less than 8 bytes.
3826 In this case, the data is Byte0-aligned. Happy news:
3827 this means that we don't need to differentiate the
3828 handling of 8-byte blocks and less-than-8-byte blocks. */
3829 memcpy (val_buf, value_contents (arg) + argoffset,
3830 (len > 8) ? 8 : len);
3831 }
3832
3833 if (slotnum < rseslots)
3834 tdep->infcall_ops.store_argument_in_slot (regcache, bsp,
3835 slotnum, val_buf);
3836 else
3837 write_memory (sp + 16 + 8 * (slotnum - rseslots), val_buf, 8);
3838
3839 argoffset += 8;
3840 len -= 8;
3841 slotnum++;
3842 }
3843
3844 /* Handle floating point types (including HFAs). */
3845 float_elt_type = is_float_or_hfa_type (type);
3846 if (float_elt_type != NULL)
3847 {
3848 argoffset = 0;
3849 len = TYPE_LENGTH (type);
3850 while (len > 0 && floatreg < IA64_FR16_REGNUM)
3851 {
3852 char to[MAX_REGISTER_SIZE];
3853 convert_typed_floating (value_contents (arg) + argoffset,
3854 float_elt_type, to,
3855 ia64_ext_type (gdbarch));
3856 regcache_cooked_write (regcache, floatreg, (void *)to);
3857 floatreg++;
3858 argoffset += TYPE_LENGTH (float_elt_type);
3859 len -= TYPE_LENGTH (float_elt_type);
3860 }
3861 }
3862 }
3863
3864 /* Store the struct return value in r8 if necessary. */
3865 if (struct_return)
3866 {
3867 regcache_cooked_write_unsigned (regcache, IA64_GR8_REGNUM,
3868 (ULONGEST) struct_addr);
3869 }
3870
3871 global_pointer = ia64_find_global_pointer (gdbarch, func_addr);
3872
3873 if (global_pointer != 0)
3874 regcache_cooked_write_unsigned (regcache, IA64_GR1_REGNUM, global_pointer);
3875
3876 /* The following is not necessary on HP-UX, because we're using
3877 a dummy code sequence pushed on the stack to make the call, and
3878 this sequence doesn't need b0 to be set in order for our dummy
3879 breakpoint to be hit. Nonetheless, this doesn't interfere, and
3880 it's needed for other OSes, so we do this unconditionally. */
3881 regcache_cooked_write_unsigned (regcache, IA64_BR0_REGNUM, bp_addr);
3882
3883 regcache_cooked_write_unsigned (regcache, sp_regnum, sp);
3884
3885 tdep->infcall_ops.set_function_addr (regcache, func_addr);
3886
3887 return sp;
3888 }
3889
3890 static const struct ia64_infcall_ops ia64_infcall_ops =
3891 {
3892 ia64_allocate_new_rse_frame,
3893 ia64_store_argument_in_slot,
3894 ia64_set_function_addr
3895 };
3896
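/* Implement the "dummy_id" gdbarch method.  A dummy frame is
   identified by the memory stack pointer, the call-site PC, and the
   RSE backing store pointer, the latter carried in the frame id's
   "special" field.  */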
3897 static struct frame_id
3898 ia64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
3899 {
3900 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3901 char buf[8];
3902 CORE_ADDR sp, bsp;
3903
3904 get_frame_register (this_frame, sp_regnum, buf);
3905 sp = extract_unsigned_integer (buf, 8, byte_order);
3906
3907 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
3908 bsp = extract_unsigned_integer (buf, 8, byte_order);
3909
3910 if (gdbarch_debug >= 1)
3911 fprintf_unfiltered (gdb_stdlog,
3912 "dummy frame id: code %s, stack %s, special %s\n",
3913 paddress (gdbarch, get_frame_pc (this_frame)),
3914 paddress (gdbarch, sp), paddress (gdbarch, bsp));
3915
3916 return frame_id_build_special (sp, get_frame_pc (this_frame), bsp);
3917 }
3918
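/* Implement the "unwind_pc" gdbarch method.  The slot number of the
   resume point is held in PSR.ri (bits 41-42); fold it into the low
   bits of the bundle address to form the slot-encoded PC value used
   throughout this file.  For example, slot 2 of the bundle at
   0x4000000000001230 yields PC 0x4000000000001232.  */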
3919 static CORE_ADDR
3920 ia64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
3921 {
3922 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3923 char buf[8];
3924 CORE_ADDR ip, psr, pc;
3925
3926 frame_unwind_register (next_frame, IA64_IP_REGNUM, buf);
3927 ip = extract_unsigned_integer (buf, 8, byte_order);
3928 frame_unwind_register (next_frame, IA64_PSR_REGNUM, buf);
3929 psr = extract_unsigned_integer (buf, 8, byte_order);
3930
3931 pc = (ip & ~0xf) | ((psr >> 41) & 3);
3932 return pc;
3933 }
3934
3935 static int
3936 ia64_print_insn (bfd_vma memaddr, struct disassemble_info *info)
3937 {
3938 info->bytes_per_line = SLOT_MULTIPLIER;
3939 return print_insn_ia64 (memaddr, info);
3940 }
3941
3942 /* The default "size_of_register_frame" gdbarch_tdep routine for ia64. */
3943
3944 static int
3945 ia64_size_of_register_frame (struct frame_info *this_frame, ULONGEST cfm)
3946 {
3947 return (cfm & 0x7f);
3948 }
3949
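/* Initialize a gdbarch for the ia64 architecture, reusing an already
   constructed candidate when one matches INFO.  */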
3950 static struct gdbarch *
3951 ia64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3952 {
3953 struct gdbarch *gdbarch;
3954 struct gdbarch_tdep *tdep;
3955
3956 /* If there is already a candidate, use it. */
3957 arches = gdbarch_list_lookup_by_info (arches, &info);
3958 if (arches != NULL)
3959 return arches->gdbarch;
3960
3961 tdep = xzalloc (sizeof (struct gdbarch_tdep));
3962 gdbarch = gdbarch_alloc (&info, tdep);
3963
3964 tdep->size_of_register_frame = ia64_size_of_register_frame;
3965
3966 /* According to the ia64 specs, instructions that store long double
3967 floats in memory use a long-double format different from that
3968 used in the floating registers. The memory format matches the
3969 x86 extended float format which is 80 bits. An OS may choose to
3970 use this format (e.g. GNU/Linux) or choose to use a different
3971 format for storing long doubles (e.g. HPUX). In the latter case,
3972 the setting of the format may be moved/overridden in an
3973 OS-specific tdep file. */
3974 set_gdbarch_long_double_format (gdbarch, floatformats_i387_ext);
3975
3976 set_gdbarch_short_bit (gdbarch, 16);
3977 set_gdbarch_int_bit (gdbarch, 32);
3978 set_gdbarch_long_bit (gdbarch, 64);
3979 set_gdbarch_long_long_bit (gdbarch, 64);
3980 set_gdbarch_float_bit (gdbarch, 32);
3981 set_gdbarch_double_bit (gdbarch, 64);
3982 set_gdbarch_long_double_bit (gdbarch, 128);
3983 set_gdbarch_ptr_bit (gdbarch, 64);
3984
3985 set_gdbarch_num_regs (gdbarch, NUM_IA64_RAW_REGS);
3986 set_gdbarch_num_pseudo_regs (gdbarch,
3987 LAST_PSEUDO_REGNUM - FIRST_PSEUDO_REGNUM);
3988 set_gdbarch_sp_regnum (gdbarch, sp_regnum);
3989 set_gdbarch_fp0_regnum (gdbarch, IA64_FR0_REGNUM);
3990
3991 set_gdbarch_register_name (gdbarch, ia64_register_name);
3992 set_gdbarch_register_type (gdbarch, ia64_register_type);
3993
3994 set_gdbarch_pseudo_register_read (gdbarch, ia64_pseudo_register_read);
3995 set_gdbarch_pseudo_register_write (gdbarch, ia64_pseudo_register_write);
3996 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, ia64_dwarf_reg_to_regnum);
3997 set_gdbarch_register_reggroup_p (gdbarch, ia64_register_reggroup_p);
3998 set_gdbarch_convert_register_p (gdbarch, ia64_convert_register_p);
3999 set_gdbarch_register_to_value (gdbarch, ia64_register_to_value);
4000 set_gdbarch_value_to_register (gdbarch, ia64_value_to_register);
4001
4002 set_gdbarch_skip_prologue (gdbarch, ia64_skip_prologue);
4003
4004 set_gdbarch_return_value (gdbarch, ia64_return_value);
4005
4006 set_gdbarch_memory_insert_breakpoint (gdbarch,
4007 ia64_memory_insert_breakpoint);
4008 set_gdbarch_memory_remove_breakpoint (gdbarch,
4009 ia64_memory_remove_breakpoint);
4010 set_gdbarch_breakpoint_from_pc (gdbarch, ia64_breakpoint_from_pc);
4011 set_gdbarch_read_pc (gdbarch, ia64_read_pc);
4012 set_gdbarch_write_pc (gdbarch, ia64_write_pc);
4013
4014 /* Settings for calling functions in the inferior. */
4015 set_gdbarch_push_dummy_call (gdbarch, ia64_push_dummy_call);
4016 tdep->infcall_ops = ia64_infcall_ops;
4017 set_gdbarch_frame_align (gdbarch, ia64_frame_align);
4018 set_gdbarch_dummy_id (gdbarch, ia64_dummy_id);
4019
4020 set_gdbarch_unwind_pc (gdbarch, ia64_unwind_pc);
4021 #ifdef HAVE_LIBUNWIND_IA64_H
4022 frame_unwind_append_unwinder (gdbarch,
4023 &ia64_libunwind_sigtramp_frame_unwind);
4024 frame_unwind_append_unwinder (gdbarch, &ia64_libunwind_frame_unwind);
4025 frame_unwind_append_unwinder (gdbarch, &ia64_sigtramp_frame_unwind);
4026 libunwind_frame_set_descr (gdbarch, &ia64_libunwind_descr);
4027 #else
4028 frame_unwind_append_unwinder (gdbarch, &ia64_sigtramp_frame_unwind);
4029 #endif
4030 frame_unwind_append_unwinder (gdbarch, &ia64_frame_unwind);
4031 frame_base_set_default (gdbarch, &ia64_frame_base);
4032
4033 /* Settings that should be unnecessary. */
4034 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
4035
4036 set_gdbarch_print_insn (gdbarch, ia64_print_insn);
4037 set_gdbarch_convert_from_func_ptr_addr (gdbarch,
4038 ia64_convert_from_func_ptr_addr);
4039
4040 /* The virtual table contains 16-byte descriptors, not pointers to
4041 descriptors. */
4042 set_gdbarch_vtable_function_descriptors (gdbarch, 1);
4043
4044 /* Hook in ABI-specific overrides, if they have been registered. */
4045 gdbarch_init_osabi (info, gdbarch);
4046
4047 return gdbarch;
4048 }
4049
4050 extern initialize_file_ftype _initialize_ia64_tdep; /* -Wmissing-prototypes */
4051
4052 void
4053 _initialize_ia64_tdep (void)
4054 {
4055 gdbarch_register (bfd_arch_ia64, ia64_gdbarch_init, NULL);
4056 }