1 /* SPU target-dependent code for GDB, the GNU debugger.
2 Copyright (C) 2006, 2007, 2008 Free Software Foundation, Inc.
3
4 Contributed by Ulrich Weigand <uweigand@de.ibm.com>.
5 Based on a port by Sid Manning <sid@us.ibm.com>.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "arch-utils.h"
24 #include "gdbtypes.h"
25 #include "gdbcmd.h"
26 #include "gdbcore.h"
27 #include "gdb_string.h"
28 #include "gdb_assert.h"
29 #include "frame.h"
30 #include "frame-unwind.h"
31 #include "frame-base.h"
32 #include "trad-frame.h"
33 #include "symtab.h"
34 #include "symfile.h"
35 #include "value.h"
36 #include "inferior.h"
37 #include "dis-asm.h"
38 #include "objfiles.h"
39 #include "language.h"
40 #include "regcache.h"
41 #include "reggroups.h"
42 #include "floatformat.h"
43 #include "observer.h"
44
45 #include "spu-tdep.h"
46
47
48 /* The tdep structure. */
49 struct gdbarch_tdep
50 {
51 /* SPU-specific vector type. */
52 struct type *spu_builtin_type_vec128;
53 };
54
55
56 /* SPU-specific vector type. */
57 static struct type *
58 spu_builtin_type_vec128 (struct gdbarch *gdbarch)
59 {
60 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
61
62 if (!tdep->spu_builtin_type_vec128)
63 {
64 struct type *t;
65
66 t = init_composite_type ("__spu_builtin_type_vec128", TYPE_CODE_UNION);
67 append_composite_type_field (t, "uint128", builtin_type_int128);
68 append_composite_type_field (t, "v2_int64",
69 init_vector_type (builtin_type_int64, 2));
70 append_composite_type_field (t, "v4_int32",
71 init_vector_type (builtin_type_int32, 4));
72 append_composite_type_field (t, "v8_int16",
73 init_vector_type (builtin_type_int16, 8));
74 append_composite_type_field (t, "v16_int8",
75 init_vector_type (builtin_type_int8, 16));
76 append_composite_type_field (t, "v2_double",
77 init_vector_type (builtin_type_double, 2));
78 append_composite_type_field (t, "v4_float",
79 init_vector_type (builtin_type_float, 4));
80
81 TYPE_VECTOR (t) = 1;
82 TYPE_NAME (t) = "spu_builtin_type_vec128";
83
84 tdep->spu_builtin_type_vec128 = t;
85 }
86
87 return tdep->spu_builtin_type_vec128;
88 }
89
90
91 /* The list of available "info spu " commands. */
92 static struct cmd_list_element *infospucmdlist = NULL;
93
94 /* Registers. */
95
96 static const char *
97 spu_register_name (struct gdbarch *gdbarch, int reg_nr)
98 {
99 static char *register_names[] =
100 {
101 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
102 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
103 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
104 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
105 "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
106 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
107 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
108 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
109 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
110 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
111 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
112 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
113 "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
114 "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
115 "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
116 "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
117 "id", "pc", "sp", "fpscr", "srr0", "lslr", "decr", "decr_status"
118 };
119
120 if (reg_nr < 0)
121 return NULL;
122 if (reg_nr >= sizeof register_names / sizeof *register_names)
123 return NULL;
124
125 return register_names[reg_nr];
126 }
127
128 static struct type *
129 spu_register_type (struct gdbarch *gdbarch, int reg_nr)
130 {
131 if (reg_nr < SPU_NUM_GPRS)
132 return spu_builtin_type_vec128 (gdbarch);
133
134 switch (reg_nr)
135 {
136 case SPU_ID_REGNUM:
137 return builtin_type_uint32;
138
139 case SPU_PC_REGNUM:
140 return builtin_type_void_func_ptr;
141
142 case SPU_SP_REGNUM:
143 return builtin_type_void_data_ptr;
144
145 case SPU_FPSCR_REGNUM:
146 return builtin_type_uint128;
147
148 case SPU_SRR0_REGNUM:
149 return builtin_type_uint32;
150
151 case SPU_LSLR_REGNUM:
152 return builtin_type_uint32;
153
154 case SPU_DECR_REGNUM:
155 return builtin_type_uint32;
156
157 case SPU_DECR_STATUS_REGNUM:
158 return builtin_type_uint32;
159
160 default:
161 internal_error (__FILE__, __LINE__, "invalid regnum");
162 }
163 }
164
165 /* Pseudo registers for preferred slots - stack pointer. */
166
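/* Read the SPU special-purpose register REGNAME for the SPU context
   selected by the ID register and store its value as a 32-bit integer
   in BUF.  The target exposes these registers as hexadecimal ASCII
   strings via the TARGET_OBJECT_SPU interface. */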
167 static void
168 spu_pseudo_register_read_spu (struct regcache *regcache, const char *regname,
169 gdb_byte *buf)
170 {
171 gdb_byte reg[32];
172 char annex[32];
173 ULONGEST id;
174
175 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
176 xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
177 memset (reg, 0, sizeof reg);
178 target_read (&current_target, TARGET_OBJECT_SPU, annex,
179 reg, 0, sizeof reg);
180
181 store_unsigned_integer (buf, 4, strtoulst (reg, NULL, 16));
182 }
183
184 static void
185 spu_pseudo_register_read (struct gdbarch *gdbarch, struct regcache *regcache,
186 int regnum, gdb_byte *buf)
187 {
188 gdb_byte reg[16];
189 char annex[32];
190 ULONGEST id;
191
192 switch (regnum)
193 {
194 case SPU_SP_REGNUM:
195 regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
196 memcpy (buf, reg, 4);
197 break;
198
199 case SPU_FPSCR_REGNUM:
200 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
201 xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
202 target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
203 break;
204
205 case SPU_SRR0_REGNUM:
206 spu_pseudo_register_read_spu (regcache, "srr0", buf);
207 break;
208
209 case SPU_LSLR_REGNUM:
210 spu_pseudo_register_read_spu (regcache, "lslr", buf);
211 break;
212
213 case SPU_DECR_REGNUM:
214 spu_pseudo_register_read_spu (regcache, "decr", buf);
215 break;
216
217 case SPU_DECR_STATUS_REGNUM:
218 spu_pseudo_register_read_spu (regcache, "decr_status", buf);
219 break;
220
221 default:
222 internal_error (__FILE__, __LINE__, _("invalid regnum"));
223 }
224 }
225
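/* Counterpart of spu_pseudo_register_read_spu: write the 32-bit value
   in BUF, formatted as a hexadecimal ASCII string, to the SPU
   special-purpose register REGNAME. */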
226 static void
227 spu_pseudo_register_write_spu (struct regcache *regcache, const char *regname,
228 const gdb_byte *buf)
229 {
230 gdb_byte reg[32];
231 char annex[32];
232 ULONGEST id;
233
234 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
235 xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
236 xsnprintf (reg, sizeof reg, "0x%s",
237 phex_nz (extract_unsigned_integer (buf, 4), 4));
238 target_write (&current_target, TARGET_OBJECT_SPU, annex,
239 reg, 0, strlen (reg));
240 }
241
242 static void
243 spu_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
244 int regnum, const gdb_byte *buf)
245 {
246 gdb_byte reg[16];
247 char annex[32];
248 ULONGEST id;
249
250 switch (regnum)
251 {
252 case SPU_SP_REGNUM:
253 regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
254 memcpy (reg, buf, 4);
255 regcache_raw_write (regcache, SPU_RAW_SP_REGNUM, reg);
256 break;
257
258 case SPU_FPSCR_REGNUM:
259 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
260 xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
261 target_write (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
262 break;
263
264 case SPU_SRR0_REGNUM:
265 spu_pseudo_register_write_spu (regcache, "srr0", buf);
266 break;
267
268 case SPU_LSLR_REGNUM:
269 spu_pseudo_register_write_spu (regcache, "lslr", buf);
270 break;
271
272 case SPU_DECR_REGNUM:
273 spu_pseudo_register_write_spu (regcache, "decr", buf);
274 break;
275
276 case SPU_DECR_STATUS_REGNUM:
277 spu_pseudo_register_write_spu (regcache, "decr_status", buf);
278 break;
279
280 default:
281 internal_error (__FILE__, __LINE__, _("invalid regnum"));
282 }
283 }
284
285 /* Value conversion -- access scalar values at the preferred slot. */
286
287 static struct value *
288 spu_value_from_register (struct type *type, int regnum,
289 struct frame_info *frame)
290 {
291 struct value *value = default_value_from_register (type, regnum, frame);
292 int len = TYPE_LENGTH (type);
293
294 if (regnum < SPU_NUM_GPRS && len < 16)
295 {
296 int preferred_slot = len < 4 ? 4 - len : 0;
297 set_value_offset (value, preferred_slot);
298 }
299
300 return value;
301 }
302
303 /* Register groups. */
304
305 static int
306 spu_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
307 struct reggroup *group)
308 {
309 /* Registers displayed via 'info regs'. */
310 if (group == general_reggroup)
311 return 1;
312
313 /* Registers displayed via 'info float'. */
314 if (group == float_reggroup)
315 return 0;
316
317 /* Registers that need to be saved/restored in order to
318 push or pop frames. */
319 if (group == save_reggroup || group == restore_reggroup)
320 return 1;
321
322 return default_register_reggroup_p (gdbarch, regnum, group);
323 }
324
325 /* Address conversion. */
326
327 static CORE_ADDR
328 spu_pointer_to_address (struct type *type, const gdb_byte *buf)
329 {
330 ULONGEST addr = extract_unsigned_integer (buf, TYPE_LENGTH (type));
331 ULONGEST lslr = SPU_LS_SIZE - 1; /* Hard-wired LS size. */
332
333 if (target_has_registers && target_has_stack && target_has_memory)
334 lslr = get_frame_register_unsigned (get_selected_frame (NULL),
335 SPU_LSLR_REGNUM);
336
337 return addr & lslr;
338 }
339
340 static CORE_ADDR
341 spu_integer_to_address (struct gdbarch *gdbarch,
342 struct type *type, const gdb_byte *buf)
343 {
344 ULONGEST addr = unpack_long (type, buf);
345 ULONGEST lslr = SPU_LS_SIZE - 1; /* Hard-wired LS size. */
346
347 if (target_has_registers && target_has_stack && target_has_memory)
348 lslr = get_frame_register_unsigned (get_selected_frame (NULL),
349 SPU_LSLR_REGNUM);
350
351 return addr & lslr;
352 }
353
354
355 /* Decoding SPU instructions. */
356
357 enum
358 {
359 op_lqd = 0x34,
360 op_lqx = 0x3c4,
361 op_lqa = 0x61,
362 op_lqr = 0x67,
363 op_stqd = 0x24,
364 op_stqx = 0x144,
365 op_stqa = 0x41,
366 op_stqr = 0x47,
367
368 op_il = 0x081,
369 op_ila = 0x21,
370 op_a = 0x0c0,
371 op_ai = 0x1c,
372
373 op_selb = 0x4,
374
375 op_br = 0x64,
376 op_bra = 0x60,
377 op_brsl = 0x66,
378 op_brasl = 0x62,
379 op_brnz = 0x42,
380 op_brz = 0x40,
381 op_brhnz = 0x46,
382 op_brhz = 0x44,
383 op_bi = 0x1a8,
384 op_bisl = 0x1a9,
385 op_biz = 0x128,
386 op_binz = 0x129,
387 op_bihz = 0x12a,
388 op_bihnz = 0x12b,
389 };
390
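/* Instruction format decoders.  Each helper compares the opcode field
   of INSN against OP and, on a match, extracts the register and
   immediate operands into the output arguments and returns 1.
   Immediate fields are sign-extended.  On a mismatch, 0 is returned. */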
391 static int
392 is_rr (unsigned int insn, int op, int *rt, int *ra, int *rb)
393 {
394 if ((insn >> 21) == op)
395 {
396 *rt = insn & 127;
397 *ra = (insn >> 7) & 127;
398 *rb = (insn >> 14) & 127;
399 return 1;
400 }
401
402 return 0;
403 }
404
405 static int
406 is_rrr (unsigned int insn, int op, int *rt, int *ra, int *rb, int *rc)
407 {
408 if ((insn >> 28) == op)
409 {
410 *rt = (insn >> 21) & 127;
411 *ra = (insn >> 7) & 127;
412 *rb = (insn >> 14) & 127;
413 *rc = insn & 127;
414 return 1;
415 }
416
417 return 0;
418 }
419
420 static int
421 is_ri7 (unsigned int insn, int op, int *rt, int *ra, int *i7)
422 {
423 if ((insn >> 21) == op)
424 {
425 *rt = insn & 127;
426 *ra = (insn >> 7) & 127;
427 *i7 = (((insn >> 14) & 127) ^ 0x40) - 0x40;
428 return 1;
429 }
430
431 return 0;
432 }
433
434 static int
435 is_ri10 (unsigned int insn, int op, int *rt, int *ra, int *i10)
436 {
437 if ((insn >> 24) == op)
438 {
439 *rt = insn & 127;
440 *ra = (insn >> 7) & 127;
441 *i10 = (((insn >> 14) & 0x3ff) ^ 0x200) - 0x200;
442 return 1;
443 }
444
445 return 0;
446 }
447
448 static int
449 is_ri16 (unsigned int insn, int op, int *rt, int *i16)
450 {
451 if ((insn >> 23) == op)
452 {
453 *rt = insn & 127;
454 *i16 = (((insn >> 7) & 0xffff) ^ 0x8000) - 0x8000;
455 return 1;
456 }
457
458 return 0;
459 }
460
461 static int
462 is_ri18 (unsigned int insn, int op, int *rt, int *i18)
463 {
464 if ((insn >> 25) == op)
465 {
466 *rt = insn & 127;
467 *i18 = (((insn >> 7) & 0x3ffff) ^ 0x20000) - 0x20000;
468 return 1;
469 }
470
471 return 0;
472 }
473
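/* Classify INSN as a branch.  Return 1 if it is one, setting *OFFSET
   and *REG so the branch target can be computed: for PC-relative
   branches *REG is SPU_PC_REGNUM and *OFFSET is the byte displacement;
   for absolute branches *REG is -1 and *OFFSET is the target address;
   for register-indirect branches *REG is the register holding the
   target and *OFFSET is 0. */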
474 static int
475 is_branch (unsigned int insn, int *offset, int *reg)
476 {
477 int rt, i7, i16;
478
479 if (is_ri16 (insn, op_br, &rt, &i16)
480 || is_ri16 (insn, op_brsl, &rt, &i16)
481 || is_ri16 (insn, op_brnz, &rt, &i16)
482 || is_ri16 (insn, op_brz, &rt, &i16)
483 || is_ri16 (insn, op_brhnz, &rt, &i16)
484 || is_ri16 (insn, op_brhz, &rt, &i16))
485 {
486 *reg = SPU_PC_REGNUM;
487 *offset = i16 << 2;
488 return 1;
489 }
490
491 if (is_ri16 (insn, op_bra, &rt, &i16)
492 || is_ri16 (insn, op_brasl, &rt, &i16))
493 {
494 *reg = -1;
495 *offset = i16 << 2;
496 return 1;
497 }
498
499 if (is_ri7 (insn, op_bi, &rt, reg, &i7)
500 || is_ri7 (insn, op_bisl, &rt, reg, &i7)
501 || is_ri7 (insn, op_biz, &rt, reg, &i7)
502 || is_ri7 (insn, op_binz, &rt, reg, &i7)
503 || is_ri7 (insn, op_bihz, &rt, reg, &i7)
504 || is_ri7 (insn, op_bihnz, &rt, reg, &i7))
505 {
506 *offset = 0;
507 return 1;
508 }
509
510 return 0;
511 }
512
513
514 /* Prolog parsing. */
515
516 struct spu_prologue_data
517 {
518 /* Stack frame size. -1 if analysis was unsuccessful. */
519 int size;
520
521 /* How to find the CFA. The CFA is equal to SP at function entry. */
522 int cfa_reg;
523 int cfa_offset;
524
525 /* Offset relative to CFA where a register is saved. -1 if invalid. */
526 int reg_offset[SPU_NUM_GPRS];
527 };
528
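/* Analyze the prologue of the function starting at START_PC, scanning
   no further than END_PC.  Fill in DATA with the frame size, CFA
   location, and register save slots found, and return the address of
   the first instruction following the prologue. */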
529 static CORE_ADDR
530 spu_analyze_prologue (CORE_ADDR start_pc, CORE_ADDR end_pc,
531 struct spu_prologue_data *data)
532 {
533 int found_sp = 0;
534 int found_fp = 0;
535 int found_lr = 0;
536 int reg_immed[SPU_NUM_GPRS];
537 gdb_byte buf[16];
538 CORE_ADDR prolog_pc = start_pc;
539 CORE_ADDR pc;
540 int i;
541
542
543 /* Initialize DATA to default values. */
544 data->size = -1;
545
546 data->cfa_reg = SPU_RAW_SP_REGNUM;
547 data->cfa_offset = 0;
548
549 for (i = 0; i < SPU_NUM_GPRS; i++)
550 data->reg_offset[i] = -1;
551
552 /* Set up REG_IMMED array. This is non-zero for a register if we know its
553 preferred slot currently holds this immediate value. */
554 for (i = 0; i < SPU_NUM_GPRS; i++)
555 reg_immed[i] = 0;
556
557 /* Scan instructions until the first branch.
558
559 The following instructions are important prolog components:
560
561 - The first instruction to set up the stack pointer.
562 - The first instruction to set up the frame pointer.
563 - The first instruction to save the link register.
564
565 We return the instruction after the latest of these three,
566 or the incoming PC if none is found. The first instruction
567 to set up the stack pointer also defines the frame size.
568
569 Note that instructions saving incoming arguments to their stack
570 slots are not counted as important, because they are hard to
571 identify with certainty. This should not matter much, because
572 arguments are relevant only in code compiled with debug data,
573 and in such code the GDB core will advance until the first source
574 line anyway, using SAL data.
575
576 For purposes of stack unwinding, we analyze the following types
577 of instructions in addition:
578
579 - Any instruction adding to the current frame pointer.
580 - Any instruction loading an immediate constant into a register.
581 - Any instruction storing a register onto the stack.
582
583 These are used to compute the CFA and REG_OFFSET output. */
584
585 for (pc = start_pc; pc < end_pc; pc += 4)
586 {
587 unsigned int insn;
588 int rt, ra, rb, rc, immed;
589
590 if (target_read_memory (pc, buf, 4))
591 break;
592 insn = extract_unsigned_integer (buf, 4);
593
594 /* AI is the typical instruction to set up a stack frame.
595 It is also used to initialize the frame pointer. */
596 if (is_ri10 (insn, op_ai, &rt, &ra, &immed))
597 {
598 if (rt == data->cfa_reg && ra == data->cfa_reg)
599 data->cfa_offset -= immed;
600
601 if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
602 && !found_sp)
603 {
604 found_sp = 1;
605 prolog_pc = pc + 4;
606
607 data->size = -immed;
608 }
609 else if (rt == SPU_FP_REGNUM && ra == SPU_RAW_SP_REGNUM
610 && !found_fp)
611 {
612 found_fp = 1;
613 prolog_pc = pc + 4;
614
615 data->cfa_reg = SPU_FP_REGNUM;
616 data->cfa_offset -= immed;
617 }
618 }
619
620 /* A is used to set up stack frames of size >= 512 bytes.
621 If we have tracked the contents of the addend register,
622 we can handle this as well. */
623 else if (is_rr (insn, op_a, &rt, &ra, &rb))
624 {
625 if (rt == data->cfa_reg && ra == data->cfa_reg)
626 {
627 if (reg_immed[rb] != 0)
628 data->cfa_offset -= reg_immed[rb];
629 else
630 data->cfa_reg = -1; /* We don't know the CFA any more. */
631 }
632
633 if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
634 && !found_sp)
635 {
636 found_sp = 1;
637 prolog_pc = pc + 4;
638
639 if (reg_immed[rb] != 0)
640 data->size = -reg_immed[rb];
641 }
642 }
643
644 /* We need to track IL and ILA used to load immediate constants
645 in case they are later used as input to an A instruction. */
646 else if (is_ri16 (insn, op_il, &rt, &immed))
647 {
648 reg_immed[rt] = immed;
649
650 if (rt == SPU_RAW_SP_REGNUM && !found_sp)
651 found_sp = 1;
652 }
653
654 else if (is_ri18 (insn, op_ila, &rt, &immed))
655 {
656 reg_immed[rt] = immed & 0x3ffff;
657
658 if (rt == SPU_RAW_SP_REGNUM && !found_sp)
659 found_sp = 1;
660 }
661
662 /* STQD is used to save registers to the stack. */
663 else if (is_ri10 (insn, op_stqd, &rt, &ra, &immed))
664 {
665 if (ra == data->cfa_reg)
666 data->reg_offset[rt] = data->cfa_offset - (immed << 4);
667
668 if (ra == data->cfa_reg && rt == SPU_LR_REGNUM
669 && !found_lr)
670 {
671 found_lr = 1;
672 prolog_pc = pc + 4;
673 }
674 }
675
676 /* _start uses SELB to set up the stack pointer. */
677 else if (is_rrr (insn, op_selb, &rt, &ra, &rb, &rc))
678 {
679 if (rt == SPU_RAW_SP_REGNUM && !found_sp)
680 found_sp = 1;
681 }
682
683 /* We terminate if we find a branch. */
684 else if (is_branch (insn, &immed, &ra))
685 break;
686 }
687
688
689 /* If we successfully parsed until here, and didn't find any instruction
690 modifying SP, we assume we have a frameless function. */
691 if (!found_sp)
692 data->size = 0;
693
694 /* Return cooked instead of raw SP. */
695 if (data->cfa_reg == SPU_RAW_SP_REGNUM)
696 data->cfa_reg = SPU_SP_REGNUM;
697
698 return prolog_pc;
699 }
700
701 /* Return the first instruction after the prologue starting at PC. */
702 static CORE_ADDR
703 spu_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
704 {
705 struct spu_prologue_data data;
706 return spu_analyze_prologue (pc, (CORE_ADDR)-1, &data);
707 }
708
709 /* Return the frame pointer in use at address PC. */
710 static void
711 spu_virtual_frame_pointer (struct gdbarch *gdbarch, CORE_ADDR pc,
712 int *reg, LONGEST *offset)
713 {
714 struct spu_prologue_data data;
715 spu_analyze_prologue (pc, (CORE_ADDR)-1, &data);
716
717 if (data.size != -1 && data.cfa_reg != -1)
718 {
719 /* The 'frame pointer' address is CFA minus frame size. */
720 *reg = data.cfa_reg;
721 *offset = data.cfa_offset - data.size;
722 }
723 else
724 {
725 /* ??? We don't really know ... */
726 *reg = SPU_SP_REGNUM;
727 *offset = 0;
728 }
729 }
730
731 /* Return true if we are in the function's epilogue, i.e. after the
732 instruction that destroyed the function's stack frame.
733
734 1) scan forward from the point of execution:
735 a) If you find an instruction that modifies the stack pointer
736 or transfers control (except a return), execution is not in
737 an epilogue, return.
738 b) Stop scanning if you find a return instruction or reach the
739 end of the function or reach the hard limit for the size of
740 an epilogue.
741 2) scan backward from the point of execution:
742 a) If you find an instruction that modifies the stack pointer,
743 execution *is* in an epilogue, return.
744 b) Stop scanning if you reach an instruction that transfers
745 control or the beginning of the function or reach the hard
746 limit for the size of an epilogue. */
747
748 static int
749 spu_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
750 {
751 CORE_ADDR scan_pc, func_start, func_end, epilogue_start, epilogue_end;
752 bfd_byte buf[4];
753 unsigned int insn;
754 int rt, ra, rb, rc, immed;
755
756 /* Find the search limits based on function boundaries and hard limit.
757 We assume the epilogue can be up to 64 instructions long. */
758
759 const int spu_max_epilogue_size = 64 * 4;
760
761 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
762 return 0;
763
764 if (pc - func_start < spu_max_epilogue_size)
765 epilogue_start = func_start;
766 else
767 epilogue_start = pc - spu_max_epilogue_size;
768
769 if (func_end - pc < spu_max_epilogue_size)
770 epilogue_end = func_end;
771 else
772 epilogue_end = pc + spu_max_epilogue_size;
773
774 /* Scan forward until next 'bi $0'. */
775
776 for (scan_pc = pc; scan_pc < epilogue_end; scan_pc += 4)
777 {
778 if (target_read_memory (scan_pc, buf, 4))
779 return 0;
780 insn = extract_unsigned_integer (buf, 4);
781
782 if (is_branch (insn, &immed, &ra))
783 {
784 if (immed == 0 && ra == SPU_LR_REGNUM)
785 break;
786
787 return 0;
788 }
789
790 if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
791 || is_rr (insn, op_a, &rt, &ra, &rb)
792 || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
793 {
794 if (rt == SPU_RAW_SP_REGNUM)
795 return 0;
796 }
797 }
798
799 if (scan_pc >= epilogue_end)
800 return 0;
801
802 /* Scan backward until adjustment to stack pointer (R1). */
803
804 for (scan_pc = pc - 4; scan_pc >= epilogue_start; scan_pc -= 4)
805 {
806 if (target_read_memory (scan_pc, buf, 4))
807 return 0;
808 insn = extract_unsigned_integer (buf, 4);
809
810 if (is_branch (insn, &immed, &ra))
811 return 0;
812
813 if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
814 || is_rr (insn, op_a, &rt, &ra, &rb)
815 || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
816 {
817 if (rt == SPU_RAW_SP_REGNUM)
818 return 1;
819 }
820 }
821
822 return 0;
823 }
824
825
826 /* Normal stack frames. */
827
828 struct spu_unwind_cache
829 {
830 CORE_ADDR func;
831 CORE_ADDR frame_base;
832 CORE_ADDR local_base;
833
834 struct trad_frame_saved_reg *saved_regs;
835 };
836
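/* Fill in and return the unwind cache for THIS_FRAME.  Prologue
   analysis is preferred; if it fails, fall back to following the
   stack back chain stored at the current stack pointer. */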
837 static struct spu_unwind_cache *
838 spu_frame_unwind_cache (struct frame_info *this_frame,
839 void **this_prologue_cache)
840 {
841 struct spu_unwind_cache *info;
842 struct spu_prologue_data data;
843 gdb_byte buf[16];
844
845 if (*this_prologue_cache)
846 return *this_prologue_cache;
847
848 info = FRAME_OBSTACK_ZALLOC (struct spu_unwind_cache);
849 *this_prologue_cache = info;
850 info->saved_regs = trad_frame_alloc_saved_regs (this_frame);
851 info->frame_base = 0;
852 info->local_base = 0;
853
854 /* Find the start of the current function, and analyze its prologue. */
855 info->func = get_frame_func (this_frame);
856 if (info->func == 0)
857 {
858 /* Fall back to using the current PC as frame ID. */
859 info->func = get_frame_pc (this_frame);
860 data.size = -1;
861 }
862 else
863 spu_analyze_prologue (info->func, get_frame_pc (this_frame), &data);
864
865
866 /* If successful, use prologue analysis data. */
867 if (data.size != -1 && data.cfa_reg != -1)
868 {
869 CORE_ADDR cfa;
870 int i;
871
872 /* Determine CFA via unwound CFA_REG plus CFA_OFFSET. */
873 get_frame_register (this_frame, data.cfa_reg, buf);
874 cfa = extract_unsigned_integer (buf, 4) + data.cfa_offset;
875
876 /* Call-saved register slots. */
877 for (i = 0; i < SPU_NUM_GPRS; i++)
878 if (i == SPU_LR_REGNUM
879 || (i >= SPU_SAVED1_REGNUM && i <= SPU_SAVEDN_REGNUM))
880 if (data.reg_offset[i] != -1)
881 info->saved_regs[i].addr = cfa - data.reg_offset[i];
882
883 /* Frame bases. */
884 info->frame_base = cfa;
885 info->local_base = cfa - data.size;
886 }
887
888 /* Otherwise, fall back to reading the backchain link. */
889 else
890 {
891 CORE_ADDR reg;
892 LONGEST backchain;
893 int status;
894
895 /* Get the backchain. */
896 reg = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
897 status = safe_read_memory_integer (reg, 4, &backchain);
898
899 /* A zero backchain terminates the frame chain. Also, sanity
900 check against the local store size limit. */
901 if (status && backchain > 0 && backchain < SPU_LS_SIZE)
902 {
903 /* Assume the link register is saved into its slot. */
904 if (backchain + 16 < SPU_LS_SIZE)
905 info->saved_regs[SPU_LR_REGNUM].addr = backchain + 16;
906
907 /* Frame bases. */
908 info->frame_base = backchain;
909 info->local_base = reg;
910 }
911 }
912
913 /* If we didn't find a frame, we cannot determine SP / return address. */
914 if (info->frame_base == 0)
915 return info;
916
917 /* The previous SP is equal to the CFA. */
918 trad_frame_set_value (info->saved_regs, SPU_SP_REGNUM, info->frame_base);
919
920 /* Read full contents of the unwound link register in order to
921 be able to determine the return address. */
922 if (trad_frame_addr_p (info->saved_regs, SPU_LR_REGNUM))
923 target_read_memory (info->saved_regs[SPU_LR_REGNUM].addr, buf, 16);
924 else
925 get_frame_register (this_frame, SPU_LR_REGNUM, buf);
926
927 /* Normally, the return address is contained in the slot 0 of the
928 link register, and slots 1-3 are zero. For an overlay return,
929 slot 0 contains the address of the overlay manager return stub,
930 slot 1 contains the partition number of the overlay section to
931 be returned to, and slot 2 contains the return address within
932 that section. Return the latter address in that case. */
933 if (extract_unsigned_integer (buf + 8, 4) != 0)
934 trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
935 extract_unsigned_integer (buf + 8, 4));
936 else
937 trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
938 extract_unsigned_integer (buf, 4));
939
940 return info;
941 }
942
943 static void
944 spu_frame_this_id (struct frame_info *this_frame,
945 void **this_prologue_cache, struct frame_id *this_id)
946 {
947 struct spu_unwind_cache *info =
948 spu_frame_unwind_cache (this_frame, this_prologue_cache);
949
950 if (info->frame_base == 0)
951 return;
952
953 *this_id = frame_id_build (info->frame_base, info->func);
954 }
955
956 static struct value *
957 spu_frame_prev_register (struct frame_info *this_frame,
958 void **this_prologue_cache, int regnum)
959 {
960 struct spu_unwind_cache *info
961 = spu_frame_unwind_cache (this_frame, this_prologue_cache);
962
963 /* Special-case the stack pointer. */
964 if (regnum == SPU_RAW_SP_REGNUM)
965 regnum = SPU_SP_REGNUM;
966
967 return trad_frame_get_prev_register (this_frame, info->saved_regs, regnum);
968 }
969
970 static const struct frame_unwind spu_frame_unwind = {
971 NORMAL_FRAME,
972 spu_frame_this_id,
973 spu_frame_prev_register,
974 NULL,
975 default_frame_sniffer
976 };
977
978 static CORE_ADDR
979 spu_frame_base_address (struct frame_info *this_frame, void **this_cache)
980 {
981 struct spu_unwind_cache *info
982 = spu_frame_unwind_cache (this_frame, this_cache);
983 return info->local_base;
984 }
985
986 static const struct frame_base spu_frame_base = {
987 &spu_frame_unwind,
988 spu_frame_base_address,
989 spu_frame_base_address,
990 spu_frame_base_address
991 };
992
993 static CORE_ADDR
994 spu_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
995 {
996 CORE_ADDR pc = frame_unwind_register_unsigned (next_frame, SPU_PC_REGNUM);
997 /* Mask off interrupt enable bit. */
998 return pc & -4;
999 }
1000
1001 static CORE_ADDR
1002 spu_unwind_sp (struct gdbarch *gdbarch, struct frame_info *next_frame)
1003 {
1004 return frame_unwind_register_unsigned (next_frame, SPU_SP_REGNUM);
1005 }
1006
1007 static CORE_ADDR
1008 spu_read_pc (struct regcache *regcache)
1009 {
1010 ULONGEST pc;
1011 regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &pc);
1012 /* Mask off interrupt enable bit. */
1013 return pc & -4;
1014 }
1015
1016 static void
1017 spu_write_pc (struct regcache *regcache, CORE_ADDR pc)
1018 {
1019 /* Keep interrupt enabled state unchanged. */
1020 ULONGEST old_pc;
1021 regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &old_pc);
1022 regcache_cooked_write_unsigned (regcache, SPU_PC_REGNUM,
1023 (pc & -4) | (old_pc & 3));
1024 }
1025
1026
1027 /* Function calling convention. */
1028
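/* Round SP down to the 16-byte (quadword) alignment used for SPU
   stack frames. */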
1029 static CORE_ADDR
1030 spu_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1031 {
1032 return sp & ~15;
1033 }
1034
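/* Return non-zero if TYPE is a scalar passed in the preferred slot of
   a single register: integers, enums, pointers and the like of at
   most 16 bytes. */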
1035 static int
1036 spu_scalar_value_p (struct type *type)
1037 {
1038 switch (TYPE_CODE (type))
1039 {
1040 case TYPE_CODE_INT:
1041 case TYPE_CODE_ENUM:
1042 case TYPE_CODE_RANGE:
1043 case TYPE_CODE_CHAR:
1044 case TYPE_CODE_BOOL:
1045 case TYPE_CODE_PTR:
1046 case TYPE_CODE_REF:
1047 return TYPE_LENGTH (type) <= 16;
1048
1049 default:
1050 return 0;
1051 }
1052 }
1053
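/* Write the value IN of type TYPE into the register file starting at
   register REGNUM.  Scalars go into the preferred slot; larger values
   are spread across consecutive 16-byte registers. */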
1054 static void
1055 spu_value_to_regcache (struct regcache *regcache, int regnum,
1056 struct type *type, const gdb_byte *in)
1057 {
1058 int len = TYPE_LENGTH (type);
1059
1060 if (spu_scalar_value_p (type))
1061 {
1062 int preferred_slot = len < 4 ? 4 - len : 0;
1063 regcache_cooked_write_part (regcache, regnum, preferred_slot, len, in);
1064 }
1065 else
1066 {
1067 while (len >= 16)
1068 {
1069 regcache_cooked_write (regcache, regnum++, in);
1070 in += 16;
1071 len -= 16;
1072 }
1073
1074 if (len > 0)
1075 regcache_cooked_write_part (regcache, regnum, 0, len, in);
1076 }
1077 }
1078
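/* Inverse of spu_value_to_regcache: read a value of type TYPE from
   the register file starting at REGNUM into OUT. */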
1079 static void
1080 spu_regcache_to_value (struct regcache *regcache, int regnum,
1081 struct type *type, gdb_byte *out)
1082 {
1083 int len = TYPE_LENGTH (type);
1084
1085 if (spu_scalar_value_p (type))
1086 {
1087 int preferred_slot = len < 4 ? 4 - len : 0;
1088 regcache_cooked_read_part (regcache, regnum, preferred_slot, len, out);
1089 }
1090 else
1091 {
1092 while (len >= 16)
1093 {
1094 regcache_cooked_read (regcache, regnum++, out);
1095 out += 16;
1096 len -= 16;
1097 }
1098
1099 if (len > 0)
1100 regcache_cooked_read_part (regcache, regnum, 0, len, out);
1101 }
1102 }
1103
1104 static CORE_ADDR
1105 spu_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1106 struct regcache *regcache, CORE_ADDR bp_addr,
1107 int nargs, struct value **args, CORE_ADDR sp,
1108 int struct_return, CORE_ADDR struct_addr)
1109 {
1110 CORE_ADDR sp_delta;
1111 int i;
1112 int regnum = SPU_ARG1_REGNUM;
1113 int stack_arg = -1;
1114 gdb_byte buf[16];
1115
1116 /* Set the return address. */
1117 memset (buf, 0, sizeof buf);
1118 store_unsigned_integer (buf, 4, bp_addr);
1119 regcache_cooked_write (regcache, SPU_LR_REGNUM, buf);
1120
1121 /* If STRUCT_RETURN is true, then the struct return address (in
1122 STRUCT_ADDR) will consume the first argument-passing register.
1123 Both adjust the register count and store that value. */
1124 if (struct_return)
1125 {
1126 memset (buf, 0, sizeof buf);
1127 store_unsigned_integer (buf, 4, struct_addr);
1128 regcache_cooked_write (regcache, regnum++, buf);
1129 }
1130
1131 /* Fill in argument registers. */
1132 for (i = 0; i < nargs; i++)
1133 {
1134 struct value *arg = args[i];
1135 struct type *type = check_typedef (value_type (arg));
1136 const gdb_byte *contents = value_contents (arg);
1137 int len = TYPE_LENGTH (type);
1138 int n_regs = align_up (len, 16) / 16;
1139
1140 /* If the argument doesn't wholly fit into registers, it and
1141 all subsequent arguments go to the stack. */
1142 if (regnum + n_regs - 1 > SPU_ARGN_REGNUM)
1143 {
1144 stack_arg = i;
1145 break;
1146 }
1147
1148 spu_value_to_regcache (regcache, regnum, type, contents);
1149 regnum += n_regs;
1150 }
1151
1152 /* Overflow arguments go to the stack. */
1153 if (stack_arg != -1)
1154 {
1155 CORE_ADDR ap;
1156
1157 /* Allocate all required stack size. */
1158 for (i = stack_arg; i < nargs; i++)
1159 {
1160 struct type *type = check_typedef (value_type (args[i]));
1161 sp -= align_up (TYPE_LENGTH (type), 16);
1162 }
1163
1164 /* Fill in stack arguments. */
1165 ap = sp;
1166 for (i = stack_arg; i < nargs; i++)
1167 {
1168 struct value *arg = args[i];
1169 struct type *type = check_typedef (value_type (arg));
1170 int len = TYPE_LENGTH (type);
1171 int preferred_slot;
1172
1173 if (spu_scalar_value_p (type))
1174 preferred_slot = len < 4 ? 4 - len : 0;
1175 else
1176 preferred_slot = 0;
1177
1178 target_write_memory (ap + preferred_slot, value_contents (arg), len);
1179 ap += align_up (TYPE_LENGTH (type), 16);
1180 }
1181 }
1182
1183 /* Allocate stack frame header. */
1184 sp -= 32;
1185
1186 /* Store stack back chain. */
1187 regcache_cooked_read (regcache, SPU_RAW_SP_REGNUM, buf);
1188 target_write_memory (sp, buf, 16);
1189
1190 /* Finally, update all slots of the SP register. */
1191 sp_delta = sp - extract_unsigned_integer (buf, 4);
1192 for (i = 0; i < 4; i++)
1193 {
1194 CORE_ADDR sp_slot = extract_unsigned_integer (buf + 4*i, 4);
1195 store_unsigned_integer (buf + 4*i, 4, sp_slot + sp_delta);
1196 }
1197 regcache_cooked_write (regcache, SPU_RAW_SP_REGNUM, buf);
1198
1199 return sp;
1200 }
1201
1202 static struct frame_id
1203 spu_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1204 {
1205 CORE_ADDR pc = get_frame_register_unsigned (this_frame, SPU_PC_REGNUM);
1206 CORE_ADDR sp = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
1207 return frame_id_build (sp, pc & -4);
1208 }
1209
1210 /* Function return value access. */
1211
1212 static enum return_value_convention
1213 spu_return_value (struct gdbarch *gdbarch, struct type *func_type,
1214 struct type *type, struct regcache *regcache,
1215 gdb_byte *out, const gdb_byte *in)
1216 {
1217 enum return_value_convention rvc;
1218
1219 if (TYPE_LENGTH (type) <= (SPU_ARGN_REGNUM - SPU_ARG1_REGNUM + 1) * 16)
1220 rvc = RETURN_VALUE_REGISTER_CONVENTION;
1221 else
1222 rvc = RETURN_VALUE_STRUCT_CONVENTION;
1223
1224 if (in)
1225 {
1226 switch (rvc)
1227 {
1228 case RETURN_VALUE_REGISTER_CONVENTION:
1229 spu_value_to_regcache (regcache, SPU_ARG1_REGNUM, type, in);
1230 break;
1231
1232 case RETURN_VALUE_STRUCT_CONVENTION:
1233 error ("Cannot set function return value.");
1234 break;
1235 }
1236 }
1237 else if (out)
1238 {
1239 switch (rvc)
1240 {
1241 case RETURN_VALUE_REGISTER_CONVENTION:
1242 spu_regcache_to_value (regcache, SPU_ARG1_REGNUM, type, out);
1243 break;
1244
1245 case RETURN_VALUE_STRUCT_CONVENTION:
1246 error ("Function return value unknown.");
1247 break;
1248 }
1249 }
1250
1251 return rvc;
1252 }
1253
1254
1255 /* Breakpoints. */
1256
1257 static const gdb_byte *
1258 spu_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR * pcptr, int *lenptr)
1259 {
1260 static const gdb_byte breakpoint[] = { 0x00, 0x00, 0x3f, 0xff };
1261
1262 *lenptr = sizeof breakpoint;
1263 return breakpoint;
1264 }
1265
1266
1267 /* Software single-stepping support. */
1268
1269 int
1270 spu_software_single_step (struct frame_info *frame)
1271 {
1272 CORE_ADDR pc, next_pc;
1273 unsigned int insn;
1274 int offset, reg;
1275 gdb_byte buf[4];
1276
1277 pc = get_frame_pc (frame);
1278
1279 if (target_read_memory (pc, buf, 4))
1280 return 1;
1281 insn = extract_unsigned_integer (buf, 4);
1282
1283 /* Next sequential instruction is at PC + 4, except if the current
1284 instruction is a PPE-assisted call, in which case it is at PC + 8.
1285 Wrap around LS limit to be on the safe side. */
1286 if ((insn & 0xffffff00) == 0x00002100)
1287 next_pc = (pc + 8) & (SPU_LS_SIZE - 1);
1288 else
1289 next_pc = (pc + 4) & (SPU_LS_SIZE - 1);
1290
1291 insert_single_step_breakpoint (next_pc);
1292
1293 if (is_branch (insn, &offset, &reg))
1294 {
1295 CORE_ADDR target = offset;
1296
1297 if (reg == SPU_PC_REGNUM)
1298 target += pc;
1299 else if (reg != -1)
1300 {
1301 get_frame_register_bytes (frame, reg, 0, 4, buf);
1302 target += extract_unsigned_integer (buf, 4) & -4;
1303 }
1304
1305 target = target & (SPU_LS_SIZE - 1);
1306 if (target != next_pc)
1307 insert_single_step_breakpoint (target);
1308 }
1309
1310 return 1;
1311 }
1312
1313 /* Target overlays for the SPU overlay manager.
1314
1315 See the documentation of simple_overlay_update for how the
1316 interface is supposed to work.
1317
1318 Data structures used by the overlay manager:
1319
1320 struct ovly_table
1321 {
1322 u32 vma;
1323 u32 size;
1324 u32 pos;
1325 u32 buf;
1326 } _ovly_table[]; -- one entry per overlay section
1327
1328 struct ovly_buf_table
1329 {
1330 u32 mapped;
1331 } _ovly_buf_table[]; -- one entry per overlay buffer
1332
1333 _ovly_table should never change.
1334
1335 Both tables are aligned to a 16-byte boundary, the symbols _ovly_table
1336 and _ovly_buf_table are of type STT_OBJECT and their size set to the size
1337 of the respective array. buf in _ovly_table is an index into _ovly_buf_table.
1338
1339 mapped is an index into _ovly_table. Both the mapped and buf indices start
1340 from one to reference the first entry in their respective tables. */
1341
1342 /* Using the per-objfile private data mechanism, we store for each
1343 objfile an array of "struct spu_overlay_table" structures, one
1344 for each obj_section of the objfile. This structure holds two
1345 fields, MAPPED_PTR and MAPPED_VAL. If MAPPED_PTR is zero, this
1346 is *not* an overlay section. If it is non-zero, it represents
1347 a target address. The overlay section is mapped iff the target
1348 integer at this location equals MAPPED_VAL. */
1349
1350 static const struct objfile_data *spu_overlay_data;
1351
1352 struct spu_overlay_table
1353 {
1354 CORE_ADDR mapped_ptr;
1355 CORE_ADDR mapped_val;
1356 };
1357
1358 /* Retrieve the overlay table for OBJFILE. If not already cached, read
1359 the _ovly_table data structure from the target and initialize the
1360 spu_overlay_table data structure from it. */
1361 static struct spu_overlay_table *
1362 spu_get_overlay_table (struct objfile *objfile)
1363 {
1364 struct minimal_symbol *ovly_table_msym, *ovly_buf_table_msym;
1365 CORE_ADDR ovly_table_base, ovly_buf_table_base;
1366 unsigned ovly_table_size, ovly_buf_table_size;
1367 struct spu_overlay_table *tbl;
1368 struct obj_section *osect;
1369 char *ovly_table;
1370 int i;
1371
1372 tbl = objfile_data (objfile, spu_overlay_data);
1373 if (tbl)
1374 return tbl;
1375
1376 ovly_table_msym = lookup_minimal_symbol ("_ovly_table", NULL, objfile);
1377 if (!ovly_table_msym)
1378 return NULL;
1379
1380 ovly_buf_table_msym = lookup_minimal_symbol ("_ovly_buf_table", NULL, objfile);
1381 if (!ovly_buf_table_msym)
1382 return NULL;
1383
1384 ovly_table_base = SYMBOL_VALUE_ADDRESS (ovly_table_msym);
1385 ovly_table_size = MSYMBOL_SIZE (ovly_table_msym);
1386
1387 ovly_buf_table_base = SYMBOL_VALUE_ADDRESS (ovly_buf_table_msym);
1388 ovly_buf_table_size = MSYMBOL_SIZE (ovly_buf_table_msym);
1389
1390 ovly_table = xmalloc (ovly_table_size);
1391 read_memory (ovly_table_base, ovly_table, ovly_table_size);
1392
1393 tbl = OBSTACK_CALLOC (&objfile->objfile_obstack,
1394 objfile->sections_end - objfile->sections,
1395 struct spu_overlay_table);
1396
1397 for (i = 0; i < ovly_table_size / 16; i++)
1398 {
1399 CORE_ADDR vma = extract_unsigned_integer (ovly_table + 16*i + 0, 4);
1400 CORE_ADDR size = extract_unsigned_integer (ovly_table + 16*i + 4, 4);
1401 CORE_ADDR pos = extract_unsigned_integer (ovly_table + 16*i + 8, 4);
1402 CORE_ADDR buf = extract_unsigned_integer (ovly_table + 16*i + 12, 4);
1403
1404 if (buf == 0 || (buf - 1) * 4 >= ovly_buf_table_size)
1405 continue;
1406
1407 ALL_OBJFILE_OSECTIONS (objfile, osect)
1408 if (vma == bfd_section_vma (objfile->obfd, osect->the_bfd_section)
1409 && pos == osect->the_bfd_section->filepos)
1410 {
1411 int ndx = osect - objfile->sections;
1412 tbl[ndx].mapped_ptr = ovly_buf_table_base + (buf - 1) * 4;
1413 tbl[ndx].mapped_val = i + 1;
1414 break;
1415 }
1416 }
1417
1418 xfree (ovly_table);
1419 set_objfile_data (objfile, spu_overlay_data, tbl);
1420 return tbl;
1421 }
1422
1423 /* Read _ovly_buf_table entry from the target to determine whether
1424 OSECT is currently mapped, and update the mapped state. */
1425 static void
1426 spu_overlay_update_osect (struct obj_section *osect)
1427 {
1428 struct spu_overlay_table *ovly_table;
1429 CORE_ADDR val;
1430
1431 ovly_table = spu_get_overlay_table (osect->objfile);
1432 if (!ovly_table)
1433 return;
1434
1435 ovly_table += osect - osect->objfile->sections;
1436 if (ovly_table->mapped_ptr == 0)
1437 return;
1438
1439 val = read_memory_unsigned_integer (ovly_table->mapped_ptr, 4);
1440 osect->ovly_mapped = (val == ovly_table->mapped_val);
1441 }
1442
1443 /* If OSECT is NULL, then update all sections' mapped state.
1444 If OSECT is non-NULL, then update only OSECT's mapped state. */
1445 static void
1446 spu_overlay_update (struct obj_section *osect)
1447 {
1448 /* Just one section. */
1449 if (osect)
1450 spu_overlay_update_osect (osect);
1451
1452 /* All sections. */
1453 else
1454 {
1455 struct objfile *objfile;
1456
1457 ALL_OBJSECTIONS (objfile, osect)
1458 if (section_is_overlay (osect->the_bfd_section))
1459 spu_overlay_update_osect (osect);
1460 }
1461 }
1462
1463 /* Whenever a new objfile is loaded, read the target's _ovly_table.
1464 If there is one, go through all sections and make sure for non-
1465 overlay sections LMA equals VMA, while for overlay sections LMA
1466 is larger than local store size. */
1467 static void
1468 spu_overlay_new_objfile (struct objfile *objfile)
1469 {
1470 struct spu_overlay_table *ovly_table;
1471 struct obj_section *osect;
1472
1473 /* If we've already touched this file, do nothing. */
1474 if (!objfile || objfile_data (objfile, spu_overlay_data) != NULL)
1475 return;
1476
1477 /* Consider only SPU objfiles. */
1478 if (bfd_get_arch (objfile->obfd) != bfd_arch_spu)
1479 return;
1480
1481 /* Check if this objfile has overlays. */
1482 ovly_table = spu_get_overlay_table (objfile);
1483 if (!ovly_table)
1484 return;
1485
1486 /* Now go and fiddle with all the LMAs. */
1487 ALL_OBJFILE_OSECTIONS (objfile, osect)
1488 {
1489 bfd *obfd = objfile->obfd;
1490 asection *bsect = osect->the_bfd_section;
1491 int ndx = osect - objfile->sections;
1492
1493 if (ovly_table[ndx].mapped_ptr == 0)
1494 bfd_section_lma (obfd, bsect) = bfd_section_vma (obfd, bsect);
1495 else
1496 bfd_section_lma (obfd, bsect) = bsect->filepos + SPU_LS_SIZE;
1497 }
1498 }
1499
1500
1501 /* "info spu" commands. */
1502
1503 static void
1504 info_spu_event_command (char *args, int from_tty)
1505 {
1506 struct frame_info *frame = get_selected_frame (NULL);
1507 ULONGEST event_status = 0;
1508 ULONGEST event_mask = 0;
1509 struct cleanup *chain;
1510 gdb_byte buf[100];
1511 char annex[32];
1512 LONGEST len;
1513 int rc, id;
1514
1515 if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
1516 error (_("\"info spu\" is only supported on the SPU architecture."));
1517
1518 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1519
1520 xsnprintf (annex, sizeof annex, "%d/event_status", id);
1521 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1522 buf, 0, (sizeof (buf) - 1));
1523 if (len <= 0)
1524 error (_("Could not read event_status."));
1525 buf[len] = '\0';
1526 event_status = strtoulst (buf, NULL, 16);
1527
1528 xsnprintf (annex, sizeof annex, "%d/event_mask", id);
1529 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1530 buf, 0, (sizeof (buf) - 1));
1531 if (len <= 0)
1532 error (_("Could not read event_mask."));
1533 buf[len] = '\0';
1534 event_mask = strtoulst (buf, NULL, 16);
1535
1536 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoEvent");
1537
1538 if (ui_out_is_mi_like_p (uiout))
1539 {
1540 ui_out_field_fmt (uiout, "event_status",
1541 "0x%s", phex_nz (event_status, 4));
1542 ui_out_field_fmt (uiout, "event_mask",
1543 "0x%s", phex_nz (event_mask, 4));
1544 }
1545 else
1546 {
1547 printf_filtered (_("Event Status 0x%s\n"), phex (event_status, 4));
1548 printf_filtered (_("Event Mask 0x%s\n"), phex (event_mask, 4));
1549 }
1550
1551 do_cleanups (chain);
1552 }
1553
1554 static void
1555 info_spu_signal_command (char *args, int from_tty)
1556 {
1557 struct frame_info *frame = get_selected_frame (NULL);
1558 ULONGEST signal1 = 0;
1559 ULONGEST signal1_type = 0;
1560 int signal1_pending = 0;
1561 ULONGEST signal2 = 0;
1562 ULONGEST signal2_type = 0;
1563 int signal2_pending = 0;
1564 struct cleanup *chain;
1565 char annex[32];
1566 gdb_byte buf[100];
1567 LONGEST len;
1568 int rc, id;
1569
1570 if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
1571 error (_("\"info spu\" is only supported on the SPU architecture."));
1572
1573 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1574
1575 xsnprintf (annex, sizeof annex, "%d/signal1", id);
1576 len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
1577 if (len < 0)
1578 error (_("Could not read signal1."));
1579 else if (len == 4)
1580 {
1581 signal1 = extract_unsigned_integer (buf, 4);
1582 signal1_pending = 1;
1583 }
1584
1585 xsnprintf (annex, sizeof annex, "%d/signal1_type", id);
1586 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1587 buf, 0, (sizeof (buf) - 1));
1588 if (len <= 0)
1589 error (_("Could not read signal1_type."));
1590 buf[len] = '\0';
1591 signal1_type = strtoulst (buf, NULL, 16);
1592
1593 xsnprintf (annex, sizeof annex, "%d/signal2", id);
1594 len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
1595 if (len < 0)
1596 error (_("Could not read signal2."));
1597 else if (len == 4)
1598 {
1599 signal2 = extract_unsigned_integer (buf, 4);
1600 signal2_pending = 1;
1601 }
1602
1603 xsnprintf (annex, sizeof annex, "%d/signal2_type", id);
1604 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1605 buf, 0, (sizeof (buf) - 1));
1606 if (len <= 0)
1607 error (_("Could not read signal2_type."));
1608 buf[len] = '\0';
1609 signal2_type = strtoulst (buf, NULL, 16);
1610
1611 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoSignal");
1612
1613 if (ui_out_is_mi_like_p (uiout))
1614 {
1615 ui_out_field_int (uiout, "signal1_pending", signal1_pending);
1616 ui_out_field_fmt (uiout, "signal1", "0x%s", phex_nz (signal1, 4));
1617 ui_out_field_int (uiout, "signal1_type", signal1_type);
1618 ui_out_field_int (uiout, "signal2_pending", signal2_pending);
1619 ui_out_field_fmt (uiout, "signal2", "0x%s", phex_nz (signal2, 4));
1620 ui_out_field_int (uiout, "signal2_type", signal2_type);
1621 }
1622 else
1623 {
1624 if (signal1_pending)
1625 printf_filtered (_("Signal 1 control word 0x%s "), phex (signal1, 4));
1626 else
1627 printf_filtered (_("Signal 1 not pending "));
1628
1629 if (signal1_type)
1630 printf_filtered (_("(Type Or)\n"));
1631 else
1632 printf_filtered (_("(Type Overwrite)\n"));
1633
1634 if (signal2_pending)
1635 printf_filtered (_("Signal 2 control word 0x%s "), phex (signal2, 4));
1636 else
1637 printf_filtered (_("Signal 2 not pending "));
1638
1639 if (signal2_type)
1640 printf_filtered (_("(Type Or)\n"));
1641 else
1642 printf_filtered (_("(Type Overwrite)\n"));
1643 }
1644
1645 do_cleanups (chain);
1646 }
1647
1648 static void
1649 info_spu_mailbox_list (gdb_byte *buf, int nr,
1650 const char *field, const char *msg)
1651 {
1652 struct cleanup *chain;
1653 int i;
1654
1655 if (nr <= 0)
1656 return;
1657
1658 chain = make_cleanup_ui_out_table_begin_end (uiout, 1, nr, "mbox");
1659
1660 ui_out_table_header (uiout, 32, ui_left, field, msg);
1661 ui_out_table_body (uiout);
1662
1663 for (i = 0; i < nr; i++)
1664 {
1665 struct cleanup *val_chain;
1666 ULONGEST val;
1667 val_chain = make_cleanup_ui_out_tuple_begin_end (uiout, "mbox");
1668 val = extract_unsigned_integer (buf + 4*i, 4);
1669 ui_out_field_fmt (uiout, field, "0x%s", phex (val, 4));
1670 do_cleanups (val_chain);
1671
1672 if (!ui_out_is_mi_like_p (uiout))
1673 printf_filtered ("\n");
1674 }
1675
1676 do_cleanups (chain);
1677 }
1678
1679 static void
1680 info_spu_mailbox_command (char *args, int from_tty)
1681 {
1682 struct frame_info *frame = get_selected_frame (NULL);
1683 struct cleanup *chain;
1684 char annex[32];
1685 gdb_byte buf[1024];
1686 LONGEST len;
1687 int i, id;
1688
1689 if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
1690 error (_("\"info spu\" is only supported on the SPU architecture."));
1691
1692 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1693
1694 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoMailbox");
1695
1696 xsnprintf (annex, sizeof annex, "%d/mbox_info", id);
1697 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1698 buf, 0, sizeof buf);
1699 if (len < 0)
1700 error (_("Could not read mbox_info."));
1701
1702 info_spu_mailbox_list (buf, len / 4, "mbox", "SPU Outbound Mailbox");
1703
1704 xsnprintf (annex, sizeof annex, "%d/ibox_info", id);
1705 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1706 buf, 0, sizeof buf);
1707 if (len < 0)
1708 error (_("Could not read ibox_info."));
1709
1710 info_spu_mailbox_list (buf, len / 4, "ibox", "SPU Outbound Interrupt Mailbox");
1711
1712 xsnprintf (annex, sizeof annex, "%d/wbox_info", id);
1713 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1714 buf, 0, sizeof buf);
1715 if (len < 0)
1716 error (_("Could not read wbox_info."));
1717
1718 info_spu_mailbox_list (buf, len / 4, "wbox", "SPU Inbound Mailbox");
1719
1720 do_cleanups (chain);
1721 }
1722
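/* Extract bits FIRST through LAST (inclusive) from the 64-bit WORD,
   using big-endian bit numbering where bit 0 is the most significant
   bit. */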
1723 static ULONGEST
1724 spu_mfc_get_bitfield (ULONGEST word, int first, int last)
1725 {
1726 ULONGEST mask = ~(~(ULONGEST)0 << (last - first + 1));
1727 return (word >> (63 - last)) & mask;
1728 }
1729
1730 static void
1731 info_spu_dma_cmdlist (gdb_byte *buf, int nr)
1732 {
1733 static char *spu_mfc_opcode[256] =
1734 {
1735 /* 00 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1736 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1737 /* 10 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1738 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1739 /* 20 */ "put", "putb", "putf", NULL, "putl", "putlb", "putlf", NULL,
1740 "puts", "putbs", "putfs", NULL, NULL, NULL, NULL, NULL,
1741 /* 30 */ "putr", "putrb", "putrf", NULL, "putrl", "putrlb", "putrlf", NULL,
1742 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1743 /* 40 */ "get", "getb", "getf", NULL, "getl", "getlb", "getlf", NULL,
1744 "gets", "getbs", "getfs", NULL, NULL, NULL, NULL, NULL,
1745 /* 50 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1746 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1747 /* 60 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1748 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1749 /* 70 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1750 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1751 /* 80 */ "sdcrt", "sdcrtst", NULL, NULL, NULL, NULL, NULL, NULL,
1752 NULL, "sdcrz", NULL, NULL, NULL, "sdcrst", NULL, "sdcrf",
1753 /* 90 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1754 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1755 /* a0 */ "sndsig", "sndsigb", "sndsigf", NULL, NULL, NULL, NULL, NULL,
1756 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1757 /* b0 */ "putlluc", NULL, NULL, NULL, "putllc", NULL, NULL, NULL,
1758 "putqlluc", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1759 /* c0 */ "barrier", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1760 "mfceieio", NULL, NULL, NULL, "mfcsync", NULL, NULL, NULL,
1761 /* d0 */ "getllar", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1762 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1763 /* e0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1764 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1765 /* f0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1766 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1767 };
1768
1769 int *seq = alloca (nr * sizeof (int));
1770 int done = 0;
1771 struct cleanup *chain;
1772 int i, j;
1773
1774
1775 /* Determine sequence in which to display (valid) entries. */
1776 for (i = 0; i < nr; i++)
1777 {
1778 /* Search for the first valid entry all of whose
1779 dependencies are met. */
1780 for (j = 0; j < nr; j++)
1781 {
1782 ULONGEST mfc_cq_dw3;
1783 ULONGEST dependencies;
1784
1785 if (done & (1 << (nr - 1 - j)))
1786 continue;
1787
1788 mfc_cq_dw3 = extract_unsigned_integer (buf + 32*j + 24, 8);
1789 if (!spu_mfc_get_bitfield (mfc_cq_dw3, 16, 16))
1790 continue;
1791
1792 dependencies = spu_mfc_get_bitfield (mfc_cq_dw3, 0, nr - 1);
1793 if ((dependencies & done) != dependencies)
1794 continue;
1795
1796 seq[i] = j;
1797 done |= 1 << (nr - 1 - j);
1798 break;
1799 }
1800
1801 if (j == nr)
1802 break;
1803 }
1804
1805 nr = i;
1806
1807
1808 chain = make_cleanup_ui_out_table_begin_end (uiout, 10, nr, "dma_cmd");
1809
1810 ui_out_table_header (uiout, 7, ui_left, "opcode", "Opcode");
1811 ui_out_table_header (uiout, 3, ui_left, "tag", "Tag");
1812 ui_out_table_header (uiout, 3, ui_left, "tid", "TId");
1813 ui_out_table_header (uiout, 3, ui_left, "rid", "RId");
1814 ui_out_table_header (uiout, 18, ui_left, "ea", "EA");
1815 ui_out_table_header (uiout, 7, ui_left, "lsa", "LSA");
1816 ui_out_table_header (uiout, 7, ui_left, "size", "Size");
1817 ui_out_table_header (uiout, 7, ui_left, "lstaddr", "LstAddr");
1818 ui_out_table_header (uiout, 7, ui_left, "lstsize", "LstSize");
1819 ui_out_table_header (uiout, 1, ui_left, "error_p", "E");
1820
1821 ui_out_table_body (uiout);
1822
1823 for (i = 0; i < nr; i++)
1824 {
1825 struct cleanup *cmd_chain;
1826 ULONGEST mfc_cq_dw0;
1827 ULONGEST mfc_cq_dw1;
1828 ULONGEST mfc_cq_dw2;
1829 int mfc_cmd_opcode, mfc_cmd_tag, rclass_id, tclass_id;
1830 int lsa, size, list_lsa, list_size, mfc_lsa, mfc_size;
1831 ULONGEST mfc_ea;
1832 int list_valid_p, noop_valid_p, qw_valid_p, ea_valid_p, cmd_error_p;
1833
1834 /* Decode contents of MFC Command Queue Context Save/Restore Registers.
1835 See "Cell Broadband Engine Registers V1.3", section 3.3.2.1. */
1836
1837 mfc_cq_dw0 = extract_unsigned_integer (buf + 32*seq[i], 8);
1838 mfc_cq_dw1 = extract_unsigned_integer (buf + 32*seq[i] + 8, 8);
1839 mfc_cq_dw2 = extract_unsigned_integer (buf + 32*seq[i] + 16, 8);
1840
1841 list_lsa = spu_mfc_get_bitfield (mfc_cq_dw0, 0, 14);
1842 list_size = spu_mfc_get_bitfield (mfc_cq_dw0, 15, 26);
1843 mfc_cmd_opcode = spu_mfc_get_bitfield (mfc_cq_dw0, 27, 34);
1844 mfc_cmd_tag = spu_mfc_get_bitfield (mfc_cq_dw0, 35, 39);
1845 list_valid_p = spu_mfc_get_bitfield (mfc_cq_dw0, 40, 40);
1846 rclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 41, 43);
1847 tclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 44, 46);
1848
1849 mfc_ea = spu_mfc_get_bitfield (mfc_cq_dw1, 0, 51) << 12
1850 | spu_mfc_get_bitfield (mfc_cq_dw2, 25, 36);
1851
1852 mfc_lsa = spu_mfc_get_bitfield (mfc_cq_dw2, 0, 13);
1853 mfc_size = spu_mfc_get_bitfield (mfc_cq_dw2, 14, 24);
1854 noop_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 37, 37);
1855 qw_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 38, 38);
1856 ea_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 39, 39);
1857 cmd_error_p = spu_mfc_get_bitfield (mfc_cq_dw2, 40, 40);
1858
1859 cmd_chain = make_cleanup_ui_out_tuple_begin_end (uiout, "cmd");
1860
1861 if (spu_mfc_opcode[mfc_cmd_opcode])
1862 ui_out_field_string (uiout, "opcode", spu_mfc_opcode[mfc_cmd_opcode]);
1863 else
1864 ui_out_field_int (uiout, "opcode", mfc_cmd_opcode);
1865
1866 ui_out_field_int (uiout, "tag", mfc_cmd_tag);
1867 ui_out_field_int (uiout, "tid", tclass_id);
1868 ui_out_field_int (uiout, "rid", rclass_id);
1869
1870 if (ea_valid_p)
1871 ui_out_field_fmt (uiout, "ea", "0x%s", phex (mfc_ea, 8));
1872 else
1873 ui_out_field_skip (uiout, "ea");
1874
1875 ui_out_field_fmt (uiout, "lsa", "0x%05x", mfc_lsa << 4);
1876 if (qw_valid_p)
1877 ui_out_field_fmt (uiout, "size", "0x%05x", mfc_size << 4);
1878 else
1879 ui_out_field_fmt (uiout, "size", "0x%05x", mfc_size);
1880
1881 if (list_valid_p)
1882 {
1883 ui_out_field_fmt (uiout, "lstaddr", "0x%05x", list_lsa << 3);
1884 ui_out_field_fmt (uiout, "lstsize", "0x%05x", list_size << 3);
1885 }
1886 else
1887 {
1888 ui_out_field_skip (uiout, "lstaddr");
1889 ui_out_field_skip (uiout, "lstsize");
1890 }
1891
1892 if (cmd_error_p)
1893 ui_out_field_string (uiout, "error_p", "*");
1894 else
1895 ui_out_field_skip (uiout, "error_p");
1896
1897 do_cleanups (cmd_chain);
1898
1899 if (!ui_out_is_mi_like_p (uiout))
1900 printf_filtered ("\n");
1901 }
1902
1903 do_cleanups (chain);
1904 }
1905
1906 static void
1907 info_spu_dma_command (char *args, int from_tty)
1908 {
1909 struct frame_info *frame = get_selected_frame (NULL);
1910 ULONGEST dma_info_type;
1911 ULONGEST dma_info_mask;
1912 ULONGEST dma_info_status;
1913 ULONGEST dma_info_stall_and_notify;
1914 ULONGEST dma_info_atomic_command_status;
1915 struct cleanup *chain;
1916 char annex[32];
1917 gdb_byte buf[1024];
1918 LONGEST len;
1919 int id;
1920
1921 if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
1922 error (_("\"info spu\" is only supported on the SPU architecture."));
1923
1924 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1925
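/* The dma_info annex holds five 64-bit status words (40 bytes) followed
   by 16 command-queue entries of 32 bytes each.  */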
1926 xsnprintf (annex, sizeof annex, "%d/dma_info", id);
1927 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1928 buf, 0, 40 + 16 * 32);
1929 if (len <= 0)
1930 error (_("Could not read dma_info."));
1931
1932 dma_info_type = extract_unsigned_integer (buf, 8);
1933 dma_info_mask = extract_unsigned_integer (buf + 8, 8);
1934 dma_info_status = extract_unsigned_integer (buf + 16, 8);
1935 dma_info_stall_and_notify = extract_unsigned_integer (buf + 24, 8);
1936 dma_info_atomic_command_status = extract_unsigned_integer (buf + 32, 8);
1937
1938 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoDMA");
1939
1940 if (ui_out_is_mi_like_p (uiout))
1941 {
1942 ui_out_field_fmt (uiout, "dma_info_type", "0x%s",
1943 phex_nz (dma_info_type, 4));
1944 ui_out_field_fmt (uiout, "dma_info_mask", "0x%s",
1945 phex_nz (dma_info_mask, 4));
1946 ui_out_field_fmt (uiout, "dma_info_status", "0x%s",
1947 phex_nz (dma_info_status, 4));
1948 ui_out_field_fmt (uiout, "dma_info_stall_and_notify", "0x%s",
1949 phex_nz (dma_info_stall_and_notify, 4));
1950 ui_out_field_fmt (uiout, "dma_info_atomic_command_status", "0x%s",
1951 phex_nz (dma_info_atomic_command_status, 4));
1952 }
1953 else
1954 {
1955 const char *query_msg = _("no query pending");
1956
1957 if (dma_info_type & 4)
1958 switch (dma_info_type & 3)
1959 {
1960 case 1: query_msg = _("'any' query pending"); break;
1961 case 2: query_msg = _("'all' query pending"); break;
1962 default: query_msg = _("undefined query type"); break;
1963 }
1964
1965 printf_filtered (_("Tag-Group Status 0x%s\n"),
1966 phex (dma_info_status, 4));
1967 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
1968 phex (dma_info_mask, 4), query_msg);
1969 printf_filtered (_("Stall-and-Notify 0x%s\n"),
1970 phex (dma_info_stall_and_notify, 4));
1971 printf_filtered (_("Atomic Cmd Status 0x%s\n"),
1972 phex (dma_info_atomic_command_status, 4));
1973 printf_filtered ("\n");
1974 }
1975
1976 info_spu_dma_cmdlist (buf + 40, 16);
1977 do_cleanups (chain);
1978 }
1979
1980 static void
1981 info_spu_proxydma_command (char *args, int from_tty)
1982 {
1983 struct frame_info *frame = get_selected_frame (NULL);
1984 ULONGEST dma_info_type;
1985 ULONGEST dma_info_mask;
1986 ULONGEST dma_info_status;
1987 struct cleanup *chain;
1988 char annex[32];
1989 gdb_byte buf[1024];
1990 LONGEST len;
1991 int id;
1992
1993 if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
1994 error (_("\"info spu\" is only supported on the SPU architecture."));
1995
1996 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1997
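/* The proxydma_info annex holds three 64-bit status words (24 bytes)
   followed by 8 command-queue entries of 32 bytes each.  */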
1998 xsnprintf (annex, sizeof annex, "%d/proxydma_info", id);
1999 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2000 buf, 0, 24 + 8 * 32);
2001 if (len <= 0)
2002 error (_("Could not read proxydma_info."));
2003
2004 dma_info_type = extract_unsigned_integer (buf, 8);
2005 dma_info_mask = extract_unsigned_integer (buf + 8, 8);
2006 dma_info_status = extract_unsigned_integer (buf + 16, 8);
2007
2008 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoProxyDMA");
2009
2010 if (ui_out_is_mi_like_p (uiout))
2011 {
2012 ui_out_field_fmt (uiout, "proxydma_info_type", "0x%s",
2013 phex_nz (dma_info_type, 4));
2014 ui_out_field_fmt (uiout, "proxydma_info_mask", "0x%s",
2015 phex_nz (dma_info_mask, 4));
2016 ui_out_field_fmt (uiout, "proxydma_info_status", "0x%s",
2017 phex_nz (dma_info_status, 4));
2018 }
2019 else
2020 {
2021 const char *query_msg;
2022
2023 switch (dma_info_type & 3)
2024 {
2025 case 0: query_msg = _("no query pending"); break;
2026 case 1: query_msg = _("'any' query pending"); break;
2027 case 2: query_msg = _("'all' query pending"); break;
2028 default: query_msg = _("undefined query type"); break;
2029 }
2030
2031 printf_filtered (_("Tag-Group Status 0x%s\n"),
2032 phex (dma_info_status, 4));
2033 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
2034 phex (dma_info_mask, 4), query_msg);
2035 printf_filtered ("\n");
2036 }
2037
2038 info_spu_dma_cmdlist (buf + 24, 8);
2039 do_cleanups (chain);
2040 }
2041
2042 static void
2043 info_spu_command (char *args, int from_tty)
2044 {
2045 printf_unfiltered (_("\"info spu\" must be followed by the name of an SPU facility.\n"));
2046 help_list (infospucmdlist, "info spu ", -1, gdb_stdout);
2047 }
2048
2049
2050 /* Set up gdbarch struct. */
2051
2052 static struct gdbarch *
2053 spu_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2054 {
2055 struct gdbarch *gdbarch;
2056 struct gdbarch_tdep *tdep;
2057
2058 /* Find a candidate among the list of pre-declared architectures. */
2059 arches = gdbarch_list_lookup_by_info (arches, &info);
2060 if (arches != NULL)
2061 return arches->gdbarch;
2062
2063 /* Is it for us? */
2064 if (info.bfd_arch_info->mach != bfd_mach_spu)
2065 return NULL;
2066
2067 /* Yes, create a new architecture. */
2068 tdep = XCALLOC (1, struct gdbarch_tdep);
2069 gdbarch = gdbarch_alloc (&info, tdep);
2070
2071 /* Disassembler. */
2072 set_gdbarch_print_insn (gdbarch, print_insn_spu);
2073
2074 /* Registers. */
2075 set_gdbarch_num_regs (gdbarch, SPU_NUM_REGS);
2076 set_gdbarch_num_pseudo_regs (gdbarch, SPU_NUM_PSEUDO_REGS);
2077 set_gdbarch_sp_regnum (gdbarch, SPU_SP_REGNUM);
2078 set_gdbarch_pc_regnum (gdbarch, SPU_PC_REGNUM);
2079 set_gdbarch_read_pc (gdbarch, spu_read_pc);
2080 set_gdbarch_write_pc (gdbarch, spu_write_pc);
2081 set_gdbarch_register_name (gdbarch, spu_register_name);
2082 set_gdbarch_register_type (gdbarch, spu_register_type);
2083 set_gdbarch_pseudo_register_read (gdbarch, spu_pseudo_register_read);
2084 set_gdbarch_pseudo_register_write (gdbarch, spu_pseudo_register_write);
2085 set_gdbarch_value_from_register (gdbarch, spu_value_from_register);
2086 set_gdbarch_register_reggroup_p (gdbarch, spu_register_reggroup_p);
2087
2088 /* Data types. */
2089 set_gdbarch_char_signed (gdbarch, 0);
2090 set_gdbarch_ptr_bit (gdbarch, 32);
2091 set_gdbarch_addr_bit (gdbarch, 32);
2092 set_gdbarch_short_bit (gdbarch, 16);
2093 set_gdbarch_int_bit (gdbarch, 32);
2094 set_gdbarch_long_bit (gdbarch, 32);
2095 set_gdbarch_long_long_bit (gdbarch, 64);
2096 set_gdbarch_float_bit (gdbarch, 32);
2097 set_gdbarch_double_bit (gdbarch, 64);
2098 set_gdbarch_long_double_bit (gdbarch, 64);
2099 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2100 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2101 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
2102
2103 /* Address conversion. */
2104 set_gdbarch_pointer_to_address (gdbarch, spu_pointer_to_address);
2105 set_gdbarch_integer_to_address (gdbarch, spu_integer_to_address);
2106
2107 /* Inferior function calls. */
2108 set_gdbarch_call_dummy_location (gdbarch, ON_STACK);
2109 set_gdbarch_frame_align (gdbarch, spu_frame_align);
2110 set_gdbarch_frame_red_zone_size (gdbarch, 2000);
2111 set_gdbarch_push_dummy_call (gdbarch, spu_push_dummy_call);
2112 set_gdbarch_dummy_id (gdbarch, spu_dummy_id);
2113 set_gdbarch_return_value (gdbarch, spu_return_value);
2114
2115 /* Frame handling. */
2116 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2117 frame_unwind_append_unwinder (gdbarch, &spu_frame_unwind);
2118 frame_base_set_default (gdbarch, &spu_frame_base);
2119 set_gdbarch_unwind_pc (gdbarch, spu_unwind_pc);
2120 set_gdbarch_unwind_sp (gdbarch, spu_unwind_sp);
2121 set_gdbarch_virtual_frame_pointer (gdbarch, spu_virtual_frame_pointer);
2122 set_gdbarch_frame_args_skip (gdbarch, 0);
2123 set_gdbarch_skip_prologue (gdbarch, spu_skip_prologue);
2124 set_gdbarch_in_function_epilogue_p (gdbarch, spu_in_function_epilogue_p);
2125
2126 /* Breakpoints. */
2127 set_gdbarch_decr_pc_after_break (gdbarch, 4);
2128 set_gdbarch_breakpoint_from_pc (gdbarch, spu_breakpoint_from_pc);
2129 set_gdbarch_cannot_step_breakpoint (gdbarch, 1);
2130 set_gdbarch_software_single_step (gdbarch, spu_software_single_step);
2131
2132 /* Overlays. */
2133 set_gdbarch_overlay_update (gdbarch, spu_overlay_update);
2134
2135 return gdbarch;
2136 }
2137
2138 void
2139 _initialize_spu_tdep (void)
2140 {
2141 register_gdbarch_init (bfd_arch_spu, spu_gdbarch_init);
2142
2143 /* Add ourselves to the objfile event chain. */
2144 observer_attach_new_objfile (spu_overlay_new_objfile);
2145 spu_overlay_data = register_objfile_data ();
2146
2147 /* Add root prefix command for all "info spu" commands. */
2148 add_prefix_cmd ("spu", class_info, info_spu_command,
2149 _("Various SPU specific commands."),
2150 &infospucmdlist, "info spu ", 0, &infolist);
2151
2152 /* Add various "info spu" commands. */
2153 add_cmd ("event", class_info, info_spu_event_command,
2154 _("Display SPU event facility status.\n"),
2155 &infospucmdlist);
2156 add_cmd ("signal", class_info, info_spu_signal_command,
2157 _("Display SPU signal notification facility status.\n"),
2158 &infospucmdlist);
2159 add_cmd ("mailbox", class_info, info_spu_mailbox_command,
2160 _("Display SPU mailbox facility status.\n"),
2161 &infospucmdlist);
2162 add_cmd ("dma", class_info, info_spu_dma_command,
2163 _("Display MFC DMA status.\n"),
2164 &infospucmdlist);
2165 add_cmd ("proxydma", class_info, info_spu_proxydma_command,
2166 _("Display MFC Proxy-DMA status.\n"),
2167 &infospucmdlist);
2168 }