1 /* SPU target-dependent code for GDB, the GNU debugger.
2 Copyright (C) 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
4 Contributed by Ulrich Weigand <uweigand@de.ibm.com>.
5 Based on a port by Sid Manning <sid@us.ibm.com>.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23 #include "arch-utils.h"
27 #include "gdb_string.h"
28 #include "gdb_assert.h"
30 #include "frame-unwind.h"
31 #include "frame-base.h"
32 #include "trad-frame.h"
41 #include "reggroups.h"
42 #include "floatformat.h"
48 /* The tdep structure. */
51 /* SPU-specific vector type. */
52 struct type
*spu_builtin_type_vec128
;
56 /* SPU-specific vector type. */
58 spu_builtin_type_vec128 (struct gdbarch
*gdbarch
)
60 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
62 if (!tdep
->spu_builtin_type_vec128
)
64 const struct builtin_type
*bt
= builtin_type (gdbarch
);
67 t
= arch_composite_type (gdbarch
,
68 "__spu_builtin_type_vec128", TYPE_CODE_UNION
);
69 append_composite_type_field (t
, "uint128", bt
->builtin_int128
);
70 append_composite_type_field (t
, "v2_int64",
71 init_vector_type (bt
->builtin_int64
, 2));
72 append_composite_type_field (t
, "v4_int32",
73 init_vector_type (bt
->builtin_int32
, 4));
74 append_composite_type_field (t
, "v8_int16",
75 init_vector_type (bt
->builtin_int16
, 8));
76 append_composite_type_field (t
, "v16_int8",
77 init_vector_type (bt
->builtin_int8
, 16));
78 append_composite_type_field (t
, "v2_double",
79 init_vector_type (bt
->builtin_double
, 2));
80 append_composite_type_field (t
, "v4_float",
81 init_vector_type (bt
->builtin_float
, 4));
84 TYPE_NAME (t
) = "spu_builtin_type_vec128";
86 tdep
->spu_builtin_type_vec128
= t
;
89 return tdep
->spu_builtin_type_vec128
;
93 /* The list of available "info spu " commands. */
94 static struct cmd_list_element
*infospucmdlist
= NULL
;
/* Return the name of register REG_NR, or NULL if REG_NR is out of range.
   Registers 0..127 are the 128 GPRs; the remaining entries name the
   special-purpose registers this backend exposes.  */
static const char *
spu_register_name (struct gdbarch *gdbarch, int reg_nr)
{
  static char *register_names[] =
    {
      "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
      "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
      "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
      "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
      "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
      "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
      "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
      "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
      "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
      "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
      "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
      "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
      "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
      "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
      "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
      "id", "pc", "sp", "fpscr", "srr0", "lslr", "decr", "decr_status"
    };

  /* NOTE(review): the out-of-range handling was lost in extraction and is
     reconstructed -- returning NULL tells the GDB core the register does
     not exist; verify against the original backend.  */
  if (reg_nr < 0)
    return NULL;
  if (reg_nr >= sizeof register_names / sizeof *register_names)
    return NULL;

  return register_names[reg_nr];
}
131 spu_register_type (struct gdbarch
*gdbarch
, int reg_nr
)
133 if (reg_nr
< SPU_NUM_GPRS
)
134 return spu_builtin_type_vec128 (gdbarch
);
139 return builtin_type (gdbarch
)->builtin_uint32
;
142 return builtin_type (gdbarch
)->builtin_func_ptr
;
145 return builtin_type (gdbarch
)->builtin_data_ptr
;
147 case SPU_FPSCR_REGNUM
:
148 return builtin_type (gdbarch
)->builtin_uint128
;
150 case SPU_SRR0_REGNUM
:
151 return builtin_type (gdbarch
)->builtin_uint32
;
153 case SPU_LSLR_REGNUM
:
154 return builtin_type (gdbarch
)->builtin_uint32
;
156 case SPU_DECR_REGNUM
:
157 return builtin_type (gdbarch
)->builtin_uint32
;
159 case SPU_DECR_STATUS_REGNUM
:
160 return builtin_type (gdbarch
)->builtin_uint32
;
163 internal_error (__FILE__
, __LINE__
, "invalid regnum");
167 /* Pseudo registers for preferred slots - stack pointer. */
170 spu_pseudo_register_read_spu (struct regcache
*regcache
, const char *regname
,
173 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
174 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
179 regcache_raw_read_unsigned (regcache
, SPU_ID_REGNUM
, &id
);
180 xsnprintf (annex
, sizeof annex
, "%d/%s", (int) id
, regname
);
181 memset (reg
, 0, sizeof reg
);
182 target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
185 store_unsigned_integer (buf
, 4, byte_order
, strtoulst (reg
, NULL
, 16));
189 spu_pseudo_register_read (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
190 int regnum
, gdb_byte
*buf
)
199 regcache_raw_read (regcache
, SPU_RAW_SP_REGNUM
, reg
);
200 memcpy (buf
, reg
, 4);
203 case SPU_FPSCR_REGNUM
:
204 regcache_raw_read_unsigned (regcache
, SPU_ID_REGNUM
, &id
);
205 xsnprintf (annex
, sizeof annex
, "%d/fpcr", (int) id
);
206 target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
, buf
, 0, 16);
209 case SPU_SRR0_REGNUM
:
210 spu_pseudo_register_read_spu (regcache
, "srr0", buf
);
213 case SPU_LSLR_REGNUM
:
214 spu_pseudo_register_read_spu (regcache
, "lslr", buf
);
217 case SPU_DECR_REGNUM
:
218 spu_pseudo_register_read_spu (regcache
, "decr", buf
);
221 case SPU_DECR_STATUS_REGNUM
:
222 spu_pseudo_register_read_spu (regcache
, "decr_status", buf
);
226 internal_error (__FILE__
, __LINE__
, _("invalid regnum"));
231 spu_pseudo_register_write_spu (struct regcache
*regcache
, const char *regname
,
234 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
235 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
240 regcache_raw_read_unsigned (regcache
, SPU_ID_REGNUM
, &id
);
241 xsnprintf (annex
, sizeof annex
, "%d/%s", (int) id
, regname
);
242 xsnprintf (reg
, sizeof reg
, "0x%s",
243 phex_nz (extract_unsigned_integer (buf
, 4, byte_order
), 4));
244 target_write (¤t_target
, TARGET_OBJECT_SPU
, annex
,
245 reg
, 0, strlen (reg
));
249 spu_pseudo_register_write (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
250 int regnum
, const gdb_byte
*buf
)
259 regcache_raw_read (regcache
, SPU_RAW_SP_REGNUM
, reg
);
260 memcpy (reg
, buf
, 4);
261 regcache_raw_write (regcache
, SPU_RAW_SP_REGNUM
, reg
);
264 case SPU_FPSCR_REGNUM
:
265 regcache_raw_read_unsigned (regcache
, SPU_ID_REGNUM
, &id
);
266 xsnprintf (annex
, sizeof annex
, "%d/fpcr", (int) id
);
267 target_write (¤t_target
, TARGET_OBJECT_SPU
, annex
, buf
, 0, 16);
270 case SPU_SRR0_REGNUM
:
271 spu_pseudo_register_write_spu (regcache
, "srr0", buf
);
274 case SPU_LSLR_REGNUM
:
275 spu_pseudo_register_write_spu (regcache
, "lslr", buf
);
278 case SPU_DECR_REGNUM
:
279 spu_pseudo_register_write_spu (regcache
, "decr", buf
);
282 case SPU_DECR_STATUS_REGNUM
:
283 spu_pseudo_register_write_spu (regcache
, "decr_status", buf
);
287 internal_error (__FILE__
, __LINE__
, _("invalid regnum"));
291 /* Value conversion -- access scalar values at the preferred slot. */
293 static struct value
*
294 spu_value_from_register (struct type
*type
, int regnum
,
295 struct frame_info
*frame
)
297 struct value
*value
= default_value_from_register (type
, regnum
, frame
);
298 int len
= TYPE_LENGTH (type
);
300 if (regnum
< SPU_NUM_GPRS
&& len
< 16)
302 int preferred_slot
= len
< 4 ? 4 - len
: 0;
303 set_value_offset (value
, preferred_slot
);
309 /* Register groups. */
312 spu_register_reggroup_p (struct gdbarch
*gdbarch
, int regnum
,
313 struct reggroup
*group
)
315 /* Registers displayed via 'info regs'. */
316 if (group
== general_reggroup
)
319 /* Registers displayed via 'info float'. */
320 if (group
== float_reggroup
)
323 /* Registers that need to be saved/restored in order to
324 push or pop frames. */
325 if (group
== save_reggroup
|| group
== restore_reggroup
)
328 return default_register_reggroup_p (gdbarch
, regnum
, group
);
331 /* Address conversion. */
334 spu_pointer_to_address (struct gdbarch
*gdbarch
,
335 struct type
*type
, const gdb_byte
*buf
)
337 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
339 = extract_unsigned_integer (buf
, TYPE_LENGTH (type
), byte_order
);
340 ULONGEST lslr
= SPU_LS_SIZE
- 1; /* Hard-wired LS size. */
342 if (target_has_registers
&& target_has_stack
&& target_has_memory
)
343 lslr
= get_frame_register_unsigned (get_selected_frame (NULL
),
350 spu_integer_to_address (struct gdbarch
*gdbarch
,
351 struct type
*type
, const gdb_byte
*buf
)
353 ULONGEST addr
= unpack_long (type
, buf
);
354 ULONGEST lslr
= SPU_LS_SIZE
- 1; /* Hard-wired LS size. */
356 if (target_has_registers
&& target_has_stack
&& target_has_memory
)
357 lslr
= get_frame_register_unsigned (get_selected_frame (NULL
),
364 /* Decoding SPU instructions. */
/* If INSN is an RR-format instruction with (11-bit) primary opcode OP,
   extract the RT/RA/RB register fields into *RT/*RA/*RB and return
   non-zero; otherwise return zero and leave the outputs untouched.  */
static int
is_rr (unsigned int insn, int op, int *rt, int *ra, int *rb)
{
  if ((insn >> 21) == op)
    {
      *rt = insn & 127;
      *ra = (insn >> 7) & 127;
      *rb = (insn >> 14) & 127;
      return 1;
    }

  return 0;
}
/* If INSN is an RRR-format instruction with (4-bit) primary opcode OP,
   extract the RT/RA/RB/RC register fields and return non-zero; otherwise
   return zero and leave the outputs untouched.  */
static int
is_rrr (unsigned int insn, int op, int *rt, int *ra, int *rb, int *rc)
{
  if ((insn >> 28) == op)
    {
      *rt = (insn >> 21) & 127;
      *ra = (insn >> 7) & 127;
      *rb = (insn >> 14) & 127;
      *rc = insn & 127;
      return 1;
    }

  return 0;
}
/* If INSN is an RI7-format instruction with (11-bit) primary opcode OP,
   extract RT, RA and the sign-extended 7-bit immediate into *RT/*RA/*I7
   and return non-zero; otherwise return zero.  */
static int
is_ri7 (unsigned int insn, int op, int *rt, int *ra, int *i7)
{
  if ((insn >> 21) == op)
    {
      *rt = insn & 127;
      *ra = (insn >> 7) & 127;
      /* XOR/subtract trick sign-extends the 7-bit field.  */
      *i7 = (((insn >> 14) & 127) ^ 0x40) - 0x40;
      return 1;
    }

  return 0;
}
/* If INSN is an RI10-format instruction with (8-bit) primary opcode OP,
   extract RT, RA and the sign-extended 10-bit immediate into *RT/*RA/*I10
   and return non-zero; otherwise return zero.  */
static int
is_ri10 (unsigned int insn, int op, int *rt, int *ra, int *i10)
{
  if ((insn >> 24) == op)
    {
      *rt = insn & 127;
      *ra = (insn >> 7) & 127;
      /* XOR/subtract trick sign-extends the 10-bit field.  */
      *i10 = (((insn >> 14) & 0x3ff) ^ 0x200) - 0x200;
      return 1;
    }

  return 0;
}
/* If INSN is an RI16-format instruction with (9-bit) primary opcode OP,
   extract RT and the sign-extended 16-bit immediate into *RT/*I16 and
   return non-zero; otherwise return zero.  */
static int
is_ri16 (unsigned int insn, int op, int *rt, int *i16)
{
  if ((insn >> 23) == op)
    {
      *rt = insn & 127;
      /* XOR/subtract trick sign-extends the 16-bit field.  */
      *i16 = (((insn >> 7) & 0xffff) ^ 0x8000) - 0x8000;
      return 1;
    }

  return 0;
}
/* If INSN is an RI18-format instruction with (7-bit) primary opcode OP,
   extract RT and the sign-extended 18-bit immediate into *RT/*I18 and
   return non-zero; otherwise return zero.  */
static int
is_ri18 (unsigned int insn, int op, int *rt, int *i18)
{
  if ((insn >> 25) == op)
    {
      *rt = insn & 127;
      /* XOR/subtract trick sign-extends the 18-bit field.  */
      *i18 = (((insn >> 7) & 0x3ffff) ^ 0x20000) - 0x20000;
      return 1;
    }

  return 0;
}
484 is_branch (unsigned int insn
, int *offset
, int *reg
)
488 if (is_ri16 (insn
, op_br
, &rt
, &i16
)
489 || is_ri16 (insn
, op_brsl
, &rt
, &i16
)
490 || is_ri16 (insn
, op_brnz
, &rt
, &i16
)
491 || is_ri16 (insn
, op_brz
, &rt
, &i16
)
492 || is_ri16 (insn
, op_brhnz
, &rt
, &i16
)
493 || is_ri16 (insn
, op_brhz
, &rt
, &i16
))
495 *reg
= SPU_PC_REGNUM
;
500 if (is_ri16 (insn
, op_bra
, &rt
, &i16
)
501 || is_ri16 (insn
, op_brasl
, &rt
, &i16
))
508 if (is_ri7 (insn
, op_bi
, &rt
, reg
, &i7
)
509 || is_ri7 (insn
, op_bisl
, &rt
, reg
, &i7
)
510 || is_ri7 (insn
, op_biz
, &rt
, reg
, &i7
)
511 || is_ri7 (insn
, op_binz
, &rt
, reg
, &i7
)
512 || is_ri7 (insn
, op_bihz
, &rt
, reg
, &i7
)
513 || is_ri7 (insn
, op_bihnz
, &rt
, reg
, &i7
))
523 /* Prolog parsing. */
525 struct spu_prologue_data
527 /* Stack frame size. -1 if analysis was unsuccessful. */
530 /* How to find the CFA. The CFA is equal to SP at function entry. */
534 /* Offset relative to CFA where a register is saved. -1 if invalid. */
535 int reg_offset
[SPU_NUM_GPRS
];
539 spu_analyze_prologue (struct gdbarch
*gdbarch
,
540 CORE_ADDR start_pc
, CORE_ADDR end_pc
,
541 struct spu_prologue_data
*data
)
543 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
547 int reg_immed
[SPU_NUM_GPRS
];
549 CORE_ADDR prolog_pc
= start_pc
;
554 /* Initialize DATA to default values. */
557 data
->cfa_reg
= SPU_RAW_SP_REGNUM
;
558 data
->cfa_offset
= 0;
560 for (i
= 0; i
< SPU_NUM_GPRS
; i
++)
561 data
->reg_offset
[i
] = -1;
563 /* Set up REG_IMMED array. This is non-zero for a register if we know its
564 preferred slot currently holds this immediate value. */
565 for (i
= 0; i
< SPU_NUM_GPRS
; i
++)
568 /* Scan instructions until the first branch.
570 The following instructions are important prolog components:
572 - The first instruction to set up the stack pointer.
573 - The first instruction to set up the frame pointer.
574 - The first instruction to save the link register.
576 We return the instruction after the latest of these three,
577 or the incoming PC if none is found. The first instruction
578 to set up the stack pointer also defines the frame size.
580 Note that instructions saving incoming arguments to their stack
581 slots are not counted as important, because they are hard to
582 identify with certainty. This should not matter much, because
583 arguments are relevant only in code compiled with debug data,
584 and in such code the GDB core will advance until the first source
585 line anyway, using SAL data.
587 For purposes of stack unwinding, we analyze the following types
588 of instructions in addition:
590 - Any instruction adding to the current frame pointer.
591 - Any instruction loading an immediate constant into a register.
592 - Any instruction storing a register onto the stack.
594 These are used to compute the CFA and REG_OFFSET output. */
596 for (pc
= start_pc
; pc
< end_pc
; pc
+= 4)
599 int rt
, ra
, rb
, rc
, immed
;
601 if (target_read_memory (pc
, buf
, 4))
603 insn
= extract_unsigned_integer (buf
, 4, byte_order
);
605 /* AI is the typical instruction to set up a stack frame.
606 It is also used to initialize the frame pointer. */
607 if (is_ri10 (insn
, op_ai
, &rt
, &ra
, &immed
))
609 if (rt
== data
->cfa_reg
&& ra
== data
->cfa_reg
)
610 data
->cfa_offset
-= immed
;
612 if (rt
== SPU_RAW_SP_REGNUM
&& ra
== SPU_RAW_SP_REGNUM
620 else if (rt
== SPU_FP_REGNUM
&& ra
== SPU_RAW_SP_REGNUM
626 data
->cfa_reg
= SPU_FP_REGNUM
;
627 data
->cfa_offset
-= immed
;
631 /* A is used to set up stack frames of size >= 512 bytes.
632 If we have tracked the contents of the addend register,
633 we can handle this as well. */
634 else if (is_rr (insn
, op_a
, &rt
, &ra
, &rb
))
636 if (rt
== data
->cfa_reg
&& ra
== data
->cfa_reg
)
638 if (reg_immed
[rb
] != 0)
639 data
->cfa_offset
-= reg_immed
[rb
];
641 data
->cfa_reg
= -1; /* We don't know the CFA any more. */
644 if (rt
== SPU_RAW_SP_REGNUM
&& ra
== SPU_RAW_SP_REGNUM
650 if (reg_immed
[rb
] != 0)
651 data
->size
= -reg_immed
[rb
];
655 /* We need to track IL and ILA used to load immediate constants
656 in case they are later used as input to an A instruction. */
657 else if (is_ri16 (insn
, op_il
, &rt
, &immed
))
659 reg_immed
[rt
] = immed
;
661 if (rt
== SPU_RAW_SP_REGNUM
&& !found_sp
)
665 else if (is_ri18 (insn
, op_ila
, &rt
, &immed
))
667 reg_immed
[rt
] = immed
& 0x3ffff;
669 if (rt
== SPU_RAW_SP_REGNUM
&& !found_sp
)
673 /* STQD is used to save registers to the stack. */
674 else if (is_ri10 (insn
, op_stqd
, &rt
, &ra
, &immed
))
676 if (ra
== data
->cfa_reg
)
677 data
->reg_offset
[rt
] = data
->cfa_offset
- (immed
<< 4);
679 if (ra
== data
->cfa_reg
&& rt
== SPU_LR_REGNUM
687 /* _start uses SELB to set up the stack pointer. */
688 else if (is_rrr (insn
, op_selb
, &rt
, &ra
, &rb
, &rc
))
690 if (rt
== SPU_RAW_SP_REGNUM
&& !found_sp
)
694 /* We terminate if we find a branch. */
695 else if (is_branch (insn
, &immed
, &ra
))
700 /* If we successfully parsed until here, and didn't find any instruction
701 modifying SP, we assume we have a frameless function. */
705 /* Return cooked instead of raw SP. */
706 if (data
->cfa_reg
== SPU_RAW_SP_REGNUM
)
707 data
->cfa_reg
= SPU_SP_REGNUM
;
712 /* Return the first instruction after the prologue starting at PC. */
714 spu_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
716 struct spu_prologue_data data
;
717 return spu_analyze_prologue (gdbarch
, pc
, (CORE_ADDR
)-1, &data
);
720 /* Return the frame pointer in use at address PC. */
722 spu_virtual_frame_pointer (struct gdbarch
*gdbarch
, CORE_ADDR pc
,
723 int *reg
, LONGEST
*offset
)
725 struct spu_prologue_data data
;
726 spu_analyze_prologue (gdbarch
, pc
, (CORE_ADDR
)-1, &data
);
728 if (data
.size
!= -1 && data
.cfa_reg
!= -1)
730 /* The 'frame pointer' address is CFA minus frame size. */
732 *offset
= data
.cfa_offset
- data
.size
;
736 /* ??? We don't really know ... */
737 *reg
= SPU_SP_REGNUM
;
742 /* Return true if we are in the function's epilogue, i.e. after the
743 instruction that destroyed the function's stack frame.
745 1) scan forward from the point of execution:
746 a) If you find an instruction that modifies the stack pointer
747 or transfers control (except a return), execution is not in
749 b) Stop scanning if you find a return instruction or reach the
750 end of the function or reach the hard limit for the size of
752 2) scan backward from the point of execution:
753 a) If you find an instruction that modifies the stack pointer,
754 execution *is* in an epilogue, return.
755 b) Stop scanning if you reach an instruction that transfers
756 control or the beginning of the function or reach the hard
757 limit for the size of an epilogue. */
760 spu_in_function_epilogue_p (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
762 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
763 CORE_ADDR scan_pc
, func_start
, func_end
, epilogue_start
, epilogue_end
;
766 int rt
, ra
, rb
, rc
, immed
;
768 /* Find the search limits based on function boundaries and hard limit.
769 We assume the epilogue can be up to 64 instructions long. */
771 const int spu_max_epilogue_size
= 64 * 4;
773 if (!find_pc_partial_function (pc
, NULL
, &func_start
, &func_end
))
776 if (pc
- func_start
< spu_max_epilogue_size
)
777 epilogue_start
= func_start
;
779 epilogue_start
= pc
- spu_max_epilogue_size
;
781 if (func_end
- pc
< spu_max_epilogue_size
)
782 epilogue_end
= func_end
;
784 epilogue_end
= pc
+ spu_max_epilogue_size
;
786 /* Scan forward until next 'bi $0'. */
788 for (scan_pc
= pc
; scan_pc
< epilogue_end
; scan_pc
+= 4)
790 if (target_read_memory (scan_pc
, buf
, 4))
792 insn
= extract_unsigned_integer (buf
, 4, byte_order
);
794 if (is_branch (insn
, &immed
, &ra
))
796 if (immed
== 0 && ra
== SPU_LR_REGNUM
)
802 if (is_ri10 (insn
, op_ai
, &rt
, &ra
, &immed
)
803 || is_rr (insn
, op_a
, &rt
, &ra
, &rb
)
804 || is_ri10 (insn
, op_lqd
, &rt
, &ra
, &immed
))
806 if (rt
== SPU_RAW_SP_REGNUM
)
811 if (scan_pc
>= epilogue_end
)
814 /* Scan backward until adjustment to stack pointer (R1). */
816 for (scan_pc
= pc
- 4; scan_pc
>= epilogue_start
; scan_pc
-= 4)
818 if (target_read_memory (scan_pc
, buf
, 4))
820 insn
= extract_unsigned_integer (buf
, 4, byte_order
);
822 if (is_branch (insn
, &immed
, &ra
))
825 if (is_ri10 (insn
, op_ai
, &rt
, &ra
, &immed
)
826 || is_rr (insn
, op_a
, &rt
, &ra
, &rb
)
827 || is_ri10 (insn
, op_lqd
, &rt
, &ra
, &immed
))
829 if (rt
== SPU_RAW_SP_REGNUM
)
838 /* Normal stack frames. */
840 struct spu_unwind_cache
843 CORE_ADDR frame_base
;
844 CORE_ADDR local_base
;
846 struct trad_frame_saved_reg
*saved_regs
;
849 static struct spu_unwind_cache
*
850 spu_frame_unwind_cache (struct frame_info
*this_frame
,
851 void **this_prologue_cache
)
853 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
854 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
855 struct spu_unwind_cache
*info
;
856 struct spu_prologue_data data
;
859 if (*this_prologue_cache
)
860 return *this_prologue_cache
;
862 info
= FRAME_OBSTACK_ZALLOC (struct spu_unwind_cache
);
863 *this_prologue_cache
= info
;
864 info
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
865 info
->frame_base
= 0;
866 info
->local_base
= 0;
868 /* Find the start of the current function, and analyze its prologue. */
869 info
->func
= get_frame_func (this_frame
);
872 /* Fall back to using the current PC as frame ID. */
873 info
->func
= get_frame_pc (this_frame
);
877 spu_analyze_prologue (gdbarch
, info
->func
, get_frame_pc (this_frame
),
880 /* If successful, use prologue analysis data. */
881 if (data
.size
!= -1 && data
.cfa_reg
!= -1)
886 /* Determine CFA via unwound CFA_REG plus CFA_OFFSET. */
887 get_frame_register (this_frame
, data
.cfa_reg
, buf
);
888 cfa
= extract_unsigned_integer (buf
, 4, byte_order
) + data
.cfa_offset
;
890 /* Call-saved register slots. */
891 for (i
= 0; i
< SPU_NUM_GPRS
; i
++)
892 if (i
== SPU_LR_REGNUM
893 || (i
>= SPU_SAVED1_REGNUM
&& i
<= SPU_SAVEDN_REGNUM
))
894 if (data
.reg_offset
[i
] != -1)
895 info
->saved_regs
[i
].addr
= cfa
- data
.reg_offset
[i
];
898 info
->frame_base
= cfa
;
899 info
->local_base
= cfa
- data
.size
;
902 /* Otherwise, fall back to reading the backchain link. */
909 /* Get the backchain. */
910 reg
= get_frame_register_unsigned (this_frame
, SPU_SP_REGNUM
);
911 status
= safe_read_memory_integer (reg
, 4, byte_order
, &backchain
);
913 /* A zero backchain terminates the frame chain. Also, sanity
914 check against the local store size limit. */
915 if (status
&& backchain
> 0 && backchain
< SPU_LS_SIZE
)
917 /* Assume the link register is saved into its slot. */
918 if (backchain
+ 16 < SPU_LS_SIZE
)
919 info
->saved_regs
[SPU_LR_REGNUM
].addr
= backchain
+ 16;
922 info
->frame_base
= backchain
;
923 info
->local_base
= reg
;
927 /* If we didn't find a frame, we cannot determine SP / return address. */
928 if (info
->frame_base
== 0)
931 /* The previous SP is equal to the CFA. */
932 trad_frame_set_value (info
->saved_regs
, SPU_SP_REGNUM
, info
->frame_base
);
934 /* Read full contents of the unwound link register in order to
935 be able to determine the return address. */
936 if (trad_frame_addr_p (info
->saved_regs
, SPU_LR_REGNUM
))
937 target_read_memory (info
->saved_regs
[SPU_LR_REGNUM
].addr
, buf
, 16);
939 get_frame_register (this_frame
, SPU_LR_REGNUM
, buf
);
941 /* Normally, the return address is contained in the slot 0 of the
942 link register, and slots 1-3 are zero. For an overlay return,
943 slot 0 contains the address of the overlay manager return stub,
944 slot 1 contains the partition number of the overlay section to
945 be returned to, and slot 2 contains the return address within
946 that section. Return the latter address in that case. */
947 if (extract_unsigned_integer (buf
+ 8, 4, byte_order
) != 0)
948 trad_frame_set_value (info
->saved_regs
, SPU_PC_REGNUM
,
949 extract_unsigned_integer (buf
+ 8, 4, byte_order
));
951 trad_frame_set_value (info
->saved_regs
, SPU_PC_REGNUM
,
952 extract_unsigned_integer (buf
, 4, byte_order
));
958 spu_frame_this_id (struct frame_info
*this_frame
,
959 void **this_prologue_cache
, struct frame_id
*this_id
)
961 struct spu_unwind_cache
*info
=
962 spu_frame_unwind_cache (this_frame
, this_prologue_cache
);
964 if (info
->frame_base
== 0)
967 *this_id
= frame_id_build (info
->frame_base
, info
->func
);
970 static struct value
*
971 spu_frame_prev_register (struct frame_info
*this_frame
,
972 void **this_prologue_cache
, int regnum
)
974 struct spu_unwind_cache
*info
975 = spu_frame_unwind_cache (this_frame
, this_prologue_cache
);
977 /* Special-case the stack pointer. */
978 if (regnum
== SPU_RAW_SP_REGNUM
)
979 regnum
= SPU_SP_REGNUM
;
981 return trad_frame_get_prev_register (this_frame
, info
->saved_regs
, regnum
);
984 static const struct frame_unwind spu_frame_unwind
= {
987 spu_frame_prev_register
,
989 default_frame_sniffer
993 spu_frame_base_address (struct frame_info
*this_frame
, void **this_cache
)
995 struct spu_unwind_cache
*info
996 = spu_frame_unwind_cache (this_frame
, this_cache
);
997 return info
->local_base
;
1000 static const struct frame_base spu_frame_base
= {
1002 spu_frame_base_address
,
1003 spu_frame_base_address
,
1004 spu_frame_base_address
1008 spu_unwind_pc (struct gdbarch
*gdbarch
, struct frame_info
*next_frame
)
1010 CORE_ADDR pc
= frame_unwind_register_unsigned (next_frame
, SPU_PC_REGNUM
);
1011 /* Mask off interrupt enable bit. */
1016 spu_unwind_sp (struct gdbarch
*gdbarch
, struct frame_info
*next_frame
)
1018 return frame_unwind_register_unsigned (next_frame
, SPU_SP_REGNUM
);
1022 spu_read_pc (struct regcache
*regcache
)
1025 regcache_cooked_read_unsigned (regcache
, SPU_PC_REGNUM
, &pc
);
1026 /* Mask off interrupt enable bit. */
1031 spu_write_pc (struct regcache
*regcache
, CORE_ADDR pc
)
1033 /* Keep interrupt enabled state unchanged. */
1035 regcache_cooked_read_unsigned (regcache
, SPU_PC_REGNUM
, &old_pc
);
1036 regcache_cooked_write_unsigned (regcache
, SPU_PC_REGNUM
,
1037 (pc
& -4) | (old_pc
& 3));
1041 /* Function calling convention. */
1044 spu_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
1050 spu_push_dummy_code (struct gdbarch
*gdbarch
, CORE_ADDR sp
, CORE_ADDR funaddr
,
1051 struct value
**args
, int nargs
, struct type
*value_type
,
1052 CORE_ADDR
*real_pc
, CORE_ADDR
*bp_addr
,
1053 struct regcache
*regcache
)
1055 /* Allocate space sufficient for a breakpoint, keeping the stack aligned. */
1056 sp
= (sp
- 4) & ~15;
1057 /* Store the address of that breakpoint */
1059 /* The call starts at the callee's entry point. */
1066 spu_scalar_value_p (struct type
*type
)
1068 switch (TYPE_CODE (type
))
1071 case TYPE_CODE_ENUM
:
1072 case TYPE_CODE_RANGE
:
1073 case TYPE_CODE_CHAR
:
1074 case TYPE_CODE_BOOL
:
1077 return TYPE_LENGTH (type
) <= 16;
1085 spu_value_to_regcache (struct regcache
*regcache
, int regnum
,
1086 struct type
*type
, const gdb_byte
*in
)
1088 int len
= TYPE_LENGTH (type
);
1090 if (spu_scalar_value_p (type
))
1092 int preferred_slot
= len
< 4 ? 4 - len
: 0;
1093 regcache_cooked_write_part (regcache
, regnum
, preferred_slot
, len
, in
);
1099 regcache_cooked_write (regcache
, regnum
++, in
);
1105 regcache_cooked_write_part (regcache
, regnum
, 0, len
, in
);
1110 spu_regcache_to_value (struct regcache
*regcache
, int regnum
,
1111 struct type
*type
, gdb_byte
*out
)
1113 int len
= TYPE_LENGTH (type
);
1115 if (spu_scalar_value_p (type
))
1117 int preferred_slot
= len
< 4 ? 4 - len
: 0;
1118 regcache_cooked_read_part (regcache
, regnum
, preferred_slot
, len
, out
);
1124 regcache_cooked_read (regcache
, regnum
++, out
);
1130 regcache_cooked_read_part (regcache
, regnum
, 0, len
, out
);
1135 spu_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
1136 struct regcache
*regcache
, CORE_ADDR bp_addr
,
1137 int nargs
, struct value
**args
, CORE_ADDR sp
,
1138 int struct_return
, CORE_ADDR struct_addr
)
1140 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1143 int regnum
= SPU_ARG1_REGNUM
;
1147 /* Set the return address. */
1148 memset (buf
, 0, sizeof buf
);
1149 store_unsigned_integer (buf
, 4, byte_order
, bp_addr
);
1150 regcache_cooked_write (regcache
, SPU_LR_REGNUM
, buf
);
1152 /* If STRUCT_RETURN is true, then the struct return address (in
1153 STRUCT_ADDR) will consume the first argument-passing register.
1154 Both adjust the register count and store that value. */
1157 memset (buf
, 0, sizeof buf
);
1158 store_unsigned_integer (buf
, 4, byte_order
, struct_addr
);
1159 regcache_cooked_write (regcache
, regnum
++, buf
);
1162 /* Fill in argument registers. */
1163 for (i
= 0; i
< nargs
; i
++)
1165 struct value
*arg
= args
[i
];
1166 struct type
*type
= check_typedef (value_type (arg
));
1167 const gdb_byte
*contents
= value_contents (arg
);
1168 int len
= TYPE_LENGTH (type
);
1169 int n_regs
= align_up (len
, 16) / 16;
1171 /* If the argument doesn't wholly fit into registers, it and
1172 all subsequent arguments go to the stack. */
1173 if (regnum
+ n_regs
- 1 > SPU_ARGN_REGNUM
)
1179 spu_value_to_regcache (regcache
, regnum
, type
, contents
);
1183 /* Overflow arguments go to the stack. */
1184 if (stack_arg
!= -1)
1188 /* Allocate all required stack size. */
1189 for (i
= stack_arg
; i
< nargs
; i
++)
1191 struct type
*type
= check_typedef (value_type (args
[i
]));
1192 sp
-= align_up (TYPE_LENGTH (type
), 16);
1195 /* Fill in stack arguments. */
1197 for (i
= stack_arg
; i
< nargs
; i
++)
1199 struct value
*arg
= args
[i
];
1200 struct type
*type
= check_typedef (value_type (arg
));
1201 int len
= TYPE_LENGTH (type
);
1204 if (spu_scalar_value_p (type
))
1205 preferred_slot
= len
< 4 ? 4 - len
: 0;
1209 target_write_memory (ap
+ preferred_slot
, value_contents (arg
), len
);
1210 ap
+= align_up (TYPE_LENGTH (type
), 16);
1214 /* Allocate stack frame header. */
1217 /* Store stack back chain. */
1218 regcache_cooked_read (regcache
, SPU_RAW_SP_REGNUM
, buf
);
1219 target_write_memory (sp
, buf
, 16);
1221 /* Finally, update all slots of the SP register. */
1222 sp_delta
= sp
- extract_unsigned_integer (buf
, 4, byte_order
);
1223 for (i
= 0; i
< 4; i
++)
1225 CORE_ADDR sp_slot
= extract_unsigned_integer (buf
+ 4*i
, 4, byte_order
);
1226 store_unsigned_integer (buf
+ 4*i
, 4, byte_order
, sp_slot
+ sp_delta
);
1228 regcache_cooked_write (regcache
, SPU_RAW_SP_REGNUM
, buf
);
1233 static struct frame_id
1234 spu_dummy_id (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1236 CORE_ADDR pc
= get_frame_register_unsigned (this_frame
, SPU_PC_REGNUM
);
1237 CORE_ADDR sp
= get_frame_register_unsigned (this_frame
, SPU_SP_REGNUM
);
1238 return frame_id_build (sp
, pc
& -4);
1241 /* Function return value access. */
1243 static enum return_value_convention
1244 spu_return_value (struct gdbarch
*gdbarch
, struct type
*func_type
,
1245 struct type
*type
, struct regcache
*regcache
,
1246 gdb_byte
*out
, const gdb_byte
*in
)
1248 enum return_value_convention rvc
;
1250 if (TYPE_LENGTH (type
) <= (SPU_ARGN_REGNUM
- SPU_ARG1_REGNUM
+ 1) * 16)
1251 rvc
= RETURN_VALUE_REGISTER_CONVENTION
;
1253 rvc
= RETURN_VALUE_STRUCT_CONVENTION
;
1259 case RETURN_VALUE_REGISTER_CONVENTION
:
1260 spu_value_to_regcache (regcache
, SPU_ARG1_REGNUM
, type
, in
);
1263 case RETURN_VALUE_STRUCT_CONVENTION
:
1264 error ("Cannot set function return value.");
1272 case RETURN_VALUE_REGISTER_CONVENTION
:
1273 spu_regcache_to_value (regcache
, SPU_ARG1_REGNUM
, type
, out
);
1276 case RETURN_VALUE_STRUCT_CONVENTION
:
1277 error ("Function return value unknown.");
1288 static const gdb_byte
*
1289 spu_breakpoint_from_pc (struct gdbarch
*gdbarch
, CORE_ADDR
* pcptr
, int *lenptr
)
1291 static const gdb_byte breakpoint
[] = { 0x00, 0x00, 0x3f, 0xff };
1293 *lenptr
= sizeof breakpoint
;
1298 /* Software single-stepping support. */
1301 spu_software_single_step (struct frame_info
*frame
)
1303 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
1304 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1305 CORE_ADDR pc
, next_pc
;
1310 pc
= get_frame_pc (frame
);
1312 if (target_read_memory (pc
, buf
, 4))
1314 insn
= extract_unsigned_integer (buf
, 4, byte_order
);
1316 /* Next sequential instruction is at PC + 4, except if the current
1317 instruction is a PPE-assisted call, in which case it is at PC + 8.
1318 Wrap around LS limit to be on the safe side. */
1319 if ((insn
& 0xffffff00) == 0x00002100)
1320 next_pc
= (pc
+ 8) & (SPU_LS_SIZE
- 1);
1322 next_pc
= (pc
+ 4) & (SPU_LS_SIZE
- 1);
1324 insert_single_step_breakpoint (gdbarch
, next_pc
);
1326 if (is_branch (insn
, &offset
, ®
))
1328 CORE_ADDR target
= offset
;
1330 if (reg
== SPU_PC_REGNUM
)
1334 get_frame_register_bytes (frame
, reg
, 0, 4, buf
);
1335 target
+= extract_unsigned_integer (buf
, 4, byte_order
) & -4;
1338 target
= target
& (SPU_LS_SIZE
- 1);
1339 if (target
!= next_pc
)
1340 insert_single_step_breakpoint (gdbarch
, target
);
1347 /* Longjmp support. */
1350 spu_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
1352 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
1353 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1357 /* Jump buffer is pointed to by the argument register $r3. */
1358 get_frame_register_bytes (frame
, SPU_ARG1_REGNUM
, 0, 4, buf
);
1359 jb_addr
= extract_unsigned_integer (buf
, 4, byte_order
);
1360 if (target_read_memory (jb_addr
, buf
, 4))
1363 *pc
= extract_unsigned_integer (buf
, 4, byte_order
);
/* Target overlays for the SPU overlay manager.

   See the documentation of simple_overlay_update for how the
   interface is supposed to work.

   Data structures used by the overlay manager:

   struct ovly_table
     {
        u32 vma;
        u32 size;
        u32 pos;
        u32 buf;
     } _ovly_table[];   -- one entry per overlay section

   struct ovly_buf_table
     {
        u32 mapped;
     } _ovly_buf_table[];  -- one entry per overlay buffer

   _ovly_table should never change.

   Both tables are aligned to a 16-byte boundary, the symbols _ovly_table
   and _ovly_buf_table are of type STT_OBJECT and their size set to the size
   of the respective array. buf in _ovly_table is an index into _ovly_buf_table.

   mapped is an index into _ovly_table. Both the mapped and buf indices start
   from one to reference the first entry in their respective tables. */
1397 /* Using the per-objfile private data mechanism, we store for each
1398 objfile an array of "struct spu_overlay_table" structures, one
1399 for each obj_section of the objfile. This structure holds two
1400 fields, MAPPED_PTR and MAPPED_VAL. If MAPPED_PTR is zero, this
1401 is *not* an overlay section. If it is non-zero, it represents
1402 a target address. The overlay section is mapped iff the target
1403 integer at this location equals MAPPED_VAL. */
1405 static const struct objfile_data
*spu_overlay_data
;
1407 struct spu_overlay_table
1409 CORE_ADDR mapped_ptr
;
1410 CORE_ADDR mapped_val
;
1413 /* Retrieve the overlay table for OBJFILE. If not already cached, read
1414 the _ovly_table data structure from the target and initialize the
1415 spu_overlay_table data structure from it. */
1416 static struct spu_overlay_table
*
1417 spu_get_overlay_table (struct objfile
*objfile
)
1419 enum bfd_endian byte_order
= bfd_big_endian (objfile
->obfd
)?
1420 BFD_ENDIAN_BIG
: BFD_ENDIAN_LITTLE
;
1421 struct minimal_symbol
*ovly_table_msym
, *ovly_buf_table_msym
;
1422 CORE_ADDR ovly_table_base
, ovly_buf_table_base
;
1423 unsigned ovly_table_size
, ovly_buf_table_size
;
1424 struct spu_overlay_table
*tbl
;
1425 struct obj_section
*osect
;
1429 tbl
= objfile_data (objfile
, spu_overlay_data
);
1433 ovly_table_msym
= lookup_minimal_symbol ("_ovly_table", NULL
, objfile
);
1434 if (!ovly_table_msym
)
1437 ovly_buf_table_msym
= lookup_minimal_symbol ("_ovly_buf_table", NULL
, objfile
);
1438 if (!ovly_buf_table_msym
)
1441 ovly_table_base
= SYMBOL_VALUE_ADDRESS (ovly_table_msym
);
1442 ovly_table_size
= MSYMBOL_SIZE (ovly_table_msym
);
1444 ovly_buf_table_base
= SYMBOL_VALUE_ADDRESS (ovly_buf_table_msym
);
1445 ovly_buf_table_size
= MSYMBOL_SIZE (ovly_buf_table_msym
);
1447 ovly_table
= xmalloc (ovly_table_size
);
1448 read_memory (ovly_table_base
, ovly_table
, ovly_table_size
);
1450 tbl
= OBSTACK_CALLOC (&objfile
->objfile_obstack
,
1451 objfile
->sections_end
- objfile
->sections
,
1452 struct spu_overlay_table
);
1454 for (i
= 0; i
< ovly_table_size
/ 16; i
++)
1456 CORE_ADDR vma
= extract_unsigned_integer (ovly_table
+ 16*i
+ 0,
1458 CORE_ADDR size
= extract_unsigned_integer (ovly_table
+ 16*i
+ 4,
1460 CORE_ADDR pos
= extract_unsigned_integer (ovly_table
+ 16*i
+ 8,
1462 CORE_ADDR buf
= extract_unsigned_integer (ovly_table
+ 16*i
+ 12,
1465 if (buf
== 0 || (buf
- 1) * 4 >= ovly_buf_table_size
)
1468 ALL_OBJFILE_OSECTIONS (objfile
, osect
)
1469 if (vma
== bfd_section_vma (objfile
->obfd
, osect
->the_bfd_section
)
1470 && pos
== osect
->the_bfd_section
->filepos
)
1472 int ndx
= osect
- objfile
->sections
;
1473 tbl
[ndx
].mapped_ptr
= ovly_buf_table_base
+ (buf
- 1) * 4;
1474 tbl
[ndx
].mapped_val
= i
+ 1;
1480 set_objfile_data (objfile
, spu_overlay_data
, tbl
);
1484 /* Read _ovly_buf_table entry from the target to dermine whether
1485 OSECT is currently mapped, and update the mapped state. */
1487 spu_overlay_update_osect (struct obj_section
*osect
)
1489 enum bfd_endian byte_order
= bfd_big_endian (osect
->objfile
->obfd
)?
1490 BFD_ENDIAN_BIG
: BFD_ENDIAN_LITTLE
;
1491 struct spu_overlay_table
*ovly_table
;
1494 ovly_table
= spu_get_overlay_table (osect
->objfile
);
1498 ovly_table
+= osect
- osect
->objfile
->sections
;
1499 if (ovly_table
->mapped_ptr
== 0)
1502 val
= read_memory_unsigned_integer (ovly_table
->mapped_ptr
, 4, byte_order
);
1503 osect
->ovly_mapped
= (val
== ovly_table
->mapped_val
);
1506 /* If OSECT is NULL, then update all sections' mapped state.
1507 If OSECT is non-NULL, then update only OSECT's mapped state. */
1509 spu_overlay_update (struct obj_section
*osect
)
1511 /* Just one section. */
1513 spu_overlay_update_osect (osect
);
1518 struct objfile
*objfile
;
1520 ALL_OBJSECTIONS (objfile
, osect
)
1521 if (section_is_overlay (osect
))
1522 spu_overlay_update_osect (osect
);
1526 /* Whenever a new objfile is loaded, read the target's _ovly_table.
1527 If there is one, go through all sections and make sure for non-
1528 overlay sections LMA equals VMA, while for overlay sections LMA
1529 is larger than local store size. */
1531 spu_overlay_new_objfile (struct objfile
*objfile
)
1533 struct spu_overlay_table
*ovly_table
;
1534 struct obj_section
*osect
;
1536 /* If we've already touched this file, do nothing. */
1537 if (!objfile
|| objfile_data (objfile
, spu_overlay_data
) != NULL
)
1540 /* Consider only SPU objfiles. */
1541 if (bfd_get_arch (objfile
->obfd
) != bfd_arch_spu
)
1544 /* Check if this objfile has overlays. */
1545 ovly_table
= spu_get_overlay_table (objfile
);
1549 /* Now go and fiddle with all the LMAs. */
1550 ALL_OBJFILE_OSECTIONS (objfile
, osect
)
1552 bfd
*obfd
= objfile
->obfd
;
1553 asection
*bsect
= osect
->the_bfd_section
;
1554 int ndx
= osect
- objfile
->sections
;
1556 if (ovly_table
[ndx
].mapped_ptr
== 0)
1557 bfd_section_lma (obfd
, bsect
) = bfd_section_vma (obfd
, bsect
);
1559 bfd_section_lma (obfd
, bsect
) = bsect
->filepos
+ SPU_LS_SIZE
;
1564 /* "info spu" commands. */
1567 info_spu_event_command (char *args
, int from_tty
)
1569 struct frame_info
*frame
= get_selected_frame (NULL
);
1570 ULONGEST event_status
= 0;
1571 ULONGEST event_mask
= 0;
1572 struct cleanup
*chain
;
1578 if (gdbarch_bfd_arch_info (get_frame_arch (frame
))->arch
!= bfd_arch_spu
)
1579 error (_("\"info spu\" is only supported on the SPU architecture."));
1581 id
= get_frame_register_unsigned (frame
, SPU_ID_REGNUM
);
1583 xsnprintf (annex
, sizeof annex
, "%d/event_status", id
);
1584 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
1585 buf
, 0, (sizeof (buf
) - 1));
1587 error (_("Could not read event_status."));
1589 event_status
= strtoulst (buf
, NULL
, 16);
1591 xsnprintf (annex
, sizeof annex
, "%d/event_mask", id
);
1592 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
1593 buf
, 0, (sizeof (buf
) - 1));
1595 error (_("Could not read event_mask."));
1597 event_mask
= strtoulst (buf
, NULL
, 16);
1599 chain
= make_cleanup_ui_out_tuple_begin_end (uiout
, "SPUInfoEvent");
1601 if (ui_out_is_mi_like_p (uiout
))
1603 ui_out_field_fmt (uiout
, "event_status",
1604 "0x%s", phex_nz (event_status
, 4));
1605 ui_out_field_fmt (uiout
, "event_mask",
1606 "0x%s", phex_nz (event_mask
, 4));
1610 printf_filtered (_("Event Status 0x%s\n"), phex (event_status
, 4));
1611 printf_filtered (_("Event Mask 0x%s\n"), phex (event_mask
, 4));
1614 do_cleanups (chain
);
1618 info_spu_signal_command (char *args
, int from_tty
)
1620 struct frame_info
*frame
= get_selected_frame (NULL
);
1621 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
1622 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1623 ULONGEST signal1
= 0;
1624 ULONGEST signal1_type
= 0;
1625 int signal1_pending
= 0;
1626 ULONGEST signal2
= 0;
1627 ULONGEST signal2_type
= 0;
1628 int signal2_pending
= 0;
1629 struct cleanup
*chain
;
1635 if (gdbarch_bfd_arch_info (gdbarch
)->arch
!= bfd_arch_spu
)
1636 error (_("\"info spu\" is only supported on the SPU architecture."));
1638 id
= get_frame_register_unsigned (frame
, SPU_ID_REGNUM
);
1640 xsnprintf (annex
, sizeof annex
, "%d/signal1", id
);
1641 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
, buf
, 0, 4);
1643 error (_("Could not read signal1."));
1646 signal1
= extract_unsigned_integer (buf
, 4, byte_order
);
1647 signal1_pending
= 1;
1650 xsnprintf (annex
, sizeof annex
, "%d/signal1_type", id
);
1651 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
1652 buf
, 0, (sizeof (buf
) - 1));
1654 error (_("Could not read signal1_type."));
1656 signal1_type
= strtoulst (buf
, NULL
, 16);
1658 xsnprintf (annex
, sizeof annex
, "%d/signal2", id
);
1659 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
, buf
, 0, 4);
1661 error (_("Could not read signal2."));
1664 signal2
= extract_unsigned_integer (buf
, 4, byte_order
);
1665 signal2_pending
= 1;
1668 xsnprintf (annex
, sizeof annex
, "%d/signal2_type", id
);
1669 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
1670 buf
, 0, (sizeof (buf
) - 1));
1672 error (_("Could not read signal2_type."));
1674 signal2_type
= strtoulst (buf
, NULL
, 16);
1676 chain
= make_cleanup_ui_out_tuple_begin_end (uiout
, "SPUInfoSignal");
1678 if (ui_out_is_mi_like_p (uiout
))
1680 ui_out_field_int (uiout
, "signal1_pending", signal1_pending
);
1681 ui_out_field_fmt (uiout
, "signal1", "0x%s", phex_nz (signal1
, 4));
1682 ui_out_field_int (uiout
, "signal1_type", signal1_type
);
1683 ui_out_field_int (uiout
, "signal2_pending", signal2_pending
);
1684 ui_out_field_fmt (uiout
, "signal2", "0x%s", phex_nz (signal2
, 4));
1685 ui_out_field_int (uiout
, "signal2_type", signal2_type
);
1689 if (signal1_pending
)
1690 printf_filtered (_("Signal 1 control word 0x%s "), phex (signal1
, 4));
1692 printf_filtered (_("Signal 1 not pending "));
1695 printf_filtered (_("(Type Or)\n"));
1697 printf_filtered (_("(Type Overwrite)\n"));
1699 if (signal2_pending
)
1700 printf_filtered (_("Signal 2 control word 0x%s "), phex (signal2
, 4));
1702 printf_filtered (_("Signal 2 not pending "));
1705 printf_filtered (_("(Type Or)\n"));
1707 printf_filtered (_("(Type Overwrite)\n"));
1710 do_cleanups (chain
);
1714 info_spu_mailbox_list (gdb_byte
*buf
, int nr
, enum bfd_endian byte_order
,
1715 const char *field
, const char *msg
)
1717 struct cleanup
*chain
;
1723 chain
= make_cleanup_ui_out_table_begin_end (uiout
, 1, nr
, "mbox");
1725 ui_out_table_header (uiout
, 32, ui_left
, field
, msg
);
1726 ui_out_table_body (uiout
);
1728 for (i
= 0; i
< nr
; i
++)
1730 struct cleanup
*val_chain
;
1732 val_chain
= make_cleanup_ui_out_tuple_begin_end (uiout
, "mbox");
1733 val
= extract_unsigned_integer (buf
+ 4*i
, 4, byte_order
);
1734 ui_out_field_fmt (uiout
, field
, "0x%s", phex (val
, 4));
1735 do_cleanups (val_chain
);
1737 if (!ui_out_is_mi_like_p (uiout
))
1738 printf_filtered ("\n");
1741 do_cleanups (chain
);
1745 info_spu_mailbox_command (char *args
, int from_tty
)
1747 struct frame_info
*frame
= get_selected_frame (NULL
);
1748 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
1749 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1750 struct cleanup
*chain
;
1756 if (gdbarch_bfd_arch_info (gdbarch
)->arch
!= bfd_arch_spu
)
1757 error (_("\"info spu\" is only supported on the SPU architecture."));
1759 id
= get_frame_register_unsigned (frame
, SPU_ID_REGNUM
);
1761 chain
= make_cleanup_ui_out_tuple_begin_end (uiout
, "SPUInfoMailbox");
1763 xsnprintf (annex
, sizeof annex
, "%d/mbox_info", id
);
1764 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
1765 buf
, 0, sizeof buf
);
1767 error (_("Could not read mbox_info."));
1769 info_spu_mailbox_list (buf
, len
/ 4, byte_order
,
1770 "mbox", "SPU Outbound Mailbox");
1772 xsnprintf (annex
, sizeof annex
, "%d/ibox_info", id
);
1773 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
1774 buf
, 0, sizeof buf
);
1776 error (_("Could not read ibox_info."));
1778 info_spu_mailbox_list (buf
, len
/ 4, byte_order
,
1779 "ibox", "SPU Outbound Interrupt Mailbox");
1781 xsnprintf (annex
, sizeof annex
, "%d/wbox_info", id
);
1782 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
1783 buf
, 0, sizeof buf
);
1785 error (_("Could not read wbox_info."));
1787 info_spu_mailbox_list (buf
, len
/ 4, byte_order
,
1788 "wbox", "SPU Inbound Mailbox");
1790 do_cleanups (chain
);
1794 spu_mfc_get_bitfield (ULONGEST word
, int first
, int last
)
1796 ULONGEST mask
= ~(~(ULONGEST
)0 << (last
- first
+ 1));
1797 return (word
>> (63 - last
)) & mask
;
1801 info_spu_dma_cmdlist (gdb_byte
*buf
, int nr
, enum bfd_endian byte_order
)
1803 static char *spu_mfc_opcode
[256] =
1805 /* 00 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
1806 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
1807 /* 10 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
1808 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
1809 /* 20 */ "put", "putb", "putf", NULL
, "putl", "putlb", "putlf", NULL
,
1810 "puts", "putbs", "putfs", NULL
, NULL
, NULL
, NULL
, NULL
,
1811 /* 30 */ "putr", "putrb", "putrf", NULL
, "putrl", "putrlb", "putrlf", NULL
,
1812 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
1813 /* 40 */ "get", "getb", "getf", NULL
, "getl", "getlb", "getlf", NULL
,
1814 "gets", "getbs", "getfs", NULL
, NULL
, NULL
, NULL
, NULL
,
1815 /* 50 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
1816 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
1817 /* 60 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
1818 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
1819 /* 70 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
1820 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
1821 /* 80 */ "sdcrt", "sdcrtst", NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
1822 NULL
, "sdcrz", NULL
, NULL
, NULL
, "sdcrst", NULL
, "sdcrf",
1823 /* 90 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
1824 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
1825 /* a0 */ "sndsig", "sndsigb", "sndsigf", NULL
, NULL
, NULL
, NULL
, NULL
,
1826 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
1827 /* b0 */ "putlluc", NULL
, NULL
, NULL
, "putllc", NULL
, NULL
, NULL
,
1828 "putqlluc", NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
1829 /* c0 */ "barrier", NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
1830 "mfceieio", NULL
, NULL
, NULL
, "mfcsync", NULL
, NULL
, NULL
,
1831 /* d0 */ "getllar", NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
1832 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
1833 /* e0 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
1834 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
1835 /* f0 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
1836 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
1839 int *seq
= alloca (nr
* sizeof (int));
1841 struct cleanup
*chain
;
1845 /* Determine sequence in which to display (valid) entries. */
1846 for (i
= 0; i
< nr
; i
++)
1848 /* Search for the first valid entry all of whose
1849 dependencies are met. */
1850 for (j
= 0; j
< nr
; j
++)
1852 ULONGEST mfc_cq_dw3
;
1853 ULONGEST dependencies
;
1855 if (done
& (1 << (nr
- 1 - j
)))
1859 = extract_unsigned_integer (buf
+ 32*j
+ 24,8, byte_order
);
1860 if (!spu_mfc_get_bitfield (mfc_cq_dw3
, 16, 16))
1863 dependencies
= spu_mfc_get_bitfield (mfc_cq_dw3
, 0, nr
- 1);
1864 if ((dependencies
& done
) != dependencies
)
1868 done
|= 1 << (nr
- 1 - j
);
1879 chain
= make_cleanup_ui_out_table_begin_end (uiout
, 10, nr
, "dma_cmd");
1881 ui_out_table_header (uiout
, 7, ui_left
, "opcode", "Opcode");
1882 ui_out_table_header (uiout
, 3, ui_left
, "tag", "Tag");
1883 ui_out_table_header (uiout
, 3, ui_left
, "tid", "TId");
1884 ui_out_table_header (uiout
, 3, ui_left
, "rid", "RId");
1885 ui_out_table_header (uiout
, 18, ui_left
, "ea", "EA");
1886 ui_out_table_header (uiout
, 7, ui_left
, "lsa", "LSA");
1887 ui_out_table_header (uiout
, 7, ui_left
, "size", "Size");
1888 ui_out_table_header (uiout
, 7, ui_left
, "lstaddr", "LstAddr");
1889 ui_out_table_header (uiout
, 7, ui_left
, "lstsize", "LstSize");
1890 ui_out_table_header (uiout
, 1, ui_left
, "error_p", "E");
1892 ui_out_table_body (uiout
);
1894 for (i
= 0; i
< nr
; i
++)
1896 struct cleanup
*cmd_chain
;
1897 ULONGEST mfc_cq_dw0
;
1898 ULONGEST mfc_cq_dw1
;
1899 ULONGEST mfc_cq_dw2
;
1900 int mfc_cmd_opcode
, mfc_cmd_tag
, rclass_id
, tclass_id
;
1901 int lsa
, size
, list_lsa
, list_size
, mfc_lsa
, mfc_size
;
1903 int list_valid_p
, noop_valid_p
, qw_valid_p
, ea_valid_p
, cmd_error_p
;
1905 /* Decode contents of MFC Command Queue Context Save/Restore Registers.
1906 See "Cell Broadband Engine Registers V1.3", section 3.3.2.1. */
1909 = extract_unsigned_integer (buf
+ 32*seq
[i
], 8, byte_order
);
1911 = extract_unsigned_integer (buf
+ 32*seq
[i
] + 8, 8, byte_order
);
1913 = extract_unsigned_integer (buf
+ 32*seq
[i
] + 16, 8, byte_order
);
1915 list_lsa
= spu_mfc_get_bitfield (mfc_cq_dw0
, 0, 14);
1916 list_size
= spu_mfc_get_bitfield (mfc_cq_dw0
, 15, 26);
1917 mfc_cmd_opcode
= spu_mfc_get_bitfield (mfc_cq_dw0
, 27, 34);
1918 mfc_cmd_tag
= spu_mfc_get_bitfield (mfc_cq_dw0
, 35, 39);
1919 list_valid_p
= spu_mfc_get_bitfield (mfc_cq_dw0
, 40, 40);
1920 rclass_id
= spu_mfc_get_bitfield (mfc_cq_dw0
, 41, 43);
1921 tclass_id
= spu_mfc_get_bitfield (mfc_cq_dw0
, 44, 46);
1923 mfc_ea
= spu_mfc_get_bitfield (mfc_cq_dw1
, 0, 51) << 12
1924 | spu_mfc_get_bitfield (mfc_cq_dw2
, 25, 36);
1926 mfc_lsa
= spu_mfc_get_bitfield (mfc_cq_dw2
, 0, 13);
1927 mfc_size
= spu_mfc_get_bitfield (mfc_cq_dw2
, 14, 24);
1928 noop_valid_p
= spu_mfc_get_bitfield (mfc_cq_dw2
, 37, 37);
1929 qw_valid_p
= spu_mfc_get_bitfield (mfc_cq_dw2
, 38, 38);
1930 ea_valid_p
= spu_mfc_get_bitfield (mfc_cq_dw2
, 39, 39);
1931 cmd_error_p
= spu_mfc_get_bitfield (mfc_cq_dw2
, 40, 40);
1933 cmd_chain
= make_cleanup_ui_out_tuple_begin_end (uiout
, "cmd");
1935 if (spu_mfc_opcode
[mfc_cmd_opcode
])
1936 ui_out_field_string (uiout
, "opcode", spu_mfc_opcode
[mfc_cmd_opcode
]);
1938 ui_out_field_int (uiout
, "opcode", mfc_cmd_opcode
);
1940 ui_out_field_int (uiout
, "tag", mfc_cmd_tag
);
1941 ui_out_field_int (uiout
, "tid", tclass_id
);
1942 ui_out_field_int (uiout
, "rid", rclass_id
);
1945 ui_out_field_fmt (uiout
, "ea", "0x%s", phex (mfc_ea
, 8));
1947 ui_out_field_skip (uiout
, "ea");
1949 ui_out_field_fmt (uiout
, "lsa", "0x%05x", mfc_lsa
<< 4);
1951 ui_out_field_fmt (uiout
, "size", "0x%05x", mfc_size
<< 4);
1953 ui_out_field_fmt (uiout
, "size", "0x%05x", mfc_size
);
1957 ui_out_field_fmt (uiout
, "lstaddr", "0x%05x", list_lsa
<< 3);
1958 ui_out_field_fmt (uiout
, "lstsize", "0x%05x", list_size
<< 3);
1962 ui_out_field_skip (uiout
, "lstaddr");
1963 ui_out_field_skip (uiout
, "lstsize");
1967 ui_out_field_string (uiout
, "error_p", "*");
1969 ui_out_field_skip (uiout
, "error_p");
1971 do_cleanups (cmd_chain
);
1973 if (!ui_out_is_mi_like_p (uiout
))
1974 printf_filtered ("\n");
1977 do_cleanups (chain
);
1981 info_spu_dma_command (char *args
, int from_tty
)
1983 struct frame_info
*frame
= get_selected_frame (NULL
);
1984 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
1985 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1986 ULONGEST dma_info_type
;
1987 ULONGEST dma_info_mask
;
1988 ULONGEST dma_info_status
;
1989 ULONGEST dma_info_stall_and_notify
;
1990 ULONGEST dma_info_atomic_command_status
;
1991 struct cleanup
*chain
;
1997 if (gdbarch_bfd_arch_info (get_frame_arch (frame
))->arch
!= bfd_arch_spu
)
1998 error (_("\"info spu\" is only supported on the SPU architecture."));
2000 id
= get_frame_register_unsigned (frame
, SPU_ID_REGNUM
);
2002 xsnprintf (annex
, sizeof annex
, "%d/dma_info", id
);
2003 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
2004 buf
, 0, 40 + 16 * 32);
2006 error (_("Could not read dma_info."));
2009 = extract_unsigned_integer (buf
, 8, byte_order
);
2011 = extract_unsigned_integer (buf
+ 8, 8, byte_order
);
2013 = extract_unsigned_integer (buf
+ 16, 8, byte_order
);
2014 dma_info_stall_and_notify
2015 = extract_unsigned_integer (buf
+ 24, 8, byte_order
);
2016 dma_info_atomic_command_status
2017 = extract_unsigned_integer (buf
+ 32, 8, byte_order
);
2019 chain
= make_cleanup_ui_out_tuple_begin_end (uiout
, "SPUInfoDMA");
2021 if (ui_out_is_mi_like_p (uiout
))
2023 ui_out_field_fmt (uiout
, "dma_info_type", "0x%s",
2024 phex_nz (dma_info_type
, 4));
2025 ui_out_field_fmt (uiout
, "dma_info_mask", "0x%s",
2026 phex_nz (dma_info_mask
, 4));
2027 ui_out_field_fmt (uiout
, "dma_info_status", "0x%s",
2028 phex_nz (dma_info_status
, 4));
2029 ui_out_field_fmt (uiout
, "dma_info_stall_and_notify", "0x%s",
2030 phex_nz (dma_info_stall_and_notify
, 4));
2031 ui_out_field_fmt (uiout
, "dma_info_atomic_command_status", "0x%s",
2032 phex_nz (dma_info_atomic_command_status
, 4));
2036 const char *query_msg
= _("no query pending");
2038 if (dma_info_type
& 4)
2039 switch (dma_info_type
& 3)
2041 case 1: query_msg
= _("'any' query pending"); break;
2042 case 2: query_msg
= _("'all' query pending"); break;
2043 default: query_msg
= _("undefined query type"); break;
2046 printf_filtered (_("Tag-Group Status 0x%s\n"),
2047 phex (dma_info_status
, 4));
2048 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
2049 phex (dma_info_mask
, 4), query_msg
);
2050 printf_filtered (_("Stall-and-Notify 0x%s\n"),
2051 phex (dma_info_stall_and_notify
, 4));
2052 printf_filtered (_("Atomic Cmd Status 0x%s\n"),
2053 phex (dma_info_atomic_command_status
, 4));
2054 printf_filtered ("\n");
2057 info_spu_dma_cmdlist (buf
+ 40, 16, byte_order
);
2058 do_cleanups (chain
);
2062 info_spu_proxydma_command (char *args
, int from_tty
)
2064 struct frame_info
*frame
= get_selected_frame (NULL
);
2065 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2066 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2067 ULONGEST dma_info_type
;
2068 ULONGEST dma_info_mask
;
2069 ULONGEST dma_info_status
;
2070 struct cleanup
*chain
;
2076 if (gdbarch_bfd_arch_info (gdbarch
)->arch
!= bfd_arch_spu
)
2077 error (_("\"info spu\" is only supported on the SPU architecture."));
2079 id
= get_frame_register_unsigned (frame
, SPU_ID_REGNUM
);
2081 xsnprintf (annex
, sizeof annex
, "%d/proxydma_info", id
);
2082 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
2083 buf
, 0, 24 + 8 * 32);
2085 error (_("Could not read proxydma_info."));
2087 dma_info_type
= extract_unsigned_integer (buf
, 8, byte_order
);
2088 dma_info_mask
= extract_unsigned_integer (buf
+ 8, 8, byte_order
);
2089 dma_info_status
= extract_unsigned_integer (buf
+ 16, 8, byte_order
);
2091 chain
= make_cleanup_ui_out_tuple_begin_end (uiout
, "SPUInfoProxyDMA");
2093 if (ui_out_is_mi_like_p (uiout
))
2095 ui_out_field_fmt (uiout
, "proxydma_info_type", "0x%s",
2096 phex_nz (dma_info_type
, 4));
2097 ui_out_field_fmt (uiout
, "proxydma_info_mask", "0x%s",
2098 phex_nz (dma_info_mask
, 4));
2099 ui_out_field_fmt (uiout
, "proxydma_info_status", "0x%s",
2100 phex_nz (dma_info_status
, 4));
2104 const char *query_msg
;
2106 switch (dma_info_type
& 3)
2108 case 0: query_msg
= _("no query pending"); break;
2109 case 1: query_msg
= _("'any' query pending"); break;
2110 case 2: query_msg
= _("'all' query pending"); break;
2111 default: query_msg
= _("undefined query type"); break;
2114 printf_filtered (_("Tag-Group Status 0x%s\n"),
2115 phex (dma_info_status
, 4));
2116 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
2117 phex (dma_info_mask
, 4), query_msg
);
2118 printf_filtered ("\n");
2121 info_spu_dma_cmdlist (buf
+ 24, 8, byte_order
);
2122 do_cleanups (chain
);
2126 info_spu_command (char *args
, int from_tty
)
2128 printf_unfiltered (_("\"info spu\" must be followed by the name of an SPU facility.\n"));
2129 help_list (infospucmdlist
, "info spu ", -1, gdb_stdout
);
2133 /* Set up gdbarch struct. */
2135 static struct gdbarch
*
2136 spu_gdbarch_init (struct gdbarch_info info
, struct gdbarch_list
*arches
)
2138 struct gdbarch
*gdbarch
;
2139 struct gdbarch_tdep
*tdep
;
2141 /* Find a candidate among the list of pre-declared architectures. */
2142 arches
= gdbarch_list_lookup_by_info (arches
, &info
);
2144 return arches
->gdbarch
;
2147 if (info
.bfd_arch_info
->mach
!= bfd_mach_spu
)
2150 /* Yes, create a new architecture. */
2151 tdep
= XCALLOC (1, struct gdbarch_tdep
);
2152 gdbarch
= gdbarch_alloc (&info
, tdep
);
2155 set_gdbarch_print_insn (gdbarch
, print_insn_spu
);
2158 set_gdbarch_num_regs (gdbarch
, SPU_NUM_REGS
);
2159 set_gdbarch_num_pseudo_regs (gdbarch
, SPU_NUM_PSEUDO_REGS
);
2160 set_gdbarch_sp_regnum (gdbarch
, SPU_SP_REGNUM
);
2161 set_gdbarch_pc_regnum (gdbarch
, SPU_PC_REGNUM
);
2162 set_gdbarch_read_pc (gdbarch
, spu_read_pc
);
2163 set_gdbarch_write_pc (gdbarch
, spu_write_pc
);
2164 set_gdbarch_register_name (gdbarch
, spu_register_name
);
2165 set_gdbarch_register_type (gdbarch
, spu_register_type
);
2166 set_gdbarch_pseudo_register_read (gdbarch
, spu_pseudo_register_read
);
2167 set_gdbarch_pseudo_register_write (gdbarch
, spu_pseudo_register_write
);
2168 set_gdbarch_value_from_register (gdbarch
, spu_value_from_register
);
2169 set_gdbarch_register_reggroup_p (gdbarch
, spu_register_reggroup_p
);
2172 set_gdbarch_char_signed (gdbarch
, 0);
2173 set_gdbarch_ptr_bit (gdbarch
, 32);
2174 set_gdbarch_addr_bit (gdbarch
, 32);
2175 set_gdbarch_short_bit (gdbarch
, 16);
2176 set_gdbarch_int_bit (gdbarch
, 32);
2177 set_gdbarch_long_bit (gdbarch
, 32);
2178 set_gdbarch_long_long_bit (gdbarch
, 64);
2179 set_gdbarch_float_bit (gdbarch
, 32);
2180 set_gdbarch_double_bit (gdbarch
, 64);
2181 set_gdbarch_long_double_bit (gdbarch
, 64);
2182 set_gdbarch_float_format (gdbarch
, floatformats_ieee_single
);
2183 set_gdbarch_double_format (gdbarch
, floatformats_ieee_double
);
2184 set_gdbarch_long_double_format (gdbarch
, floatformats_ieee_double
);
2186 /* Address conversion. */
2187 set_gdbarch_pointer_to_address (gdbarch
, spu_pointer_to_address
);
2188 set_gdbarch_integer_to_address (gdbarch
, spu_integer_to_address
);
2190 /* Inferior function calls. */
2191 set_gdbarch_call_dummy_location (gdbarch
, ON_STACK
);
2192 set_gdbarch_frame_align (gdbarch
, spu_frame_align
);
2193 set_gdbarch_frame_red_zone_size (gdbarch
, 2000);
2194 set_gdbarch_push_dummy_code (gdbarch
, spu_push_dummy_code
);
2195 set_gdbarch_push_dummy_call (gdbarch
, spu_push_dummy_call
);
2196 set_gdbarch_dummy_id (gdbarch
, spu_dummy_id
);
2197 set_gdbarch_return_value (gdbarch
, spu_return_value
);
2199 /* Frame handling. */
2200 set_gdbarch_inner_than (gdbarch
, core_addr_lessthan
);
2201 frame_unwind_append_unwinder (gdbarch
, &spu_frame_unwind
);
2202 frame_base_set_default (gdbarch
, &spu_frame_base
);
2203 set_gdbarch_unwind_pc (gdbarch
, spu_unwind_pc
);
2204 set_gdbarch_unwind_sp (gdbarch
, spu_unwind_sp
);
2205 set_gdbarch_virtual_frame_pointer (gdbarch
, spu_virtual_frame_pointer
);
2206 set_gdbarch_frame_args_skip (gdbarch
, 0);
2207 set_gdbarch_skip_prologue (gdbarch
, spu_skip_prologue
);
2208 set_gdbarch_in_function_epilogue_p (gdbarch
, spu_in_function_epilogue_p
);
2211 set_gdbarch_decr_pc_after_break (gdbarch
, 4);
2212 set_gdbarch_breakpoint_from_pc (gdbarch
, spu_breakpoint_from_pc
);
2213 set_gdbarch_cannot_step_breakpoint (gdbarch
, 1);
2214 set_gdbarch_software_single_step (gdbarch
, spu_software_single_step
);
2215 set_gdbarch_get_longjmp_target (gdbarch
, spu_get_longjmp_target
);
2218 set_gdbarch_overlay_update (gdbarch
, spu_overlay_update
);
2223 /* Provide a prototype to silence -Wmissing-prototypes. */
2224 extern initialize_file_ftype _initialize_spu_tdep
;
2227 _initialize_spu_tdep (void)
2229 register_gdbarch_init (bfd_arch_spu
, spu_gdbarch_init
);
2231 /* Add ourselves to objfile event chain. */
2232 observer_attach_new_objfile (spu_overlay_new_objfile
);
2233 spu_overlay_data
= register_objfile_data ();
2235 /* Add root prefix command for all "info spu" commands. */
2236 add_prefix_cmd ("spu", class_info
, info_spu_command
,
2237 _("Various SPU specific commands."),
2238 &infospucmdlist
, "info spu ", 0, &infolist
);
2240 /* Add various "info spu" commands. */
2241 add_cmd ("event", class_info
, info_spu_event_command
,
2242 _("Display SPU event facility status.\n"),
2244 add_cmd ("signal", class_info
, info_spu_signal_command
,
2245 _("Display SPU signal notification facility status.\n"),
2247 add_cmd ("mailbox", class_info
, info_spu_mailbox_command
,
2248 _("Display SPU mailbox facility status.\n"),
2250 add_cmd ("dma", class_info
, info_spu_dma_command
,
2251 _("Display MFC DMA status.\n"),
2253 add_cmd ("proxydma", class_info
, info_spu_proxydma_command
,
2254 _("Display MFC Proxy-DMA status.\n"),