1 /* SPU target-dependent code for GDB, the GNU debugger.
2 Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011
3 Free Software Foundation, Inc.
5 Contributed by Ulrich Weigand <uweigand@de.ibm.com>.
6 Based on a port by Sid Manning <sid@us.ibm.com>.
8 This file is part of GDB.
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "arch-utils.h"
28 #include "gdb_string.h"
29 #include "gdb_assert.h"
31 #include "frame-unwind.h"
32 #include "frame-base.h"
33 #include "trad-frame.h"
42 #include "reggroups.h"
43 #include "floatformat.h"
51 /* The list of available "set spu " and "show spu " commands. */
52 static struct cmd_list_element
*setspucmdlist
= NULL
;
53 static struct cmd_list_element
*showspucmdlist
= NULL
;
55 /* Whether to stop for new SPE contexts. */
56 static int spu_stop_on_load_p
= 0;
57 /* Whether to automatically flush the SW-managed cache. */
58 static int spu_auto_flush_cache_p
= 1;
61 /* The tdep structure. */
64 /* The spufs ID identifying our address space. */
67 /* SPU-specific vector type. */
68 struct type
*spu_builtin_type_vec128
;
72 /* SPU-specific vector type. */
74 spu_builtin_type_vec128 (struct gdbarch
*gdbarch
)
76 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
78 if (!tdep
->spu_builtin_type_vec128
)
80 const struct builtin_type
*bt
= builtin_type (gdbarch
);
83 t
= arch_composite_type (gdbarch
,
84 "__spu_builtin_type_vec128", TYPE_CODE_UNION
);
85 append_composite_type_field (t
, "uint128", bt
->builtin_int128
);
86 append_composite_type_field (t
, "v2_int64",
87 init_vector_type (bt
->builtin_int64
, 2));
88 append_composite_type_field (t
, "v4_int32",
89 init_vector_type (bt
->builtin_int32
, 4));
90 append_composite_type_field (t
, "v8_int16",
91 init_vector_type (bt
->builtin_int16
, 8));
92 append_composite_type_field (t
, "v16_int8",
93 init_vector_type (bt
->builtin_int8
, 16));
94 append_composite_type_field (t
, "v2_double",
95 init_vector_type (bt
->builtin_double
, 2));
96 append_composite_type_field (t
, "v4_float",
97 init_vector_type (bt
->builtin_float
, 4));
100 TYPE_NAME (t
) = "spu_builtin_type_vec128";
102 tdep
->spu_builtin_type_vec128
= t
;
105 return tdep
->spu_builtin_type_vec128
;
109 /* The list of available "info spu " commands. */
110 static struct cmd_list_element
*infospucmdlist
= NULL
;
/* Return the name of register REG_NR, or NULL if REG_NR is invalid.
   The layout is 128 general-purpose registers "r0".."r127" followed
   by the special-purpose registers.  */
static const char *
spu_register_name (struct gdbarch *gdbarch, int reg_nr)
{
  static const char *register_names[] =
    {
      "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
      "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
      "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
      "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
      "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
      "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
      "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
      "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
      "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
      "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
      "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
      "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
      "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
      "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
      "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
      "id", "pc", "sp", "fpscr", "srr0", "lslr", "decr", "decr_status"
    };

  /* Reject negative and out-of-range register numbers; the visible
     fragment only checked the upper bound and had lost its NULL
     returns to extraction damage.  */
  if (reg_nr < 0)
    return NULL;
  if (reg_nr >= (int) (sizeof register_names / sizeof *register_names))
    return NULL;

  return register_names[reg_nr];
}
147 spu_register_type (struct gdbarch
*gdbarch
, int reg_nr
)
149 if (reg_nr
< SPU_NUM_GPRS
)
150 return spu_builtin_type_vec128 (gdbarch
);
155 return builtin_type (gdbarch
)->builtin_uint32
;
158 return builtin_type (gdbarch
)->builtin_func_ptr
;
161 return builtin_type (gdbarch
)->builtin_data_ptr
;
163 case SPU_FPSCR_REGNUM
:
164 return builtin_type (gdbarch
)->builtin_uint128
;
166 case SPU_SRR0_REGNUM
:
167 return builtin_type (gdbarch
)->builtin_uint32
;
169 case SPU_LSLR_REGNUM
:
170 return builtin_type (gdbarch
)->builtin_uint32
;
172 case SPU_DECR_REGNUM
:
173 return builtin_type (gdbarch
)->builtin_uint32
;
175 case SPU_DECR_STATUS_REGNUM
:
176 return builtin_type (gdbarch
)->builtin_uint32
;
179 internal_error (__FILE__
, __LINE__
, _("invalid regnum"));
183 /* Pseudo registers for preferred slots - stack pointer. */
186 spu_pseudo_register_read_spu (struct regcache
*regcache
, const char *regname
,
189 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
190 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
195 regcache_raw_read_unsigned (regcache
, SPU_ID_REGNUM
, &id
);
196 xsnprintf (annex
, sizeof annex
, "%d/%s", (int) id
, regname
);
197 memset (reg
, 0, sizeof reg
);
198 target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
201 store_unsigned_integer (buf
, 4, byte_order
, strtoulst (reg
, NULL
, 16));
205 spu_pseudo_register_read (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
206 int regnum
, gdb_byte
*buf
)
215 regcache_raw_read (regcache
, SPU_RAW_SP_REGNUM
, reg
);
216 memcpy (buf
, reg
, 4);
219 case SPU_FPSCR_REGNUM
:
220 regcache_raw_read_unsigned (regcache
, SPU_ID_REGNUM
, &id
);
221 xsnprintf (annex
, sizeof annex
, "%d/fpcr", (int) id
);
222 target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
, buf
, 0, 16);
225 case SPU_SRR0_REGNUM
:
226 spu_pseudo_register_read_spu (regcache
, "srr0", buf
);
229 case SPU_LSLR_REGNUM
:
230 spu_pseudo_register_read_spu (regcache
, "lslr", buf
);
233 case SPU_DECR_REGNUM
:
234 spu_pseudo_register_read_spu (regcache
, "decr", buf
);
237 case SPU_DECR_STATUS_REGNUM
:
238 spu_pseudo_register_read_spu (regcache
, "decr_status", buf
);
242 internal_error (__FILE__
, __LINE__
, _("invalid regnum"));
247 spu_pseudo_register_write_spu (struct regcache
*regcache
, const char *regname
,
250 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
251 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
256 regcache_raw_read_unsigned (regcache
, SPU_ID_REGNUM
, &id
);
257 xsnprintf (annex
, sizeof annex
, "%d/%s", (int) id
, regname
);
258 xsnprintf (reg
, sizeof reg
, "0x%s",
259 phex_nz (extract_unsigned_integer (buf
, 4, byte_order
), 4));
260 target_write (¤t_target
, TARGET_OBJECT_SPU
, annex
,
261 reg
, 0, strlen (reg
));
265 spu_pseudo_register_write (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
266 int regnum
, const gdb_byte
*buf
)
275 regcache_raw_read (regcache
, SPU_RAW_SP_REGNUM
, reg
);
276 memcpy (reg
, buf
, 4);
277 regcache_raw_write (regcache
, SPU_RAW_SP_REGNUM
, reg
);
280 case SPU_FPSCR_REGNUM
:
281 regcache_raw_read_unsigned (regcache
, SPU_ID_REGNUM
, &id
);
282 xsnprintf (annex
, sizeof annex
, "%d/fpcr", (int) id
);
283 target_write (¤t_target
, TARGET_OBJECT_SPU
, annex
, buf
, 0, 16);
286 case SPU_SRR0_REGNUM
:
287 spu_pseudo_register_write_spu (regcache
, "srr0", buf
);
290 case SPU_LSLR_REGNUM
:
291 spu_pseudo_register_write_spu (regcache
, "lslr", buf
);
294 case SPU_DECR_REGNUM
:
295 spu_pseudo_register_write_spu (regcache
, "decr", buf
);
298 case SPU_DECR_STATUS_REGNUM
:
299 spu_pseudo_register_write_spu (regcache
, "decr_status", buf
);
303 internal_error (__FILE__
, __LINE__
, _("invalid regnum"));
307 /* Value conversion -- access scalar values at the preferred slot. */
309 static struct value
*
310 spu_value_from_register (struct type
*type
, int regnum
,
311 struct frame_info
*frame
)
313 struct value
*value
= default_value_from_register (type
, regnum
, frame
);
314 int len
= TYPE_LENGTH (type
);
316 if (regnum
< SPU_NUM_GPRS
&& len
< 16)
318 int preferred_slot
= len
< 4 ? 4 - len
: 0;
319 set_value_offset (value
, preferred_slot
);
325 /* Register groups. */
328 spu_register_reggroup_p (struct gdbarch
*gdbarch
, int regnum
,
329 struct reggroup
*group
)
331 /* Registers displayed via 'info regs'. */
332 if (group
== general_reggroup
)
335 /* Registers displayed via 'info float'. */
336 if (group
== float_reggroup
)
339 /* Registers that need to be saved/restored in order to
340 push or pop frames. */
341 if (group
== save_reggroup
|| group
== restore_reggroup
)
344 return default_register_reggroup_p (gdbarch
, regnum
, group
);
348 /* Address handling. */
351 spu_gdbarch_id (struct gdbarch
*gdbarch
)
353 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
356 /* The objfile architecture of a standalone SPU executable does not
357 provide an SPU ID. Retrieve it from the objfile's relocated
358 address range in this special case. */
360 && symfile_objfile
&& symfile_objfile
->obfd
361 && bfd_get_arch (symfile_objfile
->obfd
) == bfd_arch_spu
362 && symfile_objfile
->sections
!= symfile_objfile
->sections_end
)
363 id
= SPUADDR_SPU (obj_section_addr (symfile_objfile
->sections
));
369 spu_address_class_type_flags (int byte_size
, int dwarf2_addr_class
)
371 if (dwarf2_addr_class
== 1)
372 return TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1
;
378 spu_address_class_type_flags_to_name (struct gdbarch
*gdbarch
, int type_flags
)
380 if (type_flags
& TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1
)
387 spu_address_class_name_to_type_flags (struct gdbarch
*gdbarch
,
388 const char *name
, int *type_flags_ptr
)
390 if (strcmp (name
, "__ea") == 0)
392 *type_flags_ptr
= TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1
;
400 spu_address_to_pointer (struct gdbarch
*gdbarch
,
401 struct type
*type
, gdb_byte
*buf
, CORE_ADDR addr
)
403 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
404 store_unsigned_integer (buf
, TYPE_LENGTH (type
), byte_order
,
405 SPUADDR_ADDR (addr
));
409 spu_pointer_to_address (struct gdbarch
*gdbarch
,
410 struct type
*type
, const gdb_byte
*buf
)
412 int id
= spu_gdbarch_id (gdbarch
);
413 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
415 = extract_unsigned_integer (buf
, TYPE_LENGTH (type
), byte_order
);
417 /* Do not convert __ea pointers. */
418 if (TYPE_ADDRESS_CLASS_1 (type
))
421 return addr
? SPUADDR (id
, addr
) : 0;
425 spu_integer_to_address (struct gdbarch
*gdbarch
,
426 struct type
*type
, const gdb_byte
*buf
)
428 int id
= spu_gdbarch_id (gdbarch
);
429 ULONGEST addr
= unpack_long (type
, buf
);
431 return SPUADDR (id
, addr
);
435 /* Decoding SPU instructions. */
/* If INSN is an RR-form instruction with 11-bit opcode OP, extract its
   register fields into *RT, *RA, *RB and return non-zero; otherwise
   return zero.
   NOTE(review): the `*rt` extraction and return statements were
   dropped by a lossy extraction; restored from upstream.  */
static int
is_rr (unsigned int insn, int op, int *rt, int *ra, int *rb)
{
  if ((insn >> 21) == op)
    {
      *rt = insn & 127;
      *ra = (insn >> 7) & 127;
      *rb = (insn >> 14) & 127;
      return 1;
    }

  return 0;
}
/* If INSN is an RRR-form instruction with 4-bit opcode OP, extract its
   register fields into *RT, *RA, *RB, *RC and return non-zero;
   otherwise return zero.
   NOTE(review): the `*rc` extraction and return statements were
   dropped by a lossy extraction; restored from upstream.  */
static int
is_rrr (unsigned int insn, int op, int *rt, int *ra, int *rb, int *rc)
{
  if ((insn >> 28) == op)
    {
      *rt = (insn >> 21) & 127;
      *ra = (insn >> 7) & 127;
      *rb = (insn >> 14) & 127;
      *rc = insn & 127;
      return 1;
    }

  return 0;
}
/* If INSN is an RI7-form instruction with 11-bit opcode OP, extract
   its register fields into *RT, *RA and its sign-extended 7-bit
   immediate into *I7, returning non-zero; otherwise return zero.
   NOTE(review): the `*rt` extraction and return statements were
   dropped by a lossy extraction; restored from upstream.  */
static int
is_ri7 (unsigned int insn, int op, int *rt, int *ra, int *i7)
{
  if ((insn >> 21) == op)
    {
      *rt = insn & 127;
      *ra = (insn >> 7) & 127;
      /* XOR/subtract trick sign-extends the 7-bit field.  */
      *i7 = (((insn >> 14) & 127) ^ 0x40) - 0x40;
      return 1;
    }

  return 0;
}
/* If INSN is an RI10-form instruction with 8-bit opcode OP, extract
   its register fields into *RT, *RA and its sign-extended 10-bit
   immediate into *I10, returning non-zero; otherwise return zero.
   NOTE(review): the `*rt` extraction and return statements were
   dropped by a lossy extraction; restored from upstream.  */
static int
is_ri10 (unsigned int insn, int op, int *rt, int *ra, int *i10)
{
  if ((insn >> 24) == op)
    {
      *rt = insn & 127;
      *ra = (insn >> 7) & 127;
      /* XOR/subtract trick sign-extends the 10-bit field.  */
      *i10 = (((insn >> 14) & 0x3ff) ^ 0x200) - 0x200;
      return 1;
    }

  return 0;
}
/* If INSN is an RI16-form instruction with 9-bit opcode OP, extract
   its register field into *RT and its sign-extended 16-bit immediate
   into *I16, returning non-zero; otherwise return zero.
   NOTE(review): the `*rt` extraction and return statements were
   dropped by a lossy extraction; restored from upstream.  */
static int
is_ri16 (unsigned int insn, int op, int *rt, int *i16)
{
  if ((insn >> 23) == op)
    {
      *rt = insn & 127;
      /* XOR/subtract trick sign-extends the 16-bit field.  */
      *i16 = (((insn >> 7) & 0xffff) ^ 0x8000) - 0x8000;
      return 1;
    }

  return 0;
}
/* If INSN is an RI18-form instruction with 7-bit opcode OP, extract
   its register field into *RT and its sign-extended 18-bit immediate
   into *I18, returning non-zero; otherwise return zero.
   NOTE(review): the `*rt` extraction and return statements were
   dropped by a lossy extraction; restored from upstream.  */
static int
is_ri18 (unsigned int insn, int op, int *rt, int *i18)
{
  if ((insn >> 25) == op)
    {
      *rt = insn & 127;
      /* XOR/subtract trick sign-extends the 18-bit field.  */
      *i18 = (((insn >> 7) & 0x3ffff) ^ 0x20000) - 0x20000;
      return 1;
    }

  return 0;
}
555 is_branch (unsigned int insn
, int *offset
, int *reg
)
559 if (is_ri16 (insn
, op_br
, &rt
, &i16
)
560 || is_ri16 (insn
, op_brsl
, &rt
, &i16
)
561 || is_ri16 (insn
, op_brnz
, &rt
, &i16
)
562 || is_ri16 (insn
, op_brz
, &rt
, &i16
)
563 || is_ri16 (insn
, op_brhnz
, &rt
, &i16
)
564 || is_ri16 (insn
, op_brhz
, &rt
, &i16
))
566 *reg
= SPU_PC_REGNUM
;
571 if (is_ri16 (insn
, op_bra
, &rt
, &i16
)
572 || is_ri16 (insn
, op_brasl
, &rt
, &i16
))
579 if (is_ri7 (insn
, op_bi
, &rt
, reg
, &i7
)
580 || is_ri7 (insn
, op_bisl
, &rt
, reg
, &i7
)
581 || is_ri7 (insn
, op_biz
, &rt
, reg
, &i7
)
582 || is_ri7 (insn
, op_binz
, &rt
, reg
, &i7
)
583 || is_ri7 (insn
, op_bihz
, &rt
, reg
, &i7
)
584 || is_ri7 (insn
, op_bihnz
, &rt
, reg
, &i7
))
594 /* Prolog parsing. */
596 struct spu_prologue_data
598 /* Stack frame size. -1 if analysis was unsuccessful. */
601 /* How to find the CFA. The CFA is equal to SP at function entry. */
605 /* Offset relative to CFA where a register is saved. -1 if invalid. */
606 int reg_offset
[SPU_NUM_GPRS
];
610 spu_analyze_prologue (struct gdbarch
*gdbarch
,
611 CORE_ADDR start_pc
, CORE_ADDR end_pc
,
612 struct spu_prologue_data
*data
)
614 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
619 int reg_immed
[SPU_NUM_GPRS
];
621 CORE_ADDR prolog_pc
= start_pc
;
626 /* Initialize DATA to default values. */
629 data
->cfa_reg
= SPU_RAW_SP_REGNUM
;
630 data
->cfa_offset
= 0;
632 for (i
= 0; i
< SPU_NUM_GPRS
; i
++)
633 data
->reg_offset
[i
] = -1;
635 /* Set up REG_IMMED array. This is non-zero for a register if we know its
636 preferred slot currently holds this immediate value. */
637 for (i
= 0; i
< SPU_NUM_GPRS
; i
++)
640 /* Scan instructions until the first branch.
642 The following instructions are important prolog components:
644 - The first instruction to set up the stack pointer.
645 - The first instruction to set up the frame pointer.
646 - The first instruction to save the link register.
647 - The first instruction to save the backchain.
649 We return the instruction after the latest of these four,
650 or the incoming PC if none is found. The first instruction
651 to set up the stack pointer also defines the frame size.
653 Note that instructions saving incoming arguments to their stack
654 slots are not counted as important, because they are hard to
655 identify with certainty. This should not matter much, because
656 arguments are relevant only in code compiled with debug data,
657 and in such code the GDB core will advance until the first source
658 line anyway, using SAL data.
660 For purposes of stack unwinding, we analyze the following types
661 of instructions in addition:
663 - Any instruction adding to the current frame pointer.
664 - Any instruction loading an immediate constant into a register.
665 - Any instruction storing a register onto the stack.
667 These are used to compute the CFA and REG_OFFSET output. */
669 for (pc
= start_pc
; pc
< end_pc
; pc
+= 4)
672 int rt
, ra
, rb
, rc
, immed
;
674 if (target_read_memory (pc
, buf
, 4))
676 insn
= extract_unsigned_integer (buf
, 4, byte_order
);
678 /* AI is the typical instruction to set up a stack frame.
679 It is also used to initialize the frame pointer. */
680 if (is_ri10 (insn
, op_ai
, &rt
, &ra
, &immed
))
682 if (rt
== data
->cfa_reg
&& ra
== data
->cfa_reg
)
683 data
->cfa_offset
-= immed
;
685 if (rt
== SPU_RAW_SP_REGNUM
&& ra
== SPU_RAW_SP_REGNUM
693 else if (rt
== SPU_FP_REGNUM
&& ra
== SPU_RAW_SP_REGNUM
699 data
->cfa_reg
= SPU_FP_REGNUM
;
700 data
->cfa_offset
-= immed
;
704 /* A is used to set up stack frames of size >= 512 bytes.
705 If we have tracked the contents of the addend register,
706 we can handle this as well. */
707 else if (is_rr (insn
, op_a
, &rt
, &ra
, &rb
))
709 if (rt
== data
->cfa_reg
&& ra
== data
->cfa_reg
)
711 if (reg_immed
[rb
] != 0)
712 data
->cfa_offset
-= reg_immed
[rb
];
714 data
->cfa_reg
= -1; /* We don't know the CFA any more. */
717 if (rt
== SPU_RAW_SP_REGNUM
&& ra
== SPU_RAW_SP_REGNUM
723 if (reg_immed
[rb
] != 0)
724 data
->size
= -reg_immed
[rb
];
728 /* We need to track IL and ILA used to load immediate constants
729 in case they are later used as input to an A instruction. */
730 else if (is_ri16 (insn
, op_il
, &rt
, &immed
))
732 reg_immed
[rt
] = immed
;
734 if (rt
== SPU_RAW_SP_REGNUM
&& !found_sp
)
738 else if (is_ri18 (insn
, op_ila
, &rt
, &immed
))
740 reg_immed
[rt
] = immed
& 0x3ffff;
742 if (rt
== SPU_RAW_SP_REGNUM
&& !found_sp
)
746 /* STQD is used to save registers to the stack. */
747 else if (is_ri10 (insn
, op_stqd
, &rt
, &ra
, &immed
))
749 if (ra
== data
->cfa_reg
)
750 data
->reg_offset
[rt
] = data
->cfa_offset
- (immed
<< 4);
752 if (ra
== data
->cfa_reg
&& rt
== SPU_LR_REGNUM
759 if (ra
== SPU_RAW_SP_REGNUM
760 && (found_sp
? immed
== 0 : rt
== SPU_RAW_SP_REGNUM
)
768 /* _start uses SELB to set up the stack pointer. */
769 else if (is_rrr (insn
, op_selb
, &rt
, &ra
, &rb
, &rc
))
771 if (rt
== SPU_RAW_SP_REGNUM
&& !found_sp
)
775 /* We terminate if we find a branch. */
776 else if (is_branch (insn
, &immed
, &ra
))
781 /* If we successfully parsed until here, and didn't find any instruction
782 modifying SP, we assume we have a frameless function. */
786 /* Return cooked instead of raw SP. */
787 if (data
->cfa_reg
== SPU_RAW_SP_REGNUM
)
788 data
->cfa_reg
= SPU_SP_REGNUM
;
793 /* Return the first instruction after the prologue starting at PC. */
795 spu_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
797 struct spu_prologue_data data
;
798 return spu_analyze_prologue (gdbarch
, pc
, (CORE_ADDR
)-1, &data
);
801 /* Return the frame pointer in use at address PC. */
803 spu_virtual_frame_pointer (struct gdbarch
*gdbarch
, CORE_ADDR pc
,
804 int *reg
, LONGEST
*offset
)
806 struct spu_prologue_data data
;
807 spu_analyze_prologue (gdbarch
, pc
, (CORE_ADDR
)-1, &data
);
809 if (data
.size
!= -1 && data
.cfa_reg
!= -1)
811 /* The 'frame pointer' address is CFA minus frame size. */
813 *offset
= data
.cfa_offset
- data
.size
;
817 /* ??? We don't really know ... */
818 *reg
= SPU_SP_REGNUM
;
823 /* Return true if we are in the function's epilogue, i.e. after the
824 instruction that destroyed the function's stack frame.
826 1) scan forward from the point of execution:
827 a) If you find an instruction that modifies the stack pointer
828 or transfers control (except a return), execution is not in
830 b) Stop scanning if you find a return instruction or reach the
831 end of the function or reach the hard limit for the size of
833 2) scan backward from the point of execution:
834 a) If you find an instruction that modifies the stack pointer,
835 execution *is* in an epilogue, return.
836 b) Stop scanning if you reach an instruction that transfers
837 control or the beginning of the function or reach the hard
838 limit for the size of an epilogue. */
841 spu_in_function_epilogue_p (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
843 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
844 CORE_ADDR scan_pc
, func_start
, func_end
, epilogue_start
, epilogue_end
;
847 int rt
, ra
, rb
, rc
, immed
;
849 /* Find the search limits based on function boundaries and hard limit.
850 We assume the epilogue can be up to 64 instructions long. */
852 const int spu_max_epilogue_size
= 64 * 4;
854 if (!find_pc_partial_function (pc
, NULL
, &func_start
, &func_end
))
857 if (pc
- func_start
< spu_max_epilogue_size
)
858 epilogue_start
= func_start
;
860 epilogue_start
= pc
- spu_max_epilogue_size
;
862 if (func_end
- pc
< spu_max_epilogue_size
)
863 epilogue_end
= func_end
;
865 epilogue_end
= pc
+ spu_max_epilogue_size
;
867 /* Scan forward until next 'bi $0'. */
869 for (scan_pc
= pc
; scan_pc
< epilogue_end
; scan_pc
+= 4)
871 if (target_read_memory (scan_pc
, buf
, 4))
873 insn
= extract_unsigned_integer (buf
, 4, byte_order
);
875 if (is_branch (insn
, &immed
, &ra
))
877 if (immed
== 0 && ra
== SPU_LR_REGNUM
)
883 if (is_ri10 (insn
, op_ai
, &rt
, &ra
, &immed
)
884 || is_rr (insn
, op_a
, &rt
, &ra
, &rb
)
885 || is_ri10 (insn
, op_lqd
, &rt
, &ra
, &immed
))
887 if (rt
== SPU_RAW_SP_REGNUM
)
892 if (scan_pc
>= epilogue_end
)
895 /* Scan backward until adjustment to stack pointer (R1). */
897 for (scan_pc
= pc
- 4; scan_pc
>= epilogue_start
; scan_pc
-= 4)
899 if (target_read_memory (scan_pc
, buf
, 4))
901 insn
= extract_unsigned_integer (buf
, 4, byte_order
);
903 if (is_branch (insn
, &immed
, &ra
))
906 if (is_ri10 (insn
, op_ai
, &rt
, &ra
, &immed
)
907 || is_rr (insn
, op_a
, &rt
, &ra
, &rb
)
908 || is_ri10 (insn
, op_lqd
, &rt
, &ra
, &immed
))
910 if (rt
== SPU_RAW_SP_REGNUM
)
919 /* Normal stack frames. */
921 struct spu_unwind_cache
924 CORE_ADDR frame_base
;
925 CORE_ADDR local_base
;
927 struct trad_frame_saved_reg
*saved_regs
;
930 static struct spu_unwind_cache
*
931 spu_frame_unwind_cache (struct frame_info
*this_frame
,
932 void **this_prologue_cache
)
934 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
935 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
936 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
937 struct spu_unwind_cache
*info
;
938 struct spu_prologue_data data
;
939 CORE_ADDR id
= tdep
->id
;
942 if (*this_prologue_cache
)
943 return *this_prologue_cache
;
945 info
= FRAME_OBSTACK_ZALLOC (struct spu_unwind_cache
);
946 *this_prologue_cache
= info
;
947 info
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
948 info
->frame_base
= 0;
949 info
->local_base
= 0;
951 /* Find the start of the current function, and analyze its prologue. */
952 info
->func
= get_frame_func (this_frame
);
955 /* Fall back to using the current PC as frame ID. */
956 info
->func
= get_frame_pc (this_frame
);
960 spu_analyze_prologue (gdbarch
, info
->func
, get_frame_pc (this_frame
),
963 /* If successful, use prologue analysis data. */
964 if (data
.size
!= -1 && data
.cfa_reg
!= -1)
969 /* Determine CFA via unwound CFA_REG plus CFA_OFFSET. */
970 get_frame_register (this_frame
, data
.cfa_reg
, buf
);
971 cfa
= extract_unsigned_integer (buf
, 4, byte_order
) + data
.cfa_offset
;
972 cfa
= SPUADDR (id
, cfa
);
974 /* Call-saved register slots. */
975 for (i
= 0; i
< SPU_NUM_GPRS
; i
++)
976 if (i
== SPU_LR_REGNUM
977 || (i
>= SPU_SAVED1_REGNUM
&& i
<= SPU_SAVEDN_REGNUM
))
978 if (data
.reg_offset
[i
] != -1)
979 info
->saved_regs
[i
].addr
= cfa
- data
.reg_offset
[i
];
982 info
->frame_base
= cfa
;
983 info
->local_base
= cfa
- data
.size
;
986 /* Otherwise, fall back to reading the backchain link. */
994 /* Get local store limit. */
995 lslr
= get_frame_register_unsigned (this_frame
, SPU_LSLR_REGNUM
);
997 lslr
= (ULONGEST
) -1;
999 /* Get the backchain. */
1000 reg
= get_frame_register_unsigned (this_frame
, SPU_SP_REGNUM
);
1001 status
= safe_read_memory_integer (SPUADDR (id
, reg
), 4, byte_order
,
1004 /* A zero backchain terminates the frame chain. Also, sanity
1005 check against the local store size limit. */
1006 if (status
&& backchain
> 0 && backchain
<= lslr
)
1008 /* Assume the link register is saved into its slot. */
1009 if (backchain
+ 16 <= lslr
)
1010 info
->saved_regs
[SPU_LR_REGNUM
].addr
= SPUADDR (id
, backchain
+ 16);
1013 info
->frame_base
= SPUADDR (id
, backchain
);
1014 info
->local_base
= SPUADDR (id
, reg
);
1018 /* If we didn't find a frame, we cannot determine SP / return address. */
1019 if (info
->frame_base
== 0)
1022 /* The previous SP is equal to the CFA. */
1023 trad_frame_set_value (info
->saved_regs
, SPU_SP_REGNUM
,
1024 SPUADDR_ADDR (info
->frame_base
));
1026 /* Read full contents of the unwound link register in order to
1027 be able to determine the return address. */
1028 if (trad_frame_addr_p (info
->saved_regs
, SPU_LR_REGNUM
))
1029 target_read_memory (info
->saved_regs
[SPU_LR_REGNUM
].addr
, buf
, 16);
1031 get_frame_register (this_frame
, SPU_LR_REGNUM
, buf
);
1033 /* Normally, the return address is contained in the slot 0 of the
1034 link register, and slots 1-3 are zero. For an overlay return,
1035 slot 0 contains the address of the overlay manager return stub,
1036 slot 1 contains the partition number of the overlay section to
1037 be returned to, and slot 2 contains the return address within
1038 that section. Return the latter address in that case. */
1039 if (extract_unsigned_integer (buf
+ 8, 4, byte_order
) != 0)
1040 trad_frame_set_value (info
->saved_regs
, SPU_PC_REGNUM
,
1041 extract_unsigned_integer (buf
+ 8, 4, byte_order
));
1043 trad_frame_set_value (info
->saved_regs
, SPU_PC_REGNUM
,
1044 extract_unsigned_integer (buf
, 4, byte_order
));
1050 spu_frame_this_id (struct frame_info
*this_frame
,
1051 void **this_prologue_cache
, struct frame_id
*this_id
)
1053 struct spu_unwind_cache
*info
=
1054 spu_frame_unwind_cache (this_frame
, this_prologue_cache
);
1056 if (info
->frame_base
== 0)
1059 *this_id
= frame_id_build (info
->frame_base
, info
->func
);
1062 static struct value
*
1063 spu_frame_prev_register (struct frame_info
*this_frame
,
1064 void **this_prologue_cache
, int regnum
)
1066 struct spu_unwind_cache
*info
1067 = spu_frame_unwind_cache (this_frame
, this_prologue_cache
);
1069 /* Special-case the stack pointer. */
1070 if (regnum
== SPU_RAW_SP_REGNUM
)
1071 regnum
= SPU_SP_REGNUM
;
1073 return trad_frame_get_prev_register (this_frame
, info
->saved_regs
, regnum
);
1076 static const struct frame_unwind spu_frame_unwind
= {
1079 spu_frame_prev_register
,
1081 default_frame_sniffer
1085 spu_frame_base_address (struct frame_info
*this_frame
, void **this_cache
)
1087 struct spu_unwind_cache
*info
1088 = spu_frame_unwind_cache (this_frame
, this_cache
);
1089 return info
->local_base
;
1092 static const struct frame_base spu_frame_base
= {
1094 spu_frame_base_address
,
1095 spu_frame_base_address
,
1096 spu_frame_base_address
1100 spu_unwind_pc (struct gdbarch
*gdbarch
, struct frame_info
*next_frame
)
1102 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1103 CORE_ADDR pc
= frame_unwind_register_unsigned (next_frame
, SPU_PC_REGNUM
);
1104 /* Mask off interrupt enable bit. */
1105 return SPUADDR (tdep
->id
, pc
& -4);
1109 spu_unwind_sp (struct gdbarch
*gdbarch
, struct frame_info
*next_frame
)
1111 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1112 CORE_ADDR sp
= frame_unwind_register_unsigned (next_frame
, SPU_SP_REGNUM
);
1113 return SPUADDR (tdep
->id
, sp
);
1117 spu_read_pc (struct regcache
*regcache
)
1119 struct gdbarch_tdep
*tdep
= gdbarch_tdep (get_regcache_arch (regcache
));
1121 regcache_cooked_read_unsigned (regcache
, SPU_PC_REGNUM
, &pc
);
1122 /* Mask off interrupt enable bit. */
1123 return SPUADDR (tdep
->id
, pc
& -4);
1127 spu_write_pc (struct regcache
*regcache
, CORE_ADDR pc
)
1129 /* Keep interrupt enabled state unchanged. */
1131 regcache_cooked_read_unsigned (regcache
, SPU_PC_REGNUM
, &old_pc
);
1132 regcache_cooked_write_unsigned (regcache
, SPU_PC_REGNUM
,
1133 (SPUADDR_ADDR (pc
) & -4) | (old_pc
& 3));
1137 /* Cell/B.E. cross-architecture unwinder support. */
1139 struct spu2ppu_cache
1141 struct frame_id frame_id
;
1142 struct regcache
*regcache
;
1145 static struct gdbarch
*
1146 spu2ppu_prev_arch (struct frame_info
*this_frame
, void **this_cache
)
1148 struct spu2ppu_cache
*cache
= *this_cache
;
1149 return get_regcache_arch (cache
->regcache
);
1153 spu2ppu_this_id (struct frame_info
*this_frame
,
1154 void **this_cache
, struct frame_id
*this_id
)
1156 struct spu2ppu_cache
*cache
= *this_cache
;
1157 *this_id
= cache
->frame_id
;
1160 static struct value
*
1161 spu2ppu_prev_register (struct frame_info
*this_frame
,
1162 void **this_cache
, int regnum
)
1164 struct spu2ppu_cache
*cache
= *this_cache
;
1165 struct gdbarch
*gdbarch
= get_regcache_arch (cache
->regcache
);
1168 buf
= alloca (register_size (gdbarch
, regnum
));
1169 regcache_cooked_read (cache
->regcache
, regnum
, buf
);
1170 return frame_unwind_got_bytes (this_frame
, regnum
, buf
);
1174 spu2ppu_sniffer (const struct frame_unwind
*self
,
1175 struct frame_info
*this_frame
, void **this_prologue_cache
)
1177 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1178 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1179 CORE_ADDR base
, func
, backchain
;
1182 if (gdbarch_bfd_arch_info (target_gdbarch
)->arch
== bfd_arch_spu
)
1185 base
= get_frame_sp (this_frame
);
1186 func
= get_frame_pc (this_frame
);
1187 if (target_read_memory (base
, buf
, 4))
1189 backchain
= extract_unsigned_integer (buf
, 4, byte_order
);
1193 struct frame_info
*fi
;
1195 struct spu2ppu_cache
*cache
1196 = FRAME_OBSTACK_CALLOC (1, struct spu2ppu_cache
);
1198 cache
->frame_id
= frame_id_build (base
+ 16, func
);
1200 for (fi
= get_next_frame (this_frame
); fi
; fi
= get_next_frame (fi
))
1201 if (gdbarch_bfd_arch_info (get_frame_arch (fi
))->arch
!= bfd_arch_spu
)
1206 cache
->regcache
= frame_save_as_regcache (fi
);
1207 *this_prologue_cache
= cache
;
1212 struct regcache
*regcache
;
1213 regcache
= get_thread_arch_regcache (inferior_ptid
, target_gdbarch
);
1214 cache
->regcache
= regcache_dup (regcache
);
1215 *this_prologue_cache
= cache
;
1224 spu2ppu_dealloc_cache (struct frame_info
*self
, void *this_cache
)
1226 struct spu2ppu_cache
*cache
= this_cache
;
1227 regcache_xfree (cache
->regcache
);
1230 static const struct frame_unwind spu2ppu_unwind
= {
1233 spu2ppu_prev_register
,
1236 spu2ppu_dealloc_cache
,
1241 /* Function calling convention. */
1244 spu_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
1250 spu_push_dummy_code (struct gdbarch
*gdbarch
, CORE_ADDR sp
, CORE_ADDR funaddr
,
1251 struct value
**args
, int nargs
, struct type
*value_type
,
1252 CORE_ADDR
*real_pc
, CORE_ADDR
*bp_addr
,
1253 struct regcache
*regcache
)
1255 /* Allocate space sufficient for a breakpoint, keeping the stack aligned. */
1256 sp
= (sp
- 4) & ~15;
1257 /* Store the address of that breakpoint */
1259 /* The call starts at the callee's entry point. */
1266 spu_scalar_value_p (struct type
*type
)
1268 switch (TYPE_CODE (type
))
1271 case TYPE_CODE_ENUM
:
1272 case TYPE_CODE_RANGE
:
1273 case TYPE_CODE_CHAR
:
1274 case TYPE_CODE_BOOL
:
1277 return TYPE_LENGTH (type
) <= 16;
1285 spu_value_to_regcache (struct regcache
*regcache
, int regnum
,
1286 struct type
*type
, const gdb_byte
*in
)
1288 int len
= TYPE_LENGTH (type
);
1290 if (spu_scalar_value_p (type
))
1292 int preferred_slot
= len
< 4 ? 4 - len
: 0;
1293 regcache_cooked_write_part (regcache
, regnum
, preferred_slot
, len
, in
);
1299 regcache_cooked_write (regcache
, regnum
++, in
);
1305 regcache_cooked_write_part (regcache
, regnum
, 0, len
, in
);
1310 spu_regcache_to_value (struct regcache
*regcache
, int regnum
,
1311 struct type
*type
, gdb_byte
*out
)
1313 int len
= TYPE_LENGTH (type
);
1315 if (spu_scalar_value_p (type
))
1317 int preferred_slot
= len
< 4 ? 4 - len
: 0;
1318 regcache_cooked_read_part (regcache
, regnum
, preferred_slot
, len
, out
);
1324 regcache_cooked_read (regcache
, regnum
++, out
);
1330 regcache_cooked_read_part (regcache
, regnum
, 0, len
, out
);
1335 spu_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
1336 struct regcache
*regcache
, CORE_ADDR bp_addr
,
1337 int nargs
, struct value
**args
, CORE_ADDR sp
,
1338 int struct_return
, CORE_ADDR struct_addr
)
1340 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1343 int regnum
= SPU_ARG1_REGNUM
;
1347 /* Set the return address. */
1348 memset (buf
, 0, sizeof buf
);
1349 store_unsigned_integer (buf
, 4, byte_order
, SPUADDR_ADDR (bp_addr
));
1350 regcache_cooked_write (regcache
, SPU_LR_REGNUM
, buf
);
1352 /* If STRUCT_RETURN is true, then the struct return address (in
1353 STRUCT_ADDR) will consume the first argument-passing register.
1354 Both adjust the register count and store that value. */
1357 memset (buf
, 0, sizeof buf
);
1358 store_unsigned_integer (buf
, 4, byte_order
, SPUADDR_ADDR (struct_addr
));
1359 regcache_cooked_write (regcache
, regnum
++, buf
);
1362 /* Fill in argument registers. */
1363 for (i
= 0; i
< nargs
; i
++)
1365 struct value
*arg
= args
[i
];
1366 struct type
*type
= check_typedef (value_type (arg
));
1367 const gdb_byte
*contents
= value_contents (arg
);
1368 int len
= TYPE_LENGTH (type
);
1369 int n_regs
= align_up (len
, 16) / 16;
1371 /* If the argument doesn't wholly fit into registers, it and
1372 all subsequent arguments go to the stack. */
1373 if (regnum
+ n_regs
- 1 > SPU_ARGN_REGNUM
)
1379 spu_value_to_regcache (regcache
, regnum
, type
, contents
);
1383 /* Overflow arguments go to the stack. */
1384 if (stack_arg
!= -1)
1388 /* Allocate all required stack size. */
1389 for (i
= stack_arg
; i
< nargs
; i
++)
1391 struct type
*type
= check_typedef (value_type (args
[i
]));
1392 sp
-= align_up (TYPE_LENGTH (type
), 16);
1395 /* Fill in stack arguments. */
1397 for (i
= stack_arg
; i
< nargs
; i
++)
1399 struct value
*arg
= args
[i
];
1400 struct type
*type
= check_typedef (value_type (arg
));
1401 int len
= TYPE_LENGTH (type
);
1404 if (spu_scalar_value_p (type
))
1405 preferred_slot
= len
< 4 ? 4 - len
: 0;
1409 target_write_memory (ap
+ preferred_slot
, value_contents (arg
), len
);
1410 ap
+= align_up (TYPE_LENGTH (type
), 16);
1414 /* Allocate stack frame header. */
1417 /* Store stack back chain. */
1418 regcache_cooked_read (regcache
, SPU_RAW_SP_REGNUM
, buf
);
1419 target_write_memory (sp
, buf
, 16);
1421 /* Finally, update all slots of the SP register. */
1422 sp_delta
= sp
- extract_unsigned_integer (buf
, 4, byte_order
);
1423 for (i
= 0; i
< 4; i
++)
1425 CORE_ADDR sp_slot
= extract_unsigned_integer (buf
+ 4*i
, 4, byte_order
);
1426 store_unsigned_integer (buf
+ 4*i
, 4, byte_order
, sp_slot
+ sp_delta
);
1428 regcache_cooked_write (regcache
, SPU_RAW_SP_REGNUM
, buf
);
1433 static struct frame_id
1434 spu_dummy_id (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1436 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1437 CORE_ADDR pc
= get_frame_register_unsigned (this_frame
, SPU_PC_REGNUM
);
1438 CORE_ADDR sp
= get_frame_register_unsigned (this_frame
, SPU_SP_REGNUM
);
1439 return frame_id_build (SPUADDR (tdep
->id
, sp
), SPUADDR (tdep
->id
, pc
& -4));
1442 /* Function return value access. */
1444 static enum return_value_convention
1445 spu_return_value (struct gdbarch
*gdbarch
, struct type
*func_type
,
1446 struct type
*type
, struct regcache
*regcache
,
1447 gdb_byte
*out
, const gdb_byte
*in
)
1449 enum return_value_convention rvc
;
1451 if (TYPE_LENGTH (type
) <= (SPU_ARGN_REGNUM
- SPU_ARG1_REGNUM
+ 1) * 16)
1452 rvc
= RETURN_VALUE_REGISTER_CONVENTION
;
1454 rvc
= RETURN_VALUE_STRUCT_CONVENTION
;
1460 case RETURN_VALUE_REGISTER_CONVENTION
:
1461 spu_value_to_regcache (regcache
, SPU_ARG1_REGNUM
, type
, in
);
1464 case RETURN_VALUE_STRUCT_CONVENTION
:
1465 error (_("Cannot set function return value."));
1473 case RETURN_VALUE_REGISTER_CONVENTION
:
1474 spu_regcache_to_value (regcache
, SPU_ARG1_REGNUM
, type
, out
);
1477 case RETURN_VALUE_STRUCT_CONVENTION
:
1478 error (_("Function return value unknown."));
1489 static const gdb_byte
*
1490 spu_breakpoint_from_pc (struct gdbarch
*gdbarch
, CORE_ADDR
* pcptr
, int *lenptr
)
1492 static const gdb_byte breakpoint
[] = { 0x00, 0x00, 0x3f, 0xff };
1494 *lenptr
= sizeof breakpoint
;
1499 spu_memory_remove_breakpoint (struct gdbarch
*gdbarch
,
1500 struct bp_target_info
*bp_tgt
)
1502 /* We work around a problem in combined Cell/B.E. debugging here. Consider
1503 that in a combined application, we have some breakpoints inserted in SPU
1504 code, and now the application forks (on the PPU side). GDB common code
1505 will assume that the fork system call copied all breakpoints into the new
1506 process' address space, and that all those copies now need to be removed
1507 (see breakpoint.c:detach_breakpoints).
1509 While this is certainly true for PPU side breakpoints, it is not true
1510 for SPU side breakpoints. fork will clone the SPU context file
1511 descriptors, so that all the existing SPU contexts are in accessible
1512 in the new process. However, the contents of the SPU contexts themselves
1513 are *not* cloned. Therefore the effect of detach_breakpoints is to
1514 remove SPU breakpoints from the *original* SPU context's local store
1515 -- this is not the correct behaviour.
1517 The workaround is to check whether the PID we are asked to remove this
1518 breakpoint from (i.e. ptid_get_pid (inferior_ptid)) is different from the
1519 PID of the current inferior (i.e. current_inferior ()->pid). This is only
1520 true in the context of detach_breakpoints. If so, we simply do nothing.
1521 [ Note that for the fork child process, it does not matter if breakpoints
1522 remain inserted, because those SPU contexts are not runnable anyway --
1523 the Linux kernel allows only the original process to invoke spu_run. */
1525 if (ptid_get_pid (inferior_ptid
) != current_inferior ()->pid
)
1528 return default_memory_remove_breakpoint (gdbarch
, bp_tgt
);
1532 /* Software single-stepping support. */
1535 spu_software_single_step (struct frame_info
*frame
)
1537 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
1538 struct address_space
*aspace
= get_frame_address_space (frame
);
1539 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1540 CORE_ADDR pc
, next_pc
;
1546 pc
= get_frame_pc (frame
);
1548 if (target_read_memory (pc
, buf
, 4))
1550 insn
= extract_unsigned_integer (buf
, 4, byte_order
);
1552 /* Get local store limit. */
1553 lslr
= get_frame_register_unsigned (frame
, SPU_LSLR_REGNUM
);
1555 lslr
= (ULONGEST
) -1;
1557 /* Next sequential instruction is at PC + 4, except if the current
1558 instruction is a PPE-assisted call, in which case it is at PC + 8.
1559 Wrap around LS limit to be on the safe side. */
1560 if ((insn
& 0xffffff00) == 0x00002100)
1561 next_pc
= (SPUADDR_ADDR (pc
) + 8) & lslr
;
1563 next_pc
= (SPUADDR_ADDR (pc
) + 4) & lslr
;
1565 insert_single_step_breakpoint (gdbarch
,
1566 aspace
, SPUADDR (SPUADDR_SPU (pc
), next_pc
));
1568 if (is_branch (insn
, &offset
, ®
))
1570 CORE_ADDR target
= offset
;
1572 if (reg
== SPU_PC_REGNUM
)
1573 target
+= SPUADDR_ADDR (pc
);
1576 get_frame_register_bytes (frame
, reg
, 0, 4, buf
);
1577 target
+= extract_unsigned_integer (buf
, 4, byte_order
) & -4;
1580 target
= target
& lslr
;
1581 if (target
!= next_pc
)
1582 insert_single_step_breakpoint (gdbarch
, aspace
,
1583 SPUADDR (SPUADDR_SPU (pc
), target
));
1590 /* Longjmp support. */
1593 spu_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
1595 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
1596 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1597 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1601 /* Jump buffer is pointed to by the argument register $r3. */
1602 get_frame_register_bytes (frame
, SPU_ARG1_REGNUM
, 0, 4, buf
);
1603 jb_addr
= extract_unsigned_integer (buf
, 4, byte_order
);
1604 if (target_read_memory (SPUADDR (tdep
->id
, jb_addr
), buf
, 4))
1607 *pc
= extract_unsigned_integer (buf
, 4, byte_order
);
1608 *pc
= SPUADDR (tdep
->id
, *pc
);
1615 struct spu_dis_asm_data
1617 struct gdbarch
*gdbarch
;
1622 spu_dis_asm_print_address (bfd_vma addr
, struct disassemble_info
*info
)
1624 struct spu_dis_asm_data
*data
= info
->application_data
;
1625 print_address (data
->gdbarch
, SPUADDR (data
->id
, addr
), info
->stream
);
1629 gdb_print_insn_spu (bfd_vma memaddr
, struct disassemble_info
*info
)
1631 /* The opcodes disassembler does 18-bit address arithmetic. Make sure the
1632 SPU ID encoded in the high bits is added back when we call print_address. */
1633 struct disassemble_info spu_info
= *info
;
1634 struct spu_dis_asm_data data
;
1635 data
.gdbarch
= info
->application_data
;
1636 data
.id
= SPUADDR_SPU (memaddr
);
1638 spu_info
.application_data
= &data
;
1639 spu_info
.print_address_func
= spu_dis_asm_print_address
;
1640 return print_insn_spu (memaddr
, &spu_info
);
1644 /* Target overlays for the SPU overlay manager.
1646 See the documentation of simple_overlay_update for how the
1647 interface is supposed to work.
1649 Data structures used by the overlay manager:
1657 } _ovly_table[]; -- one entry per overlay section
1659 struct ovly_buf_table
1662 } _ovly_buf_table[]; -- one entry per overlay buffer
1664 _ovly_table should never change.
1666 Both tables are aligned to a 16-byte boundary, the symbols _ovly_table
1667 and _ovly_buf_table are of type STT_OBJECT and their size set to the size
1668 of the respective array. buf in _ovly_table is an index into _ovly_buf_table.
1670 mapped is an index into _ovly_table. Both the mapped and buf indices start
1671 from one to reference the first entry in their respective tables. */
1673 /* Using the per-objfile private data mechanism, we store for each
1674 objfile an array of "struct spu_overlay_table" structures, one
1675 for each obj_section of the objfile. This structure holds two
1676 fields, MAPPED_PTR and MAPPED_VAL. If MAPPED_PTR is zero, this
1677 is *not* an overlay section. If it is non-zero, it represents
1678 a target address. The overlay section is mapped iff the target
1679 integer at this location equals MAPPED_VAL. */
1681 static const struct objfile_data
*spu_overlay_data
;
1683 struct spu_overlay_table
1685 CORE_ADDR mapped_ptr
;
1686 CORE_ADDR mapped_val
;
1689 /* Retrieve the overlay table for OBJFILE. If not already cached, read
1690 the _ovly_table data structure from the target and initialize the
1691 spu_overlay_table data structure from it. */
1692 static struct spu_overlay_table
*
1693 spu_get_overlay_table (struct objfile
*objfile
)
1695 enum bfd_endian byte_order
= bfd_big_endian (objfile
->obfd
)?
1696 BFD_ENDIAN_BIG
: BFD_ENDIAN_LITTLE
;
1697 struct minimal_symbol
*ovly_table_msym
, *ovly_buf_table_msym
;
1698 CORE_ADDR ovly_table_base
, ovly_buf_table_base
;
1699 unsigned ovly_table_size
, ovly_buf_table_size
;
1700 struct spu_overlay_table
*tbl
;
1701 struct obj_section
*osect
;
1705 tbl
= objfile_data (objfile
, spu_overlay_data
);
1709 ovly_table_msym
= lookup_minimal_symbol ("_ovly_table", NULL
, objfile
);
1710 if (!ovly_table_msym
)
1713 ovly_buf_table_msym
= lookup_minimal_symbol ("_ovly_buf_table", NULL
, objfile
);
1714 if (!ovly_buf_table_msym
)
1717 ovly_table_base
= SYMBOL_VALUE_ADDRESS (ovly_table_msym
);
1718 ovly_table_size
= MSYMBOL_SIZE (ovly_table_msym
);
1720 ovly_buf_table_base
= SYMBOL_VALUE_ADDRESS (ovly_buf_table_msym
);
1721 ovly_buf_table_size
= MSYMBOL_SIZE (ovly_buf_table_msym
);
1723 ovly_table
= xmalloc (ovly_table_size
);
1724 read_memory (ovly_table_base
, ovly_table
, ovly_table_size
);
1726 tbl
= OBSTACK_CALLOC (&objfile
->objfile_obstack
,
1727 objfile
->sections_end
- objfile
->sections
,
1728 struct spu_overlay_table
);
1730 for (i
= 0; i
< ovly_table_size
/ 16; i
++)
1732 CORE_ADDR vma
= extract_unsigned_integer (ovly_table
+ 16*i
+ 0,
1734 CORE_ADDR size
= extract_unsigned_integer (ovly_table
+ 16*i
+ 4,
1736 CORE_ADDR pos
= extract_unsigned_integer (ovly_table
+ 16*i
+ 8,
1738 CORE_ADDR buf
= extract_unsigned_integer (ovly_table
+ 16*i
+ 12,
1741 if (buf
== 0 || (buf
- 1) * 4 >= ovly_buf_table_size
)
1744 ALL_OBJFILE_OSECTIONS (objfile
, osect
)
1745 if (vma
== bfd_section_vma (objfile
->obfd
, osect
->the_bfd_section
)
1746 && pos
== osect
->the_bfd_section
->filepos
)
1748 int ndx
= osect
- objfile
->sections
;
1749 tbl
[ndx
].mapped_ptr
= ovly_buf_table_base
+ (buf
- 1) * 4;
1750 tbl
[ndx
].mapped_val
= i
+ 1;
1756 set_objfile_data (objfile
, spu_overlay_data
, tbl
);
1760 /* Read _ovly_buf_table entry from the target to dermine whether
1761 OSECT is currently mapped, and update the mapped state. */
1763 spu_overlay_update_osect (struct obj_section
*osect
)
1765 enum bfd_endian byte_order
= bfd_big_endian (osect
->objfile
->obfd
)?
1766 BFD_ENDIAN_BIG
: BFD_ENDIAN_LITTLE
;
1767 struct spu_overlay_table
*ovly_table
;
1770 ovly_table
= spu_get_overlay_table (osect
->objfile
);
1774 ovly_table
+= osect
- osect
->objfile
->sections
;
1775 if (ovly_table
->mapped_ptr
== 0)
1778 id
= SPUADDR_SPU (obj_section_addr (osect
));
1779 val
= read_memory_unsigned_integer (SPUADDR (id
, ovly_table
->mapped_ptr
),
1781 osect
->ovly_mapped
= (val
== ovly_table
->mapped_val
);
1784 /* If OSECT is NULL, then update all sections' mapped state.
1785 If OSECT is non-NULL, then update only OSECT's mapped state. */
1787 spu_overlay_update (struct obj_section
*osect
)
1789 /* Just one section. */
1791 spu_overlay_update_osect (osect
);
1796 struct objfile
*objfile
;
1798 ALL_OBJSECTIONS (objfile
, osect
)
1799 if (section_is_overlay (osect
))
1800 spu_overlay_update_osect (osect
);
1804 /* Whenever a new objfile is loaded, read the target's _ovly_table.
1805 If there is one, go through all sections and make sure for non-
1806 overlay sections LMA equals VMA, while for overlay sections LMA
1807 is larger than SPU_OVERLAY_LMA. */
1809 spu_overlay_new_objfile (struct objfile
*objfile
)
1811 struct spu_overlay_table
*ovly_table
;
1812 struct obj_section
*osect
;
1814 /* If we've already touched this file, do nothing. */
1815 if (!objfile
|| objfile_data (objfile
, spu_overlay_data
) != NULL
)
1818 /* Consider only SPU objfiles. */
1819 if (bfd_get_arch (objfile
->obfd
) != bfd_arch_spu
)
1822 /* Check if this objfile has overlays. */
1823 ovly_table
= spu_get_overlay_table (objfile
);
1827 /* Now go and fiddle with all the LMAs. */
1828 ALL_OBJFILE_OSECTIONS (objfile
, osect
)
1830 bfd
*obfd
= objfile
->obfd
;
1831 asection
*bsect
= osect
->the_bfd_section
;
1832 int ndx
= osect
- objfile
->sections
;
1834 if (ovly_table
[ndx
].mapped_ptr
== 0)
1835 bfd_section_lma (obfd
, bsect
) = bfd_section_vma (obfd
, bsect
);
1837 bfd_section_lma (obfd
, bsect
) = SPU_OVERLAY_LMA
+ bsect
->filepos
;
1842 /* Insert temporary breakpoint on "main" function of newly loaded
1843 SPE context OBJFILE. */
1845 spu_catch_start (struct objfile
*objfile
)
1847 struct minimal_symbol
*minsym
;
1848 struct symtab
*symtab
;
1852 /* Do this only if requested by "set spu stop-on-load on". */
1853 if (!spu_stop_on_load_p
)
1856 /* Consider only SPU objfiles. */
1857 if (!objfile
|| bfd_get_arch (objfile
->obfd
) != bfd_arch_spu
)
1860 /* The main objfile is handled differently. */
1861 if (objfile
== symfile_objfile
)
1864 /* There can be multiple symbols named "main". Search for the
1865 "main" in *this* objfile. */
1866 minsym
= lookup_minimal_symbol ("main", NULL
, objfile
);
1870 /* If we have debugging information, try to use it -- this
1871 will allow us to properly skip the prologue. */
1872 pc
= SYMBOL_VALUE_ADDRESS (minsym
);
1873 symtab
= find_pc_sect_symtab (pc
, SYMBOL_OBJ_SECTION (minsym
));
1876 struct blockvector
*bv
= BLOCKVECTOR (symtab
);
1877 struct block
*block
= BLOCKVECTOR_BLOCK (bv
, GLOBAL_BLOCK
);
1879 struct symtab_and_line sal
;
1881 sym
= lookup_block_symbol (block
, "main", VAR_DOMAIN
);
1884 fixup_symbol_section (sym
, objfile
);
1885 sal
= find_function_start_sal (sym
, 1);
1890 /* Use a numerical address for the set_breakpoint command to avoid having
1891 the breakpoint re-set incorrectly. */
1892 xsnprintf (buf
, sizeof buf
, "*%s", core_addr_to_string (pc
));
1893 create_breakpoint (get_objfile_arch (objfile
), buf
/* arg */,
1894 NULL
/* cond_string */, -1 /* thread */,
1895 0 /* parse_condition_and_thread */, 1 /* tempflag */,
1896 bp_breakpoint
/* type_wanted */,
1897 0 /* ignore_count */,
1898 AUTO_BOOLEAN_FALSE
/* pending_break_support */,
1899 NULL
/* ops */, 0 /* from_tty */, 1 /* enabled */,
1904 /* Look up OBJFILE loaded into FRAME's SPU context. */
1905 static struct objfile
*
1906 spu_objfile_from_frame (struct frame_info
*frame
)
1908 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
1909 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1910 struct objfile
*obj
;
1912 if (gdbarch_bfd_arch_info (gdbarch
)->arch
!= bfd_arch_spu
)
1917 if (obj
->sections
!= obj
->sections_end
1918 && SPUADDR_SPU (obj_section_addr (obj
->sections
)) == tdep
->id
)
1925 /* Flush cache for ea pointer access if available. */
1927 flush_ea_cache (void)
1929 struct minimal_symbol
*msymbol
;
1930 struct objfile
*obj
;
1932 if (!has_stack_frames ())
1935 obj
= spu_objfile_from_frame (get_current_frame ());
1939 /* Lookup inferior function __cache_flush. */
1940 msymbol
= lookup_minimal_symbol ("__cache_flush", NULL
, obj
);
1941 if (msymbol
!= NULL
)
1946 type
= objfile_type (obj
)->builtin_void
;
1947 type
= lookup_function_type (type
);
1948 type
= lookup_pointer_type (type
);
1949 addr
= SYMBOL_VALUE_ADDRESS (msymbol
);
1951 call_function_by_hand (value_from_pointer (type
, addr
), 0, NULL
);
1955 /* This handler is called when the inferior has stopped. If it is stopped in
1956 SPU architecture then flush the ea cache if used. */
1958 spu_attach_normal_stop (struct bpstats
*bs
, int print_frame
)
1960 if (!spu_auto_flush_cache_p
)
1963 /* Temporarily reset spu_auto_flush_cache_p to avoid recursively
1964 re-entering this function when __cache_flush stops. */
1965 spu_auto_flush_cache_p
= 0;
1967 spu_auto_flush_cache_p
= 1;
1971 /* "info spu" commands. */
1974 info_spu_event_command (char *args
, int from_tty
)
1976 struct frame_info
*frame
= get_selected_frame (NULL
);
1977 ULONGEST event_status
= 0;
1978 ULONGEST event_mask
= 0;
1979 struct cleanup
*chain
;
1985 if (gdbarch_bfd_arch_info (get_frame_arch (frame
))->arch
!= bfd_arch_spu
)
1986 error (_("\"info spu\" is only supported on the SPU architecture."));
1988 id
= get_frame_register_unsigned (frame
, SPU_ID_REGNUM
);
1990 xsnprintf (annex
, sizeof annex
, "%d/event_status", id
);
1991 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
1992 buf
, 0, (sizeof (buf
) - 1));
1994 error (_("Could not read event_status."));
1996 event_status
= strtoulst (buf
, NULL
, 16);
1998 xsnprintf (annex
, sizeof annex
, "%d/event_mask", id
);
1999 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
2000 buf
, 0, (sizeof (buf
) - 1));
2002 error (_("Could not read event_mask."));
2004 event_mask
= strtoulst (buf
, NULL
, 16);
2006 chain
= make_cleanup_ui_out_tuple_begin_end (uiout
, "SPUInfoEvent");
2008 if (ui_out_is_mi_like_p (uiout
))
2010 ui_out_field_fmt (uiout
, "event_status",
2011 "0x%s", phex_nz (event_status
, 4));
2012 ui_out_field_fmt (uiout
, "event_mask",
2013 "0x%s", phex_nz (event_mask
, 4));
2017 printf_filtered (_("Event Status 0x%s\n"), phex (event_status
, 4));
2018 printf_filtered (_("Event Mask 0x%s\n"), phex (event_mask
, 4));
2021 do_cleanups (chain
);
2025 info_spu_signal_command (char *args
, int from_tty
)
2027 struct frame_info
*frame
= get_selected_frame (NULL
);
2028 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2029 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2030 ULONGEST signal1
= 0;
2031 ULONGEST signal1_type
= 0;
2032 int signal1_pending
= 0;
2033 ULONGEST signal2
= 0;
2034 ULONGEST signal2_type
= 0;
2035 int signal2_pending
= 0;
2036 struct cleanup
*chain
;
2042 if (gdbarch_bfd_arch_info (gdbarch
)->arch
!= bfd_arch_spu
)
2043 error (_("\"info spu\" is only supported on the SPU architecture."));
2045 id
= get_frame_register_unsigned (frame
, SPU_ID_REGNUM
);
2047 xsnprintf (annex
, sizeof annex
, "%d/signal1", id
);
2048 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
, buf
, 0, 4);
2050 error (_("Could not read signal1."));
2053 signal1
= extract_unsigned_integer (buf
, 4, byte_order
);
2054 signal1_pending
= 1;
2057 xsnprintf (annex
, sizeof annex
, "%d/signal1_type", id
);
2058 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
2059 buf
, 0, (sizeof (buf
) - 1));
2061 error (_("Could not read signal1_type."));
2063 signal1_type
= strtoulst (buf
, NULL
, 16);
2065 xsnprintf (annex
, sizeof annex
, "%d/signal2", id
);
2066 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
, buf
, 0, 4);
2068 error (_("Could not read signal2."));
2071 signal2
= extract_unsigned_integer (buf
, 4, byte_order
);
2072 signal2_pending
= 1;
2075 xsnprintf (annex
, sizeof annex
, "%d/signal2_type", id
);
2076 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
2077 buf
, 0, (sizeof (buf
) - 1));
2079 error (_("Could not read signal2_type."));
2081 signal2_type
= strtoulst (buf
, NULL
, 16);
2083 chain
= make_cleanup_ui_out_tuple_begin_end (uiout
, "SPUInfoSignal");
2085 if (ui_out_is_mi_like_p (uiout
))
2087 ui_out_field_int (uiout
, "signal1_pending", signal1_pending
);
2088 ui_out_field_fmt (uiout
, "signal1", "0x%s", phex_nz (signal1
, 4));
2089 ui_out_field_int (uiout
, "signal1_type", signal1_type
);
2090 ui_out_field_int (uiout
, "signal2_pending", signal2_pending
);
2091 ui_out_field_fmt (uiout
, "signal2", "0x%s", phex_nz (signal2
, 4));
2092 ui_out_field_int (uiout
, "signal2_type", signal2_type
);
2096 if (signal1_pending
)
2097 printf_filtered (_("Signal 1 control word 0x%s "), phex (signal1
, 4));
2099 printf_filtered (_("Signal 1 not pending "));
2102 printf_filtered (_("(Type Or)\n"));
2104 printf_filtered (_("(Type Overwrite)\n"));
2106 if (signal2_pending
)
2107 printf_filtered (_("Signal 2 control word 0x%s "), phex (signal2
, 4));
2109 printf_filtered (_("Signal 2 not pending "));
2112 printf_filtered (_("(Type Or)\n"));
2114 printf_filtered (_("(Type Overwrite)\n"));
2117 do_cleanups (chain
);
2121 info_spu_mailbox_list (gdb_byte
*buf
, int nr
, enum bfd_endian byte_order
,
2122 const char *field
, const char *msg
)
2124 struct cleanup
*chain
;
2130 chain
= make_cleanup_ui_out_table_begin_end (uiout
, 1, nr
, "mbox");
2132 ui_out_table_header (uiout
, 32, ui_left
, field
, msg
);
2133 ui_out_table_body (uiout
);
2135 for (i
= 0; i
< nr
; i
++)
2137 struct cleanup
*val_chain
;
2139 val_chain
= make_cleanup_ui_out_tuple_begin_end (uiout
, "mbox");
2140 val
= extract_unsigned_integer (buf
+ 4*i
, 4, byte_order
);
2141 ui_out_field_fmt (uiout
, field
, "0x%s", phex (val
, 4));
2142 do_cleanups (val_chain
);
2144 if (!ui_out_is_mi_like_p (uiout
))
2145 printf_filtered ("\n");
2148 do_cleanups (chain
);
2152 info_spu_mailbox_command (char *args
, int from_tty
)
2154 struct frame_info
*frame
= get_selected_frame (NULL
);
2155 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2156 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2157 struct cleanup
*chain
;
2163 if (gdbarch_bfd_arch_info (gdbarch
)->arch
!= bfd_arch_spu
)
2164 error (_("\"info spu\" is only supported on the SPU architecture."));
2166 id
= get_frame_register_unsigned (frame
, SPU_ID_REGNUM
);
2168 chain
= make_cleanup_ui_out_tuple_begin_end (uiout
, "SPUInfoMailbox");
2170 xsnprintf (annex
, sizeof annex
, "%d/mbox_info", id
);
2171 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
2172 buf
, 0, sizeof buf
);
2174 error (_("Could not read mbox_info."));
2176 info_spu_mailbox_list (buf
, len
/ 4, byte_order
,
2177 "mbox", "SPU Outbound Mailbox");
2179 xsnprintf (annex
, sizeof annex
, "%d/ibox_info", id
);
2180 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
2181 buf
, 0, sizeof buf
);
2183 error (_("Could not read ibox_info."));
2185 info_spu_mailbox_list (buf
, len
/ 4, byte_order
,
2186 "ibox", "SPU Outbound Interrupt Mailbox");
2188 xsnprintf (annex
, sizeof annex
, "%d/wbox_info", id
);
2189 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
2190 buf
, 0, sizeof buf
);
2192 error (_("Could not read wbox_info."));
2194 info_spu_mailbox_list (buf
, len
/ 4, byte_order
,
2195 "wbox", "SPU Inbound Mailbox");
2197 do_cleanups (chain
);
2201 spu_mfc_get_bitfield (ULONGEST word
, int first
, int last
)
2203 ULONGEST mask
= ~(~(ULONGEST
)0 << (last
- first
+ 1));
2204 return (word
>> (63 - last
)) & mask
;
2208 info_spu_dma_cmdlist (gdb_byte
*buf
, int nr
, enum bfd_endian byte_order
)
2210 static char *spu_mfc_opcode
[256] =
2212 /* 00 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2213 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2214 /* 10 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2215 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2216 /* 20 */ "put", "putb", "putf", NULL
, "putl", "putlb", "putlf", NULL
,
2217 "puts", "putbs", "putfs", NULL
, NULL
, NULL
, NULL
, NULL
,
2218 /* 30 */ "putr", "putrb", "putrf", NULL
, "putrl", "putrlb", "putrlf", NULL
,
2219 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2220 /* 40 */ "get", "getb", "getf", NULL
, "getl", "getlb", "getlf", NULL
,
2221 "gets", "getbs", "getfs", NULL
, NULL
, NULL
, NULL
, NULL
,
2222 /* 50 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2223 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2224 /* 60 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2225 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2226 /* 70 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2227 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2228 /* 80 */ "sdcrt", "sdcrtst", NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2229 NULL
, "sdcrz", NULL
, NULL
, NULL
, "sdcrst", NULL
, "sdcrf",
2230 /* 90 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2231 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2232 /* a0 */ "sndsig", "sndsigb", "sndsigf", NULL
, NULL
, NULL
, NULL
, NULL
,
2233 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2234 /* b0 */ "putlluc", NULL
, NULL
, NULL
, "putllc", NULL
, NULL
, NULL
,
2235 "putqlluc", NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2236 /* c0 */ "barrier", NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2237 "mfceieio", NULL
, NULL
, NULL
, "mfcsync", NULL
, NULL
, NULL
,
2238 /* d0 */ "getllar", NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2239 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2240 /* e0 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2241 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2242 /* f0 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2243 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2246 int *seq
= alloca (nr
* sizeof (int));
2248 struct cleanup
*chain
;
2252 /* Determine sequence in which to display (valid) entries. */
2253 for (i
= 0; i
< nr
; i
++)
2255 /* Search for the first valid entry all of whose
2256 dependencies are met. */
2257 for (j
= 0; j
< nr
; j
++)
2259 ULONGEST mfc_cq_dw3
;
2260 ULONGEST dependencies
;
2262 if (done
& (1 << (nr
- 1 - j
)))
2266 = extract_unsigned_integer (buf
+ 32*j
+ 24,8, byte_order
);
2267 if (!spu_mfc_get_bitfield (mfc_cq_dw3
, 16, 16))
2270 dependencies
= spu_mfc_get_bitfield (mfc_cq_dw3
, 0, nr
- 1);
2271 if ((dependencies
& done
) != dependencies
)
2275 done
|= 1 << (nr
- 1 - j
);
2286 chain
= make_cleanup_ui_out_table_begin_end (uiout
, 10, nr
, "dma_cmd");
2288 ui_out_table_header (uiout
, 7, ui_left
, "opcode", "Opcode");
2289 ui_out_table_header (uiout
, 3, ui_left
, "tag", "Tag");
2290 ui_out_table_header (uiout
, 3, ui_left
, "tid", "TId");
2291 ui_out_table_header (uiout
, 3, ui_left
, "rid", "RId");
2292 ui_out_table_header (uiout
, 18, ui_left
, "ea", "EA");
2293 ui_out_table_header (uiout
, 7, ui_left
, "lsa", "LSA");
2294 ui_out_table_header (uiout
, 7, ui_left
, "size", "Size");
2295 ui_out_table_header (uiout
, 7, ui_left
, "lstaddr", "LstAddr");
2296 ui_out_table_header (uiout
, 7, ui_left
, "lstsize", "LstSize");
2297 ui_out_table_header (uiout
, 1, ui_left
, "error_p", "E");
2299 ui_out_table_body (uiout
);
2301 for (i
= 0; i
< nr
; i
++)
2303 struct cleanup
*cmd_chain
;
2304 ULONGEST mfc_cq_dw0
;
2305 ULONGEST mfc_cq_dw1
;
2306 ULONGEST mfc_cq_dw2
;
2307 int mfc_cmd_opcode
, mfc_cmd_tag
, rclass_id
, tclass_id
;
2308 int lsa
, size
, list_lsa
, list_size
, mfc_lsa
, mfc_size
;
2310 int list_valid_p
, noop_valid_p
, qw_valid_p
, ea_valid_p
, cmd_error_p
;
2312 /* Decode contents of MFC Command Queue Context Save/Restore Registers.
2313 See "Cell Broadband Engine Registers V1.3", section 3.3.2.1. */
2316 = extract_unsigned_integer (buf
+ 32*seq
[i
], 8, byte_order
);
2318 = extract_unsigned_integer (buf
+ 32*seq
[i
] + 8, 8, byte_order
);
2320 = extract_unsigned_integer (buf
+ 32*seq
[i
] + 16, 8, byte_order
);
2322 list_lsa
= spu_mfc_get_bitfield (mfc_cq_dw0
, 0, 14);
2323 list_size
= spu_mfc_get_bitfield (mfc_cq_dw0
, 15, 26);
2324 mfc_cmd_opcode
= spu_mfc_get_bitfield (mfc_cq_dw0
, 27, 34);
2325 mfc_cmd_tag
= spu_mfc_get_bitfield (mfc_cq_dw0
, 35, 39);
2326 list_valid_p
= spu_mfc_get_bitfield (mfc_cq_dw0
, 40, 40);
2327 rclass_id
= spu_mfc_get_bitfield (mfc_cq_dw0
, 41, 43);
2328 tclass_id
= spu_mfc_get_bitfield (mfc_cq_dw0
, 44, 46);
2330 mfc_ea
= spu_mfc_get_bitfield (mfc_cq_dw1
, 0, 51) << 12
2331 | spu_mfc_get_bitfield (mfc_cq_dw2
, 25, 36);
2333 mfc_lsa
= spu_mfc_get_bitfield (mfc_cq_dw2
, 0, 13);
2334 mfc_size
= spu_mfc_get_bitfield (mfc_cq_dw2
, 14, 24);
2335 noop_valid_p
= spu_mfc_get_bitfield (mfc_cq_dw2
, 37, 37);
2336 qw_valid_p
= spu_mfc_get_bitfield (mfc_cq_dw2
, 38, 38);
2337 ea_valid_p
= spu_mfc_get_bitfield (mfc_cq_dw2
, 39, 39);
2338 cmd_error_p
= spu_mfc_get_bitfield (mfc_cq_dw2
, 40, 40);
2340 cmd_chain
= make_cleanup_ui_out_tuple_begin_end (uiout
, "cmd");
2342 if (spu_mfc_opcode
[mfc_cmd_opcode
])
2343 ui_out_field_string (uiout
, "opcode", spu_mfc_opcode
[mfc_cmd_opcode
]);
2345 ui_out_field_int (uiout
, "opcode", mfc_cmd_opcode
);
2347 ui_out_field_int (uiout
, "tag", mfc_cmd_tag
);
2348 ui_out_field_int (uiout
, "tid", tclass_id
);
2349 ui_out_field_int (uiout
, "rid", rclass_id
);
2352 ui_out_field_fmt (uiout
, "ea", "0x%s", phex (mfc_ea
, 8));
2354 ui_out_field_skip (uiout
, "ea");
2356 ui_out_field_fmt (uiout
, "lsa", "0x%05x", mfc_lsa
<< 4);
2358 ui_out_field_fmt (uiout
, "size", "0x%05x", mfc_size
<< 4);
2360 ui_out_field_fmt (uiout
, "size", "0x%05x", mfc_size
);
2364 ui_out_field_fmt (uiout
, "lstaddr", "0x%05x", list_lsa
<< 3);
2365 ui_out_field_fmt (uiout
, "lstsize", "0x%05x", list_size
<< 3);
2369 ui_out_field_skip (uiout
, "lstaddr");
2370 ui_out_field_skip (uiout
, "lstsize");
2374 ui_out_field_string (uiout
, "error_p", "*");
2376 ui_out_field_skip (uiout
, "error_p");
2378 do_cleanups (cmd_chain
);
2380 if (!ui_out_is_mi_like_p (uiout
))
2381 printf_filtered ("\n");
2384 do_cleanups (chain
);
2388 info_spu_dma_command (char *args
, int from_tty
)
2390 struct frame_info
*frame
= get_selected_frame (NULL
);
2391 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2392 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2393 ULONGEST dma_info_type
;
2394 ULONGEST dma_info_mask
;
2395 ULONGEST dma_info_status
;
2396 ULONGEST dma_info_stall_and_notify
;
2397 ULONGEST dma_info_atomic_command_status
;
2398 struct cleanup
*chain
;
2404 if (gdbarch_bfd_arch_info (get_frame_arch (frame
))->arch
!= bfd_arch_spu
)
2405 error (_("\"info spu\" is only supported on the SPU architecture."));
2407 id
= get_frame_register_unsigned (frame
, SPU_ID_REGNUM
);
2409 xsnprintf (annex
, sizeof annex
, "%d/dma_info", id
);
2410 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
2411 buf
, 0, 40 + 16 * 32);
2413 error (_("Could not read dma_info."));
2416 = extract_unsigned_integer (buf
, 8, byte_order
);
2418 = extract_unsigned_integer (buf
+ 8, 8, byte_order
);
2420 = extract_unsigned_integer (buf
+ 16, 8, byte_order
);
2421 dma_info_stall_and_notify
2422 = extract_unsigned_integer (buf
+ 24, 8, byte_order
);
2423 dma_info_atomic_command_status
2424 = extract_unsigned_integer (buf
+ 32, 8, byte_order
);
2426 chain
= make_cleanup_ui_out_tuple_begin_end (uiout
, "SPUInfoDMA");
2428 if (ui_out_is_mi_like_p (uiout
))
2430 ui_out_field_fmt (uiout
, "dma_info_type", "0x%s",
2431 phex_nz (dma_info_type
, 4));
2432 ui_out_field_fmt (uiout
, "dma_info_mask", "0x%s",
2433 phex_nz (dma_info_mask
, 4));
2434 ui_out_field_fmt (uiout
, "dma_info_status", "0x%s",
2435 phex_nz (dma_info_status
, 4));
2436 ui_out_field_fmt (uiout
, "dma_info_stall_and_notify", "0x%s",
2437 phex_nz (dma_info_stall_and_notify
, 4));
2438 ui_out_field_fmt (uiout
, "dma_info_atomic_command_status", "0x%s",
2439 phex_nz (dma_info_atomic_command_status
, 4));
2443 const char *query_msg
= _("no query pending");
2445 if (dma_info_type
& 4)
2446 switch (dma_info_type
& 3)
2448 case 1: query_msg
= _("'any' query pending"); break;
2449 case 2: query_msg
= _("'all' query pending"); break;
2450 default: query_msg
= _("undefined query type"); break;
2453 printf_filtered (_("Tag-Group Status 0x%s\n"),
2454 phex (dma_info_status
, 4));
2455 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
2456 phex (dma_info_mask
, 4), query_msg
);
2457 printf_filtered (_("Stall-and-Notify 0x%s\n"),
2458 phex (dma_info_stall_and_notify
, 4));
2459 printf_filtered (_("Atomic Cmd Status 0x%s\n"),
2460 phex (dma_info_atomic_command_status
, 4));
2461 printf_filtered ("\n");
2464 info_spu_dma_cmdlist (buf
+ 40, 16, byte_order
);
2465 do_cleanups (chain
);
2469 info_spu_proxydma_command (char *args
, int from_tty
)
2471 struct frame_info
*frame
= get_selected_frame (NULL
);
2472 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2473 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2474 ULONGEST dma_info_type
;
2475 ULONGEST dma_info_mask
;
2476 ULONGEST dma_info_status
;
2477 struct cleanup
*chain
;
2483 if (gdbarch_bfd_arch_info (gdbarch
)->arch
!= bfd_arch_spu
)
2484 error (_("\"info spu\" is only supported on the SPU architecture."));
2486 id
= get_frame_register_unsigned (frame
, SPU_ID_REGNUM
);
2488 xsnprintf (annex
, sizeof annex
, "%d/proxydma_info", id
);
2489 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
2490 buf
, 0, 24 + 8 * 32);
2492 error (_("Could not read proxydma_info."));
2494 dma_info_type
= extract_unsigned_integer (buf
, 8, byte_order
);
2495 dma_info_mask
= extract_unsigned_integer (buf
+ 8, 8, byte_order
);
2496 dma_info_status
= extract_unsigned_integer (buf
+ 16, 8, byte_order
);
2498 chain
= make_cleanup_ui_out_tuple_begin_end (uiout
, "SPUInfoProxyDMA");
2500 if (ui_out_is_mi_like_p (uiout
))
2502 ui_out_field_fmt (uiout
, "proxydma_info_type", "0x%s",
2503 phex_nz (dma_info_type
, 4));
2504 ui_out_field_fmt (uiout
, "proxydma_info_mask", "0x%s",
2505 phex_nz (dma_info_mask
, 4));
2506 ui_out_field_fmt (uiout
, "proxydma_info_status", "0x%s",
2507 phex_nz (dma_info_status
, 4));
2511 const char *query_msg
;
2513 switch (dma_info_type
& 3)
2515 case 0: query_msg
= _("no query pending"); break;
2516 case 1: query_msg
= _("'any' query pending"); break;
2517 case 2: query_msg
= _("'all' query pending"); break;
2518 default: query_msg
= _("undefined query type"); break;
2521 printf_filtered (_("Tag-Group Status 0x%s\n"),
2522 phex (dma_info_status
, 4));
2523 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
2524 phex (dma_info_mask
, 4), query_msg
);
2525 printf_filtered ("\n");
2528 info_spu_dma_cmdlist (buf
+ 24, 8, byte_order
);
2529 do_cleanups (chain
);
2533 info_spu_command (char *args
, int from_tty
)
2535 printf_unfiltered (_("\"info spu\" must be followed by the name of an SPU facility.\n"));
2536 help_list (infospucmdlist
, "info spu ", -1, gdb_stdout
);
2540 /* Root of all "set spu "/"show spu " commands. */
2543 show_spu_command (char *args
, int from_tty
)
2545 help_list (showspucmdlist
, "show spu ", all_commands
, gdb_stdout
);
2549 set_spu_command (char *args
, int from_tty
)
2551 help_list (setspucmdlist
, "set spu ", all_commands
, gdb_stdout
);
/* "show spu stop-on-load" callback: report the current value of the
   spu_stop_on_load_p setting.  VALUE is the printable form supplied by
   the add_setshow machinery.  */
static void
show_spu_stop_on_load (struct ui_file *file, int from_tty,
		       struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Stopping for new SPE threads is %s.\n"),
		    value);
}
/* "show spu auto-flush-cache" callback: report the current value of
   the spu_auto_flush_cache_p setting.  VALUE is the printable form
   supplied by the add_setshow machinery.  */
static void
show_spu_auto_flush_cache (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Automatic software-cache flush is %s.\n"),
		    value);
}
2571 /* Set up gdbarch struct. */
2573 static struct gdbarch
*
2574 spu_gdbarch_init (struct gdbarch_info info
, struct gdbarch_list
*arches
)
2576 struct gdbarch
*gdbarch
;
2577 struct gdbarch_tdep
*tdep
;
2580 /* Which spufs ID was requested as address space? */
2582 id
= *(int *)info
.tdep_info
;
2583 /* For objfile architectures of SPU solibs, decode the ID from the name.
2584 This assumes the filename convention employed by solib-spu.c. */
2587 char *name
= strrchr (info
.abfd
->filename
, '@');
2589 sscanf (name
, "@0x%*x <%d>", &id
);
2592 /* Find a candidate among extant architectures. */
2593 for (arches
= gdbarch_list_lookup_by_info (arches
, &info
);
2595 arches
= gdbarch_list_lookup_by_info (arches
->next
, &info
))
2597 tdep
= gdbarch_tdep (arches
->gdbarch
);
2598 if (tdep
&& tdep
->id
== id
)
2599 return arches
->gdbarch
;
2602 /* None found, so create a new architecture. */
2603 tdep
= XCALLOC (1, struct gdbarch_tdep
);
2605 gdbarch
= gdbarch_alloc (&info
, tdep
);
2608 set_gdbarch_print_insn (gdbarch
, gdb_print_insn_spu
);
2611 set_gdbarch_num_regs (gdbarch
, SPU_NUM_REGS
);
2612 set_gdbarch_num_pseudo_regs (gdbarch
, SPU_NUM_PSEUDO_REGS
);
2613 set_gdbarch_sp_regnum (gdbarch
, SPU_SP_REGNUM
);
2614 set_gdbarch_pc_regnum (gdbarch
, SPU_PC_REGNUM
);
2615 set_gdbarch_read_pc (gdbarch
, spu_read_pc
);
2616 set_gdbarch_write_pc (gdbarch
, spu_write_pc
);
2617 set_gdbarch_register_name (gdbarch
, spu_register_name
);
2618 set_gdbarch_register_type (gdbarch
, spu_register_type
);
2619 set_gdbarch_pseudo_register_read (gdbarch
, spu_pseudo_register_read
);
2620 set_gdbarch_pseudo_register_write (gdbarch
, spu_pseudo_register_write
);
2621 set_gdbarch_value_from_register (gdbarch
, spu_value_from_register
);
2622 set_gdbarch_register_reggroup_p (gdbarch
, spu_register_reggroup_p
);
2625 set_gdbarch_char_signed (gdbarch
, 0);
2626 set_gdbarch_ptr_bit (gdbarch
, 32);
2627 set_gdbarch_addr_bit (gdbarch
, 32);
2628 set_gdbarch_short_bit (gdbarch
, 16);
2629 set_gdbarch_int_bit (gdbarch
, 32);
2630 set_gdbarch_long_bit (gdbarch
, 32);
2631 set_gdbarch_long_long_bit (gdbarch
, 64);
2632 set_gdbarch_float_bit (gdbarch
, 32);
2633 set_gdbarch_double_bit (gdbarch
, 64);
2634 set_gdbarch_long_double_bit (gdbarch
, 64);
2635 set_gdbarch_float_format (gdbarch
, floatformats_ieee_single
);
2636 set_gdbarch_double_format (gdbarch
, floatformats_ieee_double
);
2637 set_gdbarch_long_double_format (gdbarch
, floatformats_ieee_double
);
2639 /* Address handling. */
2640 set_gdbarch_address_to_pointer (gdbarch
, spu_address_to_pointer
);
2641 set_gdbarch_pointer_to_address (gdbarch
, spu_pointer_to_address
);
2642 set_gdbarch_integer_to_address (gdbarch
, spu_integer_to_address
);
2643 set_gdbarch_address_class_type_flags (gdbarch
, spu_address_class_type_flags
);
2644 set_gdbarch_address_class_type_flags_to_name
2645 (gdbarch
, spu_address_class_type_flags_to_name
);
2646 set_gdbarch_address_class_name_to_type_flags
2647 (gdbarch
, spu_address_class_name_to_type_flags
);
2650 /* Inferior function calls. */
2651 set_gdbarch_call_dummy_location (gdbarch
, ON_STACK
);
2652 set_gdbarch_frame_align (gdbarch
, spu_frame_align
);
2653 set_gdbarch_frame_red_zone_size (gdbarch
, 2000);
2654 set_gdbarch_push_dummy_code (gdbarch
, spu_push_dummy_code
);
2655 set_gdbarch_push_dummy_call (gdbarch
, spu_push_dummy_call
);
2656 set_gdbarch_dummy_id (gdbarch
, spu_dummy_id
);
2657 set_gdbarch_return_value (gdbarch
, spu_return_value
);
2659 /* Frame handling. */
2660 set_gdbarch_inner_than (gdbarch
, core_addr_lessthan
);
2661 frame_unwind_append_unwinder (gdbarch
, &spu_frame_unwind
);
2662 frame_base_set_default (gdbarch
, &spu_frame_base
);
2663 set_gdbarch_unwind_pc (gdbarch
, spu_unwind_pc
);
2664 set_gdbarch_unwind_sp (gdbarch
, spu_unwind_sp
);
2665 set_gdbarch_virtual_frame_pointer (gdbarch
, spu_virtual_frame_pointer
);
2666 set_gdbarch_frame_args_skip (gdbarch
, 0);
2667 set_gdbarch_skip_prologue (gdbarch
, spu_skip_prologue
);
2668 set_gdbarch_in_function_epilogue_p (gdbarch
, spu_in_function_epilogue_p
);
2670 /* Cell/B.E. cross-architecture unwinder support. */
2671 frame_unwind_prepend_unwinder (gdbarch
, &spu2ppu_unwind
);
2674 set_gdbarch_decr_pc_after_break (gdbarch
, 4);
2675 set_gdbarch_breakpoint_from_pc (gdbarch
, spu_breakpoint_from_pc
);
2676 set_gdbarch_memory_remove_breakpoint (gdbarch
, spu_memory_remove_breakpoint
);
2677 set_gdbarch_cannot_step_breakpoint (gdbarch
, 1);
2678 set_gdbarch_software_single_step (gdbarch
, spu_software_single_step
);
2679 set_gdbarch_get_longjmp_target (gdbarch
, spu_get_longjmp_target
);
2682 set_gdbarch_overlay_update (gdbarch
, spu_overlay_update
);
2687 /* Provide a prototype to silence -Wmissing-prototypes. */
2688 extern initialize_file_ftype _initialize_spu_tdep
;
2691 _initialize_spu_tdep (void)
2693 register_gdbarch_init (bfd_arch_spu
, spu_gdbarch_init
);
2695 /* Add ourselves to objfile event chain. */
2696 observer_attach_new_objfile (spu_overlay_new_objfile
);
2697 spu_overlay_data
= register_objfile_data ();
2699 /* Install spu stop-on-load handler. */
2700 observer_attach_new_objfile (spu_catch_start
);
2702 /* Add ourselves to normal_stop event chain. */
2703 observer_attach_normal_stop (spu_attach_normal_stop
);
2705 /* Add root prefix command for all "set spu"/"show spu" commands. */
2706 add_prefix_cmd ("spu", no_class
, set_spu_command
,
2707 _("Various SPU specific commands."),
2708 &setspucmdlist
, "set spu ", 0, &setlist
);
2709 add_prefix_cmd ("spu", no_class
, show_spu_command
,
2710 _("Various SPU specific commands."),
2711 &showspucmdlist
, "show spu ", 0, &showlist
);
2713 /* Toggle whether or not to add a temporary breakpoint at the "main"
2714 function of new SPE contexts. */
2715 add_setshow_boolean_cmd ("stop-on-load", class_support
,
2716 &spu_stop_on_load_p
, _("\
2717 Set whether to stop for new SPE threads."),
2719 Show whether to stop for new SPE threads."),
2721 Use \"on\" to give control to the user when a new SPE thread\n\
2722 enters its \"main\" function.\n\
2723 Use \"off\" to disable stopping for new SPE threads."),
2725 show_spu_stop_on_load
,
2726 &setspucmdlist
, &showspucmdlist
);
2728 /* Toggle whether or not to automatically flush the software-managed
2729 cache whenever SPE execution stops. */
2730 add_setshow_boolean_cmd ("auto-flush-cache", class_support
,
2731 &spu_auto_flush_cache_p
, _("\
2732 Set whether to automatically flush the software-managed cache."),
2734 Show whether to automatically flush the software-managed cache."),
2736 Use \"on\" to automatically flush the software-managed cache\n\
2737 whenever SPE execution stops.\n\
2738 Use \"off\" to never automatically flush the software-managed cache."),
2740 show_spu_auto_flush_cache
,
2741 &setspucmdlist
, &showspucmdlist
);
2743 /* Add root prefix command for all "info spu" commands. */
2744 add_prefix_cmd ("spu", class_info
, info_spu_command
,
2745 _("Various SPU specific commands."),
2746 &infospucmdlist
, "info spu ", 0, &infolist
);
2748 /* Add various "info spu" commands. */
2749 add_cmd ("event", class_info
, info_spu_event_command
,
2750 _("Display SPU event facility status.\n"),
2752 add_cmd ("signal", class_info
, info_spu_signal_command
,
2753 _("Display SPU signal notification facility status.\n"),
2755 add_cmd ("mailbox", class_info
, info_spu_mailbox_command
,
2756 _("Display SPU mailbox facility status.\n"),
2758 add_cmd ("dma", class_info
, info_spu_dma_command
,
2759 _("Display MFC DMA status.\n"),
2761 add_cmd ("proxydma", class_info
, info_spu_proxydma_command
,
2762 _("Display MFC Proxy-DMA status.\n"),