1 /* SPU target-dependent code for GDB, the GNU debugger.
2 Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011
3 Free Software Foundation, Inc.
5 Contributed by Ulrich Weigand <uweigand@de.ibm.com>.
6 Based on a port by Sid Manning <sid@us.ibm.com>.
8 This file is part of GDB.
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "arch-utils.h"
28 #include "gdb_string.h"
29 #include "gdb_assert.h"
31 #include "frame-unwind.h"
32 #include "frame-base.h"
33 #include "trad-frame.h"
42 #include "reggroups.h"
43 #include "floatformat.h"
51 /* The list of available "set spu " and "show spu " commands. */
52 static struct cmd_list_element
*setspucmdlist
= NULL
;
53 static struct cmd_list_element
*showspucmdlist
= NULL
;
55 /* Whether to stop for new SPE contexts. */
56 static int spu_stop_on_load_p
= 0;
57 /* Whether to automatically flush the SW-managed cache. */
58 static int spu_auto_flush_cache_p
= 1;
61 /* The tdep structure. */
64 /* The spufs ID identifying our address space. */
67 /* SPU-specific vector type. */
68 struct type
*spu_builtin_type_vec128
;
72 /* SPU-specific vector type. */
74 spu_builtin_type_vec128 (struct gdbarch
*gdbarch
)
76 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
78 if (!tdep
->spu_builtin_type_vec128
)
80 const struct builtin_type
*bt
= builtin_type (gdbarch
);
83 t
= arch_composite_type (gdbarch
,
84 "__spu_builtin_type_vec128", TYPE_CODE_UNION
);
85 append_composite_type_field (t
, "uint128", bt
->builtin_int128
);
86 append_composite_type_field (t
, "v2_int64",
87 init_vector_type (bt
->builtin_int64
, 2));
88 append_composite_type_field (t
, "v4_int32",
89 init_vector_type (bt
->builtin_int32
, 4));
90 append_composite_type_field (t
, "v8_int16",
91 init_vector_type (bt
->builtin_int16
, 8));
92 append_composite_type_field (t
, "v16_int8",
93 init_vector_type (bt
->builtin_int8
, 16));
94 append_composite_type_field (t
, "v2_double",
95 init_vector_type (bt
->builtin_double
, 2));
96 append_composite_type_field (t
, "v4_float",
97 init_vector_type (bt
->builtin_float
, 4));
100 TYPE_NAME (t
) = "spu_builtin_type_vec128";
102 tdep
->spu_builtin_type_vec128
= t
;
105 return tdep
->spu_builtin_type_vec128
;
109 /* The list of available "info spu " commands. */
110 static struct cmd_list_element
*infospucmdlist
= NULL
;
/* Return the name of register REG_NR, or NULL if it is out of range.
   Registers 0..127 are the GPRs; the trailing entries name the
   special-purpose registers.  GDBARCH is unused.  */
static const char *
spu_register_name (struct gdbarch *gdbarch, int reg_nr)
{
  static char *register_names[] =
    {
      "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
      "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
      "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
      "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
      "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
      "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
      "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
      "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
      "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
      "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
      "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
      "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
      "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
      "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
      "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
      "id", "pc", "sp", "fpscr", "srr0", "lslr", "decr", "decr_status"
    };

  if (reg_nr < 0)
    return NULL;
  if (reg_nr >= sizeof register_names / sizeof *register_names)
    return NULL;

  return register_names[reg_nr];
}
147 spu_register_type (struct gdbarch
*gdbarch
, int reg_nr
)
149 if (reg_nr
< SPU_NUM_GPRS
)
150 return spu_builtin_type_vec128 (gdbarch
);
155 return builtin_type (gdbarch
)->builtin_uint32
;
158 return builtin_type (gdbarch
)->builtin_func_ptr
;
161 return builtin_type (gdbarch
)->builtin_data_ptr
;
163 case SPU_FPSCR_REGNUM
:
164 return builtin_type (gdbarch
)->builtin_uint128
;
166 case SPU_SRR0_REGNUM
:
167 return builtin_type (gdbarch
)->builtin_uint32
;
169 case SPU_LSLR_REGNUM
:
170 return builtin_type (gdbarch
)->builtin_uint32
;
172 case SPU_DECR_REGNUM
:
173 return builtin_type (gdbarch
)->builtin_uint32
;
175 case SPU_DECR_STATUS_REGNUM
:
176 return builtin_type (gdbarch
)->builtin_uint32
;
179 internal_error (__FILE__
, __LINE__
, _("invalid regnum"));
183 /* Pseudo registers for preferred slots - stack pointer. */
186 spu_pseudo_register_read_spu (struct regcache
*regcache
, const char *regname
,
189 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
190 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
195 regcache_raw_read_unsigned (regcache
, SPU_ID_REGNUM
, &id
);
196 xsnprintf (annex
, sizeof annex
, "%d/%s", (int) id
, regname
);
197 memset (reg
, 0, sizeof reg
);
198 target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
201 store_unsigned_integer (buf
, 4, byte_order
, strtoulst (reg
, NULL
, 16));
205 spu_pseudo_register_read (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
206 int regnum
, gdb_byte
*buf
)
215 regcache_raw_read (regcache
, SPU_RAW_SP_REGNUM
, reg
);
216 memcpy (buf
, reg
, 4);
219 case SPU_FPSCR_REGNUM
:
220 regcache_raw_read_unsigned (regcache
, SPU_ID_REGNUM
, &id
);
221 xsnprintf (annex
, sizeof annex
, "%d/fpcr", (int) id
);
222 target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
, buf
, 0, 16);
225 case SPU_SRR0_REGNUM
:
226 spu_pseudo_register_read_spu (regcache
, "srr0", buf
);
229 case SPU_LSLR_REGNUM
:
230 spu_pseudo_register_read_spu (regcache
, "lslr", buf
);
233 case SPU_DECR_REGNUM
:
234 spu_pseudo_register_read_spu (regcache
, "decr", buf
);
237 case SPU_DECR_STATUS_REGNUM
:
238 spu_pseudo_register_read_spu (regcache
, "decr_status", buf
);
242 internal_error (__FILE__
, __LINE__
, _("invalid regnum"));
247 spu_pseudo_register_write_spu (struct regcache
*regcache
, const char *regname
,
250 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
251 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
256 regcache_raw_read_unsigned (regcache
, SPU_ID_REGNUM
, &id
);
257 xsnprintf (annex
, sizeof annex
, "%d/%s", (int) id
, regname
);
258 xsnprintf (reg
, sizeof reg
, "0x%s",
259 phex_nz (extract_unsigned_integer (buf
, 4, byte_order
), 4));
260 target_write (¤t_target
, TARGET_OBJECT_SPU
, annex
,
261 reg
, 0, strlen (reg
));
265 spu_pseudo_register_write (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
266 int regnum
, const gdb_byte
*buf
)
275 regcache_raw_read (regcache
, SPU_RAW_SP_REGNUM
, reg
);
276 memcpy (reg
, buf
, 4);
277 regcache_raw_write (regcache
, SPU_RAW_SP_REGNUM
, reg
);
280 case SPU_FPSCR_REGNUM
:
281 regcache_raw_read_unsigned (regcache
, SPU_ID_REGNUM
, &id
);
282 xsnprintf (annex
, sizeof annex
, "%d/fpcr", (int) id
);
283 target_write (¤t_target
, TARGET_OBJECT_SPU
, annex
, buf
, 0, 16);
286 case SPU_SRR0_REGNUM
:
287 spu_pseudo_register_write_spu (regcache
, "srr0", buf
);
290 case SPU_LSLR_REGNUM
:
291 spu_pseudo_register_write_spu (regcache
, "lslr", buf
);
294 case SPU_DECR_REGNUM
:
295 spu_pseudo_register_write_spu (regcache
, "decr", buf
);
298 case SPU_DECR_STATUS_REGNUM
:
299 spu_pseudo_register_write_spu (regcache
, "decr_status", buf
);
303 internal_error (__FILE__
, __LINE__
, _("invalid regnum"));
307 /* Value conversion -- access scalar values at the preferred slot. */
309 static struct value
*
310 spu_value_from_register (struct type
*type
, int regnum
,
311 struct frame_info
*frame
)
313 struct value
*value
= default_value_from_register (type
, regnum
, frame
);
314 int len
= TYPE_LENGTH (type
);
316 if (regnum
< SPU_NUM_GPRS
&& len
< 16)
318 int preferred_slot
= len
< 4 ? 4 - len
: 0;
319 set_value_offset (value
, preferred_slot
);
325 /* Register groups. */
328 spu_register_reggroup_p (struct gdbarch
*gdbarch
, int regnum
,
329 struct reggroup
*group
)
331 /* Registers displayed via 'info regs'. */
332 if (group
== general_reggroup
)
335 /* Registers displayed via 'info float'. */
336 if (group
== float_reggroup
)
339 /* Registers that need to be saved/restored in order to
340 push or pop frames. */
341 if (group
== save_reggroup
|| group
== restore_reggroup
)
344 return default_register_reggroup_p (gdbarch
, regnum
, group
);
348 /* Address handling. */
351 spu_gdbarch_id (struct gdbarch
*gdbarch
)
353 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
356 /* The objfile architecture of a standalone SPU executable does not
357 provide an SPU ID. Retrieve it from the the objfile's relocated
358 address range in this special case. */
360 && symfile_objfile
&& symfile_objfile
->obfd
361 && bfd_get_arch (symfile_objfile
->obfd
) == bfd_arch_spu
362 && symfile_objfile
->sections
!= symfile_objfile
->sections_end
)
363 id
= SPUADDR_SPU (obj_section_addr (symfile_objfile
->sections
));
369 spu_address_class_type_flags (int byte_size
, int dwarf2_addr_class
)
371 if (dwarf2_addr_class
== 1)
372 return TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1
;
378 spu_address_class_type_flags_to_name (struct gdbarch
*gdbarch
, int type_flags
)
380 if (type_flags
& TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1
)
387 spu_address_class_name_to_type_flags (struct gdbarch
*gdbarch
,
388 const char *name
, int *type_flags_ptr
)
390 if (strcmp (name
, "__ea") == 0)
392 *type_flags_ptr
= TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1
;
400 spu_address_to_pointer (struct gdbarch
*gdbarch
,
401 struct type
*type
, gdb_byte
*buf
, CORE_ADDR addr
)
403 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
404 store_unsigned_integer (buf
, TYPE_LENGTH (type
), byte_order
,
405 SPUADDR_ADDR (addr
));
409 spu_pointer_to_address (struct gdbarch
*gdbarch
,
410 struct type
*type
, const gdb_byte
*buf
)
412 int id
= spu_gdbarch_id (gdbarch
);
413 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
415 = extract_unsigned_integer (buf
, TYPE_LENGTH (type
), byte_order
);
417 /* Do not convert __ea pointers. */
418 if (TYPE_ADDRESS_CLASS_1 (type
))
421 return addr
? SPUADDR (id
, addr
) : 0;
425 spu_integer_to_address (struct gdbarch
*gdbarch
,
426 struct type
*type
, const gdb_byte
*buf
)
428 int id
= spu_gdbarch_id (gdbarch
);
429 ULONGEST addr
= unpack_long (type
, buf
);
431 return SPUADDR (id
, addr
);
/* Decoding SPU instructions.  */

/* If INSN is an RR-format instruction with opcode OP, decode its
   register fields into *RT, *RA, *RB and return nonzero.  */
static int
is_rr (unsigned int insn, int op, int *rt, int *ra, int *rb)
{
  if ((insn >> 21) == op)
    {
      *rt = insn & 127;
      *ra = (insn >> 7) & 127;
      *rb = (insn >> 14) & 127;
      return 1;
    }

  return 0;
}
/* If INSN is an RRR-format instruction with opcode OP, decode its
   register fields into *RT, *RA, *RB, *RC and return nonzero.  */
static int
is_rrr (unsigned int insn, int op, int *rt, int *ra, int *rb, int *rc)
{
  if ((insn >> 28) == op)
    {
      *rt = (insn >> 21) & 127;
      *ra = (insn >> 7) & 127;
      *rb = (insn >> 14) & 127;
      *rc = insn & 127;
      return 1;
    }

  return 0;
}
/* If INSN is an RI7-format instruction with opcode OP, decode the
   registers into *RT, *RA and the sign-extended 7-bit immediate into
   *I7, and return nonzero.  */
static int
is_ri7 (unsigned int insn, int op, int *rt, int *ra, int *i7)
{
  if ((insn >> 21) == op)
    {
      *rt = insn & 127;
      *ra = (insn >> 7) & 127;
      *i7 = (((insn >> 14) & 127) ^ 0x40) - 0x40;
      return 1;
    }

  return 0;
}
/* If INSN is an RI10-format instruction with opcode OP, decode the
   registers into *RT, *RA and the sign-extended 10-bit immediate into
   *I10, and return nonzero.  */
static int
is_ri10 (unsigned int insn, int op, int *rt, int *ra, int *i10)
{
  if ((insn >> 24) == op)
    {
      *rt = insn & 127;
      *ra = (insn >> 7) & 127;
      *i10 = (((insn >> 14) & 0x3ff) ^ 0x200) - 0x200;
      return 1;
    }

  return 0;
}
/* If INSN is an RI16-format instruction with opcode OP, decode the
   target register into *RT and the sign-extended 16-bit immediate
   into *I16, and return nonzero.  */
static int
is_ri16 (unsigned int insn, int op, int *rt, int *i16)
{
  if ((insn >> 23) == op)
    {
      *rt = insn & 127;
      *i16 = (((insn >> 7) & 0xffff) ^ 0x8000) - 0x8000;
      return 1;
    }

  return 0;
}
/* If INSN is an RI18-format instruction with opcode OP, decode the
   target register into *RT and the sign-extended 18-bit immediate
   into *I18, and return nonzero.  */
static int
is_ri18 (unsigned int insn, int op, int *rt, int *i18)
{
  if ((insn >> 25) == op)
    {
      *rt = insn & 127;
      *i18 = (((insn >> 7) & 0x3ffff) ^ 0x20000) - 0x20000;
      return 1;
    }

  return 0;
}
555 is_branch (unsigned int insn
, int *offset
, int *reg
)
559 if (is_ri16 (insn
, op_br
, &rt
, &i16
)
560 || is_ri16 (insn
, op_brsl
, &rt
, &i16
)
561 || is_ri16 (insn
, op_brnz
, &rt
, &i16
)
562 || is_ri16 (insn
, op_brz
, &rt
, &i16
)
563 || is_ri16 (insn
, op_brhnz
, &rt
, &i16
)
564 || is_ri16 (insn
, op_brhz
, &rt
, &i16
))
566 *reg
= SPU_PC_REGNUM
;
571 if (is_ri16 (insn
, op_bra
, &rt
, &i16
)
572 || is_ri16 (insn
, op_brasl
, &rt
, &i16
))
579 if (is_ri7 (insn
, op_bi
, &rt
, reg
, &i7
)
580 || is_ri7 (insn
, op_bisl
, &rt
, reg
, &i7
)
581 || is_ri7 (insn
, op_biz
, &rt
, reg
, &i7
)
582 || is_ri7 (insn
, op_binz
, &rt
, reg
, &i7
)
583 || is_ri7 (insn
, op_bihz
, &rt
, reg
, &i7
)
584 || is_ri7 (insn
, op_bihnz
, &rt
, reg
, &i7
))
594 /* Prolog parsing. */
596 struct spu_prologue_data
598 /* Stack frame size. -1 if analysis was unsuccessful. */
601 /* How to find the CFA. The CFA is equal to SP at function entry. */
605 /* Offset relative to CFA where a register is saved. -1 if invalid. */
606 int reg_offset
[SPU_NUM_GPRS
];
610 spu_analyze_prologue (struct gdbarch
*gdbarch
,
611 CORE_ADDR start_pc
, CORE_ADDR end_pc
,
612 struct spu_prologue_data
*data
)
614 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
619 int reg_immed
[SPU_NUM_GPRS
];
621 CORE_ADDR prolog_pc
= start_pc
;
626 /* Initialize DATA to default values. */
629 data
->cfa_reg
= SPU_RAW_SP_REGNUM
;
630 data
->cfa_offset
= 0;
632 for (i
= 0; i
< SPU_NUM_GPRS
; i
++)
633 data
->reg_offset
[i
] = -1;
635 /* Set up REG_IMMED array. This is non-zero for a register if we know its
636 preferred slot currently holds this immediate value. */
637 for (i
= 0; i
< SPU_NUM_GPRS
; i
++)
640 /* Scan instructions until the first branch.
642 The following instructions are important prolog components:
644 - The first instruction to set up the stack pointer.
645 - The first instruction to set up the frame pointer.
646 - The first instruction to save the link register.
647 - The first instruction to save the backchain.
649 We return the instruction after the latest of these four,
650 or the incoming PC if none is found. The first instruction
651 to set up the stack pointer also defines the frame size.
653 Note that instructions saving incoming arguments to their stack
654 slots are not counted as important, because they are hard to
655 identify with certainty. This should not matter much, because
656 arguments are relevant only in code compiled with debug data,
657 and in such code the GDB core will advance until the first source
658 line anyway, using SAL data.
660 For purposes of stack unwinding, we analyze the following types
661 of instructions in addition:
663 - Any instruction adding to the current frame pointer.
664 - Any instruction loading an immediate constant into a register.
665 - Any instruction storing a register onto the stack.
667 These are used to compute the CFA and REG_OFFSET output. */
669 for (pc
= start_pc
; pc
< end_pc
; pc
+= 4)
672 int rt
, ra
, rb
, rc
, immed
;
674 if (target_read_memory (pc
, buf
, 4))
676 insn
= extract_unsigned_integer (buf
, 4, byte_order
);
678 /* AI is the typical instruction to set up a stack frame.
679 It is also used to initialize the frame pointer. */
680 if (is_ri10 (insn
, op_ai
, &rt
, &ra
, &immed
))
682 if (rt
== data
->cfa_reg
&& ra
== data
->cfa_reg
)
683 data
->cfa_offset
-= immed
;
685 if (rt
== SPU_RAW_SP_REGNUM
&& ra
== SPU_RAW_SP_REGNUM
693 else if (rt
== SPU_FP_REGNUM
&& ra
== SPU_RAW_SP_REGNUM
699 data
->cfa_reg
= SPU_FP_REGNUM
;
700 data
->cfa_offset
-= immed
;
704 /* A is used to set up stack frames of size >= 512 bytes.
705 If we have tracked the contents of the addend register,
706 we can handle this as well. */
707 else if (is_rr (insn
, op_a
, &rt
, &ra
, &rb
))
709 if (rt
== data
->cfa_reg
&& ra
== data
->cfa_reg
)
711 if (reg_immed
[rb
] != 0)
712 data
->cfa_offset
-= reg_immed
[rb
];
714 data
->cfa_reg
= -1; /* We don't know the CFA any more. */
717 if (rt
== SPU_RAW_SP_REGNUM
&& ra
== SPU_RAW_SP_REGNUM
723 if (reg_immed
[rb
] != 0)
724 data
->size
= -reg_immed
[rb
];
728 /* We need to track IL and ILA used to load immediate constants
729 in case they are later used as input to an A instruction. */
730 else if (is_ri16 (insn
, op_il
, &rt
, &immed
))
732 reg_immed
[rt
] = immed
;
734 if (rt
== SPU_RAW_SP_REGNUM
&& !found_sp
)
738 else if (is_ri18 (insn
, op_ila
, &rt
, &immed
))
740 reg_immed
[rt
] = immed
& 0x3ffff;
742 if (rt
== SPU_RAW_SP_REGNUM
&& !found_sp
)
746 /* STQD is used to save registers to the stack. */
747 else if (is_ri10 (insn
, op_stqd
, &rt
, &ra
, &immed
))
749 if (ra
== data
->cfa_reg
)
750 data
->reg_offset
[rt
] = data
->cfa_offset
- (immed
<< 4);
752 if (ra
== data
->cfa_reg
&& rt
== SPU_LR_REGNUM
759 if (ra
== SPU_RAW_SP_REGNUM
760 && (found_sp
? immed
== 0 : rt
== SPU_RAW_SP_REGNUM
)
768 /* _start uses SELB to set up the stack pointer. */
769 else if (is_rrr (insn
, op_selb
, &rt
, &ra
, &rb
, &rc
))
771 if (rt
== SPU_RAW_SP_REGNUM
&& !found_sp
)
775 /* We terminate if we find a branch. */
776 else if (is_branch (insn
, &immed
, &ra
))
781 /* If we successfully parsed until here, and didn't find any instruction
782 modifying SP, we assume we have a frameless function. */
786 /* Return cooked instead of raw SP. */
787 if (data
->cfa_reg
== SPU_RAW_SP_REGNUM
)
788 data
->cfa_reg
= SPU_SP_REGNUM
;
793 /* Return the first instruction after the prologue starting at PC. */
795 spu_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
797 struct spu_prologue_data data
;
798 return spu_analyze_prologue (gdbarch
, pc
, (CORE_ADDR
)-1, &data
);
801 /* Return the frame pointer in use at address PC. */
803 spu_virtual_frame_pointer (struct gdbarch
*gdbarch
, CORE_ADDR pc
,
804 int *reg
, LONGEST
*offset
)
806 struct spu_prologue_data data
;
807 spu_analyze_prologue (gdbarch
, pc
, (CORE_ADDR
)-1, &data
);
809 if (data
.size
!= -1 && data
.cfa_reg
!= -1)
811 /* The 'frame pointer' address is CFA minus frame size. */
813 *offset
= data
.cfa_offset
- data
.size
;
817 /* ??? We don't really know ... */
818 *reg
= SPU_SP_REGNUM
;
823 /* Return true if we are in the function's epilogue, i.e. after the
824 instruction that destroyed the function's stack frame.
826 1) scan forward from the point of execution:
827 a) If you find an instruction that modifies the stack pointer
828 or transfers control (except a return), execution is not in
830 b) Stop scanning if you find a return instruction or reach the
831 end of the function or reach the hard limit for the size of
833 2) scan backward from the point of execution:
834 a) If you find an instruction that modifies the stack pointer,
835 execution *is* in an epilogue, return.
836 b) Stop scanning if you reach an instruction that transfers
837 control or the beginning of the function or reach the hard
838 limit for the size of an epilogue. */
841 spu_in_function_epilogue_p (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
843 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
844 CORE_ADDR scan_pc
, func_start
, func_end
, epilogue_start
, epilogue_end
;
847 int rt
, ra
, rb
, rc
, immed
;
849 /* Find the search limits based on function boundaries and hard limit.
850 We assume the epilogue can be up to 64 instructions long. */
852 const int spu_max_epilogue_size
= 64 * 4;
854 if (!find_pc_partial_function (pc
, NULL
, &func_start
, &func_end
))
857 if (pc
- func_start
< spu_max_epilogue_size
)
858 epilogue_start
= func_start
;
860 epilogue_start
= pc
- spu_max_epilogue_size
;
862 if (func_end
- pc
< spu_max_epilogue_size
)
863 epilogue_end
= func_end
;
865 epilogue_end
= pc
+ spu_max_epilogue_size
;
867 /* Scan forward until next 'bi $0'. */
869 for (scan_pc
= pc
; scan_pc
< epilogue_end
; scan_pc
+= 4)
871 if (target_read_memory (scan_pc
, buf
, 4))
873 insn
= extract_unsigned_integer (buf
, 4, byte_order
);
875 if (is_branch (insn
, &immed
, &ra
))
877 if (immed
== 0 && ra
== SPU_LR_REGNUM
)
883 if (is_ri10 (insn
, op_ai
, &rt
, &ra
, &immed
)
884 || is_rr (insn
, op_a
, &rt
, &ra
, &rb
)
885 || is_ri10 (insn
, op_lqd
, &rt
, &ra
, &immed
))
887 if (rt
== SPU_RAW_SP_REGNUM
)
892 if (scan_pc
>= epilogue_end
)
895 /* Scan backward until adjustment to stack pointer (R1). */
897 for (scan_pc
= pc
- 4; scan_pc
>= epilogue_start
; scan_pc
-= 4)
899 if (target_read_memory (scan_pc
, buf
, 4))
901 insn
= extract_unsigned_integer (buf
, 4, byte_order
);
903 if (is_branch (insn
, &immed
, &ra
))
906 if (is_ri10 (insn
, op_ai
, &rt
, &ra
, &immed
)
907 || is_rr (insn
, op_a
, &rt
, &ra
, &rb
)
908 || is_ri10 (insn
, op_lqd
, &rt
, &ra
, &immed
))
910 if (rt
== SPU_RAW_SP_REGNUM
)
919 /* Normal stack frames. */
921 struct spu_unwind_cache
924 CORE_ADDR frame_base
;
925 CORE_ADDR local_base
;
927 struct trad_frame_saved_reg
*saved_regs
;
930 static struct spu_unwind_cache
*
931 spu_frame_unwind_cache (struct frame_info
*this_frame
,
932 void **this_prologue_cache
)
934 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
935 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
936 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
937 struct spu_unwind_cache
*info
;
938 struct spu_prologue_data data
;
939 CORE_ADDR id
= tdep
->id
;
942 if (*this_prologue_cache
)
943 return *this_prologue_cache
;
945 info
= FRAME_OBSTACK_ZALLOC (struct spu_unwind_cache
);
946 *this_prologue_cache
= info
;
947 info
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
948 info
->frame_base
= 0;
949 info
->local_base
= 0;
951 /* Find the start of the current function, and analyze its prologue. */
952 info
->func
= get_frame_func (this_frame
);
955 /* Fall back to using the current PC as frame ID. */
956 info
->func
= get_frame_pc (this_frame
);
960 spu_analyze_prologue (gdbarch
, info
->func
, get_frame_pc (this_frame
),
963 /* If successful, use prologue analysis data. */
964 if (data
.size
!= -1 && data
.cfa_reg
!= -1)
969 /* Determine CFA via unwound CFA_REG plus CFA_OFFSET. */
970 get_frame_register (this_frame
, data
.cfa_reg
, buf
);
971 cfa
= extract_unsigned_integer (buf
, 4, byte_order
) + data
.cfa_offset
;
972 cfa
= SPUADDR (id
, cfa
);
974 /* Call-saved register slots. */
975 for (i
= 0; i
< SPU_NUM_GPRS
; i
++)
976 if (i
== SPU_LR_REGNUM
977 || (i
>= SPU_SAVED1_REGNUM
&& i
<= SPU_SAVEDN_REGNUM
))
978 if (data
.reg_offset
[i
] != -1)
979 info
->saved_regs
[i
].addr
= cfa
- data
.reg_offset
[i
];
982 info
->frame_base
= cfa
;
983 info
->local_base
= cfa
- data
.size
;
986 /* Otherwise, fall back to reading the backchain link. */
994 /* Get local store limit. */
995 lslr
= get_frame_register_unsigned (this_frame
, SPU_LSLR_REGNUM
);
997 lslr
= (ULONGEST
) -1;
999 /* Get the backchain. */
1000 reg
= get_frame_register_unsigned (this_frame
, SPU_SP_REGNUM
);
1001 status
= safe_read_memory_integer (SPUADDR (id
, reg
), 4, byte_order
,
1004 /* A zero backchain terminates the frame chain. Also, sanity
1005 check against the local store size limit. */
1006 if (status
&& backchain
> 0 && backchain
<= lslr
)
1008 /* Assume the link register is saved into its slot. */
1009 if (backchain
+ 16 <= lslr
)
1010 info
->saved_regs
[SPU_LR_REGNUM
].addr
= SPUADDR (id
,
1014 info
->frame_base
= SPUADDR (id
, backchain
);
1015 info
->local_base
= SPUADDR (id
, reg
);
1019 /* If we didn't find a frame, we cannot determine SP / return address. */
1020 if (info
->frame_base
== 0)
1023 /* The previous SP is equal to the CFA. */
1024 trad_frame_set_value (info
->saved_regs
, SPU_SP_REGNUM
,
1025 SPUADDR_ADDR (info
->frame_base
));
1027 /* Read full contents of the unwound link register in order to
1028 be able to determine the return address. */
1029 if (trad_frame_addr_p (info
->saved_regs
, SPU_LR_REGNUM
))
1030 target_read_memory (info
->saved_regs
[SPU_LR_REGNUM
].addr
, buf
, 16);
1032 get_frame_register (this_frame
, SPU_LR_REGNUM
, buf
);
1034 /* Normally, the return address is contained in the slot 0 of the
1035 link register, and slots 1-3 are zero. For an overlay return,
1036 slot 0 contains the address of the overlay manager return stub,
1037 slot 1 contains the partition number of the overlay section to
1038 be returned to, and slot 2 contains the return address within
1039 that section. Return the latter address in that case. */
1040 if (extract_unsigned_integer (buf
+ 8, 4, byte_order
) != 0)
1041 trad_frame_set_value (info
->saved_regs
, SPU_PC_REGNUM
,
1042 extract_unsigned_integer (buf
+ 8, 4, byte_order
));
1044 trad_frame_set_value (info
->saved_regs
, SPU_PC_REGNUM
,
1045 extract_unsigned_integer (buf
, 4, byte_order
));
1051 spu_frame_this_id (struct frame_info
*this_frame
,
1052 void **this_prologue_cache
, struct frame_id
*this_id
)
1054 struct spu_unwind_cache
*info
=
1055 spu_frame_unwind_cache (this_frame
, this_prologue_cache
);
1057 if (info
->frame_base
== 0)
1060 *this_id
= frame_id_build (info
->frame_base
, info
->func
);
1063 static struct value
*
1064 spu_frame_prev_register (struct frame_info
*this_frame
,
1065 void **this_prologue_cache
, int regnum
)
1067 struct spu_unwind_cache
*info
1068 = spu_frame_unwind_cache (this_frame
, this_prologue_cache
);
1070 /* Special-case the stack pointer. */
1071 if (regnum
== SPU_RAW_SP_REGNUM
)
1072 regnum
= SPU_SP_REGNUM
;
1074 return trad_frame_get_prev_register (this_frame
, info
->saved_regs
, regnum
);
1077 static const struct frame_unwind spu_frame_unwind
= {
1080 spu_frame_prev_register
,
1082 default_frame_sniffer
1086 spu_frame_base_address (struct frame_info
*this_frame
, void **this_cache
)
1088 struct spu_unwind_cache
*info
1089 = spu_frame_unwind_cache (this_frame
, this_cache
);
1090 return info
->local_base
;
1093 static const struct frame_base spu_frame_base
= {
1095 spu_frame_base_address
,
1096 spu_frame_base_address
,
1097 spu_frame_base_address
1101 spu_unwind_pc (struct gdbarch
*gdbarch
, struct frame_info
*next_frame
)
1103 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1104 CORE_ADDR pc
= frame_unwind_register_unsigned (next_frame
, SPU_PC_REGNUM
);
1105 /* Mask off interrupt enable bit. */
1106 return SPUADDR (tdep
->id
, pc
& -4);
1110 spu_unwind_sp (struct gdbarch
*gdbarch
, struct frame_info
*next_frame
)
1112 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1113 CORE_ADDR sp
= frame_unwind_register_unsigned (next_frame
, SPU_SP_REGNUM
);
1114 return SPUADDR (tdep
->id
, sp
);
1118 spu_read_pc (struct regcache
*regcache
)
1120 struct gdbarch_tdep
*tdep
= gdbarch_tdep (get_regcache_arch (regcache
));
1122 regcache_cooked_read_unsigned (regcache
, SPU_PC_REGNUM
, &pc
);
1123 /* Mask off interrupt enable bit. */
1124 return SPUADDR (tdep
->id
, pc
& -4);
1128 spu_write_pc (struct regcache
*regcache
, CORE_ADDR pc
)
1130 /* Keep interrupt enabled state unchanged. */
1132 regcache_cooked_read_unsigned (regcache
, SPU_PC_REGNUM
, &old_pc
);
1133 regcache_cooked_write_unsigned (regcache
, SPU_PC_REGNUM
,
1134 (SPUADDR_ADDR (pc
) & -4) | (old_pc
& 3));
1138 /* Cell/B.E. cross-architecture unwinder support. */
1140 struct spu2ppu_cache
1142 struct frame_id frame_id
;
1143 struct regcache
*regcache
;
1146 static struct gdbarch
*
1147 spu2ppu_prev_arch (struct frame_info
*this_frame
, void **this_cache
)
1149 struct spu2ppu_cache
*cache
= *this_cache
;
1150 return get_regcache_arch (cache
->regcache
);
1154 spu2ppu_this_id (struct frame_info
*this_frame
,
1155 void **this_cache
, struct frame_id
*this_id
)
1157 struct spu2ppu_cache
*cache
= *this_cache
;
1158 *this_id
= cache
->frame_id
;
1161 static struct value
*
1162 spu2ppu_prev_register (struct frame_info
*this_frame
,
1163 void **this_cache
, int regnum
)
1165 struct spu2ppu_cache
*cache
= *this_cache
;
1166 struct gdbarch
*gdbarch
= get_regcache_arch (cache
->regcache
);
1169 buf
= alloca (register_size (gdbarch
, regnum
));
1170 regcache_cooked_read (cache
->regcache
, regnum
, buf
);
1171 return frame_unwind_got_bytes (this_frame
, regnum
, buf
);
1175 spu2ppu_sniffer (const struct frame_unwind
*self
,
1176 struct frame_info
*this_frame
, void **this_prologue_cache
)
1178 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1179 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1180 CORE_ADDR base
, func
, backchain
;
1183 if (gdbarch_bfd_arch_info (target_gdbarch
)->arch
== bfd_arch_spu
)
1186 base
= get_frame_sp (this_frame
);
1187 func
= get_frame_pc (this_frame
);
1188 if (target_read_memory (base
, buf
, 4))
1190 backchain
= extract_unsigned_integer (buf
, 4, byte_order
);
1194 struct frame_info
*fi
;
1196 struct spu2ppu_cache
*cache
1197 = FRAME_OBSTACK_CALLOC (1, struct spu2ppu_cache
);
1199 cache
->frame_id
= frame_id_build (base
+ 16, func
);
1201 for (fi
= get_next_frame (this_frame
); fi
; fi
= get_next_frame (fi
))
1202 if (gdbarch_bfd_arch_info (get_frame_arch (fi
))->arch
!= bfd_arch_spu
)
1207 cache
->regcache
= frame_save_as_regcache (fi
);
1208 *this_prologue_cache
= cache
;
1213 struct regcache
*regcache
;
1214 regcache
= get_thread_arch_regcache (inferior_ptid
, target_gdbarch
);
1215 cache
->regcache
= regcache_dup (regcache
);
1216 *this_prologue_cache
= cache
;
1225 spu2ppu_dealloc_cache (struct frame_info
*self
, void *this_cache
)
1227 struct spu2ppu_cache
*cache
= this_cache
;
1228 regcache_xfree (cache
->regcache
);
1231 static const struct frame_unwind spu2ppu_unwind
= {
1234 spu2ppu_prev_register
,
1237 spu2ppu_dealloc_cache
,
1242 /* Function calling convention. */
1245 spu_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
1251 spu_push_dummy_code (struct gdbarch
*gdbarch
, CORE_ADDR sp
, CORE_ADDR funaddr
,
1252 struct value
**args
, int nargs
, struct type
*value_type
,
1253 CORE_ADDR
*real_pc
, CORE_ADDR
*bp_addr
,
1254 struct regcache
*regcache
)
1256 /* Allocate space sufficient for a breakpoint, keeping the stack aligned. */
1257 sp
= (sp
- 4) & ~15;
1258 /* Store the address of that breakpoint */
1260 /* The call starts at the callee's entry point. */
1267 spu_scalar_value_p (struct type
*type
)
1269 switch (TYPE_CODE (type
))
1272 case TYPE_CODE_ENUM
:
1273 case TYPE_CODE_RANGE
:
1274 case TYPE_CODE_CHAR
:
1275 case TYPE_CODE_BOOL
:
1278 return TYPE_LENGTH (type
) <= 16;
1286 spu_value_to_regcache (struct regcache
*regcache
, int regnum
,
1287 struct type
*type
, const gdb_byte
*in
)
1289 int len
= TYPE_LENGTH (type
);
1291 if (spu_scalar_value_p (type
))
1293 int preferred_slot
= len
< 4 ? 4 - len
: 0;
1294 regcache_cooked_write_part (regcache
, regnum
, preferred_slot
, len
, in
);
1300 regcache_cooked_write (regcache
, regnum
++, in
);
1306 regcache_cooked_write_part (regcache
, regnum
, 0, len
, in
);
1311 spu_regcache_to_value (struct regcache
*regcache
, int regnum
,
1312 struct type
*type
, gdb_byte
*out
)
1314 int len
= TYPE_LENGTH (type
);
1316 if (spu_scalar_value_p (type
))
1318 int preferred_slot
= len
< 4 ? 4 - len
: 0;
1319 regcache_cooked_read_part (regcache
, regnum
, preferred_slot
, len
, out
);
1325 regcache_cooked_read (regcache
, regnum
++, out
);
1331 regcache_cooked_read_part (regcache
, regnum
, 0, len
, out
);
1336 spu_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
1337 struct regcache
*regcache
, CORE_ADDR bp_addr
,
1338 int nargs
, struct value
**args
, CORE_ADDR sp
,
1339 int struct_return
, CORE_ADDR struct_addr
)
1341 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1344 int regnum
= SPU_ARG1_REGNUM
;
1348 /* Set the return address. */
1349 memset (buf
, 0, sizeof buf
);
1350 store_unsigned_integer (buf
, 4, byte_order
, SPUADDR_ADDR (bp_addr
));
1351 regcache_cooked_write (regcache
, SPU_LR_REGNUM
, buf
);
1353 /* If STRUCT_RETURN is true, then the struct return address (in
1354 STRUCT_ADDR) will consume the first argument-passing register.
1355 Both adjust the register count and store that value. */
1358 memset (buf
, 0, sizeof buf
);
1359 store_unsigned_integer (buf
, 4, byte_order
, SPUADDR_ADDR (struct_addr
));
1360 regcache_cooked_write (regcache
, regnum
++, buf
);
1363 /* Fill in argument registers. */
1364 for (i
= 0; i
< nargs
; i
++)
1366 struct value
*arg
= args
[i
];
1367 struct type
*type
= check_typedef (value_type (arg
));
1368 const gdb_byte
*contents
= value_contents (arg
);
1369 int len
= TYPE_LENGTH (type
);
1370 int n_regs
= align_up (len
, 16) / 16;
1372 /* If the argument doesn't wholly fit into registers, it and
1373 all subsequent arguments go to the stack. */
1374 if (regnum
+ n_regs
- 1 > SPU_ARGN_REGNUM
)
1380 spu_value_to_regcache (regcache
, regnum
, type
, contents
);
1384 /* Overflow arguments go to the stack. */
1385 if (stack_arg
!= -1)
1389 /* Allocate all required stack size. */
1390 for (i
= stack_arg
; i
< nargs
; i
++)
1392 struct type
*type
= check_typedef (value_type (args
[i
]));
1393 sp
-= align_up (TYPE_LENGTH (type
), 16);
1396 /* Fill in stack arguments. */
1398 for (i
= stack_arg
; i
< nargs
; i
++)
1400 struct value
*arg
= args
[i
];
1401 struct type
*type
= check_typedef (value_type (arg
));
1402 int len
= TYPE_LENGTH (type
);
1405 if (spu_scalar_value_p (type
))
1406 preferred_slot
= len
< 4 ? 4 - len
: 0;
1410 target_write_memory (ap
+ preferred_slot
, value_contents (arg
), len
);
1411 ap
+= align_up (TYPE_LENGTH (type
), 16);
1415 /* Allocate stack frame header. */
1418 /* Store stack back chain. */
1419 regcache_cooked_read (regcache
, SPU_RAW_SP_REGNUM
, buf
);
1420 target_write_memory (sp
, buf
, 16);
1422 /* Finally, update all slots of the SP register. */
1423 sp_delta
= sp
- extract_unsigned_integer (buf
, 4, byte_order
);
1424 for (i
= 0; i
< 4; i
++)
1426 CORE_ADDR sp_slot
= extract_unsigned_integer (buf
+ 4*i
, 4, byte_order
);
1427 store_unsigned_integer (buf
+ 4*i
, 4, byte_order
, sp_slot
+ sp_delta
);
1429 regcache_cooked_write (regcache
, SPU_RAW_SP_REGNUM
, buf
);
1434 static struct frame_id
1435 spu_dummy_id (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1437 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1438 CORE_ADDR pc
= get_frame_register_unsigned (this_frame
, SPU_PC_REGNUM
);
1439 CORE_ADDR sp
= get_frame_register_unsigned (this_frame
, SPU_SP_REGNUM
);
1440 return frame_id_build (SPUADDR (tdep
->id
, sp
), SPUADDR (tdep
->id
, pc
& -4));
1443 /* Function return value access. */
1445 static enum return_value_convention
1446 spu_return_value (struct gdbarch
*gdbarch
, struct type
*func_type
,
1447 struct type
*type
, struct regcache
*regcache
,
1448 gdb_byte
*out
, const gdb_byte
*in
)
1450 enum return_value_convention rvc
;
1452 if (TYPE_LENGTH (type
) <= (SPU_ARGN_REGNUM
- SPU_ARG1_REGNUM
+ 1) * 16)
1453 rvc
= RETURN_VALUE_REGISTER_CONVENTION
;
1455 rvc
= RETURN_VALUE_STRUCT_CONVENTION
;
1461 case RETURN_VALUE_REGISTER_CONVENTION
:
1462 spu_value_to_regcache (regcache
, SPU_ARG1_REGNUM
, type
, in
);
1465 case RETURN_VALUE_STRUCT_CONVENTION
:
1466 error (_("Cannot set function return value."));
1474 case RETURN_VALUE_REGISTER_CONVENTION
:
1475 spu_regcache_to_value (regcache
, SPU_ARG1_REGNUM
, type
, out
);
1478 case RETURN_VALUE_STRUCT_CONVENTION
:
1479 error (_("Function return value unknown."));
1490 static const gdb_byte
*
1491 spu_breakpoint_from_pc (struct gdbarch
*gdbarch
,
1492 CORE_ADDR
* pcptr
, int *lenptr
)
1494 static const gdb_byte breakpoint
[] = { 0x00, 0x00, 0x3f, 0xff };
1496 *lenptr
= sizeof breakpoint
;
1501 spu_memory_remove_breakpoint (struct gdbarch
*gdbarch
,
1502 struct bp_target_info
*bp_tgt
)
1504 /* We work around a problem in combined Cell/B.E. debugging here. Consider
1505 that in a combined application, we have some breakpoints inserted in SPU
1506 code, and now the application forks (on the PPU side). GDB common code
1507 will assume that the fork system call copied all breakpoints into the new
1508 process' address space, and that all those copies now need to be removed
1509 (see breakpoint.c:detach_breakpoints).
1511 While this is certainly true for PPU side breakpoints, it is not true
1512 for SPU side breakpoints. fork will clone the SPU context file
1513 descriptors, so that all the existing SPU contexts are in accessible
1514 in the new process. However, the contents of the SPU contexts themselves
1515 are *not* cloned. Therefore the effect of detach_breakpoints is to
1516 remove SPU breakpoints from the *original* SPU context's local store
1517 -- this is not the correct behaviour.
1519 The workaround is to check whether the PID we are asked to remove this
1520 breakpoint from (i.e. ptid_get_pid (inferior_ptid)) is different from the
1521 PID of the current inferior (i.e. current_inferior ()->pid). This is only
1522 true in the context of detach_breakpoints. If so, we simply do nothing.
1523 [ Note that for the fork child process, it does not matter if breakpoints
1524 remain inserted, because those SPU contexts are not runnable anyway --
1525 the Linux kernel allows only the original process to invoke spu_run. */
1527 if (ptid_get_pid (inferior_ptid
) != current_inferior ()->pid
)
1530 return default_memory_remove_breakpoint (gdbarch
, bp_tgt
);
1534 /* Software single-stepping support. */
1537 spu_software_single_step (struct frame_info
*frame
)
1539 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
1540 struct address_space
*aspace
= get_frame_address_space (frame
);
1541 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1542 CORE_ADDR pc
, next_pc
;
1548 pc
= get_frame_pc (frame
);
1550 if (target_read_memory (pc
, buf
, 4))
1552 insn
= extract_unsigned_integer (buf
, 4, byte_order
);
1554 /* Get local store limit. */
1555 lslr
= get_frame_register_unsigned (frame
, SPU_LSLR_REGNUM
);
1557 lslr
= (ULONGEST
) -1;
1559 /* Next sequential instruction is at PC + 4, except if the current
1560 instruction is a PPE-assisted call, in which case it is at PC + 8.
1561 Wrap around LS limit to be on the safe side. */
1562 if ((insn
& 0xffffff00) == 0x00002100)
1563 next_pc
= (SPUADDR_ADDR (pc
) + 8) & lslr
;
1565 next_pc
= (SPUADDR_ADDR (pc
) + 4) & lslr
;
1567 insert_single_step_breakpoint (gdbarch
,
1568 aspace
, SPUADDR (SPUADDR_SPU (pc
), next_pc
));
1570 if (is_branch (insn
, &offset
, ®
))
1572 CORE_ADDR target
= offset
;
1574 if (reg
== SPU_PC_REGNUM
)
1575 target
+= SPUADDR_ADDR (pc
);
1578 get_frame_register_bytes (frame
, reg
, 0, 4, buf
);
1579 target
+= extract_unsigned_integer (buf
, 4, byte_order
) & -4;
1582 target
= target
& lslr
;
1583 if (target
!= next_pc
)
1584 insert_single_step_breakpoint (gdbarch
, aspace
,
1585 SPUADDR (SPUADDR_SPU (pc
), target
));
1592 /* Longjmp support. */
1595 spu_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
1597 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
1598 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1599 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1603 /* Jump buffer is pointed to by the argument register $r3. */
1604 get_frame_register_bytes (frame
, SPU_ARG1_REGNUM
, 0, 4, buf
);
1605 jb_addr
= extract_unsigned_integer (buf
, 4, byte_order
);
1606 if (target_read_memory (SPUADDR (tdep
->id
, jb_addr
), buf
, 4))
1609 *pc
= extract_unsigned_integer (buf
, 4, byte_order
);
1610 *pc
= SPUADDR (tdep
->id
, *pc
);
/* Disassembler helper state: the gdbarch plus the SPU context ID to
   re-attach to addresses printed by the opcodes disassembler.  */
struct spu_dis_asm_data
{
  struct gdbarch *gdbarch;
  int id;
};
1624 spu_dis_asm_print_address (bfd_vma addr
, struct disassemble_info
*info
)
1626 struct spu_dis_asm_data
*data
= info
->application_data
;
1627 print_address (data
->gdbarch
, SPUADDR (data
->id
, addr
), info
->stream
);
1631 gdb_print_insn_spu (bfd_vma memaddr
, struct disassemble_info
*info
)
1633 /* The opcodes disassembler does 18-bit address arithmetic. Make
1634 sure the SPU ID encoded in the high bits is added back when we
1635 call print_address. */
1636 struct disassemble_info spu_info
= *info
;
1637 struct spu_dis_asm_data data
;
1638 data
.gdbarch
= info
->application_data
;
1639 data
.id
= SPUADDR_SPU (memaddr
);
1641 spu_info
.application_data
= &data
;
1642 spu_info
.print_address_func
= spu_dis_asm_print_address
;
1643 return print_insn_spu (memaddr
, &spu_info
);
/* Target overlays for the SPU overlay manager.

   See the documentation of simple_overlay_update for how the
   interface is supposed to work.

   Data structures used by the overlay manager:

   struct ovly_table
     {
        u32 vma;
        u32 size;
        u32 pos;
        u32 buf;
     } _ovly_table[];   -- one entry per overlay section

   struct ovly_buf_table
     {
        u32 mapped;
     } _ovly_buf_table[]; -- one entry per overlay buffer

   _ovly_table should never change.

   Both tables are aligned to a 16-byte boundary, the symbols
   _ovly_table and _ovly_buf_table are of type STT_OBJECT and their
   size set to the size of the respective array. buf in _ovly_table is
   an index into _ovly_buf_table.

   mapped is an index into _ovly_table.  Both the mapped and buf indices start
   from one to reference the first entry in their respective tables.  */
1677 /* Using the per-objfile private data mechanism, we store for each
1678 objfile an array of "struct spu_overlay_table" structures, one
1679 for each obj_section of the objfile. This structure holds two
1680 fields, MAPPED_PTR and MAPPED_VAL. If MAPPED_PTR is zero, this
1681 is *not* an overlay section. If it is non-zero, it represents
1682 a target address. The overlay section is mapped iff the target
1683 integer at this location equals MAPPED_VAL. */
1685 static const struct objfile_data
*spu_overlay_data
;
1687 struct spu_overlay_table
1689 CORE_ADDR mapped_ptr
;
1690 CORE_ADDR mapped_val
;
1693 /* Retrieve the overlay table for OBJFILE. If not already cached, read
1694 the _ovly_table data structure from the target and initialize the
1695 spu_overlay_table data structure from it. */
1696 static struct spu_overlay_table
*
1697 spu_get_overlay_table (struct objfile
*objfile
)
1699 enum bfd_endian byte_order
= bfd_big_endian (objfile
->obfd
)?
1700 BFD_ENDIAN_BIG
: BFD_ENDIAN_LITTLE
;
1701 struct minimal_symbol
*ovly_table_msym
, *ovly_buf_table_msym
;
1702 CORE_ADDR ovly_table_base
, ovly_buf_table_base
;
1703 unsigned ovly_table_size
, ovly_buf_table_size
;
1704 struct spu_overlay_table
*tbl
;
1705 struct obj_section
*osect
;
1709 tbl
= objfile_data (objfile
, spu_overlay_data
);
1713 ovly_table_msym
= lookup_minimal_symbol ("_ovly_table", NULL
, objfile
);
1714 if (!ovly_table_msym
)
1717 ovly_buf_table_msym
= lookup_minimal_symbol ("_ovly_buf_table",
1719 if (!ovly_buf_table_msym
)
1722 ovly_table_base
= SYMBOL_VALUE_ADDRESS (ovly_table_msym
);
1723 ovly_table_size
= MSYMBOL_SIZE (ovly_table_msym
);
1725 ovly_buf_table_base
= SYMBOL_VALUE_ADDRESS (ovly_buf_table_msym
);
1726 ovly_buf_table_size
= MSYMBOL_SIZE (ovly_buf_table_msym
);
1728 ovly_table
= xmalloc (ovly_table_size
);
1729 read_memory (ovly_table_base
, ovly_table
, ovly_table_size
);
1731 tbl
= OBSTACK_CALLOC (&objfile
->objfile_obstack
,
1732 objfile
->sections_end
- objfile
->sections
,
1733 struct spu_overlay_table
);
1735 for (i
= 0; i
< ovly_table_size
/ 16; i
++)
1737 CORE_ADDR vma
= extract_unsigned_integer (ovly_table
+ 16*i
+ 0,
1739 CORE_ADDR size
= extract_unsigned_integer (ovly_table
+ 16*i
+ 4,
1741 CORE_ADDR pos
= extract_unsigned_integer (ovly_table
+ 16*i
+ 8,
1743 CORE_ADDR buf
= extract_unsigned_integer (ovly_table
+ 16*i
+ 12,
1746 if (buf
== 0 || (buf
- 1) * 4 >= ovly_buf_table_size
)
1749 ALL_OBJFILE_OSECTIONS (objfile
, osect
)
1750 if (vma
== bfd_section_vma (objfile
->obfd
, osect
->the_bfd_section
)
1751 && pos
== osect
->the_bfd_section
->filepos
)
1753 int ndx
= osect
- objfile
->sections
;
1754 tbl
[ndx
].mapped_ptr
= ovly_buf_table_base
+ (buf
- 1) * 4;
1755 tbl
[ndx
].mapped_val
= i
+ 1;
1761 set_objfile_data (objfile
, spu_overlay_data
, tbl
);
1765 /* Read _ovly_buf_table entry from the target to dermine whether
1766 OSECT is currently mapped, and update the mapped state. */
1768 spu_overlay_update_osect (struct obj_section
*osect
)
1770 enum bfd_endian byte_order
= bfd_big_endian (osect
->objfile
->obfd
)?
1771 BFD_ENDIAN_BIG
: BFD_ENDIAN_LITTLE
;
1772 struct spu_overlay_table
*ovly_table
;
1775 ovly_table
= spu_get_overlay_table (osect
->objfile
);
1779 ovly_table
+= osect
- osect
->objfile
->sections
;
1780 if (ovly_table
->mapped_ptr
== 0)
1783 id
= SPUADDR_SPU (obj_section_addr (osect
));
1784 val
= read_memory_unsigned_integer (SPUADDR (id
, ovly_table
->mapped_ptr
),
1786 osect
->ovly_mapped
= (val
== ovly_table
->mapped_val
);
1789 /* If OSECT is NULL, then update all sections' mapped state.
1790 If OSECT is non-NULL, then update only OSECT's mapped state. */
1792 spu_overlay_update (struct obj_section
*osect
)
1794 /* Just one section. */
1796 spu_overlay_update_osect (osect
);
1801 struct objfile
*objfile
;
1803 ALL_OBJSECTIONS (objfile
, osect
)
1804 if (section_is_overlay (osect
))
1805 spu_overlay_update_osect (osect
);
1809 /* Whenever a new objfile is loaded, read the target's _ovly_table.
1810 If there is one, go through all sections and make sure for non-
1811 overlay sections LMA equals VMA, while for overlay sections LMA
1812 is larger than SPU_OVERLAY_LMA. */
1814 spu_overlay_new_objfile (struct objfile
*objfile
)
1816 struct spu_overlay_table
*ovly_table
;
1817 struct obj_section
*osect
;
1819 /* If we've already touched this file, do nothing. */
1820 if (!objfile
|| objfile_data (objfile
, spu_overlay_data
) != NULL
)
1823 /* Consider only SPU objfiles. */
1824 if (bfd_get_arch (objfile
->obfd
) != bfd_arch_spu
)
1827 /* Check if this objfile has overlays. */
1828 ovly_table
= spu_get_overlay_table (objfile
);
1832 /* Now go and fiddle with all the LMAs. */
1833 ALL_OBJFILE_OSECTIONS (objfile
, osect
)
1835 bfd
*obfd
= objfile
->obfd
;
1836 asection
*bsect
= osect
->the_bfd_section
;
1837 int ndx
= osect
- objfile
->sections
;
1839 if (ovly_table
[ndx
].mapped_ptr
== 0)
1840 bfd_section_lma (obfd
, bsect
) = bfd_section_vma (obfd
, bsect
);
1842 bfd_section_lma (obfd
, bsect
) = SPU_OVERLAY_LMA
+ bsect
->filepos
;
1847 /* Insert temporary breakpoint on "main" function of newly loaded
1848 SPE context OBJFILE. */
1850 spu_catch_start (struct objfile
*objfile
)
1852 struct minimal_symbol
*minsym
;
1853 struct symtab
*symtab
;
1857 /* Do this only if requested by "set spu stop-on-load on". */
1858 if (!spu_stop_on_load_p
)
1861 /* Consider only SPU objfiles. */
1862 if (!objfile
|| bfd_get_arch (objfile
->obfd
) != bfd_arch_spu
)
1865 /* The main objfile is handled differently. */
1866 if (objfile
== symfile_objfile
)
1869 /* There can be multiple symbols named "main". Search for the
1870 "main" in *this* objfile. */
1871 minsym
= lookup_minimal_symbol ("main", NULL
, objfile
);
1875 /* If we have debugging information, try to use it -- this
1876 will allow us to properly skip the prologue. */
1877 pc
= SYMBOL_VALUE_ADDRESS (minsym
);
1878 symtab
= find_pc_sect_symtab (pc
, SYMBOL_OBJ_SECTION (minsym
));
1881 struct blockvector
*bv
= BLOCKVECTOR (symtab
);
1882 struct block
*block
= BLOCKVECTOR_BLOCK (bv
, GLOBAL_BLOCK
);
1884 struct symtab_and_line sal
;
1886 sym
= lookup_block_symbol (block
, "main", VAR_DOMAIN
);
1889 fixup_symbol_section (sym
, objfile
);
1890 sal
= find_function_start_sal (sym
, 1);
1895 /* Use a numerical address for the set_breakpoint command to avoid having
1896 the breakpoint re-set incorrectly. */
1897 xsnprintf (buf
, sizeof buf
, "*%s", core_addr_to_string (pc
));
1898 create_breakpoint (get_objfile_arch (objfile
), buf
/* arg */,
1899 NULL
/* cond_string */, -1 /* thread */,
1900 0 /* parse_condition_and_thread */, 1 /* tempflag */,
1901 bp_breakpoint
/* type_wanted */,
1902 0 /* ignore_count */,
1903 AUTO_BOOLEAN_FALSE
/* pending_break_support */,
1904 NULL
/* ops */, 0 /* from_tty */, 1 /* enabled */,
1909 /* Look up OBJFILE loaded into FRAME's SPU context. */
1910 static struct objfile
*
1911 spu_objfile_from_frame (struct frame_info
*frame
)
1913 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
1914 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1915 struct objfile
*obj
;
1917 if (gdbarch_bfd_arch_info (gdbarch
)->arch
!= bfd_arch_spu
)
1922 if (obj
->sections
!= obj
->sections_end
1923 && SPUADDR_SPU (obj_section_addr (obj
->sections
)) == tdep
->id
)
1930 /* Flush cache for ea pointer access if available. */
1932 flush_ea_cache (void)
1934 struct minimal_symbol
*msymbol
;
1935 struct objfile
*obj
;
1937 if (!has_stack_frames ())
1940 obj
= spu_objfile_from_frame (get_current_frame ());
1944 /* Lookup inferior function __cache_flush. */
1945 msymbol
= lookup_minimal_symbol ("__cache_flush", NULL
, obj
);
1946 if (msymbol
!= NULL
)
1951 type
= objfile_type (obj
)->builtin_void
;
1952 type
= lookup_function_type (type
);
1953 type
= lookup_pointer_type (type
);
1954 addr
= SYMBOL_VALUE_ADDRESS (msymbol
);
1956 call_function_by_hand (value_from_pointer (type
, addr
), 0, NULL
);
1960 /* This handler is called when the inferior has stopped. If it is stopped in
1961 SPU architecture then flush the ea cache if used. */
1963 spu_attach_normal_stop (struct bpstats
*bs
, int print_frame
)
1965 if (!spu_auto_flush_cache_p
)
1968 /* Temporarily reset spu_auto_flush_cache_p to avoid recursively
1969 re-entering this function when __cache_flush stops. */
1970 spu_auto_flush_cache_p
= 0;
1972 spu_auto_flush_cache_p
= 1;
1976 /* "info spu" commands. */
1979 info_spu_event_command (char *args
, int from_tty
)
1981 struct frame_info
*frame
= get_selected_frame (NULL
);
1982 ULONGEST event_status
= 0;
1983 ULONGEST event_mask
= 0;
1984 struct cleanup
*chain
;
1990 if (gdbarch_bfd_arch_info (get_frame_arch (frame
))->arch
!= bfd_arch_spu
)
1991 error (_("\"info spu\" is only supported on the SPU architecture."));
1993 id
= get_frame_register_unsigned (frame
, SPU_ID_REGNUM
);
1995 xsnprintf (annex
, sizeof annex
, "%d/event_status", id
);
1996 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
1997 buf
, 0, (sizeof (buf
) - 1));
1999 error (_("Could not read event_status."));
2001 event_status
= strtoulst (buf
, NULL
, 16);
2003 xsnprintf (annex
, sizeof annex
, "%d/event_mask", id
);
2004 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
2005 buf
, 0, (sizeof (buf
) - 1));
2007 error (_("Could not read event_mask."));
2009 event_mask
= strtoulst (buf
, NULL
, 16);
2011 chain
= make_cleanup_ui_out_tuple_begin_end (uiout
, "SPUInfoEvent");
2013 if (ui_out_is_mi_like_p (uiout
))
2015 ui_out_field_fmt (uiout
, "event_status",
2016 "0x%s", phex_nz (event_status
, 4));
2017 ui_out_field_fmt (uiout
, "event_mask",
2018 "0x%s", phex_nz (event_mask
, 4));
2022 printf_filtered (_("Event Status 0x%s\n"), phex (event_status
, 4));
2023 printf_filtered (_("Event Mask 0x%s\n"), phex (event_mask
, 4));
2026 do_cleanups (chain
);
2030 info_spu_signal_command (char *args
, int from_tty
)
2032 struct frame_info
*frame
= get_selected_frame (NULL
);
2033 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2034 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2035 ULONGEST signal1
= 0;
2036 ULONGEST signal1_type
= 0;
2037 int signal1_pending
= 0;
2038 ULONGEST signal2
= 0;
2039 ULONGEST signal2_type
= 0;
2040 int signal2_pending
= 0;
2041 struct cleanup
*chain
;
2047 if (gdbarch_bfd_arch_info (gdbarch
)->arch
!= bfd_arch_spu
)
2048 error (_("\"info spu\" is only supported on the SPU architecture."));
2050 id
= get_frame_register_unsigned (frame
, SPU_ID_REGNUM
);
2052 xsnprintf (annex
, sizeof annex
, "%d/signal1", id
);
2053 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
, buf
, 0, 4);
2055 error (_("Could not read signal1."));
2058 signal1
= extract_unsigned_integer (buf
, 4, byte_order
);
2059 signal1_pending
= 1;
2062 xsnprintf (annex
, sizeof annex
, "%d/signal1_type", id
);
2063 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
2064 buf
, 0, (sizeof (buf
) - 1));
2066 error (_("Could not read signal1_type."));
2068 signal1_type
= strtoulst (buf
, NULL
, 16);
2070 xsnprintf (annex
, sizeof annex
, "%d/signal2", id
);
2071 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
, buf
, 0, 4);
2073 error (_("Could not read signal2."));
2076 signal2
= extract_unsigned_integer (buf
, 4, byte_order
);
2077 signal2_pending
= 1;
2080 xsnprintf (annex
, sizeof annex
, "%d/signal2_type", id
);
2081 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
2082 buf
, 0, (sizeof (buf
) - 1));
2084 error (_("Could not read signal2_type."));
2086 signal2_type
= strtoulst (buf
, NULL
, 16);
2088 chain
= make_cleanup_ui_out_tuple_begin_end (uiout
, "SPUInfoSignal");
2090 if (ui_out_is_mi_like_p (uiout
))
2092 ui_out_field_int (uiout
, "signal1_pending", signal1_pending
);
2093 ui_out_field_fmt (uiout
, "signal1", "0x%s", phex_nz (signal1
, 4));
2094 ui_out_field_int (uiout
, "signal1_type", signal1_type
);
2095 ui_out_field_int (uiout
, "signal2_pending", signal2_pending
);
2096 ui_out_field_fmt (uiout
, "signal2", "0x%s", phex_nz (signal2
, 4));
2097 ui_out_field_int (uiout
, "signal2_type", signal2_type
);
2101 if (signal1_pending
)
2102 printf_filtered (_("Signal 1 control word 0x%s "), phex (signal1
, 4));
2104 printf_filtered (_("Signal 1 not pending "));
2107 printf_filtered (_("(Type Or)\n"));
2109 printf_filtered (_("(Type Overwrite)\n"));
2111 if (signal2_pending
)
2112 printf_filtered (_("Signal 2 control word 0x%s "), phex (signal2
, 4));
2114 printf_filtered (_("Signal 2 not pending "));
2117 printf_filtered (_("(Type Or)\n"));
2119 printf_filtered (_("(Type Overwrite)\n"));
2122 do_cleanups (chain
);
2126 info_spu_mailbox_list (gdb_byte
*buf
, int nr
, enum bfd_endian byte_order
,
2127 const char *field
, const char *msg
)
2129 struct cleanup
*chain
;
2135 chain
= make_cleanup_ui_out_table_begin_end (uiout
, 1, nr
, "mbox");
2137 ui_out_table_header (uiout
, 32, ui_left
, field
, msg
);
2138 ui_out_table_body (uiout
);
2140 for (i
= 0; i
< nr
; i
++)
2142 struct cleanup
*val_chain
;
2144 val_chain
= make_cleanup_ui_out_tuple_begin_end (uiout
, "mbox");
2145 val
= extract_unsigned_integer (buf
+ 4*i
, 4, byte_order
);
2146 ui_out_field_fmt (uiout
, field
, "0x%s", phex (val
, 4));
2147 do_cleanups (val_chain
);
2149 if (!ui_out_is_mi_like_p (uiout
))
2150 printf_filtered ("\n");
2153 do_cleanups (chain
);
2157 info_spu_mailbox_command (char *args
, int from_tty
)
2159 struct frame_info
*frame
= get_selected_frame (NULL
);
2160 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2161 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2162 struct cleanup
*chain
;
2168 if (gdbarch_bfd_arch_info (gdbarch
)->arch
!= bfd_arch_spu
)
2169 error (_("\"info spu\" is only supported on the SPU architecture."));
2171 id
= get_frame_register_unsigned (frame
, SPU_ID_REGNUM
);
2173 chain
= make_cleanup_ui_out_tuple_begin_end (uiout
, "SPUInfoMailbox");
2175 xsnprintf (annex
, sizeof annex
, "%d/mbox_info", id
);
2176 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
2177 buf
, 0, sizeof buf
);
2179 error (_("Could not read mbox_info."));
2181 info_spu_mailbox_list (buf
, len
/ 4, byte_order
,
2182 "mbox", "SPU Outbound Mailbox");
2184 xsnprintf (annex
, sizeof annex
, "%d/ibox_info", id
);
2185 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
2186 buf
, 0, sizeof buf
);
2188 error (_("Could not read ibox_info."));
2190 info_spu_mailbox_list (buf
, len
/ 4, byte_order
,
2191 "ibox", "SPU Outbound Interrupt Mailbox");
2193 xsnprintf (annex
, sizeof annex
, "%d/wbox_info", id
);
2194 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
2195 buf
, 0, sizeof buf
);
2197 error (_("Could not read wbox_info."));
2199 info_spu_mailbox_list (buf
, len
/ 4, byte_order
,
2200 "wbox", "SPU Inbound Mailbox");
2202 do_cleanups (chain
);
2206 spu_mfc_get_bitfield (ULONGEST word
, int first
, int last
)
2208 ULONGEST mask
= ~(~(ULONGEST
)0 << (last
- first
+ 1));
2209 return (word
>> (63 - last
)) & mask
;
2213 info_spu_dma_cmdlist (gdb_byte
*buf
, int nr
, enum bfd_endian byte_order
)
2215 static char *spu_mfc_opcode
[256] =
2217 /* 00 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2218 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2219 /* 10 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2220 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2221 /* 20 */ "put", "putb", "putf", NULL
, "putl", "putlb", "putlf", NULL
,
2222 "puts", "putbs", "putfs", NULL
, NULL
, NULL
, NULL
, NULL
,
2223 /* 30 */ "putr", "putrb", "putrf", NULL
, "putrl", "putrlb", "putrlf", NULL
,
2224 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2225 /* 40 */ "get", "getb", "getf", NULL
, "getl", "getlb", "getlf", NULL
,
2226 "gets", "getbs", "getfs", NULL
, NULL
, NULL
, NULL
, NULL
,
2227 /* 50 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2228 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2229 /* 60 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2230 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2231 /* 70 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2232 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2233 /* 80 */ "sdcrt", "sdcrtst", NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2234 NULL
, "sdcrz", NULL
, NULL
, NULL
, "sdcrst", NULL
, "sdcrf",
2235 /* 90 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2236 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2237 /* a0 */ "sndsig", "sndsigb", "sndsigf", NULL
, NULL
, NULL
, NULL
, NULL
,
2238 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2239 /* b0 */ "putlluc", NULL
, NULL
, NULL
, "putllc", NULL
, NULL
, NULL
,
2240 "putqlluc", NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2241 /* c0 */ "barrier", NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2242 "mfceieio", NULL
, NULL
, NULL
, "mfcsync", NULL
, NULL
, NULL
,
2243 /* d0 */ "getllar", NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2244 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2245 /* e0 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2246 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2247 /* f0 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2248 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2251 int *seq
= alloca (nr
* sizeof (int));
2253 struct cleanup
*chain
;
2257 /* Determine sequence in which to display (valid) entries. */
2258 for (i
= 0; i
< nr
; i
++)
2260 /* Search for the first valid entry all of whose
2261 dependencies are met. */
2262 for (j
= 0; j
< nr
; j
++)
2264 ULONGEST mfc_cq_dw3
;
2265 ULONGEST dependencies
;
2267 if (done
& (1 << (nr
- 1 - j
)))
2271 = extract_unsigned_integer (buf
+ 32*j
+ 24,8, byte_order
);
2272 if (!spu_mfc_get_bitfield (mfc_cq_dw3
, 16, 16))
2275 dependencies
= spu_mfc_get_bitfield (mfc_cq_dw3
, 0, nr
- 1);
2276 if ((dependencies
& done
) != dependencies
)
2280 done
|= 1 << (nr
- 1 - j
);
2291 chain
= make_cleanup_ui_out_table_begin_end (uiout
, 10, nr
, "dma_cmd");
2293 ui_out_table_header (uiout
, 7, ui_left
, "opcode", "Opcode");
2294 ui_out_table_header (uiout
, 3, ui_left
, "tag", "Tag");
2295 ui_out_table_header (uiout
, 3, ui_left
, "tid", "TId");
2296 ui_out_table_header (uiout
, 3, ui_left
, "rid", "RId");
2297 ui_out_table_header (uiout
, 18, ui_left
, "ea", "EA");
2298 ui_out_table_header (uiout
, 7, ui_left
, "lsa", "LSA");
2299 ui_out_table_header (uiout
, 7, ui_left
, "size", "Size");
2300 ui_out_table_header (uiout
, 7, ui_left
, "lstaddr", "LstAddr");
2301 ui_out_table_header (uiout
, 7, ui_left
, "lstsize", "LstSize");
2302 ui_out_table_header (uiout
, 1, ui_left
, "error_p", "E");
2304 ui_out_table_body (uiout
);
2306 for (i
= 0; i
< nr
; i
++)
2308 struct cleanup
*cmd_chain
;
2309 ULONGEST mfc_cq_dw0
;
2310 ULONGEST mfc_cq_dw1
;
2311 ULONGEST mfc_cq_dw2
;
2312 int mfc_cmd_opcode
, mfc_cmd_tag
, rclass_id
, tclass_id
;
2313 int lsa
, size
, list_lsa
, list_size
, mfc_lsa
, mfc_size
;
2315 int list_valid_p
, noop_valid_p
, qw_valid_p
, ea_valid_p
, cmd_error_p
;
2317 /* Decode contents of MFC Command Queue Context Save/Restore Registers.
2318 See "Cell Broadband Engine Registers V1.3", section 3.3.2.1. */
2321 = extract_unsigned_integer (buf
+ 32*seq
[i
], 8, byte_order
);
2323 = extract_unsigned_integer (buf
+ 32*seq
[i
] + 8, 8, byte_order
);
2325 = extract_unsigned_integer (buf
+ 32*seq
[i
] + 16, 8, byte_order
);
2327 list_lsa
= spu_mfc_get_bitfield (mfc_cq_dw0
, 0, 14);
2328 list_size
= spu_mfc_get_bitfield (mfc_cq_dw0
, 15, 26);
2329 mfc_cmd_opcode
= spu_mfc_get_bitfield (mfc_cq_dw0
, 27, 34);
2330 mfc_cmd_tag
= spu_mfc_get_bitfield (mfc_cq_dw0
, 35, 39);
2331 list_valid_p
= spu_mfc_get_bitfield (mfc_cq_dw0
, 40, 40);
2332 rclass_id
= spu_mfc_get_bitfield (mfc_cq_dw0
, 41, 43);
2333 tclass_id
= spu_mfc_get_bitfield (mfc_cq_dw0
, 44, 46);
2335 mfc_ea
= spu_mfc_get_bitfield (mfc_cq_dw1
, 0, 51) << 12
2336 | spu_mfc_get_bitfield (mfc_cq_dw2
, 25, 36);
2338 mfc_lsa
= spu_mfc_get_bitfield (mfc_cq_dw2
, 0, 13);
2339 mfc_size
= spu_mfc_get_bitfield (mfc_cq_dw2
, 14, 24);
2340 noop_valid_p
= spu_mfc_get_bitfield (mfc_cq_dw2
, 37, 37);
2341 qw_valid_p
= spu_mfc_get_bitfield (mfc_cq_dw2
, 38, 38);
2342 ea_valid_p
= spu_mfc_get_bitfield (mfc_cq_dw2
, 39, 39);
2343 cmd_error_p
= spu_mfc_get_bitfield (mfc_cq_dw2
, 40, 40);
2345 cmd_chain
= make_cleanup_ui_out_tuple_begin_end (uiout
, "cmd");
2347 if (spu_mfc_opcode
[mfc_cmd_opcode
])
2348 ui_out_field_string (uiout
, "opcode", spu_mfc_opcode
[mfc_cmd_opcode
]);
2350 ui_out_field_int (uiout
, "opcode", mfc_cmd_opcode
);
2352 ui_out_field_int (uiout
, "tag", mfc_cmd_tag
);
2353 ui_out_field_int (uiout
, "tid", tclass_id
);
2354 ui_out_field_int (uiout
, "rid", rclass_id
);
2357 ui_out_field_fmt (uiout
, "ea", "0x%s", phex (mfc_ea
, 8));
2359 ui_out_field_skip (uiout
, "ea");
2361 ui_out_field_fmt (uiout
, "lsa", "0x%05x", mfc_lsa
<< 4);
2363 ui_out_field_fmt (uiout
, "size", "0x%05x", mfc_size
<< 4);
2365 ui_out_field_fmt (uiout
, "size", "0x%05x", mfc_size
);
2369 ui_out_field_fmt (uiout
, "lstaddr", "0x%05x", list_lsa
<< 3);
2370 ui_out_field_fmt (uiout
, "lstsize", "0x%05x", list_size
<< 3);
2374 ui_out_field_skip (uiout
, "lstaddr");
2375 ui_out_field_skip (uiout
, "lstsize");
2379 ui_out_field_string (uiout
, "error_p", "*");
2381 ui_out_field_skip (uiout
, "error_p");
2383 do_cleanups (cmd_chain
);
2385 if (!ui_out_is_mi_like_p (uiout
))
2386 printf_filtered ("\n");
2389 do_cleanups (chain
);
2393 info_spu_dma_command (char *args
, int from_tty
)
2395 struct frame_info
*frame
= get_selected_frame (NULL
);
2396 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2397 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2398 ULONGEST dma_info_type
;
2399 ULONGEST dma_info_mask
;
2400 ULONGEST dma_info_status
;
2401 ULONGEST dma_info_stall_and_notify
;
2402 ULONGEST dma_info_atomic_command_status
;
2403 struct cleanup
*chain
;
2409 if (gdbarch_bfd_arch_info (get_frame_arch (frame
))->arch
!= bfd_arch_spu
)
2410 error (_("\"info spu\" is only supported on the SPU architecture."));
2412 id
= get_frame_register_unsigned (frame
, SPU_ID_REGNUM
);
2414 xsnprintf (annex
, sizeof annex
, "%d/dma_info", id
);
2415 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
2416 buf
, 0, 40 + 16 * 32);
2418 error (_("Could not read dma_info."));
2421 = extract_unsigned_integer (buf
, 8, byte_order
);
2423 = extract_unsigned_integer (buf
+ 8, 8, byte_order
);
2425 = extract_unsigned_integer (buf
+ 16, 8, byte_order
);
2426 dma_info_stall_and_notify
2427 = extract_unsigned_integer (buf
+ 24, 8, byte_order
);
2428 dma_info_atomic_command_status
2429 = extract_unsigned_integer (buf
+ 32, 8, byte_order
);
2431 chain
= make_cleanup_ui_out_tuple_begin_end (uiout
, "SPUInfoDMA");
2433 if (ui_out_is_mi_like_p (uiout
))
2435 ui_out_field_fmt (uiout
, "dma_info_type", "0x%s",
2436 phex_nz (dma_info_type
, 4));
2437 ui_out_field_fmt (uiout
, "dma_info_mask", "0x%s",
2438 phex_nz (dma_info_mask
, 4));
2439 ui_out_field_fmt (uiout
, "dma_info_status", "0x%s",
2440 phex_nz (dma_info_status
, 4));
2441 ui_out_field_fmt (uiout
, "dma_info_stall_and_notify", "0x%s",
2442 phex_nz (dma_info_stall_and_notify
, 4));
2443 ui_out_field_fmt (uiout
, "dma_info_atomic_command_status", "0x%s",
2444 phex_nz (dma_info_atomic_command_status
, 4));
2448 const char *query_msg
= _("no query pending");
2450 if (dma_info_type
& 4)
2451 switch (dma_info_type
& 3)
2453 case 1: query_msg
= _("'any' query pending"); break;
2454 case 2: query_msg
= _("'all' query pending"); break;
2455 default: query_msg
= _("undefined query type"); break;
2458 printf_filtered (_("Tag-Group Status 0x%s\n"),
2459 phex (dma_info_status
, 4));
2460 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
2461 phex (dma_info_mask
, 4), query_msg
);
2462 printf_filtered (_("Stall-and-Notify 0x%s\n"),
2463 phex (dma_info_stall_and_notify
, 4));
2464 printf_filtered (_("Atomic Cmd Status 0x%s\n"),
2465 phex (dma_info_atomic_command_status
, 4));
2466 printf_filtered ("\n");
2469 info_spu_dma_cmdlist (buf
+ 40, 16, byte_order
);
2470 do_cleanups (chain
);
2474 info_spu_proxydma_command (char *args
, int from_tty
)
2476 struct frame_info
*frame
= get_selected_frame (NULL
);
2477 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2478 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2479 ULONGEST dma_info_type
;
2480 ULONGEST dma_info_mask
;
2481 ULONGEST dma_info_status
;
2482 struct cleanup
*chain
;
2488 if (gdbarch_bfd_arch_info (gdbarch
)->arch
!= bfd_arch_spu
)
2489 error (_("\"info spu\" is only supported on the SPU architecture."));
2491 id
= get_frame_register_unsigned (frame
, SPU_ID_REGNUM
);
2493 xsnprintf (annex
, sizeof annex
, "%d/proxydma_info", id
);
2494 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
2495 buf
, 0, 24 + 8 * 32);
2497 error (_("Could not read proxydma_info."));
2499 dma_info_type
= extract_unsigned_integer (buf
, 8, byte_order
);
2500 dma_info_mask
= extract_unsigned_integer (buf
+ 8, 8, byte_order
);
2501 dma_info_status
= extract_unsigned_integer (buf
+ 16, 8, byte_order
);
2503 chain
= make_cleanup_ui_out_tuple_begin_end (uiout
, "SPUInfoProxyDMA");
2505 if (ui_out_is_mi_like_p (uiout
))
2507 ui_out_field_fmt (uiout
, "proxydma_info_type", "0x%s",
2508 phex_nz (dma_info_type
, 4));
2509 ui_out_field_fmt (uiout
, "proxydma_info_mask", "0x%s",
2510 phex_nz (dma_info_mask
, 4));
2511 ui_out_field_fmt (uiout
, "proxydma_info_status", "0x%s",
2512 phex_nz (dma_info_status
, 4));
2516 const char *query_msg
;
2518 switch (dma_info_type
& 3)
2520 case 0: query_msg
= _("no query pending"); break;
2521 case 1: query_msg
= _("'any' query pending"); break;
2522 case 2: query_msg
= _("'all' query pending"); break;
2523 default: query_msg
= _("undefined query type"); break;
2526 printf_filtered (_("Tag-Group Status 0x%s\n"),
2527 phex (dma_info_status
, 4));
2528 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
2529 phex (dma_info_mask
, 4), query_msg
);
2530 printf_filtered ("\n");
2533 info_spu_dma_cmdlist (buf
+ 24, 8, byte_order
);
2534 do_cleanups (chain
);
2538 info_spu_command (char *args
, int from_tty
)
2540 printf_unfiltered (_("\"info spu\" must be followed by "
2541 "the name of an SPU facility.\n"));
2542 help_list (infospucmdlist
, "info spu ", -1, gdb_stdout
);
2546 /* Root of all "set spu "/"show spu " commands. */
2549 show_spu_command (char *args
, int from_tty
)
2551 help_list (showspucmdlist
, "show spu ", all_commands
, gdb_stdout
);
2555 set_spu_command (char *args
, int from_tty
)
2557 help_list (setspucmdlist
, "set spu ", all_commands
, gdb_stdout
);
/* "show spu stop-on-load" callback: report the current value of the
   spu_stop_on_load_p setting.  VALUE is the pre-rendered "on"/"off"
   string supplied by the set/show machinery.  */
static void
show_spu_stop_on_load (struct ui_file *file, int from_tty,
		       struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Stopping for new SPE threads is %s.\n"),
		    value);
}
/* "show spu auto-flush-cache" callback: report the current value of
   the spu_auto_flush_cache_p setting.  VALUE is the pre-rendered
   "on"/"off" string supplied by the set/show machinery.  */
static void
show_spu_auto_flush_cache (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Automatic software-cache flush is %s.\n"),
		    value);
}
2577 /* Set up gdbarch struct. */
2579 static struct gdbarch
*
2580 spu_gdbarch_init (struct gdbarch_info info
, struct gdbarch_list
*arches
)
2582 struct gdbarch
*gdbarch
;
2583 struct gdbarch_tdep
*tdep
;
2586 /* Which spufs ID was requested as address space? */
2588 id
= *(int *)info
.tdep_info
;
2589 /* For objfile architectures of SPU solibs, decode the ID from the name.
2590 This assumes the filename convention employed by solib-spu.c. */
2593 char *name
= strrchr (info
.abfd
->filename
, '@');
2595 sscanf (name
, "@0x%*x <%d>", &id
);
2598 /* Find a candidate among extant architectures. */
2599 for (arches
= gdbarch_list_lookup_by_info (arches
, &info
);
2601 arches
= gdbarch_list_lookup_by_info (arches
->next
, &info
))
2603 tdep
= gdbarch_tdep (arches
->gdbarch
);
2604 if (tdep
&& tdep
->id
== id
)
2605 return arches
->gdbarch
;
2608 /* None found, so create a new architecture. */
2609 tdep
= XCALLOC (1, struct gdbarch_tdep
);
2611 gdbarch
= gdbarch_alloc (&info
, tdep
);
2614 set_gdbarch_print_insn (gdbarch
, gdb_print_insn_spu
);
2617 set_gdbarch_num_regs (gdbarch
, SPU_NUM_REGS
);
2618 set_gdbarch_num_pseudo_regs (gdbarch
, SPU_NUM_PSEUDO_REGS
);
2619 set_gdbarch_sp_regnum (gdbarch
, SPU_SP_REGNUM
);
2620 set_gdbarch_pc_regnum (gdbarch
, SPU_PC_REGNUM
);
2621 set_gdbarch_read_pc (gdbarch
, spu_read_pc
);
2622 set_gdbarch_write_pc (gdbarch
, spu_write_pc
);
2623 set_gdbarch_register_name (gdbarch
, spu_register_name
);
2624 set_gdbarch_register_type (gdbarch
, spu_register_type
);
2625 set_gdbarch_pseudo_register_read (gdbarch
, spu_pseudo_register_read
);
2626 set_gdbarch_pseudo_register_write (gdbarch
, spu_pseudo_register_write
);
2627 set_gdbarch_value_from_register (gdbarch
, spu_value_from_register
);
2628 set_gdbarch_register_reggroup_p (gdbarch
, spu_register_reggroup_p
);
2631 set_gdbarch_char_signed (gdbarch
, 0);
2632 set_gdbarch_ptr_bit (gdbarch
, 32);
2633 set_gdbarch_addr_bit (gdbarch
, 32);
2634 set_gdbarch_short_bit (gdbarch
, 16);
2635 set_gdbarch_int_bit (gdbarch
, 32);
2636 set_gdbarch_long_bit (gdbarch
, 32);
2637 set_gdbarch_long_long_bit (gdbarch
, 64);
2638 set_gdbarch_float_bit (gdbarch
, 32);
2639 set_gdbarch_double_bit (gdbarch
, 64);
2640 set_gdbarch_long_double_bit (gdbarch
, 64);
2641 set_gdbarch_float_format (gdbarch
, floatformats_ieee_single
);
2642 set_gdbarch_double_format (gdbarch
, floatformats_ieee_double
);
2643 set_gdbarch_long_double_format (gdbarch
, floatformats_ieee_double
);
2645 /* Address handling. */
2646 set_gdbarch_address_to_pointer (gdbarch
, spu_address_to_pointer
);
2647 set_gdbarch_pointer_to_address (gdbarch
, spu_pointer_to_address
);
2648 set_gdbarch_integer_to_address (gdbarch
, spu_integer_to_address
);
2649 set_gdbarch_address_class_type_flags (gdbarch
, spu_address_class_type_flags
);
2650 set_gdbarch_address_class_type_flags_to_name
2651 (gdbarch
, spu_address_class_type_flags_to_name
);
2652 set_gdbarch_address_class_name_to_type_flags
2653 (gdbarch
, spu_address_class_name_to_type_flags
);
2656 /* Inferior function calls. */
2657 set_gdbarch_call_dummy_location (gdbarch
, ON_STACK
);
2658 set_gdbarch_frame_align (gdbarch
, spu_frame_align
);
2659 set_gdbarch_frame_red_zone_size (gdbarch
, 2000);
2660 set_gdbarch_push_dummy_code (gdbarch
, spu_push_dummy_code
);
2661 set_gdbarch_push_dummy_call (gdbarch
, spu_push_dummy_call
);
2662 set_gdbarch_dummy_id (gdbarch
, spu_dummy_id
);
2663 set_gdbarch_return_value (gdbarch
, spu_return_value
);
2665 /* Frame handling. */
2666 set_gdbarch_inner_than (gdbarch
, core_addr_lessthan
);
2667 frame_unwind_append_unwinder (gdbarch
, &spu_frame_unwind
);
2668 frame_base_set_default (gdbarch
, &spu_frame_base
);
2669 set_gdbarch_unwind_pc (gdbarch
, spu_unwind_pc
);
2670 set_gdbarch_unwind_sp (gdbarch
, spu_unwind_sp
);
2671 set_gdbarch_virtual_frame_pointer (gdbarch
, spu_virtual_frame_pointer
);
2672 set_gdbarch_frame_args_skip (gdbarch
, 0);
2673 set_gdbarch_skip_prologue (gdbarch
, spu_skip_prologue
);
2674 set_gdbarch_in_function_epilogue_p (gdbarch
, spu_in_function_epilogue_p
);
2676 /* Cell/B.E. cross-architecture unwinder support. */
2677 frame_unwind_prepend_unwinder (gdbarch
, &spu2ppu_unwind
);
2680 set_gdbarch_decr_pc_after_break (gdbarch
, 4);
2681 set_gdbarch_breakpoint_from_pc (gdbarch
, spu_breakpoint_from_pc
);
2682 set_gdbarch_memory_remove_breakpoint (gdbarch
, spu_memory_remove_breakpoint
);
2683 set_gdbarch_cannot_step_breakpoint (gdbarch
, 1);
2684 set_gdbarch_software_single_step (gdbarch
, spu_software_single_step
);
2685 set_gdbarch_get_longjmp_target (gdbarch
, spu_get_longjmp_target
);
2688 set_gdbarch_overlay_update (gdbarch
, spu_overlay_update
);
2693 /* Provide a prototype to silence -Wmissing-prototypes. */
2694 extern initialize_file_ftype _initialize_spu_tdep
;
2697 _initialize_spu_tdep (void)
2699 register_gdbarch_init (bfd_arch_spu
, spu_gdbarch_init
);
2701 /* Add ourselves to objfile event chain. */
2702 observer_attach_new_objfile (spu_overlay_new_objfile
);
2703 spu_overlay_data
= register_objfile_data ();
2705 /* Install spu stop-on-load handler. */
2706 observer_attach_new_objfile (spu_catch_start
);
2708 /* Add ourselves to normal_stop event chain. */
2709 observer_attach_normal_stop (spu_attach_normal_stop
);
2711 /* Add root prefix command for all "set spu"/"show spu" commands. */
2712 add_prefix_cmd ("spu", no_class
, set_spu_command
,
2713 _("Various SPU specific commands."),
2714 &setspucmdlist
, "set spu ", 0, &setlist
);
2715 add_prefix_cmd ("spu", no_class
, show_spu_command
,
2716 _("Various SPU specific commands."),
2717 &showspucmdlist
, "show spu ", 0, &showlist
);
2719 /* Toggle whether or not to add a temporary breakpoint at the "main"
2720 function of new SPE contexts. */
2721 add_setshow_boolean_cmd ("stop-on-load", class_support
,
2722 &spu_stop_on_load_p
, _("\
2723 Set whether to stop for new SPE threads."),
2725 Show whether to stop for new SPE threads."),
2727 Use \"on\" to give control to the user when a new SPE thread\n\
2728 enters its \"main\" function.\n\
2729 Use \"off\" to disable stopping for new SPE threads."),
2731 show_spu_stop_on_load
,
2732 &setspucmdlist
, &showspucmdlist
);
2734 /* Toggle whether or not to automatically flush the software-managed
2735 cache whenever SPE execution stops. */
2736 add_setshow_boolean_cmd ("auto-flush-cache", class_support
,
2737 &spu_auto_flush_cache_p
, _("\
2738 Set whether to automatically flush the software-managed cache."),
2740 Show whether to automatically flush the software-managed cache."),
2742 Use \"on\" to automatically flush the software-managed cache\n\
2743 whenever SPE execution stops.\n\
2744 Use \"off\" to never automatically flush the software-managed cache."),
2746 show_spu_auto_flush_cache
,
2747 &setspucmdlist
, &showspucmdlist
);
2749 /* Add root prefix command for all "info spu" commands. */
2750 add_prefix_cmd ("spu", class_info
, info_spu_command
,
2751 _("Various SPU specific commands."),
2752 &infospucmdlist
, "info spu ", 0, &infolist
);
2754 /* Add various "info spu" commands. */
2755 add_cmd ("event", class_info
, info_spu_event_command
,
2756 _("Display SPU event facility status.\n"),
2758 add_cmd ("signal", class_info
, info_spu_signal_command
,
2759 _("Display SPU signal notification facility status.\n"),
2761 add_cmd ("mailbox", class_info
, info_spu_mailbox_command
,
2762 _("Display SPU mailbox facility status.\n"),
2764 add_cmd ("dma", class_info
, info_spu_dma_command
,
2765 _("Display MFC DMA status.\n"),
2767 add_cmd ("proxydma", class_info
, info_spu_proxydma_command
,
2768 _("Display MFC Proxy-DMA status.\n"),