/* SPU target-dependent code for GDB, the GNU debugger.
- Copyright (C) 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
+ Copyright (C) 2006-2014 Free Software Foundation, Inc.
Contributed by Ulrich Weigand <uweigand@de.ibm.com>.
Based on a port by Sid Manning <sid@us.ibm.com>.
#include "gdbtypes.h"
#include "gdbcmd.h"
#include "gdbcore.h"
-#include "gdb_string.h"
+#include <string.h>
#include "gdb_assert.h"
#include "frame.h"
#include "frame-unwind.h"
#include "floatformat.h"
#include "block.h"
#include "observer.h"
-
+#include "infcall.h"
+#include "dwarf2.h"
+#include "dwarf2-frame.h"
+#include "ax.h"
+#include "exceptions.h"
#include "spu-tdep.h"
/* Whether to stop for new SPE contexts. */
static int spu_stop_on_load_p = 0;
+/* Whether to automatically flush the SW-managed cache. */
+static int spu_auto_flush_cache_p = 1;
/* The tdep structure. */
return builtin_type (gdbarch)->builtin_uint32;
default:
- internal_error (__FILE__, __LINE__, "invalid regnum");
+ internal_error (__FILE__, __LINE__, _("invalid regnum"));
}
}
/* Pseudo registers for preferred slots - stack pointer. */
-static void
+static enum register_status
spu_pseudo_register_read_spu (struct regcache *regcache, const char *regname,
gdb_byte *buf)
{
struct gdbarch *gdbarch = get_regcache_arch (regcache);
enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
+ enum register_status status;
gdb_byte reg[32];
char annex[32];
ULONGEST id;
+ ULONGEST ul;
- regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
+ status = regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
+ if (status != REG_VALID)
+ return status;
xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
memset (reg, 0, sizeof reg);
target_read (¤t_target, TARGET_OBJECT_SPU, annex,
reg, 0, sizeof reg);
- store_unsigned_integer (buf, 4, byte_order, strtoulst (reg, NULL, 16));
+ ul = strtoulst ((char *) reg, NULL, 16);
+ store_unsigned_integer (buf, 4, byte_order, ul);
+ return REG_VALID;
}
-static void
+static enum register_status
spu_pseudo_register_read (struct gdbarch *gdbarch, struct regcache *regcache,
int regnum, gdb_byte *buf)
{
gdb_byte reg[16];
char annex[32];
ULONGEST id;
+ enum register_status status;
switch (regnum)
{
case SPU_SP_REGNUM:
- regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
+ status = regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
+ if (status != REG_VALID)
+ return status;
memcpy (buf, reg, 4);
- break;
+ return status;
case SPU_FPSCR_REGNUM:
- regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
+ status = regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
+ if (status != REG_VALID)
+ return status;
xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
target_read (¤t_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
- break;
+ return status;
case SPU_SRR0_REGNUM:
- spu_pseudo_register_read_spu (regcache, "srr0", buf);
- break;
+ return spu_pseudo_register_read_spu (regcache, "srr0", buf);
case SPU_LSLR_REGNUM:
- spu_pseudo_register_read_spu (regcache, "lslr", buf);
- break;
+ return spu_pseudo_register_read_spu (regcache, "lslr", buf);
case SPU_DECR_REGNUM:
- spu_pseudo_register_read_spu (regcache, "decr", buf);
- break;
+ return spu_pseudo_register_read_spu (regcache, "decr", buf);
case SPU_DECR_STATUS_REGNUM:
- spu_pseudo_register_read_spu (regcache, "decr_status", buf);
- break;
+ return spu_pseudo_register_read_spu (regcache, "decr_status", buf);
default:
internal_error (__FILE__, __LINE__, _("invalid regnum"));
{
struct gdbarch *gdbarch = get_regcache_arch (regcache);
enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
- gdb_byte reg[32];
+ char reg[32];
char annex[32];
ULONGEST id;
xsnprintf (reg, sizeof reg, "0x%s",
phex_nz (extract_unsigned_integer (buf, 4, byte_order), 4));
target_write (¤t_target, TARGET_OBJECT_SPU, annex,
- reg, 0, strlen (reg));
+ (gdb_byte *) reg, 0, strlen (reg));
}
static void
}
}
+/* gdbarch ax_pseudo_register_collect method: record which raw
+   registers an agent expression must collect to reconstruct pseudo
+   register REGNUM.  Return 0 on success, -1 if the pseudo register
+   cannot be reconstructed from raw registers.  */
+static int
+spu_ax_pseudo_register_collect (struct gdbarch *gdbarch,
+ struct agent_expr *ax, int regnum)
+{
+ switch (regnum)
+ {
+ case SPU_SP_REGNUM:
+ /* The cooked SP is derived from the raw SP register alone.  */
+ ax_reg_mask (ax, SPU_RAW_SP_REGNUM);
+ return 0;
+
+ case SPU_FPSCR_REGNUM:
+ case SPU_SRR0_REGNUM:
+ case SPU_LSLR_REGNUM:
+ case SPU_DECR_REGNUM:
+ case SPU_DECR_STATUS_REGNUM:
+ /* These pseudo registers are read via the TARGET_OBJECT_SPU
+ context files, not from raw registers, so an agent expression
+ cannot collect them.  */
+ return -1;
+
+ default:
+ internal_error (__FILE__, __LINE__, _("invalid regnum"));
+ }
+}
+
+/* gdbarch ax_pseudo_register_push_stack method: emit agent expression
+   code to push the value of pseudo register REGNUM onto the AX stack.
+   Return 0 on success, -1 if the pseudo register is not accessible
+   from within an agent expression.  */
+static int
+spu_ax_pseudo_register_push_stack (struct gdbarch *gdbarch,
+ struct agent_expr *ax, int regnum)
+{
+ switch (regnum)
+ {
+ case SPU_SP_REGNUM:
+ /* Push the raw SP; the cooked SP is its preferred slot.  */
+ ax_reg (ax, SPU_RAW_SP_REGNUM);
+ return 0;
+
+ case SPU_FPSCR_REGNUM:
+ case SPU_SRR0_REGNUM:
+ case SPU_LSLR_REGNUM:
+ case SPU_DECR_REGNUM:
+ case SPU_DECR_STATUS_REGNUM:
+ /* Not available via raw registers -- see the _collect variant.  */
+ return -1;
+
+ default:
+ internal_error (__FILE__, __LINE__, _("invalid regnum"));
+ }
+}
+
+
/* Value conversion -- access scalar values at the preferred slot. */
static struct value *
-spu_value_from_register (struct type *type, int regnum,
- struct frame_info *frame)
+spu_value_from_register (struct gdbarch *gdbarch, struct type *type,
+ int regnum, struct frame_id frame_id)
{
- struct value *value = default_value_from_register (type, regnum, frame);
+ struct value *value = default_value_from_register (gdbarch, type,
+ regnum, frame_id);
int len = TYPE_LENGTH (type);
if (regnum < SPU_NUM_GPRS && len < 16)
return default_register_reggroup_p (gdbarch, regnum, group);
}
-/* Address conversion. */
+/* DWARF-2 register numbers. */
+
+/* Map DWARF-2 register number REG to a GDB register number.  The only
+   remapping needed is to present the cooked stack pointer in place of
+   the raw one.  */
+static int
+spu_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
+{
+ /* Use cooked instead of raw SP. */
+ return (reg == SPU_RAW_SP_REGNUM)? SPU_SP_REGNUM : reg;
+}
+
+
+/* Address handling. */
static int
spu_gdbarch_id (struct gdbarch *gdbarch)
int id = tdep->id;
/* The objfile architecture of a standalone SPU executable does not
- provide an SPU ID. Retrieve it from the the objfile's relocated
+ provide an SPU ID. Retrieve it from the objfile's relocated
address range in this special case. */
if (id == -1
&& symfile_objfile && symfile_objfile->obfd
return id;
}
-static ULONGEST
-spu_lslr (int id)
+/* gdbarch address_class_type_flags method: translate DWARF2_ADDR_CLASS
+   into type instance flags.  Address class 1 marks __ea pointers;
+   BYTE_SIZE is not needed for this decision.  */
+static int
+spu_address_class_type_flags (int byte_size, int dwarf2_addr_class)
{
- gdb_byte buf[32];
- char annex[32];
-
- if (id == -1)
- return SPU_LS_SIZE - 1;
+ if (dwarf2_addr_class == 1)
+ return TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1;
+ else
+ return 0;
+}
- xsnprintf (annex, sizeof annex, "%d/lslr", id);
- memset (buf, 0, sizeof buf);
- target_read (¤t_target, TARGET_OBJECT_SPU, annex,
- buf, 0, sizeof buf);
+/* gdbarch address_class_type_flags_to_name method: return the source
+   level name ("__ea") for the address class encoded in TYPE_FLAGS, or
+   NULL if TYPE_FLAGS carries no address class bit we know about.  */
+static const char *
+spu_address_class_type_flags_to_name (struct gdbarch *gdbarch, int type_flags)
+{
+ if (type_flags & TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1)
+ return "__ea";
+ else
+ return NULL;
+}
- return strtoulst (buf, NULL, 16);
+/* gdbarch address_class_name_to_type_flags method: the inverse of the
+   _to_name method above.  If NAME is "__ea", store the matching type
+   instance flag in *TYPE_FLAGS_PTR and return 1; otherwise return 0
+   and leave *TYPE_FLAGS_PTR untouched.  */
+static int
+spu_address_class_name_to_type_flags (struct gdbarch *gdbarch,
+ const char *name, int *type_flags_ptr)
+{
+ if (strcmp (name, "__ea") == 0)
+ {
+ *type_flags_ptr = TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1;
+ return 1;
+ }
+ else
+ return 0;
+}
static void
struct type *type, const gdb_byte *buf)
{
int id = spu_gdbarch_id (gdbarch);
- ULONGEST lslr = spu_lslr (id);
enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
ULONGEST addr
= extract_unsigned_integer (buf, TYPE_LENGTH (type), byte_order);
- return addr? SPUADDR (id, addr & lslr) : 0;
+ /* Do not convert __ea pointers. */
+ if (TYPE_ADDRESS_CLASS_1 (type))
+ return addr;
+
+ return addr? SPUADDR (id, addr) : 0;
}
static CORE_ADDR
struct type *type, const gdb_byte *buf)
{
int id = spu_gdbarch_id (gdbarch);
- ULONGEST lslr = spu_lslr (id);
ULONGEST addr = unpack_long (type, buf);
- return SPUADDR (id, addr & lslr);
+ return SPUADDR (id, addr);
}
op_a = 0x0c0,
op_ai = 0x1c,
- op_selb = 0x4,
+ op_selb = 0x8,
op_br = 0x64,
op_bra = 0x60,
int found_sp = 0;
int found_fp = 0;
int found_lr = 0;
+ int found_bc = 0;
int reg_immed[SPU_NUM_GPRS];
gdb_byte buf[16];
CORE_ADDR prolog_pc = start_pc;
- The first instruction to set up the stack pointer.
- The first instruction to set up the frame pointer.
- The first instruction to save the link register.
+ - The first instruction to save the backchain.
- We return the instruction after the latest of these three,
+ We return the instruction after the latest of these four,
or the incoming PC if none is found. The first instruction
to set up the stack pointer also defines the frame size.
found_lr = 1;
prolog_pc = pc + 4;
}
+
+ if (ra == SPU_RAW_SP_REGNUM
+ && (found_sp? immed == 0 : rt == SPU_RAW_SP_REGNUM)
+ && !found_bc)
+ {
+ found_bc = 1;
+ prolog_pc = pc + 4;
+ }
}
/* _start uses SELB to set up the stack pointer. */
}
else
{
- /* ??? We don't really know ... */
+ /* ??? We don't really know ... */
*reg = SPU_SP_REGNUM;
*offset = 0;
}
CORE_ADDR scan_pc, func_start, func_end, epilogue_start, epilogue_end;
bfd_byte buf[4];
unsigned int insn;
- int rt, ra, rb, rc, immed;
+ int rt, ra, rb, immed;
/* Find the search limits based on function boundaries and hard limit.
We assume the epilogue can be up to 64 instructions long. */
{
CORE_ADDR reg;
LONGEST backchain;
+ ULONGEST lslr;
int status;
+ /* Get local store limit. */
+ lslr = get_frame_register_unsigned (this_frame, SPU_LSLR_REGNUM);
+ if (!lslr)
+ lslr = (ULONGEST) -1;
+
/* Get the backchain. */
reg = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
status = safe_read_memory_integer (SPUADDR (id, reg), 4, byte_order,
/* A zero backchain terminates the frame chain. Also, sanity
check against the local store size limit. */
- if (status && backchain > 0 && backchain < SPU_LS_SIZE)
+ if (status && backchain > 0 && backchain <= lslr)
{
/* Assume the link register is saved into its slot. */
- if (backchain + 16 < SPU_LS_SIZE)
- info->saved_regs[SPU_LR_REGNUM].addr = SPUADDR (id, backchain + 16);
+ if (backchain + 16 <= lslr)
+ info->saved_regs[SPU_LR_REGNUM].addr = SPUADDR (id,
+ backchain + 16);
/* Frame bases. */
info->frame_base = SPUADDR (id, backchain);
static const struct frame_unwind spu_frame_unwind = {
NORMAL_FRAME,
+ default_frame_unwind_stop_reason,
spu_frame_this_id,
spu_frame_prev_register,
NULL,
{
/* Keep interrupt enabled state unchanged. */
ULONGEST old_pc;
+
regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &old_pc);
regcache_cooked_write_unsigned (regcache, SPU_PC_REGNUM,
(SPUADDR_ADDR (pc) & -4) | (old_pc & 3));
CORE_ADDR base, func, backchain;
gdb_byte buf[4];
- if (gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_spu)
+ if (gdbarch_bfd_arch_info (target_gdbarch ())->arch == bfd_arch_spu)
return 0;
base = get_frame_sp (this_frame);
else
{
struct regcache *regcache;
- regcache = get_thread_arch_regcache (inferior_ptid, target_gdbarch);
+ regcache = get_thread_arch_regcache (inferior_ptid, target_gdbarch ());
cache->regcache = regcache_dup (regcache);
*this_prologue_cache = cache;
return 1;
static const struct frame_unwind spu2ppu_unwind = {
ARCH_FRAME,
+ default_frame_unwind_stop_reason,
spu2ppu_this_id,
spu2ppu_prev_register,
NULL,
struct value *arg = args[i];
struct type *type = check_typedef (value_type (arg));
const gdb_byte *contents = value_contents (arg);
- int len = TYPE_LENGTH (type);
- int n_regs = align_up (len, 16) / 16;
+ int n_regs = align_up (TYPE_LENGTH (type), 16) / 16;
/* If the argument doesn't wholly fit into registers, it and
all subsequent arguments go to the stack. */
/* Function return value access. */
static enum return_value_convention
-spu_return_value (struct gdbarch *gdbarch, struct type *func_type,
+spu_return_value (struct gdbarch *gdbarch, struct value *function,
struct type *type, struct regcache *regcache,
gdb_byte *out, const gdb_byte *in)
{
+ struct type *func_type = function ? value_type (function) : NULL;
enum return_value_convention rvc;
+ int opencl_vector = 0;
+
+ if (func_type)
+ {
+ func_type = check_typedef (func_type);
+
+ if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
+ func_type = check_typedef (TYPE_TARGET_TYPE (func_type));
+
+ if (TYPE_CODE (func_type) == TYPE_CODE_FUNC
+ && TYPE_CALLING_CONVENTION (func_type) == DW_CC_GDB_IBM_OpenCL
+ && TYPE_CODE (type) == TYPE_CODE_ARRAY
+ && TYPE_VECTOR (type))
+ opencl_vector = 1;
+ }
if (TYPE_LENGTH (type) <= (SPU_ARGN_REGNUM - SPU_ARG1_REGNUM + 1) * 16)
rvc = RETURN_VALUE_REGISTER_CONVENTION;
switch (rvc)
{
case RETURN_VALUE_REGISTER_CONVENTION:
- spu_value_to_regcache (regcache, SPU_ARG1_REGNUM, type, in);
+ if (opencl_vector && TYPE_LENGTH (type) == 2)
+ regcache_cooked_write_part (regcache, SPU_ARG1_REGNUM, 2, 2, in);
+ else
+ spu_value_to_regcache (regcache, SPU_ARG1_REGNUM, type, in);
break;
case RETURN_VALUE_STRUCT_CONVENTION:
- error ("Cannot set function return value.");
+ error (_("Cannot set function return value."));
break;
}
}
switch (rvc)
{
case RETURN_VALUE_REGISTER_CONVENTION:
- spu_regcache_to_value (regcache, SPU_ARG1_REGNUM, type, out);
+ if (opencl_vector && TYPE_LENGTH (type) == 2)
+ regcache_cooked_read_part (regcache, SPU_ARG1_REGNUM, 2, 2, out);
+ else
+ spu_regcache_to_value (regcache, SPU_ARG1_REGNUM, type, out);
break;
case RETURN_VALUE_STRUCT_CONVENTION:
- error ("Function return value unknown.");
+ error (_("Function return value unknown."));
break;
}
}
/* Breakpoints. */
static const gdb_byte *
-spu_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR * pcptr, int *lenptr)
+spu_breakpoint_from_pc (struct gdbarch *gdbarch,
+ CORE_ADDR * pcptr, int *lenptr)
{
static const gdb_byte breakpoint[] = { 0x00, 0x00, 0x3f, 0xff };
return breakpoint;
}
+/* gdbarch memory_remove_breakpoint method: remove a software
+   breakpoint, except when the request comes from detach_breakpoints
+   after a fork -- see the rationale below.  */
+static int
+spu_memory_remove_breakpoint (struct gdbarch *gdbarch,
+ struct bp_target_info *bp_tgt)
+{
+ /* We work around a problem in combined Cell/B.E. debugging here. Consider
+ that in a combined application, we have some breakpoints inserted in SPU
+ code, and now the application forks (on the PPU side). GDB common code
+ will assume that the fork system call copied all breakpoints into the new
+ process' address space, and that all those copies now need to be removed
+ (see breakpoint.c:detach_breakpoints).
+
+ While this is certainly true for PPU side breakpoints, it is not true
+ for SPU side breakpoints. fork will clone the SPU context file
+ descriptors, so that all the existing SPU contexts are accessible
+ in the new process. However, the contents of the SPU contexts themselves
+ are *not* cloned. Therefore the effect of detach_breakpoints is to
+ remove SPU breakpoints from the *original* SPU context's local store
+ -- this is not the correct behaviour.
+
+ The workaround is to check whether the PID we are asked to remove this
+ breakpoint from (i.e. ptid_get_pid (inferior_ptid)) is different from the
+ PID of the current inferior (i.e. current_inferior ()->pid). This is only
+ true in the context of detach_breakpoints. If so, we simply do nothing.
+ [ Note that for the fork child process, it does not matter if breakpoints
+ remain inserted, because those SPU contexts are not runnable anyway --
+ the Linux kernel allows only the original process to invoke spu_run. */
+
+ if (ptid_get_pid (inferior_ptid) != current_inferior ()->pid)
+ return 0;
+
+ return default_memory_remove_breakpoint (gdbarch, bp_tgt);
+}
+
/* Software single-stepping support. */
spu_software_single_step (struct frame_info *frame)
{
struct gdbarch *gdbarch = get_frame_arch (frame);
+ struct address_space *aspace = get_frame_address_space (frame);
enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
CORE_ADDR pc, next_pc;
unsigned int insn;
int offset, reg;
gdb_byte buf[4];
+ ULONGEST lslr;
pc = get_frame_pc (frame);
return 1;
insn = extract_unsigned_integer (buf, 4, byte_order);
+ /* Get local store limit. */
+ lslr = get_frame_register_unsigned (frame, SPU_LSLR_REGNUM);
+ if (!lslr)
+ lslr = (ULONGEST) -1;
+
/* Next sequential instruction is at PC + 4, except if the current
instruction is a PPE-assisted call, in which case it is at PC + 8.
Wrap around LS limit to be on the safe side. */
if ((insn & 0xffffff00) == 0x00002100)
- next_pc = (SPUADDR_ADDR (pc) + 8) & (SPU_LS_SIZE - 1);
+ next_pc = (SPUADDR_ADDR (pc) + 8) & lslr;
else
- next_pc = (SPUADDR_ADDR (pc) + 4) & (SPU_LS_SIZE - 1);
+ next_pc = (SPUADDR_ADDR (pc) + 4) & lslr;
- insert_single_step_breakpoint (gdbarch, SPUADDR (SPUADDR_SPU (pc), next_pc));
+ insert_single_step_breakpoint (gdbarch,
+ aspace, SPUADDR (SPUADDR_SPU (pc), next_pc));
if (is_branch (insn, &offset, ®))
{
target += SPUADDR_ADDR (pc);
else if (reg != -1)
{
- get_frame_register_bytes (frame, reg, 0, 4, buf);
- target += extract_unsigned_integer (buf, 4, byte_order) & -4;
+ int optim, unavail;
+
+ if (get_frame_register_bytes (frame, reg, 0, 4, buf,
+ &optim, &unavail))
+ target += extract_unsigned_integer (buf, 4, byte_order) & -4;
+ else
+ {
+ if (optim)
+ throw_error (OPTIMIZED_OUT_ERROR,
+ _("Could not determine address of "
+ "single-step breakpoint."));
+ if (unavail)
+ throw_error (NOT_AVAILABLE_ERROR,
+ _("Could not determine address of "
+ "single-step breakpoint."));
+ }
}
- target = target & (SPU_LS_SIZE - 1);
+ target = target & lslr;
if (target != next_pc)
- insert_single_step_breakpoint (gdbarch,
+ insert_single_step_breakpoint (gdbarch, aspace,
SPUADDR (SPUADDR_SPU (pc), target));
}
enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
gdb_byte buf[4];
CORE_ADDR jb_addr;
+ int optim, unavail;
/* Jump buffer is pointed to by the argument register $r3. */
- get_frame_register_bytes (frame, SPU_ARG1_REGNUM, 0, 4, buf);
+ if (!get_frame_register_bytes (frame, SPU_ARG1_REGNUM, 0, 4, buf,
+ &optim, &unavail))
+ return 0;
+
jb_addr = extract_unsigned_integer (buf, 4, byte_order);
if (target_read_memory (SPUADDR (tdep->id, jb_addr), buf, 4))
return 0;
static int
gdb_print_insn_spu (bfd_vma memaddr, struct disassemble_info *info)
{
- /* The opcodes disassembler does 18-bit address arithmetic. Make sure the
- SPU ID encoded in the high bits is added back when we call print_address. */
+ /* The opcodes disassembler does 18-bit address arithmetic. Make
+ sure the SPU ID encoded in the high bits is added back when we
+ call print_address. */
struct disassemble_info spu_info = *info;
struct spu_dis_asm_data data;
data.gdbarch = info->application_data;
_ovly_table should never change.
- Both tables are aligned to a 16-byte boundary, the symbols _ovly_table
- and _ovly_buf_table are of type STT_OBJECT and their size set to the size
- of the respective array. buf in _ovly_table is an index into _ovly_buf_table.
+ Both tables are aligned to a 16-byte boundary, the symbols
+ _ovly_table and _ovly_buf_table are of type STT_OBJECT and their
+ size set to the size of the respective array. buf in _ovly_table is
+ an index into _ovly_buf_table.
- mapped is an index into _ovly_table. Both the mapped and buf indices start
+ mapped is an index into _ovly_table. Both the mapped and buf indices start
from one to reference the first entry in their respective tables. */
/* Using the per-objfile private data mechanism, we store for each
{
enum bfd_endian byte_order = bfd_big_endian (objfile->obfd)?
BFD_ENDIAN_BIG : BFD_ENDIAN_LITTLE;
- struct minimal_symbol *ovly_table_msym, *ovly_buf_table_msym;
+ struct bound_minimal_symbol ovly_table_msym, ovly_buf_table_msym;
CORE_ADDR ovly_table_base, ovly_buf_table_base;
unsigned ovly_table_size, ovly_buf_table_size;
struct spu_overlay_table *tbl;
struct obj_section *osect;
- char *ovly_table;
+ gdb_byte *ovly_table;
int i;
tbl = objfile_data (objfile, spu_overlay_data);
return tbl;
ovly_table_msym = lookup_minimal_symbol ("_ovly_table", NULL, objfile);
- if (!ovly_table_msym)
+ if (!ovly_table_msym.minsym)
return NULL;
- ovly_buf_table_msym = lookup_minimal_symbol ("_ovly_buf_table", NULL, objfile);
- if (!ovly_buf_table_msym)
+ ovly_buf_table_msym = lookup_minimal_symbol ("_ovly_buf_table",
+ NULL, objfile);
+ if (!ovly_buf_table_msym.minsym)
return NULL;
- ovly_table_base = SYMBOL_VALUE_ADDRESS (ovly_table_msym);
- ovly_table_size = MSYMBOL_SIZE (ovly_table_msym);
+ ovly_table_base = BMSYMBOL_VALUE_ADDRESS (ovly_table_msym);
+ ovly_table_size = MSYMBOL_SIZE (ovly_table_msym.minsym);
- ovly_buf_table_base = SYMBOL_VALUE_ADDRESS (ovly_buf_table_msym);
- ovly_buf_table_size = MSYMBOL_SIZE (ovly_buf_table_msym);
+ ovly_buf_table_base = BMSYMBOL_VALUE_ADDRESS (ovly_buf_table_msym);
+ ovly_buf_table_size = MSYMBOL_SIZE (ovly_buf_table_msym.minsym);
ovly_table = xmalloc (ovly_table_size);
read_memory (ovly_table_base, ovly_table, ovly_table_size);
/* Whenever a new objfile is loaded, read the target's _ovly_table.
If there is one, go through all sections and make sure for non-
overlay sections LMA equals VMA, while for overlay sections LMA
- is larger than local store size. */
+ is larger than SPU_OVERLAY_LMA. */
static void
spu_overlay_new_objfile (struct objfile *objfile)
{
if (ovly_table[ndx].mapped_ptr == 0)
bfd_section_lma (obfd, bsect) = bfd_section_vma (obfd, bsect);
else
- bfd_section_lma (obfd, bsect) = bsect->filepos + SPU_LS_SIZE;
+ bfd_section_lma (obfd, bsect) = SPU_OVERLAY_LMA + bsect->filepos;
}
}
static void
spu_catch_start (struct objfile *objfile)
{
- struct minimal_symbol *minsym;
+ struct bound_minimal_symbol minsym;
struct symtab *symtab;
CORE_ADDR pc;
char buf[32];
/* There can be multiple symbols named "main". Search for the
"main" in *this* objfile. */
minsym = lookup_minimal_symbol ("main", NULL, objfile);
- if (!minsym)
+ if (!minsym.minsym)
return;
/* If we have debugging information, try to use it -- this
will allow us to properly skip the prologue. */
- pc = SYMBOL_VALUE_ADDRESS (minsym);
- symtab = find_pc_sect_symtab (pc, SYMBOL_OBJ_SECTION (minsym));
+ pc = BMSYMBOL_VALUE_ADDRESS (minsym);
+ symtab = find_pc_sect_symtab (pc, MSYMBOL_OBJ_SECTION (minsym.objfile,
+ minsym.minsym));
if (symtab != NULL)
{
struct blockvector *bv = BLOCKVECTOR (symtab);
struct symbol *sym;
struct symtab_and_line sal;
- sym = lookup_block_symbol (block, "main", NULL, VAR_DOMAIN);
+ sym = lookup_block_symbol (block, "main", VAR_DOMAIN);
if (sym)
{
fixup_symbol_section (sym, objfile);
/* Use a numerical address for the set_breakpoint command to avoid having
the breakpoint re-set incorrectly. */
xsnprintf (buf, sizeof buf, "*%s", core_addr_to_string (pc));
- set_breakpoint (get_objfile_arch (objfile),
- buf, NULL /* condition */,
- 0 /* hardwareflag */, 1 /* tempflag */,
- -1 /* thread */, 0 /* ignore_count */,
- 0 /* pending */, 1 /* enabled */);
+ create_breakpoint (get_objfile_arch (objfile), buf /* arg */,
+ NULL /* cond_string */, -1 /* thread */,
+ NULL /* extra_string */,
+ 0 /* parse_condition_and_thread */, 1 /* tempflag */,
+ bp_breakpoint /* type_wanted */,
+ 0 /* ignore_count */,
+ AUTO_BOOLEAN_FALSE /* pending_break_support */,
+ &bkpt_breakpoint_ops /* ops */, 0 /* from_tty */,
+ 1 /* enabled */, 0 /* internal */, 0);
+}
+
+
+/* Look up the objfile loaded into FRAME's SPU context.  Returns NULL
+   if FRAME is not an SPU frame or no objfile matches the context ID.  */
+static struct objfile *
+spu_objfile_from_frame (struct frame_info *frame)
+{
+ struct gdbarch *gdbarch = get_frame_arch (frame);
+ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
+ struct objfile *obj;
+
+ /* Only SPU frames carry an SPU context ID in their tdep.  */
+ if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
+ return NULL;
+
+ /* Find the objfile whose first section lives in this SPU context's
+ address space (the SPU ID is encoded in the section address).  */
+ ALL_OBJFILES (obj)
+ {
+ if (obj->sections != obj->sections_end
+ && SPUADDR_SPU (obj_section_addr (obj->sections)) == tdep->id)
+ return obj;
+ }
+
+ return NULL;
+}
+
+/* Flush the software-managed cache used for __ea pointer access, if
+   the current SPU context's program provides a __cache_flush routine.
+   No-op when there are no stack frames or no matching objfile.  */
+static void
+flush_ea_cache (void)
+{
+ struct bound_minimal_symbol msymbol;
+ struct objfile *obj;
+
+ if (!has_stack_frames ())
+ return;
+
+ obj = spu_objfile_from_frame (get_current_frame ());
+ if (obj == NULL)
+ return;
+
+ /* Lookup inferior function __cache_flush. */
+ msymbol = lookup_minimal_symbol ("__cache_flush", NULL, obj);
+ if (msymbol.minsym != NULL)
+ {
+ struct type *type;
+ CORE_ADDR addr;
+
+ /* Build a "void (*) (void)" type and call __cache_flush in the
+ inferior with no arguments.  */
+ type = objfile_type (obj)->builtin_void;
+ type = lookup_function_type (type);
+ type = lookup_pointer_type (type);
+ addr = BMSYMBOL_VALUE_ADDRESS (msymbol);
+
+ call_function_by_hand (value_from_pointer (type, addr), 0, NULL);
+ }
+}
+
+/* Observer called when the inferior has stopped.  If it stopped in an
+   SPU context, flush the software-managed __ea cache (unless disabled
+   via spu_auto_flush_cache_p).  */
+static void
+spu_attach_normal_stop (struct bpstats *bs, int print_frame)
+{
+ if (!spu_auto_flush_cache_p)
+ return;
+
+ /* Temporarily reset spu_auto_flush_cache_p to avoid recursively
+ re-entering this function when __cache_flush stops. */
+ spu_auto_flush_cache_p = 0;
+ flush_ea_cache ();
+ spu_auto_flush_cache_p = 1;
}
gdb_byte buf[100];
char annex[32];
LONGEST len;
- int rc, id;
+ int id;
if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
error (_("\"info spu\" is only supported on the SPU architecture."));
if (len <= 0)
error (_("Could not read event_status."));
buf[len] = '\0';
- event_status = strtoulst (buf, NULL, 16);
+ event_status = strtoulst ((char *) buf, NULL, 16);
xsnprintf (annex, sizeof annex, "%d/event_mask", id);
len = target_read (¤t_target, TARGET_OBJECT_SPU, annex,
if (len <= 0)
error (_("Could not read event_mask."));
buf[len] = '\0';
- event_mask = strtoulst (buf, NULL, 16);
+ event_mask = strtoulst ((char *) buf, NULL, 16);
- chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoEvent");
+ chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "SPUInfoEvent");
- if (ui_out_is_mi_like_p (uiout))
+ if (ui_out_is_mi_like_p (current_uiout))
{
- ui_out_field_fmt (uiout, "event_status",
+ ui_out_field_fmt (current_uiout, "event_status",
"0x%s", phex_nz (event_status, 4));
- ui_out_field_fmt (uiout, "event_mask",
+ ui_out_field_fmt (current_uiout, "event_mask",
"0x%s", phex_nz (event_mask, 4));
}
else
char annex[32];
gdb_byte buf[100];
LONGEST len;
- int rc, id;
+ int id;
if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
error (_("\"info spu\" is only supported on the SPU architecture."));
if (len <= 0)
error (_("Could not read signal1_type."));
buf[len] = '\0';
- signal1_type = strtoulst (buf, NULL, 16);
+ signal1_type = strtoulst ((char *) buf, NULL, 16);
xsnprintf (annex, sizeof annex, "%d/signal2", id);
len = target_read (¤t_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
if (len <= 0)
error (_("Could not read signal2_type."));
buf[len] = '\0';
- signal2_type = strtoulst (buf, NULL, 16);
+ signal2_type = strtoulst ((char *) buf, NULL, 16);
- chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoSignal");
+ chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "SPUInfoSignal");
- if (ui_out_is_mi_like_p (uiout))
+ if (ui_out_is_mi_like_p (current_uiout))
{
- ui_out_field_int (uiout, "signal1_pending", signal1_pending);
- ui_out_field_fmt (uiout, "signal1", "0x%s", phex_nz (signal1, 4));
- ui_out_field_int (uiout, "signal1_type", signal1_type);
- ui_out_field_int (uiout, "signal2_pending", signal2_pending);
- ui_out_field_fmt (uiout, "signal2", "0x%s", phex_nz (signal2, 4));
- ui_out_field_int (uiout, "signal2_type", signal2_type);
+ ui_out_field_int (current_uiout, "signal1_pending", signal1_pending);
+ ui_out_field_fmt (current_uiout, "signal1", "0x%s", phex_nz (signal1, 4));
+ ui_out_field_int (current_uiout, "signal1_type", signal1_type);
+ ui_out_field_int (current_uiout, "signal2_pending", signal2_pending);
+ ui_out_field_fmt (current_uiout, "signal2", "0x%s", phex_nz (signal2, 4));
+ ui_out_field_int (current_uiout, "signal2_type", signal2_type);
}
else
{
if (nr <= 0)
return;
- chain = make_cleanup_ui_out_table_begin_end (uiout, 1, nr, "mbox");
+ chain = make_cleanup_ui_out_table_begin_end (current_uiout, 1, nr, "mbox");
- ui_out_table_header (uiout, 32, ui_left, field, msg);
- ui_out_table_body (uiout);
+ ui_out_table_header (current_uiout, 32, ui_left, field, msg);
+ ui_out_table_body (current_uiout);
for (i = 0; i < nr; i++)
{
struct cleanup *val_chain;
ULONGEST val;
- val_chain = make_cleanup_ui_out_tuple_begin_end (uiout, "mbox");
+ val_chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "mbox");
val = extract_unsigned_integer (buf + 4*i, 4, byte_order);
- ui_out_field_fmt (uiout, field, "0x%s", phex (val, 4));
+ ui_out_field_fmt (current_uiout, field, "0x%s", phex (val, 4));
do_cleanups (val_chain);
- if (!ui_out_is_mi_like_p (uiout))
+ if (!ui_out_is_mi_like_p (current_uiout))
printf_filtered ("\n");
}
char annex[32];
gdb_byte buf[1024];
LONGEST len;
- int i, id;
+ int id;
if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
error (_("\"info spu\" is only supported on the SPU architecture."));
id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
- chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoMailbox");
+ chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "SPUInfoMailbox");
xsnprintf (annex, sizeof annex, "%d/mbox_info", id);
len = target_read (¤t_target, TARGET_OBJECT_SPU, annex,
nr = i;
- chain = make_cleanup_ui_out_table_begin_end (uiout, 10, nr, "dma_cmd");
+ chain = make_cleanup_ui_out_table_begin_end (current_uiout, 10, nr,
+ "dma_cmd");
- ui_out_table_header (uiout, 7, ui_left, "opcode", "Opcode");
- ui_out_table_header (uiout, 3, ui_left, "tag", "Tag");
- ui_out_table_header (uiout, 3, ui_left, "tid", "TId");
- ui_out_table_header (uiout, 3, ui_left, "rid", "RId");
- ui_out_table_header (uiout, 18, ui_left, "ea", "EA");
- ui_out_table_header (uiout, 7, ui_left, "lsa", "LSA");
- ui_out_table_header (uiout, 7, ui_left, "size", "Size");
- ui_out_table_header (uiout, 7, ui_left, "lstaddr", "LstAddr");
- ui_out_table_header (uiout, 7, ui_left, "lstsize", "LstSize");
- ui_out_table_header (uiout, 1, ui_left, "error_p", "E");
+ ui_out_table_header (current_uiout, 7, ui_left, "opcode", "Opcode");
+ ui_out_table_header (current_uiout, 3, ui_left, "tag", "Tag");
+ ui_out_table_header (current_uiout, 3, ui_left, "tid", "TId");
+ ui_out_table_header (current_uiout, 3, ui_left, "rid", "RId");
+ ui_out_table_header (current_uiout, 18, ui_left, "ea", "EA");
+ ui_out_table_header (current_uiout, 7, ui_left, "lsa", "LSA");
+ ui_out_table_header (current_uiout, 7, ui_left, "size", "Size");
+ ui_out_table_header (current_uiout, 7, ui_left, "lstaddr", "LstAddr");
+ ui_out_table_header (current_uiout, 7, ui_left, "lstsize", "LstSize");
+ ui_out_table_header (current_uiout, 1, ui_left, "error_p", "E");
- ui_out_table_body (uiout);
+ ui_out_table_body (current_uiout);
for (i = 0; i < nr; i++)
{
ULONGEST mfc_cq_dw1;
ULONGEST mfc_cq_dw2;
int mfc_cmd_opcode, mfc_cmd_tag, rclass_id, tclass_id;
- int lsa, size, list_lsa, list_size, mfc_lsa, mfc_size;
+ int list_lsa, list_size, mfc_lsa, mfc_size;
ULONGEST mfc_ea;
int list_valid_p, noop_valid_p, qw_valid_p, ea_valid_p, cmd_error_p;
ea_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 39, 39);
cmd_error_p = spu_mfc_get_bitfield (mfc_cq_dw2, 40, 40);
- cmd_chain = make_cleanup_ui_out_tuple_begin_end (uiout, "cmd");
+ cmd_chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "cmd");
if (spu_mfc_opcode[mfc_cmd_opcode])
- ui_out_field_string (uiout, "opcode", spu_mfc_opcode[mfc_cmd_opcode]);
+ ui_out_field_string (current_uiout, "opcode", spu_mfc_opcode[mfc_cmd_opcode]);
else
- ui_out_field_int (uiout, "opcode", mfc_cmd_opcode);
+ ui_out_field_int (current_uiout, "opcode", mfc_cmd_opcode);
- ui_out_field_int (uiout, "tag", mfc_cmd_tag);
- ui_out_field_int (uiout, "tid", tclass_id);
- ui_out_field_int (uiout, "rid", rclass_id);
+ ui_out_field_int (current_uiout, "tag", mfc_cmd_tag);
+ ui_out_field_int (current_uiout, "tid", tclass_id);
+ ui_out_field_int (current_uiout, "rid", rclass_id);
if (ea_valid_p)
- ui_out_field_fmt (uiout, "ea", "0x%s", phex (mfc_ea, 8));
+ ui_out_field_fmt (current_uiout, "ea", "0x%s", phex (mfc_ea, 8));
else
- ui_out_field_skip (uiout, "ea");
+ ui_out_field_skip (current_uiout, "ea");
- ui_out_field_fmt (uiout, "lsa", "0x%05x", mfc_lsa << 4);
+ ui_out_field_fmt (current_uiout, "lsa", "0x%05x", mfc_lsa << 4);
if (qw_valid_p)
- ui_out_field_fmt (uiout, "size", "0x%05x", mfc_size << 4);
+ ui_out_field_fmt (current_uiout, "size", "0x%05x", mfc_size << 4);
else
- ui_out_field_fmt (uiout, "size", "0x%05x", mfc_size);
+ ui_out_field_fmt (current_uiout, "size", "0x%05x", mfc_size);
if (list_valid_p)
{
- ui_out_field_fmt (uiout, "lstaddr", "0x%05x", list_lsa << 3);
- ui_out_field_fmt (uiout, "lstsize", "0x%05x", list_size << 3);
+ ui_out_field_fmt (current_uiout, "lstaddr", "0x%05x", list_lsa << 3);
+ ui_out_field_fmt (current_uiout, "lstsize", "0x%05x", list_size << 3);
}
else
{
- ui_out_field_skip (uiout, "lstaddr");
- ui_out_field_skip (uiout, "lstsize");
+ ui_out_field_skip (current_uiout, "lstaddr");
+ ui_out_field_skip (current_uiout, "lstsize");
}
if (cmd_error_p)
- ui_out_field_string (uiout, "error_p", "*");
+ ui_out_field_string (current_uiout, "error_p", "*");
else
- ui_out_field_skip (uiout, "error_p");
+ ui_out_field_skip (current_uiout, "error_p");
do_cleanups (cmd_chain);
- if (!ui_out_is_mi_like_p (uiout))
+ if (!ui_out_is_mi_like_p (current_uiout))
printf_filtered ("\n");
}
char annex[32];
gdb_byte buf[1024];
LONGEST len;
- int i, id;
+ int id;
if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
error (_("\"info spu\" is only supported on the SPU architecture."));
dma_info_atomic_command_status
= extract_unsigned_integer (buf + 32, 8, byte_order);
- chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoDMA");
+ chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "SPUInfoDMA");
- if (ui_out_is_mi_like_p (uiout))
+ if (ui_out_is_mi_like_p (current_uiout))
{
- ui_out_field_fmt (uiout, "dma_info_type", "0x%s",
+ ui_out_field_fmt (current_uiout, "dma_info_type", "0x%s",
phex_nz (dma_info_type, 4));
- ui_out_field_fmt (uiout, "dma_info_mask", "0x%s",
+ ui_out_field_fmt (current_uiout, "dma_info_mask", "0x%s",
phex_nz (dma_info_mask, 4));
- ui_out_field_fmt (uiout, "dma_info_status", "0x%s",
+ ui_out_field_fmt (current_uiout, "dma_info_status", "0x%s",
phex_nz (dma_info_status, 4));
- ui_out_field_fmt (uiout, "dma_info_stall_and_notify", "0x%s",
+ ui_out_field_fmt (current_uiout, "dma_info_stall_and_notify", "0x%s",
phex_nz (dma_info_stall_and_notify, 4));
- ui_out_field_fmt (uiout, "dma_info_atomic_command_status", "0x%s",
+ ui_out_field_fmt (current_uiout, "dma_info_atomic_command_status", "0x%s",
phex_nz (dma_info_atomic_command_status, 4));
}
else
char annex[32];
gdb_byte buf[1024];
LONGEST len;
- int i, id;
+ int id;
if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
error (_("\"info spu\" is only supported on the SPU architecture."));
dma_info_mask = extract_unsigned_integer (buf + 8, 8, byte_order);
dma_info_status = extract_unsigned_integer (buf + 16, 8, byte_order);
- chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoProxyDMA");
+ chain = make_cleanup_ui_out_tuple_begin_end (current_uiout,
+ "SPUInfoProxyDMA");
- if (ui_out_is_mi_like_p (uiout))
+ if (ui_out_is_mi_like_p (current_uiout))
{
- ui_out_field_fmt (uiout, "proxydma_info_type", "0x%s",
+ ui_out_field_fmt (current_uiout, "proxydma_info_type", "0x%s",
phex_nz (dma_info_type, 4));
- ui_out_field_fmt (uiout, "proxydma_info_mask", "0x%s",
+ ui_out_field_fmt (current_uiout, "proxydma_info_mask", "0x%s",
phex_nz (dma_info_mask, 4));
- ui_out_field_fmt (uiout, "proxydma_info_status", "0x%s",
+ ui_out_field_fmt (current_uiout, "proxydma_info_status", "0x%s",
phex_nz (dma_info_status, 4));
}
else
static void
info_spu_command (char *args, int from_tty)
{
+  /* Bare "info spu" is not itself a query: print usage and fall
+     through to listing the registered sub-commands (event, signal,
+     mailbox, dma, proxydma, ...) from infospucmdlist.  */
- printf_unfiltered (_("\"info spu\" must be followed by the name of an SPU facility.\n"));
+ printf_unfiltered (_("\"info spu\" must be followed by "
+ "the name of an SPU facility.\n"));
help_list (infospucmdlist, "info spu ", -1, gdb_stdout);
}
value);
}
+/* "show spu auto-flush-cache" callback: report whether the
+   software-managed cache is flushed automatically on SPE stop.
+   VALUE is the setting's printable form ("on"/"off") supplied by
+   the add_setshow machinery; mirrors show_spu_stop_on_load.  */
+static void
+show_spu_auto_flush_cache (struct ui_file *file, int from_tty,
+ struct cmd_list_element *c, const char *value)
+{
+ fprintf_filtered (file, _("Automatic software-cache flush is %s.\n"),
+ value);
+}
+
+
/* Set up gdbarch struct. */
}
/* None found, so create a new architecture. */
- tdep = XCALLOC (1, struct gdbarch_tdep);
+ tdep = XCNEW (struct gdbarch_tdep);
tdep->id = id;
gdbarch = gdbarch_alloc (&info, tdep);
set_gdbarch_pseudo_register_write (gdbarch, spu_pseudo_register_write);
set_gdbarch_value_from_register (gdbarch, spu_value_from_register);
set_gdbarch_register_reggroup_p (gdbarch, spu_register_reggroup_p);
+ set_gdbarch_dwarf2_reg_to_regnum (gdbarch, spu_dwarf_reg_to_regnum);
+ set_gdbarch_ax_pseudo_register_collect
+ (gdbarch, spu_ax_pseudo_register_collect);
+ set_gdbarch_ax_pseudo_register_push_stack
+ (gdbarch, spu_ax_pseudo_register_push_stack);
/* Data types. */
set_gdbarch_char_signed (gdbarch, 0);
set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
- /* Address conversion. */
+ /* Address handling. */
set_gdbarch_address_to_pointer (gdbarch, spu_address_to_pointer);
set_gdbarch_pointer_to_address (gdbarch, spu_pointer_to_address);
set_gdbarch_integer_to_address (gdbarch, spu_integer_to_address);
+ set_gdbarch_address_class_type_flags (gdbarch, spu_address_class_type_flags);
+ set_gdbarch_address_class_type_flags_to_name
+ (gdbarch, spu_address_class_type_flags_to_name);
+ set_gdbarch_address_class_name_to_type_flags
+ (gdbarch, spu_address_class_name_to_type_flags);
+
/* Inferior function calls. */
set_gdbarch_call_dummy_location (gdbarch, ON_STACK);
/* Frame handling. */
set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
+ dwarf2_append_unwinders (gdbarch);
frame_unwind_append_unwinder (gdbarch, &spu_frame_unwind);
frame_base_set_default (gdbarch, &spu_frame_base);
set_gdbarch_unwind_pc (gdbarch, spu_unwind_pc);
/* Breakpoints. */
set_gdbarch_decr_pc_after_break (gdbarch, 4);
set_gdbarch_breakpoint_from_pc (gdbarch, spu_breakpoint_from_pc);
+ set_gdbarch_memory_remove_breakpoint (gdbarch, spu_memory_remove_breakpoint);
set_gdbarch_cannot_step_breakpoint (gdbarch, 1);
set_gdbarch_software_single_step (gdbarch, spu_software_single_step);
set_gdbarch_get_longjmp_target (gdbarch, spu_get_longjmp_target);
/* Install spu stop-on-load handler. */
observer_attach_new_objfile (spu_catch_start);
+ /* Add ourselves to normal_stop event chain. */
+ observer_attach_normal_stop (spu_attach_normal_stop);
+
/* Add root prefix command for all "set spu"/"show spu" commands. */
add_prefix_cmd ("spu", no_class, set_spu_command,
_("Various SPU specific commands."),
show_spu_stop_on_load,
&setspucmdlist, &showspucmdlist);
+ /* Toggle whether or not to automatically flush the software-managed
+ cache whenever SPE execution stops. */
+ add_setshow_boolean_cmd ("auto-flush-cache", class_support,
+ &spu_auto_flush_cache_p, _("\
+Set whether to automatically flush the software-managed cache."),
+ _("\
+Show whether to automatically flush the software-managed cache."),
+ _("\
+Use \"on\" to automatically flush the software-managed cache\n\
+whenever SPE execution stops.\n\
+Use \"off\" to never automatically flush the software-managed cache."),
+ NULL,
+ show_spu_auto_flush_cache,
+ &setspucmdlist, &showspucmdlist);
+
/* Add root prefix command for all "info spu" commands. */
add_prefix_cmd ("spu", class_info, info_spu_command,
_("Various SPU specific commands."),