1 /* SPU target-dependent code for GDB, the GNU debugger.
2 Copyright (C) 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
3
4 Contributed by Ulrich Weigand <uweigand@de.ibm.com>.
5 Based on a port by Sid Manning <sid@us.ibm.com>.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "arch-utils.h"
24 #include "gdbtypes.h"
25 #include "gdbcmd.h"
26 #include "gdbcore.h"
27 #include "gdb_string.h"
28 #include "gdb_assert.h"
29 #include "frame.h"
30 #include "frame-unwind.h"
31 #include "frame-base.h"
32 #include "trad-frame.h"
33 #include "symtab.h"
34 #include "symfile.h"
35 #include "value.h"
36 #include "inferior.h"
37 #include "dis-asm.h"
38 #include "objfiles.h"
39 #include "language.h"
40 #include "regcache.h"
41 #include "reggroups.h"
42 #include "floatformat.h"
43 #include "observer.h"
44
45 #include "spu-tdep.h"
46
47
48 /* The tdep structure. */
49 struct gdbarch_tdep
50 {
51 /* SPU-specific vector type. */
52 struct type *spu_builtin_type_vec128;
53 };
54
55
56 /* SPU-specific vector type. */
57 static struct type *
58 spu_builtin_type_vec128 (struct gdbarch *gdbarch)
59 {
60 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
61
62 if (!tdep->spu_builtin_type_vec128)
63 {
64 struct type *t;
65
66 t = init_composite_type ("__spu_builtin_type_vec128", TYPE_CODE_UNION);
67 append_composite_type_field (t, "uint128", builtin_type_int128);
68 append_composite_type_field (t, "v2_int64",
69 init_vector_type (builtin_type_int64, 2));
70 append_composite_type_field (t, "v4_int32",
71 init_vector_type (builtin_type_int32, 4));
72 append_composite_type_field (t, "v8_int16",
73 init_vector_type (builtin_type_int16, 8));
74 append_composite_type_field (t, "v16_int8",
75 init_vector_type (builtin_type_int8, 16));
76 append_composite_type_field (t, "v2_double",
77 init_vector_type (builtin_type (gdbarch)
78 ->builtin_double, 2));
79 append_composite_type_field (t, "v4_float",
80 init_vector_type (builtin_type (gdbarch)
81 ->builtin_float, 4));
82
83 TYPE_VECTOR (t) = 1;
84 TYPE_NAME (t) = "spu_builtin_type_vec128";
85
86 tdep->spu_builtin_type_vec128 = t;
87 }
88
89 return tdep->spu_builtin_type_vec128;
90 }
91
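/* For reference, the composite type built above corresponds roughly to the
   following C declaration (a sketch only; the exact element types come from
   the builtin_type entries used above):

     union __spu_builtin_type_vec128
     {
       unsigned __int128 uint128;
       long long v2_int64[2];
       int v4_int32[4];
       short v8_int16[8];
       signed char v16_int8[16];
       double v2_double[2];
       float v4_float[4];
     };
*/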
92
93 /* The list of available "info spu " commands. */
94 static struct cmd_list_element *infospucmdlist = NULL;
95
96 /* Registers. */
97
98 static const char *
99 spu_register_name (struct gdbarch *gdbarch, int reg_nr)
100 {
101 static char *register_names[] =
102 {
103 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
104 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
105 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
106 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
107 "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
108 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
109 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
110 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
111 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
112 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
113 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
114 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
115 "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
116 "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
117 "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
118 "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
119 "id", "pc", "sp", "fpscr", "srr0", "lslr", "decr", "decr_status"
120 };
121
122 if (reg_nr < 0)
123 return NULL;
124 if (reg_nr >= sizeof register_names / sizeof *register_names)
125 return NULL;
126
127 return register_names[reg_nr];
128 }
129
130 static struct type *
131 spu_register_type (struct gdbarch *gdbarch, int reg_nr)
132 {
133 if (reg_nr < SPU_NUM_GPRS)
134 return spu_builtin_type_vec128 (gdbarch);
135
136 switch (reg_nr)
137 {
138 case SPU_ID_REGNUM:
139 return builtin_type_uint32;
140
141 case SPU_PC_REGNUM:
142 return builtin_type (gdbarch)->builtin_func_ptr;
143
144 case SPU_SP_REGNUM:
145 return builtin_type (gdbarch)->builtin_data_ptr;
146
147 case SPU_FPSCR_REGNUM:
148 return builtin_type_uint128;
149
150 case SPU_SRR0_REGNUM:
151 return builtin_type_uint32;
152
153 case SPU_LSLR_REGNUM:
154 return builtin_type_uint32;
155
156 case SPU_DECR_REGNUM:
157 return builtin_type_uint32;
158
159 case SPU_DECR_STATUS_REGNUM:
160 return builtin_type_uint32;
161
162 default:
163 internal_error (__FILE__, __LINE__, _("invalid regnum"));
164 }
165 }
166
167 /* Pseudo registers for preferred slots - stack pointer. */
168
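/* Helper for reading a pseudo register that is backed by the target's
   TARGET_OBJECT_SPU interface: fetch the "<id>/REGNAME" annex for the SPU
   context identified by the ID register of REGCACHE, parse the returned
   hex string, and store it as a 32-bit value in BUF. */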
169 static void
170 spu_pseudo_register_read_spu (struct regcache *regcache, const char *regname,
171 gdb_byte *buf)
172 {
173 gdb_byte reg[32];
174 char annex[32];
175 ULONGEST id;
176
177 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
178 xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
179 memset (reg, 0, sizeof reg);
180 target_read (&current_target, TARGET_OBJECT_SPU, annex,
181 reg, 0, sizeof reg);
182
183 store_unsigned_integer (buf, 4, strtoulst (reg, NULL, 16));
184 }
185
186 static void
187 spu_pseudo_register_read (struct gdbarch *gdbarch, struct regcache *regcache,
188 int regnum, gdb_byte *buf)
189 {
190 gdb_byte reg[16];
191 char annex[32];
192 ULONGEST id;
193
194 switch (regnum)
195 {
196 case SPU_SP_REGNUM:
197 regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
198 memcpy (buf, reg, 4);
199 break;
200
201 case SPU_FPSCR_REGNUM:
202 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
203 xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
204 target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
205 break;
206
207 case SPU_SRR0_REGNUM:
208 spu_pseudo_register_read_spu (regcache, "srr0", buf);
209 break;
210
211 case SPU_LSLR_REGNUM:
212 spu_pseudo_register_read_spu (regcache, "lslr", buf);
213 break;
214
215 case SPU_DECR_REGNUM:
216 spu_pseudo_register_read_spu (regcache, "decr", buf);
217 break;
218
219 case SPU_DECR_STATUS_REGNUM:
220 spu_pseudo_register_read_spu (regcache, "decr_status", buf);
221 break;
222
223 default:
224 internal_error (__FILE__, __LINE__, _("invalid regnum"));
225 }
226 }
227
228 static void
229 spu_pseudo_register_write_spu (struct regcache *regcache, const char *regname,
230 const gdb_byte *buf)
231 {
232 gdb_byte reg[32];
233 char annex[32];
234 ULONGEST id;
235
236 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
237 xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
238 xsnprintf (reg, sizeof reg, "0x%s",
239 phex_nz (extract_unsigned_integer (buf, 4), 4));
240 target_write (&current_target, TARGET_OBJECT_SPU, annex,
241 reg, 0, strlen (reg));
242 }
243
244 static void
245 spu_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
246 int regnum, const gdb_byte *buf)
247 {
248 gdb_byte reg[16];
249 char annex[32];
250 ULONGEST id;
251
252 switch (regnum)
253 {
254 case SPU_SP_REGNUM:
255 regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
256 memcpy (reg, buf, 4);
257 regcache_raw_write (regcache, SPU_RAW_SP_REGNUM, reg);
258 break;
259
260 case SPU_FPSCR_REGNUM:
261 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
262 xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
263 target_write (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
264 break;
265
266 case SPU_SRR0_REGNUM:
267 spu_pseudo_register_write_spu (regcache, "srr0", buf);
268 break;
269
270 case SPU_LSLR_REGNUM:
271 spu_pseudo_register_write_spu (regcache, "lslr", buf);
272 break;
273
274 case SPU_DECR_REGNUM:
275 spu_pseudo_register_write_spu (regcache, "decr", buf);
276 break;
277
278 case SPU_DECR_STATUS_REGNUM:
279 spu_pseudo_register_write_spu (regcache, "decr_status", buf);
280 break;
281
282 default:
283 internal_error (__FILE__, __LINE__, _("invalid regnum"));
284 }
285 }
286
287 /* Value conversion -- access scalar values at the preferred slot. */
288
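/* SPU registers are 128 bits wide, but scalar values only occupy the
   "preferred slot": the most significant 4-byte word of the register,
   with values shorter than a word right-justified within it.  A 2-byte
   short, for example, lives in bytes 2-3 of the 16-byte register, which
   is why spu_value_from_register below offsets small scalars by 4 - len. */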
289 static struct value *
290 spu_value_from_register (struct type *type, int regnum,
291 struct frame_info *frame)
292 {
293 struct value *value = default_value_from_register (type, regnum, frame);
294 int len = TYPE_LENGTH (type);
295
296 if (regnum < SPU_NUM_GPRS && len < 16)
297 {
298 int preferred_slot = len < 4 ? 4 - len : 0;
299 set_value_offset (value, preferred_slot);
300 }
301
302 return value;
303 }
304
305 /* Register groups. */
306
307 static int
308 spu_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
309 struct reggroup *group)
310 {
311 /* Registers displayed via 'info regs'. */
312 if (group == general_reggroup)
313 return 1;
314
315 /* Registers displayed via 'info float'. */
316 if (group == float_reggroup)
317 return 0;
318
319 /* Registers that need to be saved/restored in order to
320 push or pop frames. */
321 if (group == save_reggroup || group == restore_reggroup)
322 return 1;
323
324 return default_register_reggroup_p (gdbarch, regnum, group);
325 }
326
327 /* Address conversion. */
328
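/* Local store addresses wrap around at the local store limit.  Pointers
   and address-sized integers are therefore masked with the value of the
   LSLR register (or with the hard-wired local store size when no target
   is available), so that, for instance, an address one byte past the end
   of a 256 KB local store maps back to address 1. */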
329 static CORE_ADDR
330 spu_pointer_to_address (struct gdbarch *gdbarch,
331 struct type *type, const gdb_byte *buf)
332 {
333 ULONGEST addr = extract_unsigned_integer (buf, TYPE_LENGTH (type));
334 ULONGEST lslr = SPU_LS_SIZE - 1; /* Hard-wired LS size. */
335
336 if (target_has_registers && target_has_stack && target_has_memory)
337 lslr = get_frame_register_unsigned (get_selected_frame (NULL),
338 SPU_LSLR_REGNUM);
339
340 return addr & lslr;
341 }
342
343 static CORE_ADDR
344 spu_integer_to_address (struct gdbarch *gdbarch,
345 struct type *type, const gdb_byte *buf)
346 {
347 ULONGEST addr = unpack_long (type, buf);
348 ULONGEST lslr = SPU_LS_SIZE - 1; /* Hard-wired LS size. */
349
350 if (target_has_registers && target_has_stack && target_has_memory)
351 lslr = get_frame_register_unsigned (get_selected_frame (NULL),
352 SPU_LSLR_REGNUM);
353
354 return addr & lslr;
355 }
356
357
358 /* Decoding SPU instructions. */
359
360 enum
361 {
362 op_lqd = 0x34,
363 op_lqx = 0x3c4,
364 op_lqa = 0x61,
365 op_lqr = 0x67,
366 op_stqd = 0x24,
367 op_stqx = 0x144,
368 op_stqa = 0x41,
369 op_stqr = 0x47,
370
371 op_il = 0x081,
372 op_ila = 0x21,
373 op_a = 0x0c0,
374 op_ai = 0x1c,
375
376 op_selb = 0x4,
377
378 op_br = 0x64,
379 op_bra = 0x60,
380 op_brsl = 0x66,
381 op_brasl = 0x62,
382 op_brnz = 0x42,
383 op_brz = 0x40,
384 op_brhnz = 0x46,
385 op_brhz = 0x44,
386 op_bi = 0x1a8,
387 op_bisl = 0x1a9,
388 op_biz = 0x128,
389 op_binz = 0x129,
390 op_bihz = 0x12a,
391 op_bihnz = 0x12b,
392 };
393
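/* Helpers to match the SPU instruction formats used by the opcodes above.
   Bit numbers follow the SPU ISA convention (bit 0 is the most significant
   bit of the 32-bit instruction word); this layout is derived directly from
   the shift/mask operations below:

     RR   :  op[0-10]  rb[11-17]  ra[18-24]  rt[25-31]
     RRR  :  op[0-3]   rt[4-10]   rb[11-17]  ra[18-24]  rc[25-31]
     RI7  :  op[0-10]  i7[11-17]  ra[18-24]  rt[25-31]
     RI10 :  op[0-7]   i10[8-17]  ra[18-24]  rt[25-31]
     RI16 :  op[0-8]   i16[9-24]  rt[25-31]
     RI18 :  op[0-6]   i18[7-24]  rt[25-31]

   Each helper returns non-zero and fills in its operand fields (immediates
   sign-extended) if INSN matches opcode OP in the given format. */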
394 static int
395 is_rr (unsigned int insn, int op, int *rt, int *ra, int *rb)
396 {
397 if ((insn >> 21) == op)
398 {
399 *rt = insn & 127;
400 *ra = (insn >> 7) & 127;
401 *rb = (insn >> 14) & 127;
402 return 1;
403 }
404
405 return 0;
406 }
407
408 static int
409 is_rrr (unsigned int insn, int op, int *rt, int *ra, int *rb, int *rc)
410 {
411 if ((insn >> 28) == op)
412 {
413 *rt = (insn >> 21) & 127;
414 *ra = (insn >> 7) & 127;
415 *rb = (insn >> 14) & 127;
416 *rc = insn & 127;
417 return 1;
418 }
419
420 return 0;
421 }
422
423 static int
424 is_ri7 (unsigned int insn, int op, int *rt, int *ra, int *i7)
425 {
426 if ((insn >> 21) == op)
427 {
428 *rt = insn & 127;
429 *ra = (insn >> 7) & 127;
430 *i7 = (((insn >> 14) & 127) ^ 0x40) - 0x40;
431 return 1;
432 }
433
434 return 0;
435 }
436
437 static int
438 is_ri10 (unsigned int insn, int op, int *rt, int *ra, int *i10)
439 {
440 if ((insn >> 24) == op)
441 {
442 *rt = insn & 127;
443 *ra = (insn >> 7) & 127;
444 *i10 = (((insn >> 14) & 0x3ff) ^ 0x200) - 0x200;
445 return 1;
446 }
447
448 return 0;
449 }
450
451 static int
452 is_ri16 (unsigned int insn, int op, int *rt, int *i16)
453 {
454 if ((insn >> 23) == op)
455 {
456 *rt = insn & 127;
457 *i16 = (((insn >> 7) & 0xffff) ^ 0x8000) - 0x8000;
458 return 1;
459 }
460
461 return 0;
462 }
463
464 static int
465 is_ri18 (unsigned int insn, int op, int *rt, int *i18)
466 {
467 if ((insn >> 25) == op)
468 {
469 *rt = insn & 127;
470 *i18 = (((insn >> 7) & 0x3ffff) ^ 0x20000) - 0x20000;
471 return 1;
472 }
473
474 return 0;
475 }
476
477 static int
478 is_branch (unsigned int insn, int *offset, int *reg)
479 {
480 int rt, i7, i16;
481
482 if (is_ri16 (insn, op_br, &rt, &i16)
483 || is_ri16 (insn, op_brsl, &rt, &i16)
484 || is_ri16 (insn, op_brnz, &rt, &i16)
485 || is_ri16 (insn, op_brz, &rt, &i16)
486 || is_ri16 (insn, op_brhnz, &rt, &i16)
487 || is_ri16 (insn, op_brhz, &rt, &i16))
488 {
489 *reg = SPU_PC_REGNUM;
490 *offset = i16 << 2;
491 return 1;
492 }
493
494 if (is_ri16 (insn, op_bra, &rt, &i16)
495 || is_ri16 (insn, op_brasl, &rt, &i16))
496 {
497 *reg = -1;
498 *offset = i16 << 2;
499 return 1;
500 }
501
502 if (is_ri7 (insn, op_bi, &rt, reg, &i7)
503 || is_ri7 (insn, op_bisl, &rt, reg, &i7)
504 || is_ri7 (insn, op_biz, &rt, reg, &i7)
505 || is_ri7 (insn, op_binz, &rt, reg, &i7)
506 || is_ri7 (insn, op_bihz, &rt, reg, &i7)
507 || is_ri7 (insn, op_bihnz, &rt, reg, &i7))
508 {
509 *offset = 0;
510 return 1;
511 }
512
513 return 0;
514 }
515
516
517 /* Prolog parsing. */
518
519 struct spu_prologue_data
520 {
521 /* Stack frame size. -1 if analysis was unsuccessful. */
522 int size;
523
524 /* How to find the CFA. The CFA is equal to SP at function entry. */
525 int cfa_reg;
526 int cfa_offset;
527
528 /* Offset relative to CFA where a register is saved. -1 if invalid. */
529 int reg_offset[SPU_NUM_GPRS];
530 };
531
532 static CORE_ADDR
533 spu_analyze_prologue (CORE_ADDR start_pc, CORE_ADDR end_pc,
534 struct spu_prologue_data *data)
535 {
536 int found_sp = 0;
537 int found_fp = 0;
538 int found_lr = 0;
539 int reg_immed[SPU_NUM_GPRS];
540 gdb_byte buf[16];
541 CORE_ADDR prolog_pc = start_pc;
542 CORE_ADDR pc;
543 int i;
544
545
546 /* Initialize DATA to default values. */
547 data->size = -1;
548
549 data->cfa_reg = SPU_RAW_SP_REGNUM;
550 data->cfa_offset = 0;
551
552 for (i = 0; i < SPU_NUM_GPRS; i++)
553 data->reg_offset[i] = -1;
554
555 /* Set up REG_IMMED array. This is non-zero for a register if we know its
556 preferred slot currently holds this immediate value. */
557 for (i = 0; i < SPU_NUM_GPRS; i++)
558 reg_immed[i] = 0;
559
560 /* Scan instructions until the first branch.
561
562 The following instructions are important prolog components:
563
564 - The first instruction to set up the stack pointer.
565 - The first instruction to set up the frame pointer.
566 - The first instruction to save the link register.
567
568 We return the instruction after the latest of these three,
569 or the incoming PC if none is found. The first instruction
570 to set up the stack pointer also defines the frame size.
571
572 Note that instructions saving incoming arguments to their stack
573 slots are not counted as important, because they are hard to
574 identify with certainty. This should not matter much, because
575 arguments are relevant only in code compiled with debug data,
576 and in such code the GDB core will advance until the first source
577 line anyway, using SAL data.
578
579 For purposes of stack unwinding, we analyze the following types
580 of instructions in addition:
581
582 - Any instruction adding to the current frame pointer.
583 - Any instruction loading an immediate constant into a register.
584 - Any instruction storing a register onto the stack.
585
586 These are used to compute the CFA and REG_OFFSET output. */
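/* For illustration, a hypothetical prologue for a 48-byte frame
   (assumed assembly, not taken from any particular compiler):

     stqd  $0, 16($1)     # save link register 16 bytes above the CFA
     stqd  $1, -48($1)    # store the back chain
     ai    $1, $1, -48    # allocate the frame

   The scan below would record size = 48, cfa_offset = 48 (the CFA lies
   48 bytes above the adjusted SP), and reg_offset[0] = -16, so the
   unwinder later finds the saved link register at CFA + 16. */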
587
588 for (pc = start_pc; pc < end_pc; pc += 4)
589 {
590 unsigned int insn;
591 int rt, ra, rb, rc, immed;
592
593 if (target_read_memory (pc, buf, 4))
594 break;
595 insn = extract_unsigned_integer (buf, 4);
596
597 /* AI is the typical instruction to set up a stack frame.
598 It is also used to initialize the frame pointer. */
599 if (is_ri10 (insn, op_ai, &rt, &ra, &immed))
600 {
601 if (rt == data->cfa_reg && ra == data->cfa_reg)
602 data->cfa_offset -= immed;
603
604 if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
605 && !found_sp)
606 {
607 found_sp = 1;
608 prolog_pc = pc + 4;
609
610 data->size = -immed;
611 }
612 else if (rt == SPU_FP_REGNUM && ra == SPU_RAW_SP_REGNUM
613 && !found_fp)
614 {
615 found_fp = 1;
616 prolog_pc = pc + 4;
617
618 data->cfa_reg = SPU_FP_REGNUM;
619 data->cfa_offset -= immed;
620 }
621 }
622
623 /* A is used to set up stack frames of size >= 512 bytes.
624 If we have tracked the contents of the addend register,
625 we can handle this as well. */
626 else if (is_rr (insn, op_a, &rt, &ra, &rb))
627 {
628 if (rt == data->cfa_reg && ra == data->cfa_reg)
629 {
630 if (reg_immed[rb] != 0)
631 data->cfa_offset -= reg_immed[rb];
632 else
633 data->cfa_reg = -1; /* We don't know the CFA any more. */
634 }
635
636 if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
637 && !found_sp)
638 {
639 found_sp = 1;
640 prolog_pc = pc + 4;
641
642 if (reg_immed[rb] != 0)
643 data->size = -reg_immed[rb];
644 }
645 }
646
647 /* We need to track IL and ILA used to load immediate constants
648 in case they are later used as input to an A instruction. */
649 else if (is_ri16 (insn, op_il, &rt, &immed))
650 {
651 reg_immed[rt] = immed;
652
653 if (rt == SPU_RAW_SP_REGNUM && !found_sp)
654 found_sp = 1;
655 }
656
657 else if (is_ri18 (insn, op_ila, &rt, &immed))
658 {
659 reg_immed[rt] = immed & 0x3ffff;
660
661 if (rt == SPU_RAW_SP_REGNUM && !found_sp)
662 found_sp = 1;
663 }
664
665 /* STQD is used to save registers to the stack. */
666 else if (is_ri10 (insn, op_stqd, &rt, &ra, &immed))
667 {
668 if (ra == data->cfa_reg)
669 data->reg_offset[rt] = data->cfa_offset - (immed << 4);
670
671 if (ra == data->cfa_reg && rt == SPU_LR_REGNUM
672 && !found_lr)
673 {
674 found_lr = 1;
675 prolog_pc = pc + 4;
676 }
677 }
678
679 /* _start uses SELB to set up the stack pointer. */
680 else if (is_rrr (insn, op_selb, &rt, &ra, &rb, &rc))
681 {
682 if (rt == SPU_RAW_SP_REGNUM && !found_sp)
683 found_sp = 1;
684 }
685
686 /* We terminate if we find a branch. */
687 else if (is_branch (insn, &immed, &ra))
688 break;
689 }
690
691
692 /* If we successfully parsed until here, and didn't find any instruction
693 modifying SP, we assume we have a frameless function. */
694 if (!found_sp)
695 data->size = 0;
696
697 /* Return cooked instead of raw SP. */
698 if (data->cfa_reg == SPU_RAW_SP_REGNUM)
699 data->cfa_reg = SPU_SP_REGNUM;
700
701 return prolog_pc;
702 }
703
704 /* Return the first instruction after the prologue starting at PC. */
705 static CORE_ADDR
706 spu_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
707 {
708 struct spu_prologue_data data;
709 return spu_analyze_prologue (pc, (CORE_ADDR)-1, &data);
710 }
711
712 /* Return the frame pointer in use at address PC. */
713 static void
714 spu_virtual_frame_pointer (struct gdbarch *gdbarch, CORE_ADDR pc,
715 int *reg, LONGEST *offset)
716 {
717 struct spu_prologue_data data;
718 spu_analyze_prologue (pc, (CORE_ADDR)-1, &data);
719
720 if (data.size != -1 && data.cfa_reg != -1)
721 {
722 /* The 'frame pointer' address is CFA minus frame size. */
723 *reg = data.cfa_reg;
724 *offset = data.cfa_offset - data.size;
725 }
726 else
727 {
728 /* ??? We don't really know ... */
729 *reg = SPU_SP_REGNUM;
730 *offset = 0;
731 }
732 }
733
734 /* Return true if we are in the function's epilogue, i.e. after the
735 instruction that destroyed the function's stack frame.
736
737 1) scan forward from the point of execution:
738 a) If you find an instruction that modifies the stack pointer
739 or transfers control (except a return), execution is not in
740 an epilogue, return.
741 b) Stop scanning if you find a return instruction or reach the
742 end of the function or reach the hard limit for the size of
743 an epilogue.
744 2) scan backward from the point of execution:
745 a) If you find an instruction that modifies the stack pointer,
746 execution *is* in an epilogue, return.
747 b) Stop scanning if you reach an instruction that transfers
748 control or the beginning of the function or reach the hard
749 limit for the size of an epilogue. */
750
751 static int
752 spu_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
753 {
754 CORE_ADDR scan_pc, func_start, func_end, epilogue_start, epilogue_end;
755 bfd_byte buf[4];
756 unsigned int insn;
757 int rt, ra, rb, rc, immed;
758
759 /* Find the search limits based on function boundaries and hard limit.
760 We assume the epilogue can be up to 64 instructions long. */
761
762 const int spu_max_epilogue_size = 64 * 4;
763
764 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
765 return 0;
766
767 if (pc - func_start < spu_max_epilogue_size)
768 epilogue_start = func_start;
769 else
770 epilogue_start = pc - spu_max_epilogue_size;
771
772 if (func_end - pc < spu_max_epilogue_size)
773 epilogue_end = func_end;
774 else
775 epilogue_end = pc + spu_max_epilogue_size;
776
777 /* Scan forward until next 'bi $0'. */
778
779 for (scan_pc = pc; scan_pc < epilogue_end; scan_pc += 4)
780 {
781 if (target_read_memory (scan_pc, buf, 4))
782 return 0;
783 insn = extract_unsigned_integer (buf, 4);
784
785 if (is_branch (insn, &immed, &ra))
786 {
787 if (immed == 0 && ra == SPU_LR_REGNUM)
788 break;
789
790 return 0;
791 }
792
793 if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
794 || is_rr (insn, op_a, &rt, &ra, &rb)
795 || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
796 {
797 if (rt == SPU_RAW_SP_REGNUM)
798 return 0;
799 }
800 }
801
802 if (scan_pc >= epilogue_end)
803 return 0;
804
805 /* Scan backward until adjustment to stack pointer (R1). */
806
807 for (scan_pc = pc - 4; scan_pc >= epilogue_start; scan_pc -= 4)
808 {
809 if (target_read_memory (scan_pc, buf, 4))
810 return 0;
811 insn = extract_unsigned_integer (buf, 4);
812
813 if (is_branch (insn, &immed, &ra))
814 return 0;
815
816 if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
817 || is_rr (insn, op_a, &rt, &ra, &rb)
818 || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
819 {
820 if (rt == SPU_RAW_SP_REGNUM)
821 return 1;
822 }
823 }
824
825 return 0;
826 }
827
828
829 /* Normal stack frames. */
830
831 struct spu_unwind_cache
832 {
833 CORE_ADDR func;
834 CORE_ADDR frame_base;
835 CORE_ADDR local_base;
836
837 struct trad_frame_saved_reg *saved_regs;
838 };
839
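/* Fill in and return the unwind cache for THIS_FRAME.  Frame base and
   saved-register locations come from prologue analysis when it succeeds;
   otherwise we fall back to following the back chain word stored at the
   bottom of the frame. */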
840 static struct spu_unwind_cache *
841 spu_frame_unwind_cache (struct frame_info *this_frame,
842 void **this_prologue_cache)
843 {
844 struct spu_unwind_cache *info;
845 struct spu_prologue_data data;
846 gdb_byte buf[16];
847
848 if (*this_prologue_cache)
849 return *this_prologue_cache;
850
851 info = FRAME_OBSTACK_ZALLOC (struct spu_unwind_cache);
852 *this_prologue_cache = info;
853 info->saved_regs = trad_frame_alloc_saved_regs (this_frame);
854 info->frame_base = 0;
855 info->local_base = 0;
856
857 /* Find the start of the current function, and analyze its prologue. */
858 info->func = get_frame_func (this_frame);
859 if (info->func == 0)
860 {
861 /* Fall back to using the current PC as frame ID. */
862 info->func = get_frame_pc (this_frame);
863 data.size = -1;
864 }
865 else
866 spu_analyze_prologue (info->func, get_frame_pc (this_frame), &data);
867
868
869 /* If successful, use prologue analysis data. */
870 if (data.size != -1 && data.cfa_reg != -1)
871 {
872 CORE_ADDR cfa;
873 int i;
874
875 /* Determine CFA via unwound CFA_REG plus CFA_OFFSET. */
876 get_frame_register (this_frame, data.cfa_reg, buf);
877 cfa = extract_unsigned_integer (buf, 4) + data.cfa_offset;
878
879 /* Call-saved register slots. */
880 for (i = 0; i < SPU_NUM_GPRS; i++)
881 if (i == SPU_LR_REGNUM
882 || (i >= SPU_SAVED1_REGNUM && i <= SPU_SAVEDN_REGNUM))
883 if (data.reg_offset[i] != -1)
884 info->saved_regs[i].addr = cfa - data.reg_offset[i];
885
886 /* Frame bases. */
887 info->frame_base = cfa;
888 info->local_base = cfa - data.size;
889 }
890
891 /* Otherwise, fall back to reading the backchain link. */
892 else
893 {
894 CORE_ADDR reg;
895 LONGEST backchain;
896 int status;
897
898 /* Get the backchain. */
899 reg = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
900 status = safe_read_memory_integer (reg, 4, &backchain);
901
902 /* A zero backchain terminates the frame chain. Also, sanity
903 check against the local store size limit. */
904 if (status && backchain > 0 && backchain < SPU_LS_SIZE)
905 {
906 /* Assume the link register is saved into its slot. */
907 if (backchain + 16 < SPU_LS_SIZE)
908 info->saved_regs[SPU_LR_REGNUM].addr = backchain + 16;
909
910 /* Frame bases. */
911 info->frame_base = backchain;
912 info->local_base = reg;
913 }
914 }
915
916 /* If we didn't find a frame, we cannot determine SP / return address. */
917 if (info->frame_base == 0)
918 return info;
919
920 /* The previous SP is equal to the CFA. */
921 trad_frame_set_value (info->saved_regs, SPU_SP_REGNUM, info->frame_base);
922
923 /* Read full contents of the unwound link register in order to
924 be able to determine the return address. */
925 if (trad_frame_addr_p (info->saved_regs, SPU_LR_REGNUM))
926 target_read_memory (info->saved_regs[SPU_LR_REGNUM].addr, buf, 16);
927 else
928 get_frame_register (this_frame, SPU_LR_REGNUM, buf);
929
930 /* Normally, the return address is contained in the slot 0 of the
931 link register, and slots 1-3 are zero. For an overlay return,
932 slot 0 contains the address of the overlay manager return stub,
933 slot 1 contains the partition number of the overlay section to
934 be returned to, and slot 2 contains the return address within
935 that section. Return the latter address in that case. */
936 if (extract_unsigned_integer (buf + 8, 4) != 0)
937 trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
938 extract_unsigned_integer (buf + 8, 4));
939 else
940 trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
941 extract_unsigned_integer (buf, 4));
942
943 return info;
944 }
945
946 static void
947 spu_frame_this_id (struct frame_info *this_frame,
948 void **this_prologue_cache, struct frame_id *this_id)
949 {
950 struct spu_unwind_cache *info =
951 spu_frame_unwind_cache (this_frame, this_prologue_cache);
952
953 if (info->frame_base == 0)
954 return;
955
956 *this_id = frame_id_build (info->frame_base, info->func);
957 }
958
959 static struct value *
960 spu_frame_prev_register (struct frame_info *this_frame,
961 void **this_prologue_cache, int regnum)
962 {
963 struct spu_unwind_cache *info
964 = spu_frame_unwind_cache (this_frame, this_prologue_cache);
965
966 /* Special-case the stack pointer. */
967 if (regnum == SPU_RAW_SP_REGNUM)
968 regnum = SPU_SP_REGNUM;
969
970 return trad_frame_get_prev_register (this_frame, info->saved_regs, regnum);
971 }
972
973 static const struct frame_unwind spu_frame_unwind = {
974 NORMAL_FRAME,
975 spu_frame_this_id,
976 spu_frame_prev_register,
977 NULL,
978 default_frame_sniffer
979 };
980
981 static CORE_ADDR
982 spu_frame_base_address (struct frame_info *this_frame, void **this_cache)
983 {
984 struct spu_unwind_cache *info
985 = spu_frame_unwind_cache (this_frame, this_cache);
986 return info->local_base;
987 }
988
989 static const struct frame_base spu_frame_base = {
990 &spu_frame_unwind,
991 spu_frame_base_address,
992 spu_frame_base_address,
993 spu_frame_base_address
994 };
995
996 static CORE_ADDR
997 spu_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
998 {
999 CORE_ADDR pc = frame_unwind_register_unsigned (next_frame, SPU_PC_REGNUM);
1000 /* Mask off interrupt enable bit. */
1001 return pc & -4;
1002 }
1003
1004 static CORE_ADDR
1005 spu_unwind_sp (struct gdbarch *gdbarch, struct frame_info *next_frame)
1006 {
1007 return frame_unwind_register_unsigned (next_frame, SPU_SP_REGNUM);
1008 }
1009
1010 static CORE_ADDR
1011 spu_read_pc (struct regcache *regcache)
1012 {
1013 ULONGEST pc;
1014 regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &pc);
1015 /* Mask off interrupt enable bit. */
1016 return pc & -4;
1017 }
1018
1019 static void
1020 spu_write_pc (struct regcache *regcache, CORE_ADDR pc)
1021 {
1022 /* Keep interrupt enabled state unchanged. */
1023 ULONGEST old_pc;
1024 regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &old_pc);
1025 regcache_cooked_write_unsigned (regcache, SPU_PC_REGNUM,
1026 (pc & -4) | (old_pc & 3));
1027 }
1028
1029
1030 /* Function calling convention. */
1031
1032 static CORE_ADDR
1033 spu_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1034 {
1035 return sp & ~15;
1036 }
1037
1038 static CORE_ADDR
1039 spu_push_dummy_code (struct gdbarch *gdbarch, CORE_ADDR sp, CORE_ADDR funaddr,
1040 struct value **args, int nargs, struct type *value_type,
1041 CORE_ADDR *real_pc, CORE_ADDR *bp_addr,
1042 struct regcache *regcache)
1043 {
1044 /* Allocate space sufficient for a breakpoint, keeping the stack aligned. */
1045 sp = (sp - 4) & ~15;
1046 /* Store the address of that breakpoint.  */
1047 *bp_addr = sp;
1048 /* The call starts at the callee's entry point. */
1049 *real_pc = funaddr;
1050
1051 return sp;
1052 }
1053
1054 static int
1055 spu_scalar_value_p (struct type *type)
1056 {
1057 switch (TYPE_CODE (type))
1058 {
1059 case TYPE_CODE_INT:
1060 case TYPE_CODE_ENUM:
1061 case TYPE_CODE_RANGE:
1062 case TYPE_CODE_CHAR:
1063 case TYPE_CODE_BOOL:
1064 case TYPE_CODE_PTR:
1065 case TYPE_CODE_REF:
1066 return TYPE_LENGTH (type) <= 16;
1067
1068 default:
1069 return 0;
1070 }
1071 }
1072
1073 static void
1074 spu_value_to_regcache (struct regcache *regcache, int regnum,
1075 struct type *type, const gdb_byte *in)
1076 {
1077 int len = TYPE_LENGTH (type);
1078
1079 if (spu_scalar_value_p (type))
1080 {
1081 int preferred_slot = len < 4 ? 4 - len : 0;
1082 regcache_cooked_write_part (regcache, regnum, preferred_slot, len, in);
1083 }
1084 else
1085 {
1086 while (len >= 16)
1087 {
1088 regcache_cooked_write (regcache, regnum++, in);
1089 in += 16;
1090 len -= 16;
1091 }
1092
1093 if (len > 0)
1094 regcache_cooked_write_part (regcache, regnum, 0, len, in);
1095 }
1096 }
1097
1098 static void
1099 spu_regcache_to_value (struct regcache *regcache, int regnum,
1100 struct type *type, gdb_byte *out)
1101 {
1102 int len = TYPE_LENGTH (type);
1103
1104 if (spu_scalar_value_p (type))
1105 {
1106 int preferred_slot = len < 4 ? 4 - len : 0;
1107 regcache_cooked_read_part (regcache, regnum, preferred_slot, len, out);
1108 }
1109 else
1110 {
1111 while (len >= 16)
1112 {
1113 regcache_cooked_read (regcache, regnum++, out);
1114 out += 16;
1115 len -= 16;
1116 }
1117
1118 if (len > 0)
1119 regcache_cooked_read_part (regcache, regnum, 0, len, out);
1120 }
1121 }
1122
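/* Set up the stack and registers for a dummy call.  Arguments are passed
   in 16-byte register slots starting at SPU_ARG1_REGNUM (with the struct
   return pointer, if any, consuming the first slot); arguments that do not
   fit in registers are placed on the stack, followed by a 32-byte frame
   header holding the back chain. */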
1123 static CORE_ADDR
1124 spu_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1125 struct regcache *regcache, CORE_ADDR bp_addr,
1126 int nargs, struct value **args, CORE_ADDR sp,
1127 int struct_return, CORE_ADDR struct_addr)
1128 {
1129 CORE_ADDR sp_delta;
1130 int i;
1131 int regnum = SPU_ARG1_REGNUM;
1132 int stack_arg = -1;
1133 gdb_byte buf[16];
1134
1135 /* Set the return address. */
1136 memset (buf, 0, sizeof buf);
1137 store_unsigned_integer (buf, 4, bp_addr);
1138 regcache_cooked_write (regcache, SPU_LR_REGNUM, buf);
1139
1140 /* If STRUCT_RETURN is true, then the struct return address (in
1141 STRUCT_ADDR) will consume the first argument-passing register.
1142 Both adjust the register count and store that value. */
1143 if (struct_return)
1144 {
1145 memset (buf, 0, sizeof buf);
1146 store_unsigned_integer (buf, 4, struct_addr);
1147 regcache_cooked_write (regcache, regnum++, buf);
1148 }
1149
1150 /* Fill in argument registers. */
1151 for (i = 0; i < nargs; i++)
1152 {
1153 struct value *arg = args[i];
1154 struct type *type = check_typedef (value_type (arg));
1155 const gdb_byte *contents = value_contents (arg);
1156 int len = TYPE_LENGTH (type);
1157 int n_regs = align_up (len, 16) / 16;
1158
1159 /* If the argument doesn't wholly fit into registers, it and
1160 all subsequent arguments go to the stack. */
1161 if (regnum + n_regs - 1 > SPU_ARGN_REGNUM)
1162 {
1163 stack_arg = i;
1164 break;
1165 }
1166
1167 spu_value_to_regcache (regcache, regnum, type, contents);
1168 regnum += n_regs;
1169 }
1170
1171 /* Overflow arguments go to the stack. */
1172 if (stack_arg != -1)
1173 {
1174 CORE_ADDR ap;
1175
1176 /* Allocate all required stack size. */
1177 for (i = stack_arg; i < nargs; i++)
1178 {
1179 struct type *type = check_typedef (value_type (args[i]));
1180 sp -= align_up (TYPE_LENGTH (type), 16);
1181 }
1182
1183 /* Fill in stack arguments. */
1184 ap = sp;
1185 for (i = stack_arg; i < nargs; i++)
1186 {
1187 struct value *arg = args[i];
1188 struct type *type = check_typedef (value_type (arg));
1189 int len = TYPE_LENGTH (type);
1190 int preferred_slot;
1191
1192 if (spu_scalar_value_p (type))
1193 preferred_slot = len < 4 ? 4 - len : 0;
1194 else
1195 preferred_slot = 0;
1196
1197 target_write_memory (ap + preferred_slot, value_contents (arg), len);
1198 ap += align_up (TYPE_LENGTH (type), 16);
1199 }
1200 }
1201
1202 /* Allocate stack frame header. */
1203 sp -= 32;
1204
1205 /* Store stack back chain. */
1206 regcache_cooked_read (regcache, SPU_RAW_SP_REGNUM, buf);
1207 target_write_memory (sp, buf, 16);
1208
1209 /* Finally, update all slots of the SP register. */
1210 sp_delta = sp - extract_unsigned_integer (buf, 4);
1211 for (i = 0; i < 4; i++)
1212 {
1213 CORE_ADDR sp_slot = extract_unsigned_integer (buf + 4*i, 4);
1214 store_unsigned_integer (buf + 4*i, 4, sp_slot + sp_delta);
1215 }
1216 regcache_cooked_write (regcache, SPU_RAW_SP_REGNUM, buf);
1217
1218 return sp;
1219 }
1220
1221 static struct frame_id
1222 spu_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1223 {
1224 CORE_ADDR pc = get_frame_register_unsigned (this_frame, SPU_PC_REGNUM);
1225 CORE_ADDR sp = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
1226 return frame_id_build (sp, pc & -4);
1227 }
1228
1229 /* Function return value access. */
1230
1231 static enum return_value_convention
1232 spu_return_value (struct gdbarch *gdbarch, struct type *func_type,
1233 struct type *type, struct regcache *regcache,
1234 gdb_byte *out, const gdb_byte *in)
1235 {
1236 enum return_value_convention rvc;
1237
1238 if (TYPE_LENGTH (type) <= (SPU_ARGN_REGNUM - SPU_ARG1_REGNUM + 1) * 16)
1239 rvc = RETURN_VALUE_REGISTER_CONVENTION;
1240 else
1241 rvc = RETURN_VALUE_STRUCT_CONVENTION;
1242
1243 if (in)
1244 {
1245 switch (rvc)
1246 {
1247 case RETURN_VALUE_REGISTER_CONVENTION:
1248 spu_value_to_regcache (regcache, SPU_ARG1_REGNUM, type, in);
1249 break;
1250
1251 case RETURN_VALUE_STRUCT_CONVENTION:
1252 error (_("Cannot set function return value."));
1253 break;
1254 }
1255 }
1256 else if (out)
1257 {
1258 switch (rvc)
1259 {
1260 case RETURN_VALUE_REGISTER_CONVENTION:
1261 spu_regcache_to_value (regcache, SPU_ARG1_REGNUM, type, out);
1262 break;
1263
1264 case RETURN_VALUE_STRUCT_CONVENTION:
1265 error (_("Function return value unknown."));
1266 break;
1267 }
1268 }
1269
1270 return rvc;
1271 }
1272
1273
1274 /* Breakpoints. */
1275
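/* Return the software breakpoint instruction for the SPU.  The four bytes
   0x00 0x00 0x3f 0xff presumably encode a "stop" instruction with
   stop-and-signal type 0x3fff (the top 11 opcode bits are zero); all SPU
   instructions are 4 bytes, so *PCPTR needs no adjustment. */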
1276 static const gdb_byte *
1277 spu_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR * pcptr, int *lenptr)
1278 {
1279 static const gdb_byte breakpoint[] = { 0x00, 0x00, 0x3f, 0xff };
1280
1281 *lenptr = sizeof breakpoint;
1282 return breakpoint;
1283 }
1284
1285
1286 /* Software single-stepping support. */
1287
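/* Implement software single-stepping: plant a single-step breakpoint on
   the next sequential instruction (PC + 4, or PC + 8 for a PPE-assisted
   call) and, if the current instruction is a branch, on its possibly
   register-indirect target as well. */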
1288 static int
1289 spu_software_single_step (struct frame_info *frame)
1290 {
1291 CORE_ADDR pc, next_pc;
1292 unsigned int insn;
1293 int offset, reg;
1294 gdb_byte buf[4];
1295
1296 pc = get_frame_pc (frame);
1297
1298 if (target_read_memory (pc, buf, 4))
1299 return 1;
1300 insn = extract_unsigned_integer (buf, 4);
1301
1302 /* Next sequential instruction is at PC + 4, except if the current
1303 instruction is a PPE-assisted call, in which case it is at PC + 8.
1304 Wrap around LS limit to be on the safe side. */
1305 if ((insn & 0xffffff00) == 0x00002100)
1306 next_pc = (pc + 8) & (SPU_LS_SIZE - 1);
1307 else
1308 next_pc = (pc + 4) & (SPU_LS_SIZE - 1);
1309
1310 insert_single_step_breakpoint (next_pc);
1311
1312 if (is_branch (insn, &offset, &reg))
1313 {
1314 CORE_ADDR target = offset;
1315
1316 if (reg == SPU_PC_REGNUM)
1317 target += pc;
1318 else if (reg != -1)
1319 {
1320 get_frame_register_bytes (frame, reg, 0, 4, buf);
1321 target += extract_unsigned_integer (buf, 4) & -4;
1322 }
1323
1324 target = target & (SPU_LS_SIZE - 1);
1325 if (target != next_pc)
1326 insert_single_step_breakpoint (target);
1327 }
1328
1329 return 1;
1330 }
1331
1332 /* Target overlays for the SPU overlay manager.
1333
1334 See the documentation of simple_overlay_update for how the
1335 interface is supposed to work.
1336
1337 Data structures used by the overlay manager:
1338
1339 struct ovly_table
1340 {
1341 u32 vma;
1342 u32 size;
1343 u32 pos;
1344 u32 buf;
1345 } _ovly_table[]; -- one entry per overlay section
1346
1347 struct ovly_buf_table
1348 {
1349 u32 mapped;
1350 } _ovly_buf_table[]; -- one entry per overlay buffer
1351
1352 _ovly_table should never change.
1353
1354 Both tables are aligned to a 16-byte boundary, the symbols _ovly_table
1355 and _ovly_buf_table are of type STT_OBJECT and their size set to the size
1356 of the respective array. buf in _ovly_table is an index into _ovly_buf_table.
1357
1358 mapped is an index into _ovly_table. Both the mapped and buf indices start
1359 from one to reference the first entry in their respective tables. */
1360
1361 /* Using the per-objfile private data mechanism, we store for each
1362 objfile an array of "struct spu_overlay_table" structures, one
1363 for each obj_section of the objfile. This structure holds two
1364 fields, MAPPED_PTR and MAPPED_VAL. If MAPPED_PTR is zero, this
1365 is *not* an overlay section. If it is non-zero, it represents
1366 a target address. The overlay section is mapped iff the target
1367 integer at this location equals MAPPED_VAL. */
1368
1369 static const struct objfile_data *spu_overlay_data;
1370
1371 struct spu_overlay_table
1372 {
1373 CORE_ADDR mapped_ptr;
1374 CORE_ADDR mapped_val;
1375 };
1376
1377 /* Retrieve the overlay table for OBJFILE. If not already cached, read
1378 the _ovly_table data structure from the target and initialize the
1379 spu_overlay_table data structure from it. */
1380 static struct spu_overlay_table *
1381 spu_get_overlay_table (struct objfile *objfile)
1382 {
1383 struct minimal_symbol *ovly_table_msym, *ovly_buf_table_msym;
1384 CORE_ADDR ovly_table_base, ovly_buf_table_base;
1385 unsigned ovly_table_size, ovly_buf_table_size;
1386 struct spu_overlay_table *tbl;
1387 struct obj_section *osect;
1388 char *ovly_table;
1389 int i;
1390
1391 tbl = objfile_data (objfile, spu_overlay_data);
1392 if (tbl)
1393 return tbl;
1394
1395 ovly_table_msym = lookup_minimal_symbol ("_ovly_table", NULL, objfile);
1396 if (!ovly_table_msym)
1397 return NULL;
1398
1399 ovly_buf_table_msym = lookup_minimal_symbol ("_ovly_buf_table", NULL, objfile);
1400 if (!ovly_buf_table_msym)
1401 return NULL;
1402
1403 ovly_table_base = SYMBOL_VALUE_ADDRESS (ovly_table_msym);
1404 ovly_table_size = MSYMBOL_SIZE (ovly_table_msym);
1405
1406 ovly_buf_table_base = SYMBOL_VALUE_ADDRESS (ovly_buf_table_msym);
1407 ovly_buf_table_size = MSYMBOL_SIZE (ovly_buf_table_msym);
1408
1409 ovly_table = xmalloc (ovly_table_size);
1410 read_memory (ovly_table_base, ovly_table, ovly_table_size);
1411
1412 tbl = OBSTACK_CALLOC (&objfile->objfile_obstack,
1413 objfile->sections_end - objfile->sections,
1414 struct spu_overlay_table);
1415
1416 for (i = 0; i < ovly_table_size / 16; i++)
1417 {
1418 CORE_ADDR vma = extract_unsigned_integer (ovly_table + 16*i + 0, 4);
1419 CORE_ADDR size = extract_unsigned_integer (ovly_table + 16*i + 4, 4);
1420 CORE_ADDR pos = extract_unsigned_integer (ovly_table + 16*i + 8, 4);
1421 CORE_ADDR buf = extract_unsigned_integer (ovly_table + 16*i + 12, 4);
1422
1423 if (buf == 0 || (buf - 1) * 4 >= ovly_buf_table_size)
1424 continue;
1425
1426 ALL_OBJFILE_OSECTIONS (objfile, osect)
1427 if (vma == bfd_section_vma (objfile->obfd, osect->the_bfd_section)
1428 && pos == osect->the_bfd_section->filepos)
1429 {
1430 int ndx = osect - objfile->sections;
1431 tbl[ndx].mapped_ptr = ovly_buf_table_base + (buf - 1) * 4;
1432 tbl[ndx].mapped_val = i + 1;
1433 break;
1434 }
1435 }
1436
1437 xfree (ovly_table);
1438 set_objfile_data (objfile, spu_overlay_data, tbl);
1439 return tbl;
1440 }
1441
1442 /* Read _ovly_buf_table entry from the target to determine whether
1443 OSECT is currently mapped, and update the mapped state. */
1444 static void
1445 spu_overlay_update_osect (struct obj_section *osect)
1446 {
1447 struct spu_overlay_table *ovly_table;
1448 CORE_ADDR val;
1449
1450 ovly_table = spu_get_overlay_table (osect->objfile);
1451 if (!ovly_table)
1452 return;
1453
1454 ovly_table += osect - osect->objfile->sections;
1455 if (ovly_table->mapped_ptr == 0)
1456 return;
1457
1458 val = read_memory_unsigned_integer (ovly_table->mapped_ptr, 4);
1459 osect->ovly_mapped = (val == ovly_table->mapped_val);
1460 }
1461
1462 /* If OSECT is NULL, then update all sections' mapped state.
1463 If OSECT is non-NULL, then update only OSECT's mapped state. */
1464 static void
1465 spu_overlay_update (struct obj_section *osect)
1466 {
1467 /* Just one section. */
1468 if (osect)
1469 spu_overlay_update_osect (osect);
1470
1471 /* All sections. */
1472 else
1473 {
1474 struct objfile *objfile;
1475
1476 ALL_OBJSECTIONS (objfile, osect)
1477 if (section_is_overlay (osect))
1478 spu_overlay_update_osect (osect);
1479 }
1480 }
1481
1482 /* Whenever a new objfile is loaded, read the target's _ovly_table.
1483 If there is one, go through all sections and make sure for non-
1484 overlay sections LMA equals VMA, while for overlay sections LMA
1485 is larger than local store size. */
1486 static void
1487 spu_overlay_new_objfile (struct objfile *objfile)
1488 {
1489 struct spu_overlay_table *ovly_table;
1490 struct obj_section *osect;
1491
1492 /* If we've already touched this file, do nothing. */
1493 if (!objfile || objfile_data (objfile, spu_overlay_data) != NULL)
1494 return;
1495
1496 /* Consider only SPU objfiles. */
1497 if (bfd_get_arch (objfile->obfd) != bfd_arch_spu)
1498 return;
1499
1500 /* Check if this objfile has overlays. */
1501 ovly_table = spu_get_overlay_table (objfile);
1502 if (!ovly_table)
1503 return;
1504
1505 /* Now go and fiddle with all the LMAs. */
1506 ALL_OBJFILE_OSECTIONS (objfile, osect)
1507 {
1508 bfd *obfd = objfile->obfd;
1509 asection *bsect = osect->the_bfd_section;
1510 int ndx = osect - objfile->sections;
1511
1512 if (ovly_table[ndx].mapped_ptr == 0)
1513 bfd_section_lma (obfd, bsect) = bfd_section_vma (obfd, bsect);
1514 else
1515 bfd_section_lma (obfd, bsect) = bsect->filepos + SPU_LS_SIZE;
1516 }
1517 }
1518
1519
1520 /* "info spu" commands. */
1521
1522 static void
1523 info_spu_event_command (char *args, int from_tty)
1524 {
1525 struct frame_info *frame = get_selected_frame (NULL);
1526 ULONGEST event_status = 0;
1527 ULONGEST event_mask = 0;
1528 struct cleanup *chain;
1529 gdb_byte buf[100];
1530 char annex[32];
1531 LONGEST len;
1532 int rc, id;
1533
1534 if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
1535 error (_("\"info spu\" is only supported on the SPU architecture."));
1536
1537 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1538
1539 xsnprintf (annex, sizeof annex, "%d/event_status", id);
1540 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1541 buf, 0, (sizeof (buf) - 1));
1542 if (len <= 0)
1543 error (_("Could not read event_status."));
1544 buf[len] = '\0';
1545 event_status = strtoulst (buf, NULL, 16);
1546
1547 xsnprintf (annex, sizeof annex, "%d/event_mask", id);
1548 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1549 buf, 0, (sizeof (buf) - 1));
1550 if (len <= 0)
1551 error (_("Could not read event_mask."));
1552 buf[len] = '\0';
1553 event_mask = strtoulst (buf, NULL, 16);
1554
1555 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoEvent");
1556
1557 if (ui_out_is_mi_like_p (uiout))
1558 {
1559 ui_out_field_fmt (uiout, "event_status",
1560 "0x%s", phex_nz (event_status, 4));
1561 ui_out_field_fmt (uiout, "event_mask",
1562 "0x%s", phex_nz (event_mask, 4));
1563 }
1564 else
1565 {
1566 printf_filtered (_("Event Status 0x%s\n"), phex (event_status, 4));
1567 printf_filtered (_("Event Mask 0x%s\n"), phex (event_mask, 4));
1568 }
1569
1570 do_cleanups (chain);
1571 }
1572
1573 static void
1574 info_spu_signal_command (char *args, int from_tty)
1575 {
1576 struct frame_info *frame = get_selected_frame (NULL);
1577 ULONGEST signal1 = 0;
1578 ULONGEST signal1_type = 0;
1579 int signal1_pending = 0;
1580 ULONGEST signal2 = 0;
1581 ULONGEST signal2_type = 0;
1582 int signal2_pending = 0;
1583 struct cleanup *chain;
1584 char annex[32];
1585 gdb_byte buf[100];
1586 LONGEST len;
1587 int rc, id;
1588
1589 if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
1590 error (_("\"info spu\" is only supported on the SPU architecture."));
1591
1592 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1593
1594 xsnprintf (annex, sizeof annex, "%d/signal1", id);
1595 len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
1596 if (len < 0)
1597 error (_("Could not read signal1."));
1598 else if (len == 4)
1599 {
1600 signal1 = extract_unsigned_integer (buf, 4);
1601 signal1_pending = 1;
1602 }
1603
1604 xsnprintf (annex, sizeof annex, "%d/signal1_type", id);
1605 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1606 buf, 0, (sizeof (buf) - 1));
1607 if (len <= 0)
1608 error (_("Could not read signal1_type."));
1609 buf[len] = '\0';
1610 signal1_type = strtoulst (buf, NULL, 16);
1611
1612 xsnprintf (annex, sizeof annex, "%d/signal2", id);
1613 len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
1614 if (len < 0)
1615 error (_("Could not read signal2."));
1616 else if (len == 4)
1617 {
1618 signal2 = extract_unsigned_integer (buf, 4);
1619 signal2_pending = 1;
1620 }
1621
1622 xsnprintf (annex, sizeof annex, "%d/signal2_type", id);
1623 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1624 buf, 0, (sizeof (buf) - 1));
1625 if (len <= 0)
1626 error (_("Could not read signal2_type."));
1627 buf[len] = '\0';
1628 signal2_type = strtoulst (buf, NULL, 16);
1629
1630 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoSignal");
1631
1632 if (ui_out_is_mi_like_p (uiout))
1633 {
1634 ui_out_field_int (uiout, "signal1_pending", signal1_pending);
1635 ui_out_field_fmt (uiout, "signal1", "0x%s", phex_nz (signal1, 4));
1636 ui_out_field_int (uiout, "signal1_type", signal1_type);
1637 ui_out_field_int (uiout, "signal2_pending", signal2_pending);
1638 ui_out_field_fmt (uiout, "signal2", "0x%s", phex_nz (signal2, 4));
1639 ui_out_field_int (uiout, "signal2_type", signal2_type);
1640 }
1641 else
1642 {
1643 if (signal1_pending)
1644 printf_filtered (_("Signal 1 control word 0x%s "), phex (signal1, 4));
1645 else
1646 printf_filtered (_("Signal 1 not pending "));
1647
1648 if (signal1_type)
1649 printf_filtered (_("(Type Or)\n"));
1650 else
1651 printf_filtered (_("(Type Overwrite)\n"));
1652
1653 if (signal2_pending)
1654 printf_filtered (_("Signal 2 control word 0x%s "), phex (signal2, 4));
1655 else
1656 printf_filtered (_("Signal 2 not pending "));
1657
1658 if (signal2_type)
1659 printf_filtered (_("(Type Or)\n"));
1660 else
1661 printf_filtered (_("(Type Overwrite)\n"));
1662 }
1663
1664 do_cleanups (chain);
1665 }
1666
1667 static void
1668 info_spu_mailbox_list (gdb_byte *buf, int nr,
1669 const char *field, const char *msg)
1670 {
1671 struct cleanup *chain;
1672 int i;
1673
1674 if (nr <= 0)
1675 return;
1676
1677 chain = make_cleanup_ui_out_table_begin_end (uiout, 1, nr, "mbox");
1678
1679 ui_out_table_header (uiout, 32, ui_left, field, msg);
1680 ui_out_table_body (uiout);
1681
1682 for (i = 0; i < nr; i++)
1683 {
1684 struct cleanup *val_chain;
1685 ULONGEST val;
1686 val_chain = make_cleanup_ui_out_tuple_begin_end (uiout, "mbox");
1687 val = extract_unsigned_integer (buf + 4*i, 4);
1688 ui_out_field_fmt (uiout, field, "0x%s", phex (val, 4));
1689 do_cleanups (val_chain);
1690
1691 if (!ui_out_is_mi_like_p (uiout))
1692 printf_filtered ("\n");
1693 }
1694
1695 do_cleanups (chain);
1696 }
1697
1698 static void
1699 info_spu_mailbox_command (char *args, int from_tty)
1700 {
1701 struct frame_info *frame = get_selected_frame (NULL);
1702 struct cleanup *chain;
1703 char annex[32];
1704 gdb_byte buf[1024];
1705 LONGEST len;
1706 int i, id;
1707
1708 if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
1709 error (_("\"info spu\" is only supported on the SPU architecture."));
1710
1711 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1712
1713 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoMailbox");
1714
1715 xsnprintf (annex, sizeof annex, "%d/mbox_info", id);
1716 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1717 buf, 0, sizeof buf);
1718 if (len < 0)
1719 error (_("Could not read mbox_info."));
1720
1721 info_spu_mailbox_list (buf, len / 4, "mbox", "SPU Outbound Mailbox");
1722
1723 xsnprintf (annex, sizeof annex, "%d/ibox_info", id);
1724 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1725 buf, 0, sizeof buf);
1726 if (len < 0)
1727 error (_("Could not read ibox_info."));
1728
1729 info_spu_mailbox_list (buf, len / 4, "ibox", "SPU Outbound Interrupt Mailbox");
1730
1731 xsnprintf (annex, sizeof annex, "%d/wbox_info", id);
1732 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1733 buf, 0, sizeof buf);
1734 if (len < 0)
1735 error (_("Could not read wbox_info."));
1736
1737 info_spu_mailbox_list (buf, len / 4, "wbox", "SPU Inbound Mailbox");
1738
1739 do_cleanups (chain);
1740 }
1741
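/* Extract bits [FIRST, LAST] from the 64-bit word WORD, using the
   big-endian bit numbering of the CBE register documentation (bit 0 is
   the most significant bit).  For example, spu_mfc_get_bitfield (w, 56, 63)
   yields the low-order byte of WORD, and spu_mfc_get_bitfield (w, 0, 0)
   its most significant bit. */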
1742 static ULONGEST
1743 spu_mfc_get_bitfield (ULONGEST word, int first, int last)
1744 {
1745 ULONGEST mask = ~(~(ULONGEST)0 << (last - first + 1));
1746 return (word >> (63 - last)) & mask;
1747 }
1748
1749 static void
1750 info_spu_dma_cmdlist (gdb_byte *buf, int nr)
1751 {
1752 static char *spu_mfc_opcode[256] =
1753 {
1754 /* 00 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1755 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1756 /* 10 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1757 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1758 /* 20 */ "put", "putb", "putf", NULL, "putl", "putlb", "putlf", NULL,
1759 "puts", "putbs", "putfs", NULL, NULL, NULL, NULL, NULL,
1760 /* 30 */ "putr", "putrb", "putrf", NULL, "putrl", "putrlb", "putrlf", NULL,
1761 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1762 /* 40 */ "get", "getb", "getf", NULL, "getl", "getlb", "getlf", NULL,
1763 "gets", "getbs", "getfs", NULL, NULL, NULL, NULL, NULL,
1764 /* 50 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1765 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1766 /* 60 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1767 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1768 /* 70 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1769 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1770 /* 80 */ "sdcrt", "sdcrtst", NULL, NULL, NULL, NULL, NULL, NULL,
1771 NULL, "sdcrz", NULL, NULL, NULL, "sdcrst", NULL, "sdcrf",
1772 /* 90 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1773 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1774 /* a0 */ "sndsig", "sndsigb", "sndsigf", NULL, NULL, NULL, NULL, NULL,
1775 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1776 /* b0 */ "putlluc", NULL, NULL, NULL, "putllc", NULL, NULL, NULL,
1777 "putqlluc", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1778 /* c0 */ "barrier", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1779 "mfceieio", NULL, NULL, NULL, "mfcsync", NULL, NULL, NULL,
1780 /* d0 */ "getllar", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1781 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1782 /* e0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1783 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1784 /* f0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1785 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1786 };
1787
1788 int *seq = alloca (nr * sizeof (int));
1789 int done = 0;
1790 struct cleanup *chain;
1791 int i, j;
1792
1793
1794 /* Determine sequence in which to display (valid) entries. */
1795 for (i = 0; i < nr; i++)
1796 {
1797 /* Search for the first valid entry all of whose
1798 dependencies are met. */
1799 for (j = 0; j < nr; j++)
1800 {
1801 ULONGEST mfc_cq_dw3;
1802 ULONGEST dependencies;
1803
1804 if (done & (1 << (nr - 1 - j)))
1805 continue;
1806
1807 mfc_cq_dw3 = extract_unsigned_integer (buf + 32*j + 24, 8);
1808 if (!spu_mfc_get_bitfield (mfc_cq_dw3, 16, 16))
1809 continue;
1810
1811 dependencies = spu_mfc_get_bitfield (mfc_cq_dw3, 0, nr - 1);
1812 if ((dependencies & done) != dependencies)
1813 continue;
1814
1815 seq[i] = j;
1816 done |= 1 << (nr - 1 - j);
1817 break;
1818 }
1819
1820 if (j == nr)
1821 break;
1822 }
1823
1824 nr = i;
1825
1826
1827 chain = make_cleanup_ui_out_table_begin_end (uiout, 10, nr, "dma_cmd");
1828
1829 ui_out_table_header (uiout, 7, ui_left, "opcode", "Opcode");
1830 ui_out_table_header (uiout, 3, ui_left, "tag", "Tag");
1831 ui_out_table_header (uiout, 3, ui_left, "tid", "TId");
1832 ui_out_table_header (uiout, 3, ui_left, "rid", "RId");
1833 ui_out_table_header (uiout, 18, ui_left, "ea", "EA");
1834 ui_out_table_header (uiout, 7, ui_left, "lsa", "LSA");
1835 ui_out_table_header (uiout, 7, ui_left, "size", "Size");
1836 ui_out_table_header (uiout, 7, ui_left, "lstaddr", "LstAddr");
1837 ui_out_table_header (uiout, 7, ui_left, "lstsize", "LstSize");
1838 ui_out_table_header (uiout, 1, ui_left, "error_p", "E");
1839
1840 ui_out_table_body (uiout);
1841
1842 for (i = 0; i < nr; i++)
1843 {
1844 struct cleanup *cmd_chain;
1845 ULONGEST mfc_cq_dw0;
1846 ULONGEST mfc_cq_dw1;
1847 ULONGEST mfc_cq_dw2;
1848 int mfc_cmd_opcode, mfc_cmd_tag, rclass_id, tclass_id;
1849 int lsa, size, list_lsa, list_size, mfc_lsa, mfc_size;
1850 ULONGEST mfc_ea;
1851 int list_valid_p, noop_valid_p, qw_valid_p, ea_valid_p, cmd_error_p;
1852
1853 /* Decode contents of MFC Command Queue Context Save/Restore Registers.
1854 See "Cell Broadband Engine Registers V1.3", section 3.3.2.1. */
1855
1856 mfc_cq_dw0 = extract_unsigned_integer (buf + 32*seq[i], 8);
1857 mfc_cq_dw1 = extract_unsigned_integer (buf + 32*seq[i] + 8, 8);
1858 mfc_cq_dw2 = extract_unsigned_integer (buf + 32*seq[i] + 16, 8);
1859
1860 list_lsa = spu_mfc_get_bitfield (mfc_cq_dw0, 0, 14);
1861 list_size = spu_mfc_get_bitfield (mfc_cq_dw0, 15, 26);
1862 mfc_cmd_opcode = spu_mfc_get_bitfield (mfc_cq_dw0, 27, 34);
1863 mfc_cmd_tag = spu_mfc_get_bitfield (mfc_cq_dw0, 35, 39);
1864 list_valid_p = spu_mfc_get_bitfield (mfc_cq_dw0, 40, 40);
1865 rclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 41, 43);
1866 tclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 44, 46);
1867
1868 mfc_ea = spu_mfc_get_bitfield (mfc_cq_dw1, 0, 51) << 12
1869 | spu_mfc_get_bitfield (mfc_cq_dw2, 25, 36);
1870
1871 mfc_lsa = spu_mfc_get_bitfield (mfc_cq_dw2, 0, 13);
1872 mfc_size = spu_mfc_get_bitfield (mfc_cq_dw2, 14, 24);
1873 noop_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 37, 37);
1874 qw_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 38, 38);
1875 ea_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 39, 39);
1876 cmd_error_p = spu_mfc_get_bitfield (mfc_cq_dw2, 40, 40);
1877
1878 cmd_chain = make_cleanup_ui_out_tuple_begin_end (uiout, "cmd");
1879
1880 if (spu_mfc_opcode[mfc_cmd_opcode])
1881 ui_out_field_string (uiout, "opcode", spu_mfc_opcode[mfc_cmd_opcode]);
1882 else
1883 ui_out_field_int (uiout, "opcode", mfc_cmd_opcode);
1884
1885 ui_out_field_int (uiout, "tag", mfc_cmd_tag);
1886 ui_out_field_int (uiout, "tid", tclass_id);
1887 ui_out_field_int (uiout, "rid", rclass_id);
1888
1889 if (ea_valid_p)
1890 ui_out_field_fmt (uiout, "ea", "0x%s", phex (mfc_ea, 8));
1891 else
1892 ui_out_field_skip (uiout, "ea");
1893
1894 ui_out_field_fmt (uiout, "lsa", "0x%05x", mfc_lsa << 4);
1895 if (qw_valid_p)
1896 ui_out_field_fmt (uiout, "size", "0x%05x", mfc_size << 4);
1897 else
1898 ui_out_field_fmt (uiout, "size", "0x%05x", mfc_size);
1899
1900 if (list_valid_p)
1901 {
1902 ui_out_field_fmt (uiout, "lstaddr", "0x%05x", list_lsa << 3);
1903 ui_out_field_fmt (uiout, "lstsize", "0x%05x", list_size << 3);
1904 }
1905 else
1906 {
1907 ui_out_field_skip (uiout, "lstaddr");
1908 ui_out_field_skip (uiout, "lstsize");
1909 }
1910
1911 if (cmd_error_p)
1912 ui_out_field_string (uiout, "error_p", "*");
1913 else
1914 ui_out_field_skip (uiout, "error_p");
1915
1916 do_cleanups (cmd_chain);
1917
1918 if (!ui_out_is_mi_like_p (uiout))
1919 printf_filtered ("\n");
1920 }
1921
1922 do_cleanups (chain);
1923 }
1924
1925 static void
1926 info_spu_dma_command (char *args, int from_tty)
1927 {
1928 struct frame_info *frame = get_selected_frame (NULL);
1929 ULONGEST dma_info_type;
1930 ULONGEST dma_info_mask;
1931 ULONGEST dma_info_status;
1932 ULONGEST dma_info_stall_and_notify;
1933 ULONGEST dma_info_atomic_command_status;
1934 struct cleanup *chain;
1935 char annex[32];
1936 gdb_byte buf[1024];
1937 LONGEST len;
1938 int i, id;
1939
1940 if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
1941 error (_("\"info spu\" is only supported on the SPU architecture."));
1942
1943 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1944
1945 xsnprintf (annex, sizeof annex, "%d/dma_info", id);
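   /* The dma_info area holds five 8-byte status words followed by
      sixteen 32-byte command-queue entries.  */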
1946 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1947 buf, 0, 40 + 16 * 32);
1948 if (len <= 0)
1949 error (_("Could not read dma_info."));
1950
1951 dma_info_type = extract_unsigned_integer (buf, 8);
1952 dma_info_mask = extract_unsigned_integer (buf + 8, 8);
1953 dma_info_status = extract_unsigned_integer (buf + 16, 8);
1954 dma_info_stall_and_notify = extract_unsigned_integer (buf + 24, 8);
1955 dma_info_atomic_command_status = extract_unsigned_integer (buf + 32, 8);
1956
1957 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoDMA");
1958
1959 if (ui_out_is_mi_like_p (uiout))
1960 {
1961 ui_out_field_fmt (uiout, "dma_info_type", "0x%s",
1962 phex_nz (dma_info_type, 4));
1963 ui_out_field_fmt (uiout, "dma_info_mask", "0x%s",
1964 phex_nz (dma_info_mask, 4));
1965 ui_out_field_fmt (uiout, "dma_info_status", "0x%s",
1966 phex_nz (dma_info_status, 4));
1967 ui_out_field_fmt (uiout, "dma_info_stall_and_notify", "0x%s",
1968 phex_nz (dma_info_stall_and_notify, 4));
1969 ui_out_field_fmt (uiout, "dma_info_atomic_command_status", "0x%s",
1970 phex_nz (dma_info_atomic_command_status, 4));
1971 }
1972 else
1973 {
1974 const char *query_msg = _("no query pending");
1975
1976 if (dma_info_type & 4)
1977 switch (dma_info_type & 3)
1978 {
1979 case 1: query_msg = _("'any' query pending"); break;
1980 case 2: query_msg = _("'all' query pending"); break;
1981 default: query_msg = _("undefined query type"); break;
1982 }
1983
1984 printf_filtered (_("Tag-Group Status 0x%s\n"),
1985 phex (dma_info_status, 4));
1986 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
1987 phex (dma_info_mask, 4), query_msg);
1988 printf_filtered (_("Stall-and-Notify 0x%s\n"),
1989 phex (dma_info_stall_and_notify, 4));
1990 printf_filtered (_("Atomic Cmd Status 0x%s\n"),
1991 phex (dma_info_atomic_command_status, 4));
1992 printf_filtered ("\n");
1993 }
1994
1995 info_spu_dma_cmdlist (buf + 40, 16);
1996 do_cleanups (chain);
1997 }
1998
1999 static void
2000 info_spu_proxydma_command (char *args, int from_tty)
2001 {
2002 struct frame_info *frame = get_selected_frame (NULL);
2003 ULONGEST dma_info_type;
2004 ULONGEST dma_info_mask;
2005 ULONGEST dma_info_status;
2006 struct cleanup *chain;
2007 char annex[32];
2008 gdb_byte buf[1024];
2009 LONGEST len;
2010 int i, id;
2011
2012 if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
2013 error (_("\"info spu\" is only supported on the SPU architecture."));
2014
2015 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2016
2017 xsnprintf (annex, sizeof annex, "%d/proxydma_info", id);
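   /* The proxydma_info area holds three 8-byte status words followed by
      eight 32-byte command-queue entries.  */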
2018 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2019 buf, 0, 24 + 8 * 32);
2020 if (len <= 0)
2021 error (_("Could not read proxydma_info."));
2022
2023 dma_info_type = extract_unsigned_integer (buf, 8);
2024 dma_info_mask = extract_unsigned_integer (buf + 8, 8);
2025 dma_info_status = extract_unsigned_integer (buf + 16, 8);
2026
2027 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoProxyDMA");
2028
2029 if (ui_out_is_mi_like_p (uiout))
2030 {
2031 ui_out_field_fmt (uiout, "proxydma_info_type", "0x%s",
2032 phex_nz (dma_info_type, 4));
2033 ui_out_field_fmt (uiout, "proxydma_info_mask", "0x%s",
2034 phex_nz (dma_info_mask, 4));
2035 ui_out_field_fmt (uiout, "proxydma_info_status", "0x%s",
2036 phex_nz (dma_info_status, 4));
2037 }
2038 else
2039 {
2040 const char *query_msg;
2041
2042 switch (dma_info_type & 3)
2043 {
2044 case 0: query_msg = _("no query pending"); break;
2045 case 1: query_msg = _("'any' query pending"); break;
2046 case 2: query_msg = _("'all' query pending"); break;
2047 default: query_msg = _("undefined query type"); break;
2048 }
2049
2050 printf_filtered (_("Tag-Group Status 0x%s\n"),
2051 phex (dma_info_status, 4));
2052 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
2053 phex (dma_info_mask, 4), query_msg);
2054 printf_filtered ("\n");
2055 }
2056
2057 info_spu_dma_cmdlist (buf + 24, 8);
2058 do_cleanups (chain);
2059 }
2060
2061 static void
2062 info_spu_command (char *args, int from_tty)
2063 {
2064 printf_unfiltered (_("\"info spu\" must be followed by the name of an SPU facility.\n"));
2065 help_list (infospucmdlist, "info spu ", -1, gdb_stdout);
2066 }
2067
2068
2069 /* Set up gdbarch struct. */
2070
2071 static struct gdbarch *
2072 spu_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2073 {
2074 struct gdbarch *gdbarch;
2075 struct gdbarch_tdep *tdep;
2076
2077 /* Find a candidate among the list of pre-declared architectures. */
2078 arches = gdbarch_list_lookup_by_info (arches, &info);
2079 if (arches != NULL)
2080 return arches->gdbarch;
2081
2082   /* Is it for us?  */
2083 if (info.bfd_arch_info->mach != bfd_mach_spu)
2084 return NULL;
2085
2086 /* Yes, create a new architecture. */
2087 tdep = XCALLOC (1, struct gdbarch_tdep);
2088 gdbarch = gdbarch_alloc (&info, tdep);
2089
2090 /* Disassembler. */
2091 set_gdbarch_print_insn (gdbarch, print_insn_spu);
2092
2093 /* Registers. */
2094 set_gdbarch_num_regs (gdbarch, SPU_NUM_REGS);
2095 set_gdbarch_num_pseudo_regs (gdbarch, SPU_NUM_PSEUDO_REGS);
2096 set_gdbarch_sp_regnum (gdbarch, SPU_SP_REGNUM);
2097 set_gdbarch_pc_regnum (gdbarch, SPU_PC_REGNUM);
2098 set_gdbarch_read_pc (gdbarch, spu_read_pc);
2099 set_gdbarch_write_pc (gdbarch, spu_write_pc);
2100 set_gdbarch_register_name (gdbarch, spu_register_name);
2101 set_gdbarch_register_type (gdbarch, spu_register_type);
2102 set_gdbarch_pseudo_register_read (gdbarch, spu_pseudo_register_read);
2103 set_gdbarch_pseudo_register_write (gdbarch, spu_pseudo_register_write);
2104 set_gdbarch_value_from_register (gdbarch, spu_value_from_register);
2105 set_gdbarch_register_reggroup_p (gdbarch, spu_register_reggroup_p);
2106
2107 /* Data types. */
2108 set_gdbarch_char_signed (gdbarch, 0);
2109 set_gdbarch_ptr_bit (gdbarch, 32);
2110 set_gdbarch_addr_bit (gdbarch, 32);
2111 set_gdbarch_short_bit (gdbarch, 16);
2112 set_gdbarch_int_bit (gdbarch, 32);
2113 set_gdbarch_long_bit (gdbarch, 32);
2114 set_gdbarch_long_long_bit (gdbarch, 64);
2115 set_gdbarch_float_bit (gdbarch, 32);
2116 set_gdbarch_double_bit (gdbarch, 64);
2117 set_gdbarch_long_double_bit (gdbarch, 64);
2118 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2119 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2120 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
2121
2122 /* Address conversion. */
2123 set_gdbarch_pointer_to_address (gdbarch, spu_pointer_to_address);
2124 set_gdbarch_integer_to_address (gdbarch, spu_integer_to_address);
2125
2126 /* Inferior function calls. */
2127 set_gdbarch_call_dummy_location (gdbarch, ON_STACK);
2128 set_gdbarch_frame_align (gdbarch, spu_frame_align);
2129 set_gdbarch_frame_red_zone_size (gdbarch, 2000);
2130 set_gdbarch_push_dummy_code (gdbarch, spu_push_dummy_code);
2131 set_gdbarch_push_dummy_call (gdbarch, spu_push_dummy_call);
2132 set_gdbarch_dummy_id (gdbarch, spu_dummy_id);
2133 set_gdbarch_return_value (gdbarch, spu_return_value);
2134
2135 /* Frame handling. */
2136 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2137 frame_unwind_append_unwinder (gdbarch, &spu_frame_unwind);
2138 frame_base_set_default (gdbarch, &spu_frame_base);
2139 set_gdbarch_unwind_pc (gdbarch, spu_unwind_pc);
2140 set_gdbarch_unwind_sp (gdbarch, spu_unwind_sp);
2141 set_gdbarch_virtual_frame_pointer (gdbarch, spu_virtual_frame_pointer);
2142 set_gdbarch_frame_args_skip (gdbarch, 0);
2143 set_gdbarch_skip_prologue (gdbarch, spu_skip_prologue);
2144 set_gdbarch_in_function_epilogue_p (gdbarch, spu_in_function_epilogue_p);
2145
2146 /* Breakpoints. */
2147 set_gdbarch_decr_pc_after_break (gdbarch, 4);
2148 set_gdbarch_breakpoint_from_pc (gdbarch, spu_breakpoint_from_pc);
2149 set_gdbarch_cannot_step_breakpoint (gdbarch, 1);
2150 set_gdbarch_software_single_step (gdbarch, spu_software_single_step);
2151
2152 /* Overlays. */
2153 set_gdbarch_overlay_update (gdbarch, spu_overlay_update);
2154
2155 return gdbarch;
2156 }
2157
2158 /* Provide a prototype to silence -Wmissing-prototypes. */
2159 extern initialize_file_ftype _initialize_spu_tdep;
2160
2161 void
2162 _initialize_spu_tdep (void)
2163 {
2164 register_gdbarch_init (bfd_arch_spu, spu_gdbarch_init);
2165
2166 /* Add ourselves to objfile event chain. */
2167 observer_attach_new_objfile (spu_overlay_new_objfile);
2168 spu_overlay_data = register_objfile_data ();
2169
2170 /* Add root prefix command for all "info spu" commands. */
2171 add_prefix_cmd ("spu", class_info, info_spu_command,
2172 _("Various SPU specific commands."),
2173 &infospucmdlist, "info spu ", 0, &infolist);
2174
2175 /* Add various "info spu" commands. */
2176 add_cmd ("event", class_info, info_spu_event_command,
2177 _("Display SPU event facility status.\n"),
2178 &infospucmdlist);
2179 add_cmd ("signal", class_info, info_spu_signal_command,
2180 _("Display SPU signal notification facility status.\n"),
2181 &infospucmdlist);
2182 add_cmd ("mailbox", class_info, info_spu_mailbox_command,
2183 _("Display SPU mailbox facility status.\n"),
2184 &infospucmdlist);
2185 add_cmd ("dma", class_info, info_spu_dma_command,
2186 _("Display MFC DMA status.\n"),
2187 &infospucmdlist);
2188 add_cmd ("proxydma", class_info, info_spu_proxydma_command,
2189 _("Display MFC Proxy-DMA status.\n"),
2190 &infospucmdlist);
2191 }