sim: mcore: switch to common sim-reg
sim/mcore/interp.c
1 /* Simulator for Motorola's MCore processor
2 Copyright (C) 1999-2015 Free Software Foundation, Inc.
3 Contributed by Cygnus Solutions.
4
5 This file is part of GDB, the GNU debugger.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include <signal.h>
22 #include <stdlib.h>
23 #include <string.h>
24 #include <sys/times.h>
25 #include <sys/param.h>
26 #include <unistd.h>
27 #include "bfd.h"
28 #include "gdb/callback.h"
29 #include "libiberty.h"
30 #include "gdb/remote-sim.h"
31
32 #include "sim-main.h"
33 #include "sim-base.h"
34 #include "sim-syscall.h"
35 #include "sim-options.h"
36
37 #define target_big_endian (CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN)
38
39
40 static unsigned long
41 mcore_extract_unsigned_integer (unsigned char *addr, int len)
42 {
43 unsigned long retval;
44 unsigned char * p;
45 unsigned char * startaddr = (unsigned char *)addr;
46 unsigned char * endaddr = startaddr + len;
47
48 if (len > (int) sizeof (unsigned long))
49 printf ("That operation is not available on integers of more than %zu bytes.",
50 sizeof (unsigned long));
51
52 /* Start at the most significant end of the integer, and work towards
53 the least significant. */
54 retval = 0;
55
56 if (! target_big_endian)
57 {
58 for (p = endaddr; p > startaddr;)
59 retval = (retval << 8) | * -- p;
60 }
61 else
62 {
63 for (p = startaddr; p < endaddr;)
64 retval = (retval << 8) | * p ++;
65 }
66
67 return retval;
68 }
69
70 static void
71 mcore_store_unsigned_integer (unsigned char *addr, int len, unsigned long val)
72 {
73 unsigned char * p;
74 unsigned char * startaddr = (unsigned char *)addr;
75 unsigned char * endaddr = startaddr + len;
76
77 if (! target_big_endian)
78 {
79 for (p = startaddr; p < endaddr;)
80 {
81 * p ++ = val & 0xff;
82 val >>= 8;
83 }
84 }
85 else
86 {
87 for (p = endaddr; p > startaddr;)
88 {
89 * -- p = val & 0xff;
90 val >>= 8;
91 }
92 }
93 }
94
95 /* The machine state.
96 This state is maintained in host byte order. The
97 fetch/store register functions must translate between host
98 byte order and the target processor byte order.
99 Keeping this data in target byte order would simplify the register
100 read/write functions, but keeping it in host (native) order improves
101 the performance of the simulator. Simulation speed is deemed more
102 important. */
103 /* TODO: Should be moved to sim-main.h:sim_cpu. */
104
105 /* The ordering of the mcore_regset structure is matched in the
106 gdb/config/mcore/tm-mcore.h file in the REGISTER_NAMES macro. */
107 struct mcore_regset
108 {
109 word gregs [16]; /* primary registers */
110 word alt_gregs [16]; /* alt register file */
111 word cregs [32]; /* control registers */
112 int ticks;
113 int stalls;
114 int cycles;
115 int insts;
116 word * active_gregs;
117 };
118
119 union
120 {
121 struct mcore_regset asregs;
122 word asints [1]; /* but accessed larger... */
123 } cpu;
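/* The union lets the register fetch/store code below treat every field
   of mcore_regset as one flat array of words indexed by register number.  */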
124
125 #define LAST_VALID_CREG 32 /* only 0..12 implemented */
126 #define NUM_MCORE_REGS (16 + 16 + LAST_VALID_CREG + 1)
127
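/* Extra cycles charged for each memory operation in the timing model.  */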
128 static int memcycles = 1;
129
130 #define gr asregs.active_gregs
131 #define cr asregs.cregs
132 #define sr asregs.cregs[0]
133 #define vbr asregs.cregs[1]
134 #define esr asregs.cregs[2]
135 #define fsr asregs.cregs[3]
136 #define epc asregs.cregs[4]
137 #define fpc asregs.cregs[5]
138 #define ss0 asregs.cregs[6]
139 #define ss1 asregs.cregs[7]
140 #define ss2 asregs.cregs[8]
141 #define ss3 asregs.cregs[9]
142 #define ss4 asregs.cregs[10]
143 #define gcr asregs.cregs[11]
144 #define gsr asregs.cregs[12]
145
146 /* Manipulate the carry bit. */
147 #define C_ON() (cpu.sr & 1)
148 #define C_VALUE() (cpu.sr & 1)
149 #define C_OFF() ((cpu.sr & 1) == 0)
150 #define SET_C() {cpu.sr |= 1;}
151 #define CLR_C() {cpu.sr &= 0xfffffffe;}
152 #define NEW_C(v) {CLR_C(); cpu.sr |= ((v) & 1);}
153
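/* PSR bit 1 (the AF bit) selects the alternate register file.  */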
154 #define SR_AF() ((cpu.sr >> 1) & 1)
155
156 #define TRAPCODE 1 /* r1 holds which function we want */
157 #define PARM1 2 /* first parameter */
158 #define PARM2 3
159 #define PARM3 4
160 #define PARM4 5
161 #define RET1 2 /* register for return values. */
162
163 /* Default to an 8 Mbyte (== 2^23) memory space. */
164 #define DEFAULT_MEMORY_SIZE 0x800000
165
166 static void
167 set_initial_gprs (SIM_CPU *scpu)
168 {
169 int i;
170 long space;
171
172 /* Set up machine just out of reset. */
173 CPU_PC_SET (scpu, 0);
174 cpu.sr = 0;
175
176 /* Clean out the GPRs and alternate GPRs. */
177 for (i = 0; i < 16; i++)
178 {
179 cpu.asregs.gregs[i] = 0;
180 cpu.asregs.alt_gregs[i] = 0;
181 }
182
183 /* Make our register set point to the right place. */
184 if (SR_AF())
185 cpu.asregs.active_gregs = &cpu.asregs.alt_gregs[0];
186 else
187 cpu.asregs.active_gregs = &cpu.asregs.gregs[0];
188
189 /* ABI specifies initial values for these registers. */
190 cpu.gr[0] = DEFAULT_MEMORY_SIZE - 4;
191
192 /* dac fix, the stack address must be 8-byte aligned! */
193 cpu.gr[0] = cpu.gr[0] - cpu.gr[0] % 8;
194 cpu.gr[PARM1] = 0;
195 cpu.gr[PARM2] = 0;
196 cpu.gr[PARM3] = 0;
197 cpu.gr[PARM4] = cpu.gr[0];
198 }
199
200 /* Simulate a monitor trap. */
201
202 static void
203 handle_trap1 (SIM_DESC sd)
204 {
205 /* XXX: We don't pass back the actual errno value. */
206 cpu.gr[RET1] = sim_syscall (STATE_CPU (sd, 0), cpu.gr[TRAPCODE],
207 cpu.gr[PARM1], cpu.gr[PARM2], cpu.gr[PARM3],
208 cpu.gr[PARM4]);
209 }
210
211 static void
212 process_stub (SIM_DESC sd, int what)
213 {
214 /* These values should match those in libgloss/mcore/syscalls.s. */
215 switch (what)
216 {
217 case 3: /* _read */
218 case 4: /* _write */
219 case 5: /* _open */
220 case 6: /* _close */
221 case 10: /* _unlink */
222 case 19: /* _lseek */
223 case 43: /* _times */
224 cpu.gr [TRAPCODE] = what;
225 handle_trap1 (sd);
226 break;
227
228 default:
229 if (STATE_VERBOSE_P (sd))
230 fprintf (stderr, "Unhandled stub opcode: %d\n", what);
231 break;
232 }
233 }
234
235 static void
236 util (SIM_DESC sd, SIM_CPU *scpu, unsigned what)
237 {
238 switch (what)
239 {
240 case 0: /* exit */
241 sim_engine_halt (sd, scpu, NULL, scpu->pc, sim_exited, cpu.gr[PARM1]);
242 break;
243
244 case 1: /* printf */
245 if (STATE_VERBOSE_P (sd))
246 fprintf (stderr, "WARNING: printf unimplemented\n");
247 break;
248
249 case 2: /* scanf */
250 if (STATE_VERBOSE_P (sd))
251 fprintf (stderr, "WARNING: scanf unimplemented\n");
252 break;
253
254 case 3: /* utime */
255 cpu.gr[RET1] = cpu.asregs.insts;
256 break;
257
258 case 0xFF:
259 process_stub (sd, cpu.gr[1]);
260 break;
261
262 default:
263 if (STATE_VERBOSE_P (sd))
264 fprintf (stderr, "Unhandled util code: %x\n", what);
265 break;
266 }
267 }
268
269 /* For figuring out whether we carried; addc/subc use this. */
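/* The sum is formed 16 bits at a time so the intermediate values cannot
   overflow a 32-bit unsigned long on the host; the final shift leaves
   just the carry out of bit 31.  */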
270 static int
271 iu_carry (unsigned long a, unsigned long b, int cin)
272 {
273 unsigned long x;
274
275 x = (a & 0xffff) + (b & 0xffff) + cin;
276 x = (x >> 16) + (a >> 16) + (b >> 16);
277 x >>= 16;
278
279 return (x != 0);
280 }
281
282 /* TODO: Convert to common watchpoints. */
283 #undef WATCHFUNCTIONS
284 #ifdef WATCHFUNCTIONS
285
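/* Watched-function profiling: WL holds the watched entry addresses, and
   WLcnts/WLcyc/WLmax/WLmin accumulate per-function call counts and cycle
   statistics that sim_info reports.  */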
286 #define MAXWL 80
287 word WL[MAXWL];
288 char * WLstr[MAXWL];
289
290 int ENDWL=0;
291 int WLincyc;
292 int WLcyc[MAXWL];
293 int WLcnts[MAXWL];
294 int WLmax[MAXWL];
295 int WLmin[MAXWL];
296 word WLendpc;
297 int WLbcyc;
298 int WLW;
299 #endif
300
301 #define RD (inst & 0xF)
302 #define RS ((inst >> 4) & 0xF)
303 #define RX ((inst >> 8) & 0xF)
304 #define IMM5 ((inst >> 4) & 0x1F)
305 #define IMM4 ((inst) & 0xF)
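/* Instruction operand fields: RD is bits 0-3, RS bits 4-7 and RX bits
   8-11 of the 16-bit instruction; IMM5 is the 5-bit immediate in bits
   4-8 and IMM4 the 4-bit immediate in bits 0-3.  */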
306
307 #define rbat(X) sim_core_read_1 (scpu, 0, read_map, X)
308 #define rhat(X) sim_core_read_2 (scpu, 0, read_map, X)
309 #define rlat(X) sim_core_read_4 (scpu, 0, read_map, X)
310 #define wbat(X, D) sim_core_write_1 (scpu, 0, write_map, X, D)
311 #define what(X, D) sim_core_write_2 (scpu, 0, write_map, X, D)
312 #define wlat(X, D) sim_core_write_4 (scpu, 0, write_map, X, D)
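/* Byte, halfword and word memory accessors ("read/write <size> at"),
   implemented via the common sim core.  */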
313
314 static int tracing = 0;
315
316 #define ILLEGAL() \
317 sim_engine_halt (sd, scpu, NULL, pc, sim_stopped, SIM_SIGILL)
318
319 static void
320 step_once (SIM_DESC sd, SIM_CPU *scpu)
321 {
322 int needfetch;
323 word ibuf;
324 word pc;
325 unsigned short inst;
326 int memops;
327 int bonus_cycles;
328 int insts;
329 int w;
330 int cycs;
331 #ifdef WATCHFUNCTIONS
332 word WLhash;
333 #endif
334
335 pc = CPU_PC_GET (scpu);
336
337 /* Fetch the initial instructions that we'll decode. */
338 ibuf = rlat (pc & 0xFFFFFFFC);
339 needfetch = 0;
340
341 memops = 0;
342 bonus_cycles = 0;
343 insts = 0;
344
345 /* make our register set point to the right place */
346 if (SR_AF ())
347 cpu.asregs.active_gregs = & cpu.asregs.alt_gregs[0];
348 else
349 cpu.asregs.active_gregs = & cpu.asregs.gregs[0];
350
351 #ifdef WATCHFUNCTIONS
352 /* make a hash to speed exec loop, hope it's nonzero */
353 WLhash = 0xFFFFFFFF;
354
355 for (w = 1; w <= ENDWL; w++)
356 WLhash = WLhash & WL[w];
357 #endif
358
359 /* TODO: Unindent this block. */
360 {
361 word oldpc;
362
363 insts ++;
364
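/* The fetch buffer holds two 16-bit instructions; bit 1 of the PC selects
   which half holds the current instruction.  On a little-endian target the
   second halfword in memory lands in the upper 16 bits of the fetched
   word, hence the byte-order test below.  */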
365 if (pc & 02)
366 {
367 if (! target_big_endian)
368 inst = ibuf >> 16;
369 else
370 inst = ibuf & 0xFFFF;
371 needfetch = 1;
372 }
373 else
374 {
375 if (! target_big_endian)
376 inst = ibuf & 0xFFFF;
377 else
378 inst = ibuf >> 16;
379 }
380
381 #ifdef WATCHFUNCTIONS
382 /* now scan list of watch addresses, if match, count it and
383 note return address and count cycles until pc=return address */
384
385 if ((WLincyc == 1) && (pc == WLendpc))
386 {
387 cycs = (cpu.asregs.cycles + (insts + bonus_cycles +
388 (memops * memcycles)) - WLbcyc);
389
390 if (WLcnts[WLW] == 1)
391 {
392 WLmax[WLW] = cycs;
393 WLmin[WLW] = cycs;
394 WLcyc[WLW] = 0;
395 }
396
397 if (cycs > WLmax[WLW])
398 {
399 WLmax[WLW] = cycs;
400 }
401
402 if (cycs < WLmin[WLW])
403 {
404 WLmin[WLW] = cycs;
405 }
406
407 WLcyc[WLW] += cycs;
408 WLincyc = 0;
409 WLendpc = 0;
410 }
411
412 /* Optimize with a hash to speed loop. */
413 if (WLincyc == 0)
414 {
415 if ((WLhash == 0) || ((WLhash & pc) != 0))
416 {
417 for (w=1; w <= ENDWL; w++)
418 {
419 if (pc == WL[w])
420 {
421 WLcnts[w]++;
422 WLbcyc = cpu.asregs.cycles + insts
423 + bonus_cycles + (memops * memcycles);
424 WLendpc = cpu.gr[15];
425 WLincyc = 1;
426 WLW = w;
427 break;
428 }
429 }
430 }
431 }
432 #endif
433
434 if (tracing)
435 fprintf (stderr, "%.4lx: inst = %.4x ", pc, inst);
436
437 oldpc = pc;
438
439 pc += 2;
440
441 switch (inst >> 8)
442 {
443 case 0x00:
444 switch RS
445 {
446 case 0x0:
447 switch RD
448 {
449 case 0x0: /* bkpt */
450 pc -= 2; /* leave the PC at the bkpt instruction itself */
451 sim_engine_halt (sd, scpu, NULL, pc,
452 sim_stopped, SIM_SIGTRAP);
453 break;
454
455 case 0x1: /* sync */
456 break;
457
458 case 0x2: /* rte */
459 pc = cpu.epc;
460 cpu.sr = cpu.esr;
461 needfetch = 1;
462
463 if (SR_AF ())
464 cpu.asregs.active_gregs = & cpu.asregs.alt_gregs[0];
465 else
466 cpu.asregs.active_gregs = & cpu.asregs.gregs[0];
467 break;
468
469 case 0x3: /* rfi */
470 pc = cpu.fpc;
471 cpu.sr = cpu.fsr;
472 needfetch = 1;
473
474 if (SR_AF ())
475 cpu.asregs.active_gregs = &cpu.asregs.alt_gregs[0];
476 else
477 cpu.asregs.active_gregs = &cpu.asregs.gregs[0];
478 break;
479
480 case 0x4: /* stop */
481 if (STATE_VERBOSE_P (sd))
482 fprintf (stderr, "WARNING: stop unimplemented\n");
483 break;
484
485 case 0x5: /* wait */
486 if (STATE_VERBOSE_P (sd))
487 fprintf (stderr, "WARNING: wait unimplemented\n");
488 break;
489
490 case 0x6: /* doze */
491 if (STATE_VERBOSE_P (sd))
492 fprintf (stderr, "WARNING: doze unimplemented\n");
493 break;
494
495 case 0x7:
496 ILLEGAL (); /* illegal */
497 break;
498
499 case 0x8: /* trap 0 */
500 case 0xA: /* trap 2 */
501 case 0xB: /* trap 3 */
502 sim_engine_halt (sd, scpu, NULL, pc,
503 sim_stopped, SIM_SIGTRAP);
504 break;
505
506 case 0xC: /* trap 4 */
507 case 0xD: /* trap 5 */
508 case 0xE: /* trap 6 */
509 ILLEGAL (); /* illegal */
510 break;
511
512 case 0xF: /* trap 7 */
513 sim_engine_halt (sd, scpu, NULL, pc, /* integer div-by-0 */
514 sim_stopped, SIM_SIGTRAP);
515 break;
516
517 case 0x9: /* trap 1 */
518 handle_trap1 (sd);
519 break;
520 }
521 break;
522
523 case 0x1:
524 ILLEGAL (); /* illegal */
525 break;
526
527 case 0x2: /* mvc */
528 cpu.gr[RD] = C_VALUE();
529 break;
530 case 0x3: /* mvcv */
531 cpu.gr[RD] = C_OFF();
532 break;
533 case 0x4: /* ldq */
534 {
535 word addr = cpu.gr[RD];
536 int regno = 4; /* always r4-r7 */
537
538 bonus_cycles++;
539 memops += 4;
540 do
541 {
542 cpu.gr[regno] = rlat(addr);
543 addr += 4;
544 regno++;
545 }
546 while ((regno&0x3) != 0);
547 }
548 break;
549 case 0x5: /* stq */
550 {
551 word addr = cpu.gr[RD];
552 int regno = 4; /* always r4-r7 */
553
554 memops += 4;
555 bonus_cycles++;
556 do
557 {
558 wlat(addr, cpu.gr[regno]);
559 addr += 4;
560 regno++;
561 }
562 while ((regno & 0x3) != 0);
563 }
564 break;
565 case 0x6: /* ldm */
566 {
567 word addr = cpu.gr[0];
568 int regno = RD;
569
570 /* bonus cycle is really only needed if
571 the next insn shifts the last reg loaded.
572
573 bonus_cycles++;
574 */
575 memops += 16-regno;
576 while (regno <= 0xF)
577 {
578 cpu.gr[regno] = rlat(addr);
579 addr += 4;
580 regno++;
581 }
582 }
583 break;
584 case 0x7: /* stm */
585 {
586 word addr = cpu.gr[0];
587 int regno = RD;
588
589 /* this should be removed! */
590 /* bonus_cycles ++; */
591
592 memops += 16 - regno;
593 while (regno <= 0xF)
594 {
595 wlat(addr, cpu.gr[regno]);
596 addr += 4;
597 regno++;
598 }
599 }
600 break;
601
602 case 0x8: /* dect */
603 cpu.gr[RD] -= C_VALUE();
604 break;
605 case 0x9: /* decf */
606 cpu.gr[RD] -= C_OFF();
607 break;
608 case 0xA: /* inct */
609 cpu.gr[RD] += C_VALUE();
610 break;
611 case 0xB: /* incf */
612 cpu.gr[RD] += C_OFF();
613 break;
614 case 0xC: /* jmp */
615 pc = cpu.gr[RD];
616 if (tracing && RD == 15)
617 fprintf (stderr, "Func return, r2 = %lxx, r3 = %lx\n",
618 cpu.gr[2], cpu.gr[3]);
619 bonus_cycles++;
620 needfetch = 1;
621 break;
622 case 0xD: /* jsr */
623 cpu.gr[15] = pc;
624 pc = cpu.gr[RD];
625 bonus_cycles++;
626 needfetch = 1;
627 break;
628 case 0xE: /* ff1 */
629 {
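/* Count how many leading zero bits precede the first set bit;
   the result is 32 if the register is zero.  */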
630 word tmp, i;
631 tmp = cpu.gr[RD];
632 for (i = 0; !(tmp & 0x80000000) && i < 32; i++)
633 tmp <<= 1;
634 cpu.gr[RD] = i;
635 }
636 break;
637 case 0xF: /* brev */
638 {
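/* Reverse the bit order by swapping successively larger groups:
   adjacent bits, then 2-bit pairs, nibbles, bytes and halfwords.  */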
639 word tmp;
640 tmp = cpu.gr[RD];
641 tmp = ((tmp & 0xaaaaaaaa) >> 1) | ((tmp & 0x55555555) << 1);
642 tmp = ((tmp & 0xcccccccc) >> 2) | ((tmp & 0x33333333) << 2);
643 tmp = ((tmp & 0xf0f0f0f0) >> 4) | ((tmp & 0x0f0f0f0f) << 4);
644 tmp = ((tmp & 0xff00ff00) >> 8) | ((tmp & 0x00ff00ff) << 8);
645 cpu.gr[RD] = ((tmp & 0xffff0000) >> 16) | ((tmp & 0x0000ffff) << 16);
646 }
647 break;
648 }
649 break;
650 case 0x01:
651 switch RS
652 {
653 case 0x0: /* xtrb3 */
654 cpu.gr[1] = (cpu.gr[RD]) & 0xFF;
655 NEW_C (cpu.gr[RD] != 0);
656 break;
657 case 0x1: /* xtrb2 */
658 cpu.gr[1] = (cpu.gr[RD]>>8) & 0xFF;
659 NEW_C (cpu.gr[RD] != 0);
660 break;
661 case 0x2: /* xtrb1 */
662 cpu.gr[1] = (cpu.gr[RD]>>16) & 0xFF;
663 NEW_C (cpu.gr[RD] != 0);
664 break;
665 case 0x3: /* xtrb0 */
666 cpu.gr[1] = (cpu.gr[RD]>>24) & 0xFF;
667 NEW_C (cpu.gr[RD] != 0);
668 break;
669 case 0x4: /* zextb */
670 cpu.gr[RD] &= 0x000000FF;
671 break;
672 case 0x5: /* sextb */
673 {
674 long tmp;
675 tmp = cpu.gr[RD];
676 tmp <<= 24;
677 tmp >>= 24;
678 cpu.gr[RD] = tmp;
679 }
680 break;
681 case 0x6: /* zexth */
682 cpu.gr[RD] &= 0x0000FFFF;
683 break;
684 case 0x7: /* sexth */
685 {
686 long tmp;
687 tmp = cpu.gr[RD];
688 tmp <<= 16;
689 tmp >>= 16;
690 cpu.gr[RD] = tmp;
691 }
692 break;
693 case 0x8: /* declt */
694 --cpu.gr[RD];
695 NEW_C ((long)cpu.gr[RD] < 0);
696 break;
697 case 0x9: /* tstnbz */
698 {
699 word tmp = cpu.gr[RD];
700 NEW_C ((tmp & 0xFF000000) != 0 &&
701 (tmp & 0x00FF0000) != 0 && (tmp & 0x0000FF00) != 0 &&
702 (tmp & 0x000000FF) != 0);
703 }
704 break;
705 case 0xA: /* decgt */
706 --cpu.gr[RD];
707 NEW_C ((long)cpu.gr[RD] > 0);
708 break;
709 case 0xB: /* decne */
710 --cpu.gr[RD];
711 NEW_C ((long)cpu.gr[RD] != 0);
712 break;
713 case 0xC: /* clrt */
714 if (C_ON())
715 cpu.gr[RD] = 0;
716 break;
717 case 0xD: /* clrf */
718 if (C_OFF())
719 cpu.gr[RD] = 0;
720 break;
721 case 0xE: /* abs */
722 if (cpu.gr[RD] & 0x80000000)
723 cpu.gr[RD] = ~cpu.gr[RD] + 1;
724 break;
725 case 0xF: /* not */
726 cpu.gr[RD] = ~cpu.gr[RD];
727 break;
728 }
729 break;
730 case 0x02: /* movt */
731 if (C_ON())
732 cpu.gr[RD] = cpu.gr[RS];
733 break;
734 case 0x03: /* mult */
735 /* consume 2 bits per cycle from rs, until rs is 0 */
736 {
737 unsigned int t = cpu.gr[RS];
738 int ticks;
739 for (ticks = 0; t != 0 ; t >>= 2)
740 ticks++;
741 bonus_cycles += ticks;
742 }
743 bonus_cycles += 2; /* min. is 3, so add 2, plus ticks above */
744 if (tracing)
745 fprintf (stderr, " mult %lx by %lx to give %lx",
746 cpu.gr[RD], cpu.gr[RS], cpu.gr[RD] * cpu.gr[RS]);
747 cpu.gr[RD] = cpu.gr[RD] * cpu.gr[RS];
748 break;
749 case 0x04: /* loopt */
750 if (C_ON())
751 {
752 pc += (IMM4 << 1) - 32;
753 bonus_cycles ++;
754 needfetch = 1;
755 }
756 --cpu.gr[RS]; /* not RD! */
757 NEW_C (((long)cpu.gr[RS]) > 0);
758 break;
759 case 0x05: /* subu */
760 cpu.gr[RD] -= cpu.gr[RS];
761 break;
762 case 0x06: /* addc */
763 {
764 unsigned long tmp, a, b;
765 a = cpu.gr[RD];
766 b = cpu.gr[RS];
767 cpu.gr[RD] = a + b + C_VALUE ();
768 tmp = iu_carry (a, b, C_VALUE ());
769 NEW_C (tmp);
770 }
771 break;
772 case 0x07: /* subc */
773 {
774 unsigned long tmp, a, b;
775 a = cpu.gr[RD];
776 b = cpu.gr[RS];
777 cpu.gr[RD] = a - b + C_VALUE () - 1;
778 tmp = iu_carry (a,~b, C_VALUE ());
779 NEW_C (tmp);
780 }
781 break;
782 case 0x08: /* illegal */
783 case 0x09: /* illegal*/
784 ILLEGAL ();
785 break;
786 case 0x0A: /* movf */
787 if (C_OFF())
788 cpu.gr[RD] = cpu.gr[RS];
789 break;
790 case 0x0B: /* lsr */
791 {
792 unsigned long dst, src;
793 dst = cpu.gr[RD];
794 src = cpu.gr[RS];
795 /* We must not rely solely upon the native shift operations, since they
796 may not match the M*Core's behaviour on boundary conditions. */
797 dst = src > 31 ? 0 : dst >> src;
798 cpu.gr[RD] = dst;
799 }
800 break;
801 case 0x0C: /* cmphs */
802 NEW_C ((unsigned long )cpu.gr[RD] >=
803 (unsigned long)cpu.gr[RS]);
804 break;
805 case 0x0D: /* cmplt */
806 NEW_C ((long)cpu.gr[RD] < (long)cpu.gr[RS]);
807 break;
808 case 0x0E: /* tst */
809 NEW_C ((cpu.gr[RD] & cpu.gr[RS]) != 0);
810 break;
811 case 0x0F: /* cmpne */
812 NEW_C (cpu.gr[RD] != cpu.gr[RS]);
813 break;
814 case 0x10: case 0x11: /* mfcr */
815 {
816 unsigned r;
817 r = IMM5;
818 if (r <= LAST_VALID_CREG)
819 cpu.gr[RD] = cpu.cr[r];
820 else
821 ILLEGAL ();
822 }
823 break;
824
825 case 0x12: /* mov */
826 cpu.gr[RD] = cpu.gr[RS];
827 if (tracing)
828 fprintf (stderr, "MOV %lx into reg %d", cpu.gr[RD], RD);
829 break;
830
831 case 0x13: /* bgenr */
832 if (cpu.gr[RS] & 0x20)
833 cpu.gr[RD] = 0;
834 else
835 cpu.gr[RD] = 1 << (cpu.gr[RS] & 0x1F);
836 break;
837
838 case 0x14: /* rsub */
839 cpu.gr[RD] = cpu.gr[RS] - cpu.gr[RD];
840 break;
841
842 case 0x15: /* ixw */
843 cpu.gr[RD] += cpu.gr[RS]<<2;
844 break;
845
846 case 0x16: /* and */
847 cpu.gr[RD] &= cpu.gr[RS];
848 break;
849
850 case 0x17: /* xor */
851 cpu.gr[RD] ^= cpu.gr[RS];
852 break;
853
854 case 0x18: case 0x19: /* mtcr */
855 {
856 unsigned r;
857 r = IMM5;
858 if (r <= LAST_VALID_CREG)
859 cpu.cr[r] = cpu.gr[RD];
860 else
861 ILLEGAL ();
862
863 /* we might have changed register sets... */
864 if (SR_AF ())
865 cpu.asregs.active_gregs = & cpu.asregs.alt_gregs[0];
866 else
867 cpu.asregs.active_gregs = & cpu.asregs.gregs[0];
868 }
869 break;
870
871 case 0x1A: /* asr */
872 /* We must not rely solely upon the native shift operations, since they
873 may not match the M*Core's behaviour on boundary conditions. */
874 if (cpu.gr[RS] > 30)
875 cpu.gr[RD] = ((long) cpu.gr[RD]) < 0 ? -1 : 0;
876 else
877 cpu.gr[RD] = (long) cpu.gr[RD] >> cpu.gr[RS];
878 break;
879
880 case 0x1B: /* lsl */
881 /* We must not rely solely upon the native shift operations, since they
882 may not match the M*Core's behaviour on boundary conditions. */
883 cpu.gr[RD] = cpu.gr[RS] > 31 ? 0 : cpu.gr[RD] << cpu.gr[RS];
884 break;
885
886 case 0x1C: /* addu */
887 cpu.gr[RD] += cpu.gr[RS];
888 break;
889
890 case 0x1D: /* ixh */
891 cpu.gr[RD] += cpu.gr[RS] << 1;
892 break;
893
894 case 0x1E: /* or */
895 cpu.gr[RD] |= cpu.gr[RS];
896 break;
897
898 case 0x1F: /* andn */
899 cpu.gr[RD] &= ~cpu.gr[RS];
900 break;
901 case 0x20: case 0x21: /* addi */
902 cpu.gr[RD] =
903 cpu.gr[RD] + (IMM5 + 1);
904 break;
905 case 0x22: case 0x23: /* cmplti */
906 {
907 int tmp = (IMM5 + 1);
908 if (cpu.gr[RD] < tmp)
909 {
910 SET_C();
911 }
912 else
913 {
914 CLR_C();
915 }
916 }
917 break;
918 case 0x24: case 0x25: /* subi */
919 cpu.gr[RD] =
920 cpu.gr[RD] - (IMM5 + 1);
921 break;
922 case 0x26: case 0x27: /* illegal */
923 ILLEGAL ();
924 break;
925 case 0x28: case 0x29: /* rsubi */
926 cpu.gr[RD] =
927 IMM5 - cpu.gr[RD];
928 break;
929 case 0x2A: case 0x2B: /* cmpnei */
930 if (cpu.gr[RD] != IMM5)
931 {
932 SET_C();
933 }
934 else
935 {
936 CLR_C();
937 }
938 break;
939
940 case 0x2C: case 0x2D: /* bmaski, divu */
941 {
942 unsigned imm = IMM5;
943
944 if (imm == 1)
945 {
946 int exe;
947 int rxnlz, r1nlz;
948 unsigned int rx, r1;
949
950 rx = cpu.gr[RD];
951 r1 = cpu.gr[1];
952 exe = 0;
953
954 /* unsigned divide */
955 cpu.gr[RD] = (word) ((unsigned int) cpu.gr[RD] / (unsigned int)cpu.gr[1] );
956
957 /* compute bonus_cycles for divu */
958 for (r1nlz = 0; ((r1 & 0x80000000) == 0) && (r1nlz < 32); r1nlz ++)
959 r1 = r1 << 1;
960
961 for (rxnlz = 0; ((rx & 0x80000000) == 0) && (rxnlz < 32); rxnlz ++)
962 rx = rx << 1;
963
964 if (r1nlz < rxnlz)
965 exe += 4;
966 else
967 exe += 5 + r1nlz - rxnlz;
968
969 if (exe >= (2 * memcycles - 1))
970 {
971 bonus_cycles += exe - (2 * memcycles) + 1;
972 }
973 }
974 else if (imm == 0 || imm >= 8)
975 {
976 /* bmaski */
977 if (imm == 0)
978 cpu.gr[RD] = -1;
979 else
980 cpu.gr[RD] = (1 << imm) - 1;
981 }
982 else
983 {
984 /* illegal */
985 ILLEGAL ();
986 }
987 }
988 break;
989 case 0x2E: case 0x2F: /* andi */
990 cpu.gr[RD] = cpu.gr[RD] & IMM5;
991 break;
992 case 0x30: case 0x31: /* bclri */
993 cpu.gr[RD] = cpu.gr[RD] & ~(1<<IMM5);
994 break;
995 case 0x32: case 0x33: /* bgeni, divs */
996 {
997 unsigned imm = IMM5;
998 if (imm == 1)
999 {
1000 int exe,sc;
1001 int rxnlz, r1nlz;
1002 signed int rx, r1;
1003
1004 /* compute bonus_cycles for divs */
1005 rx = cpu.gr[RD];
1006 r1 = cpu.gr[1];
1007 exe = 0;
1008
1009 if (((rx < 0) && (r1 > 0)) || ((rx >= 0) && (r1 < 0)))
1010 sc = 1;
1011 else
1012 sc = 0;
1013
1014 rx = abs (rx);
1015 r1 = abs (r1);
1016
1017 /* signed divide, general registers are of type int, so / op is OK */
1018 cpu.gr[RD] = cpu.gr[RD] / cpu.gr[1];
1019
1020 for (r1nlz = 0; ((r1 & 0x80000000) == 0) && (r1nlz < 32) ; r1nlz ++ )
1021 r1 = r1 << 1;
1022
1023 for (rxnlz = 0; ((rx & 0x80000000) == 0) && (rxnlz < 32) ; rxnlz ++ )
1024 rx = rx << 1;
1025
1026 if (r1nlz < rxnlz)
1027 exe += 5;
1028 else
1029 exe += 6 + r1nlz - rxnlz + sc;
1030
1031 if (exe >= (2 * memcycles - 1))
1032 {
1033 bonus_cycles += exe - (2 * memcycles) + 1;
1034 }
1035 }
1036 else if (imm >= 7)
1037 {
1038 /* bgeni */
1039 cpu.gr[RD] = (1 << IMM5);
1040 }
1041 else
1042 {
1043 /* illegal */
1044 ILLEGAL ();
1045 }
1046 break;
1047 }
1048 case 0x34: case 0x35: /* bseti */
1049 cpu.gr[RD] = cpu.gr[RD] | (1 << IMM5);
1050 break;
1051 case 0x36: case 0x37: /* btsti */
1052 NEW_C (cpu.gr[RD] >> IMM5);
1053 break;
1054 case 0x38: case 0x39: /* xsr, rotli */
1055 {
1056 unsigned imm = IMM5;
1057 unsigned long tmp = cpu.gr[RD];
1058 if (imm == 0)
1059 {
1060 word cbit;
1061 cbit = C_VALUE();
1062 NEW_C (tmp);
1063 cpu.gr[RD] = (cbit << 31) | (tmp >> 1);
1064 }
1065 else
1066 cpu.gr[RD] = (tmp << imm) | (tmp >> (32 - imm));
1067 }
1068 break;
1069 case 0x3A: case 0x3B: /* asrc, asri */
1070 {
1071 unsigned imm = IMM5;
1072 long tmp = cpu.gr[RD];
1073 if (imm == 0)
1074 {
1075 NEW_C (tmp);
1076 cpu.gr[RD] = tmp >> 1;
1077 }
1078 else
1079 cpu.gr[RD] = tmp >> imm;
1080 }
1081 break;
1082 case 0x3C: case 0x3D: /* lslc, lsli */
1083 {
1084 unsigned imm = IMM5;
1085 unsigned long tmp = cpu.gr[RD];
1086 if (imm == 0)
1087 {
1088 NEW_C (tmp >> 31);
1089 cpu.gr[RD] = tmp << 1;
1090 }
1091 else
1092 cpu.gr[RD] = tmp << imm;
1093 }
1094 break;
1095 case 0x3E: case 0x3F: /* lsrc, lsri */
1096 {
1097 unsigned imm = IMM5;
1098 unsigned long tmp = cpu.gr[RD];
1099 if (imm == 0)
1100 {
1101 NEW_C (tmp);
1102 cpu.gr[RD] = tmp >> 1;
1103 }
1104 else
1105 cpu.gr[RD] = tmp >> imm;
1106 }
1107 break;
1108 case 0x40: case 0x41: case 0x42: case 0x43:
1109 case 0x44: case 0x45: case 0x46: case 0x47:
1110 case 0x48: case 0x49: case 0x4A: case 0x4B:
1111 case 0x4C: case 0x4D: case 0x4E: case 0x4F:
1112 ILLEGAL ();
1113 break;
1114 case 0x50:
1115 util (sd, scpu, inst & 0xFF);
1116 break;
1117 case 0x51: case 0x52: case 0x53:
1118 case 0x54: case 0x55: case 0x56: case 0x57:
1119 case 0x58: case 0x59: case 0x5A: case 0x5B:
1120 case 0x5C: case 0x5D: case 0x5E: case 0x5F:
1121 ILLEGAL ();
1122 break;
1123 case 0x60: case 0x61: case 0x62: case 0x63: /* movi */
1124 case 0x64: case 0x65: case 0x66: case 0x67:
1125 cpu.gr[RD] = (inst >> 4) & 0x7F;
1126 break;
1127 case 0x68: case 0x69: case 0x6A: case 0x6B:
1128 case 0x6C: case 0x6D: case 0x6E: case 0x6F: /* illegal */
1129 ILLEGAL ();
1130 break;
1131 case 0x71: case 0x72: case 0x73:
1132 case 0x74: case 0x75: case 0x76: case 0x77:
1133 case 0x78: case 0x79: case 0x7A: case 0x7B:
1134 case 0x7C: case 0x7D: case 0x7E: /* lrw */
1135 cpu.gr[RX] = rlat ((pc + ((inst & 0xFF) << 2)) & 0xFFFFFFFC);
1136 if (tracing)
1137 fprintf (stderr, "LRW of 0x%x from 0x%lx to reg %d",
1138 rlat ((pc + ((inst & 0xFF) << 2)) & 0xFFFFFFFC),
1139 (pc + ((inst & 0xFF) << 2)) & 0xFFFFFFFC, RX);
1140 memops++;
1141 break;
1142 case 0x7F: /* jsri */
1143 cpu.gr[15] = pc;
1144 if (tracing)
1145 fprintf (stderr,
1146 "func call: r2 = %lx r3 = %lx r4 = %lx r5 = %lx r6 = %lx r7 = %lx\n",
1147 cpu.gr[2], cpu.gr[3], cpu.gr[4], cpu.gr[5], cpu.gr[6], cpu.gr[7]);
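/* Fall through to jmpi: the target address is fetched the same way.  */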
1148 case 0x70: /* jmpi */
1149 pc = rlat ((pc + ((inst & 0xFF) << 2)) & 0xFFFFFFFC);
1150 memops++;
1151 bonus_cycles++;
1152 needfetch = 1;
1153 break;
1154
1155 case 0x80: case 0x81: case 0x82: case 0x83:
1156 case 0x84: case 0x85: case 0x86: case 0x87:
1157 case 0x88: case 0x89: case 0x8A: case 0x8B:
1158 case 0x8C: case 0x8D: case 0x8E: case 0x8F: /* ld */
1159 cpu.gr[RX] = rlat (cpu.gr[RD] + ((inst >> 2) & 0x003C));
1160 if (tracing)
1161 fprintf (stderr, "load reg %d from 0x%lx with 0x%lx",
1162 RX,
1163 cpu.gr[RD] + ((inst >> 2) & 0x003C), cpu.gr[RX]);
1164 memops++;
1165 break;
1166 case 0x90: case 0x91: case 0x92: case 0x93:
1167 case 0x94: case 0x95: case 0x96: case 0x97:
1168 case 0x98: case 0x99: case 0x9A: case 0x9B:
1169 case 0x9C: case 0x9D: case 0x9E: case 0x9F: /* st */
1170 wlat (cpu.gr[RD] + ((inst >> 2) & 0x003C), cpu.gr[RX]);
1171 if (tracing)
1172 fprintf (stderr, "store reg %d (containing 0x%lx) to 0x%lx",
1173 RX, cpu.gr[RX],
1174 cpu.gr[RD] + ((inst >> 2) & 0x003C));
1175 memops++;
1176 break;
1177 case 0xA0: case 0xA1: case 0xA2: case 0xA3:
1178 case 0xA4: case 0xA5: case 0xA6: case 0xA7:
1179 case 0xA8: case 0xA9: case 0xAA: case 0xAB:
1180 case 0xAC: case 0xAD: case 0xAE: case 0xAF: /* ld.b */
1181 cpu.gr[RX] = rbat (cpu.gr[RD] + RS);
1182 memops++;
1183 break;
1184 case 0xB0: case 0xB1: case 0xB2: case 0xB3:
1185 case 0xB4: case 0xB5: case 0xB6: case 0xB7:
1186 case 0xB8: case 0xB9: case 0xBA: case 0xBB:
1187 case 0xBC: case 0xBD: case 0xBE: case 0xBF: /* st.b */
1188 wbat (cpu.gr[RD] + RS, cpu.gr[RX]);
1189 memops++;
1190 break;
1191 case 0xC0: case 0xC1: case 0xC2: case 0xC3:
1192 case 0xC4: case 0xC5: case 0xC6: case 0xC7:
1193 case 0xC8: case 0xC9: case 0xCA: case 0xCB:
1194 case 0xCC: case 0xCD: case 0xCE: case 0xCF: /* ld.h */
1195 cpu.gr[RX] = rhat (cpu.gr[RD] + ((inst >> 3) & 0x001E));
1196 memops++;
1197 break;
1198 case 0xD0: case 0xD1: case 0xD2: case 0xD3:
1199 case 0xD4: case 0xD5: case 0xD6: case 0xD7:
1200 case 0xD8: case 0xD9: case 0xDA: case 0xDB:
1201 case 0xDC: case 0xDD: case 0xDE: case 0xDF: /* st.h */
1202 what (cpu.gr[RD] + ((inst >> 3) & 0x001E), cpu.gr[RX]);
1203 memops++;
1204 break;
1205 case 0xE8: case 0xE9: case 0xEA: case 0xEB:
1206 case 0xEC: case 0xED: case 0xEE: case 0xEF: /* bf */
1207 if (C_OFF())
1208 {
1209 int disp;
1210 disp = inst & 0x03FF;
1211 if (inst & 0x0400)
1212 disp |= 0xFFFFFC00;
1213 pc += disp<<1;
1214 bonus_cycles++;
1215 needfetch = 1;
1216 }
1217 break;
1218 case 0xE0: case 0xE1: case 0xE2: case 0xE3:
1219 case 0xE4: case 0xE5: case 0xE6: case 0xE7: /* bt */
1220 if (C_ON())
1221 {
1222 int disp;
1223 disp = inst & 0x03FF;
1224 if (inst & 0x0400)
1225 disp |= 0xFFFFFC00;
1226 pc += disp<<1;
1227 bonus_cycles++;
1228 needfetch = 1;
1229 }
1230 break;
1231
1232 case 0xF8: case 0xF9: case 0xFA: case 0xFB:
1233 case 0xFC: case 0xFD: case 0xFE: case 0xFF: /* bsr */
1234 cpu.gr[15] = pc;
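/* Fall through to br: the displacement is applied the same way.  */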
1235 case 0xF0: case 0xF1: case 0xF2: case 0xF3:
1236 case 0xF4: case 0xF5: case 0xF6: case 0xF7: /* br */
1237 {
1238 int disp;
1239 disp = inst & 0x03FF;
1240 if (inst & 0x0400)
1241 disp |= 0xFFFFFC00;
1242 pc += disp<<1;
1243 bonus_cycles++;
1244 needfetch = 1;
1245 }
1246 break;
1247
1248 }
1249
1250 if (tracing)
1251 fprintf (stderr, "\n");
1252
1253 if (needfetch)
1254 {
1255 ibuf = rlat (pc & 0xFFFFFFFC);
1256 needfetch = 0;
1257 }
1258 }
1259
1260 /* Hide away the things we've cached while executing. */
1261 CPU_PC_SET (scpu, pc);
1262 cpu.asregs.insts += insts; /* instructions done ... */
1263 cpu.asregs.cycles += insts; /* and each takes a cycle */
1264 cpu.asregs.cycles += bonus_cycles; /* and extra cycles for branches */
1265 cpu.asregs.cycles += memops * memcycles; /* and memop cycle delays */
1266 }
1267
1268 void
1269 sim_engine_run (SIM_DESC sd,
1270 int next_cpu_nr, /* ignore */
1271 int nr_cpus, /* ignore */
1272 int siggnal) /* ignore */
1273 {
1274 sim_cpu *scpu;
1275
1276 SIM_ASSERT (STATE_MAGIC (sd) == SIM_MAGIC_NUMBER);
1277
1278 scpu = STATE_CPU (sd, 0);
1279
1280 while (1)
1281 {
1282 step_once (sd, scpu);
1283 if (sim_events_tick (sd))
1284 sim_events_process (sd);
1285 }
1286 }
1287
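/* The register numbers used here index the mcore_regset fields in
   declaration order through the asints overlay: gregs, then alt_gregs,
   then the control registers.  */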
1288 static int
1289 mcore_reg_store (SIM_CPU *scpu, int rn, unsigned char *memory, int length)
1290 {
1291 if (rn < NUM_MCORE_REGS && rn >= 0)
1292 {
1293 if (length == 4)
1294 {
1295 long ival;
1296
1297 /* misalignment safe */
1298 ival = mcore_extract_unsigned_integer (memory, 4);
1299 cpu.asints[rn] = ival;
1300 }
1301
1302 return 4;
1303 }
1304 else
1305 return 0;
1306 }
1307
1308 static int
1309 mcore_reg_fetch (SIM_CPU *scpu, int rn, unsigned char *memory, int length)
1310 {
1311 if (rn < NUM_MCORE_REGS && rn >= 0)
1312 {
1313 if (length == 4)
1314 {
1315 long ival = cpu.asints[rn];
1316
1317 /* misalignment-safe */
1318 mcore_store_unsigned_integer (memory, 4, ival);
1319 }
1320
1321 return 4;
1322 }
1323 else
1324 return 0;
1325 }
1326
1327 void
1328 sim_info (SIM_DESC sd, int verbose)
1329 {
1330 #ifdef WATCHFUNCTIONS
1331 int w, wcyc;
1332 #endif
1333 double virttime = cpu.asregs.cycles / 36.0e6;
1334 host_callback *callback = STATE_CALLBACK (sd);
1335
1336 callback->printf_filtered (callback, "\n\n# instructions executed %10d\n",
1337 cpu.asregs.insts);
1338 callback->printf_filtered (callback, "# cycles %10d\n",
1339 cpu.asregs.cycles);
1340 callback->printf_filtered (callback, "# pipeline stalls %10d\n",
1341 cpu.asregs.stalls);
1342 callback->printf_filtered (callback, "# virtual time taken %10.4f\n",
1343 virttime);
1344
1345 #ifdef WATCHFUNCTIONS
1346 callback->printf_filtered (callback, "\nNumber of watched functions: %d\n",
1347 ENDWL);
1348
1349 wcyc = 0;
1350
1351 for (w = 1; w <= ENDWL; w++)
1352 {
1353 callback->printf_filtered (callback, "WL = %s %8x\n",WLstr[w],WL[w]);
1354 callback->printf_filtered (callback, " calls = %d, cycles = %d\n",
1355 WLcnts[w],WLcyc[w]);
1356
1357 if (WLcnts[w] != 0)
1358 callback->printf_filtered (callback,
1359 " maxcpc = %d, mincpc = %d, avecpc = %d\n",
1360 WLmax[w],WLmin[w],WLcyc[w]/WLcnts[w]);
1361 wcyc += WLcyc[w];
1362 }
1363
1364 callback->printf_filtered (callback,
1365 "Total cycles for watched functions: %d\n",wcyc);
1366 #endif
1367 }
1368
1369 static sim_cia
1370 mcore_pc_get (sim_cpu *cpu)
1371 {
1372 return cpu->pc;
1373 }
1374
1375 static void
1376 mcore_pc_set (sim_cpu *cpu, sim_cia pc)
1377 {
1378 cpu->pc = pc;
1379 }
1380
1381 static void
1382 free_state (SIM_DESC sd)
1383 {
1384 if (STATE_MODULES (sd) != NULL)
1385 sim_module_uninstall (sd);
1386 sim_cpu_free_all (sd);
1387 sim_state_free (sd);
1388 }
1389
1390 SIM_DESC
1391 sim_open (SIM_OPEN_KIND kind, host_callback *cb, struct bfd *abfd, char **argv)
1392 {
1393 int i;
1394 SIM_DESC sd = sim_state_alloc (kind, cb);
1395 SIM_ASSERT (STATE_MAGIC (sd) == SIM_MAGIC_NUMBER);
1396
1397 /* The cpu data is kept in a separately allocated chunk of memory. */
1398 if (sim_cpu_alloc_all (sd, 1, /*cgen_cpu_max_extra_bytes ()*/0) != SIM_RC_OK)
1399 {
1400 free_state (sd);
1401 return 0;
1402 }
1403
1404 if (sim_pre_argv_init (sd, argv[0]) != SIM_RC_OK)
1405 {
1406 free_state (sd);
1407 return 0;
1408 }
1409
1410 /* getopt will print the error message so we just have to exit if this fails.
1411 FIXME: Hmmm... in the case of gdb we need getopt to call
1412 print_filtered. */
1413 if (sim_parse_args (sd, argv) != SIM_RC_OK)
1414 {
1415 free_state (sd);
1416 return 0;
1417 }
1418
1419 /* Check for/establish a reference program image. */
1420 if (sim_analyze_program (sd,
1421 (STATE_PROG_ARGV (sd) != NULL
1422 ? *STATE_PROG_ARGV (sd)
1423 : NULL), abfd) != SIM_RC_OK)
1424 {
1425 free_state (sd);
1426 return 0;
1427 }
1428
1429 /* Configure/verify the target byte order and other runtime
1430 configuration options. */
1431 if (sim_config (sd) != SIM_RC_OK)
1432 {
1433 sim_module_uninstall (sd);
1434 return 0;
1435 }
1436
1437 if (sim_post_argv_init (sd) != SIM_RC_OK)
1438 {
1439 /* Uninstall the modules to avoid memory leaks,
1440 file descriptor leaks, etc. */
1441 sim_module_uninstall (sd);
1442 return 0;
1443 }
1444
1445 /* CPU specific initialization. */
1446 for (i = 0; i < MAX_NR_PROCESSORS; ++i)
1447 {
1448 SIM_CPU *cpu = STATE_CPU (sd, i);
1449
1450 CPU_REG_FETCH (cpu) = mcore_reg_fetch;
1451 CPU_REG_STORE (cpu) = mcore_reg_store;
1452 CPU_PC_FETCH (cpu) = mcore_pc_get;
1453 CPU_PC_STORE (cpu) = mcore_pc_set;
1454
1455 set_initial_gprs (cpu); /* Reset the GPR registers. */
1456 }
1457
1458 /* Default to an 8 Mbyte (== 2^23) memory space. */
1459 sim_do_commandf (sd, "memory-size %#x", DEFAULT_MEMORY_SIZE);
1460
1461 return sd;
1462 }
1463
1464 SIM_RC
1465 sim_create_inferior (SIM_DESC sd, struct bfd *prog_bfd, char **argv, char **env)
1466 {
1467 SIM_CPU *scpu = STATE_CPU (sd, 0);
1468 char ** avp;
1469 int nargs = 0;
1470 int nenv = 0;
1471 int s_length;
1472 int l;
1473 unsigned long strings;
1474 unsigned long pointers;
1475 unsigned long hi_stack;
1476
1477
1478 /* Set the initial register set. */
1479 set_initial_gprs (scpu);
1480
1481 hi_stack = DEFAULT_MEMORY_SIZE - 4;
1482 CPU_PC_SET (scpu, bfd_get_start_address (prog_bfd));
1483
1484 /* Calculate the argument and environment strings. */
1485 s_length = 0;
1486 nargs = 0;
1487 avp = argv;
1488 while (avp && *avp)
1489 {
1490 l = strlen (*avp) + 1; /* include the null */
1491 s_length += (l + 3) & ~3; /* make it a 4 byte boundary */
1492 nargs++; avp++;
1493 }
1494
1495 nenv = 0;
1496 avp = env;
1497 while (avp && *avp)
1498 {
1499 l = strlen (*avp) + 1; /* include the null */
1500 s_length += (l + 3) & ~ 3;/* make it a 4 byte boundary */
1501 nenv++; avp++;
1502 }
1503
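/* Lay out the startup data below the top of memory: the argv/envp
   pointer array highest, the strings below it, and the initial stack
   pointer (r0) at the bottom of the string area, 8-byte aligned.  */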
1504 /* Claim some memory for the pointers and strings. */
1505 pointers = hi_stack - sizeof(word) * (nenv+1+nargs+1);
1506 pointers &= ~3; /* must be 4-byte aligned */
1507 cpu.gr[0] = pointers;
1508
1509 strings = cpu.gr[0] - s_length;
1510 strings &= ~3; /* want to make it 4-byte aligned */
1511 cpu.gr[0] = strings;
1512 /* dac fix, the stack address must be 8-byte aligned! */
1513 cpu.gr[0] = cpu.gr[0] - cpu.gr[0] % 8;
1514
1515 /* Loop through the arguments and fill them in. */
1516 cpu.gr[PARM1] = nargs;
1517 if (nargs == 0)
1518 {
1519 /* No strings to fill in. */
1520 cpu.gr[PARM2] = 0;
1521 }
1522 else
1523 {
1524 cpu.gr[PARM2] = pointers;
1525 avp = argv;
1526 while (avp && *avp)
1527 {
1528 /* Save where we're putting it. */
1529 wlat (pointers, strings);
1530
1531 /* Copy the string. */
1532 l = strlen (* avp) + 1;
1533 sim_core_write_buffer (sd, scpu, write_map, *avp, strings, l);
1534
1535 /* Bump the pointers. */
1536 avp++;
1537 pointers += 4;
1538 strings += l+1;
1539 }
1540
1541 /* A null to finish the list. */
1542 wlat (pointers, 0);
1543 pointers += 4;
1544 }
1545
1546 /* Now do the environment pointers. */
1547 if (nenv == 0)
1548 {
1549 /* No strings to fill in. */
1550 cpu.gr[PARM3] = 0;
1551 }
1552 else
1553 {
1554 cpu.gr[PARM3] = pointers;
1555 avp = env;
1556
1557 while (avp && *avp)
1558 {
1559 /* Save where we're putting it. */
1560 wlat (pointers, strings);
1561
1562 /* Copy the string. */
1563 l = strlen (* avp) + 1;
1564 sim_core_write_buffer (sd, scpu, write_map, *avp, strings, l);
1565
1566 /* Bump the pointers. */
1567 avp++;
1568 pointers += 4;
1569 strings += l+1;
1570 }
1571
1572 /* A null to finish the list. */
1573 wlat (pointers, 0);
1574 pointers += 4;
1575 }
1576
1577 return SIM_RC_OK;
1578 }