/* frv simulator support code
   Copyright (C) 1998, 1999, 2000, 2001, 2003 Free Software Foundation, Inc.
   Contributed by Red Hat.

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */

#define WANT_CPU
#define WANT_CPU_FRVBF

#include "sim-main.h"
#include "cgen-mem.h"
#include "cgen-ops.h"
#include "cgen-engine.h"
#include "cgen-par.h"
#include "bfd.h"
#include <math.h>

/* Maintain a flag in order to know when to write the address of the next
   VLIW instruction into the LR register.  Used by the JMPL, JMPIL, and CALL
   insns.  */
int frvbf_write_next_vliw_addr_to_LR;

/* The contents of BUF are in target byte order.  */
int
frvbf_fetch_register (SIM_CPU *current_cpu, int rn, unsigned char *buf, int len)
{
  if (rn <= GR_REGNUM_MAX)
    SETTSI (buf, GET_H_GR (rn));
  else if (rn <= FR_REGNUM_MAX)
    SETTSI (buf, GET_H_FR (rn - GR_REGNUM_MAX - 1));
  else if (rn == PC_REGNUM)
    SETTSI (buf, GET_H_PC ());
  else if (rn >= SPR_REGNUM_MIN && rn <= SPR_REGNUM_MAX)
    {
      /* Make sure the register is implemented.  */
      FRV_REGISTER_CONTROL *control = CPU_REGISTER_CONTROL (current_cpu);
      int spr = rn - SPR_REGNUM_MIN;
      if (! control->spr[spr].implemented)
        return 0;
      SETTSI (buf, GET_H_SPR (spr));
    }
  else
    {
      SETTSI (buf, 0xdeadbeef);
      return 0;
    }

  return len;
}
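
/* Worked example (illustrative; the exact regnum values depend on the gdb
   register numbering defined in the headers): with the FRV's 64 general
   registers, GR_REGNUM_MAX would be 63, so rn == 70 falls in the FR range
   and fetches FR6 (70 - 63 - 1).  An unimplemented SPR yields a length of
   0 with BUF untouched, while a completely unknown register stores the
   0xdeadbeef marker before returning 0.  */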

/* The contents of BUF are in target byte order.  */

int
frvbf_store_register (SIM_CPU *current_cpu, int rn, unsigned char *buf, int len)
{
  if (rn <= GR_REGNUM_MAX)
    SET_H_GR (rn, GETTSI (buf));
  else if (rn <= FR_REGNUM_MAX)
    SET_H_FR (rn - GR_REGNUM_MAX - 1, GETTSI (buf));
  else if (rn == PC_REGNUM)
    SET_H_PC (GETTSI (buf));
  else if (rn >= SPR_REGNUM_MIN && rn <= SPR_REGNUM_MAX)
    {
      /* Make sure the register is implemented.  */
      FRV_REGISTER_CONTROL *control = CPU_REGISTER_CONTROL (current_cpu);
      int spr = rn - SPR_REGNUM_MIN;
      if (! control->spr[spr].implemented)
        return 0;
      SET_H_SPR (spr, GETTSI (buf));
    }
  else
    return 0;

  return len;
}
\f
/* Cover fns to access the general registers.  */
USI
frvbf_h_gr_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  frv_check_gr_access (current_cpu, gr);
  return CPU (h_gr[gr]);
}

void
frvbf_h_gr_set_handler (SIM_CPU *current_cpu, UINT gr, USI newval)
{
  frv_check_gr_access (current_cpu, gr);

  if (gr == 0)
    return; /* Storing into gr0 has no effect.  */

  CPU (h_gr[gr]) = newval;
}
\f
/* Cover fns to access the floating point registers.  */
SF
frvbf_h_fr_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  frv_check_fr_access (current_cpu, fr);
  return CPU (h_fr[fr]);
}

void
frvbf_h_fr_set_handler (SIM_CPU *current_cpu, UINT fr, SF newval)
{
  frv_check_fr_access (current_cpu, fr);
  CPU (h_fr[fr]) = newval;
}
\f
/* Cover fns to access the general registers as double words.  */
static UINT
check_register_alignment (SIM_CPU *current_cpu, UINT reg, int align_mask)
{
  if (reg & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
        {
        case bfd_mach_fr400:
        case bfd_mach_fr550:
          frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
          break;
        case bfd_mach_frvtomcat:
        case bfd_mach_fr500:
        case bfd_mach_frv:
          frv_queue_register_exception_interrupt (current_cpu,
                                                  FRV_REC_UNALIGNED);
          break;
        default:
          break;
        }

      reg &= ~align_mask;
    }

  return reg;
}

static UINT
check_fr_register_alignment (SIM_CPU *current_cpu, UINT reg, int align_mask)
{
  if (reg & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
        {
        case bfd_mach_fr400:
        case bfd_mach_fr550:
          frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
          break;
        case bfd_mach_frvtomcat:
        case bfd_mach_fr500:
        case bfd_mach_frv:
          {
            struct frv_fp_exception_info fp_info = {
              FSR_NO_EXCEPTION, FTT_INVALID_FR
            };
            frv_queue_fp_exception_interrupt (current_cpu, & fp_info);
          }
          break;
        default:
          break;
        }

      reg &= ~align_mask;
    }

  return reg;
}

static UINT
check_memory_alignment (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (address & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
        {
        case bfd_mach_fr400:
          frv_queue_data_access_error_interrupt (current_cpu, address);
          break;
        case bfd_mach_frvtomcat:
        case bfd_mach_fr500:
        case bfd_mach_frv:
          frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
          break;
        default:
          break;
        }

      address &= ~align_mask;
    }

  return address;
}

DI
frvbf_h_gr_double_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  DI value;

  if (gr == 0)
    return 0; /* gr0 is always 0.  */

  /* Check the register alignment.  */
  gr = check_register_alignment (current_cpu, gr, 1);

  value = GET_H_GR (gr);
  value <<= 32;
  value |= (USI) GET_H_GR (gr + 1);
  return value;
}

void
frvbf_h_gr_double_set_handler (SIM_CPU *current_cpu, UINT gr, DI newval)
{
  if (gr == 0)
    return; /* Storing into gr0 has no effect.  */

  /* Check the register alignment.  */
  gr = check_register_alignment (current_cpu, gr, 1);

  SET_H_GR (gr    , (newval >> 32) & 0xffffffff);
  SET_H_GR (gr + 1, (newval      ) & 0xffffffff);
}
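
/* Worked example (illustrative): double-word accesses use an even/odd
   register pair with the even register holding the high word, so with
   GR4 = 0x00112233 and GR5 = 0x44556677 the get handler returns
   0x0011223344556677.  An odd GR number queues the machine-specific
   alignment interrupt above and is masked down to the even pair.  */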
\f
/* Cover fns to access the floating point register as double words.  */
DF
frvbf_h_fr_double_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  union {
    SF as_sf[2];
    DF as_df;
  } value;

  /* Check the register alignment.  */
  fr = check_fr_register_alignment (current_cpu, fr, 1);

  if (CURRENT_HOST_BYTE_ORDER == LITTLE_ENDIAN)
    {
      value.as_sf[1] = GET_H_FR (fr);
      value.as_sf[0] = GET_H_FR (fr + 1);
    }
  else
    {
      value.as_sf[0] = GET_H_FR (fr);
      value.as_sf[1] = GET_H_FR (fr + 1);
    }

  return value.as_df;
}

void
frvbf_h_fr_double_set_handler (SIM_CPU *current_cpu, UINT fr, DF newval)
{
  union {
    SF as_sf[2];
    DF as_df;
  } value;

  /* Check the register alignment.  */
  fr = check_fr_register_alignment (current_cpu, fr, 1);

  value.as_df = newval;
  if (CURRENT_HOST_BYTE_ORDER == LITTLE_ENDIAN)
    {
      SET_H_FR (fr    , value.as_sf[1]);
      SET_H_FR (fr + 1, value.as_sf[0]);
    }
  else
    {
      SET_H_FR (fr    , value.as_sf[0]);
      SET_H_FR (fr + 1, value.as_sf[1]);
    }
}
\f
/* Cover fns to access the floating point register as integer words.  */
USI
frvbf_h_fr_int_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  union {
    SF as_sf;
    USI as_usi;
  } value;

  value.as_sf = GET_H_FR (fr);
  return value.as_usi;
}

void
frvbf_h_fr_int_set_handler (SIM_CPU *current_cpu, UINT fr, USI newval)
{
  union {
    SF as_sf;
    USI as_usi;
  } value;

  value.as_usi = newval;
  SET_H_FR (fr, value.as_sf);
}
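
/* Worked example (illustrative): the unions above reinterpret bits rather
   than convert values.  Setting a register through
   frvbf_h_fr_int_set_handler with 0x3f800000 stores the single-precision
   bit pattern of 1.0f, and reading it back through
   frvbf_h_fr_int_get_handler returns 0x3f800000, not 1.  */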
\f
/* Cover fns to access the coprocessor registers as double words.  */
DI
frvbf_h_cpr_double_get_handler (SIM_CPU *current_cpu, UINT cpr)
{
  DI value;

  /* Check the register alignment.  */
  cpr = check_register_alignment (current_cpu, cpr, 1);

  value = GET_H_CPR (cpr);
  value <<= 32;
  value |= (USI) GET_H_CPR (cpr + 1);
  return value;
}

void
frvbf_h_cpr_double_set_handler (SIM_CPU *current_cpu, UINT cpr, DI newval)
{
  /* Check the register alignment.  */
  cpr = check_register_alignment (current_cpu, cpr, 1);

  SET_H_CPR (cpr    , (newval >> 32) & 0xffffffff);
  SET_H_CPR (cpr + 1, (newval      ) & 0xffffffff);
}
\f
/* Cover fns to write registers as quad words.  */
void
frvbf_h_gr_quad_set_handler (SIM_CPU *current_cpu, UINT gr, SI *newval)
{
  if (gr == 0)
    return; /* Storing into gr0 has no effect.  */

  /* Check the register alignment.  */
  gr = check_register_alignment (current_cpu, gr, 3);

  SET_H_GR (gr    , newval[0]);
  SET_H_GR (gr + 1, newval[1]);
  SET_H_GR (gr + 2, newval[2]);
  SET_H_GR (gr + 3, newval[3]);
}

void
frvbf_h_fr_quad_set_handler (SIM_CPU *current_cpu, UINT fr, SI *newval)
{
  /* Check the register alignment.  */
  fr = check_fr_register_alignment (current_cpu, fr, 3);

  SET_H_FR (fr    , newval[0]);
  SET_H_FR (fr + 1, newval[1]);
  SET_H_FR (fr + 2, newval[2]);
  SET_H_FR (fr + 3, newval[3]);
}

void
frvbf_h_cpr_quad_set_handler (SIM_CPU *current_cpu, UINT cpr, SI *newval)
{
  /* Check the register alignment.  */
  cpr = check_register_alignment (current_cpu, cpr, 3);

  SET_H_CPR (cpr    , newval[0]);
  SET_H_CPR (cpr + 1, newval[1]);
  SET_H_CPR (cpr + 2, newval[2]);
  SET_H_CPR (cpr + 3, newval[3]);
}
\f
/* Cover fns to access the special purpose registers.  */
USI
frvbf_h_spr_get_handler (SIM_CPU *current_cpu, UINT spr)
{
  /* Check access restrictions.  */
  frv_check_spr_read_access (current_cpu, spr);

  switch (spr)
    {
    case H_SPR_PSR:
      return spr_psr_get_handler (current_cpu);
    case H_SPR_TBR:
      return spr_tbr_get_handler (current_cpu);
    case H_SPR_BPSR:
      return spr_bpsr_get_handler (current_cpu);
    case H_SPR_CCR:
      return spr_ccr_get_handler (current_cpu);
    case H_SPR_CCCR:
      return spr_cccr_get_handler (current_cpu);
    case H_SPR_SR0:
    case H_SPR_SR1:
    case H_SPR_SR2:
    case H_SPR_SR3:
      return spr_sr_get_handler (current_cpu, spr);
    default:
      return CPU (h_spr[spr]);
    }
  return 0;
}

void
frvbf_h_spr_set_handler (SIM_CPU *current_cpu, UINT spr, USI newval)
{
  FRV_REGISTER_CONTROL *control;
  USI mask;
  USI oldval;

  /* Check access restrictions.  */
  frv_check_spr_write_access (current_cpu, spr);

  /* Only set those fields which are writeable.  */
  control = CPU_REGISTER_CONTROL (current_cpu);
  mask = control->spr[spr].read_only_mask;
  oldval = GET_H_SPR (spr);

  newval = (newval & ~mask) | (oldval & mask);

  /* Some registers are represented by individual components which are
     referenced more often than the register itself.  */
  switch (spr)
    {
    case H_SPR_PSR:
      spr_psr_set_handler (current_cpu, newval);
      break;
    case H_SPR_TBR:
      spr_tbr_set_handler (current_cpu, newval);
      break;
    case H_SPR_BPSR:
      spr_bpsr_set_handler (current_cpu, newval);
      break;
    case H_SPR_CCR:
      spr_ccr_set_handler (current_cpu, newval);
      break;
    case H_SPR_CCCR:
      spr_cccr_set_handler (current_cpu, newval);
      break;
    case H_SPR_SR0:
    case H_SPR_SR1:
    case H_SPR_SR2:
    case H_SPR_SR3:
      spr_sr_set_handler (current_cpu, spr, newval);
      break;
    case H_SPR_IHSR8:
      frv_cache_reconfigure (current_cpu, CPU_INSN_CACHE (current_cpu));
      break;
    default:
      CPU (h_spr[spr]) = newval;
      break;
    }
}
\f
/* Cover fns to access the gr_hi and gr_lo registers.  */
UHI
frvbf_h_gr_hi_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  return (GET_H_GR (gr) >> 16) & 0xffff;
}

void
frvbf_h_gr_hi_set_handler (SIM_CPU *current_cpu, UINT gr, UHI newval)
{
  USI value = (GET_H_GR (gr) & 0xffff) | (newval << 16);
  SET_H_GR (gr, value);
}

UHI
frvbf_h_gr_lo_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  return GET_H_GR (gr) & 0xffff;
}

void
frvbf_h_gr_lo_set_handler (SIM_CPU *current_cpu, UINT gr, UHI newval)
{
  USI value = (GET_H_GR (gr) & 0xffff0000) | (newval & 0xffff);
  SET_H_GR (gr, value);
}
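
/* Worked example (illustrative): with GR5 = 0xaabbccdd the hi handler
   returns 0xaabb and the lo handler 0xccdd; setting lo to 0x1234 then
   leaves GR5 = 0xaabb1234.  */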
\f
/* Cover fns to access the tbr bits.  */
USI
spr_tbr_get_handler (SIM_CPU *current_cpu)
{
  int tbr = ((GET_H_TBR_TBA () & 0xfffff) << 12) |
            ((GET_H_TBR_TT  () & 0xff   ) <<  4);

  return tbr;
}

void
spr_tbr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  int tbr = newval;

  SET_H_TBR_TBA ((tbr >> 12) & 0xfffff);
  SET_H_TBR_TT  ((tbr >>  4) & 0xff);
}
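
/* Worked example (illustrative): TBA occupies TBR bits 31..12 and TT bits
   11..4, so tba = 0xfffff with tt = 0x80 packs to 0xfffff800, and setting
   TBR to 0xfffff800 recovers the same two fields.  */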
\f
/* Cover fns to access the bpsr bits.  */
USI
spr_bpsr_get_handler (SIM_CPU *current_cpu)
{
  int bpsr = ((GET_H_BPSR_BS  () & 0x1) << 12) |
             ((GET_H_BPSR_BET () & 0x1)      );

  return bpsr;
}

void
spr_bpsr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  int bpsr = newval;

  SET_H_BPSR_BS  ((bpsr >> 12) & 1);
  SET_H_BPSR_BET ((bpsr      ) & 1);
}
\f
/* Cover fns to access the psr bits.  */
USI
spr_psr_get_handler (SIM_CPU *current_cpu)
{
  int psr = ((GET_H_PSR_IMPLE () & 0xf) << 28) |
            ((GET_H_PSR_VER   () & 0xf) << 24) |
            ((GET_H_PSR_ICE   () & 0x1) << 16) |
            ((GET_H_PSR_NEM   () & 0x1) << 14) |
            ((GET_H_PSR_CM    () & 0x1) << 13) |
            ((GET_H_PSR_BE    () & 0x1) << 12) |
            ((GET_H_PSR_ESR   () & 0x1) << 11) |
            ((GET_H_PSR_EF    () & 0x1) <<  8) |
            ((GET_H_PSR_EM    () & 0x1) <<  7) |
            ((GET_H_PSR_PIL   () & 0xf) <<  3) |
            ((GET_H_PSR_S     () & 0x1) <<  2) |
            ((GET_H_PSR_PS    () & 0x1) <<  1) |
            ((GET_H_PSR_ET    () & 0x1)      );

  return psr;
}

void
spr_psr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  /* The handler for PSR.S references the value of PSR.ESR, so set PSR.S
     first.  */
  SET_H_PSR_S ((newval >>  2) & 1);

  SET_H_PSR_IMPLE ((newval >> 28) & 0xf);
  SET_H_PSR_VER   ((newval >> 24) & 0xf);
  SET_H_PSR_ICE   ((newval >> 16) & 1);
  SET_H_PSR_NEM   ((newval >> 14) & 1);
  SET_H_PSR_CM    ((newval >> 13) & 1);
  SET_H_PSR_BE    ((newval >> 12) & 1);
  SET_H_PSR_ESR   ((newval >> 11) & 1);
  SET_H_PSR_EF    ((newval >>  8) & 1);
  SET_H_PSR_EM    ((newval >>  7) & 1);
  SET_H_PSR_PIL   ((newval >>  3) & 0xf);
  SET_H_PSR_PS    ((newval >>  1) & 1);
  SET_H_PSR_ET    ((newval      ) & 1);
}
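
/* PSR bit layout as encoded by the two handlers above (summary added for
   reference): IMPLE 31..28, VER 27..24, ICE 16, NEM 14, CM 13, BE 12,
   ESR 11, EF 8, EM 7, PIL 6..3, S 2, PS 1, ET 0.  */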

void
frvbf_h_psr_s_set_handler (SIM_CPU *current_cpu, BI newval)
{
  /* If switching from user to supervisor mode, or vice-versa, then switch
     the supervisor/user context.  */
  int psr_s = GET_H_PSR_S ();
  if (psr_s != (newval & 1))
    {
      frvbf_switch_supervisor_user_context (current_cpu);
      CPU (h_psr_s) = newval & 1;
    }
}
\f
/* Cover fns to access the ccr bits.  */
USI
spr_ccr_get_handler (SIM_CPU *current_cpu)
{
  int ccr = ((GET_H_ICCR (H_ICCR_ICC3) & 0xf) << 28) |
            ((GET_H_ICCR (H_ICCR_ICC2) & 0xf) << 24) |
            ((GET_H_ICCR (H_ICCR_ICC1) & 0xf) << 20) |
            ((GET_H_ICCR (H_ICCR_ICC0) & 0xf) << 16) |
            ((GET_H_FCCR (H_FCCR_FCC3) & 0xf) << 12) |
            ((GET_H_FCCR (H_FCCR_FCC2) & 0xf) <<  8) |
            ((GET_H_FCCR (H_FCCR_FCC1) & 0xf) <<  4) |
            ((GET_H_FCCR (H_FCCR_FCC0) & 0xf)      );

  return ccr;
}

void
spr_ccr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  SET_H_ICCR (H_ICCR_ICC3, (newval >> 28) & 0xf);
  SET_H_ICCR (H_ICCR_ICC2, (newval >> 24) & 0xf);
  SET_H_ICCR (H_ICCR_ICC1, (newval >> 20) & 0xf);
  SET_H_ICCR (H_ICCR_ICC0, (newval >> 16) & 0xf);
  SET_H_FCCR (H_FCCR_FCC3, (newval >> 12) & 0xf);
  SET_H_FCCR (H_FCCR_FCC2, (newval >>  8) & 0xf);
  SET_H_FCCR (H_FCCR_FCC1, (newval >>  4) & 0xf);
  SET_H_FCCR (H_FCCR_FCC0, (newval      ) & 0xf);
}
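
/* Worked example (illustrative): CCR packs the integer condition codes
   into the upper halfword (ICC3 at bits 31..28 down to ICC0 at 19..16)
   and the float condition codes into the lower (FCC3 at 15..12 down to
   FCC0 at 3..0), so ICC0 = 0x8 with everything else clear reads back as
   0x00080000.  */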
\f
QI
frvbf_set_icc_for_shift_right (
  SIM_CPU *current_cpu, SI value, SI shift, QI icc
)
{
  /* Set the C flag of the given icc to the logical OR of the bits shifted
     out.  */
  int mask = (1 << shift) - 1;
  if ((value & mask) != 0)
    return icc | 0x1;

  return icc & 0xe;
}

QI
frvbf_set_icc_for_shift_left (
  SIM_CPU *current_cpu, SI value, SI shift, QI icc
)
{
  /* Set the V flag of the given icc to the logical OR of the bits shifted
     out.  */
  int mask = ((1 << shift) - 1) << (32 - shift);
  if ((value & mask) != 0)
    return icc | 0x2;

  return icc & 0xd;
}
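
/* Worked example (illustrative): in the 4-bit icc nibble, bit 0 is C and
   bit 1 is V.  For a right shift of 0x0000000f by 2 the mask is 0x3 and
   nonzero bits are shifted out, so C is set; for a left shift of
   0xc0000000 by 1 the mask is 0x80000000, so V is set.  */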
\f
/* Cover fns to access the cccr bits.  */
USI
spr_cccr_get_handler (SIM_CPU *current_cpu)
{
  int cccr = ((GET_H_CCCR (H_CCCR_CC7) & 0x3) << 14) |
             ((GET_H_CCCR (H_CCCR_CC6) & 0x3) << 12) |
             ((GET_H_CCCR (H_CCCR_CC5) & 0x3) << 10) |
             ((GET_H_CCCR (H_CCCR_CC4) & 0x3) <<  8) |
             ((GET_H_CCCR (H_CCCR_CC3) & 0x3) <<  6) |
             ((GET_H_CCCR (H_CCCR_CC2) & 0x3) <<  4) |
             ((GET_H_CCCR (H_CCCR_CC1) & 0x3) <<  2) |
             ((GET_H_CCCR (H_CCCR_CC0) & 0x3)      );

  return cccr;
}

void
spr_cccr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  SET_H_CCCR (H_CCCR_CC7, (newval >> 14) & 0x3);
  SET_H_CCCR (H_CCCR_CC6, (newval >> 12) & 0x3);
  SET_H_CCCR (H_CCCR_CC5, (newval >> 10) & 0x3);
  SET_H_CCCR (H_CCCR_CC4, (newval >>  8) & 0x3);
  SET_H_CCCR (H_CCCR_CC3, (newval >>  6) & 0x3);
  SET_H_CCCR (H_CCCR_CC2, (newval >>  4) & 0x3);
  SET_H_CCCR (H_CCCR_CC1, (newval >>  2) & 0x3);
  SET_H_CCCR (H_CCCR_CC0, (newval      ) & 0x3);
}
\f
/* Cover fns to access the sr bits.  */
USI
spr_sr_get_handler (SIM_CPU *current_cpu, UINT spr)
{
  /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be GR4-7,
     otherwise the correct mapping of USG4-7 or SGR4-7 will be in SR0-3.  */
  int psr_esr = GET_H_PSR_ESR ();
  if (! psr_esr)
    return GET_H_GR (4 + (spr - H_SPR_SR0));

  return CPU (h_spr[spr]);
}

void
spr_sr_set_handler (SIM_CPU *current_cpu, UINT spr, USI newval)
{
  /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be GR4-7,
     otherwise the correct mapping of USG4-7 or SGR4-7 will be in SR0-3.  */
  int psr_esr = GET_H_PSR_ESR ();
  if (! psr_esr)
    SET_H_GR (4 + (spr - H_SPR_SR0), newval);
  else
    CPU (h_spr[spr]) = newval;
}
\f
/* Switch SR0-SR3 with GR4-GR7 if PSR.ESR is set.  */
void
frvbf_switch_supervisor_user_context (SIM_CPU *current_cpu)
{
  if (GET_H_PSR_ESR ())
    {
      /* We need to be in supervisor mode to swap the registers.  Access the
         PSR.S directly in order to avoid recursive context switches.  */
      int i;
      int save_psr_s = CPU (h_psr_s);
      CPU (h_psr_s) = 1;
      for (i = 0; i < 4; ++i)
        {
          int gr = i + 4;
          int spr = i + H_SPR_SR0;
          SI tmp = GET_H_SPR (spr);
          SET_H_SPR (spr, GET_H_GR (gr));
          SET_H_GR (gr, tmp);
        }
      CPU (h_psr_s) = save_psr_s;
    }
}
\f
/* Handle load/store of quad registers.  */
void
frvbf_load_quad_GR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment.  */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
        {
          value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
          address += 4;
        }
      sim_queue_fn_xi_write (current_cpu, frvbf_h_gr_quad_set_handler, targ_ix,
                             value);
    }
}

void
frvbf_store_quad_GR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    {
      /* GR0 is always 0.  */
      if (src_ix == 0)
        value[i] = 0;
      else
        value[i] = GET_H_GR (src_ix + i);
    }
  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}
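
/* Usage note (illustrative): a quad store writes four consecutive words;
   with HSR0.DCE set the write is queued through the data-cache function
   frvbf_mem_set_XI, otherwise it is queued as a plain memory write.  The
   quad loads above likewise defer the register update by queuing
   frvbf_h_gr_quad_set_handler.  */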

void
frvbf_load_quad_FRint (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment.  */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
        {
          value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
          address += 4;
        }
      sim_queue_fn_xi_write (current_cpu, frvbf_h_fr_quad_set_handler, targ_ix,
                             value);
    }
}

void
frvbf_store_quad_FRint (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_fr_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    value[i] = GET_H_FR (src_ix + i);

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}

void
frvbf_load_quad_CPR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment.  */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
        {
          value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
          address += 4;
        }
      sim_queue_fn_xi_write (current_cpu, frvbf_h_cpr_quad_set_handler, targ_ix,
                             value);
    }
}

void
frvbf_store_quad_CPR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    value[i] = GET_H_CPR (src_ix + i);

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}
\f
void
frvbf_signed_integer_divide (
  SIM_CPU *current_cpu, SI arg1, SI arg2, int target_index, int non_excepting
)
{
  enum frv_dtt dtt = FRV_DTT_NO_EXCEPTION;
  if (arg1 == 0x80000000 && arg2 == -1)
    {
      /* 0x80000000/(-1) must result in 0x7fffffff when ISR.EDE is set;
         otherwise it may result in 0x7fffffff (sparc compatibility) or
         0x80000000 (C language compatibility).  */
      USI isr;
      dtt = FRV_DTT_OVERFLOW;

      isr = GET_ISR ();
      if (GET_ISR_EDE (isr))
        sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                               0x7fffffff);
      else
        sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                               0x80000000);
      frvbf_force_update (current_cpu); /* Force update of target register.  */
    }
  else if (arg2 == 0)
    dtt = FRV_DTT_DIVISION_BY_ZERO;
  else
    sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                           arg1 / arg2);

  /* Check for exceptions.  */
  if (dtt != FRV_DTT_NO_EXCEPTION)
    dtt = frvbf_division_exception (current_cpu, dtt, target_index,
                                    non_excepting);
  if (non_excepting && dtt == FRV_DTT_NO_EXCEPTION)
    {
      /* Non-excepting instruction.  Clear the NE flag for the target
         register.  */
      SI NE_flags[2];
      GET_NE_FLAGS (NE_flags, H_SPR_GNER0);
      CLEAR_NE_FLAG (NE_flags, target_index);
      SET_NE_FLAGS (H_SPR_GNER0, NE_flags);
    }
}
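
/* Worked example (illustrative): 0x80000000 / -1 overflows 32-bit two's
   complement, so dtt becomes FRV_DTT_OVERFLOW; the quotient written is
   0x7fffffff when ISR.EDE is set and 0x80000000 otherwise, after which
   the overflow is processed by frvbf_division_exception.  */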

void
frvbf_unsigned_integer_divide (
  SIM_CPU *current_cpu, USI arg1, USI arg2, int target_index, int non_excepting
)
{
  if (arg2 == 0)
    frvbf_division_exception (current_cpu, FRV_DTT_DIVISION_BY_ZERO,
                              target_index, non_excepting);
  else
    {
      sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                             arg1 / arg2);
      if (non_excepting)
        {
          /* Non-excepting instruction.  Clear the NE flag for the target
             register.  */
          SI NE_flags[2];
          GET_NE_FLAGS (NE_flags, H_SPR_GNER0);
          CLEAR_NE_FLAG (NE_flags, target_index);
          SET_NE_FLAGS (H_SPR_GNER0, NE_flags);
        }
    }
}
\f
/* Clear accumulators.  */
void
frvbf_clear_accumulators (SIM_CPU *current_cpu, SI acc_ix, int A)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  int acc_num =
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500) ? 8 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550) ? 8 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400) ? 4 :
    63;
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);

  ps->mclracc_acc = acc_ix;
  ps->mclracc_A = A;
  if (A == 0 || acc_ix != 0) /* Clear 1 accumulator?  */
    {
      /* This instruction is a nop if the referenced accumulator is not
         implemented.  */
      if (acc_ix < acc_num)
        sim_queue_fn_di_write (current_cpu, frvbf_h_acc40S_set, acc_ix, 0);
    }
  else
    {
      /* Clear all implemented accumulators.  */
      int i;
      for (i = 0; i < acc_num; ++i)
        sim_queue_fn_di_write (current_cpu, frvbf_h_acc40S_set, i, 0);
    }
}
\f
/* Functions to aid insn semantics.  */

/* Compute the result of the SCAN and SCANI insns after the shift and xor.  */
SI
frvbf_scan_result (SIM_CPU *current_cpu, SI value)
{
  SI i;
  SI mask;

  if (value == 0)
    return 63;

  /* Find the position of the first non-zero bit.
     The loop will terminate since there is guaranteed to be at least one
     non-zero bit.  */
  mask = 1 << (sizeof (mask) * 8 - 1);
  for (i = 0; (value & mask) == 0; ++i)
    value <<= 1;

  return i;
}
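
/* Worked example (illustrative): the loop counts leading zero bits of the
   post-xor value, so 0x80000000 yields 0, 0x00010000 yields 15, and the
   all-zero value takes the special early return of 63.  */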

/* Compute the result of the cut insns.  */
SI
frvbf_cut (SIM_CPU *current_cpu, SI reg1, SI reg2, SI cut_point)
{
  SI result;
  if (cut_point < 32)
    {
      result = reg1 << cut_point;
      result |= (reg2 >> (32 - cut_point)) & ((1 << cut_point) - 1);
    }
  else
    result = reg2 << (cut_point - 32);

  return result;
}
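
/* Worked example (illustrative): frvbf_cut treats reg1:reg2 as a 64-bit
   quantity and returns the 32 bits starting cut_point bits from the top,
   so reg1 = 0x12345678, reg2 = 0x9abcdef0 and cut_point = 8 give
   0x3456789a.  */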

/* Compute the result of the media cut insns.  */
SI
frvbf_media_cut (SIM_CPU *current_cpu, DI acc, SI cut_point)
{
  /* The cut point is the lower 6 bits (signed) of what we are passed.  */
  cut_point = cut_point << 26 >> 26;

  /* The cut_point is relative to bit 40 of 64 bits.  */
  if (cut_point >= 0)
    return (acc << (cut_point + 24)) >> 32;

  /* Extend the sign bit (bit 40) for negative cuts.  */
  if (cut_point == -32)
    return (acc << 24) >> 63; /* Special case for full shiftout.  */

  return (acc << 24) >> (32 + -cut_point);
}
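
/* Worked example (illustrative): the encoded cut point is sign extended
   from 6 bits, so 0x3f means -1.  For cut_point = 0 the returned word is
   bits 39..8 of the 40-bit accumulator value kept in the low end of ACC.  */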

/* Compute the result of the saturating media cut insns.  */
SI
frvbf_media_cut_ss (SIM_CPU *current_cpu, DI acc, SI cut_point)
{
  /* The cut point is the lower 6 bits (signed) of what we are passed.  */
  cut_point = cut_point << 26 >> 26;

  if (cut_point >= 0)
    {
      /* The cut_point is relative to bit 40 of 64 bits.  */
      DI shifted = acc << (cut_point + 24);
      DI unshifted = shifted >> (cut_point + 24);

      /* The result will be saturated if significant bits are shifted out.  */
      if (unshifted != acc)
        {
          if (acc < 0)
            return 0x80000000;
          return 0x7fffffff;
        }
    }

  /* The result will not be saturated, so use the code for the normal cut.  */
  return frvbf_media_cut (current_cpu, acc, cut_point);
}

/* Compute the result of int accumulator cut (SCUTSS).  */
SI
frvbf_iacc_cut (SIM_CPU *current_cpu, DI acc, SI cut_point)
{
  /* The cut point is the lower 7 bits (signed) of what we are passed.  */
  cut_point = cut_point << 25 >> 25;

  if (cut_point <= -32)
    cut_point = -31; /* Special case for full shiftout.  */

  /* Negative cuts (cannot saturate).  */
  if (cut_point < 0)
    return acc >> (32 + -cut_point);

  /* Positive cuts will saturate if significant bits are shifted out.  */
  if (acc != ((acc << cut_point) >> cut_point))
    return acc >= 0 ? 0x7fffffff : 0x80000000;

  /* No saturate, just cut.  */
  return (acc << cut_point) >> 32;
}
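
/* Worked example (illustrative): with acc = 0x4000000000000000 and
   cut_point = 1, shifting left then right does not restore acc, so a
   significant bit would be lost and the non-negative result saturates
   to 0x7fffffff.  */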

/* Compute the result of shift-left-arithmetic-with-saturation (SLASS).  */
SI
frvbf_shift_left_arith_saturate (SIM_CPU *current_cpu, SI arg1, SI arg2)
{
  int neg_arg1;

  /* FIXME: what to do with negative shift amt?  */
  if (arg2 <= 0)
    return arg1;

  if (arg1 == 0)
    return 0;

  /* Signed shift by 31 or greater saturates by definition.  */
  if (arg2 >= 31)
    return arg1 > 0 ? (SI) 0x7fffffff : (SI) 0x80000000;

  /* OK, arg2 is between 1 and 31.  */
  neg_arg1 = (arg1 < 0);
  do {
    arg1 <<= 1;
    /* Check for sign bit change (saturation).  */
    if (neg_arg1 && (arg1 >= 0))
      return (SI) 0x80000000;
    else if (!neg_arg1 && (arg1 < 0))
      return (SI) 0x7fffffff;
  } while (--arg2 > 0);

  return arg1;
}
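
/* Worked example (illustrative): frvbf_shift_left_arith_saturate with
   arg1 = 0x40000000 and arg2 = 1 flips the sign bit on the first shift,
   so the positive input saturates to 0x7fffffff; any shift amount of 31
   or more saturates immediately by definition.  */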

/* Simulate the media custom insns.  */
void
frvbf_media_cop (SIM_CPU *current_cpu, int cop_num)
{
  /* The semantics of the insn are a nop, since it is implementation defined.
     We do need to check whether it's implemented and set up for MTRAP
     if it's not.  */
  USI msr0 = GET_MSR (0);
  if (GET_MSR_EMCI (msr0) == 0)
    {
      /* No interrupt queued at this time.  */
      frv_set_mp_exception_registers (current_cpu, MTT_UNIMPLEMENTED_MPOP, 0);
    }
}

/* Simulate the media average (MAVEH) insn.  */
static HI
do_media_average (SIM_CPU *current_cpu, HI arg1, HI arg2)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  SI sum = (arg1 + arg2);
  HI result = sum >> 1;
  int rounding_value;

  /* On fr400 and fr550, check the rounding mode.  On other machines
     rounding is always toward negative infinity and the result is
     already correctly rounded.  */
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    /* Need to check rounding mode.  */
    case bfd_mach_fr400:
    case bfd_mach_fr550:
      /* Check whether rounding will be required.  Rounding will be required
         if the sum is an odd number.  */
      rounding_value = sum & 1;
      if (rounding_value)
        {
          USI msr0 = GET_MSR (0);
          /* Check MSR0.SRDAV to determine which bits control the rounding.  */
          if (GET_MSR_SRDAV (msr0))
            {
              /* MSR0.RD controls rounding.  */
              switch (GET_MSR_RD (msr0))
                {
                case 0:
                  /* Round to nearest.  */
                  if (result >= 0)
                    ++result;
                  break;
                case 1:
                  /* Round toward 0.  */
                  if (result < 0)
                    ++result;
                  break;
                case 2:
                  /* Round toward positive infinity.  */
                  ++result;
                  break;
                case 3:
                  /* Round toward negative infinity.  The result is already
                     correctly rounded.  */
                  break;
                default:
                  abort ();
                  break;
                }
            }
          else
            {
              /* MSR0.RDAV controls rounding.  If set, round toward positive
                 infinity.  Otherwise the result is already rounded correctly
                 toward negative infinity.  */
              if (GET_MSR_RDAV (msr0))
                ++result;
            }
        }
      break;
    default:
      break;
    }

  return result;
}

SI
frvbf_media_average (SIM_CPU *current_cpu, SI reg1, SI reg2)
{
  SI result;
  result = do_media_average (current_cpu, reg1 & 0xffff, reg2 & 0xffff);
  result &= 0xffff;
  result |= do_media_average (current_cpu, (reg1 >> 16) & 0xffff,
                              (reg2 >> 16) & 0xffff) << 16;
  return result;
}
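
/* Worked example (illustrative): MAVEH averages the two halfword lanes
   independently, so 0x00030005 and 0x00040006 give 0x00030005 under the
   default round toward negative infinity ((3 + 4) >> 1 == 3 and
   (5 + 6) >> 1 == 5).  */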

/* Maintain a flag in order to know when to write the address of the next
   VLIW instruction into the LR register.  Used by the JMPL, JMPIL, and CALL
   insns.  */
void
frvbf_set_write_next_vliw_addr_to_LR (SIM_CPU *current_cpu, int value)
{
  frvbf_write_next_vliw_addr_to_LR = value;
}

void
frvbf_set_ne_index (SIM_CPU *current_cpu, int index)
{
  USI NE_flags[2];

  /* Save the target register so interrupt processing can set its NE flag
     in the event of an exception.  */
  frv_interrupt_state.ne_index = index;

  /* Clear the NE flag of the target register.  It will be reset if necessary
     in the event of an exception.  */
  GET_NE_FLAGS (NE_flags, H_SPR_FNER0);
  CLEAR_NE_FLAG (NE_flags, index);
  SET_NE_FLAGS (H_SPR_FNER0, NE_flags);
}

void
frvbf_force_update (SIM_CPU *current_cpu)
{
  CGEN_WRITE_QUEUE *q = CPU_WRITE_QUEUE (current_cpu);
  int ix = CGEN_WRITE_QUEUE_INDEX (q);
  if (ix > 0)
    {
      CGEN_WRITE_QUEUE_ELEMENT *item = CGEN_WRITE_QUEUE_ELEMENT (q, ix - 1);
      item->flags |= FRV_WRITE_QUEUE_FORCE_WRITE;
    }
}
\f
/* Condition code logic.  */
enum cr_ops {
  andcr, orcr, xorcr, nandcr, norcr, andncr, orncr, nandncr, norncr,
  num_cr_ops
};

enum cr_result {cr_undefined, cr_undefined1, cr_false, cr_true};

static enum cr_result
cr_logic[num_cr_ops][4][4] = {
  /* andcr */
  {
    /*                undefined     undefined     false         true      */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* true      */ {cr_undefined, cr_undefined, cr_false,     cr_true     }
  },
  /* orcr */
  {
    /*                undefined     undefined     false         true      */
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* false     */ {cr_false,     cr_false,     cr_false,     cr_true     },
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_true     }
  },
  /* xorcr */
  {
    /*                undefined     undefined     false         true      */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_false    }
  },
  /* nandcr */
  {
    /*                undefined     undefined     false         true      */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* true      */ {cr_undefined, cr_undefined, cr_true,      cr_false    }
  },
  /* norcr */
  {
    /*                undefined     undefined     false         true      */
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* false     */ {cr_true,      cr_true,      cr_true,      cr_false    },
    /* true      */ {cr_false,     cr_false,     cr_false,     cr_false    }
  },
  /* andncr */
  {
    /*                undefined     undefined     false         true      */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* true      */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined}
  },
  /* orncr */
  {
    /*                undefined     undefined     false         true      */
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* false     */ {cr_true,      cr_true,      cr_true,      cr_true     },
    /* true      */ {cr_false,     cr_false,     cr_false,     cr_true     }
  },
  /* nandncr */
  {
    /*                undefined     undefined     false         true      */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* true      */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined}
  },
  /* norncr */
  {
    /*                undefined     undefined     false         true      */
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* false     */ {cr_false,     cr_false,     cr_false,     cr_false    },
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_false    }
  }
};

UQI
frvbf_cr_logic (SIM_CPU *current_cpu, SI operation, UQI arg1, UQI arg2)
{
  return cr_logic[operation][arg1][arg2];
}
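
/* Worked example (illustrative): each CR field holds one of the four
   states above, so frvbf_cr_logic (current_cpu, orcr, cr_false, cr_true)
   returns cr_true, while andcr with either operand undefined always
   yields cr_undefined.  */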
\f
/* Cache Manipulation.  */
void
frvbf_insn_cache_preload (SIM_CPU *current_cpu, SI address, USI length, int lock)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0))
    {
      if (model_insn)
        {
          CPU_LOAD_ADDRESS (current_cpu) = address;
          CPU_LOAD_LENGTH (current_cpu) = length;
          CPU_LOAD_LOCK (current_cpu) = lock;
        }
      else
        {
          FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
          frv_cache_preload (cache, address, length, lock);
        }
    }
}

void
frvbf_data_cache_preload (SIM_CPU *current_cpu, SI address, USI length, int lock)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    {
      if (model_insn)
        {
          CPU_LOAD_ADDRESS (current_cpu) = address;
          CPU_LOAD_LENGTH (current_cpu) = length;
          CPU_LOAD_LOCK (current_cpu) = lock;
        }
      else
        {
          FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
          frv_cache_preload (cache, address, length, lock);
        }
    }
}

void
frvbf_insn_cache_unlock (SIM_CPU *current_cpu, SI address)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0))
    {
      if (model_insn)
        CPU_LOAD_ADDRESS (current_cpu) = address;
      else
        {
          FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
          frv_cache_unlock (cache, address);
        }
    }
}

void
frvbf_data_cache_unlock (SIM_CPU *current_cpu, SI address)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    {
      if (model_insn)
        CPU_LOAD_ADDRESS (current_cpu) = address;
      else
        {
          FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
          frv_cache_unlock (cache, address);
        }
    }
}

void
frvbf_insn_cache_invalidate (SIM_CPU *current_cpu, SI address, int all)
{
  /* Make sure the insn was specified properly.  -1 will be passed for ALL
     for an icei with A=0.  */
  if (all == -1)
    {
      frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
      return;
    }

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      /* Record the all-entries flag for use in profiling.  */
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
      ps->all_cache_entries = all;
      CPU_LOAD_ADDRESS (current_cpu) = address;
    }
  else
    {
      FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
      if (all)
        frv_cache_invalidate_all (cache, 0/* flush? */);
      else
        frv_cache_invalidate (cache, address, 0/* flush? */);
    }
}

void
frvbf_data_cache_invalidate (SIM_CPU *current_cpu, SI address, int all)
{
  /* Make sure the insn was specified properly.  -1 will be passed for ALL
     for a dcei with A=0.  */
  if (all == -1)
    {
      frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
      return;
    }

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      /* Record the all-entries flag for use in profiling.  */
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
      ps->all_cache_entries = all;
      CPU_LOAD_ADDRESS (current_cpu) = address;
    }
  else
    {
      FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
      if (all)
        frv_cache_invalidate_all (cache, 0/* flush? */);
      else
        frv_cache_invalidate (cache, address, 0/* flush? */);
    }
}

void
frvbf_data_cache_flush (SIM_CPU *current_cpu, SI address, int all)
{
  /* Make sure the insn was specified properly.  -1 will be passed for ALL
     for a dcef with A=0.  */
  if (all == -1)
    {
      frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
      return;
    }

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      /* Record the all-entries flag for use in profiling.  */
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
      ps->all_cache_entries = all;
      CPU_LOAD_ADDRESS (current_cpu) = address;
    }
  else
    {
      FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
      if (all)
        frv_cache_invalidate_all (cache, 1/* flush? */);
      else
        frv_cache_invalidate (cache, address, 1/* flush? */);
    }
}