1 /* frv simulator support code
2 Copyright (C) 1998, 1999, 2000, 2001, 2003 Free Software Foundation, Inc.
3 Contributed by Red Hat.
5 This file is part of the GNU simulators.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
22 #define WANT_CPU_FRVBF
27 #include "cgen-engine.h"
/* Maintain a flag in order to know when to write the address of the next
   VLIW instruction into the LR register.  Used by JMPL, JMPIL, and CALL
   insns.  */
int frvbf_write_next_vliw_addr_to_LR;
37 /* The contents of BUF are in target byte order. */
39 frvbf_fetch_register (SIM_CPU
*current_cpu
, int rn
, unsigned char *buf
, int len
)
41 if (rn
<= GR_REGNUM_MAX
)
42 SETTSI (buf
, GET_H_GR (rn
));
43 else if (rn
<= FR_REGNUM_MAX
)
44 SETTSI (buf
, GET_H_FR (rn
- GR_REGNUM_MAX
- 1));
45 else if (rn
== PC_REGNUM
)
46 SETTSI (buf
, GET_H_PC ());
47 else if (rn
>= SPR_REGNUM_MIN
&& rn
<= SPR_REGNUM_MAX
)
49 /* Make sure the register is implemented. */
50 FRV_REGISTER_CONTROL
*control
= CPU_REGISTER_CONTROL (current_cpu
);
51 int spr
= rn
- SPR_REGNUM_MIN
;
52 if (! control
->spr
[spr
].implemented
)
54 SETTSI (buf
, GET_H_SPR (spr
));
58 SETTSI (buf
, 0xdeadbeef);
65 /* The contents of BUF are in target byte order. */
68 frvbf_store_register (SIM_CPU
*current_cpu
, int rn
, unsigned char *buf
, int len
)
70 if (rn
<= GR_REGNUM_MAX
)
71 SET_H_GR (rn
, GETTSI (buf
));
72 else if (rn
<= FR_REGNUM_MAX
)
73 SET_H_FR (rn
- GR_REGNUM_MAX
- 1, GETTSI (buf
));
74 else if (rn
== PC_REGNUM
)
75 SET_H_PC (GETTSI (buf
));
76 else if (rn
>= SPR_REGNUM_MIN
&& rn
<= SPR_REGNUM_MAX
)
78 /* Make sure the register is implemented. */
79 FRV_REGISTER_CONTROL
*control
= CPU_REGISTER_CONTROL (current_cpu
);
80 int spr
= rn
- SPR_REGNUM_MIN
;
81 if (! control
->spr
[spr
].implemented
)
83 SET_H_SPR (spr
, GETTSI (buf
));
91 /* Cover fns to access the general registers. */
93 frvbf_h_gr_get_handler (SIM_CPU
*current_cpu
, UINT gr
)
95 frv_check_gr_access (current_cpu
, gr
);
96 return CPU (h_gr
[gr
]);
100 frvbf_h_gr_set_handler (SIM_CPU
*current_cpu
, UINT gr
, USI newval
)
102 frv_check_gr_access (current_cpu
, gr
);
105 return; /* Storing into gr0 has no effect. */
107 CPU (h_gr
[gr
]) = newval
;
110 /* Cover fns to access the floating point registers. */
112 frvbf_h_fr_get_handler (SIM_CPU
*current_cpu
, UINT fr
)
114 frv_check_fr_access (current_cpu
, fr
);
115 return CPU (h_fr
[fr
]);
119 frvbf_h_fr_set_handler (SIM_CPU
*current_cpu
, UINT fr
, SF newval
)
121 frv_check_fr_access (current_cpu
, fr
);
122 CPU (h_fr
[fr
]) = newval
;
125 /* Cover fns to access the general registers as double words. */
127 check_register_alignment (SIM_CPU
*current_cpu
, UINT reg
, int align_mask
)
129 if (reg
& align_mask
)
131 SIM_DESC sd
= CPU_STATE (current_cpu
);
132 switch (STATE_ARCHITECTURE (sd
)->mach
)
136 frv_queue_program_interrupt (current_cpu
, FRV_ILLEGAL_INSTRUCTION
);
138 case bfd_mach_frvtomcat
:
141 frv_queue_register_exception_interrupt (current_cpu
,
155 check_fr_register_alignment (SIM_CPU
*current_cpu
, UINT reg
, int align_mask
)
157 if (reg
& align_mask
)
159 SIM_DESC sd
= CPU_STATE (current_cpu
);
160 switch (STATE_ARCHITECTURE (sd
)->mach
)
164 frv_queue_program_interrupt (current_cpu
, FRV_ILLEGAL_INSTRUCTION
);
166 case bfd_mach_frvtomcat
:
170 struct frv_fp_exception_info fp_info
= {
171 FSR_NO_EXCEPTION
, FTT_INVALID_FR
173 frv_queue_fp_exception_interrupt (current_cpu
, & fp_info
);
187 check_memory_alignment (SIM_CPU
*current_cpu
, SI address
, int align_mask
)
189 if (address
& align_mask
)
191 SIM_DESC sd
= CPU_STATE (current_cpu
);
192 switch (STATE_ARCHITECTURE (sd
)->mach
)
195 frv_queue_data_access_error_interrupt (current_cpu
, address
);
197 case bfd_mach_frvtomcat
:
200 frv_queue_mem_address_not_aligned_interrupt (current_cpu
, address
);
206 address
&= ~align_mask
;
213 frvbf_h_gr_double_get_handler (SIM_CPU
*current_cpu
, UINT gr
)
218 return 0; /* gr0 is always 0. */
220 /* Check the register alignment. */
221 gr
= check_register_alignment (current_cpu
, gr
, 1);
223 value
= GET_H_GR (gr
);
225 value
|= (USI
) GET_H_GR (gr
+ 1);
230 frvbf_h_gr_double_set_handler (SIM_CPU
*current_cpu
, UINT gr
, DI newval
)
233 return; /* Storing into gr0 has no effect. */
235 /* Check the register alignment. */
236 gr
= check_register_alignment (current_cpu
, gr
, 1);
238 SET_H_GR (gr
, (newval
>> 32) & 0xffffffff);
239 SET_H_GR (gr
+ 1, (newval
) & 0xffffffff);
242 /* Cover fns to access the floating point register as double words. */
244 frvbf_h_fr_double_get_handler (SIM_CPU
*current_cpu
, UINT fr
)
251 /* Check the register alignment. */
252 fr
= check_fr_register_alignment (current_cpu
, fr
, 1);
254 if (CURRENT_HOST_BYTE_ORDER
== LITTLE_ENDIAN
)
256 value
.as_sf
[1] = GET_H_FR (fr
);
257 value
.as_sf
[0] = GET_H_FR (fr
+ 1);
261 value
.as_sf
[0] = GET_H_FR (fr
);
262 value
.as_sf
[1] = GET_H_FR (fr
+ 1);
269 frvbf_h_fr_double_set_handler (SIM_CPU
*current_cpu
, UINT fr
, DF newval
)
276 /* Check the register alignment. */
277 fr
= check_fr_register_alignment (current_cpu
, fr
, 1);
279 value
.as_df
= newval
;
280 if (CURRENT_HOST_BYTE_ORDER
== LITTLE_ENDIAN
)
282 SET_H_FR (fr
, value
.as_sf
[1]);
283 SET_H_FR (fr
+ 1, value
.as_sf
[0]);
287 SET_H_FR (fr
, value
.as_sf
[0]);
288 SET_H_FR (fr
+ 1, value
.as_sf
[1]);
292 /* Cover fns to access the floating point register as integer words. */
294 frvbf_h_fr_int_get_handler (SIM_CPU
*current_cpu
, UINT fr
)
301 value
.as_sf
= GET_H_FR (fr
);
306 frvbf_h_fr_int_set_handler (SIM_CPU
*current_cpu
, UINT fr
, USI newval
)
313 value
.as_usi
= newval
;
314 SET_H_FR (fr
, value
.as_sf
);
317 /* Cover fns to access the coprocessor registers as double words. */
319 frvbf_h_cpr_double_get_handler (SIM_CPU
*current_cpu
, UINT cpr
)
323 /* Check the register alignment. */
324 cpr
= check_register_alignment (current_cpu
, cpr
, 1);
326 value
= GET_H_CPR (cpr
);
328 value
|= (USI
) GET_H_CPR (cpr
+ 1);
333 frvbf_h_cpr_double_set_handler (SIM_CPU
*current_cpu
, UINT cpr
, DI newval
)
335 /* Check the register alignment. */
336 cpr
= check_register_alignment (current_cpu
, cpr
, 1);
338 SET_H_CPR (cpr
, (newval
>> 32) & 0xffffffff);
339 SET_H_CPR (cpr
+ 1, (newval
) & 0xffffffff);
342 /* Cover fns to write registers as quad words. */
344 frvbf_h_gr_quad_set_handler (SIM_CPU
*current_cpu
, UINT gr
, SI
*newval
)
347 return; /* Storing into gr0 has no effect. */
349 /* Check the register alignment. */
350 gr
= check_register_alignment (current_cpu
, gr
, 3);
352 SET_H_GR (gr
, newval
[0]);
353 SET_H_GR (gr
+ 1, newval
[1]);
354 SET_H_GR (gr
+ 2, newval
[2]);
355 SET_H_GR (gr
+ 3, newval
[3]);
359 frvbf_h_fr_quad_set_handler (SIM_CPU
*current_cpu
, UINT fr
, SI
*newval
)
361 /* Check the register alignment. */
362 fr
= check_fr_register_alignment (current_cpu
, fr
, 3);
364 SET_H_FR (fr
, newval
[0]);
365 SET_H_FR (fr
+ 1, newval
[1]);
366 SET_H_FR (fr
+ 2, newval
[2]);
367 SET_H_FR (fr
+ 3, newval
[3]);
371 frvbf_h_cpr_quad_set_handler (SIM_CPU
*current_cpu
, UINT cpr
, SI
*newval
)
373 /* Check the register alignment. */
374 cpr
= check_register_alignment (current_cpu
, cpr
, 3);
376 SET_H_CPR (cpr
, newval
[0]);
377 SET_H_CPR (cpr
+ 1, newval
[1]);
378 SET_H_CPR (cpr
+ 2, newval
[2]);
379 SET_H_CPR (cpr
+ 3, newval
[3]);
382 /* Cover fns to access the special purpose registers. */
384 frvbf_h_spr_get_handler (SIM_CPU
*current_cpu
, UINT spr
)
386 /* Check access restrictions. */
387 frv_check_spr_read_access (current_cpu
, spr
);
392 return spr_psr_get_handler (current_cpu
);
394 return spr_tbr_get_handler (current_cpu
);
396 return spr_bpsr_get_handler (current_cpu
);
398 return spr_ccr_get_handler (current_cpu
);
400 return spr_cccr_get_handler (current_cpu
);
405 return spr_sr_get_handler (current_cpu
, spr
);
408 return CPU (h_spr
[spr
]);
414 frvbf_h_spr_set_handler (SIM_CPU
*current_cpu
, UINT spr
, USI newval
)
416 FRV_REGISTER_CONTROL
*control
;
420 /* Check access restrictions. */
421 frv_check_spr_write_access (current_cpu
, spr
);
423 /* Only set those fields which are writeable. */
424 control
= CPU_REGISTER_CONTROL (current_cpu
);
425 mask
= control
->spr
[spr
].read_only_mask
;
426 oldval
= GET_H_SPR (spr
);
428 newval
= (newval
& ~mask
) | (oldval
& mask
);
430 /* Some registers are represented by individual components which are
431 referenced more often than the register itself. */
435 spr_psr_set_handler (current_cpu
, newval
);
438 spr_tbr_set_handler (current_cpu
, newval
);
441 spr_bpsr_set_handler (current_cpu
, newval
);
444 spr_ccr_set_handler (current_cpu
, newval
);
447 spr_cccr_set_handler (current_cpu
, newval
);
453 spr_sr_set_handler (current_cpu
, spr
, newval
);
456 frv_cache_reconfigure (current_cpu
, CPU_INSN_CACHE (current_cpu
));
459 CPU (h_spr
[spr
]) = newval
;
464 /* Cover fns to access the gr_hi and gr_lo registers. */
466 frvbf_h_gr_hi_get_handler (SIM_CPU
*current_cpu
, UINT gr
)
468 return (GET_H_GR(gr
) >> 16) & 0xffff;
472 frvbf_h_gr_hi_set_handler (SIM_CPU
*current_cpu
, UINT gr
, UHI newval
)
474 USI value
= (GET_H_GR (gr
) & 0xffff) | (newval
<< 16);
475 SET_H_GR (gr
, value
);
479 frvbf_h_gr_lo_get_handler (SIM_CPU
*current_cpu
, UINT gr
)
481 return GET_H_GR(gr
) & 0xffff;
485 frvbf_h_gr_lo_set_handler (SIM_CPU
*current_cpu
, UINT gr
, UHI newval
)
487 USI value
= (GET_H_GR (gr
) & 0xffff0000) | (newval
& 0xffff);
488 SET_H_GR (gr
, value
);
491 /* Cover fns to access the tbr bits. */
493 spr_tbr_get_handler (SIM_CPU
*current_cpu
)
495 int tbr
= ((GET_H_TBR_TBA () & 0xfffff) << 12) |
496 ((GET_H_TBR_TT () & 0xff) << 4);
502 spr_tbr_set_handler (SIM_CPU
*current_cpu
, USI newval
)
506 SET_H_TBR_TBA ((tbr
>> 12) & 0xfffff) ;
507 SET_H_TBR_TT ((tbr
>> 4) & 0xff) ;
510 /* Cover fns to access the bpsr bits. */
512 spr_bpsr_get_handler (SIM_CPU
*current_cpu
)
514 int bpsr
= ((GET_H_BPSR_BS () & 0x1) << 12) |
515 ((GET_H_BPSR_BET () & 0x1) );
521 spr_bpsr_set_handler (SIM_CPU
*current_cpu
, USI newval
)
525 SET_H_BPSR_BS ((bpsr
>> 12) & 1);
526 SET_H_BPSR_BET ((bpsr
) & 1);
529 /* Cover fns to access the psr bits. */
531 spr_psr_get_handler (SIM_CPU
*current_cpu
)
533 int psr
= ((GET_H_PSR_IMPLE () & 0xf) << 28) |
534 ((GET_H_PSR_VER () & 0xf) << 24) |
535 ((GET_H_PSR_ICE () & 0x1) << 16) |
536 ((GET_H_PSR_NEM () & 0x1) << 14) |
537 ((GET_H_PSR_CM () & 0x1) << 13) |
538 ((GET_H_PSR_BE () & 0x1) << 12) |
539 ((GET_H_PSR_ESR () & 0x1) << 11) |
540 ((GET_H_PSR_EF () & 0x1) << 8) |
541 ((GET_H_PSR_EM () & 0x1) << 7) |
542 ((GET_H_PSR_PIL () & 0xf) << 3) |
543 ((GET_H_PSR_S () & 0x1) << 2) |
544 ((GET_H_PSR_PS () & 0x1) << 1) |
545 ((GET_H_PSR_ET () & 0x1) );
551 spr_psr_set_handler (SIM_CPU
*current_cpu
, USI newval
)
553 /* The handler for PSR.S references the value of PSR.ESR, so set PSR.S
555 SET_H_PSR_S ((newval
>> 2) & 1);
557 SET_H_PSR_IMPLE ((newval
>> 28) & 0xf);
558 SET_H_PSR_VER ((newval
>> 24) & 0xf);
559 SET_H_PSR_ICE ((newval
>> 16) & 1);
560 SET_H_PSR_NEM ((newval
>> 14) & 1);
561 SET_H_PSR_CM ((newval
>> 13) & 1);
562 SET_H_PSR_BE ((newval
>> 12) & 1);
563 SET_H_PSR_ESR ((newval
>> 11) & 1);
564 SET_H_PSR_EF ((newval
>> 8) & 1);
565 SET_H_PSR_EM ((newval
>> 7) & 1);
566 SET_H_PSR_PIL ((newval
>> 3) & 0xf);
567 SET_H_PSR_PS ((newval
>> 1) & 1);
568 SET_H_PSR_ET ((newval
) & 1);
572 frvbf_h_psr_s_set_handler (SIM_CPU
*current_cpu
, BI newval
)
574 /* If switching from user to supervisor mode, or vice-versa, then switch
575 the supervisor/user context. */
576 int psr_s
= GET_H_PSR_S ();
577 if (psr_s
!= (newval
& 1))
579 frvbf_switch_supervisor_user_context (current_cpu
);
580 CPU (h_psr_s
) = newval
& 1;
584 /* Cover fns to access the ccr bits. */
586 spr_ccr_get_handler (SIM_CPU
*current_cpu
)
588 int ccr
= ((GET_H_ICCR (H_ICCR_ICC3
) & 0xf) << 28) |
589 ((GET_H_ICCR (H_ICCR_ICC2
) & 0xf) << 24) |
590 ((GET_H_ICCR (H_ICCR_ICC1
) & 0xf) << 20) |
591 ((GET_H_ICCR (H_ICCR_ICC0
) & 0xf) << 16) |
592 ((GET_H_FCCR (H_FCCR_FCC3
) & 0xf) << 12) |
593 ((GET_H_FCCR (H_FCCR_FCC2
) & 0xf) << 8) |
594 ((GET_H_FCCR (H_FCCR_FCC1
) & 0xf) << 4) |
595 ((GET_H_FCCR (H_FCCR_FCC0
) & 0xf) );
601 spr_ccr_set_handler (SIM_CPU
*current_cpu
, USI newval
)
605 SET_H_ICCR (H_ICCR_ICC3
, (newval
>> 28) & 0xf);
606 SET_H_ICCR (H_ICCR_ICC2
, (newval
>> 24) & 0xf);
607 SET_H_ICCR (H_ICCR_ICC1
, (newval
>> 20) & 0xf);
608 SET_H_ICCR (H_ICCR_ICC0
, (newval
>> 16) & 0xf);
609 SET_H_FCCR (H_FCCR_FCC3
, (newval
>> 12) & 0xf);
610 SET_H_FCCR (H_FCCR_FCC2
, (newval
>> 8) & 0xf);
611 SET_H_FCCR (H_FCCR_FCC1
, (newval
>> 4) & 0xf);
612 SET_H_FCCR (H_FCCR_FCC0
, (newval
) & 0xf);
616 frvbf_set_icc_for_shift_right (
617 SIM_CPU
*current_cpu
, SI value
, SI shift
, QI icc
620 /* Set the C flag of the given icc to the logical OR of the bits shifted
622 int mask
= (1 << shift
) - 1;
623 if ((value
& mask
) != 0)
630 frvbf_set_icc_for_shift_left (
631 SIM_CPU
*current_cpu
, SI value
, SI shift
, QI icc
634 /* Set the V flag of the given icc to the logical OR of the bits shifted
636 int mask
= ((1 << shift
) - 1) << (32 - shift
);
637 if ((value
& mask
) != 0)
643 /* Cover fns to access the cccr bits. */
645 spr_cccr_get_handler (SIM_CPU
*current_cpu
)
647 int cccr
= ((GET_H_CCCR (H_CCCR_CC7
) & 0x3) << 14) |
648 ((GET_H_CCCR (H_CCCR_CC6
) & 0x3) << 12) |
649 ((GET_H_CCCR (H_CCCR_CC5
) & 0x3) << 10) |
650 ((GET_H_CCCR (H_CCCR_CC4
) & 0x3) << 8) |
651 ((GET_H_CCCR (H_CCCR_CC3
) & 0x3) << 6) |
652 ((GET_H_CCCR (H_CCCR_CC2
) & 0x3) << 4) |
653 ((GET_H_CCCR (H_CCCR_CC1
) & 0x3) << 2) |
654 ((GET_H_CCCR (H_CCCR_CC0
) & 0x3) );
660 spr_cccr_set_handler (SIM_CPU
*current_cpu
, USI newval
)
664 SET_H_CCCR (H_CCCR_CC7
, (newval
>> 14) & 0x3);
665 SET_H_CCCR (H_CCCR_CC6
, (newval
>> 12) & 0x3);
666 SET_H_CCCR (H_CCCR_CC5
, (newval
>> 10) & 0x3);
667 SET_H_CCCR (H_CCCR_CC4
, (newval
>> 8) & 0x3);
668 SET_H_CCCR (H_CCCR_CC3
, (newval
>> 6) & 0x3);
669 SET_H_CCCR (H_CCCR_CC2
, (newval
>> 4) & 0x3);
670 SET_H_CCCR (H_CCCR_CC1
, (newval
>> 2) & 0x3);
671 SET_H_CCCR (H_CCCR_CC0
, (newval
) & 0x3);
674 /* Cover fns to access the sr bits. */
676 spr_sr_get_handler (SIM_CPU
*current_cpu
, UINT spr
)
678 /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be GR4-7,
679 otherwise the correct mapping of USG4-7 or SGR4-7 will be in SR0-3. */
680 int psr_esr
= GET_H_PSR_ESR ();
682 return GET_H_GR (4 + (spr
- H_SPR_SR0
));
684 return CPU (h_spr
[spr
]);
688 spr_sr_set_handler (SIM_CPU
*current_cpu
, UINT spr
, USI newval
)
690 /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be GR4-7,
691 otherwise the correct mapping of USG4-7 or SGR4-7 will be in SR0-3. */
692 int psr_esr
= GET_H_PSR_ESR ();
694 SET_H_GR (4 + (spr
- H_SPR_SR0
), newval
);
696 CPU (h_spr
[spr
]) = newval
;
699 /* Switch SR0-SR4 with GR4-GR7 if PSR.ESR is set. */
701 frvbf_switch_supervisor_user_context (SIM_CPU
*current_cpu
)
703 if (GET_H_PSR_ESR ())
705 /* We need to be in supervisor mode to swap the registers. Access the
706 PSR.S directly in order to avoid recursive context switches. */
708 int save_psr_s
= CPU (h_psr_s
);
710 for (i
= 0; i
< 4; ++i
)
713 int spr
= i
+ H_SPR_SR0
;
714 SI tmp
= GET_H_SPR (spr
);
715 SET_H_SPR (spr
, GET_H_GR (gr
));
718 CPU (h_psr_s
) = save_psr_s
;
722 /* Handle load/store of quad registers. */
724 frvbf_load_quad_GR (SIM_CPU
*current_cpu
, PCADDR pc
, SI address
, SI targ_ix
)
729 /* Check memory alignment */
730 address
= check_memory_alignment (current_cpu
, address
, 0xf);
732 /* If we need to count cycles, then the cache operation will be
733 initiated from the model profiling functions.
734 See frvbf_model_.... */
737 CPU_LOAD_ADDRESS (current_cpu
) = address
;
738 CPU_LOAD_LENGTH (current_cpu
) = 16;
742 for (i
= 0; i
< 4; ++i
)
744 value
[i
] = frvbf_read_mem_SI (current_cpu
, pc
, address
);
747 sim_queue_fn_xi_write (current_cpu
, frvbf_h_gr_quad_set_handler
, targ_ix
,
753 frvbf_store_quad_GR (SIM_CPU
*current_cpu
, PCADDR pc
, SI address
, SI src_ix
)
759 /* Check register and memory alignment. */
760 src_ix
= check_register_alignment (current_cpu
, src_ix
, 3);
761 address
= check_memory_alignment (current_cpu
, address
, 0xf);
763 for (i
= 0; i
< 4; ++i
)
765 /* GR0 is always 0. */
769 value
[i
] = GET_H_GR (src_ix
+ i
);
772 if (GET_HSR0_DCE (hsr0
))
773 sim_queue_fn_mem_xi_write (current_cpu
, frvbf_mem_set_XI
, address
, value
);
775 sim_queue_mem_xi_write (current_cpu
, address
, value
);
779 frvbf_load_quad_FRint (SIM_CPU
*current_cpu
, PCADDR pc
, SI address
, SI targ_ix
)
784 /* Check memory alignment */
785 address
= check_memory_alignment (current_cpu
, address
, 0xf);
787 /* If we need to count cycles, then the cache operation will be
788 initiated from the model profiling functions.
789 See frvbf_model_.... */
792 CPU_LOAD_ADDRESS (current_cpu
) = address
;
793 CPU_LOAD_LENGTH (current_cpu
) = 16;
797 for (i
= 0; i
< 4; ++i
)
799 value
[i
] = frvbf_read_mem_SI (current_cpu
, pc
, address
);
802 sim_queue_fn_xi_write (current_cpu
, frvbf_h_fr_quad_set_handler
, targ_ix
,
808 frvbf_store_quad_FRint (SIM_CPU
*current_cpu
, PCADDR pc
, SI address
, SI src_ix
)
814 /* Check register and memory alignment. */
815 src_ix
= check_fr_register_alignment (current_cpu
, src_ix
, 3);
816 address
= check_memory_alignment (current_cpu
, address
, 0xf);
818 for (i
= 0; i
< 4; ++i
)
819 value
[i
] = GET_H_FR (src_ix
+ i
);
822 if (GET_HSR0_DCE (hsr0
))
823 sim_queue_fn_mem_xi_write (current_cpu
, frvbf_mem_set_XI
, address
, value
);
825 sim_queue_mem_xi_write (current_cpu
, address
, value
);
829 frvbf_load_quad_CPR (SIM_CPU
*current_cpu
, PCADDR pc
, SI address
, SI targ_ix
)
834 /* Check memory alignment */
835 address
= check_memory_alignment (current_cpu
, address
, 0xf);
837 /* If we need to count cycles, then the cache operation will be
838 initiated from the model profiling functions.
839 See frvbf_model_.... */
842 CPU_LOAD_ADDRESS (current_cpu
) = address
;
843 CPU_LOAD_LENGTH (current_cpu
) = 16;
847 for (i
= 0; i
< 4; ++i
)
849 value
[i
] = frvbf_read_mem_SI (current_cpu
, pc
, address
);
852 sim_queue_fn_xi_write (current_cpu
, frvbf_h_cpr_quad_set_handler
, targ_ix
,
858 frvbf_store_quad_CPR (SIM_CPU
*current_cpu
, PCADDR pc
, SI address
, SI src_ix
)
864 /* Check register and memory alignment. */
865 src_ix
= check_register_alignment (current_cpu
, src_ix
, 3);
866 address
= check_memory_alignment (current_cpu
, address
, 0xf);
868 for (i
= 0; i
< 4; ++i
)
869 value
[i
] = GET_H_CPR (src_ix
+ i
);
872 if (GET_HSR0_DCE (hsr0
))
873 sim_queue_fn_mem_xi_write (current_cpu
, frvbf_mem_set_XI
, address
, value
);
875 sim_queue_mem_xi_write (current_cpu
, address
, value
);
879 frvbf_signed_integer_divide (
880 SIM_CPU
*current_cpu
, SI arg1
, SI arg2
, int target_index
, int non_excepting
883 enum frv_dtt dtt
= FRV_DTT_NO_EXCEPTION
;
884 if (arg1
== 0x80000000 && arg2
== -1)
886 /* 0x80000000/(-1) must result in 0x7fffffff when ISR.EDE is set
887 otherwise it may result in 0x7fffffff (sparc compatibility) or
888 0x80000000 (C language compatibility). */
890 dtt
= FRV_DTT_OVERFLOW
;
893 if (GET_ISR_EDE (isr
))
894 sim_queue_fn_si_write (current_cpu
, frvbf_h_gr_set
, target_index
,
897 sim_queue_fn_si_write (current_cpu
, frvbf_h_gr_set
, target_index
,
899 frvbf_force_update (current_cpu
); /* Force update of target register. */
902 dtt
= FRV_DTT_DIVISION_BY_ZERO
;
904 sim_queue_fn_si_write (current_cpu
, frvbf_h_gr_set
, target_index
,
907 /* Check for exceptions. */
908 if (dtt
!= FRV_DTT_NO_EXCEPTION
)
909 dtt
= frvbf_division_exception (current_cpu
, dtt
, target_index
,
911 if (non_excepting
&& dtt
== FRV_DTT_NO_EXCEPTION
)
913 /* Non excepting instruction. Clear the NE flag for the target
916 GET_NE_FLAGS (NE_flags
, H_SPR_GNER0
);
917 CLEAR_NE_FLAG (NE_flags
, target_index
);
918 SET_NE_FLAGS (H_SPR_GNER0
, NE_flags
);
923 frvbf_unsigned_integer_divide (
924 SIM_CPU
*current_cpu
, USI arg1
, USI arg2
, int target_index
, int non_excepting
928 frvbf_division_exception (current_cpu
, FRV_DTT_DIVISION_BY_ZERO
,
929 target_index
, non_excepting
);
932 sim_queue_fn_si_write (current_cpu
, frvbf_h_gr_set
, target_index
,
936 /* Non excepting instruction. Clear the NE flag for the target
939 GET_NE_FLAGS (NE_flags
, H_SPR_GNER0
);
940 CLEAR_NE_FLAG (NE_flags
, target_index
);
941 SET_NE_FLAGS (H_SPR_GNER0
, NE_flags
);
946 /* Clear accumulators. */
948 frvbf_clear_accumulators (SIM_CPU
*current_cpu
, SI acc_ix
, int A
)
950 SIM_DESC sd
= CPU_STATE (current_cpu
);
952 (STATE_ARCHITECTURE (sd
)->mach
== bfd_mach_fr500
) ? 8 :
953 (STATE_ARCHITECTURE (sd
)->mach
== bfd_mach_fr550
) ? 8 :
954 (STATE_ARCHITECTURE (sd
)->mach
== bfd_mach_fr400
) ? 4 :
956 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (current_cpu
);
958 ps
->mclracc_acc
= acc_ix
;
960 if (A
== 0 || acc_ix
!= 0) /* Clear 1 accumuator? */
962 /* This instruction is a nop if the referenced accumulator is not
964 if (acc_ix
< acc_num
)
965 sim_queue_fn_di_write (current_cpu
, frvbf_h_acc40S_set
, acc_ix
, 0);
969 /* Clear all implemented accumulators. */
971 for (i
= 0; i
< acc_num
; ++i
)
972 sim_queue_fn_di_write (current_cpu
, frvbf_h_acc40S_set
, i
, 0);
976 /* Functions to aid insn semantics. */
978 /* Compute the result of the SCAN and SCANI insns after the shift and xor. */
980 frvbf_scan_result (SIM_CPU
*current_cpu
, SI value
)
988 /* Find the position of the first non-zero bit.
989 The loop will terminate since there is guaranteed to be at least one
991 mask
= 1 << (sizeof (mask
) * 8 - 1);
992 for (i
= 0; (value
& mask
) == 0; ++i
)
998 /* Compute the result of the cut insns. */
1000 frvbf_cut (SIM_CPU
*current_cpu
, SI reg1
, SI reg2
, SI cut_point
)
1005 result
= reg1
<< cut_point
;
1006 result
|= (reg2
>> (32 - cut_point
)) & ((1 << cut_point
) - 1);
1009 result
= reg2
<< (cut_point
- 32);
1014 /* Compute the result of the cut insns. */
1016 frvbf_media_cut (SIM_CPU
*current_cpu
, DI acc
, SI cut_point
)
1018 /* The cut point is the lower 6 bits (signed) of what we are passed. */
1019 cut_point
= cut_point
<< 26 >> 26;
1021 /* The cut_point is relative to bit 40 of 64 bits. */
1023 return (acc
<< (cut_point
+ 24)) >> 32;
1025 /* Extend the sign bit (bit 40) for negative cuts. */
1026 if (cut_point
== -32)
1027 return (acc
<< 24) >> 63; /* Special case for full shiftout. */
1029 return (acc
<< 24) >> (32 + -cut_point
);
1032 /* Compute the result of the cut insns. */
1034 frvbf_media_cut_ss (SIM_CPU
*current_cpu
, DI acc
, SI cut_point
)
1036 /* The cut point is the lower 6 bits (signed) of what we are passed. */
1037 cut_point
= cut_point
<< 26 >> 26;
1041 /* The cut_point is relative to bit 40 of 64 bits. */
1042 DI shifted
= acc
<< (cut_point
+ 24);
1043 DI unshifted
= shifted
>> (cut_point
+ 24);
1045 /* The result will be saturated if significant bits are shifted out. */
1046 if (unshifted
!= acc
)
1054 /* The result will not be saturated, so use the code for the normal cut. */
1055 return frvbf_media_cut (current_cpu
, acc
, cut_point
);
1058 /* Compute the result of int accumulator cut (SCUTSS). */
1060 frvbf_iacc_cut (SIM_CPU
*current_cpu
, DI acc
, SI cut_point
)
1062 /* The cut point is the lower 6 bits (signed) of what we are passed. */
1063 cut_point
= cut_point
<< 25 >> 25;
1065 if (cut_point
<= -32)
1066 cut_point
= -31; /* Special case for full shiftout. */
1068 /* Negative cuts (cannot saturate). */
1070 return acc
>> (32 + -cut_point
);
1072 /* Positive cuts will saturate if significant bits are shifted out. */
1073 if (acc
!= ((acc
<< cut_point
) >> cut_point
))
1079 /* No saturate, just cut. */
1080 return ((acc
<< cut_point
) >> 32);
1083 /* Compute the result of shift-left-arithmetic-with-saturation (SLASS). */
1085 frvbf_shift_left_arith_saturate (SIM_CPU
*current_cpu
, SI arg1
, SI arg2
)
1089 /* FIXME: what to do with negative shift amt? */
1096 /* Signed shift by 31 or greater saturates by definition. */
1099 return (SI
) 0x7fffffff;
1101 return (SI
) 0x80000000;
1103 /* OK, arg2 is between 1 and 31. */
1104 neg_arg1
= (arg1
< 0);
1107 /* Check for sign bit change (saturation). */
1108 if (neg_arg1
&& (arg1
>= 0))
1109 return (SI
) 0x80000000;
1110 else if (!neg_arg1
&& (arg1
< 0))
1111 return (SI
) 0x7fffffff;
1112 } while (--arg2
> 0);
1117 /* Simulate the media custom insns. */
1119 frvbf_media_cop (SIM_CPU
*current_cpu
, int cop_num
)
1121 /* The semantics of the insn are a nop, since it is implementation defined.
1122 We do need to check whether it's implemented and set up for MTRAP
1124 USI msr0
= GET_MSR (0);
1125 if (GET_MSR_EMCI (msr0
) == 0)
1127 /* no interrupt queued at this time. */
1128 frv_set_mp_exception_registers (current_cpu
, MTT_UNIMPLEMENTED_MPOP
, 0);
1132 /* Simulate the media average (MAVEH) insn. */
1134 do_media_average (SIM_CPU
*current_cpu
, HI arg1
, HI arg2
)
1136 SIM_DESC sd
= CPU_STATE (current_cpu
);
1137 SI sum
= (arg1
+ arg2
);
1138 HI result
= sum
>> 1;
1141 /* On fr400 and fr550, check the rounding mode. On other machines rounding is always
1142 toward negative infinity and the result is already correctly rounded. */
1143 switch (STATE_ARCHITECTURE (sd
)->mach
)
1145 /* Need to check rounding mode. */
1146 case bfd_mach_fr400
:
1147 case bfd_mach_fr550
:
1148 /* Check whether rounding will be required. Rounding will be required
1149 if the sum is an odd number. */
1150 rounding_value
= sum
& 1;
1153 USI msr0
= GET_MSR (0);
1154 /* Check MSR0.SRDAV to determine which bits control the rounding. */
1155 if (GET_MSR_SRDAV (msr0
))
1157 /* MSR0.RD controls rounding. */
1158 switch (GET_MSR_RD (msr0
))
1161 /* Round to nearest. */
1166 /* Round toward 0. */
1171 /* Round toward positive infinity. */
1175 /* Round toward negative infinity. The result is already
1176 correctly rounded. */
1185 /* MSR0.RDAV controls rounding. If set, round toward positive
1186 infinity. Otherwise the result is already rounded correctly
1187 toward negative infinity. */
1188 if (GET_MSR_RDAV (msr0
))
1201 frvbf_media_average (SIM_CPU
*current_cpu
, SI reg1
, SI reg2
)
1204 result
= do_media_average (current_cpu
, reg1
& 0xffff, reg2
& 0xffff);
1206 result
|= do_media_average (current_cpu
, (reg1
>> 16) & 0xffff,
1207 (reg2
>> 16) & 0xffff) << 16;
1211 /* Maintain a flag in order to know when to write the address of the next
1212 VLIW instruction into the LR register. Used by JMPL. JMPIL, and CALL. */
1214 frvbf_set_write_next_vliw_addr_to_LR (SIM_CPU
*current_cpu
, int value
)
1216 frvbf_write_next_vliw_addr_to_LR
= value
;
1220 frvbf_set_ne_index (SIM_CPU
*current_cpu
, int index
)
1224 /* Save the target register so interrupt processing can set its NE flag
1225 in the event of an exception. */
1226 frv_interrupt_state
.ne_index
= index
;
1228 /* Clear the NE flag of the target register. It will be reset if necessary
1229 in the event of an exception. */
1230 GET_NE_FLAGS (NE_flags
, H_SPR_FNER0
);
1231 CLEAR_NE_FLAG (NE_flags
, index
);
1232 SET_NE_FLAGS (H_SPR_FNER0
, NE_flags
);
1236 frvbf_force_update (SIM_CPU
*current_cpu
)
1238 CGEN_WRITE_QUEUE
*q
= CPU_WRITE_QUEUE (current_cpu
);
1239 int ix
= CGEN_WRITE_QUEUE_INDEX (q
);
1242 CGEN_WRITE_QUEUE_ELEMENT
*item
= CGEN_WRITE_QUEUE_ELEMENT (q
, ix
- 1);
1243 item
->flags
|= FRV_WRITE_QUEUE_FORCE_WRITE
;
1247 /* Condition code logic. */
1249 andcr
, orcr
, xorcr
, nandcr
, norcr
, andncr
, orncr
, nandncr
, norncr
,
1253 enum cr_result
{cr_undefined
, cr_undefined1
, cr_false
, cr_true
};
1255 static enum cr_result
1256 cr_logic
[num_cr_ops
][4][4] = {
1259 /* undefined undefined false true */
1260 /* undefined */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1261 /* undefined */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1262 /* false */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1263 /* true */ {cr_undefined
, cr_undefined
, cr_false
, cr_true
}
1267 /* undefined undefined false true */
1268 /* undefined */ {cr_undefined
, cr_undefined
, cr_false
, cr_true
},
1269 /* undefined */ {cr_undefined
, cr_undefined
, cr_false
, cr_true
},
1270 /* false */ {cr_false
, cr_false
, cr_false
, cr_true
},
1271 /* true */ {cr_true
, cr_true
, cr_true
, cr_true
}
1275 /* undefined undefined false true */
1276 /* undefined */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1277 /* undefined */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1278 /* false */ {cr_undefined
, cr_undefined
, cr_false
, cr_true
},
1279 /* true */ {cr_true
, cr_true
, cr_true
, cr_false
}
1283 /* undefined undefined false true */
1284 /* undefined */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1285 /* undefined */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1286 /* false */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1287 /* true */ {cr_undefined
, cr_undefined
, cr_true
, cr_false
}
1291 /* undefined undefined false true */
1292 /* undefined */ {cr_undefined
, cr_undefined
, cr_true
, cr_false
},
1293 /* undefined */ {cr_undefined
, cr_undefined
, cr_true
, cr_false
},
1294 /* false */ {cr_true
, cr_true
, cr_true
, cr_false
},
1295 /* true */ {cr_false
, cr_false
, cr_false
, cr_false
}
1299 /* undefined undefined false true */
1300 /* undefined */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1301 /* undefined */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1302 /* false */ {cr_undefined
, cr_undefined
, cr_false
, cr_true
},
1303 /* true */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
}
1307 /* undefined undefined false true */
1308 /* undefined */ {cr_undefined
, cr_undefined
, cr_false
, cr_true
},
1309 /* undefined */ {cr_undefined
, cr_undefined
, cr_false
, cr_true
},
1310 /* false */ {cr_true
, cr_true
, cr_true
, cr_true
},
1311 /* true */ {cr_false
, cr_false
, cr_false
, cr_true
}
1315 /* undefined undefined false true */
1316 /* undefined */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1317 /* undefined */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1318 /* false */ {cr_undefined
, cr_undefined
, cr_true
, cr_false
},
1319 /* true */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
}
1323 /* undefined undefined false true */
1324 /* undefined */ {cr_undefined
, cr_undefined
, cr_true
, cr_false
},
1325 /* undefined */ {cr_undefined
, cr_undefined
, cr_true
, cr_false
},
1326 /* false */ {cr_false
, cr_false
, cr_false
, cr_false
},
1327 /* true */ {cr_true
, cr_true
, cr_true
, cr_false
}
1332 frvbf_cr_logic (SIM_CPU
*current_cpu
, SI operation
, UQI arg1
, UQI arg2
)
1334 return cr_logic
[operation
][arg1
][arg2
];
1337 /* Cache Manipulation. */
1339 frvbf_insn_cache_preload (SIM_CPU
*current_cpu
, SI address
, USI length
, int lock
)
1341 /* If we need to count cycles, then the cache operation will be
1342 initiated from the model profiling functions.
1343 See frvbf_model_.... */
1344 int hsr0
= GET_HSR0 ();
1345 if (GET_HSR0_ICE (hsr0
))
1349 CPU_LOAD_ADDRESS (current_cpu
) = address
;
1350 CPU_LOAD_LENGTH (current_cpu
) = length
;
1351 CPU_LOAD_LOCK (current_cpu
) = lock
;
1355 FRV_CACHE
*cache
= CPU_INSN_CACHE (current_cpu
);
1356 frv_cache_preload (cache
, address
, length
, lock
);
1362 frvbf_data_cache_preload (SIM_CPU
*current_cpu
, SI address
, USI length
, int lock
)
1364 /* If we need to count cycles, then the cache operation will be
1365 initiated from the model profiling functions.
1366 See frvbf_model_.... */
1367 int hsr0
= GET_HSR0 ();
1368 if (GET_HSR0_DCE (hsr0
))
1372 CPU_LOAD_ADDRESS (current_cpu
) = address
;
1373 CPU_LOAD_LENGTH (current_cpu
) = length
;
1374 CPU_LOAD_LOCK (current_cpu
) = lock
;
1378 FRV_CACHE
*cache
= CPU_DATA_CACHE (current_cpu
);
1379 frv_cache_preload (cache
, address
, length
, lock
);
1385 frvbf_insn_cache_unlock (SIM_CPU
*current_cpu
, SI address
)
1387 /* If we need to count cycles, then the cache operation will be
1388 initiated from the model profiling functions.
1389 See frvbf_model_.... */
1390 int hsr0
= GET_HSR0 ();
1391 if (GET_HSR0_ICE (hsr0
))
1394 CPU_LOAD_ADDRESS (current_cpu
) = address
;
1397 FRV_CACHE
*cache
= CPU_INSN_CACHE (current_cpu
);
1398 frv_cache_unlock (cache
, address
);
1404 frvbf_data_cache_unlock (SIM_CPU
*current_cpu
, SI address
)
1406 /* If we need to count cycles, then the cache operation will be
1407 initiated from the model profiling functions.
1408 See frvbf_model_.... */
1409 int hsr0
= GET_HSR0 ();
1410 if (GET_HSR0_DCE (hsr0
))
1413 CPU_LOAD_ADDRESS (current_cpu
) = address
;
1416 FRV_CACHE
*cache
= CPU_DATA_CACHE (current_cpu
);
1417 frv_cache_unlock (cache
, address
);
1423 frvbf_insn_cache_invalidate (SIM_CPU
*current_cpu
, SI address
, int all
)
1425 /* Make sure the insn was specified properly. -1 will be passed for ALL
1426 for a icei with A=0. */
1429 frv_queue_program_interrupt (current_cpu
, FRV_ILLEGAL_INSTRUCTION
);
1433 /* If we need to count cycles, then the cache operation will be
1434 initiated from the model profiling functions.
1435 See frvbf_model_.... */
1438 /* Record the all-entries flag for use in profiling. */
1439 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (current_cpu
);
1440 ps
->all_cache_entries
= all
;
1441 CPU_LOAD_ADDRESS (current_cpu
) = address
;
1445 FRV_CACHE
*cache
= CPU_INSN_CACHE (current_cpu
);
1447 frv_cache_invalidate_all (cache
, 0/* flush? */);
1449 frv_cache_invalidate (cache
, address
, 0/* flush? */);
1454 frvbf_data_cache_invalidate (SIM_CPU
*current_cpu
, SI address
, int all
)
1456 /* Make sure the insn was specified properly. -1 will be passed for ALL
1457 for a dcei with A=0. */
1460 frv_queue_program_interrupt (current_cpu
, FRV_ILLEGAL_INSTRUCTION
);
1464 /* If we need to count cycles, then the cache operation will be
1465 initiated from the model profiling functions.
1466 See frvbf_model_.... */
1469 /* Record the all-entries flag for use in profiling. */
1470 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (current_cpu
);
1471 ps
->all_cache_entries
= all
;
1472 CPU_LOAD_ADDRESS (current_cpu
) = address
;
1476 FRV_CACHE
*cache
= CPU_DATA_CACHE (current_cpu
);
1478 frv_cache_invalidate_all (cache
, 0/* flush? */);
1480 frv_cache_invalidate (cache
, address
, 0/* flush? */);
1485 frvbf_data_cache_flush (SIM_CPU
*current_cpu
, SI address
, int all
)
1487 /* Make sure the insn was specified properly. -1 will be passed for ALL
1488 for a dcef with A=0. */
1491 frv_queue_program_interrupt (current_cpu
, FRV_ILLEGAL_INSTRUCTION
);
1495 /* If we need to count cycles, then the cache operation will be
1496 initiated from the model profiling functions.
1497 See frvbf_model_.... */
1500 /* Record the all-entries flag for use in profiling. */
1501 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (current_cpu
);
1502 ps
->all_cache_entries
= all
;
1503 CPU_LOAD_ADDRESS (current_cpu
) = address
;
1507 FRV_CACHE
*cache
= CPU_DATA_CACHE (current_cpu
);
1509 frv_cache_invalidate_all (cache
, 1/* flush? */);
1511 frv_cache_invalidate (cache
, address
, 1/* flush? */);