* config/tc-xtensa.c (xg_emit_insn): Include "dwarf2dbg.h" and add
[deliverable/binutils-gdb.git] / sim / frv / memory.c
CommitLineData
b34f6357 1/* frv memory model.
e930b1f5
DB
2 Copyright (C) 1999, 2000, 2001, 2003 Free Software Foundation, Inc.
3 Contributed by Red Hat
b34f6357
DB
4
5This file is part of the GNU simulators.
6
7This program is free software; you can redistribute it and/or modify
8it under the terms of the GNU General Public License as published by
9the Free Software Foundation; either version 2, or (at your option)
10any later version.
11
12This program is distributed in the hope that it will be useful,
13but WITHOUT ANY WARRANTY; without even the implied warranty of
14MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15GNU General Public License for more details.
16
17You should have received a copy of the GNU General Public License along
18with this program; if not, write to the Free Software Foundation, Inc.,
1959 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
20
21#define WANT_CPU frvbf
22#define WANT_CPU_FRVBF
23
24#include "sim-main.h"
25#include "cgen-mem.h"
26#include "bfd.h"
27
28/* Check for alignment and access restrictions. Return the corrected address.
29 */
30static SI
31fr400_check_data_read_address (SIM_CPU *current_cpu, SI address, int align_mask)
32{
33 /* Check access restrictions for double word loads only. */
34 if (align_mask == 7)
35 {
36 if ((USI)address >= 0xfe800000 && (USI)address <= 0xfeffffff)
37 frv_queue_data_access_error_interrupt (current_cpu, address);
38 }
39 return address;
40}
41
42static SI
43fr500_check_data_read_address (SIM_CPU *current_cpu, SI address, int align_mask)
44{
45 if (address & align_mask)
46 {
47 frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
48 address &= ~align_mask;
49 }
50
51 if ((USI)address >= 0xfeff0600 && (USI)address <= 0xfeff7fff
52 || (USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff)
53 frv_queue_data_access_error_interrupt (current_cpu, address);
54
55 return address;
56}
57
e930b1f5
DB
58static SI
59fr550_check_data_read_address (SIM_CPU *current_cpu, SI address, int align_mask)
60{
61 if ((USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff
62 || (align_mask > 0x3
63 && ((USI)address >= 0xfeff0000 && (USI)address <= 0xfeffffff)))
64 frv_queue_data_access_error_interrupt (current_cpu, address);
65
66 return address;
67}
68
b34f6357
DB
69static SI
70check_data_read_address (SIM_CPU *current_cpu, SI address, int align_mask)
71{
72 SIM_DESC sd = CPU_STATE (current_cpu);
73 switch (STATE_ARCHITECTURE (sd)->mach)
74 {
75 case bfd_mach_fr400:
76 address = fr400_check_data_read_address (current_cpu, address,
77 align_mask);
78 break;
79 case bfd_mach_frvtomcat:
80 case bfd_mach_fr500:
81 case bfd_mach_frv:
82 address = fr500_check_data_read_address (current_cpu, address,
83 align_mask);
84 break;
e930b1f5
DB
85 case bfd_mach_fr550:
86 address = fr550_check_data_read_address (current_cpu, address,
87 align_mask);
88 break;
b34f6357
DB
89 default:
90 break;
91 }
92
93 return address;
94}
95
96static SI
97fr400_check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
98{
99 if (address & align_mask)
100 {
101 /* Make sure that this exception is not masked. */
102 USI isr = GET_ISR ();
103 if (! GET_ISR_EMAM (isr))
104 {
105 /* Bad alignment causes a data_access_error on fr400. */
106 frv_queue_data_access_error_interrupt (current_cpu, address);
107 }
108 address &= ~align_mask;
109 }
110 /* Nothing to check. */
111 return address;
112}
113
114static SI
115fr500_check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
116{
117 if ((USI)address >= 0xfe000000 && (USI)address <= 0xfe003fff
118 || (USI)address >= 0xfe004000 && (USI)address <= 0xfe3fffff
119 || (USI)address >= 0xfe400000 && (USI)address <= 0xfe403fff
120 || (USI)address >= 0xfe404000 && (USI)address <= 0xfe7fffff)
121 frv_queue_data_access_exception_interrupt (current_cpu);
122
123 return address;
124}
125
e930b1f5
DB
126static SI
127fr550_check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
128{
129 /* No alignment restrictions on fr550 */
130
131 if ((USI)address >= 0xfe000000 && (USI)address <= 0xfe3fffff
132 || (USI)address >= 0xfe408000 && (USI)address <= 0xfe7fffff)
133 frv_queue_data_access_exception_interrupt (current_cpu);
134 else
135 {
136 USI hsr0 = GET_HSR0 ();
137 if (! GET_HSR0_RME (hsr0)
138 && (USI)address >= 0xfe400000 && (USI)address <= 0xfe407fff)
139 frv_queue_data_access_exception_interrupt (current_cpu);
140 }
141
142 return address;
143}
144
b34f6357
DB
145static SI
146check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
147{
148 SIM_DESC sd = CPU_STATE (current_cpu);
149 switch (STATE_ARCHITECTURE (sd)->mach)
150 {
151 case bfd_mach_fr400:
152 address = fr400_check_readwrite_address (current_cpu, address,
153 align_mask);
154 break;
155 case bfd_mach_frvtomcat:
156 case bfd_mach_fr500:
157 case bfd_mach_frv:
158 address = fr500_check_readwrite_address (current_cpu, address,
159 align_mask);
160 break;
e930b1f5
DB
161 case bfd_mach_fr550:
162 address = fr550_check_readwrite_address (current_cpu, address,
163 align_mask);
164 break;
b34f6357
DB
165 default:
166 break;
167 }
168
169 return address;
170}
171
172static PCADDR
173fr400_check_insn_read_address (SIM_CPU *current_cpu, PCADDR address,
174 int align_mask)
175{
176 if (address & align_mask)
177 {
178 frv_queue_instruction_access_error_interrupt (current_cpu);
179 address &= ~align_mask;
180 }
181 else if ((USI)address >= 0xfe800000 && (USI)address <= 0xfeffffff)
182 frv_queue_instruction_access_error_interrupt (current_cpu);
183
184 return address;
185}
186
187static PCADDR
188fr500_check_insn_read_address (SIM_CPU *current_cpu, PCADDR address,
189 int align_mask)
190{
191 if (address & align_mask)
192 {
193 frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
194 address &= ~align_mask;
195 }
196
197 if ((USI)address >= 0xfeff0600 && (USI)address <= 0xfeff7fff
198 || (USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff)
199 frv_queue_instruction_access_error_interrupt (current_cpu);
200 else if ((USI)address >= 0xfe004000 && (USI)address <= 0xfe3fffff
201 || (USI)address >= 0xfe400000 && (USI)address <= 0xfe403fff
202 || (USI)address >= 0xfe404000 && (USI)address <= 0xfe7fffff)
203 frv_queue_instruction_access_exception_interrupt (current_cpu);
204 else
205 {
206 USI hsr0 = GET_HSR0 ();
207 if (! GET_HSR0_RME (hsr0)
208 && (USI)address >= 0xfe000000 && (USI)address <= 0xfe003fff)
209 frv_queue_instruction_access_exception_interrupt (current_cpu);
210 }
211
212 return address;
213}
214
e930b1f5
DB
215static PCADDR
216fr550_check_insn_read_address (SIM_CPU *current_cpu, PCADDR address,
217 int align_mask)
218{
219 address &= ~align_mask;
220
221 if ((USI)address >= 0xfe800000 && (USI)address <= 0xfeffffff)
222 frv_queue_instruction_access_error_interrupt (current_cpu);
223 else if ((USI)address >= 0xfe008000 && (USI)address <= 0xfe7fffff)
224 frv_queue_instruction_access_exception_interrupt (current_cpu);
225 else
226 {
227 USI hsr0 = GET_HSR0 ();
228 if (! GET_HSR0_RME (hsr0)
229 && (USI)address >= 0xfe000000 && (USI)address <= 0xfe007fff)
230 frv_queue_instruction_access_exception_interrupt (current_cpu);
231 }
232
233 return address;
234}
235
b34f6357
DB
236static PCADDR
237check_insn_read_address (SIM_CPU *current_cpu, PCADDR address, int align_mask)
238{
239 SIM_DESC sd = CPU_STATE (current_cpu);
240 switch (STATE_ARCHITECTURE (sd)->mach)
241 {
242 case bfd_mach_fr400:
243 address = fr400_check_insn_read_address (current_cpu, address,
244 align_mask);
245 break;
246 case bfd_mach_frvtomcat:
247 case bfd_mach_fr500:
248 case bfd_mach_frv:
249 address = fr500_check_insn_read_address (current_cpu, address,
250 align_mask);
251 break;
e930b1f5
DB
252 case bfd_mach_fr550:
253 address = fr550_check_insn_read_address (current_cpu, address,
254 align_mask);
255 break;
b34f6357
DB
256 default:
257 break;
258 }
259
260 return address;
261}
262
/* Memory reads. */

/* Read a signed byte from ADDRESS, after checking access restrictions.
   When cycle counting is active (model_insn) the real read is done later
   by the model profiling code, so only the load parameters are recorded
   and a dummy value is returned.  With the data cache enabled, a cache
   hit returns the cached byte; otherwise read straight from memory.  */
QI
frvbf_read_mem_QI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0 = GET_HSR0 ();
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 0);
  address = check_readwrite_address (current_cpu, address, 0);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 1;
      CPU_LOAD_SIGNED (current_cpu) = 1;
      return 0xb7; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
	return CACHE_RETURN_DATA (cache, 0, address, QI, 1);
    }

  return GETMEMQI (current_cpu, pc, address);
}
295
/* Read an unsigned byte from ADDRESS; identical to frvbf_read_mem_QI
   except that the recorded load is unsigned and the cache/memory value
   is fetched as UQI.  */
UQI
frvbf_read_mem_UQI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0 = GET_HSR0 ();
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 0);
  address = check_readwrite_address (current_cpu, address, 0);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 1;
      CPU_LOAD_SIGNED (current_cpu) = 0;
      return 0xb7; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
	return CACHE_RETURN_DATA (cache, 0, address, UQI, 1);
    }

  return GETMEMUQI (current_cpu, pc, address);
}
327
e930b1f5
DB
/* Read a HI which spans two cache lines: fetch the two bytes
   individually, assemble them in target (big-endian) order, then
   convert to host order.  */
static HI
read_mem_unaligned_HI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  HI value = frvbf_read_mem_QI (current_cpu, pc, address);
  value <<= 8;
  value |= frvbf_read_mem_UQI (current_cpu, pc, address + 1);
  return T2H_2 (value);
}
337
b34f6357
DB
/* Read a signed halfword from ADDRESS, after checking alignment and
   access restrictions.  When cycle counting is active only the load
   parameters are recorded.  With the data cache enabled, an fr550
   access that straddles a cache line is split into byte reads; a cache
   hit returns cached data; otherwise read straight from memory.  */
HI
frvbf_read_mem_HI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0;
  FRV_CACHE *cache;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 1);
  address = check_readwrite_address (current_cpu, address, 1);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 2;
      CPU_LOAD_SIGNED (current_cpu) = 1;
      return 0xb711; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses cache line boundary */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
	{
	  if (DATA_CROSSES_CACHE_LINE (cache, address, 2))
	    return read_mem_unaligned_HI (current_cpu, pc, address);
	}
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
	return CACHE_RETURN_DATA (cache, 0, address, HI, 2);
    }

  return GETMEMHI (current_cpu, pc, address);
}
378
/* Read an unsigned halfword from ADDRESS; identical to
   frvbf_read_mem_HI except the recorded load is unsigned and the
   cache/memory value is fetched as UHI.  */
UHI
frvbf_read_mem_UHI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0;
  FRV_CACHE *cache;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 1);
  address = check_readwrite_address (current_cpu, address, 1);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 2;
      CPU_LOAD_SIGNED (current_cpu) = 0;
      return 0xb711; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses cache line boundary */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
	{
	  if (DATA_CROSSES_CACHE_LINE (cache, address, 2))
	    return read_mem_unaligned_HI (current_cpu, pc, address);
	}
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
	return CACHE_RETURN_DATA (cache, 0, address, UHI, 2);
    }

  return GETMEMUHI (current_cpu, pc, address);
}
419
e930b1f5
DB
/* Read a SI which spans two cache lines.  hi_len is the number of bytes
   of the value that lie in the first (lower-addressed) cache line; the
   word is assembled in target byte order in valarray from two smaller
   reads, then converted to host order.  Note the case-3 read starts one
   byte early and the trailing byte is fetched separately.  */
static SI
read_mem_unaligned_SI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
  unsigned hi_len = cache->line_size - (address & (cache->line_size - 1));
  char valarray[4];
  SI SIvalue;
  HI HIvalue;

  switch (hi_len)
    {
    case 1:
      valarray[0] = frvbf_read_mem_QI (current_cpu, pc, address);
      SIvalue = frvbf_read_mem_SI (current_cpu, pc, address + 1);
      SIvalue = H2T_4 (SIvalue);
      memcpy (valarray + 1, (char*)&SIvalue, 3);
      break;
    case 2:
      HIvalue = frvbf_read_mem_HI (current_cpu, pc, address);
      HIvalue = H2T_2 (HIvalue);
      memcpy (valarray, (char*)&HIvalue, 2);
      HIvalue = frvbf_read_mem_HI (current_cpu, pc, address + 2);
      HIvalue = H2T_2 (HIvalue);
      memcpy (valarray + 2, (char*)&HIvalue, 2);
      break;
    case 3:
      SIvalue = frvbf_read_mem_SI (current_cpu, pc, address - 1);
      SIvalue = H2T_4 (SIvalue);
      memcpy (valarray, (char*)&SIvalue, 3);
      valarray[3] = frvbf_read_mem_QI (current_cpu, pc, address + 3);
      break;
    default:
      abort (); /* can't happen */
    }
  return T2H_4 (*(SI*)valarray);
}
457
b34f6357
DB
/* Read a word from ADDRESS, after checking alignment and access
   restrictions.  When cycle counting is active only the load parameters
   are recorded.  With the data cache enabled, an fr550 access that
   straddles a cache line is split; a cache hit returns cached data;
   otherwise read straight from memory.  */
SI
frvbf_read_mem_SI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  FRV_CACHE *cache;
  USI hsr0;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 3);
  address = check_readwrite_address (current_cpu, address, 3);

  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 4;
      return 0x37111319; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses cache line boundary */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
	{
	  if (DATA_CROSSES_CACHE_LINE (cache, address, 4))
	    return read_mem_unaligned_SI (current_cpu, pc, address);
	}
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
	return CACHE_RETURN_DATA (cache, 0, address, SI, 4);
    }

  return GETMEMSI (current_cpu, pc, address);
}
497
/* WI (word integer) reads are identical to SI reads.  */
SI
frvbf_read_mem_WI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  return frvbf_read_mem_SI (current_cpu, pc, address);
}
503
e930b1f5
DB
/* Read a DI which spans two cache lines.  (The old comment said "SI";
   this helper handles the 8-byte case.)  hi_len is the number of bytes
   in the first (lower-addressed) cache line.  Each case assembles the
   value in target byte order from two smaller reads — note that for
   hi_len 3, 5, 6 and 7 the first read deliberately starts before
   ADDRESS so a naturally-sized access can be used, and the unwanted
   leading bytes are shifted out.  The result is converted to host
   order on return.  */
static DI
read_mem_unaligned_DI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
  unsigned hi_len = cache->line_size - (address & (cache->line_size - 1));
  DI value, value1;

  switch (hi_len)
    {
    case 1:
      value = frvbf_read_mem_QI (current_cpu, pc, address);
      value <<= 56;
      value1 = frvbf_read_mem_DI (current_cpu, pc, address + 1);
      value1 = H2T_8 (value1);
      value |= value1 & ((DI)0x00ffffff << 32);
      value |= value1 & 0xffffffffu;
      break;
    case 2:
      value = frvbf_read_mem_HI (current_cpu, pc, address);
      value = H2T_2 (value);
      value <<= 48;
      value1 = frvbf_read_mem_DI (current_cpu, pc, address + 2);
      value1 = H2T_8 (value1);
      value |= value1 & ((DI)0x0000ffff << 32);
      value |= value1 & 0xffffffffu;
      break;
    case 3:
      value = frvbf_read_mem_SI (current_cpu, pc, address - 1);
      value = H2T_4 (value);
      value <<= 40;
      value1 = frvbf_read_mem_DI (current_cpu, pc, address + 3);
      value1 = H2T_8 (value1);
      value |= value1 & ((DI)0x000000ff << 32);
      value |= value1 & 0xffffffffu;
      break;
    case 4:
      value = frvbf_read_mem_SI (current_cpu, pc, address);
      value = H2T_4 (value);
      value <<= 32;
      value1 = frvbf_read_mem_SI (current_cpu, pc, address + 4);
      value1 = H2T_4 (value1);
      value |= value1 & 0xffffffffu;
      break;
    case 5:
      value = frvbf_read_mem_DI (current_cpu, pc, address - 3);
      value = H2T_8 (value);
      value <<= 24;
      value1 = frvbf_read_mem_SI (current_cpu, pc, address + 5);
      value1 = H2T_4 (value1);
      value |= value1 & 0x00ffffff;
      break;
    case 6:
      value = frvbf_read_mem_DI (current_cpu, pc, address - 2);
      value = H2T_8 (value);
      value <<= 16;
      value1 = frvbf_read_mem_HI (current_cpu, pc, address + 6);
      value1 = H2T_2 (value1);
      value |= value1 & 0x0000ffff;
      break;
    case 7:
      value = frvbf_read_mem_DI (current_cpu, pc, address - 1);
      value = H2T_8 (value);
      value <<= 8;
      value1 = frvbf_read_mem_QI (current_cpu, pc, address + 7);
      value |= value1 & 0x000000ff;
      break;
    default:
      abort (); /* can't happen */
    }
  return T2H_8 (value);
}
576
b34f6357
DB
/* Read a double word from ADDRESS, after checking alignment and access
   restrictions.  When cycle counting is active only the load parameters
   are recorded.  With the data cache enabled, an fr550 access that
   straddles a cache line is split; a cache hit returns cached data;
   otherwise read straight from memory.  */
DI
frvbf_read_mem_DI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0;
  FRV_CACHE *cache;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 7);
  address = check_readwrite_address (current_cpu, address, 7);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 8;
      return 0x37111319; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses cache line boundary */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
	{
	  if (DATA_CROSSES_CACHE_LINE (cache, address, 8))
	    return read_mem_unaligned_DI (current_cpu, pc, address);
	}
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
	return CACHE_RETURN_DATA (cache, 0, address, DI, 8);
    }

  return GETMEMDI (current_cpu, pc, address);
}
616
/* Read a double-precision float from ADDRESS.  Same structure as
   frvbf_read_mem_DI; the unaligned fr550 case reuses
   read_mem_unaligned_DI, which presumes DF and DI share a 64-bit
   representation in this sim — NOTE(review): confirm against the
   cgen-generated type definitions.  */
DF
frvbf_read_mem_DF (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0;
  FRV_CACHE *cache;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 7);
  address = check_readwrite_address (current_cpu, address, 7);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 8;
      return 0x37111319; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses cache line boundary */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
	{
	  if (DATA_CROSSES_CACHE_LINE (cache, address, 8))
	    return read_mem_unaligned_DI (current_cpu, pc, address);
	}
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
	return CACHE_RETURN_DATA (cache, 0, address, DF, 8);
    }

  return GETMEMDF (current_cpu, pc, address);
}
656
/* Fetch an instruction word from VPC.  With the insn cache enabled the
   read is done passively so it does not perturb cache statistics (the
   counted read happens in frvbf_simulate_insn_prefetch); on a miss, or
   with the cache disabled, read directly from the core.  */
USI
frvbf_read_imem_USI (SIM_CPU *current_cpu, PCADDR vpc)
{
  USI hsr0;
  vpc = check_insn_read_address (current_cpu, vpc, 3);

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0))
    {
      FRV_CACHE *cache;
      USI value;

      /* We don't want this to show up in the cache statistics. That read
	 is done in frvbf_simulate_insn_prefetch. So read the cache or memory
	 passively here.  */
      cache = CPU_INSN_CACHE (current_cpu);
      if (frv_cache_read_passive_SI (cache, vpc, &value))
	return value;
    }
  return sim_core_read_unaligned_4 (current_cpu, vpc, read_map, vpc);
}
678
679static SI
680fr400_check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
681{
682 if (address & align_mask)
683 {
684 /* On the fr400, this causes a data_access_error. */
685 /* Make sure that this exception is not masked. */
686 USI isr = GET_ISR ();
687 if (! GET_ISR_EMAM (isr))
688 {
689 /* Bad alignment causes a data_access_error on fr400. */
690 frv_queue_data_access_error_interrupt (current_cpu, address);
691 }
692 address &= ~align_mask;
693 }
694 if (align_mask == 7
695 && address >= 0xfe800000 && address <= 0xfeffffff)
696 frv_queue_program_interrupt (current_cpu, FRV_DATA_STORE_ERROR);
697
698 return address;
699}
700
701static SI
702fr500_check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
703{
704 if (address & align_mask)
705 {
706 struct frv_interrupt_queue_element *item =
707 frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
708 /* Record the correct vliw slot with the interrupt. */
709 if (item != NULL)
710 item->slot = frv_interrupt_state.slot;
711 address &= ~align_mask;
712 }
713 if (address >= 0xfeff0600 && address <= 0xfeff7fff
714 || address >= 0xfe800000 && address <= 0xfefeffff)
715 frv_queue_program_interrupt (current_cpu, FRV_DATA_STORE_ERROR);
716
717 return address;
718}
719
e930b1f5
DB
720static SI
721fr550_check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
722{
723 if ((USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff
724 || (align_mask > 0x3
725 && ((USI)address >= 0xfeff0000 && (USI)address <= 0xfeffffff)))
726 frv_queue_program_interrupt (current_cpu, FRV_DATA_STORE_ERROR);
727
728 return address;
729}
730
b34f6357
DB
731static SI
732check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
733{
734 SIM_DESC sd = CPU_STATE (current_cpu);
735 switch (STATE_ARCHITECTURE (sd)->mach)
736 {
737 case bfd_mach_fr400:
738 address = fr400_check_write_address (current_cpu, address, align_mask);
739 break;
740 case bfd_mach_frvtomcat:
741 case bfd_mach_fr500:
742 case bfd_mach_frv:
743 address = fr500_check_write_address (current_cpu, address, align_mask);
744 break;
e930b1f5
DB
745 case bfd_mach_fr550:
746 address = fr550_check_write_address (current_cpu, address, align_mask);
747 break;
b34f6357
DB
748 default:
749 break;
750 }
751 return address;
752}
753
754void
755frvbf_write_mem_QI (SIM_CPU *current_cpu, IADDR pc, SI address, QI value)
756{
757 USI hsr0;
758 hsr0 = GET_HSR0 ();
759 if (GET_HSR0_DCE (hsr0))
760 sim_queue_fn_mem_qi_write (current_cpu, frvbf_mem_set_QI, address, value);
761 else
762 sim_queue_mem_qi_write (current_cpu, address, value);
763 frv_set_write_queue_slot (current_cpu);
764}
765
/* Unsigned byte writes are identical to signed byte writes.  */
void
frvbf_write_mem_UQI (SIM_CPU *current_cpu, IADDR pc, SI address, UQI value)
{
  frvbf_write_mem_QI (current_cpu, pc, address, value);
}
771
772void
773frvbf_write_mem_HI (SIM_CPU *current_cpu, IADDR pc, SI address, HI value)
774{
775 USI hsr0;
776 hsr0 = GET_HSR0 ();
777 if (GET_HSR0_DCE (hsr0))
778 sim_queue_fn_mem_hi_write (current_cpu, frvbf_mem_set_HI, address, value);
779 else
780 sim_queue_mem_hi_write (current_cpu, address, value);
781 frv_set_write_queue_slot (current_cpu);
782}
783
/* Unsigned halfword writes are identical to signed halfword writes.  */
void
frvbf_write_mem_UHI (SIM_CPU *current_cpu, IADDR pc, SI address, UHI value)
{
  frvbf_write_mem_HI (current_cpu, pc, address, value);
}
789
790void
791frvbf_write_mem_SI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
792{
793 USI hsr0;
794 hsr0 = GET_HSR0 ();
795 if (GET_HSR0_DCE (hsr0))
796 sim_queue_fn_mem_si_write (current_cpu, frvbf_mem_set_SI, address, value);
797 else
798 sim_queue_mem_si_write (current_cpu, address, value);
799 frv_set_write_queue_slot (current_cpu);
800}
801
/* WI (word integer) writes are identical to SI writes.  */
void
frvbf_write_mem_WI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
{
  frvbf_write_mem_SI (current_cpu, pc, address, value);
}
807
808void
809frvbf_write_mem_DI (SIM_CPU *current_cpu, IADDR pc, SI address, DI value)
810{
811 USI hsr0;
812 hsr0 = GET_HSR0 ();
813 if (GET_HSR0_DCE (hsr0))
814 sim_queue_fn_mem_di_write (current_cpu, frvbf_mem_set_DI, address, value);
815 else
816 sim_queue_mem_di_write (current_cpu, address, value);
817 frv_set_write_queue_slot (current_cpu);
818}
819
820void
821frvbf_write_mem_DF (SIM_CPU *current_cpu, IADDR pc, SI address, DF value)
822{
823 USI hsr0;
824 hsr0 = GET_HSR0 ();
825 if (GET_HSR0_DCE (hsr0))
826 sim_queue_fn_mem_df_write (current_cpu, frvbf_mem_set_DF, address, value);
827 else
828 sim_queue_mem_df_write (current_cpu, address, value);
829 frv_set_write_queue_slot (current_cpu);
830}
831
/* Memory writes. These do the actual writing through the cache. */

/* Write a byte through the data cache after checking access
   restrictions.  When cycle counting is active the store is submitted
   to the cache request queue so it is prioritized with other accesses;
   otherwise the cache is written immediately.  */
void
frvbf_mem_set_QI (SIM_CPU *current_cpu, IADDR pc, SI address, QI value)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 0);
  address = check_readwrite_address (current_cpu, address, 0);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request. Otherwise perform the write now.  */
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot, (char *)&value,
			       sizeof (value));
    }
  else
    frv_cache_write (cache, address, (char *)&value, sizeof (value));
}
853
e930b1f5
DB
/* Write a HI which spans two cache lines, one byte per cache write.
   VALUE is already in target byte order, so its bytes can be stored
   directly.  */
static void
mem_set_unaligned_HI (SIM_CPU *current_cpu, IADDR pc, SI address, HI value)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
  /* value is already in target byte order */
  frv_cache_write (cache, address, (char *)&value, 1);
  frv_cache_write (cache, address + 1, ((char *)&value + 1), 1);
}
863
b34f6357
DB
/* Write a halfword through the data cache after checking access
   restrictions.  The value is converted to target byte order first.
   When cycle counting is active the store is submitted to the cache
   request queue; otherwise it is written immediately, splitting an
   fr550 store that straddles a cache line into byte writes.  */
void
frvbf_mem_set_HI (SIM_CPU *current_cpu, IADDR pc, SI address, HI value)
{
  FRV_CACHE *cache;

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 1);
  address = check_readwrite_address (current_cpu, address, 1);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request. Otherwise perform the write now.  */
  value = H2T_2 (value);
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot,
			       (char *)&value, sizeof (value));
    }
  else
    {
      /* Handle access which crosses cache line boundary */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
	{
	  if (DATA_CROSSES_CACHE_LINE (cache, address, 2))
	    {
	      mem_set_unaligned_HI (current_cpu, pc, address, value);
	      return;
	    }
	}
      frv_cache_write (cache, address, (char *)&value, sizeof (value));
    }
}
898
/* Write a SI which spans two cache lines.  hi_len is the number of
   bytes that fit in the first (lower-addressed) line; the remainder
   goes to the start of the next line.  VALUE is already in target byte
   order.  */
static void
mem_set_unaligned_SI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
  unsigned hi_len = cache->line_size - (address & (cache->line_size - 1));
  /* value is already in target byte order */
  frv_cache_write (cache, address, (char *)&value, hi_len);
  frv_cache_write (cache, address + hi_len, (char *)&value + hi_len, 4 - hi_len);
}
909
/* Write a word through the data cache after checking access
   restrictions.  The value is converted to target byte order first.
   When cycle counting is active the store is submitted to the cache
   request queue; otherwise it is written immediately, splitting an
   fr550 store that straddles a cache line.  */
void
frvbf_mem_set_SI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
{
  FRV_CACHE *cache;

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 3);
  address = check_readwrite_address (current_cpu, address, 3);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request. Otherwise perform the write now.  */
  cache = CPU_DATA_CACHE (current_cpu);
  value = H2T_4 (value);
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot,
			       (char *)&value, sizeof (value));
    }
  else
    {
      /* Handle access which crosses cache line boundary */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
	{
	  if (DATA_CROSSES_CACHE_LINE (cache, address, 4))
	    {
	      mem_set_unaligned_SI (current_cpu, pc, address, value);
	      return;
	    }
	}
      frv_cache_write (cache, address, (char *)&value, sizeof (value));
    }
}
944
/* Write a DI which spans two cache lines.  hi_len is the number of
   bytes that fit in the first (lower-addressed) line; the remainder
   goes to the start of the next line.  VALUE is already in target byte
   order.  */
static void
mem_set_unaligned_DI (SIM_CPU *current_cpu, IADDR pc, SI address, DI value)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
  unsigned hi_len = cache->line_size - (address & (cache->line_size - 1));
  /* value is already in target byte order */
  frv_cache_write (cache, address, (char *)&value, hi_len);
  frv_cache_write (cache, address + hi_len, (char *)&value + hi_len, 8 - hi_len);
}
955
/* Write a double word through the data cache after checking access
   restrictions.  The value is converted to target byte order first.
   When cycle counting is active the store is submitted to the cache
   request queue; otherwise it is written immediately, splitting an
   fr550 store that straddles a cache line.  */
void
frvbf_mem_set_DI (SIM_CPU *current_cpu, IADDR pc, SI address, DI value)
{
  FRV_CACHE *cache;

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 7);
  address = check_readwrite_address (current_cpu, address, 7);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request. Otherwise perform the write now.  */
  value = H2T_8 (value);
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot,
			       (char *)&value, sizeof (value));
    }
  else
    {
      /* Handle access which crosses cache line boundary */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
	{
	  if (DATA_CROSSES_CACHE_LINE (cache, address, 8))
	    {
	      mem_set_unaligned_DI (current_cpu, pc, address, value);
	      return;
	    }
	}
      frv_cache_write (cache, address, (char *)&value, sizeof (value));
    }
}
990
/* Write a double-precision float through the data cache after checking
   access restrictions.  Mirrors frvbf_mem_set_DI; H2T_8 is applied to
   the DF value and the unaligned path reuses mem_set_unaligned_DI,
   which presumes DF and DI share a 64-bit representation in this sim —
   NOTE(review): confirm against the cgen-generated type definitions.  */
void
frvbf_mem_set_DF (SIM_CPU *current_cpu, IADDR pc, SI address, DF value)
{
  FRV_CACHE *cache;

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 7);
  address = check_readwrite_address (current_cpu, address, 7);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request. Otherwise perform the write now.  */
  value = H2T_8 (value);
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot,
			       (char *)&value, sizeof (value));
    }
  else
    {
      /* Handle access which crosses cache line boundary */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
	{
	  if (DATA_CROSSES_CACHE_LINE (cache, address, 8))
	    {
	      mem_set_unaligned_DI (current_cpu, pc, address, value);
	      return;
	    }
	}
      frv_cache_write (cache, address, (char *)&value, sizeof (value));
    }
}
1025
/* Write a 16-byte quantity (four words pointed to by VALUE) through the
   data cache after checking access restrictions.  Each word is
   converted to target byte order in place.  When cycle counting is
   active the store is submitted to the cache request queue; otherwise
   it is written immediately.  */
void
frvbf_mem_set_XI (SIM_CPU *current_cpu, IADDR pc, SI address, SI *value)
{
  int i;
  FRV_CACHE *cache;

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 0xf);
  address = check_readwrite_address (current_cpu, address, 0xf);

  /* TODO -- reverse word order as well? */
  for (i = 0; i < 4; ++i)
    value[i] = H2T_4 (value[i]);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request. Otherwise perform the write now.  */
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot, (char*)value, 16);
    }
  else
    frv_cache_write (cache, address, (char*)value, 16);
}
1051
/* Record the current VLIW slot on the element at the top of the write queue.
   Assumes the caller has just queued a write, so the newest element is
   at CGEN_WRITE_QUEUE_INDEX - 1, and that the current VLIW has issued
   at least one instruction (next_slot - 1 is valid).  */
void
frv_set_write_queue_slot (SIM_CPU *current_cpu)
{
  FRV_VLIW *vliw = CPU_VLIW (current_cpu);
  int slot = vliw->next_slot - 1;
  CGEN_WRITE_QUEUE *q = CPU_WRITE_QUEUE (current_cpu);
  int ix = CGEN_WRITE_QUEUE_INDEX (q) - 1;
  CGEN_WRITE_QUEUE_ELEMENT *item = CGEN_WRITE_QUEUE_ELEMENT (q, ix);
  CGEN_WRITE_QUEUE_ELEMENT_PIPE (item) = (*vliw->current_vliw)[slot];
}
This page took 0.094467 seconds and 4 git commands to generate.