/* frv cache model.
   Copyright (C) 1999, 2000, 2001, 2003 Free Software Foundation, Inc.
   Contributed by Red Hat.

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */

#define WANT_CPU frvbf
#define WANT_CPU_FRVBF

#include "libiberty.h"
#include "sim-main.h"
#include "cache.h"
void
frv_cache_init (SIM_CPU *cpu, FRV_CACHE *cache)
{
  int elements;
  int i, j;
  SIM_DESC sd;

  /* Set defaults for fields which are not initialized.  */
  sd = CPU_STATE (cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
      if (cache->configured_sets == 0)
        cache->configured_sets = 512;
      if (cache->configured_ways == 0)
        cache->configured_ways = 2;
      if (cache->line_size == 0)
        cache->line_size = 32;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    case bfd_mach_fr550:
      if (cache->configured_sets == 0)
        cache->configured_sets = 128;
      if (cache->configured_ways == 0)
        cache->configured_ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    default:
      if (cache->configured_sets == 0)
        cache->configured_sets = 64;
      if (cache->configured_ways == 0)
        cache->configured_ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    }

  frv_cache_reconfigure (cpu, cache);

  /* First allocate the cache storage based on the given dimensions.  */
  elements = cache->sets * cache->ways;
  cache->tag_storage = (FRV_CACHE_TAG *)
    zalloc (elements * sizeof (*cache->tag_storage));
  cache->data_storage = (char *) xmalloc (elements * cache->line_size);

  /* Initialize the pipelines and status buffers.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      cache->pipeline[i].requests = NULL;
      cache->pipeline[i].status.flush.valid = 0;
      cache->pipeline[i].status.return_buffer.valid = 0;
      cache->pipeline[i].status.return_buffer.data
        = (char *) xmalloc (cache->line_size);
      for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
        cache->pipeline[i].stages[j].request = NULL;
    }
  cache->BARS.valid = 0;
  cache->NARS.valid = 0;

  /* Now set the cache state.  */
  cache->cpu = cpu;
  cache->statistics.accesses = 0;
  cache->statistics.hits = 0;
}
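/* A rough sizing example, for orientation only (not part of the original
   source): with the fallback geometry set above when no mach-specific value
   applies -- 64 sets, 4 ways, 64-byte lines -- frv_cache_init allocates
   64 * 4 = 256 tags and 256 * 64 = 16384 bytes (16KB) of line data, plus one
   line_size return buffer per cache pipeline.  */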
void
frv_cache_term (FRV_CACHE *cache)
{
  /* Free the cache storage.  */
  free (cache->tag_storage);
  free (cache->data_storage);
  free (cache->pipeline[LS].status.return_buffer.data);
  free (cache->pipeline[LD].status.return_buffer.data);
}
/* Reset the cache configuration based on registers in the cpu.  */
void
frv_cache_reconfigure (SIM_CPU *current_cpu, FRV_CACHE *cache)
{
  int ihsr8;
  int icdm;
  SIM_DESC sd;

  /* Set defaults for fields which are not initialized.  */
  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          ihsr8 = GET_IHSR8 ();
          icdm = GET_IHSR8_ICDM (ihsr8);
          /* If IHSR8.ICDM is set, then the cache becomes a one way cache.  */
          if (icdm)
            {
              cache->sets = cache->sets * cache->ways;
              cache->ways = 1;
              break;
            }
        }
      /* fall through */
    default:
      /* Set the cache to its original settings.  */
      cache->sets = cache->configured_sets;
      cache->ways = cache->configured_ways;
      break;
    }
}
/* Determine whether the given cache is enabled.  */
int
frv_cache_enabled (FRV_CACHE *cache)
{
  SIM_CPU *current_cpu = cache->cpu;
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0) && cache == CPU_INSN_CACHE (current_cpu))
    return 1;
  if (GET_HSR0_DCE (hsr0) && cache == CPU_DATA_CACHE (current_cpu))
    return 1;
  return 0;
}
/* Determine whether the given address is RAM access, assuming that HSR0.RME
   is set.  */
static int
ram_access (FRV_CACHE *cache, USI address)
{
  int ihsr8;
  int cwe;
  USI start, end, way_size;
  SIM_CPU *current_cpu = cache->cpu;
  SIM_DESC sd = CPU_STATE (current_cpu);

  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      /* IHSR8.DCWE or IHSR8.ICWE determines which ways get RAM access.  */
      ihsr8 = GET_IHSR8 ();
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          start = 0xfe000000;
          end = 0xfe008000;
          cwe = GET_IHSR8_ICWE (ihsr8);
        }
      else
        {
          start = 0xfe400000;
          end = 0xfe408000;
          cwe = GET_IHSR8_DCWE (ihsr8);
        }
      way_size = (end - start) / 4;
      end -= way_size * cwe;
      return address >= start && address < end;
    default:
      break;
    }

  return 1; /* RAM access */
}
/* Determine whether the given address should be accessed without using
   the cache.  */
static int
non_cache_access (FRV_CACHE *cache, USI address)
{
  int hsr0;
  SIM_DESC sd;
  SIM_CPU *current_cpu = cache->cpu;

  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
      if (address >= 0xff000000
          || (address >= 0xfe000000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      break;
    case bfd_mach_fr550:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe007fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe407fff)
        return 1; /* non-cache access */
      break;
    default:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe003fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe403fff)
        return 1; /* non-cache access */
      break;
    }

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_RME (hsr0))
    return ram_access (cache, address);

  return 0; /* cache-access */
}
/* Find the cache line corresponding to the given address.
   If it is found then 'return_tag' is set to point to the tag for that line
   and 1 is returned.
   If it is not found, 'return_tag' is set to point to the tag for the least
   recently used line and 0 is returned.  */
static int
get_tag (FRV_CACHE *cache, SI address, FRV_CACHE_TAG **return_tag)
{
  int set;
  int way;
  int bits;
  USI tag;
  FRV_CACHE_TAG *found;
  FRV_CACHE_TAG *available;

  ++cache->statistics.accesses;

  /* First calculate which set this address will fall into. Do this by
     shifting out the bits representing the offset within the line and
     then keeping enough bits to index the set.  */
  set = address & ~(cache->line_size - 1);
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set >>= 1;
  set &= (cache->sets - 1);

  /* Now search the set for a valid tag which matches this address. At the
     same time make note of the least recently used tag, which we will return
     if no match is found.  */
  available = NULL;
  tag = CACHE_ADDRESS_TAG (cache, address);
  for (way = 0; way < cache->ways; ++way)
    {
      found = CACHE_TAG (cache, set, way);
      /* This tag is available as the least recently used if it is the
         least recently used seen so far and it is not locked.  */
      if (! found->locked && (available == NULL || available->lru > found->lru))
        available = found;
      if (found->valid && found->tag == tag)
        {
          *return_tag = found;
          ++cache->statistics.hits;
          return 1; /* found it */
        }
    }

  *return_tag = available;
  return 0; /* not found */
}
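/* Worked example of the set-index computation above, assuming 64-byte lines
   and 64 sets (the fallback geometry): for address 0x12345678, shifting out
   the 6 line-offset bits gives 0x48D159, and masking with (sets - 1) = 0x3F
   selects set 0x19.  The value compared against each way in that set is
   CACHE_ADDRESS_TAG (cache, 0x12345678).  */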
/* Write the given data out to memory.  */
static void
write_data_to_memory (FRV_CACHE *cache, SI address, char *data, int length)
{
  SIM_CPU *cpu = cache->cpu;
  IADDR pc = CPU_PC_GET (cpu);
  int write_index = 0;

  switch (length)
    {
    case 1:
    default:
      PROFILE_COUNT_WRITE (cpu, address, MODE_QI);
      break;
    case 2:
      PROFILE_COUNT_WRITE (cpu, address, MODE_HI);
      break;
    case 4:
      PROFILE_COUNT_WRITE (cpu, address, MODE_SI);
      break;
    case 8:
      PROFILE_COUNT_WRITE (cpu, address, MODE_DI);
      break;
    }

  for (write_index = 0; write_index < length; ++write_index)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      sim_core_write_unaligned_1 (cpu, pc, write_map, address + write_index,
                                  data[write_index]);
    }
}
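/* Note on the profiling above (illustrative, assuming the default case falls
   back to MODE_QI as reconstructed): only lengths 1, 2, 4 and 8 are counted
   as a single QI/HI/SI/DI write; any other length is counted once as MODE_QI.
   The store itself always goes through sim_core_write_unaligned_1 one byte at
   a time regardless of length.  */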
/* Write a cache line out to memory.  */
static void
write_line_to_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  SI address = tag->tag;
  int set = CACHE_TAG_SET_NUMBER (cache, tag);
  int bits;
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set <<= 1;
  address |= set;
  write_data_to_memory (cache, address, tag->line, cache->line_size);
}
static void
read_data_from_memory (SIM_CPU *current_cpu, SI address, char *buffer,
                       int length)
{
  PCADDR pc = CPU_PC_GET (current_cpu);
  int i;
  PROFILE_COUNT_READ (current_cpu, address, MODE_QI);
  for (i = 0; i < length; ++i)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      buffer[i] = sim_core_read_unaligned_1 (current_cpu, pc, read_map,
                                             address + i);
    }
}
/* Fill the given cache line from memory.  */
static void
fill_line_from_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag, SI address)
{
  PCADDR pc;
  int line_alignment;
  SI read_address;
  SIM_CPU *current_cpu = cache->cpu;

  /* If this line is already valid and the cache is in copy-back mode, then
     write this line to memory before refilling it.
     Check the dirty bit first, since it is less likely to be set.  */
  if (tag->dirty && tag->valid)
    {
      int hsr0 = GET_HSR0 ();
      if (GET_HSR0_CBM (hsr0))
        write_line_to_memory (cache, tag);
    }
  else if (tag->line == NULL)
    {
      int line_index = tag - cache->tag_storage;
      tag->line = cache->data_storage + (line_index * cache->line_size);
    }

  pc = CPU_PC_GET (current_cpu);
  line_alignment = cache->line_size - 1;
  read_address = address & ~line_alignment;
  read_data_from_memory (current_cpu, read_address, tag->line,
                         cache->line_size);
  tag->tag = CACHE_ADDRESS_TAG (cache, address);
  tag->valid = 1;
}
/* Update the LRU information for the tags in the same set as the given tag.  */
static void
set_most_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru > tag->lru)
        --item->lru;
      ++item;
    }
  tag->lru = cache->ways; /* Mark as most recently used.  */
}
/* Update the LRU information for the tags in the same set as the given tag.  */
static void
set_least_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru != 0 && item->lru < tag->lru)
        ++item->lru;
      ++item;
    }
  tag->lru = 0; /* Mark as least recently used.  */
}
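/* Worked LRU example for the two routines above (illustrative only): within
   a set, lru ranges from 0 (least recently used) up to cache->ways (most
   recently used).  With 4 ways holding lru values {4, 3, 2, 1},
   set_most_recently_used on the way holding 2 decrements the larger values
   and marks that way with 4, giving {3, 2, 4, 1}; set_least_recently_used on
   the same way would instead bump the smaller non-zero values and mark it
   with 0.  */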
/* Find the line containing the given address and load it if it is not
   already loaded.
   Returns the tag of the requested line.  */
static FRV_CACHE_TAG *
find_or_retrieve_cache_line (FRV_CACHE *cache, SI address)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found = get_tag (cache, address, &tag);

  /* Fill the line from memory, if it is not valid.  */
  if (! found)
    {
      /* The tag could be NULL if all ways in the set were used and locked.  */
      if (tag == NULL)
        return tag;

      fill_line_from_memory (cache, tag, address);
      tag->dirty = 0;
    }

  /* Update the LRU information for the tags in this set.  */
  set_most_recently_used (cache, tag);

  return tag;
}
static void
copy_line_to_return_buffer (FRV_CACHE *cache, int pipe, FRV_CACHE_TAG *tag,
                            SI address)
{
  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  memcpy (cache->pipeline[pipe].status.return_buffer.data,
          tag->line, cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address
    = address & ~(cache->line_size - 1);
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}
static void
copy_memory_to_return_buffer (FRV_CACHE *cache, int pipe, SI address)
{
  address &= ~(cache->line_size - 1);
  read_data_from_memory (cache->cpu, address,
                         cache->pipeline[pipe].status.return_buffer.data,
                         cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address = address;
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}
static void
set_return_buffer_reqno (FRV_CACHE *cache, int pipe, unsigned reqno)
{
  cache->pipeline[pipe].status.return_buffer.reqno = reqno;
}
/* Read data from the given cache.
   Returns the number of cycles required to obtain the data.  */
int
frv_cache_read (FRV_CACHE *cache, int pipe, SI address)
{
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      return 1;
    }

  tag = find_or_retrieve_cache_line (cache, address);

  if (tag == NULL)
    return 0; /* Indicate non-cache-access.  */

  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  copy_line_to_return_buffer (cache, pipe, tag, address);

  return 1; /* TODO - number of cycles unknown */
}
/* Writes data through the given cache.
   The data is assumed to be in target endian order.
   Returns the number of cycles required to write the data.  */
int
frv_cache_write (FRV_CACHE *cache, SI address, char *data, unsigned length)
{
  int copy_back;

  /* See if this data is already in the cache.  */
  SIM_CPU *current_cpu = cache->cpu;
  USI hsr0 = GET_HSR0 ();
  FRV_CACHE_TAG *tag;
  int found;

  if (non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return 1;
    }

  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  copy_back = GET_HSR0_CBM (GET_HSR0 ());
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        fill_line_from_memory (cache, tag, address);
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back mode).  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }

  return 1; /* TODO - number of cycles unknown */
}
/* Preload the cache line containing the given address. Lock the
   line if requested.
   Returns the number of cycles required to write the data.  */
int
frv_cache_preload (FRV_CACHE *cache, SI address, USI length, int lock)
{
  int offset;
  int lines;

  if (non_cache_access (cache, address))
    return 1;

  /* preload at least 1 line.  */
  if (length == 0)
    length = 1;

  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;

  /* Careful with this loop -- length is unsigned.  */
  for (/**/; lines > 0; --lines)
    {
      FRV_CACHE_TAG *tag = find_or_retrieve_cache_line (cache, address);
      if (lock && tag != NULL)
        tag->locked = 1;
      address += cache->line_size;
    }

  return 1; /* TODO - number of cycles unknown */
}
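/* Example of the line count computed above (illustrative only): with 64-byte
   lines, a preload of length 100 starting 40 bytes into its line gives
   lines = 1 + (40 + 100 - 1) / 64 = 3, so three consecutive lines are
   fetched (and optionally locked).  */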
/* Unlock the cache line containing the given address.
   Returns the number of cycles required to unlock the line.  */
int
frv_cache_unlock (FRV_CACHE *cache, SI address)
{
  FRV_CACHE_TAG *tag;
  int found;

  if (non_cache_access (cache, address))
    return 1;

  found = get_tag (cache, address, &tag);

  if (found)
    tag->locked = 0;

  return 1; /* TODO - number of cycles unknown */
}
static void
invalidate_return_buffer (FRV_CACHE *cache, SI address)
{
  /* If this address is in one of the return buffers, then invalidate that
     return buffer.  */
  address &= ~(cache->line_size - 1);
  if (address == cache->pipeline[LS].status.return_buffer.address)
    cache->pipeline[LS].status.return_buffer.valid = 0;
  if (address == cache->pipeline[LD].status.return_buffer.address)
    cache->pipeline[LD].status.return_buffer.valid = 0;
}
/* Invalidate the cache line containing the given address. Flush the
   data if requested.
   Returns the number of cycles required to write the data.  */
int
frv_cache_invalidate (FRV_CACHE *cache, SI address, int flush)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found;

  /* Check for non-cache access. This operation is still performed even if
     the cache is not currently enabled.  */
  if (non_cache_access (cache, address))
    return 1;

  /* If the line is found, invalidate it. If a flush is requested, then flush
     it if it is dirty.  */
  found = get_tag (cache, address, &tag);
  if (found)
    {
      SIM_CPU *cpu;
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->dirty && flush)
        write_line_to_memory (cache, tag);
      set_least_recently_used (cache, tag);
      tag->valid = 0;
      tag->locked = 0;

      /* If this is the insn cache, then flush the cpu's scache as well.  */
      cpu = cache->cpu;
      if (cache == CPU_INSN_CACHE (cpu))
        scache_flush_cpu (cpu);
    }

  invalidate_return_buffer (cache, address);

  return 1; /* TODO - number of cycles unknown */
}
/* Invalidate the entire cache. Flush the data if requested.  */
int
frv_cache_invalidate_all (FRV_CACHE *cache, int flush)
{
  /* See if this data is already in the cache.  */
  int elements = cache->sets * cache->ways;
  FRV_CACHE_TAG *tag = cache->tag_storage;
  SIM_CPU *cpu;
  int i;

  for (i = 0; i < elements; ++i, ++tag)
    {
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->valid && tag->dirty && flush)
        write_line_to_memory (cache, tag);
      tag->valid = 0;
      tag->locked = 0;
    }

  /* If this is the insn cache, then flush the cpu's scache as well.  */
  cpu = cache->cpu;
  if (cache == CPU_INSN_CACHE (cpu))
    scache_flush_cpu (cpu);

  /* Invalidate both return buffers.  */
  cache->pipeline[LS].status.return_buffer.valid = 0;
  cache->pipeline[LD].status.return_buffer.valid = 0;

  return 1; /* TODO - number of cycles unknown */
}
/* ---------------------------------------------------------------------------
   Functions for operating the cache in cycle accurate mode.
   -------------------------------------------------------------------------  */

/* Convert a VLIW slot to a cache pipeline index.  */
static int
convert_slot_to_index (int slot)
{
  switch (slot)
    {
    case UNIT_I0:
    case UNIT_FM0:
      return LS;
    case UNIT_I1:
    case UNIT_FM1:
      return LD;
    default:
      abort ();
    }
  return 0;
}
/* Allocate free chains of cache requests.  */
#define FREE_CHAIN_SIZE 16
static FRV_CACHE_REQUEST *frv_cache_request_free_chain = NULL;
static FRV_CACHE_REQUEST *frv_store_request_free_chain = NULL;

static void
allocate_new_cache_requests (void)
{
  int i;
  frv_cache_request_free_chain = xmalloc (FREE_CHAIN_SIZE
                                          * sizeof (FRV_CACHE_REQUEST));
  for (i = 0; i < FREE_CHAIN_SIZE - 1; ++i)
    {
      frv_cache_request_free_chain[i].next
        = & frv_cache_request_free_chain[i + 1];
    }

  frv_cache_request_free_chain[FREE_CHAIN_SIZE - 1].next = NULL;
}
/* Return the next free request in the queue for the given cache pipeline.  */
static FRV_CACHE_REQUEST *
new_cache_request (void)
{
  FRV_CACHE_REQUEST *req;

  /* Allocate new elements for the free chain if necessary.  */
  if (frv_cache_request_free_chain == NULL)
    allocate_new_cache_requests ();

  req = frv_cache_request_free_chain;
  frv_cache_request_free_chain = req->next;

  return req;
}
/* Return the given cache request to the free chain.  */
static void
free_cache_request (FRV_CACHE_REQUEST *req)
{
  if (req->kind == req_store)
    {
      req->next = frv_store_request_free_chain;
      frv_store_request_free_chain = req;
    }
  else
    {
      req->next = frv_cache_request_free_chain;
      frv_cache_request_free_chain = req;
    }
}
/* Search the free chain for an existing store request with a buffer that's
   large enough.  */
static FRV_CACHE_REQUEST *
new_store_request (int length)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *req;
  for (req = frv_store_request_free_chain; req != NULL; req = req->next)
    {
      if (req->u.store.length == length)
        break;
      prev = req;
    }
  if (req != NULL)
    {
      if (prev == NULL)
        frv_store_request_free_chain = req->next;
      else
        prev->next = req->next;
      return req;
    }

  /* No existing request buffer was found, so make a new one.  */
  req = new_cache_request ();
  req->kind = req_store;
  req->u.store.data = xmalloc (length);
  req->u.store.length = length;
  return req;
}
/* Remove the given request from the given pipeline.  */
static void
pipeline_remove_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *next = request->next;
  FRV_CACHE_REQUEST *prev = request->prev;

  if (prev == NULL)
    p->requests = next;
  else
    prev->next = next;

  if (next != NULL)
    next->prev = prev;
}
/* Add the given request to the given pipeline.  */
static void
pipeline_add_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *item;

  /* Add the request in priority order. 0 is the highest priority.  */
  for (item = p->requests; item != NULL; item = item->next)
    {
      if (item->priority > request->priority)
        break;
      prev = item;
    }

  request->next = item;
  request->prev = prev;
  if (prev == NULL)
    p->requests = request;
  else
    prev->next = request;
  if (item != NULL)
    item->prev = request;
}
/* Requeue the request in the last stage of the given pipeline.  */
static void
pipeline_requeue_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_STAGE *stage = & p->stages[LAST_STAGE];
  FRV_CACHE_REQUEST *req = stage->request;
  stage->request = NULL;
  pipeline_add_request (p, req);
}
/* Return the priority lower than the lowest one in this cache pipeline.
   0 is the highest priority.  */
static int
next_priority (FRV_CACHE *cache, FRV_CACHE_PIPELINE *pipeline)
{
  int i, j;
  int pipe;
  int lowest = 0;
  FRV_CACHE_REQUEST *req;

  /* Check the priorities of any queued items.  */
  for (req = pipeline->requests; req != NULL; req = req->next)
    if (req->priority > lowest)
      lowest = req->priority;

  /* Check the priorities of items in the pipeline stages.  */
  for (i = FIRST_STAGE; i < FRV_CACHE_STAGES; ++i)
    {
      FRV_CACHE_STAGE *stage = & pipeline->stages[i];
      if (stage->request != NULL && stage->request->priority > lowest)
        lowest = stage->request->priority;
    }

  /* Check the priorities of load requests waiting in WAR. These are one
     higher than the request that spawned them.  */
  for (i = 0; i < NUM_WARS; ++i)
    {
      FRV_CACHE_WAR *war = & pipeline->WAR[i];
      if (war->valid && war->priority > lowest)
        lowest = war->priority + 1;
    }

  /* Check the priorities of any BARS or NARS associated with this pipeline.
     These are one higher than the request that spawned them.  */
  pipe = pipeline - cache->pipeline;
  if (cache->BARS.valid && cache->BARS.pipe == pipe
      && cache->BARS.priority > lowest)
    lowest = cache->BARS.priority + 1;
  if (cache->NARS.valid && cache->NARS.pipe == pipe
      && cache->NARS.priority > lowest)
    lowest = cache->NARS.priority + 1;

  /* Return a priority 2 lower than the lowest found. This allows a WAR
     request to be generated with a priority greater than this but less than
     the next higher priority request.  */
  return lowest + 2;
}
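/* Worked example for next_priority (illustrative only): if the deepest
   queued or in-flight request in this pipeline has priority 3 and a valid
   WAR entry holds priority 2, the WAR contributes 2 + 1 = 3, so the lowest
   priority seen is 3 and the function returns 3 + 2 = 5; a WAR request
   spawned later can then use 4, which slots between the two.  */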
static void
add_WAR_request (FRV_CACHE_PIPELINE *pipeline, FRV_CACHE_WAR *war)
{
  /* Add the load request to the indexed pipeline.  */
  FRV_CACHE_REQUEST *req = new_cache_request ();
  req->kind = req_WAR;
  req->reqno = war->reqno;
  req->priority = war->priority;
  req->address = war->address;
  req->u.WAR.preload = war->preload;
  req->u.WAR.lock = war->lock;
  pipeline_add_request (pipeline, req);
}
/* Remove the next request from the given pipeline and return it.  */
static FRV_CACHE_REQUEST *
pipeline_next_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_REQUEST *first = p->requests;
  if (first != NULL)
    pipeline_remove_request (p, first);
  return first;
}
/* Return the request which is at the given stage of the given pipeline.  */
static FRV_CACHE_REQUEST *
pipeline_stage_request (FRV_CACHE_PIPELINE *p, int stage)
{
  return p->stages[stage].request;
}
static void
advance_pipelines (FRV_CACHE *cache)
{
  int stage;
  int pipe;
  FRV_CACHE_PIPELINE *pipelines = cache->pipeline;

  /* Free the final stage requests.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req = pipelines[pipe].stages[LAST_STAGE].request;
      if (req != NULL)
        free_cache_request (req);
    }

  /* Shuffle the requests along the pipeline.  */
  for (stage = LAST_STAGE; stage > FIRST_STAGE; --stage)
    {
      for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
        pipelines[pipe].stages[stage] = pipelines[pipe].stages[stage - 1];
    }

  /* Add a new request to the pipeline.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    pipelines[pipe].stages[FIRST_STAGE].request
      = pipeline_next_request (& pipelines[pipe]);
}
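/* One advance_pipelines step, sketched for orientation (not part of the
   original source): the request in the last stage of each pipeline is freed
   (it was handled during the previous cycle), every other request moves down
   one stage, and the first stage is refilled from the head of that pipeline's
   priority-ordered request queue, or left empty if the queue is empty.  */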
/* Handle a request for a load from the given address.  */
void
frv_cache_request_load (FRV_CACHE *cache, unsigned reqno, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*. Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the load request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_load;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}
void
frv_cache_request_store (FRV_CACHE *cache, SI address,
                         int slot, char *data, unsigned length)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*. Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the store request to the indexed pipeline.  */
  req = new_store_request (length);
  req->kind = req_store;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.store.length = length;
  memcpy (req->u.store.data, data, length);

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}
/* Handle a request to invalidate the cache line containing the given address.
   Flush the data if requested.  */
void
frv_cache_request_invalidate (FRV_CACHE *cache, unsigned reqno, SI address,
                              int slot, int all, int flush)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*. Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the invalidate request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_invalidate;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.invalidate.all = all;
  req->u.invalidate.flush = flush;

  pipeline_add_request (pipeline, req);
}
/* Handle a request to preload the cache line containing the given address.  */
void
frv_cache_request_preload (FRV_CACHE *cache, SI address,
                           int slot, int length, int lock)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*. Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the preload request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_preload;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.preload.length = length;
  req->u.preload.lock = lock;

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}
/* Handle a request to unlock the cache line containing the given address.  */
void
frv_cache_request_unlock (FRV_CACHE *cache, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*. Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the unlock request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_unlock;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}
/* Check whether this address interferes with a pending request of
   higher priority in the same pipeline.  */
static int
address_interference (FRV_CACHE *cache, SI address, FRV_CACHE_REQUEST *req,
                      int pipe)
{
  int i, j;
  int line_mask = ~(cache->line_size - 1);
  int other_pipe;
  int priority = req->priority;
  FRV_CACHE_REQUEST *other_req;
  SI other_address;
  SI all_address;

  address &= line_mask;
  all_address = -1 & line_mask;

  /* Check for collisions in the queue for this pipeline.  */
  for (other_req = cache->pipeline[pipe].requests;
       other_req != NULL;
       other_req = other_req->next)
    {
      other_address = other_req->address & line_mask;
      if ((address == other_address || address == all_address)
          && priority > other_req->priority)
        return 1;
    }

  /* Check for a collision in the other pipeline.  */
  other_pipe = pipe ^ 1;
  other_req = cache->pipeline[other_pipe].stages[LAST_STAGE].request;
  if (other_req != NULL)
    {
      other_address = other_req->address & line_mask;
      if (address == other_address || address == all_address)
        return 1;
    }

  /* Check for a collision with load requests waiting in WAR.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & cache->pipeline[i].WAR[j];
          if (war->valid
              && (address == (war->address & line_mask)
                  || address == all_address)
              && priority > war->priority)
            return 1;
        }
      /* If this is not a WAR request, then yield to any WAR requests in
         either pipeline or to a higher priority request in the same pipeline.
      */
      if (req->kind != req_WAR)
        {
          for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
            {
              other_req = cache->pipeline[i].stages[j].request;
              if (other_req != NULL)
                {
                  if (other_req->kind == req_WAR)
                    return 1;
                  if (i == pipe
                      && (address == (other_req->address & line_mask)
                          || address == all_address)
                      && priority > other_req->priority)
                    return 1;
                }
            }
        }
    }

  /* Check for a collision with load requests waiting in ARS.  */
  if (cache->BARS.valid
      && (address == (cache->BARS.address & line_mask)
          || address == all_address)
      && priority > cache->BARS.priority)
    return 1;
  if (cache->NARS.valid
      && (address == (cache->NARS.address & line_mask)
          || address == all_address)
      && priority > cache->NARS.priority)
    return 1;

  return 0;
}
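/* Collision example for address_interference (illustrative only): with
   64-byte lines, line_mask is 0xffffffc0, so addresses 0x1000 and 0x103f
   fall in the same line and collide, while 0x1040 does not.  An
   invalidate-all request passes -1 as the address, which after masking equals
   all_address and therefore collides with every pending request, subject to
   the priority checks.  */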
/* Wait for a free WAR register in BARS or NARS.  */
static void
wait_for_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  if (! cache->BARS.valid)
    {
      cache->BARS.pipe = pipe;
      cache->BARS.reqno = req->reqno;
      cache->BARS.address = req->address;
      cache->BARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->BARS.preload = 0;
          cache->BARS.lock = 0;
          break;
        case req_store:
          cache->BARS.preload = 1;
          cache->BARS.lock = 0;
          break;
        case req_preload:
          cache->BARS.preload = 1;
          cache->BARS.lock = req->u.preload.lock;
          break;
        }
      cache->BARS.valid = 1;
      return;
    }
  if (! cache->NARS.valid)
    {
      cache->NARS.pipe = pipe;
      cache->NARS.reqno = req->reqno;
      cache->NARS.address = req->address;
      cache->NARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->NARS.preload = 0;
          cache->NARS.lock = 0;
          break;
        case req_store:
          cache->NARS.preload = 1;
          cache->NARS.lock = 0;
          break;
        case req_preload:
          cache->NARS.preload = 1;
          cache->NARS.lock = req->u.preload.lock;
          break;
        }
      cache->NARS.valid = 1;
      return;
    }
  /* All wait registers are busy, so resubmit this request.  */
  pipeline_requeue_request (pipeline);
}
/* Find a free WAR register and wait for memory to fetch the data.  */
static void
wait_in_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int war;
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Find a free WAR to hold this request.  */
  for (war = 0; war < NUM_WARS; ++war)
    if (! pipeline->WAR[war].valid)
      break;
  if (war >= NUM_WARS)
    {
      wait_for_WAR (cache, pipe, req);
      return;
    }

  pipeline->WAR[war].address = req->address;
  pipeline->WAR[war].reqno = req->reqno;
  pipeline->WAR[war].priority = req->priority - 1;
  pipeline->WAR[war].latency = cache->memory_latency + 1;
  switch (req->kind)
    {
    case req_load:
      pipeline->WAR[war].preload = 0;
      pipeline->WAR[war].lock = 0;
      break;
    case req_store:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = 0;
      break;
    case req_preload:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = req->u.preload.lock;
      break;
    }
  pipeline->WAR[war].valid = 1;
}
static void
handle_req_load (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      int found = get_tag (cache, address, &tag);

      /* If the data was found, return it to the caller.  */
      if (found)
        {
          set_most_recently_used (cache, tag);
          copy_line_to_return_buffer (cache, pipe, tag, address);
          set_return_buffer_reqno (cache, pipe, req->reqno);
          return;
        }
    }

  /* The data is not in the cache or this is a non-cache access. We need to
     wait for the memory unit to fetch it. Store this request in the WAR in
     the meantime.  */
  wait_in_WAR (cache, pipe, req);
}
static void
handle_req_preload (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int found;
  int length;
  int lock;
  int offset;
  int lines;
  int line;
  SI address = req->address;
  SI cur_address;
  FRV_CACHE_TAG *tag;

  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    return;

  /* preload at least 1 line.  */
  length = req->u.preload.length;
  if (length == 0)
    length = 1;

  /* Make sure that this request does not interfere with a pending request.  */
  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If this address interferes with an existing request,
         then requeue it.  */
      if (address_interference (cache, cur_address, req, pipe))
        {
          pipeline_requeue_request (& cache->pipeline[pipe]);
          return;
        }
      cur_address += cache->line_size;
    }

  /* Now process each cache line.  */
  /* Careful with this loop -- length is unsigned.  */
  lock = req->u.preload.lock;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If the data was found, then lock it if requested.  */
      found = get_tag (cache, cur_address, &tag);
      if (found)
        {
          if (lock)
            tag->locked = 1;
        }
      else
        {
          /* The data is not in the cache. We need to wait for the memory
             unit to fetch it. Store this request in the WAR in the meantime.
          */
          wait_in_WAR (cache, pipe, req);
        }
      cur_address += cache->line_size;
    }
}
static void
handle_req_store (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  SIM_CPU *current_cpu;
  FRV_CACHE_TAG *tag;
  int found;
  int copy_back;
  SI address = req->address;
  char *data = req->u.store.data;
  int length = req->u.store.length;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  /* Non-cache access. Write the data directly to memory.  */
  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return;
    }

  /* See if the data is in the cache.  */
  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  current_cpu = cache->cpu;
  copy_back = GET_HSR0_CBM (GET_HSR0 ());
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        {
          /* We need to wait for the memory unit to fetch the data.
             Store this request in the WAR and requeue the store request.  */
          wait_in_WAR (cache, pipe, req);
          pipeline_requeue_request (& cache->pipeline[pipe]);
          /* Decrement the counts of accesses and hits because when the requeued
             request is processed again, it will appear to be a new access and
             a hit.  */
          --cache->statistics.accesses;
          --cache->statistics.hits;
          return;
        }
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      invalidate_return_buffer (cache, address);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back mode).  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }
}
static void
handle_req_invalidate (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;
  SI interfere_address = req->u.invalidate.all ? -1 : address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, interfere_address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Invalidate the cache line now. This function already checks for
     non-cache access.  */
  if (req->u.invalidate.all)
    frv_cache_invalidate_all (cache, req->u.invalidate.flush);
  else
    frv_cache_invalidate (cache, address, req->u.invalidate.flush);
  if (req->u.invalidate.flush)
    {
      pipeline->status.flush.reqno = req->reqno;
      pipeline->status.flush.address = address;
      pipeline->status.flush.valid = 1;
    }
}
static void
handle_req_unlock (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Unlock the cache line. This function checks for non-cache access.  */
  frv_cache_unlock (cache, address);
}
static void
handle_req_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      /* Look for the data in the cache. The statistics of cache hit or
         miss have already been recorded, so save and restore the stats before
         and after obtaining the cache line.  */
      FRV_CACHE_STATISTICS save_stats = cache->statistics;
      tag = find_or_retrieve_cache_line (cache, address);
      cache->statistics = save_stats;
      if (tag != NULL)
        {
          if (! req->u.WAR.preload)
            {
              copy_line_to_return_buffer (cache, pipe, tag, address);
              set_return_buffer_reqno (cache, pipe, req->reqno);
            }
          else
            {
              invalidate_return_buffer (cache, address);
              if (req->u.WAR.lock)
                tag->locked = 1;
            }
          return;
        }
    }

  /* All cache lines in the set were locked, so just copy the data to the
     return buffer directly.  */
  if (! req->u.WAR.preload)
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      set_return_buffer_reqno (cache, pipe, req->reqno);
    }
}
/* Resolve any conflicts and/or execute the given requests.  */
static void
arbitrate_requests (FRV_CACHE *cache)
{
  int pipe;
  /* Simply execute the requests in the final pipeline stages.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req
        = pipeline_stage_request (& cache->pipeline[pipe], LAST_STAGE);
      /* Make sure that there is a request to handle.  */
      if (req == NULL)
        continue;

      /* Handle the request.  */
      switch (req->kind)
        {
        case req_load:
          handle_req_load (cache, pipe, req);
          break;
        case req_store:
          handle_req_store (cache, pipe, req);
          break;
        case req_invalidate:
          handle_req_invalidate (cache, pipe, req);
          break;
        case req_preload:
          handle_req_preload (cache, pipe, req);
          break;
        case req_unlock:
          handle_req_unlock (cache, pipe, req);
          break;
        case req_WAR:
          handle_req_WAR (cache, pipe, req);
          break;
        default:
          abort ();
        }
    }
}
/* Move a waiting ARS register to a free WAR register.  */
static void
move_ARS_to_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_WAR *war)
{
  /* If BARS is valid for this pipe, then move it to the given WAR. Move
     NARS to BARS if it is valid.  */
  if (cache->BARS.valid && cache->BARS.pipe == pipe)
    {
      war->address = cache->BARS.address;
      war->reqno = cache->BARS.reqno;
      war->priority = cache->BARS.priority;
      war->preload = cache->BARS.preload;
      war->lock = cache->BARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      if (cache->NARS.valid)
        {
          cache->BARS = cache->NARS;
          cache->NARS.valid = 0;
        }
      else
        cache->BARS.valid = 0;
      return;
    }
  /* If NARS is valid for this pipe, then move it to the given WAR.  */
  if (cache->NARS.valid && cache->NARS.pipe == pipe)
    {
      war->address = cache->NARS.address;
      war->reqno = cache->NARS.reqno;
      war->priority = cache->NARS.priority;
      war->preload = cache->NARS.preload;
      war->lock = cache->NARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      cache->NARS.valid = 0;
    }
}
/* Decrease the latencies of the various states in the cache.  */
static void
decrease_latencies (FRV_CACHE *cache)
{
  int pipe, j;
  /* Check the WAR registers.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & pipeline->WAR[j];
          if (war->valid)
            {
              --war->latency;
              /* If the latency has expired, then submit a WAR request to the
                 pipeline.  */
              if (war->latency <= 0)
                {
                  add_WAR_request (pipeline, war);
                  war->valid = 0;
                  move_ARS_to_WAR (cache, pipe, war);
                }
            }
        }
    }
}
/* Run the cache for the given number of cycles.  */
void
frv_cache_run (FRV_CACHE *cache, int cycles)
{
  int i;
  for (i = 0; i < cycles; ++i)
    {
      advance_pipelines (cache);
      arbitrate_requests (cache);
      decrease_latencies (cache);
    }
}
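/* Minimal usage sketch of the cycle-accurate interface above (illustrative
   only, not part of the original source).  CPU_DATA_CACHE and LS appear
   elsewhere in this file; UNIT_I0 is assumed here as one of the "UNIT_*"
   slot values mentioned by the request functions, and reqno is any number
   the caller later uses to match the answer:

     FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
     unsigned reqno = 1;
     frv_cache_request_load (cache, reqno, address, UNIT_I0);
     frv_cache_run (cache, 1);   // one call per simulated cycle
     if (frv_cache_data_in_buffer (cache, LS, address, reqno))
       ;  // the line is now in the LS pipeline's return buffer  */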
int
frv_cache_read_passive_SI (FRV_CACHE *cache, SI address, SI *value)
{
  SI offset;
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    return 0;

  {
    FRV_CACHE_STATISTICS save_stats = cache->statistics;
    int found = get_tag (cache, address, &tag);
    cache->statistics = save_stats;

    if (! found)
      return 0; /* Indicate non-cache-access.  */
  }

  /* A cache line was available for the data.
     Extract the target data from the line.  */
  offset = address & (cache->line_size - 1);
  *value = T2H_4 (*(SI *)(tag->line + offset));
  return 1;
}
/* Check the return buffers of the data cache to see if the requested data is
   available.  */
int
frv_cache_data_in_buffer (FRV_CACHE *cache, int pipe, SI address,
                          unsigned reqno)
{
  return cache->pipeline[pipe].status.return_buffer.valid
    && cache->pipeline[pipe].status.return_buffer.reqno == reqno
    && cache->pipeline[pipe].status.return_buffer.address <= address
    && cache->pipeline[pipe].status.return_buffer.address + cache->line_size
       > address;
}
1660 frv_cache_data_flushed (FRV_CACHE
* cache
, int pipe
, SI address
, unsigned reqno
)
1662 return cache
->pipeline
[pipe
].status
.flush
.valid
1663 && cache
->pipeline
[pipe
].status
.flush
.reqno
== reqno
1664 && cache
->pipeline
[pipe
].status
.flush
.address
<= address
1665 && cache
->pipeline
[pipe
].status
.flush
.address
+ cache
->line_size