2 Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc.
3 Contributed by Red Hat.
5 This file is part of the GNU simulators.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
21 #define WANT_CPU frvbf
22 #define WANT_CPU_FRVBF
24 #include "libiberty.h"
30 frv_cache_init (SIM_CPU
*cpu
, FRV_CACHE
*cache
)
36 /* Set defaults for fields which are not initialized. */
38 switch (STATE_ARCHITECTURE (sd
)->mach
)
45 if (cache
->line_size
== 0)
46 cache
->line_size
= 32;
47 if (cache
->memory_latency
== 0)
48 cache
->memory_latency
= 20;
55 if (cache
->line_size
== 0)
56 cache
->line_size
= 64;
57 if (cache
->memory_latency
== 0)
58 cache
->memory_latency
= 20;
62 /* First allocate the cache storage based on the given dimensions. */
63 elements
= cache
->sets
* cache
->ways
;
64 cache
->tag_storage
= (FRV_CACHE_TAG
*)
65 zalloc (elements
* sizeof (*cache
->tag_storage
));
66 cache
->data_storage
= (char *) xmalloc (elements
* cache
->line_size
);
68 /* Initialize the pipelines and status buffers. */
69 for (i
= LS
; i
< FRV_CACHE_PIPELINES
; ++i
)
71 cache
->pipeline
[i
].requests
= NULL
;
72 cache
->pipeline
[i
].status
.flush
.valid
= 0;
73 cache
->pipeline
[i
].status
.return_buffer
.valid
= 0;
74 cache
->pipeline
[i
].status
.return_buffer
.data
75 = (char *) xmalloc (cache
->line_size
);
76 for (j
= FIRST_STAGE
; j
< FRV_CACHE_STAGES
; ++j
)
77 cache
->pipeline
[i
].stages
[j
].request
= NULL
;
79 cache
->BARS
.valid
= 0;
80 cache
->NARS
.valid
= 0;
82 /* Now set the cache state. */
84 cache
->statistics
.accesses
= 0;
85 cache
->statistics
.hits
= 0;
89 frv_cache_term (FRV_CACHE
*cache
)
91 /* Free the cache storage. */
92 free (cache
->tag_storage
);
93 free (cache
->data_storage
);
94 free (cache
->pipeline
[LS
].status
.return_buffer
.data
);
95 free (cache
->pipeline
[LD
].status
.return_buffer
.data
);
98 /* Determine whether the given cache is enabled. */
100 frv_cache_enabled (FRV_CACHE
*cache
)
102 SIM_CPU
*current_cpu
= cache
->cpu
;
103 int hsr0
= GET_HSR0 ();
104 if (GET_HSR0_ICE (hsr0
) && cache
== CPU_INSN_CACHE (current_cpu
))
106 if (GET_HSR0_DCE (hsr0
) && cache
== CPU_DATA_CACHE (current_cpu
))
111 /* Determine whether the given address should be accessed without using
114 non_cache_access (FRV_CACHE
*cache
, USI address
)
118 SIM_CPU
*current_cpu
= cache
->cpu
;
120 sd
= CPU_STATE (current_cpu
);
121 switch (STATE_ARCHITECTURE (sd
)->mach
)
124 if (address
>= 0xff000000
125 || address
>= 0xfe000000 && address
<= 0xfeffffff)
126 return 1; /* non-cache access */
128 if (address
>= 0xff000000
129 || address
>= 0xfeff0000 && address
<= 0xfeffffff)
130 return 1; /* non-cache access */
131 if (cache
== CPU_INSN_CACHE (current_cpu
))
133 if (address
>= 0xfe000000 && address
<= 0xfe003fff)
134 return 1; /* non-cache access */
136 else if (address
>= 0xfe400000 && address
<= 0xfe403fff)
137 return 1; /* non-cache access */
141 if (GET_HSR0_RME (hsr0
))
142 return 1; /* non-cache access */
144 return 0; /* cache-access */
147 /* Find the cache line corresponding to the given address.
148 If it is found then 'return_tag' is set to point to the tag for that line
150 If it is not found, 'return_tag' is set to point to the tag for the least
151 recently used line and 0 is returned.
154 get_tag (FRV_CACHE
*cache
, SI address
, FRV_CACHE_TAG
**return_tag
)
160 FRV_CACHE_TAG
*found
;
161 FRV_CACHE_TAG
*available
;
163 ++cache
->statistics
.accesses
;
165 /* First calculate which set this address will fall into. Do this by
166 shifting out the bits representing the offset within the line and
167 then keeping enough bits to index the set. */
168 set
= address
& ~(cache
->line_size
- 1);
169 for (bits
= cache
->line_size
- 1; bits
!= 0; bits
>>= 1)
171 set
&= (cache
->sets
- 1);
173 /* Now search the set for a valid tag which matches this address. At the
174 same time make note of the least recently used tag, which we will return
175 if no match is found. */
177 tag
= CACHE_ADDRESS_TAG (cache
, address
);
178 for (way
= 0; way
< cache
->ways
; ++way
)
180 found
= CACHE_TAG (cache
, set
, way
);
181 /* This tag is available as the least recently used if it is the
182 least recently used seen so far and it is not locked. */
183 if (! found
->locked
&& (available
== NULL
|| available
->lru
> found
->lru
))
185 if (found
->valid
&& found
->tag
== tag
)
188 ++cache
->statistics
.hits
;
189 return 1; /* found it */
193 *return_tag
= available
;
194 return 0; /* not found */
197 /* Write the given data out to memory. */
199 write_data_to_memory (FRV_CACHE
*cache
, SI address
, char *data
, int length
)
201 SIM_CPU
*cpu
= cache
->cpu
;
202 IADDR pc
= CPU_PC_GET (cpu
);
209 PROFILE_COUNT_WRITE (cpu
, address
, MODE_QI
);
212 PROFILE_COUNT_WRITE (cpu
, address
, MODE_HI
);
215 PROFILE_COUNT_WRITE (cpu
, address
, MODE_SI
);
218 PROFILE_COUNT_WRITE (cpu
, address
, MODE_DI
);
222 for (write_index
= 0; write_index
< length
; ++write_index
)
224 /* TODO: Better way to copy memory than a byte at a time? */
225 sim_core_write_unaligned_1 (cpu
, pc
, write_map
, address
+ write_index
,
230 /* Write a cache line out to memory. */
232 write_line_to_memory (FRV_CACHE
*cache
, FRV_CACHE_TAG
*tag
)
234 SI address
= tag
->tag
;
235 int set
= CACHE_TAG_SET_NUMBER (cache
, tag
);
237 for (bits
= cache
->line_size
- 1; bits
!= 0; bits
>>= 1)
240 write_data_to_memory (cache
, address
, tag
->line
, cache
->line_size
);
244 read_data_from_memory (SIM_CPU
*current_cpu
, SI address
, char *buffer
,
247 PCADDR pc
= CPU_PC_GET (current_cpu
);
249 PROFILE_COUNT_READ (current_cpu
, address
, MODE_QI
);
250 for (i
= 0; i
< length
; ++i
)
252 /* TODO: Better way to copy memory than a byte at a time? */
253 buffer
[i
] = sim_core_read_unaligned_1 (current_cpu
, pc
, read_map
,
258 /* Fill the given cache line from memory. */
260 fill_line_from_memory (FRV_CACHE
*cache
, FRV_CACHE_TAG
*tag
, SI address
)
265 SIM_CPU
*current_cpu
= cache
->cpu
;
267 /* If this line is already valid and the cache is in copy-back mode, then
268 write this line to memory before refilling it.
269 Check the dirty bit first, since it is less likely to be set. */
270 if (tag
->dirty
&& tag
->valid
)
272 int hsr0
= GET_HSR0 ();
273 if (GET_HSR0_CBM (hsr0
))
274 write_line_to_memory (cache
, tag
);
276 else if (tag
->line
== NULL
)
278 int line_index
= tag
- cache
->tag_storage
;
279 tag
->line
= cache
->data_storage
+ (line_index
* cache
->line_size
);
282 pc
= CPU_PC_GET (current_cpu
);
283 line_alignment
= cache
->line_size
- 1;
284 read_address
= address
& ~line_alignment
;
285 read_data_from_memory (current_cpu
, read_address
, tag
->line
,
287 tag
->tag
= CACHE_ADDRESS_TAG (cache
, address
);
291 /* Update the LRU information for the tags in the same set as the given tag. */
293 set_most_recently_used (FRV_CACHE
*cache
, FRV_CACHE_TAG
*tag
)
295 /* All tags in the same set are contiguous, so find the beginning of the
296 set by aligning to the size of a set. */
297 FRV_CACHE_TAG
*item
= cache
->tag_storage
+ CACHE_TAG_SET_START (cache
, tag
);
298 FRV_CACHE_TAG
*limit
= item
+ cache
->ways
;
302 if (item
->lru
> tag
->lru
)
306 tag
->lru
= cache
->ways
; /* Mark as most recently used. */
309 /* Update the LRU information for the tags in the same set as the given tag. */
311 set_least_recently_used (FRV_CACHE
*cache
, FRV_CACHE_TAG
*tag
)
313 /* All tags in the same set are contiguous, so find the beginning of the
314 set by aligning to the size of a set. */
315 FRV_CACHE_TAG
*item
= cache
->tag_storage
+ CACHE_TAG_SET_START (cache
, tag
);
316 FRV_CACHE_TAG
*limit
= item
+ cache
->ways
;
320 if (item
->lru
!= 0 && item
->lru
< tag
->lru
)
324 tag
->lru
= 0; /* Mark as least recently used. */
327 /* Find the line containing the given address and load it if it is not
329 Returns the tag of the requested line. */
330 static FRV_CACHE_TAG
*
331 find_or_retrieve_cache_line (FRV_CACHE
*cache
, SI address
)
333 /* See if this data is already in the cache. */
335 int found
= get_tag (cache
, address
, &tag
);
337 /* Fill the line from memory, if it is not valid. */
340 /* The tag could be NULL is all ways in the set were used and locked. */
344 fill_line_from_memory (cache
, tag
, address
);
348 /* Update the LRU information for the tags in this set. */
349 set_most_recently_used (cache
, tag
);
355 copy_line_to_return_buffer (FRV_CACHE
*cache
, int pipe
, FRV_CACHE_TAG
*tag
,
358 /* A cache line was available for the data.
359 Copy the data from the cache line to the output buffer. */
360 memcpy (cache
->pipeline
[pipe
].status
.return_buffer
.data
,
361 tag
->line
, cache
->line_size
);
362 cache
->pipeline
[pipe
].status
.return_buffer
.address
363 = address
& ~(cache
->line_size
- 1);
364 cache
->pipeline
[pipe
].status
.return_buffer
.valid
= 1;
368 copy_memory_to_return_buffer (FRV_CACHE
*cache
, int pipe
, SI address
)
370 address
&= ~(cache
->line_size
- 1);
371 read_data_from_memory (cache
->cpu
, address
,
372 cache
->pipeline
[pipe
].status
.return_buffer
.data
,
374 cache
->pipeline
[pipe
].status
.return_buffer
.address
= address
;
375 cache
->pipeline
[pipe
].status
.return_buffer
.valid
= 1;
379 set_return_buffer_reqno (FRV_CACHE
*cache
, int pipe
, unsigned reqno
)
381 cache
->pipeline
[pipe
].status
.return_buffer
.reqno
= reqno
;
384 /* Read data from the given cache.
385 Returns the number of cycles required to obtain the data. */
387 frv_cache_read (FRV_CACHE
*cache
, int pipe
, SI address
)
391 if (non_cache_access (cache
, address
))
393 copy_memory_to_return_buffer (cache
, pipe
, address
);
397 tag
= find_or_retrieve_cache_line (cache
, address
);
400 return 0; /* Indicate non-cache-access. */
402 /* A cache line was available for the data.
403 Copy the data from the cache line to the output buffer. */
404 copy_line_to_return_buffer (cache
, pipe
, tag
, address
);
406 return 1; /* TODO - number of cycles unknown */
409 /* Writes data through the given cache.
410 The data is assumed to be in target endian order.
411 Returns the number of cycles required to write the data. */
413 frv_cache_write (FRV_CACHE
*cache
, SI address
, char *data
, unsigned length
)
417 /* See if this data is already in the cache. */
418 SIM_CPU
*current_cpu
= cache
->cpu
;
419 USI hsr0
= GET_HSR0 ();
423 if (non_cache_access (cache
, address
))
425 write_data_to_memory (cache
, address
, data
, length
);
429 found
= get_tag (cache
, address
, &tag
);
431 /* Write the data to the cache line if one was available and if it is
432 either a hit or a miss in copy-back mode.
433 The tag may be NULL if all ways were in use and locked on a miss.
435 copy_back
= GET_HSR0_CBM (GET_HSR0 ());
436 if (tag
!= NULL
&& (found
|| copy_back
))
439 /* Load the line from memory first, if it was a miss. */
441 fill_line_from_memory (cache
, tag
, address
);
442 line_offset
= address
& (cache
->line_size
- 1);
443 memcpy (tag
->line
+ line_offset
, data
, length
);
446 /* Update the LRU information for the tags in this set. */
447 set_most_recently_used (cache
, tag
);
450 /* Write the data to memory if there was no line available or we are in
451 write-through (not copy-back mode). */
452 if (tag
== NULL
|| ! copy_back
)
454 write_data_to_memory (cache
, address
, data
, length
);
459 return 1; /* TODO - number of cycles unknown */
462 /* Preload the cache line containing the given address. Lock the
464 Returns the number of cycles required to write the data. */
466 frv_cache_preload (FRV_CACHE
*cache
, SI address
, USI length
, int lock
)
471 if (non_cache_access (cache
, address
))
474 /* preload at least 1 line. */
478 offset
= address
& (cache
->line_size
- 1);
479 lines
= 1 + (offset
+ length
- 1) / cache
->line_size
;
481 /* Careful with this loop -- length is unsigned. */
482 for (/**/; lines
> 0; --lines
)
484 FRV_CACHE_TAG
*tag
= find_or_retrieve_cache_line (cache
, address
);
485 if (lock
&& tag
!= NULL
)
487 address
+= cache
->line_size
;
490 return 1; /* TODO - number of cycles unknown */
493 /* Unlock the cache line containing the given address.
494 Returns the number of cycles required to unlock the line. */
496 frv_cache_unlock (FRV_CACHE
*cache
, SI address
)
501 if (non_cache_access (cache
, address
))
504 found
= get_tag (cache
, address
, &tag
);
509 return 1; /* TODO - number of cycles unknown */
513 invalidate_return_buffer (FRV_CACHE
*cache
, SI address
)
515 /* If this address is in one of the return buffers, then invalidate that
517 address
&= ~(cache
->line_size
- 1);
518 if (address
== cache
->pipeline
[LS
].status
.return_buffer
.address
)
519 cache
->pipeline
[LS
].status
.return_buffer
.valid
= 0;
520 if (address
== cache
->pipeline
[LD
].status
.return_buffer
.address
)
521 cache
->pipeline
[LD
].status
.return_buffer
.valid
= 0;
524 /* Invalidate the cache line containing the given address. Flush the
526 Returns the number of cycles required to write the data. */
528 frv_cache_invalidate (FRV_CACHE
*cache
, SI address
, int flush
)
530 /* See if this data is already in the cache. */
534 /* Check for non-cache access. This operation is still perfromed even if
535 the cache is not currently enabled. */
536 if (non_cache_access (cache
, address
))
539 /* If the line is found, invalidate it. If a flush is requested, then flush
540 it if it is dirty. */
541 found
= get_tag (cache
, address
, &tag
);
545 /* If a flush is requested, then flush it if it is dirty. */
546 if (tag
->dirty
&& flush
)
547 write_line_to_memory (cache
, tag
);
548 set_least_recently_used (cache
, tag
);
552 /* If this is the insn cache, then flush the cpu's scache as well. */
554 if (cache
== CPU_INSN_CACHE (cpu
))
555 scache_flush_cpu (cpu
);
558 invalidate_return_buffer (cache
, address
);
560 return 1; /* TODO - number of cycles unknown */
563 /* Invalidate the entire cache. Flush the data if requested. */
565 frv_cache_invalidate_all (FRV_CACHE
*cache
, int flush
)
567 /* See if this data is already in the cache. */
568 int elements
= cache
->sets
* cache
->ways
;
569 FRV_CACHE_TAG
*tag
= cache
->tag_storage
;
573 for(i
= 0; i
< elements
; ++i
, ++tag
)
575 /* If a flush is requested, then flush it if it is dirty. */
576 if (tag
->valid
&& tag
->dirty
&& flush
)
577 write_line_to_memory (cache
, tag
);
583 /* If this is the insn cache, then flush the cpu's scache as well. */
585 if (cache
== CPU_INSN_CACHE (cpu
))
586 scache_flush_cpu (cpu
);
588 /* Invalidate both return buffers. */
589 cache
->pipeline
[LS
].status
.return_buffer
.valid
= 0;
590 cache
->pipeline
[LD
].status
.return_buffer
.valid
= 0;
592 return 1; /* TODO - number of cycles unknown */
595 /* ---------------------------------------------------------------------------
596 Functions for operating the cache in cycle accurate mode.
597 ------------------------------------------------------------------------- */
598 /* Convert a VLIW slot to a cache pipeline index. */
600 convert_slot_to_index (int slot
)
615 /* Allocate free chains of cache requests. */
616 #define FREE_CHAIN_SIZE 16
617 static FRV_CACHE_REQUEST
*frv_cache_request_free_chain
= NULL
;
618 static FRV_CACHE_REQUEST
*frv_store_request_free_chain
= NULL
;
621 allocate_new_cache_requests (void)
624 frv_cache_request_free_chain
= xmalloc (FREE_CHAIN_SIZE
625 * sizeof (FRV_CACHE_REQUEST
));
626 for (i
= 0; i
< FREE_CHAIN_SIZE
- 1; ++i
)
628 frv_cache_request_free_chain
[i
].next
629 = & frv_cache_request_free_chain
[i
+ 1];
632 frv_cache_request_free_chain
[FREE_CHAIN_SIZE
- 1].next
= NULL
;
635 /* Return the next free request in the queue for the given cache pipeline. */
636 static FRV_CACHE_REQUEST
*
637 new_cache_request (void)
639 FRV_CACHE_REQUEST
*req
;
641 /* Allocate new elements for the free chain if necessary. */
642 if (frv_cache_request_free_chain
== NULL
)
643 allocate_new_cache_requests ();
645 req
= frv_cache_request_free_chain
;
646 frv_cache_request_free_chain
= req
->next
;
651 /* Return the given cache request to the free chain. */
653 free_cache_request (FRV_CACHE_REQUEST
*req
)
655 if (req
->kind
== req_store
)
657 req
->next
= frv_store_request_free_chain
;
658 frv_store_request_free_chain
= req
;
662 req
->next
= frv_cache_request_free_chain
;
663 frv_cache_request_free_chain
= req
;
667 /* Search the free chain for an existing store request with a buffer that's
669 static FRV_CACHE_REQUEST
*
670 new_store_request (int length
)
672 FRV_CACHE_REQUEST
*prev
= NULL
;
673 FRV_CACHE_REQUEST
*req
;
674 for (req
= frv_store_request_free_chain
; req
!= NULL
; req
= req
->next
)
676 if (req
->u
.store
.length
== length
)
683 frv_store_request_free_chain
= req
->next
;
685 prev
->next
= req
->next
;
689 /* No existing request buffer was found, so make a new one. */
690 req
= new_cache_request ();
691 req
->kind
= req_store
;
692 req
->u
.store
.data
= xmalloc (length
);
693 req
->u
.store
.length
= length
;
697 /* Remove the given request from the given pipeline. */
699 pipeline_remove_request (FRV_CACHE_PIPELINE
*p
, FRV_CACHE_REQUEST
*request
)
701 FRV_CACHE_REQUEST
*next
= request
->next
;
702 FRV_CACHE_REQUEST
*prev
= request
->prev
;
713 /* Add the given request to the given pipeline. */
715 pipeline_add_request (FRV_CACHE_PIPELINE
*p
, FRV_CACHE_REQUEST
*request
)
717 FRV_CACHE_REQUEST
*prev
= NULL
;
718 FRV_CACHE_REQUEST
*item
;
720 /* Add the request in priority order. 0 is the highest priority. */
721 for (item
= p
->requests
; item
!= NULL
; item
= item
->next
)
723 if (item
->priority
> request
->priority
)
728 request
->next
= item
;
729 request
->prev
= prev
;
731 p
->requests
= request
;
733 prev
->next
= request
;
735 item
->prev
= request
;
738 /* Requeu the given request from the last of the given pipeline. */
740 pipeline_requeue_request (FRV_CACHE_PIPELINE
*p
)
742 FRV_CACHE_STAGE
*stage
= & p
->stages
[LAST_STAGE
];
743 FRV_CACHE_REQUEST
*req
= stage
->request
;
744 stage
->request
= NULL
;
745 pipeline_add_request (p
, req
);
748 /* Return the priority lower than the lowest one in this cache pipeline.
749 0 is the highest priority. */
751 next_priority (FRV_CACHE
*cache
, FRV_CACHE_PIPELINE
*pipeline
)
756 FRV_CACHE_REQUEST
*req
;
758 /* Check the priorities of any queued items. */
759 for (req
= pipeline
->requests
; req
!= NULL
; req
= req
->next
)
760 if (req
->priority
> lowest
)
761 lowest
= req
->priority
;
763 /* Check the priorities of items in the pipeline stages. */
764 for (i
= FIRST_STAGE
; i
< FRV_CACHE_STAGES
; ++i
)
766 FRV_CACHE_STAGE
*stage
= & pipeline
->stages
[i
];
767 if (stage
->request
!= NULL
&& stage
->request
->priority
> lowest
)
768 lowest
= stage
->request
->priority
;
771 /* Check the priorities of load requests waiting in WAR. These are one
772 higher than the request that spawned them. */
773 for (i
= 0; i
< NUM_WARS
; ++i
)
775 FRV_CACHE_WAR
*war
= & pipeline
->WAR
[i
];
776 if (war
->valid
&& war
->priority
> lowest
)
777 lowest
= war
->priority
+ 1;
780 /* Check the priorities of any BARS or NARS associated with this pipeline.
781 These are one higher than the request that spawned them. */
782 pipe
= pipeline
- cache
->pipeline
;
783 if (cache
->BARS
.valid
&& cache
->BARS
.pipe
== pipe
784 && cache
->BARS
.priority
> lowest
)
785 lowest
= cache
->BARS
.priority
+ 1;
786 if (cache
->NARS
.valid
&& cache
->NARS
.pipe
== pipe
787 && cache
->NARS
.priority
> lowest
)
788 lowest
= cache
->NARS
.priority
+ 1;
790 /* Return a priority 2 lower than the lowest found. This allows a WAR
791 request to be generated with a priority greater than this but less than
792 the next higher priority request. */
797 add_WAR_request (FRV_CACHE_PIPELINE
* pipeline
, FRV_CACHE_WAR
*war
)
799 /* Add the load request to the indexed pipeline. */
800 FRV_CACHE_REQUEST
*req
= new_cache_request ();
802 req
->reqno
= war
->reqno
;
803 req
->priority
= war
->priority
;
804 req
->address
= war
->address
;
805 req
->u
.WAR
.preload
= war
->preload
;
806 req
->u
.WAR
.lock
= war
->lock
;
807 pipeline_add_request (pipeline
, req
);
810 /* Remove the next request from the given pipeline and return it. */
811 static FRV_CACHE_REQUEST
*
812 pipeline_next_request (FRV_CACHE_PIPELINE
*p
)
814 FRV_CACHE_REQUEST
*first
= p
->requests
;
816 pipeline_remove_request (p
, first
);
820 /* Return the request which is at the given stage of the given pipeline. */
821 static FRV_CACHE_REQUEST
*
822 pipeline_stage_request (FRV_CACHE_PIPELINE
*p
, int stage
)
824 return p
->stages
[stage
].request
;
828 advance_pipelines (FRV_CACHE
*cache
)
832 FRV_CACHE_PIPELINE
*pipelines
= cache
->pipeline
;
834 /* Free the final stage requests. */
835 for (pipe
= 0; pipe
< FRV_CACHE_PIPELINES
; ++pipe
)
837 FRV_CACHE_REQUEST
*req
= pipelines
[pipe
].stages
[LAST_STAGE
].request
;
839 free_cache_request (req
);
842 /* Shuffle the requests along the pipeline. */
843 for (stage
= LAST_STAGE
; stage
> FIRST_STAGE
; --stage
)
845 for (pipe
= 0; pipe
< FRV_CACHE_PIPELINES
; ++pipe
)
846 pipelines
[pipe
].stages
[stage
] = pipelines
[pipe
].stages
[stage
- 1];
849 /* Add a new request to the pipeline. */
850 for (pipe
= 0; pipe
< FRV_CACHE_PIPELINES
; ++pipe
)
851 pipelines
[pipe
].stages
[FIRST_STAGE
].request
852 = pipeline_next_request (& pipelines
[pipe
]);
855 /* Handle a request for a load from the given address. */
857 frv_cache_request_load (FRV_CACHE
*cache
, unsigned reqno
, SI address
, int slot
)
859 FRV_CACHE_REQUEST
*req
;
861 /* slot is a UNIT_*. Convert it to a cache pipeline index. */
862 int pipe
= convert_slot_to_index (slot
);
863 FRV_CACHE_PIPELINE
*pipeline
= & cache
->pipeline
[pipe
];
865 /* Add the load request to the indexed pipeline. */
866 req
= new_cache_request ();
867 req
->kind
= req_load
;
869 req
->priority
= next_priority (cache
, pipeline
);
870 req
->address
= address
;
872 pipeline_add_request (pipeline
, req
);
876 frv_cache_request_store (FRV_CACHE
*cache
, SI address
,
877 int slot
, char *data
, unsigned length
)
879 FRV_CACHE_REQUEST
*req
;
881 /* slot is a UNIT_*. Convert it to a cache pipeline index. */
882 int pipe
= convert_slot_to_index (slot
);
883 FRV_CACHE_PIPELINE
*pipeline
= & cache
->pipeline
[pipe
];
885 /* Add the load request to the indexed pipeline. */
886 req
= new_store_request (length
);
887 req
->kind
= req_store
;
888 req
->reqno
= NO_REQNO
;
889 req
->priority
= next_priority (cache
, pipeline
);
890 req
->address
= address
;
891 req
->u
.store
.length
= length
;
892 memcpy (req
->u
.store
.data
, data
, length
);
894 pipeline_add_request (pipeline
, req
);
895 invalidate_return_buffer (cache
, address
);
898 /* Handle a request to invalidate the cache line containing the given address.
899 Flush the data if requested. */
901 frv_cache_request_invalidate (FRV_CACHE
*cache
, unsigned reqno
, SI address
,
902 int slot
, int all
, int flush
)
904 FRV_CACHE_REQUEST
*req
;
906 /* slot is a UNIT_*. Convert it to a cache pipeline index. */
907 int pipe
= convert_slot_to_index (slot
);
908 FRV_CACHE_PIPELINE
*pipeline
= & cache
->pipeline
[pipe
];
910 /* Add the load request to the indexed pipeline. */
911 req
= new_cache_request ();
912 req
->kind
= req_invalidate
;
914 req
->priority
= next_priority (cache
, pipeline
);
915 req
->address
= address
;
916 req
->u
.invalidate
.all
= all
;
917 req
->u
.invalidate
.flush
= flush
;
919 pipeline_add_request (pipeline
, req
);
922 /* Handle a request to preload the cache line containing the given address. */
924 frv_cache_request_preload (FRV_CACHE
*cache
, SI address
,
925 int slot
, int length
, int lock
)
927 FRV_CACHE_REQUEST
*req
;
929 /* slot is a UNIT_*. Convert it to a cache pipeline index. */
930 int pipe
= convert_slot_to_index (slot
);
931 FRV_CACHE_PIPELINE
*pipeline
= & cache
->pipeline
[pipe
];
933 /* Add the load request to the indexed pipeline. */
934 req
= new_cache_request ();
935 req
->kind
= req_preload
;
936 req
->reqno
= NO_REQNO
;
937 req
->priority
= next_priority (cache
, pipeline
);
938 req
->address
= address
;
939 req
->u
.preload
.length
= length
;
940 req
->u
.preload
.lock
= lock
;
942 pipeline_add_request (pipeline
, req
);
943 invalidate_return_buffer (cache
, address
);
946 /* Handle a request to unlock the cache line containing the given address. */
948 frv_cache_request_unlock (FRV_CACHE
*cache
, SI address
, int slot
)
950 FRV_CACHE_REQUEST
*req
;
952 /* slot is a UNIT_*. Convert it to a cache pipeline index. */
953 int pipe
= convert_slot_to_index (slot
);
954 FRV_CACHE_PIPELINE
*pipeline
= & cache
->pipeline
[pipe
];
956 /* Add the load request to the indexed pipeline. */
957 req
= new_cache_request ();
958 req
->kind
= req_unlock
;
959 req
->reqno
= NO_REQNO
;
960 req
->priority
= next_priority (cache
, pipeline
);
961 req
->address
= address
;
963 pipeline_add_request (pipeline
, req
);
966 /* Check whether this address interferes with a pending request of
969 address_interference (FRV_CACHE
*cache
, SI address
, FRV_CACHE_REQUEST
*req
,
973 int line_mask
= ~(cache
->line_size
- 1);
975 int priority
= req
->priority
;
976 FRV_CACHE_REQUEST
*other_req
;
980 address
&= line_mask
;
981 all_address
= -1 & line_mask
;
983 /* Check for collisions in the queue for this pipeline. */
984 for (other_req
= cache
->pipeline
[pipe
].requests
;
986 other_req
= other_req
->next
)
988 other_address
= other_req
->address
& line_mask
;
989 if ((address
== other_address
|| address
== all_address
)
990 && priority
> other_req
->priority
)
994 /* Check for a collision in the the other pipeline. */
995 other_pipe
= pipe
^ 1;
996 other_req
= cache
->pipeline
[other_pipe
].stages
[LAST_STAGE
].request
;
997 if (other_req
!= NULL
)
999 other_address
= other_req
->address
& line_mask
;
1000 if (address
== other_address
|| address
== all_address
)
1004 /* Check for a collision with load requests waiting in WAR. */
1005 for (i
= LS
; i
< FRV_CACHE_PIPELINES
; ++i
)
1007 for (j
= 0; j
< NUM_WARS
; ++j
)
1009 FRV_CACHE_WAR
*war
= & cache
->pipeline
[i
].WAR
[j
];
1011 && (address
== (war
->address
& line_mask
)
1012 || address
== all_address
)
1013 && priority
> war
->priority
)
1016 /* If this is not a WAR request, then yield to any WAR requests in
1018 if (req
->kind
!= req_WAR
)
1020 for (j
= FIRST_STAGE
; j
< FRV_CACHE_STAGES
; ++j
)
1022 other_req
= cache
->pipeline
[i
].stages
[j
].request
;
1023 if (other_req
!= NULL
&& other_req
->kind
== req_WAR
)
1029 /* Check for a collision with load requests waiting in ARS. */
1030 if (cache
->BARS
.valid
1031 && (address
== (cache
->BARS
.address
& line_mask
)
1032 || address
== all_address
)
1033 && priority
> cache
->BARS
.priority
)
1035 if (cache
->NARS
.valid
1036 && (address
== (cache
->NARS
.address
& line_mask
)
1037 || address
== all_address
)
1038 && priority
> cache
->NARS
.priority
)
1044 /* Wait for a free WAR register in BARS or NARS. */
1046 wait_for_WAR (FRV_CACHE
* cache
, int pipe
, FRV_CACHE_REQUEST
*req
)
1049 FRV_CACHE_PIPELINE
*pipeline
= & cache
->pipeline
[pipe
];
1051 if (! cache
->BARS
.valid
)
1053 cache
->BARS
.pipe
= pipe
;
1054 cache
->BARS
.reqno
= req
->reqno
;
1055 cache
->BARS
.address
= req
->address
;
1056 cache
->BARS
.priority
= req
->priority
- 1;
1060 cache
->BARS
.preload
= 0;
1061 cache
->BARS
.lock
= 0;
1064 cache
->BARS
.preload
= 1;
1065 cache
->BARS
.lock
= 0;
1068 cache
->BARS
.preload
= 1;
1069 cache
->BARS
.lock
= req
->u
.preload
.lock
;
1072 cache
->BARS
.valid
= 1;
1075 if (! cache
->NARS
.valid
)
1077 cache
->NARS
.pipe
= pipe
;
1078 cache
->NARS
.reqno
= req
->reqno
;
1079 cache
->NARS
.address
= req
->address
;
1080 cache
->NARS
.priority
= req
->priority
- 1;
1084 cache
->NARS
.preload
= 0;
1085 cache
->NARS
.lock
= 0;
1088 cache
->NARS
.preload
= 1;
1089 cache
->NARS
.lock
= 0;
1092 cache
->NARS
.preload
= 1;
1093 cache
->NARS
.lock
= req
->u
.preload
.lock
;
1096 cache
->NARS
.valid
= 1;
1099 /* All wait registers are busy, so resubmit this request. */
1100 pipeline_requeue_request (pipeline
);
1103 /* Find a free WAR register and wait for memory to fetch the data. */
1105 wait_in_WAR (FRV_CACHE
* cache
, int pipe
, FRV_CACHE_REQUEST
*req
)
1108 FRV_CACHE_PIPELINE
*pipeline
= & cache
->pipeline
[pipe
];
1110 /* Find a valid WAR to hold this request. */
1111 for (war
= 0; war
< NUM_WARS
; ++war
)
1112 if (! pipeline
->WAR
[war
].valid
)
1114 if (war
>= NUM_WARS
)
1116 wait_for_WAR (cache
, pipe
, req
);
1120 pipeline
->WAR
[war
].address
= req
->address
;
1121 pipeline
->WAR
[war
].reqno
= req
->reqno
;
1122 pipeline
->WAR
[war
].priority
= req
->priority
- 1;
1123 pipeline
->WAR
[war
].latency
= cache
->memory_latency
+ 1;
1127 pipeline
->WAR
[war
].preload
= 0;
1128 pipeline
->WAR
[war
].lock
= 0;
1131 pipeline
->WAR
[war
].preload
= 1;
1132 pipeline
->WAR
[war
].lock
= 0;
1135 pipeline
->WAR
[war
].preload
= 1;
1136 pipeline
->WAR
[war
].lock
= req
->u
.preload
.lock
;
1139 pipeline
->WAR
[war
].valid
= 1;
1143 handle_req_load (FRV_CACHE
*cache
, int pipe
, FRV_CACHE_REQUEST
*req
)
1146 SI address
= req
->address
;
1148 /* If this address interferes with an existing request, then requeue it. */
1149 if (address_interference (cache
, address
, req
, pipe
))
1151 pipeline_requeue_request (& cache
->pipeline
[pipe
]);
1155 if (frv_cache_enabled (cache
) && ! non_cache_access (cache
, address
))
1157 int found
= get_tag (cache
, address
, &tag
);
1159 /* If the data was found, return it to the caller. */
1162 set_most_recently_used (cache
, tag
);
1163 copy_line_to_return_buffer (cache
, pipe
, tag
, address
);
1164 set_return_buffer_reqno (cache
, pipe
, req
->reqno
);
1169 /* The data is not in the cache or this is a non-cache access. We need to
1170 wait for the memory unit to fetch it. Store this request in the WAR in
1172 wait_in_WAR (cache
, pipe
, req
);
1176 handle_req_preload (FRV_CACHE
*cache
, int pipe
, FRV_CACHE_REQUEST
*req
)
1186 SI address
= req
->address
;
1189 if (! frv_cache_enabled (cache
) || non_cache_access (cache
, address
))
1192 /* preload at least 1 line. */
1193 length
= req
->u
.preload
.length
;
1197 /* Make sure that this request does not interfere with a pending request. */
1198 offset
= address
& (cache
->line_size
- 1);
1199 lines
= 1 + (offset
+ length
- 1) / cache
->line_size
;
1200 cur_address
= address
& ~(cache
->line_size
- 1);
1201 for (line
= 0; line
< lines
; ++line
)
1203 /* If this address interferes with an existing request,
1205 if (address_interference (cache
, cur_address
, req
, pipe
))
1207 pipeline_requeue_request (& cache
->pipeline
[pipe
]);
1210 cur_address
+= cache
->line_size
;
1213 /* Now process each cache line. */
1214 /* Careful with this loop -- length is unsigned. */
1215 lock
= req
->u
.preload
.lock
;
1216 cur_address
= address
& ~(cache
->line_size
- 1);
1217 for (line
= 0; line
< lines
; ++line
)
1219 /* If the data was found, then lock it if requested. */
1220 found
= get_tag (cache
, cur_address
, &tag
);
1228 /* The data is not in the cache. We need to wait for the memory
1229 unit to fetch it. Store this request in the WAR in the meantime.
1231 wait_in_WAR (cache
, pipe
, req
);
1233 cur_address
+= cache
->line_size
;
/* Handle a store request at the end of the pipeline.  Writes go to the
   cache line on a hit (or on a miss in copy-back mode) and straight to
   memory on a non-cache access or in write-through mode.
   NOTE(review): return type, local declarations and the dropped
   `if (! found)` / dirty-bit lines restored from extraction-garbled text --
   confirm against upstream sim/frv/cache.c.  */
static void
handle_req_store (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  SIM_CPU *current_cpu;
  FRV_CACHE_TAG *tag;
  int found;
  int copy_back;
  SI address = req->address;
  char *data = req->u.store.data;
  int length = req->u.store.length;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  /* Non-cache access.  Write the data directly to memory.  */
  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return;
    }

  /* See if the data is in the cache.  */
  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.
     NOTE: current_cpu is referenced implicitly by the GET_HSR0* macros,
     so the assignment below is NOT dead -- do not remove it.  */
  current_cpu = cache->cpu;
  copy_back = GET_HSR0_CBM (GET_HSR0 ());
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
	{
	  /* We need to wait for the memory unit to fetch the data.
	     Store this request in the WAR and requeue the store request.  */
	  wait_in_WAR (cache, pipe, req);
	  pipeline_requeue_request (& cache->pipeline[pipe]);
	  /* Decrement the counts of accesses and hits because when the
	     requeued request is processed again, it will appear to be a new
	     access and would otherwise be counted twice.  */
	  --cache->statistics.accesses;
	  --cache->statistics.hits;
	  return;
	}
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      invalidate_return_buffer (cache, address);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back mode).  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
	tag->dirty = 0;
    }
}
1308 handle_req_invalidate (FRV_CACHE
*cache
, int pipe
, FRV_CACHE_REQUEST
*req
)
1310 FRV_CACHE_PIPELINE
*pipeline
= & cache
->pipeline
[pipe
];
1311 SI address
= req
->address
;
1312 SI interfere_address
= req
->u
.invalidate
.all
? -1 : address
;
1314 /* If this address interferes with an existing request, then requeue it. */
1315 if (address_interference (cache
, interfere_address
, req
, pipe
))
1317 pipeline_requeue_request (pipeline
);
1321 /* Invalidate the cache line now. This function already checks for
1322 non-cache access. */
1323 if (req
->u
.invalidate
.all
)
1324 frv_cache_invalidate_all (cache
, req
->u
.invalidate
.flush
);
1326 frv_cache_invalidate (cache
, address
, req
->u
.invalidate
.flush
);
1327 if (req
->u
.invalidate
.flush
)
1329 pipeline
->status
.flush
.reqno
= req
->reqno
;
1330 pipeline
->status
.flush
.address
= address
;
1331 pipeline
->status
.flush
.valid
= 1;
1336 handle_req_unlock (FRV_CACHE
*cache
, int pipe
, FRV_CACHE_REQUEST
*req
)
1338 FRV_CACHE_PIPELINE
*pipeline
= & cache
->pipeline
[pipe
];
1339 SI address
= req
->address
;
1341 /* If this address interferes with an existing request, then requeue it. */
1342 if (address_interference (cache
, address
, req
, pipe
))
1344 pipeline_requeue_request (pipeline
);
1348 /* Unlock the cache line. This function checks for non-cache access. */
1349 frv_cache_unlock (cache
, address
);
/* Handle a request coming from an expired WAR (wait register): the memory
   latency has elapsed, so the fetched line can now be installed in the
   cache and/or delivered through the return buffer.
   NOTE(review): return type, declarations and the dropped branch/return
   lines restored from extraction-garbled text -- confirm upstream.  */
static void
handle_req_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      /* Look for the data in the cache.  The statistics of cache hit or
	 miss have already been recorded, so save and restore the stats before
	 and after obtaining the cache line.  */
      FRV_CACHE_STATISTICS save_stats = cache->statistics;
      tag = find_or_retrieve_cache_line (cache, address);
      cache->statistics = save_stats;
      if (tag != NULL)
	{
	  if (! req->u.WAR.preload)
	    {
	      /* A normal load: hand the line to the requester.  */
	      copy_line_to_return_buffer (cache, pipe, tag, address);
	      set_return_buffer_reqno (cache, pipe, req->reqno);
	    }
	  else
	    {
	      /* A preload: nothing is returned; lock the line if asked.  */
	      invalidate_return_buffer (cache, address);
	      if (req->u.WAR.lock)
		tag->locked = 1;
	    }
	  return;
	}
    }

  /* All cache lines in the set were locked, so just copy the data to the
     return buffer directly.  */
  if (! req->u.WAR.preload)
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      set_return_buffer_reqno (cache, pipe, req->reqno);
    }
}
/* Resolve any conflicts and/or execute the given requests.
   NOTE(review): several `case` labels and the default were dropped by
   extraction and restored here -- confirm against upstream.  */
static void
arbitrate_requests (FRV_CACHE *cache)
{
  int pipe;

  /* Simply execute the requests in the final pipeline stages.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req
	= pipeline_stage_request (& cache->pipeline[pipe], LAST_STAGE);
      /* Make sure that there is a request to handle.  */
      if (req == NULL)
	continue;

      /* Handle the request by dispatching on its kind.  */
      switch (req->kind)
	{
	case req_load:
	  handle_req_load (cache, pipe, req);
	  break;
	case req_store:
	  handle_req_store (cache, pipe, req);
	  break;
	case req_invalidate:
	  handle_req_invalidate (cache, pipe, req);
	  break;
	case req_preload:
	  handle_req_preload (cache, pipe, req);
	  break;
	case req_unlock:
	  handle_req_unlock (cache, pipe, req);
	  break;
	case req_WAR:
	  handle_req_WAR (cache, pipe, req);
	  break;
	default:
	  abort ();  /* Unknown request kind -- internal error.  */
	}
    }
}
/* Move a waiting ARS register to a free WAR register.  BARS is served
   before NARS; when BARS is consumed, NARS (if valid) is promoted to BARS.
   NOTE(review): the dropped `war->valid = 1;` and `return;` lines are
   restored from extraction-garbled text -- confirm upstream.  */
static void
move_ARS_to_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_WAR *war)
{
  /* If BARS is valid for this pipe, then move it to the given WAR.  Move
     NARS to BARS if it is valid.  */
  if (cache->BARS.valid && cache->BARS.pipe == pipe)
    {
      war->address = cache->BARS.address;
      war->reqno = cache->BARS.reqno;
      war->priority = cache->BARS.priority;
      war->preload = cache->BARS.preload;
      war->lock = cache->BARS.lock;
      /* Restart the memory-latency countdown for the new occupant.  */
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      if (cache->NARS.valid)
	{
	  cache->BARS = cache->NARS;
	  cache->NARS.valid = 0;
	}
      else
	cache->BARS.valid = 0;
      return;
    }

  /* If NARS is valid for this pipe, then move it to the given WAR.  */
  if (cache->NARS.valid && cache->NARS.pipe == pipe)
    {
      war->address = cache->NARS.address;
      war->reqno = cache->NARS.reqno;
      war->priority = cache->NARS.priority;
      war->preload = cache->NARS.preload;
      war->lock = cache->NARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      cache->NARS.valid = 0;
    }
}
/* Decrease the latencies of the various states in the cache.  Each valid
   WAR register is aged by one cycle; when its latency expires, its request
   is injected into the pipeline and the register is refilled from a
   waiting ARS entry, if any.
   NOTE(review): the dropped `if (war->valid)`, `--war->latency;` and
   `war->valid = 0;` lines are restored -- confirm upstream.  */
static void
decrease_latencies (FRV_CACHE *cache)
{
  int pipe, j;

  /* Check the WAR registers.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
      for (j = 0; j < NUM_WARS; ++j)
	{
	  FRV_CACHE_WAR *war = & pipeline->WAR[j];
	  if (war->valid)
	    {
	      --war->latency;
	      /* If the latency has expired, then submit a WAR request to the
		 pipeline.  */
	      if (war->latency <= 0)
		{
		  add_WAR_request (pipeline, war);
		  war->valid = 0;
		  /* The freed WAR slot can immediately take a waiting ARS.  */
		  move_ARS_to_WAR (cache, pipe, war);
		}
	    }
	}
    }
}
1500 /* Run the cache for the given number of cycles. */
1502 frv_cache_run (FRV_CACHE
*cache
, int cycles
)
1505 for (i
= 0; i
< cycles
; ++i
)
1507 advance_pipelines (cache
);
1508 arbitrate_requests (cache
);
1509 decrease_latencies (cache
);
/* Read a word (SI) from the cache at the given address without disturbing
   the cache state or its statistics.  Store the word in *VALUE and return
   non-zero on a hit; return 0 on a non-cache access or a miss.
   NOTE(review): return type, declarations and the trailing `return 1;`
   restored from lines dropped by extraction -- confirm upstream.  */
int
frv_cache_read_passive_SI (FRV_CACHE *cache, SI address, SI *value)
{
  SI offset;
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    return 0;

  {
    /* get_tag updates the hit/miss statistics, but a passive read must be
       side-effect free, so save and restore them around the lookup.  */
    FRV_CACHE_STATISTICS save_stats = cache->statistics;
    int found = get_tag (cache, address, &tag);
    cache->statistics = save_stats;

    if (! found)
      return 0; /* Indicate non-cache-access.  */
  }

  /* A cache line was available for the data.
     Extract the target data from the line.  */
  offset = address & (cache->line_size - 1);
  /* T2H_4 converts the line's target byte order to host byte order.  */
  *value = T2H_4 (*(SI *)(tag->line + offset));
  return 1;
}
/* Check the return buffers of the data cache to see if the requested data is
   available.  The data is present iff the pipe's return buffer is valid,
   belongs to the same request, and ADDRESS falls inside the buffered line.
   NOTE(review): the trailing `> address;` comparison and closing brace were
   dropped by extraction and restored here -- confirm upstream.  */
int
frv_cache_data_in_buffer (FRV_CACHE* cache, int pipe, SI address,
			  unsigned reqno)
{
  return cache->pipeline[pipe].status.return_buffer.valid
    && cache->pipeline[pipe].status.return_buffer.reqno == reqno
    && cache->pipeline[pipe].status.return_buffer.address <= address
    && cache->pipeline[pipe].status.return_buffer.address + cache->line_size
       > address;
}
1554 frv_cache_data_flushed (FRV_CACHE
* cache
, int pipe
, SI address
, unsigned reqno
)
1556 return cache
->pipeline
[pipe
].status
.flush
.valid
1557 && cache
->pipeline
[pipe
].status
.flush
.reqno
== reqno
1558 && cache
->pipeline
[pipe
].status
.flush
.address
<= address
1559 && cache
->pipeline
[pipe
].status
.flush
.address
+ cache
->line_size