/* frv cache model.
   Copyright (C) 1999, 2000, 2001, 2003 Free Software Foundation, Inc.
   Contributed by Red Hat.

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */

#define WANT_CPU frvbf
#define WANT_CPU_FRVBF

#include "libiberty.h"
#include "sim-main.h"
#include "cache.h"
#include "bfd.h"
void
frv_cache_init (SIM_CPU *cpu, FRV_CACHE *cache)
{
  int elements;
  int i, j;
  SIM_DESC sd;

  /* Set defaults for fields which are not initialized.  */
  sd = CPU_STATE (cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
      if (cache->configured_sets == 0)
        cache->configured_sets = 128;
      if (cache->configured_ways == 0)
        cache->configured_ways = 2;
      if (cache->line_size == 0)
        cache->line_size = 32;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    case bfd_mach_fr550:
      if (cache->configured_sets == 0)
        cache->configured_sets = 128;
      if (cache->configured_ways == 0)
        cache->configured_ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    default:
      if (cache->configured_sets == 0)
        cache->configured_sets = 64;
      if (cache->configured_ways == 0)
        cache->configured_ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    }
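
  /* With these defaults the data storage allocated below comes to
     128 sets x 2 ways x 32-byte lines =  8KB for the fr400,
     128 sets x 4 ways x 64-byte lines = 32KB for the fr550, and
      64 sets x 4 ways x 64-byte lines = 16KB for other variants.  */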

  frv_cache_reconfigure (cpu, cache);

  /* First allocate the cache storage based on the given dimensions.  */
  elements = cache->sets * cache->ways;
  cache->tag_storage = (FRV_CACHE_TAG *)
    zalloc (elements * sizeof (*cache->tag_storage));
  cache->data_storage = (char *) xmalloc (elements * cache->line_size);

  /* Initialize the pipelines and status buffers.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      cache->pipeline[i].requests = NULL;
      cache->pipeline[i].status.flush.valid = 0;
      cache->pipeline[i].status.return_buffer.valid = 0;
      cache->pipeline[i].status.return_buffer.data
        = (char *) xmalloc (cache->line_size);
      for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
        cache->pipeline[i].stages[j].request = NULL;
    }
  cache->BARS.valid = 0;
  cache->NARS.valid = 0;

  /* Now set the cache state.  */
  cache->cpu = cpu;
  cache->statistics.accesses = 0;
  cache->statistics.hits = 0;
}

void
frv_cache_term (FRV_CACHE *cache)
{
  /* Free the cache storage.  */
  free (cache->tag_storage);
  free (cache->data_storage);
  free (cache->pipeline[LS].status.return_buffer.data);
  free (cache->pipeline[LD].status.return_buffer.data);
}

/* Reset the cache configuration based on registers in the cpu.  */
void
frv_cache_reconfigure (SIM_CPU *current_cpu, FRV_CACHE *cache)
{
  int ihsr8;
  int icdm;
  SIM_DESC sd;

  /* Set defaults for fields which are not initialized.  */
  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          ihsr8 = GET_IHSR8 ();
          icdm = GET_IHSR8_ICDM (ihsr8);
          /* If IHSR8.ICDM is set, then the cache becomes a one way cache.  */
          if (icdm)
            {
              cache->sets = cache->sets * cache->ways;
              cache->ways = 1;
              break;
            }
        }
      /* fall through */
    default:
      /* Set the cache to its original settings.  */
      cache->sets = cache->configured_sets;
      cache->ways = cache->configured_ways;
      break;
    }
}

/* Determine whether the given cache is enabled.  */
int
frv_cache_enabled (FRV_CACHE *cache)
{
  SIM_CPU *current_cpu = cache->cpu;
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0) && cache == CPU_INSN_CACHE (current_cpu))
    return 1;
  if (GET_HSR0_DCE (hsr0) && cache == CPU_DATA_CACHE (current_cpu))
    return 1;
  return 0;
}

/* Determine whether the given address is a RAM access, assuming that
   HSR0.RME is set.  */
static int
ram_access (FRV_CACHE *cache, USI address)
{
  int ihsr8;
  int cwe;
  USI start, end, way_size;
  SIM_CPU *current_cpu = cache->cpu;
  SIM_DESC sd = CPU_STATE (current_cpu);

  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      /* IHSR8.DCWE or IHSR8.ICWE determines which ways get RAM access.  */
      ihsr8 = GET_IHSR8 ();
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          start = 0xfe000000;
          end = 0xfe008000;
          cwe = GET_IHSR8_ICWE (ihsr8);
        }
      else
        {
          start = 0xfe400000;
          end = 0xfe408000;
          cwe = GET_IHSR8_DCWE (ihsr8);
        }
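      /* The 32KB RAM region is divided into 4 way-sized windows of
         0x2000 bytes; each unit of the CWE field removes one window from
         the top.  E.g. cwe == 1 for the insn cache leaves RAM access for
         0xfe000000..0xfe005fff only.  */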
      way_size = (end - start) / 4;
      end -= way_size * cwe;
      return address >= start && address < end;
    default:
      break;
    }

  return 1; /* RAM access */
}

/* Determine whether the given address should be accessed without using
   the cache.  */
static int
non_cache_access (FRV_CACHE *cache, USI address)
{
  int hsr0;
  SIM_DESC sd;
  SIM_CPU *current_cpu = cache->cpu;

  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
      if (address >= 0xff000000
          || (address >= 0xfe000000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      break;
    case bfd_mach_fr550:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe007fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe407fff)
        return 1; /* non-cache access */
      break;
    default:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe003fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe403fff)
        return 1; /* non-cache access */
      break;
    }

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_RME (hsr0))
    return ram_access (cache, address);

  return 0; /* cache access */
}

/* Find the cache line corresponding to the given address.
   If it is found then 'return_tag' is set to point to the tag for that line
   and 1 is returned.
   If it is not found, 'return_tag' is set to point to the tag for the least
   recently used line and 0 is returned.  */
static int
get_tag (FRV_CACHE *cache, SI address, FRV_CACHE_TAG **return_tag)
{
  int set;
  int way;
  int bits;
  USI tag;
  FRV_CACHE_TAG *found;
  FRV_CACHE_TAG *available;

  ++cache->statistics.accesses;

  /* First calculate which set this address will fall into.  Do this by
     shifting out the bits representing the offset within the line and
     then keeping enough bits to index the set.  */
  set = address & ~(cache->line_size - 1);
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set >>= 1;
  set &= (cache->sets - 1);
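  /* The net effect is set = (address / line_size) % sets.  E.g. with the
     default 64 sets of 64-byte lines, address 0x12345678 maps to set
     (0x12345678 >> 6) & 63 == 25.  */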

  /* Now search the set for a valid tag which matches this address.  At the
     same time make note of the least recently used tag, which we will return
     if no match is found.  */
  available = NULL;
  tag = CACHE_ADDRESS_TAG (cache, address);
  for (way = 0; way < cache->ways; ++way)
    {
      found = CACHE_TAG (cache, set, way);
      /* This tag is available as the least recently used if it is the
         least recently used seen so far and it is not locked.  */
      if (! found->locked && (available == NULL || available->lru > found->lru))
        available = found;
      if (found->valid && found->tag == tag)
        {
          *return_tag = found;
          ++cache->statistics.hits;
          return 1; /* found it */
        }
    }

  *return_tag = available;
  return 0; /* not found */
}

/* Write the given data out to memory.  */
static void
write_data_to_memory (FRV_CACHE *cache, SI address, char *data, int length)
{
  SIM_CPU *cpu = cache->cpu;
  IADDR pc = CPU_PC_GET (cpu);
  int write_index = 0;

  switch (length)
    {
    case 1:
    default:
      PROFILE_COUNT_WRITE (cpu, address, MODE_QI);
      break;
    case 2:
      PROFILE_COUNT_WRITE (cpu, address, MODE_HI);
      break;
    case 4:
      PROFILE_COUNT_WRITE (cpu, address, MODE_SI);
      break;
    case 8:
      PROFILE_COUNT_WRITE (cpu, address, MODE_DI);
      break;
    }

  for (write_index = 0; write_index < length; ++write_index)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      sim_core_write_unaligned_1 (cpu, pc, write_map, address + write_index,
                                  data[write_index]);
    }
}

/* Write a cache line out to memory.  */
static void
write_line_to_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  SI address = tag->tag;
  int set = CACHE_TAG_SET_NUMBER (cache, tag);
  int bits;
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set <<= 1;
  address |= set;
  write_data_to_memory (cache, address, tag->line, cache->line_size);
}

static void
read_data_from_memory (SIM_CPU *current_cpu, SI address, char *buffer,
                       int length)
{
  PCADDR pc = CPU_PC_GET (current_cpu);
  int i;
  PROFILE_COUNT_READ (current_cpu, address, MODE_QI);
  for (i = 0; i < length; ++i)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      buffer[i] = sim_core_read_unaligned_1 (current_cpu, pc, read_map,
                                             address + i);
    }
}

/* Fill the given cache line from memory.  */
static void
fill_line_from_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag, SI address)
{
  PCADDR pc;
  int line_alignment;
  SI read_address;
  SIM_CPU *current_cpu = cache->cpu;

  /* If this line is already valid and the cache is in copy-back mode, then
     write this line to memory before refilling it.
     Check the dirty bit first, since it is less likely to be set.  */
  if (tag->dirty && tag->valid)
    {
      int hsr0 = GET_HSR0 ();
      if (GET_HSR0_CBM (hsr0))
        write_line_to_memory (cache, tag);
    }
  else if (tag->line == NULL)
    {
      int line_index = tag - cache->tag_storage;
      tag->line = cache->data_storage + (line_index * cache->line_size);
    }

  pc = CPU_PC_GET (current_cpu);
  line_alignment = cache->line_size - 1;
  read_address = address & ~line_alignment;
  read_data_from_memory (current_cpu, read_address, tag->line,
                         cache->line_size);
  tag->tag = CACHE_ADDRESS_TAG (cache, address);
  tag->valid = 1;
}

/* Update the LRU information for the tags in the same set as the given tag.  */
static void
set_most_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru > tag->lru)
        --item->lru;
      ++item;
    }
  tag->lru = cache->ways; /* Mark as most recently used.  */
}
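
/* Once every way in a set has been touched, the lru fields hold the
   values ways..1, ordered from most to least recently used; get_tag
   selects the unlocked tag with the smallest lru value as the
   replacement candidate.  */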

/* Update the LRU information for the tags in the same set as the given tag.  */
static void
set_least_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru != 0 && item->lru < tag->lru)
        ++item->lru;
      ++item;
    }
  tag->lru = 0; /* Mark as least recently used.  */
}

/* Find the line containing the given address and load it if it is not
   already loaded.
   Returns the tag of the requested line.  */
static FRV_CACHE_TAG *
find_or_retrieve_cache_line (FRV_CACHE *cache, SI address)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found = get_tag (cache, address, &tag);

  /* Fill the line from memory, if it is not valid.  */
  if (! found)
    {
      /* The tag could be NULL if all ways in the set were used and locked.  */
      if (tag == NULL)
        return tag;

      fill_line_from_memory (cache, tag, address);
      tag->dirty = 0;
    }

  /* Update the LRU information for the tags in this set.  */
  set_most_recently_used (cache, tag);

  return tag;
}

static void
copy_line_to_return_buffer (FRV_CACHE *cache, int pipe, FRV_CACHE_TAG *tag,
                            SI address)
{
  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  memcpy (cache->pipeline[pipe].status.return_buffer.data,
          tag->line, cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address
    = address & ~(cache->line_size - 1);
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}

static void
copy_memory_to_return_buffer (FRV_CACHE *cache, int pipe, SI address)
{
  address &= ~(cache->line_size - 1);
  read_data_from_memory (cache->cpu, address,
                         cache->pipeline[pipe].status.return_buffer.data,
                         cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address = address;
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}

static void
set_return_buffer_reqno (FRV_CACHE *cache, int pipe, unsigned reqno)
{
  cache->pipeline[pipe].status.return_buffer.reqno = reqno;
}

/* Read data from the given cache.
   Returns the number of cycles required to obtain the data.  */
int
frv_cache_read (FRV_CACHE *cache, int pipe, SI address)
{
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      return 1;
    }

  tag = find_or_retrieve_cache_line (cache, address);

  if (tag == NULL)
    return 0; /* Indicate non-cache-access.  */

  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  copy_line_to_return_buffer (cache, pipe, tag, address);

  return 1; /* TODO - number of cycles unknown */
}

/* Write data through the given cache.
   The data is assumed to be in target endian order.
   Returns the number of cycles required to write the data.  */
int
frv_cache_write (FRV_CACHE *cache, SI address, char *data, unsigned length)
{
  int copy_back;

  /* See if this data is already in the cache.  */
  SIM_CPU *current_cpu = cache->cpu;
  USI hsr0 = GET_HSR0 ();
  FRV_CACHE_TAG *tag;
  int found;

  if (non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return 1;
    }

  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  copy_back = GET_HSR0_CBM (hsr0);
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        fill_line_from_memory (cache, tag, address);
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back) mode.  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }

  return 1; /* TODO - number of cycles unknown */
}

/* Preload the cache line containing the given address.  Lock the
   data if requested.
   Returns the number of cycles required to preload the data.  */
int
frv_cache_preload (FRV_CACHE *cache, SI address, USI length, int lock)
{
  int offset;
  int lines;

  if (non_cache_access (cache, address))
    return 1;

  /* Preload at least one line.  */
  if (length == 0)
    length = 1;

  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;
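  /* E.g. with 64-byte lines, preloading 8 bytes starting at line offset 60
     spans lines = 1 + 67/64 = 2 cache lines.  */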

  /* Careful with this loop -- length is unsigned.  */
  for (/**/; lines > 0; --lines)
    {
      FRV_CACHE_TAG *tag = find_or_retrieve_cache_line (cache, address);
      if (lock && tag != NULL)
        tag->locked = 1;
      address += cache->line_size;
    }

  return 1; /* TODO - number of cycles unknown */
}

/* Unlock the cache line containing the given address.
   Returns the number of cycles required to unlock the line.  */
int
frv_cache_unlock (FRV_CACHE *cache, SI address)
{
  FRV_CACHE_TAG *tag;
  int found;

  if (non_cache_access (cache, address))
    return 1;

  found = get_tag (cache, address, &tag);

  if (found)
    tag->locked = 0;

  return 1; /* TODO - number of cycles unknown */
}

static void
invalidate_return_buffer (FRV_CACHE *cache, SI address)
{
  /* If this address is in one of the return buffers, then invalidate that
     return buffer.  */
  address &= ~(cache->line_size - 1);
  if (address == cache->pipeline[LS].status.return_buffer.address)
    cache->pipeline[LS].status.return_buffer.valid = 0;
  if (address == cache->pipeline[LD].status.return_buffer.address)
    cache->pipeline[LD].status.return_buffer.valid = 0;
}

/* Invalidate the cache line containing the given address.  Flush the
   data if requested.
   Returns the number of cycles required to invalidate the line.  */
int
frv_cache_invalidate (FRV_CACHE *cache, SI address, int flush)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found;

  /* Check for non-cache access.  This operation is still performed even if
     the cache is not currently enabled.  */
  if (non_cache_access (cache, address))
    return 1;

  /* If the line is found, invalidate it.  If a flush is requested, then flush
     it if it is dirty.  */
  found = get_tag (cache, address, &tag);
  if (found)
    {
      SIM_CPU *cpu;
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->dirty && flush)
        write_line_to_memory (cache, tag);
      set_least_recently_used (cache, tag);
      tag->valid = 0;
      tag->locked = 0;

      /* If this is the insn cache, then flush the cpu's scache as well.  */
      cpu = cache->cpu;
      if (cache == CPU_INSN_CACHE (cpu))
        scache_flush_cpu (cpu);
    }

  invalidate_return_buffer (cache, address);

  return 1; /* TODO - number of cycles unknown */
}

/* Invalidate the entire cache.  Flush the data if requested.  */
int
frv_cache_invalidate_all (FRV_CACHE *cache, int flush)
{
  int elements = cache->sets * cache->ways;
  FRV_CACHE_TAG *tag = cache->tag_storage;
  SIM_CPU *cpu;
  int i;

  for (i = 0; i < elements; ++i, ++tag)
    {
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->valid && tag->dirty && flush)
        write_line_to_memory (cache, tag);
      tag->valid = 0;
      tag->locked = 0;
    }

  /* If this is the insn cache, then flush the cpu's scache as well.  */
  cpu = cache->cpu;
  if (cache == CPU_INSN_CACHE (cpu))
    scache_flush_cpu (cpu);

  /* Invalidate both return buffers.  */
  cache->pipeline[LS].status.return_buffer.valid = 0;
  cache->pipeline[LD].status.return_buffer.valid = 0;

  return 1; /* TODO - number of cycles unknown */
}

/* ---------------------------------------------------------------------------
   Functions for operating the cache in cycle accurate mode.
   ------------------------------------------------------------------------- */
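
/* Each cache has two request pipelines: LS, fed by VLIW slots I0 and C,
   and LD, fed by slot I1 (see convert_slot_to_index below).  On every
   simulated cycle, frv_cache_run advances each pipeline by one stage,
   executes the request that has reached LAST_STAGE, and counts down the
   latency of any requests parked in WAR registers waiting on memory.  */
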
/* Convert a VLIW slot to a cache pipeline index.  */
static int
convert_slot_to_index (int slot)
{
  switch (slot)
    {
    case UNIT_I0:
    case UNIT_C:
      return LS;
    case UNIT_I1:
      return LD;
    default:
      abort ();
    }
  return 0;
}

/* Allocate free chains of cache requests.  */
#define FREE_CHAIN_SIZE 16
static FRV_CACHE_REQUEST *frv_cache_request_free_chain = NULL;
static FRV_CACHE_REQUEST *frv_store_request_free_chain = NULL;

static void
allocate_new_cache_requests (void)
{
  int i;
  frv_cache_request_free_chain = xmalloc (FREE_CHAIN_SIZE
                                          * sizeof (FRV_CACHE_REQUEST));
  for (i = 0; i < FREE_CHAIN_SIZE - 1; ++i)
    {
      frv_cache_request_free_chain[i].next
        = & frv_cache_request_free_chain[i + 1];
    }

  frv_cache_request_free_chain[FREE_CHAIN_SIZE - 1].next = NULL;
}

/* Return the next free request in the queue for the given cache pipeline.  */
static FRV_CACHE_REQUEST *
new_cache_request (void)
{
  FRV_CACHE_REQUEST *req;

  /* Allocate new elements for the free chain if necessary.  */
  if (frv_cache_request_free_chain == NULL)
    allocate_new_cache_requests ();

  req = frv_cache_request_free_chain;
  frv_cache_request_free_chain = req->next;

  return req;
}

/* Return the given cache request to the free chain.  */
static void
free_cache_request (FRV_CACHE_REQUEST *req)
{
  if (req->kind == req_store)
    {
      req->next = frv_store_request_free_chain;
      frv_store_request_free_chain = req;
    }
  else
    {
      req->next = frv_cache_request_free_chain;
      frv_cache_request_free_chain = req;
    }
}

/* Search the free chain for an existing store request whose buffer is the
   right size.  */
static FRV_CACHE_REQUEST *
new_store_request (int length)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *req;
  for (req = frv_store_request_free_chain; req != NULL; req = req->next)
    {
      if (req->u.store.length == length)
        break;
      prev = req;
    }
  if (req != NULL)
    {
      if (prev == NULL)
        frv_store_request_free_chain = req->next;
      else
        prev->next = req->next;
      return req;
    }

  /* No existing request buffer was found, so make a new one.  */
  req = new_cache_request ();
  req->kind = req_store;
  req->u.store.data = xmalloc (length);
  req->u.store.length = length;
  return req;
}

/* Remove the given request from the given pipeline.  */
static void
pipeline_remove_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *next = request->next;
  FRV_CACHE_REQUEST *prev = request->prev;

  if (prev == NULL)
    p->requests = next;
  else
    prev->next = next;

  if (next != NULL)
    next->prev = prev;
}

/* Add the given request to the given pipeline.  */
static void
pipeline_add_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *item;

  /* Add the request in priority order.  0 is the highest priority.  */
  for (item = p->requests; item != NULL; item = item->next)
    {
      if (item->priority > request->priority)
        break;
      prev = item;
    }

  request->next = item;
  request->prev = prev;
  if (prev == NULL)
    p->requests = request;
  else
    prev->next = request;
  if (item != NULL)
    item->prev = request;
}

/* Requeue the request in the last stage of the given pipeline.  */
static void
pipeline_requeue_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_STAGE *stage = & p->stages[LAST_STAGE];
  FRV_CACHE_REQUEST *req = stage->request;
  stage->request = NULL;
  pipeline_add_request (p, req);
}

/* Return a priority lower than the lowest one in this cache pipeline.
   0 is the highest priority.  */
static int
next_priority (FRV_CACHE *cache, FRV_CACHE_PIPELINE *pipeline)
{
  int i;
  int pipe;
  int lowest = 0;
  FRV_CACHE_REQUEST *req;

  /* Check the priorities of any queued items.  */
  for (req = pipeline->requests; req != NULL; req = req->next)
    if (req->priority > lowest)
      lowest = req->priority;

  /* Check the priorities of items in the pipeline stages.  */
  for (i = FIRST_STAGE; i < FRV_CACHE_STAGES; ++i)
    {
      FRV_CACHE_STAGE *stage = & pipeline->stages[i];
      if (stage->request != NULL && stage->request->priority > lowest)
        lowest = stage->request->priority;
    }

  /* Check the priorities of load requests waiting in WAR.  These are one
     higher than the request that spawned them.  */
  for (i = 0; i < NUM_WARS; ++i)
    {
      FRV_CACHE_WAR *war = & pipeline->WAR[i];
      if (war->valid && war->priority > lowest)
        lowest = war->priority + 1;
    }

  /* Check the priorities of any BARS or NARS associated with this pipeline.
     These are one higher than the request that spawned them.  */
  pipe = pipeline - cache->pipeline;
  if (cache->BARS.valid && cache->BARS.pipe == pipe
      && cache->BARS.priority > lowest)
    lowest = cache->BARS.priority + 1;
  if (cache->NARS.valid && cache->NARS.pipe == pipe
      && cache->NARS.priority > lowest)
    lowest = cache->NARS.priority + 1;

  /* Return a priority 2 lower than the lowest found.  This allows a WAR
     request to be generated with a priority greater than this but less than
     the next higher priority request.  */
  return lowest + 2;
}
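
/* E.g. if the numerically largest (i.e. lowest) priority in flight is 7,
   the next request is assigned 9, leaving 8 free for a WAR request that
   it may later spawn (wait_in_WAR assigns WARs the spawning request's
   priority minus 1).  */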

static void
add_WAR_request (FRV_CACHE_PIPELINE* pipeline, FRV_CACHE_WAR *war)
{
  /* Add the load request to the indexed pipeline.  */
  FRV_CACHE_REQUEST *req = new_cache_request ();
  req->kind = req_WAR;
  req->reqno = war->reqno;
  req->priority = war->priority;
  req->address = war->address;
  req->u.WAR.preload = war->preload;
  req->u.WAR.lock = war->lock;
  pipeline_add_request (pipeline, req);
}

/* Remove the next request from the given pipeline and return it.  */
static FRV_CACHE_REQUEST *
pipeline_next_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_REQUEST *first = p->requests;
  if (first != NULL)
    pipeline_remove_request (p, first);
  return first;
}

/* Return the request which is at the given stage of the given pipeline.  */
static FRV_CACHE_REQUEST *
pipeline_stage_request (FRV_CACHE_PIPELINE *p, int stage)
{
  return p->stages[stage].request;
}

static void
advance_pipelines (FRV_CACHE *cache)
{
  int stage;
  int pipe;
  FRV_CACHE_PIPELINE *pipelines = cache->pipeline;

  /* Free the final stage requests.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req = pipelines[pipe].stages[LAST_STAGE].request;
      if (req != NULL)
        free_cache_request (req);
    }

  /* Shuffle the requests along the pipeline.  */
  for (stage = LAST_STAGE; stage > FIRST_STAGE; --stage)
    {
      for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
        pipelines[pipe].stages[stage] = pipelines[pipe].stages[stage - 1];
    }

  /* Add a new request to the pipeline.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    pipelines[pipe].stages[FIRST_STAGE].request
      = pipeline_next_request (& pipelines[pipe]);
}

/* Handle a request for a load from the given address.  */
void
frv_cache_request_load (FRV_CACHE *cache, unsigned reqno, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the load request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_load;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}

void
frv_cache_request_store (FRV_CACHE *cache, SI address,
                         int slot, char *data, unsigned length)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the store request to the indexed pipeline.  */
  req = new_store_request (length);
  req->kind = req_store;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.store.length = length;
  memcpy (req->u.store.data, data, length);

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}

/* Handle a request to invalidate the cache line containing the given address.
   Flush the data if requested.  */
void
frv_cache_request_invalidate (FRV_CACHE *cache, unsigned reqno, SI address,
                              int slot, int all, int flush)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the invalidate request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_invalidate;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.invalidate.all = all;
  req->u.invalidate.flush = flush;

  pipeline_add_request (pipeline, req);
}

/* Handle a request to preload the cache line containing the given address.  */
void
frv_cache_request_preload (FRV_CACHE *cache, SI address,
                           int slot, int length, int lock)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the preload request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_preload;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.preload.length = length;
  req->u.preload.lock = lock;

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}

/* Handle a request to unlock the cache line containing the given address.  */
void
frv_cache_request_unlock (FRV_CACHE *cache, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the unlock request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_unlock;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}

/* Check whether this address interferes with a pending request of
   higher priority.  */
static int
address_interference (FRV_CACHE *cache, SI address, FRV_CACHE_REQUEST *req,
                      int pipe)
{
  int i, j;
  int line_mask = ~(cache->line_size - 1);
  int other_pipe;
  int priority = req->priority;
  FRV_CACHE_REQUEST *other_req;
  SI other_address;
  SI all_address;

  address &= line_mask;
  all_address = -1 & line_mask;

  /* Check for collisions in the queue for this pipeline.  */
  for (other_req = cache->pipeline[pipe].requests;
       other_req != NULL;
       other_req = other_req->next)
    {
      other_address = other_req->address & line_mask;
      if ((address == other_address || address == all_address)
          && priority > other_req->priority)
        return 1;
    }

  /* Check for a collision in the other pipeline.  */
  other_pipe = pipe ^ 1;
  other_req = cache->pipeline[other_pipe].stages[LAST_STAGE].request;
  if (other_req != NULL)
    {
      other_address = other_req->address & line_mask;
      if (address == other_address || address == all_address)
        return 1;
    }

  /* Check for a collision with load requests waiting in WAR.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & cache->pipeline[i].WAR[j];
          if (war->valid
              && (address == (war->address & line_mask)
                  || address == all_address)
              && priority > war->priority)
            return 1;
        }
      /* If this is not a WAR request, then yield to any WAR requests in
         either pipeline or to a higher priority request in the same
         pipeline.  */
      if (req->kind != req_WAR)
        {
          for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
            {
              other_req = cache->pipeline[i].stages[j].request;
              if (other_req != NULL)
                {
                  if (other_req->kind == req_WAR)
                    return 1;
                  if (i == pipe
                      && (address == (other_req->address & line_mask)
                          || address == all_address)
                      && priority > other_req->priority)
                    return 1;
                }
            }
        }
    }

  /* Check for a collision with load requests waiting in BARS or NARS.  */
  if (cache->BARS.valid
      && (address == (cache->BARS.address & line_mask)
          || address == all_address)
      && priority > cache->BARS.priority)
    return 1;
  if (cache->NARS.valid
      && (address == (cache->NARS.address & line_mask)
          || address == all_address)
      && priority > cache->NARS.priority)
    return 1;

  return 0;
}

/* Wait for a free WAR register in BARS or NARS.  */
static void
wait_for_WAR (FRV_CACHE* cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  if (! cache->BARS.valid)
    {
      cache->BARS.pipe = pipe;
      cache->BARS.reqno = req->reqno;
      cache->BARS.address = req->address;
      cache->BARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->BARS.preload = 0;
          cache->BARS.lock = 0;
          break;
        case req_store:
          cache->BARS.preload = 1;
          cache->BARS.lock = 0;
          break;
        case req_preload:
          cache->BARS.preload = 1;
          cache->BARS.lock = req->u.preload.lock;
          break;
        }
      cache->BARS.valid = 1;
      return;
    }
  if (! cache->NARS.valid)
    {
      cache->NARS.pipe = pipe;
      cache->NARS.reqno = req->reqno;
      cache->NARS.address = req->address;
      cache->NARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->NARS.preload = 0;
          cache->NARS.lock = 0;
          break;
        case req_store:
          cache->NARS.preload = 1;
          cache->NARS.lock = 0;
          break;
        case req_preload:
          cache->NARS.preload = 1;
          cache->NARS.lock = req->u.preload.lock;
          break;
        }
      cache->NARS.valid = 1;
      return;
    }
  /* All wait registers are busy, so resubmit this request.  */
  pipeline_requeue_request (pipeline);
}

/* Find a free WAR register and wait for memory to fetch the data.  */
static void
wait_in_WAR (FRV_CACHE* cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int war;
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Find a free WAR to hold this request.  */
  for (war = 0; war < NUM_WARS; ++war)
    if (! pipeline->WAR[war].valid)
      break;
  if (war >= NUM_WARS)
    {
      wait_for_WAR (cache, pipe, req);
      return;
    }

  pipeline->WAR[war].address = req->address;
  pipeline->WAR[war].reqno = req->reqno;
  pipeline->WAR[war].priority = req->priority - 1;
  pipeline->WAR[war].latency = cache->memory_latency + 1;
  switch (req->kind)
    {
    case req_load:
      pipeline->WAR[war].preload = 0;
      pipeline->WAR[war].lock = 0;
      break;
    case req_store:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = 0;
      break;
    case req_preload:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = req->u.preload.lock;
      break;
    }
  pipeline->WAR[war].valid = 1;
}
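
/* A request parked in a WAR waits memory_latency + 1 cycles (21 with the
   default latency of 20) before decrease_latencies re-injects it into its
   pipeline as a req_WAR request.  */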

static void
handle_req_load (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      int found = get_tag (cache, address, &tag);

      /* If the data was found, return it to the caller.  */
      if (found)
        {
          set_most_recently_used (cache, tag);
          copy_line_to_return_buffer (cache, pipe, tag, address);
          set_return_buffer_reqno (cache, pipe, req->reqno);
          return;
        }
    }

  /* The data is not in the cache or this is a non-cache access.  We need to
     wait for the memory unit to fetch it.  Store this request in the WAR in
     the meantime.  */
  wait_in_WAR (cache, pipe, req);
}

static void
handle_req_preload (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int found;
  FRV_CACHE_TAG *tag;
  int length;
  int lock;
  int offset;
  int lines;
  int line;
  SI address = req->address;
  SI cur_address;

  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    return;

  /* Preload at least one line.  */
  length = req->u.preload.length;
  if (length == 0)
    length = 1;

  /* Make sure that this request does not interfere with a pending request.  */
  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If this address interferes with an existing request,
         then requeue it.  */
      if (address_interference (cache, cur_address, req, pipe))
        {
          pipeline_requeue_request (& cache->pipeline[pipe]);
          return;
        }
      cur_address += cache->line_size;
    }

  /* Now process each cache line.  */
  lock = req->u.preload.lock;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If the data was found, then lock it if requested.  */
      found = get_tag (cache, cur_address, &tag);
      if (found)
        {
          if (lock)
            tag->locked = 1;
        }
      else
        {
          /* The data is not in the cache.  We need to wait for the memory
             unit to fetch it.  Store this request in the WAR in the
             meantime.  */
          wait_in_WAR (cache, pipe, req);
        }
      cur_address += cache->line_size;
    }
}

static void
handle_req_store (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  SIM_CPU *current_cpu;
  FRV_CACHE_TAG *tag;
  int found;
  int copy_back;
  SI address = req->address;
  char *data = req->u.store.data;
  int length = req->u.store.length;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  /* Non-cache access.  Write the data directly to memory.  */
  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return;
    }

  /* See if the data is in the cache.  */
  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  current_cpu = cache->cpu;
  copy_back = GET_HSR0_CBM (GET_HSR0 ());
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        {
          /* We need to wait for the memory unit to fetch the data.
             Store this request in the WAR and requeue the store request.  */
          wait_in_WAR (cache, pipe, req);
          pipeline_requeue_request (& cache->pipeline[pipe]);
          /* Decrement the counts of accesses and hits because when the
             requeued request is processed again, it will appear to be a new
             access and a hit.  */
          --cache->statistics.accesses;
          --cache->statistics.hits;
          return;
        }
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      invalidate_return_buffer (cache, address);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back) mode.  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }
}

static void
handle_req_invalidate (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;
  SI interfere_address = req->u.invalidate.all ? -1 : address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, interfere_address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Invalidate the cache line now.  This function already checks for
     non-cache access.  */
  if (req->u.invalidate.all)
    frv_cache_invalidate_all (cache, req->u.invalidate.flush);
  else
    frv_cache_invalidate (cache, address, req->u.invalidate.flush);
  if (req->u.invalidate.flush)
    {
      pipeline->status.flush.reqno = req->reqno;
      pipeline->status.flush.address = address;
      pipeline->status.flush.valid = 1;
    }
}

static void
handle_req_unlock (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Unlock the cache line.  This function checks for non-cache access.  */
  frv_cache_unlock (cache, address);
}

static void
handle_req_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      /* Look for the data in the cache.  The statistics of cache hit or
         miss have already been recorded, so save and restore the stats
         before and after obtaining the cache line.  */
      FRV_CACHE_STATISTICS save_stats = cache->statistics;
      tag = find_or_retrieve_cache_line (cache, address);
      cache->statistics = save_stats;
      if (tag != NULL)
        {
          if (! req->u.WAR.preload)
            {
              copy_line_to_return_buffer (cache, pipe, tag, address);
              set_return_buffer_reqno (cache, pipe, req->reqno);
            }
          else
            {
              invalidate_return_buffer (cache, address);
              if (req->u.WAR.lock)
                tag->locked = 1;
            }
          return;
        }
    }

  /* All cache lines in the set were locked, so just copy the data to the
     return buffer directly.  */
  if (! req->u.WAR.preload)
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      set_return_buffer_reqno (cache, pipe, req->reqno);
    }
}

/* Resolve any conflicts and/or execute the given requests.  */
static void
arbitrate_requests (FRV_CACHE *cache)
{
  int pipe;
  /* Simply execute the requests in the final pipeline stages.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req
        = pipeline_stage_request (& cache->pipeline[pipe], LAST_STAGE);
      /* Make sure that there is a request to handle.  */
      if (req == NULL)
        continue;

      /* Handle the request.  */
      switch (req->kind)
        {
        case req_load:
          handle_req_load (cache, pipe, req);
          break;
        case req_store:
          handle_req_store (cache, pipe, req);
          break;
        case req_invalidate:
          handle_req_invalidate (cache, pipe, req);
          break;
        case req_preload:
          handle_req_preload (cache, pipe, req);
          break;
        case req_unlock:
          handle_req_unlock (cache, pipe, req);
          break;
        case req_WAR:
          handle_req_WAR (cache, pipe, req);
          break;
        default:
          abort ();
        }
    }
}

/* Move a waiting ARS register to a free WAR register.  */
static void
move_ARS_to_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_WAR *war)
{
  /* If BARS is valid for this pipe, then move it to the given WAR.  Move
     NARS to BARS if it is valid.  */
  if (cache->BARS.valid && cache->BARS.pipe == pipe)
    {
      war->address = cache->BARS.address;
      war->reqno = cache->BARS.reqno;
      war->priority = cache->BARS.priority;
      war->preload = cache->BARS.preload;
      war->lock = cache->BARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      if (cache->NARS.valid)
        {
          cache->BARS = cache->NARS;
          cache->NARS.valid = 0;
        }
      else
        cache->BARS.valid = 0;
      return;
    }
  /* If NARS is valid for this pipe, then move it to the given WAR.  */
  if (cache->NARS.valid && cache->NARS.pipe == pipe)
    {
      war->address = cache->NARS.address;
      war->reqno = cache->NARS.reqno;
      war->priority = cache->NARS.priority;
      war->preload = cache->NARS.preload;
      war->lock = cache->NARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      cache->NARS.valid = 0;
    }
}

/* Decrease the latencies of the various states in the cache.  */
static void
decrease_latencies (FRV_CACHE *cache)
{
  int pipe, j;
  /* Check the WAR registers.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & pipeline->WAR[j];
          if (war->valid)
            {
              --war->latency;
              /* If the latency has expired, then submit a WAR request to
                 the pipeline.  */
              if (war->latency <= 0)
                {
                  add_WAR_request (pipeline, war);
                  war->valid = 0;
                  move_ARS_to_WAR (cache, pipe, war);
                }
            }
        }
    }
}

/* Run the cache for the given number of cycles.  */
void
frv_cache_run (FRV_CACHE *cache, int cycles)
{
  int i;
  for (i = 0; i < cycles; ++i)
    {
      advance_pipelines (cache);
      arbitrate_requests (cache);
      decrease_latencies (cache);
    }
}
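
/* A sketch of how a caller might drive this: queue a load, then step the
   cache until the data lands in the return buffer, e.g.

     frv_cache_request_load (cache, reqno, address, UNIT_I0);
     while (! frv_cache_data_in_buffer (cache, LS, address, reqno))
       frv_cache_run (cache, 1);

   In the simulator the stepping is driven once per machine cycle by the
   timing model rather than by a busy-wait like this.  */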

int
frv_cache_read_passive_SI (FRV_CACHE *cache, SI address, SI *value)
{
  SI offset;
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    return 0;

  {
    FRV_CACHE_STATISTICS save_stats = cache->statistics;
    int found = get_tag (cache, address, &tag);
    cache->statistics = save_stats;

    if (! found)
      return 0; /* Indicate non-cache-access.  */
  }

  /* A cache line was available for the data.
     Extract the target data from the line.  */
  offset = address & (cache->line_size - 1);
  *value = T2H_4 (*(SI *)(tag->line + offset));
  return 1;
}

/* Check the return buffers of the data cache to see if the requested data is
   available.  */
int
frv_cache_data_in_buffer (FRV_CACHE* cache, int pipe, SI address,
                          unsigned reqno)
{
  return cache->pipeline[pipe].status.return_buffer.valid
    && cache->pipeline[pipe].status.return_buffer.reqno == reqno
    && cache->pipeline[pipe].status.return_buffer.address <= address
    && cache->pipeline[pipe].status.return_buffer.address + cache->line_size
       > address;
}

/* Check to see if the requested data has been flushed.  */
int
frv_cache_data_flushed (FRV_CACHE* cache, int pipe, SI address, unsigned reqno)
{
  return cache->pipeline[pipe].status.flush.valid
    && cache->pipeline[pipe].status.flush.reqno == reqno
    && cache->pipeline[pipe].status.flush.address <= address
    && cache->pipeline[pipe].status.flush.address + cache->line_size
       > address;
}