1 /* Caching code for GDB, the GNU debugger.
3 Copyright (C) 1992-2014 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
25 #include "target-dcache.h"
27 #include "splay-tree.h"
29 /* Commands with a prefix of `{set,show} dcache'. */
30 static struct cmd_list_element
*dcache_set_list
= NULL
;
31 static struct cmd_list_element
*dcache_show_list
= NULL
;
33 /* The data cache could lead to incorrect results because it doesn't
34 know about volatile variables, thus making it impossible to debug
35 functions which use memory mapped I/O devices. Set the nocache
36 memory region attribute in those cases.
38 In general the dcache speeds up performance. Some speed improvement
39 comes from the actual caching mechanism, but the major gain is in
40 the reduction of the remote protocol overhead; instead of reading
41 or writing a large area of memory in 4 byte requests, the cache
42 bundles up the requests into LINE_SIZE chunks, reducing overhead
43 significantly. This is most useful when accessing a large amount
44 of data, such as when performing a backtrace.
46 The cache is a splay tree along with a linked list for replacement.
47 Each block caches a LINE_SIZE area of memory. Within each line we
48 remember the address of the line (which must be a multiple of
49 LINE_SIZE) and the actual data block.
51 Lines are only allocated as needed, so DCACHE_SIZE really specifies the
52 *maximum* number of lines in the cache.
54 At present, the cache is write-through rather than writeback: as soon
55 as data is written to the cache, it is also immediately written to
56 the target. Therefore, cache lines are never "dirty". Whether a given
57 line is valid or not depends on where it is stored in the dcache_struct;
58 there is no per-block valid flag. */
60 /* NOTE: Interaction of dcache and memory region attributes
62 As there is no requirement that memory region attributes be aligned
63 to or be a multiple of the dcache page size, dcache_read_line() and
64 dcache_write_line() must break up the page by memory region. If a
65 chunk does not have the cache attribute set, an invalid memory type
66 is set, etc., then the chunk is skipped. Those chunks are handled
67 in target_xfer_memory() (or target_xfer_memory_partial()).
69 This doesn't occur very often. The most common occurrence is when
70 the last bit of the .text segment and the first bit of the .data
71 segment fall within the same dcache page with a ro/cacheable memory
72 region defined for the .text segment and a rw/non-cacheable memory
73 region defined for the .data segment. */
75 /* The maximum number of lines stored. The total size of the cache is
76 equal to DCACHE_SIZE times LINE_SIZE. */
#define DCACHE_DEFAULT_SIZE 4096
static unsigned dcache_size = DCACHE_DEFAULT_SIZE;

/* The default size of a cache line.  Smaller values reduce the time taken to
   read a single byte and make the cache more granular, but increase
   overhead and reduce the effectiveness of the cache as a prefetcher.  */
#define DCACHE_DEFAULT_LINE_SIZE 64
static unsigned dcache_line_size = DCACHE_DEFAULT_LINE_SIZE;

/* Each cache block holds LINE_SIZE bytes of data
   starting at a multiple-of-LINE_SIZE address.  */

/* Mask of the offset bits within a line.  The DCACHE argument is
   parenthesized so the macro is safe for any pointer expression.  */
#define LINE_SIZE_MASK(dcache)  (((dcache)->line_size - 1))
/* Offset of ADDR within its cache line.  */
#define XFORM(dcache, x)        ((x) & LINE_SIZE_MASK (dcache))
/* ADDR rounded down to the start of its cache line.  */
#define MASK(dcache, x)         ((x) & ~LINE_SIZE_MASK (dcache))
95 /* For least-recently-allocated and free lists. */
96 struct dcache_block
*prev
;
97 struct dcache_block
*next
;
99 CORE_ADDR addr
; /* address of data */
100 int refs
; /* # hits */
101 gdb_byte data
[1]; /* line_size bytes at given address */
107 struct dcache_block
*oldest
; /* least-recently-allocated list. */
109 /* The free list is maintained identically to OLDEST to simplify
110 the code: we only need one set of accessors. */
111 struct dcache_block
*freelist
;
113 /* The number of in-use lines in the cache. */
115 CORE_ADDR line_size
; /* current line_size. */
117 /* The ptid of last inferior to use cache or null_ptid. */
121 typedef void (block_func
) (struct dcache_block
*block
, void *param
);
123 static struct dcache_block
*dcache_hit (DCACHE
*dcache
, CORE_ADDR addr
);
125 static int dcache_read_line (DCACHE
*dcache
, struct dcache_block
*db
);
127 static struct dcache_block
*dcache_alloc (DCACHE
*dcache
, CORE_ADDR addr
);
129 static void dcache_info (char *exp
, int tty
);
131 void _initialize_dcache (void);
133 static int dcache_enabled_p
= 0; /* OBSOLETE */
/* "show remotecache" handler: the flag is deprecated and has no effect,
   so just report its (meaningless) value.  */

static void
show_dcache_enabled_p (struct ui_file *file, int from_tty,
		       struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Deprecated remotecache flag is %s.\n"), value);
}
142 /* Add BLOCK to circular block list BLIST, behind the block at *BLIST.
143 *BLIST is not updated (unless it was previously NULL of course).
144 This is for the least-recently-allocated list's sake:
145 BLIST points to the oldest block.
146 ??? This makes for poor cache usage of the free list,
147 but is it measurable? */
150 append_block (struct dcache_block
**blist
, struct dcache_block
*block
)
154 block
->next
= *blist
;
155 block
->prev
= (*blist
)->prev
;
156 block
->prev
->next
= block
;
157 (*blist
)->prev
= block
;
158 /* We don't update *BLIST here to maintain the invariant that for the
159 least-recently-allocated list *BLIST points to the oldest block. */
169 /* Remove BLOCK from circular block list BLIST. */
172 remove_block (struct dcache_block
**blist
, struct dcache_block
*block
)
174 if (block
->next
== block
)
180 block
->next
->prev
= block
->prev
;
181 block
->prev
->next
= block
->next
;
182 /* If we removed the block *BLIST points to, shift it to the next block
183 to maintain the invariant that for the least-recently-allocated list
184 *BLIST points to the oldest block. */
186 *blist
= block
->next
;
190 /* Iterate over all elements in BLIST, calling FUNC.
191 PARAM is passed to FUNC.
192 FUNC may remove the block it's passed, but only that block. */
195 for_each_block (struct dcache_block
**blist
, block_func
*func
, void *param
)
197 struct dcache_block
*db
;
205 struct dcache_block
*next
= db
->next
;
210 while (*blist
&& db
!= *blist
);
/* BLOCK_FUNC routine for dcache_free: release one block.  */

static void
free_block (struct dcache_block *block, void *param)
{
  xfree (block);
}
221 /* Free a data cache. */
224 dcache_free (DCACHE
*dcache
)
226 splay_tree_delete (dcache
->tree
);
227 for_each_block (&dcache
->oldest
, free_block
, NULL
);
228 for_each_block (&dcache
->freelist
, free_block
, NULL
);
233 /* BLOCK_FUNC function for dcache_invalidate.
234 This doesn't remove the block from the oldest list on purpose.
235 dcache_invalidate will do it later. */
238 invalidate_block (struct dcache_block
*block
, void *param
)
240 DCACHE
*dcache
= (DCACHE
*) param
;
242 splay_tree_remove (dcache
->tree
, (splay_tree_key
) block
->addr
);
243 append_block (&dcache
->freelist
, block
);
246 /* Free all the data cache blocks, thus discarding all cached data. */
249 dcache_invalidate (DCACHE
*dcache
)
251 for_each_block (&dcache
->oldest
, invalidate_block
, dcache
);
253 dcache
->oldest
= NULL
;
255 dcache
->ptid
= null_ptid
;
257 if (dcache
->line_size
!= dcache_line_size
)
259 /* We've been asked to use a different line size.
260 All of our freelist blocks are now the wrong size, so free them. */
262 for_each_block (&dcache
->freelist
, free_block
, dcache
);
263 dcache
->freelist
= NULL
;
264 dcache
->line_size
= dcache_line_size
;
268 /* Invalidate the line associated with ADDR. */
271 dcache_invalidate_line (DCACHE
*dcache
, CORE_ADDR addr
)
273 struct dcache_block
*db
= dcache_hit (dcache
, addr
);
277 splay_tree_remove (dcache
->tree
, (splay_tree_key
) db
->addr
);
278 remove_block (&dcache
->oldest
, db
);
279 append_block (&dcache
->freelist
, db
);
284 /* If addr is present in the dcache, return the address of the block
285 containing it. Otherwise return NULL. */
287 static struct dcache_block
*
288 dcache_hit (DCACHE
*dcache
, CORE_ADDR addr
)
290 struct dcache_block
*db
;
292 splay_tree_node node
= splay_tree_lookup (dcache
->tree
,
293 (splay_tree_key
) MASK (dcache
, addr
));
298 db
= (struct dcache_block
*) node
->value
;
303 /* Fill a cache line from target memory.
304 The result is 1 for success, 0 if the (entire) cache line
308 dcache_read_line (DCACHE
*dcache
, struct dcache_block
*db
)
315 struct mem_region
*region
;
317 len
= dcache
->line_size
;
323 /* Don't overrun if this block is right at the end of the region. */
324 region
= lookup_mem_region (memaddr
);
325 if (region
->hi
== 0 || memaddr
+ len
< region
->hi
)
328 reg_len
= region
->hi
- memaddr
;
330 /* Skip non-readable regions. The cache attribute can be ignored,
331 since we may be loading this for a stack access. */
332 if (region
->attrib
.mode
== MEM_WO
)
340 res
= target_read_raw_memory (memaddr
, myaddr
, reg_len
);
352 /* Get a free cache block, put or keep it on the valid list,
353 and return its address. */
355 static struct dcache_block
*
356 dcache_alloc (DCACHE
*dcache
, CORE_ADDR addr
)
358 struct dcache_block
*db
;
360 if (dcache
->size
>= dcache_size
)
362 /* Evict the least recently allocated line. */
364 remove_block (&dcache
->oldest
, db
);
366 splay_tree_remove (dcache
->tree
, (splay_tree_key
) db
->addr
);
370 db
= dcache
->freelist
;
372 remove_block (&dcache
->freelist
, db
);
374 db
= xmalloc (offsetof (struct dcache_block
, data
) +
380 db
->addr
= MASK (dcache
, addr
);
383 /* Put DB at the end of the list, it's the newest. */
384 append_block (&dcache
->oldest
, db
);
386 splay_tree_insert (dcache
->tree
, (splay_tree_key
) db
->addr
,
387 (splay_tree_value
) db
);
392 /* Using the data cache DCACHE, store in *PTR the contents of the byte at
393 address ADDR in the remote machine.
395 Returns 1 for success, 0 for error. */
398 dcache_peek_byte (DCACHE
*dcache
, CORE_ADDR addr
, gdb_byte
*ptr
)
400 struct dcache_block
*db
= dcache_hit (dcache
, addr
);
404 db
= dcache_alloc (dcache
, addr
);
406 if (!dcache_read_line (dcache
, db
))
410 *ptr
= db
->data
[XFORM (dcache
, addr
)];
414 /* Write the byte at PTR into ADDR in the data cache.
416 The caller is responsible for also promptly writing the data
417 through to target memory.
419 If addr is not in cache, this function does nothing; writing to
420 an area of memory which wasn't present in the cache doesn't cause
423 Always return 1 (meaning success) to simplify dcache_xfer_memory. */
426 dcache_poke_byte (DCACHE
*dcache
, CORE_ADDR addr
, gdb_byte
*ptr
)
428 struct dcache_block
*db
= dcache_hit (dcache
, addr
);
431 db
->data
[XFORM (dcache
, addr
)] = *ptr
;
437 dcache_splay_tree_compare (splay_tree_key a
, splay_tree_key b
)
447 /* Allocate and initialize a data cache. */
454 dcache
= (DCACHE
*) xmalloc (sizeof (*dcache
));
456 dcache
->tree
= splay_tree_new (dcache_splay_tree_compare
,
460 dcache
->oldest
= NULL
;
461 dcache
->freelist
= NULL
;
463 dcache
->line_size
= dcache_line_size
;
464 dcache
->ptid
= null_ptid
;
470 /* Read or write LEN bytes from inferior memory at MEMADDR, transferring
471 to or from debugger address MYADDR. Write to inferior if SHOULD_WRITE is
474 Return the number of bytes actually transfered, or -1 if the
475 transfer is not supported or otherwise fails. Return of a non-negative
476 value less than LEN indicates that no further transfer is possible.
477 NOTE: This is different than the to_xfer_partial interface, in which
478 positive values less than LEN mean further transfers may be possible. */
481 dcache_xfer_memory (struct target_ops
*ops
, DCACHE
*dcache
,
482 CORE_ADDR memaddr
, gdb_byte
*myaddr
,
483 int len
, int should_write
)
487 int (*xfunc
) (DCACHE
*dcache
, CORE_ADDR addr
, gdb_byte
*ptr
);
489 xfunc
= should_write
? dcache_poke_byte
: dcache_peek_byte
;
491 /* If this is a different inferior from what we've recorded,
494 if (! ptid_equal (inferior_ptid
, dcache
->ptid
))
496 dcache_invalidate (dcache
);
497 dcache
->ptid
= inferior_ptid
;
500 /* Do write-through first, so that if it fails, we don't write to
505 res
= target_write (ops
, TARGET_OBJECT_RAW_MEMORY
,
506 NULL
, myaddr
, memaddr
, len
);
509 /* Update LEN to what was actually written. */
513 for (i
= 0; i
< len
; i
++)
515 if (!xfunc (dcache
, memaddr
+ i
, myaddr
+ i
))
517 /* That failed. Discard its cache line so we don't have a
518 partially read line. */
519 dcache_invalidate_line (dcache
, memaddr
+ i
);
520 /* If we're writing, we still wrote LEN bytes. */
531 /* FIXME: There would be some benefit to making the cache write-back and
532 moving the writeback operation to a higher layer, as it could occur
533 after a sequence of smaller writes have been completed (as when a stack
534 frame is constructed for an inferior function call). Note that only
535 moving it up one level to target_xfer_memory[_partial]() is not
536 sufficient since we want to coalesce memory transfers that are
537 "logically" connected but not actually a single call to one of the
538 memory transfer functions. */
540 /* Just update any cache lines which are already present. This is called
541 by memory_xfer_partial in cases where the access would otherwise not go
542 through the cache. */
545 dcache_update (DCACHE
*dcache
, CORE_ADDR memaddr
, gdb_byte
*myaddr
, int len
)
549 for (i
= 0; i
< len
; i
++)
550 dcache_poke_byte (dcache
, memaddr
+ i
, myaddr
+ i
);
553 /* Print DCACHE line INDEX. */
556 dcache_print_line (DCACHE
*dcache
, int index
)
559 struct dcache_block
*db
;
564 printf_filtered (_("No data cache available.\n"));
568 n
= splay_tree_min (dcache
->tree
);
570 for (i
= index
; i
> 0; --i
)
574 n
= splay_tree_successor (dcache
->tree
, n
->key
);
579 printf_filtered (_("No such cache line exists.\n"));
583 db
= (struct dcache_block
*) n
->value
;
585 printf_filtered (_("Line %d: address %s [%d hits]\n"),
586 index
, paddress (target_gdbarch (), db
->addr
), db
->refs
);
588 for (j
= 0; j
< dcache
->line_size
; j
++)
590 printf_filtered ("%02x ", db
->data
[j
]);
592 /* Print a newline every 16 bytes (48 characters). */
593 if ((j
% 16 == 15) && (j
!= dcache
->line_size
- 1))
594 printf_filtered ("\n");
596 printf_filtered ("\n");
599 /* Parse EXP and show the info about DCACHE. */
602 dcache_info_1 (DCACHE
*dcache
, char *exp
)
611 i
= strtol (exp
, &linestart
, 10);
612 if (linestart
== exp
|| i
< 0)
614 printf_filtered (_("Usage: info dcache [linenumber]\n"));
618 dcache_print_line (dcache
, i
);
622 printf_filtered (_("Dcache %u lines of %u bytes each.\n"),
624 dcache
? (unsigned) dcache
->line_size
627 if (dcache
== NULL
|| ptid_equal (dcache
->ptid
, null_ptid
))
629 printf_filtered (_("No data cache available.\n"));
633 printf_filtered (_("Contains data for %s\n"),
634 target_pid_to_str (dcache
->ptid
));
638 n
= splay_tree_min (dcache
->tree
);
643 struct dcache_block
*db
= (struct dcache_block
*) n
->value
;
645 printf_filtered (_("Line %d: address %s [%d hits]\n"),
646 i
, paddress (target_gdbarch (), db
->addr
), db
->refs
);
648 refcount
+= db
->refs
;
650 n
= splay_tree_successor (dcache
->tree
, n
->key
);
653 printf_filtered (_("Cache state: %d active lines, %d hits\n"), i
, refcount
);
/* "info dcache" command handler: report on the current target's cache.  */

static void
dcache_info (char *exp, int tty)
{
  dcache_info_1 (target_dcache_get (), exp);
}
663 set_dcache_size (char *args
, int from_tty
,
664 struct cmd_list_element
*c
)
666 if (dcache_size
== 0)
668 dcache_size
= DCACHE_DEFAULT_SIZE
;
669 error (_("Dcache size must be greater than 0."));
671 target_dcache_invalidate ();
675 set_dcache_line_size (char *args
, int from_tty
,
676 struct cmd_list_element
*c
)
678 if (dcache_line_size
< 2
679 || (dcache_line_size
& (dcache_line_size
- 1)) != 0)
681 unsigned d
= dcache_line_size
;
682 dcache_line_size
= DCACHE_DEFAULT_LINE_SIZE
;
683 error (_("Invalid dcache line size: %u (must be power of 2)."), d
);
685 target_dcache_invalidate ();
689 set_dcache_command (char *arg
, int from_tty
)
692 "\"set dcache\" must be followed by the name of a subcommand.\n");
693 help_list (dcache_set_list
, "set dcache ", -1, gdb_stdout
);
697 show_dcache_command (char *args
, int from_tty
)
699 cmd_show_list (dcache_show_list
, from_tty
, "");
703 _initialize_dcache (void)
705 add_setshow_boolean_cmd ("remotecache", class_support
,
706 &dcache_enabled_p
, _("\
707 Set cache use for remote targets."), _("\
708 Show cache use for remote targets."), _("\
709 This used to enable the data cache for remote targets. The cache\n\
710 functionality is now controlled by the memory region system and the\n\
711 \"stack-cache\" flag; \"remotecache\" now does nothing and\n\
712 exists only for compatibility reasons."),
714 show_dcache_enabled_p
,
715 &setlist
, &showlist
);
717 add_info ("dcache", dcache_info
,
719 Print information on the dcache performance.\n\
720 With no arguments, this command prints the cache configuration and a\n\
721 summary of each line in the cache. Use \"info dcache <lineno> to dump\"\n\
722 the contents of a given line."));
724 add_prefix_cmd ("dcache", class_obscure
, set_dcache_command
, _("\
725 Use this command to set number of lines in dcache and line-size."),
726 &dcache_set_list
, "set dcache ", /*allow_unknown*/0, &setlist
);
727 add_prefix_cmd ("dcache", class_obscure
, show_dcache_command
, _("\
728 Show dcachesettings."),
729 &dcache_show_list
, "show dcache ", /*allow_unknown*/0, &showlist
);
731 add_setshow_zuinteger_cmd ("line-size", class_obscure
,
732 &dcache_line_size
, _("\
733 Set dcache line size in bytes (must be power of 2)."), _("\
734 Show dcache line size."),
736 set_dcache_line_size
,
738 &dcache_set_list
, &dcache_show_list
);
739 add_setshow_zuinteger_cmd ("size", class_obscure
,
741 Set number of dcache lines."), _("\
742 Show number of dcache lines."),
746 &dcache_set_list
, &dcache_show_list
);