/* Caching code for GDB, the GNU debugger.

   Copyright (C) 1992, 1993, 1995, 1996, 1998, 1999, 2000, 2001, 2003, 2007,
   2008, 2009, 2010 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "dcache.h"
#include "gdbcmd.h"
#include "gdb_string.h"
#include "gdbcore.h"
#include "target.h"
#include "inferior.h"
#include "splay-tree.h"

/* The data cache could lead to incorrect results because it doesn't
   know about volatile variables, thus making it impossible to debug
   functions which use memory mapped I/O devices.  Set the nocache
   memory region attribute in those cases.

   In general the dcache improves performance.  Some speed improvement
   comes from the actual caching mechanism, but the major gain is in
   the reduction of the remote protocol overhead; instead of reading
   or writing a large area of memory in 4 byte requests, the cache
   bundles up the requests into LINE_SIZE chunks, reducing overhead
   significantly.  This is most useful when accessing a large amount
   of data, such as when performing a backtrace.

   The cache is a splay tree along with a linked list for replacement.
   Each block caches a LINE_SIZE area of memory.  Within each line we
   remember the address of the line (which must be a multiple of
   LINE_SIZE) and the actual data block.

   Lines are only allocated as needed, so DCACHE_SIZE really specifies the
   *maximum* number of lines in the cache.

   At present, the cache is write-through rather than writeback: as soon
   as data is written to the cache, it is also immediately written to
   the target.  Therefore, cache lines are never "dirty".  Whether a given
   line is valid or not depends on where it is stored in the dcache_struct;
   there is no per-block valid flag.  */
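
/* For example, with the default 64-byte LINE_SIZE, a backtrace that reads
   a dozen neighboring 4-byte words from one stack line triggers a single
   64-byte target read; the remaining accesses are satisfied locally with
   no additional remote protocol traffic.  */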

/* NOTE: Interaction of dcache and memory region attributes

   As there is no requirement that memory region attributes be aligned
   to or be a multiple of the dcache page size, dcache_read_line() and
   dcache_write_line() must break up the page by memory region.  If a
   chunk does not have the cache attribute set, an invalid memory type
   is set, etc., then the chunk is skipped.  Those chunks are handled
   in target_xfer_memory() (or target_xfer_memory_partial()).

   This doesn't occur very often.  The most common occurrence is when
   the last bit of the .text segment and the first bit of the .data
   segment fall within the same dcache page with a ro/cacheable memory
   region defined for the .text segment and a rw/non-cacheable memory
   region defined for the .data segment.  */
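
/* As a hypothetical illustration: if a line spans 0x7fc0-0x7fff and a
   memory region boundary falls at 0x7fe0, dcache_read_line () issues one
   target read for 0x7fc0-0x7fdf and a second for 0x7fe0-0x7fff, skipping
   a chunk entirely only if its region is write-only (MEM_WO).  */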

/* The maximum number of lines stored.  The total size of the cache is
   equal to DCACHE_SIZE times LINE_SIZE.  */
#define DCACHE_SIZE 4096

/* The size of a cache line.  Smaller values reduce the time taken to
   read a single byte and make the cache more granular, but increase
   overhead and reduce the effectiveness of the cache as a prefetcher.  */
#define LINE_SIZE_POWER 6
#define LINE_SIZE (1 << LINE_SIZE_POWER)
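
/* With these defaults a line holds 1 << 6 == 64 bytes, so a full cache
   covers at most 4096 * 64 bytes (256 KiB) of target memory.  Because
   lines are allocated lazily, that memory is only consumed as lines are
   actually filled.  */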

/* Each cache block holds LINE_SIZE bytes of data
   starting at a multiple-of-LINE_SIZE address.  */

#define LINE_SIZE_MASK  ((LINE_SIZE - 1))
#define XFORM(x) 	((x) & LINE_SIZE_MASK)
#define MASK(x)         ((x) & ~LINE_SIZE_MASK)
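
/* For example, with LINE_SIZE == 64 (LINE_SIZE_MASK == 0x3f) an access to
   address 0x1234 falls in the line starting at MASK (0x1234) == 0x1200,
   at offset XFORM (0x1234) == 0x34 within that line's data array.  */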

struct dcache_block
{
  /* For the least-recently-allocated and free lists.  */
  struct dcache_block *prev;
  struct dcache_block *next;

  CORE_ADDR addr;		/* address of data */
  gdb_byte data[LINE_SIZE];	/* bytes at given address */
  int refs;			/* # hits */
};

struct dcache_struct
{
  splay_tree tree;
  struct dcache_block *oldest; /* least-recently-allocated list */

  /* The free list is maintained identically to OLDEST to simplify
     the code: we only need one set of accessors.  */
  struct dcache_block *freelist;

  /* The number of in-use lines in the cache.  */
  int size;

  /* The ptid of the last inferior to use the cache, or null_ptid.  */
  ptid_t ptid;
};
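
/* The splay tree is keyed by each line's MASK'd address and maps it to
   the corresponding dcache_block.  OLDEST is a circular list of the
   in-use blocks in allocation order, so its head is always the next
   eviction candidate; FREELIST holds invalidated blocks that can be
   reused without another xmalloc.  */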

typedef void (block_func) (struct dcache_block *block, void *param);

static struct dcache_block *dcache_hit (DCACHE *dcache, CORE_ADDR addr);

static int dcache_read_line (DCACHE *dcache, struct dcache_block *db);

static struct dcache_block *dcache_alloc (DCACHE *dcache, CORE_ADDR addr);

static void dcache_info (char *exp, int tty);

void _initialize_dcache (void);

static int dcache_enabled_p = 0; /* OBSOLETE */

static void
show_dcache_enabled_p (struct ui_file *file, int from_tty,
		       struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Deprecated remotecache flag is %s.\n"), value);
}

static DCACHE *last_cache; /* Used by info dcache */

/* Add BLOCK to circular block list BLIST, behind the block at *BLIST.
   *BLIST is not updated (unless it was previously NULL of course).
   This is for the least-recently-allocated list's sake:
   BLIST points to the oldest block.
   ??? This makes for poor cache usage of the free list,
   but is it measurable?  */

static void
append_block (struct dcache_block **blist, struct dcache_block *block)
{
  if (*blist)
    {
      block->next = *blist;
      block->prev = (*blist)->prev;
      block->prev->next = block;
      (*blist)->prev = block;
      /* We don't update *BLIST here to maintain the invariant that for the
	 least-recently-allocated list *BLIST points to the oldest block.  */
    }
  else
    {
      block->next = block;
      block->prev = block;
      *blist = block;
    }
}
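
/* For example, appending blocks A, B and C to an empty list links them
   circularly as A <-> B <-> C and leaves *BLIST pointing at A; for the
   least-recently-allocated list the head is therefore always the next
   eviction candidate.  */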

/* Remove BLOCK from circular block list BLIST.  */

static void
remove_block (struct dcache_block **blist, struct dcache_block *block)
{
  if (block->next == block)
    {
      *blist = NULL;
    }
  else
    {
      block->next->prev = block->prev;
      block->prev->next = block->next;
      /* If we removed the block *BLIST points to, shift it to the next block
	 to maintain the invariant that for the least-recently-allocated list
	 *BLIST points to the oldest block.  */
      if (*blist == block)
	*blist = block->next;
    }
}

/* Iterate over all elements in BLIST, calling FUNC.
   PARAM is passed to FUNC.
   FUNC may remove the block it's passed, but only that block.  */

static void
for_each_block (struct dcache_block **blist, block_func *func, void *param)
{
  struct dcache_block *db;

  if (*blist == NULL)
    return;

  db = *blist;
  do
    {
      struct dcache_block *next = db->next;

      func (db, param);
      db = next;
    }
  while (*blist && db != *blist);
}

/* BLOCK_FUNC function for dcache_invalidate.
   This doesn't remove the block from the oldest list on purpose.
   dcache_invalidate will do it later.  */

static void
invalidate_block (struct dcache_block *block, void *param)
{
  DCACHE *dcache = (DCACHE *) param;

  splay_tree_remove (dcache->tree, (splay_tree_key) block->addr);
  append_block (&dcache->freelist, block);
}

/* Move all the data cache blocks to the free list, thus discarding all
   cached data.  */

void
dcache_invalidate (DCACHE *dcache)
{
  for_each_block (&dcache->oldest, invalidate_block, dcache);

  dcache->oldest = NULL;
  dcache->size = 0;
  dcache->ptid = null_ptid;
}

/* Invalidate the line associated with ADDR.  */

static void
dcache_invalidate_line (DCACHE *dcache, CORE_ADDR addr)
{
  struct dcache_block *db = dcache_hit (dcache, addr);

  if (db)
    {
      splay_tree_remove (dcache->tree, (splay_tree_key) db->addr);
      remove_block (&dcache->oldest, db);
      append_block (&dcache->freelist, db);
      --dcache->size;
    }
}

/* If addr is present in the dcache, return the address of the block
   containing it.  Otherwise return NULL.  */

static struct dcache_block *
dcache_hit (DCACHE *dcache, CORE_ADDR addr)
{
  struct dcache_block *db;

  splay_tree_node node = splay_tree_lookup (dcache->tree,
					    (splay_tree_key) MASK (addr));

  if (!node)
    return NULL;

  db = (struct dcache_block *) node->value;
  db->refs++;
  return db;
}

/* Fill a cache line from target memory.
   The result is 1 for success, 0 if the (entire) cache line
   wasn't readable.  */

static int
dcache_read_line (DCACHE *dcache, struct dcache_block *db)
{
  CORE_ADDR memaddr;
  gdb_byte *myaddr;
  int len;
  int res;
  int reg_len;
  struct mem_region *region;

  len = LINE_SIZE;
  memaddr = db->addr;
  myaddr  = db->data;

  while (len > 0)
    {
      /* Don't overrun if this block is right at the end of the region.  */
      region = lookup_mem_region (memaddr);
      if (region->hi == 0 || memaddr + len < region->hi)
	reg_len = len;
      else
	reg_len = region->hi - memaddr;

      /* Skip non-readable regions.  The cache attribute can be ignored,
         since we may be loading this for a stack access.  */
      if (region->attrib.mode == MEM_WO)
	{
	  memaddr += reg_len;
	  myaddr  += reg_len;
	  len     -= reg_len;
	  continue;
	}

      res = target_read (&current_target, TARGET_OBJECT_RAW_MEMORY,
			 NULL, myaddr, memaddr, reg_len);
      if (res < reg_len)
	return 0;

      memaddr += res;
      myaddr += res;
      len -= res;
    }

  return 1;
}

/* Get a free cache block, put it on the list of allocated blocks
   (the OLDEST list), and return its address.  */

static struct dcache_block *
dcache_alloc (DCACHE *dcache, CORE_ADDR addr)
{
  struct dcache_block *db;

  if (dcache->size >= DCACHE_SIZE)
    {
      /* Evict the least recently allocated line.  */
      db = dcache->oldest;
      remove_block (&dcache->oldest, db);

      splay_tree_remove (dcache->tree, (splay_tree_key) db->addr);
    }
  else
    {
      db = dcache->freelist;
      if (db)
	remove_block (&dcache->freelist, db);
      else
	db = xmalloc (sizeof (struct dcache_block));

      dcache->size++;
    }

  db->addr = MASK (addr);
  db->refs = 0;

  /* Put DB at the end of the list; it's the newest.  */
  append_block (&dcache->oldest, db);

  splay_tree_insert (dcache->tree, (splay_tree_key) db->addr,
		     (splay_tree_value) db);

  return db;
}
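
/* Note that eviction is in allocation order (the OLDEST list), not in
   order of last use: a frequently hit line is still recycled once
   DCACHE_SIZE newer lines have been allocated after it.  */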

/* Using the data cache DCACHE, store in *PTR the contents of the byte at
   address ADDR in the remote machine.

   Returns 1 for success, 0 for error.  */

static int
dcache_peek_byte (DCACHE *dcache, CORE_ADDR addr, gdb_byte *ptr)
{
  struct dcache_block *db = dcache_hit (dcache, addr);

  if (!db)
    {
      db = dcache_alloc (dcache, addr);

      if (!dcache_read_line (dcache, db))
	return 0;
    }

  *ptr = db->data[XFORM (addr)];
  return 1;
}

/* Write the byte at PTR into ADDR in the data cache.

   The caller is responsible for also promptly writing the data
   through to target memory.

   If addr is not in cache, this function does nothing; writing to
   an area of memory which wasn't present in the cache doesn't cause
   it to be loaded in.

   Always return 1 (meaning success) to simplify dcache_xfer_memory.  */

static int
dcache_poke_byte (DCACHE *dcache, CORE_ADDR addr, gdb_byte *ptr)
{
  struct dcache_block *db = dcache_hit (dcache, addr);

  if (db)
    db->data[XFORM (addr)] = *ptr;

  return 1;
}
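
/* Taken together, dcache_peek_byte and dcache_poke_byte give the cache a
   read-allocate, write-no-allocate policy: a read miss fills in a whole
   line via dcache_read_line, while a write miss leaves the cache untouched
   and relies on the caller's write-through to reach the target.  */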

static int
dcache_splay_tree_compare (splay_tree_key a, splay_tree_key b)
{
  if (a > b)
    return 1;
  else if (a == b)
    return 0;
  else
    return -1;
}

/* Allocate and initialize a data cache.  */

DCACHE *
dcache_init (void)
{
  DCACHE *dcache;

  dcache = (DCACHE *) xmalloc (sizeof (*dcache));

  dcache->tree = splay_tree_new (dcache_splay_tree_compare,
				 NULL,
				 NULL);

  dcache->oldest = NULL;
  dcache->freelist = NULL;
  dcache->size = 0;
  dcache->ptid = null_ptid;
  last_cache = dcache;

  return dcache;
}

/* BLOCK_FUNC routine for dcache_free.  */

static void
free_block (struct dcache_block *block, void *param)
{
  /* Blocks are allocated with xmalloc, so release them with xfree.  */
  xfree (block);
}

/* Free a data cache.  */

void
dcache_free (DCACHE *dcache)
{
  if (last_cache == dcache)
    last_cache = NULL;

  splay_tree_delete (dcache->tree);
  for_each_block (&dcache->oldest, free_block, NULL);
  for_each_block (&dcache->freelist, free_block, NULL);
  xfree (dcache);
}

/* Read or write LEN bytes from inferior memory at MEMADDR, transferring
   to or from debugger address MYADDR.  Write to inferior if SHOULD_WRITE is
   nonzero.

   Return the number of bytes actually transferred, or -1 if the
   transfer is not supported or otherwise fails.  Return of a non-negative
   value less than LEN indicates that no further transfer is possible.
   NOTE: This is different from the to_xfer_partial interface, in which
   positive values less than LEN mean further transfers may be possible.  */

int
dcache_xfer_memory (struct target_ops *ops, DCACHE *dcache,
		    CORE_ADDR memaddr, gdb_byte *myaddr,
		    int len, int should_write)
{
  int i;
  int res;
  int (*xfunc) (DCACHE *dcache, CORE_ADDR addr, gdb_byte *ptr);

  xfunc = should_write ? dcache_poke_byte : dcache_peek_byte;

  /* If this is a different inferior from what we've recorded,
     flush the cache.  */

  if (! ptid_equal (inferior_ptid, dcache->ptid))
    {
      dcache_invalidate (dcache);
      dcache->ptid = inferior_ptid;
    }

  /* Do write-through first, so that if it fails, we don't write to
     the cache at all.  */

  if (should_write)
    {
      res = target_write (ops, TARGET_OBJECT_RAW_MEMORY,
			  NULL, myaddr, memaddr, len);
      if (res <= 0)
	return res;
      /* Update LEN to what was actually written.  */
      len = res;
    }

  for (i = 0; i < len; i++)
    {
      if (!xfunc (dcache, memaddr + i, myaddr + i))
	{
	  /* That failed.  Discard its cache line so we don't have a
	     partially read line.  */
	  dcache_invalidate_line (dcache, memaddr + i);
	  /* If we're writing, the write-through above already transferred
	     LEN bytes to the target, so report success for all of them.  */
	  if (should_write)
	    return len;
	  else
	    return i;
	}
    }

  return len;
}
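
/* Usage sketch: with the "stack-cache" flag enabled, callers such as
   memory_xfer_partial in target.c forward cacheable requests here, so a
   hypothetical call such as

	dcache_xfer_memory (ops, dcache, sp, buf, 8, 0)

   reads eight bytes at SP, faulting in at most two LINE_SIZE lines and
   serving later nearby reads from the cache.  */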

/* FIXME: There would be some benefit to making the cache write-back and
   moving the writeback operation to a higher layer, as it could occur
   after a sequence of smaller writes have been completed (as when a stack
   frame is constructed for an inferior function call).  Note that only
   moving it up one level to target_xfer_memory[_partial]() is not
   sufficient since we want to coalesce memory transfers that are
   "logically" connected but not actually a single call to one of the
   memory transfer functions.  */

/* Just update any cache lines which are already present.  This is called
   by memory_xfer_partial in cases where the access would otherwise not go
   through the cache.  */

void
dcache_update (DCACHE *dcache, CORE_ADDR memaddr, gdb_byte *myaddr, int len)
{
  int i;

  for (i = 0; i < len; i++)
    dcache_poke_byte (dcache, memaddr + i, myaddr + i);
}

static void
dcache_print_line (int index)
{
  splay_tree_node n;
  struct dcache_block *db;
  int i, j;

  if (!last_cache)
    {
      printf_filtered (_("No data cache available.\n"));
      return;
    }

  n = splay_tree_min (last_cache->tree);

  for (i = index; i > 0; --i)
    {
      if (!n)
	break;
      n = splay_tree_successor (last_cache->tree, n->key);
    }

  if (!n)
    {
      printf_filtered (_("No such cache line exists.\n"));
      return;
    }

  db = (struct dcache_block *) n->value;

  printf_filtered (_("Line %d: address %s [%d hits]\n"),
		   index, paddress (target_gdbarch, db->addr), db->refs);

  for (j = 0; j < LINE_SIZE; j++)
    {
      printf_filtered ("%02x ", db->data[j]);

      /* Print a newline every 16 bytes (48 characters).  */
      if ((j % 16 == 15) && (j != LINE_SIZE - 1))
	printf_filtered ("\n");
    }
  printf_filtered ("\n");
}

static void
dcache_info (char *exp, int tty)
{
  splay_tree_node n;
  int i, refcount;

  if (exp)
    {
      char *linestart;

      i = strtol (exp, &linestart, 10);
      if (linestart == exp || i < 0)
	{
	  printf_filtered (_("Usage: info dcache [linenumber]\n"));
	  return;
	}

      dcache_print_line (i);
      return;
    }

  printf_filtered (_("Dcache line width %d, maximum size %d\n"),
		   LINE_SIZE, DCACHE_SIZE);

  if (!last_cache || ptid_equal (last_cache->ptid, null_ptid))
    {
      printf_filtered (_("No data cache available.\n"));
      return;
    }

  printf_filtered (_("Contains data for %s\n"),
		   target_pid_to_str (last_cache->ptid));

  refcount = 0;

  n = splay_tree_min (last_cache->tree);
  i = 0;

  while (n)
    {
      struct dcache_block *db = (struct dcache_block *) n->value;

      printf_filtered (_("Line %d: address %s [%d hits]\n"),
		       i, paddress (target_gdbarch, db->addr), db->refs);
      i++;
      refcount += db->refs;

      n = splay_tree_successor (last_cache->tree, n->key);
    }

  printf_filtered (_("Cache state: %d active lines, %d hits\n"), i, refcount);
}

void
_initialize_dcache (void)
{
  add_setshow_boolean_cmd ("remotecache", class_support,
			   &dcache_enabled_p, _("\
Set cache use for remote targets."), _("\
Show cache use for remote targets."), _("\
This used to enable the data cache for remote targets.  The cache\n\
functionality is now controlled by the memory region system and the\n\
\"stack-cache\" flag; \"remotecache\" now does nothing and\n\
exists only for compatibility reasons."),
			   NULL,
			   show_dcache_enabled_p,
			   &setlist, &showlist);

  add_info ("dcache", dcache_info,
	    _("\
Print information on the dcache performance.\n\
With no arguments, this command prints the cache configuration and a\n\
summary of each line in the cache.  Use \"info dcache <lineno>\" to dump\n\
the contents of a given line."));
}
660