/* Caching code for GDB, the GNU debugger.
Copyright (C) 1992-2014 Free Software Foundation, Inc.
This file is part of GDB.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
#include "defs.h"
#include "dcache.h"
#include "gdbcmd.h"
#include <string.h>
#include "gdbcore.h"
#include "target-dcache.h"
#include "inferior.h"
#include "splay-tree.h"
/* Commands with a prefix of `{set,show} dcache'. */
static struct cmd_list_element *dcache_set_list = NULL;
static struct cmd_list_element *dcache_show_list = NULL;
/* The data cache could lead to incorrect results because it doesn't
know about volatile variables, thus making it impossible to debug
functions which use memory mapped I/O devices. Set the nocache
memory region attribute in those cases.
In general the dcache speeds up performance. Some speed improvement
comes from the actual caching mechanism, but the major gain is in
the reduction of the remote protocol overhead; instead of reading
or writing a large area of memory in 4 byte requests, the cache
bundles up the requests into LINE_SIZE chunks, reducing overhead
significantly. This is most useful when accessing a large amount
of data, such as when performing a backtrace.
The cache is a splay tree along with a linked list for replacement.
Each block caches a LINE_SIZE area of memory. Within each line we
remember the address of the line (which must be a multiple of
LINE_SIZE) and the actual data block.
Lines are only allocated as needed, so DCACHE_SIZE really specifies the
*maximum* number of lines in the cache.
At present, the cache is write-through rather than writeback: as soon
as data is written to the cache, it is also immediately written to
the target. Therefore, cache lines are never "dirty". Whether a given
line is valid or not depends on where it is stored in the dcache_struct;
there is no per-block valid flag. */
/* NOTE: Interaction of dcache and memory region attributes
As there is no requirement that memory region attributes be aligned
to or be a multiple of the dcache page size, dcache_read_line() and
dcache_write_line() must break up the page by memory region. If a
chunk does not have the cache attribute set, an invalid memory type
is set, etc., then the chunk is skipped. Those chunks are handled
in target_xfer_memory() (or target_xfer_memory_partial()).
This doesn't occur very often. The most common occurrence is when
the last bit of the .text segment and the first bit of the .data
segment fall within the same dcache page with a ro/cacheable memory
region defined for the .text segment and a rw/non-cacheable memory
region defined for the .data segment. */
/* The maximum number of lines stored. The total size of the cache is
equal to DCACHE_SIZE times LINE_SIZE. */
#define DCACHE_DEFAULT_SIZE 4096
static unsigned dcache_size = DCACHE_DEFAULT_SIZE;
/* The default size of a cache line. Smaller values reduce the time taken to
read a single byte and make the cache more granular, but increase
overhead and reduce the effectiveness of the cache as a prefetcher. */
#define DCACHE_DEFAULT_LINE_SIZE 64
static unsigned dcache_line_size = DCACHE_DEFAULT_LINE_SIZE;
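/* For illustration: with these defaults (4096 lines of 64 bytes each),
   the cache can hold at most 4096 * 64 bytes = 256 KiB of target
   memory.  */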
/* Each cache block holds LINE_SIZE bytes of data
starting at a multiple-of-LINE_SIZE address. */
#define LINE_SIZE_MASK(dcache) ((dcache->line_size - 1))
#define XFORM(dcache, x) ((x) & LINE_SIZE_MASK (dcache))
#define MASK(dcache, x) ((x) & ~LINE_SIZE_MASK (dcache))
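/* For example, with the default line_size of 64 (mask 0x3f), an address
   such as 0x1234 splits into MASK (dcache, 0x1234) == 0x1200, the base
   address of its line, and XFORM (dcache, 0x1234) == 0x34, the offset of
   that byte within the line's data block.  */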
struct dcache_block
{
/* For least-recently-allocated and free lists. */
struct dcache_block *prev;
struct dcache_block *next;
CORE_ADDR addr; /* address of data */
int refs; /* # hits */
gdb_byte data[1]; /* line_size bytes at given address */
};
struct dcache_struct
{
splay_tree tree;
struct dcache_block *oldest; /* least-recently-allocated list. */
/* The free list is maintained identically to OLDEST to simplify
the code: we only need one set of accessors. */
struct dcache_block *freelist;
/* The number of in-use lines in the cache. */
int size;
CORE_ADDR line_size; /* current line_size. */
/* The ptid of the last inferior to use the cache, or null_ptid. */
ptid_t ptid;
};
typedef void (block_func) (struct dcache_block *block, void *param);
static struct dcache_block *dcache_hit (DCACHE *dcache, CORE_ADDR addr);
static int dcache_read_line (DCACHE *dcache, struct dcache_block *db);
static struct dcache_block *dcache_alloc (DCACHE *dcache, CORE_ADDR addr);
static void dcache_info (char *exp, int tty);
void _initialize_dcache (void);
static int dcache_enabled_p = 0; /* OBSOLETE */
static void
show_dcache_enabled_p (struct ui_file *file, int from_tty,
struct cmd_list_element *c, const char *value)
{
fprintf_filtered (file, _("Deprecated remotecache flag is %s.\n"), value);
}
/* Add BLOCK to circular block list BLIST, behind the block at *BLIST.
*BLIST is not updated (unless it was previously NULL of course).
This is for the least-recently-allocated list's sake:
BLIST points to the oldest block.
??? This makes for poor cache usage of the free list,
but is it measurable? */
static void
append_block (struct dcache_block **blist, struct dcache_block *block)
{
if (*blist)
{
block->next = *blist;
block->prev = (*blist)->prev;
block->prev->next = block;
(*blist)->prev = block;
/* We don't update *BLIST here to maintain the invariant that for the
least-recently-allocated list *BLIST points to the oldest block. */
}
else
{
block->next = block;
block->prev = block;
*blist = block;
}
}
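/* For example, appending blocks A, B and C to an initially empty list
   yields the circular chain A <-> B <-> C <-> A, with *BLIST left
   pointing at A, the oldest block, as the least-recently-allocated
   invariant described above requires.  */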
/* Remove BLOCK from circular block list BLIST. */
static void
remove_block (struct dcache_block **blist, struct dcache_block *block)
{
if (block->next == block)
{
*blist = NULL;
}
else
{
block->next->prev = block->prev;
block->prev->next = block->next;
/* If we removed the block *BLIST points to, shift it to the next block
to maintain the invariant that for the least-recently-allocated list
*BLIST points to the oldest block. */
if (*blist == block)
*blist = block->next;
}
}
/* Iterate over all elements in BLIST, calling FUNC.
PARAM is passed to FUNC.
FUNC may remove the block it's passed, but only that block. */
static void
for_each_block (struct dcache_block **blist, block_func *func, void *param)
{
struct dcache_block *db;
if (*blist == NULL)
return;
db = *blist;
do
{
struct dcache_block *next = db->next;
func (db, param);
db = next;
}
while (*blist && db != *blist);
}
/* BLOCK_FUNC routine for dcache_free. */
static void
free_block (struct dcache_block *block, void *param)
{
xfree (block);
}
/* Free a data cache. */
void
dcache_free (DCACHE *dcache)
{
splay_tree_delete (dcache->tree);
for_each_block (&dcache->oldest, free_block, NULL);
for_each_block (&dcache->freelist, free_block, NULL);
xfree (dcache);
}
/* BLOCK_FUNC function for dcache_invalidate.
This doesn't remove the block from the oldest list on purpose.
dcache_invalidate will do it later. */
static void
invalidate_block (struct dcache_block *block, void *param)
{
DCACHE *dcache = (DCACHE *) param;
splay_tree_remove (dcache->tree, (splay_tree_key) block->addr);
append_block (&dcache->freelist, block);
}
/* Free all the data cache blocks, thus discarding all cached data. */
void
dcache_invalidate (DCACHE *dcache)
{
for_each_block (&dcache->oldest, invalidate_block, dcache);
dcache->oldest = NULL;
dcache->size = 0;
dcache->ptid = null_ptid;
if (dcache->line_size != dcache_line_size)
{
/* We've been asked to use a different line size.
All of our freelist blocks are now the wrong size, so free them. */
for_each_block (&dcache->freelist, free_block, dcache);
dcache->freelist = NULL;
dcache->line_size = dcache_line_size;
}
}
/* Invalidate the line associated with ADDR. */
static void
dcache_invalidate_line (DCACHE *dcache, CORE_ADDR addr)
{
struct dcache_block *db = dcache_hit (dcache, addr);
if (db)
{
splay_tree_remove (dcache->tree, (splay_tree_key) db->addr);
remove_block (&dcache->oldest, db);
append_block (&dcache->freelist, db);
--dcache->size;
}
}
/* If addr is present in the dcache, return the address of the block
containing it. Otherwise return NULL. */
static struct dcache_block *
dcache_hit (DCACHE *dcache, CORE_ADDR addr)
{
struct dcache_block *db;
splay_tree_node node = splay_tree_lookup (dcache->tree,
(splay_tree_key) MASK (dcache, addr));
if (!node)
return NULL;
db = (struct dcache_block *) node->value;
db->refs++;
return db;
}
/* Fill a cache line from target memory.
The result is 1 for success, 0 if the (entire) cache line
wasn't readable. */
static int
dcache_read_line (DCACHE *dcache, struct dcache_block *db)
{
CORE_ADDR memaddr;
gdb_byte *myaddr;
int len;
int res;
int reg_len;
struct mem_region *region;
len = dcache->line_size;
memaddr = db->addr;
myaddr = db->data;
while (len > 0)
{
/* Don't overrun if this block is right at the end of the region. */
region = lookup_mem_region (memaddr);
if (region->hi == 0 || memaddr + len < region->hi)
reg_len = len;
else
reg_len = region->hi - memaddr;
/* Skip non-readable regions. The cache attribute can be ignored,
since we may be loading this for a stack access. */
if (region->attrib.mode == MEM_WO)
{
memaddr += reg_len;
myaddr += reg_len;
len -= reg_len;
continue;
}
res = target_read_raw_memory (memaddr, myaddr, reg_len);
if (res != 0)
return 0;
memaddr += reg_len;
myaddr += reg_len;
len -= reg_len;
}
return 1;
}
/* Get a free cache block, put or keep it on the valid list,
and return its address. */
static struct dcache_block *
dcache_alloc (DCACHE *dcache, CORE_ADDR addr)
{
struct dcache_block *db;
if (dcache->size >= dcache_size)
{
/* Evict the least recently allocated line. */
db = dcache->oldest;
remove_block (&dcache->oldest, db);
splay_tree_remove (dcache->tree, (splay_tree_key) db->addr);
}
else
{
db = dcache->freelist;
if (db)
remove_block (&dcache->freelist, db);
else
db = xmalloc (offsetof (struct dcache_block, data) +
dcache->line_size);
dcache->size++;
}
db->addr = MASK (dcache, addr);
db->refs = 0;
/* Put DB at the end of the list, it's the newest. */
append_block (&dcache->oldest, db);
splay_tree_insert (dcache->tree, (splay_tree_key) db->addr,
(splay_tree_value) db);
return db;
}
/* Using the data cache DCACHE, store in *PTR the contents of the byte at
address ADDR in the remote machine.
Returns 1 for success, 0 for error. */
static int
dcache_peek_byte (DCACHE *dcache, CORE_ADDR addr, gdb_byte *ptr)
{
struct dcache_block *db = dcache_hit (dcache, addr);
if (!db)
{
db = dcache_alloc (dcache, addr);
if (!dcache_read_line (dcache, db))
return 0;
}
*ptr = db->data[XFORM (dcache, addr)];
return 1;
}
/* Write the byte at PTR into ADDR in the data cache.
The caller is responsible for also promptly writing the data
through to target memory.
If addr is not in cache, this function does nothing; writing to
an area of memory which wasn't present in the cache doesn't cause
it to be loaded in.
Always return 1 (meaning success) to simplify dcache_xfer_memory. */
static int
dcache_poke_byte (DCACHE *dcache, CORE_ADDR addr, gdb_byte *ptr)
{
struct dcache_block *db = dcache_hit (dcache, addr);
if (db)
db->data[XFORM (dcache, addr)] = *ptr;
return 1;
}
static int
dcache_splay_tree_compare (splay_tree_key a, splay_tree_key b)
{
if (a > b)
return 1;
else if (a == b)
return 0;
else
return -1;
}
/* Allocate and initialize a data cache. */
DCACHE *
dcache_init (void)
{
DCACHE *dcache;
dcache = (DCACHE *) xmalloc (sizeof (*dcache));
dcache->tree = splay_tree_new (dcache_splay_tree_compare,
NULL,
NULL);
dcache->oldest = NULL;
dcache->freelist = NULL;
dcache->size = 0;
dcache->line_size = dcache_line_size;
dcache->ptid = null_ptid;
return dcache;
}
/* Read or write LEN bytes from inferior memory at MEMADDR, transferring
to or from debugger address MYADDR. Write to inferior if SHOULD_WRITE is
nonzero.
Return the number of bytes actually transferred, or -1 if the
transfer is not supported or otherwise fails. Return of a non-negative
value less than LEN indicates that no further transfer is possible.
NOTE: This is different than the to_xfer_partial interface, in which
positive values less than LEN mean further transfers may be possible. */
int
dcache_xfer_memory (struct target_ops *ops, DCACHE *dcache,
CORE_ADDR memaddr, gdb_byte *myaddr,
int len, int should_write)
{
int i;
int res;
int (*xfunc) (DCACHE *dcache, CORE_ADDR addr, gdb_byte *ptr);
xfunc = should_write ? dcache_poke_byte : dcache_peek_byte;
/* If this is a different inferior from what we've recorded,
flush the cache. */
if (! ptid_equal (inferior_ptid, dcache->ptid))
{
dcache_invalidate (dcache);
dcache->ptid = inferior_ptid;
}
/* Do write-through first, so that if it fails, we don't write to
the cache at all. */
if (should_write)
{
res = target_write (ops, TARGET_OBJECT_RAW_MEMORY,
NULL, myaddr, memaddr, len);
if (res <= 0)
return res;
/* Update LEN to what was actually written. */
len = res;
}
for (i = 0; i < len; i++)
{
if (!xfunc (dcache, memaddr + i, myaddr + i))
{
/* That failed. Discard its cache line so we don't have a
partially read line. */
dcache_invalidate_line (dcache, memaddr + i);
/* If we're writing, we still wrote LEN bytes. */
if (should_write)
return len;
else
return i;
}
}
return len;
}
/* FIXME: There would be some benefit to making the cache write-back and
moving the writeback operation to a higher layer, as it could occur
after a sequence of smaller writes have been completed (as when a stack
frame is constructed for an inferior function call). Note that only
moving it up one level to target_xfer_memory[_partial]() is not
sufficient since we want to coalesce memory transfers that are
"logically" connected but not actually a single call to one of the
memory transfer functions. */
/* Just update any cache lines which are already present. This is called
by memory_xfer_partial in cases where the access would otherwise not go
through the cache. */
void
dcache_update (DCACHE *dcache, CORE_ADDR memaddr, gdb_byte *myaddr, int len)
{
int i;
for (i = 0; i < len; i++)
dcache_poke_byte (dcache, memaddr + i, myaddr + i);
}
/* Print DCACHE line INDEX. */
static void
dcache_print_line (DCACHE *dcache, int index)
{
splay_tree_node n;
struct dcache_block *db;
int i, j;
if (dcache == NULL)
{
printf_filtered (_("No data cache available.\n"));
return;
}
n = splay_tree_min (dcache->tree);
for (i = index; i > 0; --i)
{
if (!n)
break;
n = splay_tree_successor (dcache->tree, n->key);
}
if (!n)
{
printf_filtered (_("No such cache line exists.\n"));
return;
}
db = (struct dcache_block *) n->value;
printf_filtered (_("Line %d: address %s [%d hits]\n"),
index, paddress (target_gdbarch (), db->addr), db->refs);
for (j = 0; j < dcache->line_size; j++)
{
printf_filtered ("%02x ", db->data[j]);
/* Print a newline every 16 bytes (48 characters). */
if ((j % 16 == 15) && (j != dcache->line_size - 1))
printf_filtered ("\n");
}
printf_filtered ("\n");
}
/* Parse EXP and show the info about DCACHE. */
static void
dcache_info_1 (DCACHE *dcache, char *exp)
{
splay_tree_node n;
int i, refcount;
if (exp)
{
char *linestart;
i = strtol (exp, &linestart, 10);
if (linestart == exp || i < 0)
{
printf_filtered (_("Usage: info dcache [linenumber]\n"));
return;
}
dcache_print_line (dcache, i);
return;
}
printf_filtered (_("Dcache %u lines of %u bytes each.\n"),
dcache_size,
dcache ? (unsigned) dcache->line_size
: dcache_line_size);
if (dcache == NULL || ptid_equal (dcache->ptid, null_ptid))
{
printf_filtered (_("No data cache available.\n"));
return;
}
printf_filtered (_("Contains data for %s\n"),
target_pid_to_str (dcache->ptid));
refcount = 0;
n = splay_tree_min (dcache->tree);
i = 0;
while (n)
{
struct dcache_block *db = (struct dcache_block *) n->value;
printf_filtered (_("Line %d: address %s [%d hits]\n"),
i, paddress (target_gdbarch (), db->addr), db->refs);
i++;
refcount += db->refs;
n = splay_tree_successor (dcache->tree, n->key);
}
printf_filtered (_("Cache state: %d active lines, %d hits\n"), i, refcount);
}
static void
dcache_info (char *exp, int tty)
{
dcache_info_1 (target_dcache_get (), exp);
}
static void
set_dcache_size (char *args, int from_tty,
struct cmd_list_element *c)
{
if (dcache_size == 0)
{
dcache_size = DCACHE_DEFAULT_SIZE;
error (_("Dcache size must be greater than 0."));
}
target_dcache_invalidate ();
}
static void
set_dcache_line_size (char *args, int from_tty,
struct cmd_list_element *c)
{
if (dcache_line_size < 2
|| (dcache_line_size & (dcache_line_size - 1)) != 0)
{
unsigned d = dcache_line_size;
dcache_line_size = DCACHE_DEFAULT_LINE_SIZE;
error (_("Invalid dcache line size: %u (must be power of 2)."), d);
}
target_dcache_invalidate ();
}
static void
set_dcache_command (char *arg, int from_tty)
{
printf_unfiltered (
"\"set dcache\" must be followed by the name of a subcommand.\n");
help_list (dcache_set_list, "set dcache ", -1, gdb_stdout);
}
static void
show_dcache_command (char *args, int from_tty)
{
cmd_show_list (dcache_show_list, from_tty, "");
}
void
_initialize_dcache (void)
{
add_setshow_boolean_cmd ("remotecache", class_support,
&dcache_enabled_p, _("\
Set cache use for remote targets."), _("\
Show cache use for remote targets."), _("\
This used to enable the data cache for remote targets. The cache\n\
functionality is now controlled by the memory region system and the\n\
\"stack-cache\" flag; \"remotecache\" now does nothing and\n\
exists only for compatibility reasons."),
NULL,
show_dcache_enabled_p,
&setlist, &showlist);
add_info ("dcache", dcache_info,
_("\
Print information on the dcache performance.\n\
With no arguments, this command prints the cache configuration and a\n\
summary of each line in the cache. Use \"info dcache <lineno>\" to dump\n\
the contents of a given line."));
add_prefix_cmd ("dcache", class_obscure, set_dcache_command, _("\
Use this command to set the number of dcache lines and the line size."),
&dcache_set_list, "set dcache ", /*allow_unknown*/0, &setlist);
add_prefix_cmd ("dcache", class_obscure, show_dcache_command, _("\
Show dcache settings."),
&dcache_show_list, "show dcache ", /*allow_unknown*/0, &showlist);
add_setshow_zuinteger_cmd ("line-size", class_obscure,
&dcache_line_size, _("\
Set dcache line size in bytes (must be power of 2)."), _("\
Show dcache line size."),
NULL,
set_dcache_line_size,
NULL,
&dcache_set_list, &dcache_show_list);
add_setshow_zuinteger_cmd ("size", class_obscure,
&dcache_size, _("\
Set number of dcache lines."), _("\
Show number of dcache lines."),
NULL,
set_dcache_size,
NULL,
&dcache_set_list, &dcache_show_list);
}