1 /* Physical memory system for the virtual machine.
2 Copyright 2001, 2003 Brian R. Gaeke.
3
4 This file is part of VMIPS.
5
6 VMIPS is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 2 of the License, or (at your
9 option) any later version.
10
11 VMIPS is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License along
17 with VMIPS; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */
19
20 #include "cpu.h"
21 #include "devicemap.h"
22 #include "error.h"
23 #include "excnames.h"
24 #include "mapper.h"
25 #include "memorymodule.h"
26 #include "rommodule.h"
27 #include "options.h"
28 #include "range.h"
29 #include "vmips.h"
30 #include <cassert>
31
Cache(unsigned int bits_)32 Cache::Cache(unsigned int bits_) : bits(bits_)
33 {
34 size = 1<<bits;
35 mask = size - 1;
36 entries = new Entry[size];
37 for (int i = 0; i < size; ++i) {
38 entries[i].valid = false;
39 }
40 }
41
~Cache()42 Cache::~Cache()
43 {
44 delete [] entries;
45 }
46
Mapper()47 Mapper::Mapper () :
48 last_used_mapping (NULL),
49 caches_isolated (false),
50 caches_swapped (false)
51 {
52 /* Caches are direct-mapped, physically indexed, physically tagged,
53 * with 1-word lines. */
54 icache = new Cache(14); /* 2**14 words = 64k */
55 dcache = new Cache(14);
56 opt_bigendian = machine->opt->option("bigendian")->flag;
57 byteswapped = (((opt_bigendian) && (!machine->host_bigendian))
58 || ((!opt_bigendian) && machine->host_bigendian));
59 }
60
61 /* Deconstruction. Deallocate the range list. */
~Mapper()62 Mapper::~Mapper()
63 {
64 delete icache;
65 delete dcache;
66 for (Ranges::iterator i = ranges.begin(); i != ranges.end(); i++)
67 delete *i;
68 }
69
70 /* Add range R to the mapping. R must not overlap with any existing
71 * ranges in the mapping. Return 0 if R added sucessfully or -1 if
72 * R overlapped with an existing range.
73 */
74 int
add_range(Range * r)75 Mapper::add_range(Range *r)
76 {
77 assert (r && "Null range object passed to Mapper::add_range()");
78
79 /* Check to make sure the range is non-overlapping. */
80 for (Ranges::iterator i = ranges.begin(); i != ranges.end(); i++) {
81 if (r->overlaps(*i)) {
82 error("Attempt to map two VMIPS components to the "
83 "same memory area: (base %x extent %x) and "
84 "(base %x extent %x).", r->getBase(),
85 r->getExtent(), (*i)->getBase(),
86 (*i)->getExtent());
87 return -1;
88 }
89 }
90
91 /* Once we're satisfied that it doesn't overlap, add it to the list. */
92 ranges.push_back(r);
93 return 0;
94 }
95
96 /* Returns the first mapping in the rangelist, starting at the beginning,
97 * which maps the address P, or NULL if no such mapping exists. This
98 * function uses the LAST_USED_MAPPING instance variable as a cache to
99 * speed a succession of accesses to the same area of memory.
100 */
101 Range *
find_mapping_range(uint32 p)102 Mapper::find_mapping_range(uint32 p)
103 {
104 if (last_used_mapping && last_used_mapping->incorporates(p))
105 return last_used_mapping;
106
107 for (Ranges::iterator i = ranges.begin(), e = ranges.end(); i != e;
108 ++i) {
109 if ((*i)->incorporates(p)) {
110 last_used_mapping = *i;
111 return *i;
112 }
113 }
114
115 return NULL;
116 }
117
/* If the host processor is byte-swapped with respect to the target
 * we are emulating, we will need to swap data bytes around when we
 * do loads and stores. These functions implement the swapping.
 *
 * The mips_to_host_word(), etc. methods invoke the swap_word() methods
 * if the host processor is the opposite endianness from the target.
 */
126 /* Convert word W from big-endian to little-endian, or vice-versa,
127 * and return the result of the conversion.
128 */
129 uint32
swap_word(uint32 w)130 Mapper::swap_word(uint32 w)
131 {
132 return ((w & 0x0ff) << 24) | (((w >> 8) & 0x0ff) << 16) |
133 (((w >> 16) & 0x0ff) << 8) | ((w >> 24) & 0x0ff);
134 }
135
136 /* Convert halfword H from big-endian to little-endian, or vice-versa,
137 * and return the result of the conversion.
138 */
139 uint16
swap_halfword(uint16 h)140 Mapper::swap_halfword(uint16 h)
141 {
142 return ((h & 0x0ff) << 8) | ((h >> 8) & 0x0ff);
143 }
144
145 /* Convert word W from target processor byte-order to host processor
146 * byte-order and return the result of the conversion.
147 */
148 uint32
mips_to_host_word(uint32 w)149 Mapper::mips_to_host_word(uint32 w)
150 {
151 if (byteswapped)
152 w = swap_word (w);
153 return w;
154 }
155
156 /* Convert word W from host processor byte-order to target processor
157 * byte-order and return the result of the conversion.
158 */
159 uint32
host_to_mips_word(uint32 w)160 Mapper::host_to_mips_word(uint32 w)
161 {
162 if (byteswapped)
163 w = swap_word (w);
164 return w;
165 }
166
167 /* Convert halfword H from target processor byte-order to host processor
168 * byte-order and return the result of the conversion.
169 */
170 uint16
mips_to_host_halfword(uint16 h)171 Mapper::mips_to_host_halfword(uint16 h)
172 {
173 if (byteswapped)
174 h = swap_halfword(h);
175 return h;
176 }
177
178 /* Convert halfword H from host processor byte-order to target processor
179 * byte-order and return the result of the conversion.
180 */
181 uint16
host_to_mips_halfword(uint16 h)182 Mapper::host_to_mips_halfword(uint16 h)
183 {
184 if (byteswapped)
185 h = swap_halfword(h);
186 return h;
187 }
188
189 void
bus_error(DeviceExc * client,int32 mode,uint32 addr,int32 width,uint32 data)190 Mapper::bus_error (DeviceExc *client, int32 mode, uint32 addr,
191 int32 width, uint32 data)
192 {
193 last_berr_info.valid = true;
194 last_berr_info.client = client;
195 last_berr_info.mode = mode;
196 last_berr_info.addr = addr;
197 last_berr_info.width = width;
198 last_berr_info.data = data;
199 if (machine->opt->option("dbemsg")->flag) {
200 fprintf (stderr, "%s %s %s physical address 0x%x caused bus error",
201 (mode == DATASTORE) ? "store" : "load",
202 (width == 4) ? "word" : ((width == 2) ? "halfword" : "byte"),
203 (mode == DATASTORE) ? "to" : "from",
204 addr);
205 if (mode == DATASTORE)
206 fprintf (stderr, ", data = 0x%x", data);
207 fprintf (stderr, "\n");
208 }
209 client->exception((mode == INSTFETCH ? IBE : DBE), mode);
210 }
211
212 /* Set the cache control bits to the given values.
213 */
cache_set_control_bits(bool isolated,bool swapped)214 void Mapper::cache_set_control_bits(bool isolated, bool swapped)
215 {
216 #if defined(CACHE_DEBUG)
217 if (caches_isolated != isolated) {
218 printf("Isolated -> %d\n", isolated);
219 }
220 if (caches_swapped != swapped) {
221 printf("Swapped -> %d\n", swapped);
222 }
223 #endif
224 caches_isolated = isolated;
225 caches_swapped = swapped;
226 }
227
228 /* Test a specific cache entry for a hit; return whether we hit.
229 */
cache_use_entry(const Cache::Entry * const entry,uint32 tag,int32 mode) const230 bool Mapper::cache_use_entry(const Cache::Entry *const entry,
231 uint32 tag, int32 mode) const
232 {
233 return (caches_isolated && (mode != INSTFETCH)) ||
234 (entry->valid && entry->tag == tag);
235 }
236
237 /* Test cache for a hit; return whether we hit.
238 */
cache_hit(bool cacheable,int32 mode,uint32 & tag,uint32 & addr,Cache::Entry * & entry)239 bool Mapper::cache_hit(bool cacheable, int32 mode, uint32 &tag, uint32 &addr,
240 Cache::Entry *&entry)
241 {
242 if (cacheable) {
243 Cache *cache;
244 if (caches_swapped) {
245 cache = (mode == INSTFETCH) ? dcache : icache;
246 } else {
247 cache = (mode == INSTFETCH) ? icache : dcache;
248 }
249 tag = addr>>2; /* Tag is word address. */
250 entry = &cache->entries[tag & cache->mask];
251 if (cache_use_entry(entry, tag, mode)) {
252 #if defined(CACHE_DEBUG)
253 if (caches_isolated) {
254 printf("Read w/isolated cache 0x%x\n", addr);
255 }
256 #endif
257 return true;
258 }
259 addr &= ~0x3; /* Refill whole word. */
260 }
261 return false;
262 }
263
264 /* Read data from a specific cache entry.
265 */
cache_get_data_from_entry(const Cache::Entry * const entry,int size,uint32 addr)266 uint32 Mapper::cache_get_data_from_entry(const Cache::Entry *const entry,
267 int size, uint32 addr)
268 {
269 uint32 result;
270 uint32 n;
271 switch (size) {
272 case 4: result = entry->data; break;
273 case 2: n = (addr >> 1) & 0x1;
274 if (byteswapped)
275 n = 1 - n;
276 result = ((uint16 *)(&entry->data))[n];
277 break;
278 case 1: n = (addr & 0x3);
279 if (byteswapped)
280 n = 3 - n;
281 result = ((uint8 *)(&entry->data))[n];
282 break;
283 default: assert(0); result = 0xffffffff; break;
284 }
285 return result;
286 }
287
288 /* Write data to a specific cache entry.
289 */
cache_set_data_into_entry(Cache::Entry * const entry,int size,uint32 addr,uint32 data)290 void Mapper::cache_set_data_into_entry(Cache::Entry *const entry,
291 int size, uint32 addr, uint32 data)
292 {
293 uint32 n;
294 switch (size) {
295 case 4: entry->data = data; break;
296 case 2: n = (addr >> 1) & 0x1;
297 if (byteswapped)
298 n = 1 - n;
299 ((uint16 *)(&entry->data))[n] = data;
300 break;
301 case 1: n = addr & 0x3;
302 if (byteswapped)
303 n = 3 - n;
304 ((uint8 *)(&entry->data))[n] = data;
305 break;
306 default: assert(0); break;
307 }
308 }
309
310 /* Write data to cache, and then to main memory.
311 * 0. If isolated, no write through
312 * - If partial word, then invalidate, otherwise valid
313 * 1. If full word, write through and set tag+valid bit regardless of
314 * current contents
315 * 2. If partial word,
316 * - fill cache entry with new address
317 * - do partial-word update
318 * - write through
319 */
cache_write(int size,uint32 addr,uint32 data,Range * l,DeviceExc * client)320 void Mapper::cache_write(int size, uint32 addr, uint32 data, Range *l,
321 DeviceExc *client)
322 {
323 Cache *cache;
324 if (caches_swapped) {
325 cache = icache;
326 } else {
327 cache = dcache;
328 }
329 uint32 tag = addr>>2;
330 Cache::Entry *entry = &cache->entries[tag & cache->mask];
331 if (caches_isolated) {
332 #if defined(CACHE_DEBUG)
333 printf("Write(%d) w/isolated cache 0x%x -> 0x%x\n", size, data, addr);
334 #endif
335 if (size == 4) {
336 /* Caches isolated; write to cache only. */
337 cache_set_data_into_entry(entry,size,addr,data);
338 } else {
339 /* Partial-word store to isolated cache causes
340 invalidation. */
341 entry->valid = 0;
342 }
343 return; /* Don't write to memory. */
344 }
345 if (size != 4 && !cache_use_entry(entry, tag, DATASTORE)) {
346 /* Partial-word store to cache entry that is not already valid.
347 This triggers read-modify-write behavior. */
348 uint32 word_addr = addr & ~0x3; /* Refill whole word. */
349 uint32 word_offset = word_addr - l->getBase();
350
351 /* Fill cache entry with word containing addressed byte or
352 halfword. */
353 cache_do_fill(entry, tag, l, word_offset, DATASTORE, client, 4,
354 word_addr);
355 }
356 /* Update data in cache. */
357 cache_set_data_into_entry(entry,size,addr,data);
358 entry->valid = true;
359 entry->tag = tag;
360 /* Write word from cache to memory. */
361 l->store_word(addr - l->getBase(), mips_to_host_word(entry->data), client);
362 }
363
364 /* Refill a cache entry.
365 */
cache_do_fill(Cache::Entry * const entry,uint32 tag,Range * l,uint32 offset,int32 mode,DeviceExc * client,int32 size,uint32 addr)366 uint32 Mapper::cache_do_fill(Cache::Entry *const entry, uint32 tag,
367 Range *l, uint32 offset, int32 mode, DeviceExc *client,
368 int32 size, uint32 addr)
369 {
370 entry->valid = true;
371 entry->tag = tag;
372 if (!caches_isolated || mode==INSTFETCH) {
373 entry->data = host_to_mips_word(l->fetch_word(offset, mode,
374 client));
375 }
376 return cache_get_data_from_entry(entry,size,addr);
377 }
378
379 /* Fetch a word from the physical memory from physical address
380 * ADDR. MODE is INSTFETCH if this is an instruction fetch; DATALOAD
381 * otherwise. CACHEABLE is true if this access should be routed through
382 * the cache, false otherwise. This routine is shared between instruction
383 * fetches and word-wide data fetches.
384 *
385 * The routine returns either the specified word, if it is mapped and
386 * the address is correctly aligned, or else a word consisting of all
387 * ones is returned.
388 *
389 * Words are returned in the endianness of the target processor; since devices
390 * are implemented as Ranges, devices should return words in the host
391 * endianness.
392 *
393 * This routine may trigger exceptions IBE and/or DBE in the client
394 * processor, if the address is unmapped.
395 * This routine may trigger exception AdEL in the client
396 * processor, if the address is unaligned.
397 */
398 uint32
fetch_word(uint32 addr,int32 mode,bool cacheable,DeviceExc * client)399 Mapper::fetch_word(uint32 addr, int32 mode, bool cacheable, DeviceExc *client)
400 {
401 Range *l = NULL;
402 uint32 offset;
403 uint32 result, tag, oaddr = addr;
404 Cache::Entry *entry = NULL;
405
406 if (addr % 4 != 0) {
407 client->exception(AdEL,mode);
408 return 0xffffffff;
409 }
410
411 if (cache_hit(cacheable, mode, tag, addr, entry)) {
412 uint32 x = cache_get_data_from_entry(entry,4,addr);
413 #if defined(CACHE_DEBUG)
414 if (caches_isolated) {
415 printf("Isolated word read returned 0x%x\n", x);
416 }
417 #endif
418 return x;
419 }
420
421 l = find_mapping_range(addr);
422 if (!l) {
423 bus_error (client, mode, addr, 4);
424 return 0xffffffff;
425 }
426 offset = oaddr - l->getBase();
427 if (!l->canRead(offset)) {
428 /* Reads from write-only ranges return ones */
429 return 0xffffffff;
430 }
431
432 if (cacheable && entry) {
433 return cache_do_fill(entry,tag,l,offset,mode,client,4,oaddr);
434 }
435
436 return host_to_mips_word(l->fetch_word(offset, mode, client));
437 }
438
439 /* Fetch a halfword from the physical memory from physical address ADDR.
440 * CACHEABLE is true if this access should be routed through the cache,
441 * false otherwise.
442 *
443 * The routine returns either the specified halfword, if it is mapped
444 * and the address is correctly aligned, or else a halfword consisting
445 * of all ones is returned.
446 *
447 * Halfwords are returned in the endianness of the target processor;
448 * since devices are implemented as Ranges, devices should return halfwords
449 * in the host endianness.
450 *
451 * This routine may trigger exception DBE in the client processor,
452 * if the address is unmapped.
453 * This routine may trigger exception AdEL in the client
454 * processor, if the address is unaligned.
455 */
456 uint16
fetch_halfword(uint32 addr,bool cacheable,DeviceExc * client)457 Mapper::fetch_halfword(uint32 addr, bool cacheable, DeviceExc *client)
458 {
459 Range *l = NULL;
460 uint32 offset;
461 uint32 result, tag, oaddr = addr;
462 Cache::Entry *entry = NULL;
463
464 if (addr % 2 != 0) {
465 client->exception(AdEL,DATALOAD);
466 return 0xffff;
467 }
468
469 if (cache_hit(cacheable, DATALOAD, tag, addr, entry)) {
470 return cache_get_data_from_entry(entry,2,addr);
471 }
472
473 l = find_mapping_range(addr);
474 if (!l) {
475 bus_error (client, DATALOAD, addr, 2);
476 return 0xffff;
477 }
478 offset = oaddr - l->getBase();
479 if (!l->canRead(offset)) {
480 /* Reads from write-only ranges return ones */
481 return 0xffff;
482 }
483
484 if (cacheable && entry) {
485 return cache_do_fill(entry,tag,l,offset,DATALOAD,client,2,oaddr);
486 }
487 return host_to_mips_halfword(l->fetch_halfword(offset, client));
488 }
489
490 /* Fetch a byte from the physical memory from physical address ADDR.
491 * CACHEABLE is true if this access should be routed through the cache,
492 * false otherwise.
493 *
494 * The routine returns either the specified byte, if it is mapped,
495 * or else a byte consisting of all ones is returned.
496 *
497 * This routine may trigger exception DBE in the client processor,
498 * if the address is unmapped.
499 */
500 uint8
fetch_byte(uint32 addr,bool cacheable,DeviceExc * client)501 Mapper::fetch_byte(uint32 addr, bool cacheable, DeviceExc *client)
502 {
503 Range *l = NULL;
504 uint32 offset;
505 uint32 result, tag, oaddr = addr;
506 Cache::Entry *entry = NULL;
507
508 if (cache_hit(cacheable, DATALOAD, tag, addr, entry)) {
509 return cache_get_data_from_entry(entry,1,addr);
510 }
511 l = find_mapping_range(addr);
512 if (!l) {
513 bus_error (client, DATALOAD, addr, 1);
514 return 0xff;
515 }
516 offset = oaddr - l->getBase();
517 if (!l->canRead(offset)) {
518 /* Reads from write-only ranges return ones */
519 return 0xff;
520 }
521 if (cacheable && entry) {
522 return cache_do_fill(entry,tag,l,offset,DATALOAD,client,1,oaddr);
523 }
524 return l->fetch_byte(offset, client);
525 }
526
527
528 /* Store a word's-worth of DATA to physical address ADDR.
529 * CACHEABLE is true if this access should be routed through the cache,
530 * false otherwise.
531 *
532 * This routine may trigger exception AdES in the client processor,
533 * if the address is unaligned.
534 * This routine may trigger exception DBE in the client processor,
535 * if the address is unmapped.
536 */
537 void
store_word(uint32 addr,uint32 data,bool cacheable,DeviceExc * client)538 Mapper::store_word(uint32 addr, uint32 data, bool cacheable, DeviceExc *client)
539 {
540 Range *l = NULL;
541 uint32 offset;
542
543 if (addr % 4 != 0) {
544 client->exception(AdES,DATASTORE);
545 return;
546 }
547 l = find_mapping_range(addr);
548 if (!l) {
549 bus_error (client, DATASTORE, addr, 4, data);
550 return;
551 }
552 offset = addr - l->getBase();
553 if (!l->canWrite(offset)) {
554 fprintf(stderr, "Attempt to write read-only memory: 0x%08x\n",
555 addr);
556 return;
557 }
558 if (cacheable) {
559 cache_write(4, addr, data, l, client);
560 } else if (!caches_isolated) {
561 l->store_word(addr - l->getBase(), mips_to_host_word(data),
562 client);
563 }
564 }
565
566 /* Store half a word's-worth of DATA to physical address ADDR.
567 * CACHEABLE is true if this access should be routed through the cache,
568 * false otherwise.
569 *
570 * This routine may trigger exception AdES in the client processor,
571 * if the address is unaligned.
572 * This routine may trigger exception DBE in the client processor,
573 * if the address is unmapped.
574 */
575 void
store_halfword(uint32 addr,uint16 data,bool cacheable,DeviceExc * client)576 Mapper::store_halfword(uint32 addr, uint16 data, bool cacheable, DeviceExc
577 *client)
578 {
579 Range *l = NULL;
580 uint32 offset;
581
582 if (addr % 2 != 0) {
583 client->exception(AdES,DATASTORE);
584 return;
585 }
586 l = find_mapping_range(addr);
587 if (!l) {
588 bus_error (client, DATASTORE, addr, 2, data);
589 return;
590 }
591 offset = addr - l->getBase();
592 if (!l->canWrite(offset)) {
593 /* Write to read-only range */
594 fprintf(stderr, "Attempt to write read-only memory: 0x%08x\n",
595 addr);
596 return;
597 }
598 if (cacheable) {
599 cache_write(2, addr, data, l, client);
600 } else if (!caches_isolated) {
601 l->store_halfword(addr - l->getBase(),
602 mips_to_host_halfword(data), client);
603 }
604 }
605
606 /* Store a byte of DATA to physical address ADDR.
607 * CACHEABLE is true if this access should be routed through the cache,
608 * false otherwise.
609 *
610 * This routine may trigger exception DBE in the client processor,
611 * if the address is unmapped.
612 */
613 void
store_byte(uint32 addr,uint8 data,bool cacheable,DeviceExc * client)614 Mapper::store_byte(uint32 addr, uint8 data, bool cacheable, DeviceExc *client)
615 {
616 Range *l = NULL;
617 uint32 offset;
618
619 l = find_mapping_range(addr);
620 if (!l) {
621 bus_error (client, DATASTORE, addr, 1, data);
622 return;
623 }
624 offset = addr - l->getBase();
625 if (!l->canWrite(offset)) {
626 /* Write to read-only range */
627 fprintf(stderr, "Attempt to write read-only memory: 0x%08x\n",
628 addr);
629 return;
630 }
631 if (cacheable) {
632 cache_write(1, addr, data, l, client);
633 } else if (!caches_isolated) {
634 l->store_byte(addr - l->getBase(), data, client);
635 }
636 }
637
638 /* Print a hex dump of the first 8 words on top of the stack to the
639 * filehandle pointed to by F. The physical address that corresponds to the
640 * stack pointer is STACKPHYS. The stack is assumed to grow down in memory;
641 * that is, the addresses which are dumped are STACKPHYS, STACKPHYS - 4,
642 * STACKPHYS - 8, ...
643 */
644 void
dump_stack(FILE * f,uint32 stackphys)645 Mapper::dump_stack(FILE *f, uint32 stackphys)
646 {
647 Range *l;
648
649 fprintf(f, "Stack: ");
650 if ((l = find_mapping_range(stackphys)) == NULL) {
651 fprintf(f, "(points to hole in address space)");
652 } else {
653 if (!dynamic_cast<MemoryModule *> (l)) {
654 fprintf(f, "(points to non-RAM address space)");
655 } else {
656 for (int i = 0; i > -8; i--) {
657 uint32 data =
658 ((uint32 *) l->
659 getAddress())[(stackphys - l->getBase()) / 4 + i];
660 if (byteswapped)
661 data = swap_word (data);
662 fprintf(f, "%08x ", data);
663 }
664 }
665 }
666 fprintf(f, "\n");
667 }
668
669 /* Print a hex dump of the first word of memory at physical address
670 * ADDR to the filehandle pointed to by F.
671 */
672 void
dump_mem(FILE * f,uint32 phys)673 Mapper::dump_mem(FILE *f, uint32 phys)
674 {
675 Range *l;
676
677 if ((l = find_mapping_range(phys)) == NULL) {
678 fprintf(f, "(points to hole in address space)");
679 } else {
680 if (!(dynamic_cast<MemoryModule *> (l) || dynamic_cast<ROMModule *>(l))) {
681 fprintf(f, "(points to non-memory address space)");
682 } else {
683 uint32 data =
684 ((uint32 *) l->
685 getAddress())[(phys - l->getBase()) / 4];
686 if (byteswapped)
687 data = swap_word (data);
688 fprintf(f, "%08x ", data);
689 }
690 }
691 }
692