/*  Copyright 2005 Guillaume Duhamel
Copyright 2016 Shinya Miyamoto

This file is part of Yabause.

Yabause is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

Yabause is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with Yabause; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA
*/

/*! \file sh2cache.c
\brief SH2 internal cache operations (FIL0016332.PDF, section 8)
*/

#ifdef PSP
# include <stdint.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <ctype.h>

#include "memory.h"
#include "yabause.h"
#include "sh2cache.h"
#include "sh2core.h"
#include "vdp2.h"
#include "vdp1.h"
#include "assert.h"
#include "scsp.h"
#include "scu.h"
#include "ygr.h"
#include "cs1.h"
#include "cs0.h"
#include "smpc.h"
#include "cs2.h"


#define AREA_MASK  (0xE0000000)
#define TAG_MASK   (0x1FFFFC00)
#define ENTRY_MASK (0x000003F0)
#define ENTRY_SHIFT (4)
#define LINE_MASK  (0x0000000F)

#define CACHE_USE ((0x00)<<29)
#define CACHE_THROUGH ((0x01)<<29)
#define CACHE_PURGE ((0x02)<<29)
#define CACHE_ADDRES_ARRAY ((0x03)<<29)
#define CACHE_DATA_ARRAY ((0x06)<<29)
#define CACHE_IO ((0x07)<<29)
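
//How an address is decoded (derived from the masks above): bits 31-29 select
//the partition (0: cached, 1: cache-through, 2: associative purge, 3: address
//array, 6: data array, 7: on-chip I/O), bits 28-10 are the tag, bits 9-4 pick
//one of 64 entries, and bits 3-0 index a byte in the 16-byte line.
//Worked example for addr = 0x06000124 (high WRAM, cached partition):
//   area  = addr & AREA_MASK                   -> 0x00000000 (CACHE_USE)
//   tag   = addr & TAG_MASK                    -> 0x06000000
//   entry = (addr & ENTRY_MASK) >> ENTRY_SHIFT -> 0x12 (entry 18)
//   line  = addr & LINE_MASK                   -> 0x4 (byte 4 of the line)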

void cache_clear(cache_enty * ca){
   int entry = 0;
   ca->enable = 0;

   for (entry = 0; entry < 64; entry++){
      int way = 0;
      ca->lru[entry] = 0;

      for (way = 0; way < 4; way++)
      {
         int i = 0;
         ca->way[way][entry].tag = 0;

         for (i = 0; i < 16; i++)
            ca->way[way][entry].data[i] = 0;
         ca->way[way][entry].v = 0;
      }
   }
   return;
}

void cache_enable(cache_enty * ca){
   //cache enable does not clear the cache
   ca->enable = 1;
}

void cache_disable(cache_enty * ca){
   ca->enable = 0;
}

//The LRU field is updated:
// - when a cache hit occurs during a read
// - when a cache hit occurs during a write
// - when a line is replaced after a cache miss
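//The 6-bit LRU field encodes the relative age of each pair of ways (this is
//how update_lru() and select_way_to_replace() below use it):
//  bit 5: way 1 used more recently than way 0
//  bit 4: way 2 used more recently than way 0
//  bit 3: way 3 used more recently than way 0
//  bit 2: way 2 used more recently than way 1
//  bit 1: way 3 used more recently than way 1
//  bit 0: way 3 used more recently than way 2
//A way is the replacement victim when every bit involving it says the other
//way is newer, e.g. way 0 is the victim when bits 5, 4 and 3 are all set.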

static INLINE void update_lru(int way, u32*lru)
{
   if (way == 3)
   {
      *lru = *lru | 0xb;//set bits 3, 1, 0
      return;
   }
   else if (way == 2)
   {
      *lru = *lru & 0x3E;//clear bit 0
      *lru = *lru | 0x14;//set bits 4 and 2
      return;
   }
   else if (way == 1)
   {
      *lru = *lru | (1 << 5);//set bit 5
      *lru = *lru & 0x39;//clear bits 2 and 1
      return;
   }
   else
   {
      *lru = *lru & 0x7;//clear bits 5, 4, 3
      return;
   }
}

static INLINE int select_way_to_replace(SH2_struct *sh, u32 lru)
{
   if (sh->onchip.CCR & (1 << 3))//2-way mode
   {
      if ((lru & 1) == 1)
         return 2;
      else
         return 3;
   }
   else
   {
      if ((lru & 0x38) == 0x38)//bits 5, 4, 3 must be 1
         return 0;
      else if ((lru & 0x26) == 0x6)//bit 5 must be zero. bits 2 and 1 must be 1
         return 1;
      else if ((lru & 0x15) == 1)//bits 4, 2 must be zero. bit 0 must be 1
         return 2;
      else if ((lru & 0xB) == 0)//bits 3, 1, 0 must be zero
         return 3;
   }

   //should not happen
   return 0;
}
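
//Example: in 4-way mode a freshly cleared entry has lru == 0, so bits 3, 1
//and 0 are all zero and way 3 is replaced first. In 2-way mode (CCR bit 3
//set) only ways 2 and 3 take part, and lru bit 0 alone picks between them.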

//values are from console measurements and include extra delays;
//the delay is 0 if the measured cycle count is 7 or less, otherwise 7 is subtracted
#define ADJUST_CYCLES(n) ((n) <= 7 ? 0 : ((n) - 7))
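
//Example: a byte read from BIOS ROM measured 15 cycles on hardware, so the
//emulator adds ADJUST_CYCLES(15) == 8 extra cycles on top of the base cost.
//The tables below key on (addr >> 16) & 0xFFF, e.g. 0x05C00000 -> 0x5c0,
//which falls in the vdp1 ram row.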

int get_cache_through_timing_read_byte_word(u32 addr)
{
   addr = (addr >> 16) & 0xFFF;

   if (addr <= 0x00f)//bios
      return ADJUST_CYCLES(15);
   else if (addr >= 0x010 && addr <= 0x017)//smpc
      return ADJUST_CYCLES(15);
   else if (addr >= 0x018 && addr <= 0x01f)//bup
      return ADJUST_CYCLES(1);
   else if (addr >= 0x020 && addr <= 0x02f)//lwram
      return ADJUST_CYCLES(14);
   //ignore input capture
   else if (addr >= 0x200 && addr <= 0x3ff)//cs0
      return ADJUST_CYCLES(1);
   else if (addr >= 0x400 && addr <= 0x4ff)//cs1
      return ADJUST_CYCLES(1);
   else if (addr >= 0x580 && addr <= 0x58f)//cs2
      return ADJUST_CYCLES(24);
   else if (addr >= 0x5a0 && addr <= 0x5af)//sound ram
      return ADJUST_CYCLES(53);
   else if (addr >= 0x5b0 && addr <= 0x5bf)//scsp regs
      return ADJUST_CYCLES(52);
   else if (addr >= 0x5c0 && addr <= 0x5c7)//vdp1 ram
      return ADJUST_CYCLES(51);
   else if (addr >= 0x5c8 && addr <= 0x5cf)//vdp1 fb
      return ADJUST_CYCLES(51);
   else if (addr >= 0x5d0 && addr <= 0x5d7)//vdp1 regs
      return ADJUST_CYCLES(35);
   else if (addr >= 0x5e0 && addr <= 0x5ef)//vdp2 ram
      return ADJUST_CYCLES(44);
   else if (addr >= 0x5f0 && addr <= 0x5f7)//vdp2 color
      return ADJUST_CYCLES(44);
   else if (addr >= 0x5f8 && addr <= 0x5fb)//vdp2 regs
      return ADJUST_CYCLES(44);
   else if (addr == 0x5fe)//scu
      return ADJUST_CYCLES(14);
   else if (addr >= 0x600 && addr <= 0x7ff)//hwram
      return ADJUST_CYCLES(14);

   return 0;
}

int get_cache_through_timing_read_long(u32 addr)
{
   addr = (addr >> 16) & 0xFFF;

   if (addr <= 0x00f)//bios
      return ADJUST_CYCLES(23);
   else if (addr >= 0x010 && addr <= 0x017)//smpc
      return ADJUST_CYCLES(23);
   else if (addr >= 0x018 && addr <= 0x01f)//bup
      return ADJUST_CYCLES(1);
   else if (addr >= 0x020 && addr <= 0x02f)//lwram
      return ADJUST_CYCLES(21);
   //ignore input capture
   else if (addr >= 0x200 && addr <= 0x3ff)//cs0
      return ADJUST_CYCLES(1);
   else if (addr >= 0x400 && addr <= 0x4ff)//cs1
      return ADJUST_CYCLES(1);
   else if (addr >= 0x580 && addr <= 0x58f)//cs2
      return ADJUST_CYCLES(24);
   else if (addr >= 0x5a0 && addr <= 0x5af)//sound ram
      return ADJUST_CYCLES(53);
   else if (addr >= 0x5b0 && addr <= 0x5bf)//scsp regs
      return ADJUST_CYCLES(52);
   else if (addr >= 0x5c0 && addr <= 0x5c7)//vdp1 ram
      return ADJUST_CYCLES(51);
   else if (addr >= 0x5c8 && addr <= 0x5cf)//vdp1 fb
      return ADJUST_CYCLES(51);
   else if (addr >= 0x5d0 && addr <= 0x5d7)//vdp1 regs
      return ADJUST_CYCLES(35);
   else if (addr >= 0x5e0 && addr <= 0x5ef)//vdp2 ram
      return ADJUST_CYCLES(44);
   else if (addr >= 0x5f0 && addr <= 0x5f7)//vdp2 color
      return ADJUST_CYCLES(44);
   else if (addr >= 0x5f8 && addr <= 0x5fb)//vdp2 regs
      return ADJUST_CYCLES(44);
   else if (addr == 0x5fe)//scu
      return ADJUST_CYCLES(14);
   else if (addr >= 0x600 && addr <= 0x7ff)//hwram
      return ADJUST_CYCLES(14);

   return 0;
}

int get_cache_through_timing_write_byte_word(u32 addr)
{
   addr = (addr >> 16) & 0xFFF;

   if (addr <= 0x00f)//bios
      return ADJUST_CYCLES(8);
   else if (addr >= 0x010 && addr <= 0x017)//smpc
      return ADJUST_CYCLES(8);
   else if (addr >= 0x018 && addr <= 0x01f)//bup
      return ADJUST_CYCLES(1);
   else if (addr >= 0x020 && addr <= 0x02f)//lwram
      return ADJUST_CYCLES(7);
   //ignore input capture
   else if (addr >= 0x200 && addr <= 0x3ff)//cs0
      return ADJUST_CYCLES(1);
   else if (addr >= 0x400 && addr <= 0x4ff)//cs1
      return ADJUST_CYCLES(1);
   else if (addr >= 0x580 && addr <= 0x58f)//cs2
      return ADJUST_CYCLES(7);
   else if (addr >= 0x5a0 && addr <= 0x5af)//sound ram
      return ADJUST_CYCLES(19);
   else if (addr >= 0x5b0 && addr <= 0x5bf)//scsp regs
      return ADJUST_CYCLES(19);
   else if (addr >= 0x5c0 && addr <= 0x5c7)//vdp1 ram
      return ADJUST_CYCLES(11);
   else if (addr >= 0x5c8 && addr <= 0x5cf)//vdp1 fb
      return ADJUST_CYCLES(11);
   else if (addr >= 0x5d0 && addr <= 0x5d7)//vdp1 regs
      return ADJUST_CYCLES(11);
   else if (addr >= 0x5e0 && addr <= 0x5ef)//vdp2 ram
      return ADJUST_CYCLES(7);
   else if (addr >= 0x5f0 && addr <= 0x5f7)//vdp2 color
      return ADJUST_CYCLES(8);
   else if (addr >= 0x5f8 && addr <= 0x5fb)//vdp2 regs
      return ADJUST_CYCLES(7);
   else if (addr == 0x5fe)//scu
      return ADJUST_CYCLES(7);
   else if (addr >= 0x600 && addr <= 0x7ff)//hwram
      return ADJUST_CYCLES(7);

   return 0;
}

int get_cache_through_timing_write_long(u32 addr)
{
   addr = (addr >> 16) & 0xFFF;

   if (addr <= 0x00f)//bios
      return ADJUST_CYCLES(16);
   else if (addr >= 0x010 && addr <= 0x017)//smpc
      return ADJUST_CYCLES(16);
   else if (addr >= 0x018 && addr <= 0x01f)//bup
      return ADJUST_CYCLES(1);
   else if (addr >= 0x020 && addr <= 0x02f)//lwram
      return ADJUST_CYCLES(14);
   //ignore input capture
   else if (addr >= 0x200 && addr <= 0x3ff)//cs0
      return ADJUST_CYCLES(1);
   else if (addr >= 0x400 && addr <= 0x4ff)//cs1
      return ADJUST_CYCLES(1);
   else if (addr >= 0x580 && addr <= 0x58f)//cs2
      return ADJUST_CYCLES(14);
   else if (addr >= 0x5a0 && addr <= 0x5af)//sound ram
      return ADJUST_CYCLES(33);
   else if (addr >= 0x5b0 && addr <= 0x5bf)//scsp regs
      return ADJUST_CYCLES(32);
   else if (addr >= 0x5c0 && addr <= 0x5c7)//vdp1 ram
      return ADJUST_CYCLES(12);
   else if (addr >= 0x5c8 && addr <= 0x5cf)//vdp1 fb
      return ADJUST_CYCLES(12);
   else if (addr >= 0x5d0 && addr <= 0x5d7)//vdp1 regs
      return ADJUST_CYCLES(11);
   else if (addr >= 0x5e0 && addr <= 0x5ef)//vdp2 ram
      return ADJUST_CYCLES(7);
   else if (addr >= 0x5f0 && addr <= 0x5f7)//vdp2 color
      return ADJUST_CYCLES(8);
   else if (addr >= 0x5f8 && addr <= 0x5fb)//vdp2 regs
      return ADJUST_CYCLES(7);
   else if (addr == 0x5fe)//scu
      return ADJUST_CYCLES(7);
   else if (addr >= 0x600 && addr <= 0x7ff)//hwram
      return ADJUST_CYCLES(7);

   return 0;
}
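
//The SH2 cache is write-through with no write-allocate: on a write hit the
//cached line is updated and the write still goes out to memory; on a write
//miss the line is not fetched, the write simply goes straight to memory.
//That is why every path in the write functions below ends in a
//MappedMemoryWrite*Nocache() call.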
void cache_memory_write_b(SH2_struct *sh, cache_enty * ca, u32 addr, u8 val){

   switch (addr & AREA_MASK){
   case CACHE_USE:
   {
      u32 tagaddr = 0;
      u32 entry = 0;
      if (ca->enable == 0){
         MappedMemoryWriteByteNocache(sh, addr, val);
         return;
      }
      tagaddr = (addr & TAG_MASK);
      entry = (addr & ENTRY_MASK) >> ENTRY_SHIFT;
      if (ca->way[0][entry].v && ca->way[0][entry].tag == tagaddr){
         ca->way[0][entry].data[addr&LINE_MASK] = val;
         update_lru(0, &ca->lru[entry]);
      }
      else if (ca->way[1][entry].v && ca->way[1][entry].tag == tagaddr){
         ca->way[1][entry].data[addr&LINE_MASK] = val;
         update_lru(1, &ca->lru[entry]);
      }
      else if (ca->way[2][entry].v && ca->way[2][entry].tag == tagaddr){
         ca->way[2][entry].data[addr&LINE_MASK] = val;
         update_lru(2, &ca->lru[entry]);
      }
      else if (ca->way[3][entry].v && ca->way[3][entry].tag == tagaddr){
         ca->way[3][entry].data[addr&LINE_MASK] = val;
         update_lru(3, &ca->lru[entry]);
      }
      MappedMemoryWriteByteNocache(sh, addr, val);
   }
   break;
   case CACHE_THROUGH:
      sh->cycles += get_cache_through_timing_write_byte_word(addr);
      MappedMemoryWriteByteNocache(sh, addr, val);
      break;
   default:
      MappedMemoryWriteByteNocache(sh, addr, val);
      break;
   }
}

void cache_memory_write_w(SH2_struct *sh, cache_enty * ca, u32 addr, u16 val){

   switch (addr & AREA_MASK){
   case CACHE_USE:
   {
      u32 tagaddr = 0;
      u32 entry = 0;
      if (ca->enable == 0){
         MappedMemoryWriteWordNocache(sh, addr, val);
         return;
      }

      tagaddr = (addr & TAG_MASK);
      entry = (addr & ENTRY_MASK) >> ENTRY_SHIFT;
      if (ca->way[0][entry].v && ca->way[0][entry].tag == tagaddr){
         ca->way[0][entry].data[addr&LINE_MASK] = val >> 8;
         ca->way[0][entry].data[(addr&LINE_MASK) + 1] = val;
         update_lru(0, &ca->lru[entry]);
      }
      else if (ca->way[1][entry].v && ca->way[1][entry].tag == tagaddr){
         ca->way[1][entry].data[addr&LINE_MASK] = val >> 8;
         ca->way[1][entry].data[(addr&LINE_MASK) + 1] = val;
         update_lru(1, &ca->lru[entry]);
      }
      else if (ca->way[2][entry].v && ca->way[2][entry].tag == tagaddr){
         ca->way[2][entry].data[addr&LINE_MASK] = val >> 8;
         ca->way[2][entry].data[(addr&LINE_MASK) + 1] = val;
         update_lru(2, &ca->lru[entry]);
      }
      else if (ca->way[3][entry].v && ca->way[3][entry].tag == tagaddr){
         ca->way[3][entry].data[addr&LINE_MASK] = val >> 8;
         ca->way[3][entry].data[(addr&LINE_MASK) + 1] = val;
         update_lru(3, &ca->lru[entry]);
      }

      // write through
      MappedMemoryWriteWordNocache(sh, addr, val);
   }
   break;
   case CACHE_THROUGH:
      sh->cycles += get_cache_through_timing_write_byte_word(addr);
      MappedMemoryWriteWordNocache(sh, addr, val);
      break;
   default:
      MappedMemoryWriteWordNocache(sh, addr, val);
      break;
   }
}

void cache_memory_write_l(SH2_struct *sh, cache_enty * ca, u32 addr, u32 val){

   switch (addr & AREA_MASK){
   case CACHE_PURGE://associative purge
   {
      int i;
      u32 tagaddr = (addr & TAG_MASK);
      u32 entry = (addr & ENTRY_MASK) >> ENTRY_SHIFT;
      for (i = 0; i < 4; i++)//check all four ways; the previous "i < 3" bound skipped way 3
      {
         if (ca->way[i][entry].tag == tagaddr)
         {
            //only the v bit is changed, the rest of the data remains
            ca->way[i][entry].v = 0;
            break;
         }
      }
   }
   break;
   case CACHE_USE:
   {
      u32 tagaddr = 0;
      u32 entry = 0;
      if (ca->enable == 0){
         MappedMemoryWriteLongNocache(sh, addr, val);
         return;
      }

      tagaddr = (addr & TAG_MASK);
      entry = (addr & ENTRY_MASK) >> ENTRY_SHIFT;
      if (ca->way[0][entry].v && ca->way[0][entry].tag == tagaddr){
         ca->way[0][entry].data[(addr&LINE_MASK)] = ((val >> 24) & 0xFF);
         ca->way[0][entry].data[(addr&LINE_MASK) + 1] = ((val >> 16) & 0xFF);
         ca->way[0][entry].data[(addr&LINE_MASK) + 2] = ((val >> 8) & 0xFF);
         ca->way[0][entry].data[(addr&LINE_MASK) + 3] = ((val >> 0) & 0xFF);
         update_lru(0, &ca->lru[entry]);
      }
      else if (ca->way[1][entry].v && ca->way[1][entry].tag == tagaddr){
         ca->way[1][entry].data[(addr&LINE_MASK)] = ((val >> 24) & 0xFF);
         ca->way[1][entry].data[(addr&LINE_MASK) + 1] = ((val >> 16) & 0xFF);
         ca->way[1][entry].data[(addr&LINE_MASK) + 2] = ((val >> 8) & 0xFF);
         ca->way[1][entry].data[(addr&LINE_MASK) + 3] = ((val >> 0) & 0xFF);
         update_lru(1, &ca->lru[entry]);
      }
      else if (ca->way[2][entry].v && ca->way[2][entry].tag == tagaddr){
         ca->way[2][entry].data[(addr&LINE_MASK)] = ((val >> 24) & 0xFF);
         ca->way[2][entry].data[(addr&LINE_MASK) + 1] = ((val >> 16) & 0xFF);
         ca->way[2][entry].data[(addr&LINE_MASK) + 2] = ((val >> 8) & 0xFF);
         ca->way[2][entry].data[(addr&LINE_MASK) + 3] = ((val >> 0) & 0xFF);
         update_lru(2, &ca->lru[entry]);
      }
      else if (ca->way[3][entry].v && ca->way[3][entry].tag == tagaddr){
         ca->way[3][entry].data[(addr&LINE_MASK)] = ((val >> 24) & 0xFF);
         ca->way[3][entry].data[(addr&LINE_MASK) + 1] = ((val >> 16) & 0xFF);
         ca->way[3][entry].data[(addr&LINE_MASK) + 2] = ((val >> 8) & 0xFF);
         ca->way[3][entry].data[(addr&LINE_MASK) + 3] = ((val >> 0) & 0xFF);
         update_lru(3, &ca->lru[entry]);
      }

      // write through
      MappedMemoryWriteLongNocache(sh, addr, val);
   }
   break;
   case CACHE_THROUGH:
      sh->cycles += get_cache_through_timing_write_long(addr);
      MappedMemoryWriteLongNocache(sh, addr, val);
      break;
   default:
      MappedMemoryWriteLongNocache(sh, addr, val);
      break;
   }
}

u32 sh2_cache_refill_read(SH2_struct *sh, u32 addr)
{
   addr &= 0xfffffff;

   if (addr <= 0x00fffff)
   {
      //bios
      return BiosRomMemoryReadLong(addr);
   }
   else if (addr >= 0x0100000 && addr <= 0x017ffff)
   {
      //smpc
      return SmpcReadLong(MSH2, addr);
   }
   else if (addr >= 0x0180000 && addr <= 0x01fffff)
   {
      //backup ram
      return BupRamMemoryReadLong(addr);
   }
   else if (addr >= 0x0200000 && addr <= 0x02fffff)
   {
      //low wram
      return LowWramMemoryReadLong(addr);
   }
   else if (addr >= 0x1000000 && addr <= 0x17fffff)
   {
      //ssh2 input capture
      return UnhandledMemoryReadLong(addr);
   }
   else if (addr >= 0x1800000 && addr <= 0x1ffffff)
   {
      //msh2 input capture
      return UnhandledMemoryReadLong(addr);
   }
   else if (addr >= 0x2000000 && addr <= 0x3ffffff)
   {
      //cs0
      return CartridgeArea->Cs0ReadLong(MSH2, addr);
   }
   else if (addr >= 0x4000000 && addr <= 0x4ffffff)
   {
      return Cs1ReadLong(MSH2, addr);
   }
   else if (addr >= 0x5000000 && addr <= 0x57fffff)
   {
      //dummy
   }
   else if (addr >= 0x5800000 && addr <= 0x58fffff)
   {
      //cs2
      if (yabsys.use_cd_block_lle)
      {
         return ygr_a_bus_read_long(addr);
      }
      else
      {
         return Cs2ReadLong(MSH2, addr);
      }
   }
   else if (addr >= 0x5a00000 && addr <= 0x5afffff)
   {
      //sound ram
      return SoundRamReadLong(addr);
   }
   else if (addr >= 0x5b00000 && addr <= 0x5bfffff)
   {
      //scsp regs
      return ScspReadLong(addr);
   }
   else if (addr >= 0x5c00000 && addr <= 0x5c7ffff)
   {
      //vdp1 ram
      return Vdp1RamReadLong(addr);
   }
   else if (addr >= 0x5c80000 && addr <= 0x5cfffff)
   {
      //vdp1 framebuffer
      return Vdp1FrameBufferReadLong(addr);
   }
   else if (addr >= 0x5d00000 && addr <= 0x5d7ffff)
   {
      //vdp1 registers
      return Vdp1ReadLong(addr);
   }
   else if (addr >= 0x5e00000 && addr <= 0x5efffff)
   {
      //vdp2 ram
      return Vdp2RamReadLong(addr);
   }
   else if (addr >= 0x5f00000 && addr <= 0x5f7ffff)
   {
      //vdp2 color ram
      return Vdp2ColorRamReadLong(addr);
   }
   else if (addr >= 0x5f80000 && addr <= 0x5fbffff)
   {
      //vdp2 registers
      return Vdp2ReadLong(addr);
   }
   else if (addr >= 0x5fe0000 && addr <= 0x5feffff)
   {
      //scu registers
      return ScuReadLong(addr);
   }
   else if (addr >= 0x6000000 && addr <= 0x7ffffff)
   {
      //high wram
      return HighWramMemoryReadLong(addr);
   }

   return 0;
}

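//Fill one 16-byte cache line with four long reads from the start of the
//line (addr & 0xFFFFFFF0), charging a fixed 4-cycle miss penalty. The reads
//go through sh2_cache_refill_read() above, so no cache state is touched.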
void sh2_refill_cache(SH2_struct *sh, cache_enty * ca, int lruway, u32 entry, u32 addr)
{
   int i;

   sh->cycles += 4;

   for (i = 0; i < 16; i += 4) {
      u32 val = sh2_cache_refill_read(sh, (addr & 0xFFFFFFF0) + i);
      ca->way[lruway][entry].data[i + 0] = (val >> 24) & 0xff;
      ca->way[lruway][entry].data[i + 1] = (val >> 16) & 0xff;
      ca->way[lruway][entry].data[i + 2] = (val >> 8) & 0xff;
      ca->way[lruway][entry].data[i + 3] = (val >> 0) & 0xff;
   }
}

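//Read path for the three functions below: on a hit the byte/word/long is
//returned from the cached line and the LRU field is refreshed; on a miss the
//victim way is chosen from the LRU bits, the line is refilled from memory,
//marked valid, and the data is then served from the cache.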
u8 cache_memory_read_b(SH2_struct *sh, cache_enty * ca, u32 addr){
   switch (addr & AREA_MASK){
   case CACHE_USE:
   {
      u32 tagaddr = 0;
      u32 entry = 0;
      int lruway = 0;
      if (ca->enable == 0){
         return MappedMemoryReadByteNocache(sh, addr);
      }
      tagaddr = (addr & TAG_MASK);
      entry = (addr & ENTRY_MASK) >> ENTRY_SHIFT;
      if (ca->way[0][entry].v && ca->way[0][entry].tag == tagaddr){
         update_lru(0, &ca->lru[entry]);
         return ca->way[0][entry].data[addr&LINE_MASK];
      }
      else if (ca->way[1][entry].v && ca->way[1][entry].tag == tagaddr){
         update_lru(1, &ca->lru[entry]);
         return ca->way[1][entry].data[addr&LINE_MASK];
      }
      else if (ca->way[2][entry].v && ca->way[2][entry].tag == tagaddr){
         update_lru(2, &ca->lru[entry]);
         return ca->way[2][entry].data[addr&LINE_MASK];
      }
      else if (ca->way[3][entry].v && ca->way[3][entry].tag == tagaddr){
         update_lru(3, &ca->lru[entry]);
         return ca->way[3][entry].data[addr&LINE_MASK];
      }
      // cache miss
      lruway = select_way_to_replace(sh, ca->lru[entry]);
      update_lru(lruway, &ca->lru[entry]);
      ca->way[lruway][entry].tag = tagaddr;

      sh2_refill_cache(sh, ca, lruway, entry, addr);

      ca->way[lruway][entry].v = 1; //becomes valid
      return ca->way[lruway][entry].data[addr&LINE_MASK];
   }
   break;
   case CACHE_THROUGH:
      sh->cycles += get_cache_through_timing_read_byte_word(addr);
      return MappedMemoryReadByteNocache(sh, addr);
   default:
      return MappedMemoryReadByteNocache(sh, addr);
   }
   return 0;
}

u16 cache_memory_read_w(SH2_struct *sh, cache_enty * ca, u32 addr){

   switch (addr & AREA_MASK){
   case CACHE_USE:
   {
      u32 tagaddr = 0;
      u32 entry = 0;
      int lruway = 0;
      if (ca->enable == 0){
         return MappedMemoryReadWordNocache(sh, addr);
      }
      tagaddr = (addr & TAG_MASK);
      entry = (addr & ENTRY_MASK) >> ENTRY_SHIFT;
      if (ca->way[0][entry].v && ca->way[0][entry].tag == tagaddr){
         update_lru(0, &ca->lru[entry]);
         return ((u16)(ca->way[0][entry].data[addr&LINE_MASK]) << 8) | ca->way[0][entry].data[(addr&LINE_MASK) + 1];
      }
      else if (ca->way[1][entry].v && ca->way[1][entry].tag == tagaddr){
         update_lru(1, &ca->lru[entry]);
         return ((u16)(ca->way[1][entry].data[addr&LINE_MASK]) << 8) | ca->way[1][entry].data[(addr&LINE_MASK) + 1];
      }
      else if (ca->way[2][entry].v && ca->way[2][entry].tag == tagaddr){
         update_lru(2, &ca->lru[entry]);
         return ((u16)(ca->way[2][entry].data[addr&LINE_MASK]) << 8) | ca->way[2][entry].data[(addr&LINE_MASK) + 1];
      }
      else if (ca->way[3][entry].v && ca->way[3][entry].tag == tagaddr){
         update_lru(3, &ca->lru[entry]);
         return ((u16)(ca->way[3][entry].data[addr&LINE_MASK]) << 8) | ca->way[3][entry].data[(addr&LINE_MASK) + 1];
      }

      // cache miss
      lruway = select_way_to_replace(sh, ca->lru[entry]);
      update_lru(lruway, &ca->lru[entry]);
      ca->way[lruway][entry].tag = tagaddr;

      sh2_refill_cache(sh, ca, lruway, entry, addr);

      ca->way[lruway][entry].v = 1; //becomes valid
      return ((u16)(ca->way[lruway][entry].data[addr&LINE_MASK]) << 8) | ca->way[lruway][entry].data[(addr&LINE_MASK) + 1];
   }
   break;
   case CACHE_THROUGH:
      sh->cycles += get_cache_through_timing_read_byte_word(addr);
      return MappedMemoryReadWordNocache(sh, addr);
   default:
      return MappedMemoryReadWordNocache(sh, addr);
   }
   return 0;
}

u32 cache_memory_read_l(SH2_struct *sh, cache_enty * ca, u32 addr){
   switch (addr & AREA_MASK){
   case CACHE_USE:
   {
      u32 tagaddr = 0;
      u32 entry = 0;
      int lruway = 0;
      if (ca->enable == 0){
         return MappedMemoryReadLongNocache(sh, addr);
      }
      tagaddr = (addr & TAG_MASK);
      entry = (addr & ENTRY_MASK) >> ENTRY_SHIFT;

      if (ca->way[0][entry].v && ca->way[0][entry].tag == tagaddr){
         update_lru(0, &ca->lru[entry]);
         return ((u32)(ca->way[0][entry].data[addr&LINE_MASK]) << 24) |
            ((u32)(ca->way[0][entry].data[(addr&LINE_MASK) + 1]) << 16) |
            ((u32)(ca->way[0][entry].data[(addr&LINE_MASK) + 2]) << 8) |
            ((u32)(ca->way[0][entry].data[(addr&LINE_MASK) + 3]) << 0);
      }
      else if (ca->way[1][entry].v && ca->way[1][entry].tag == tagaddr){
         update_lru(1, &ca->lru[entry]);
         return ((u32)(ca->way[1][entry].data[addr&LINE_MASK]) << 24) |
            ((u32)(ca->way[1][entry].data[(addr&LINE_MASK) + 1]) << 16) |
            ((u32)(ca->way[1][entry].data[(addr&LINE_MASK) + 2]) << 8) |
            ((u32)(ca->way[1][entry].data[(addr&LINE_MASK) + 3]) << 0);
      }
      else if (ca->way[2][entry].v && ca->way[2][entry].tag == tagaddr){
         update_lru(2, &ca->lru[entry]);
         return ((u32)(ca->way[2][entry].data[addr&LINE_MASK]) << 24) |
            ((u32)(ca->way[2][entry].data[(addr&LINE_MASK) + 1]) << 16) |
            ((u32)(ca->way[2][entry].data[(addr&LINE_MASK) + 2]) << 8) |
            ((u32)(ca->way[2][entry].data[(addr&LINE_MASK) + 3]) << 0);
      }
      else if (ca->way[3][entry].v && ca->way[3][entry].tag == tagaddr){
         update_lru(3, &ca->lru[entry]);
         return ((u32)(ca->way[3][entry].data[addr&LINE_MASK]) << 24) |
            ((u32)(ca->way[3][entry].data[(addr&LINE_MASK) + 1]) << 16) |
            ((u32)(ca->way[3][entry].data[(addr&LINE_MASK) + 2]) << 8) |
            ((u32)(ca->way[3][entry].data[(addr&LINE_MASK) + 3]) << 0);
      }
      // cache miss
      lruway = select_way_to_replace(sh, ca->lru[entry]);
      update_lru(lruway, &ca->lru[entry]);
      ca->way[lruway][entry].tag = tagaddr;

      sh2_refill_cache(sh, ca, lruway, entry, addr);

      ca->way[lruway][entry].v = 1; //becomes valid
      return ((u32)(ca->way[lruway][entry].data[addr&LINE_MASK]) << 24) |
         ((u32)(ca->way[lruway][entry].data[(addr&LINE_MASK) + 1]) << 16) |
         ((u32)(ca->way[lruway][entry].data[(addr&LINE_MASK) + 2]) << 8) |
         ((u32)(ca->way[lruway][entry].data[(addr&LINE_MASK) + 3]) << 0);
   }
   break;
   case CACHE_THROUGH:
      sh->cycles += get_cache_through_timing_read_long(addr);
      return MappedMemoryReadLongNocache(sh, addr);
   default:
      return MappedMemoryReadLongNocache(sh, addr);
   }
   return 0;
}
