1 /* R3000 system control coprocessor emulation ("coprocessor zero").
2 Copyright 2001, 2002, 2003 Brian R. Gaeke.
3
4 This file is part of VMIPS.
5
6 VMIPS is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 2 of the License, or (at your
9 option) any later version.
10
11 VMIPS is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License along
17 with VMIPS; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */
19
20 /* Code to implement MIPS coprocessor zero (the "system control
21 * coprocessor"), which provides for address translation and
22 * exception handling.
23 */
24
25 #include "cpzero.h"
26 #include "mapper.h"
27 #include "excnames.h"
28 #include "cpu.h"
29 #include "cpzeroreg.h"
30 #include "intctrl.h"
31 #include "deviceexc.h"
32 #include "error.h"
33 #include "vmips.h"
34 #include "options.h"
35
/* Per-register masks of the bits that are connected for reads, indexed
 * by CP0 register number (see cpzeroreg.h).  A zero entry means the
 * register is not implemented, so it reads as zero (see read_reg). */
static uint32 read_masks[] = {
	Index_MASK, Random_MASK, EntryLo_MASK, 0, Context_MASK,
	PageMask_MASK, Wired_MASK, Error_MASK, BadVAddr_MASK, Count_MASK,
	EntryHi_MASK, Compare_MASK, Status_MASK, Cause_MASK, EPC_MASK,
	PRId_MASK, Config_MASK, LLAddr_MASK, WatchLo_MASK, WatchHi_MASK,
	0, 0, 0, 0, 0, 0, ECC_MASK, CacheErr_MASK, TagLo_MASK, TagHi_MASK,
	ErrorEPC_MASK, 0
};
44
/* Per-register masks of the bits that are connected for writes, indexed
 * by CP0 register number.  Registers that software must not modify are
 * zero here even if readable (e.g. Random, BadVAddr, EPC, PRId); the
 * BadVPN field of Context and the external-interrupt bits of Cause are
 * likewise masked out because only hardware may set them. */
static uint32 write_masks[] = {
	Index_MASK, 0, EntryLo_MASK, 0, Context_MASK & ~Context_BadVPN_MASK,
	PageMask_MASK, Wired_MASK, Error_MASK, 0, Count_MASK,
	EntryHi_MASK, Compare_MASK, Status_MASK,
	Cause_MASK & ~Cause_IP_Ext_MASK, 0, 0, Config_MASK, LLAddr_MASK,
	WatchLo_MASK, WatchHi_MASK, 0, 0, 0, 0, 0, 0, ECC_MASK,
	CacheErr_MASK, TagLo_MASK, TagHi_MASK, ErrorEPC_MASK, 0
};
53
54 /* Reset (warm or cold) */
55 void
reset(void)56 CPZero::reset(void)
57 {
58 int r;
59 for (r = 0; r < 16; r++) {
60 #ifdef INTENTIONAL_CONFUSION
61 reg[r] = random() & (read_masks[r] | write_masks[r]);
62 #endif /* INTENTIONAL_CONFUSION */
63 }
64 /* Turn off any randomly-set pending-interrupt bits, as these
65 * can impact correctness. */
66 reg[Cause] &= ~Cause_IP_MASK;
67 /* Reset Random register to upper bound (8<=Random<=63) */
68 reg[Random] = Random_UPPER_BOUND << 8;
69 /* Reset Status register: clear KUc, IEc, SwC (i.e., caches are not
70 * switched), TS (TLB shutdown has not occurred), and set
71 * BEV (Bootstrap exception vectors ARE in effect).
72 */
73 reg[Status] = (reg[Status] | Status_DS_BEV_MASK) &
74 ~(Status_KUc_MASK | Status_IEc_MASK | Status_DS_SwC_MASK |
75 Status_DS_TS_MASK);
76 reg[PRId] = 0x00000230; /* MIPS R3000A */
77 }
78
79 /* Yow!! Are we in KERNEL MODE yet?? ...Read the Status register. */
80 bool
kernel_mode(void) const81 CPZero::kernel_mode(void) const
82 {
83 return !(reg[Status] & Status_KUc_MASK);
84 }
85
86 void
dump_regs(FILE * f)87 CPZero::dump_regs(FILE *f)
88 {
89 int x;
90 fprintf(f, "CP0 Dump Registers: [ ");
91 for (x = 0; x < 16; x++) {
92 fprintf(f," R%02d=%08x ",x,reg[x]);
93 if (x % 4 == 1) {
94 fputc('\n',f);
95 }
96 }
97 fprintf(f, "]\n");
98 }
99
100 static void
dump_tlb_entry(FILE * f,int index,const TLBEntry & e)101 dump_tlb_entry(FILE *f, int index, const TLBEntry &e)
102 {
103 fprintf(f,"Entry %02d: (%08x%08x) V=%05x A=%02x P=%05x %c%c%c%c\n", index,
104 e.entryHi, e.entryLo, e.vpn()>>12, e.asid()>>6, e.pfn()>>12,
105 e.noncacheable()?'N':'n', e.dirty()?'D':'d',
106 e.valid()?'V':'v', e.global()?'G':'g');
107 }
108
109 void
dump_tlb(FILE * f)110 CPZero::dump_tlb(FILE *f)
111 {
112 int x;
113 fprintf(f, "Dump TLB: [\n");
114 for (x = 0; x < TLB_ENTRIES; ++x)
115 dump_tlb_entry(f, x, tlb[x]);
116 fprintf(f, "]\n");
117 }
118
/* Convenience wrapper: dump the CP0 registers, then the TLB, to F. */
void
CPZero::dump_regs_and_tlb(FILE *f)
{
	dump_regs(f);
	dump_tlb(f);
}
125
126 /* Request for address translation (possibly using the TLB). */
127 uint32
address_trans(uint32 vaddr,int mode,bool * cacheable,DeviceExc * client)128 CPZero::address_trans(uint32 vaddr, int mode, bool *cacheable,
129 DeviceExc *client)
130 {
131 if (kernel_mode()) {
132 switch(vaddr & KSEG_SELECT_MASK) {
133 case KSEG0:
134 *cacheable = true;
135 return vaddr - KSEG0_CONST_TRANSLATION;
136 case KSEG1:
137 *cacheable = false;
138 return vaddr - KSEG1_CONST_TRANSLATION;
139 case KSEG2:
140 case KSEG2_top:
141 return tlb_translate(KSEG2, vaddr, mode, cacheable, client);
142 default: /* KUSEG */
143 return tlb_translate(KUSEG, vaddr, mode, cacheable, client);
144 }
145 }
146
147 /* user mode */
148 if (vaddr & KERNEL_SPACE_MASK) {
149 /* Can't go there. */
150 client->exception(mode == DATASTORE ? AdES : AdEL, mode);
151 return 0xffffffff;
152 } else /* user space address */ {
153 return tlb_translate(KUSEG, vaddr, mode, cacheable, client);
154 }
155 }
156
157 void
load_addr_trans_excp_info(uint32 va,uint32 vpn,TLBEntry * match)158 CPZero::load_addr_trans_excp_info(uint32 va, uint32 vpn, TLBEntry *match)
159 {
160 reg[BadVAddr] = va;
161 reg[Context] = (reg[Context] & ~Context_BadVPN_MASK) | ((va & 0x7ffff000) >> 10);
162 reg[EntryHi] = (va & EntryHi_VPN_MASK) | (reg[EntryHi] & ~EntryHi_VPN_MASK);
163 }
164
165 int
find_matching_tlb_entry(uint32 vpn,uint32 asid)166 CPZero::find_matching_tlb_entry(uint32 vpn, uint32 asid)
167 {
168 for (uint16 x = 0; x < TLB_ENTRIES; x++)
169 if (tlb[x].vpn() == vpn && (tlb[x].global() || tlb[x].asid() == asid))
170 return x;
171 return -1;
172 }
173
/* Translate VADDR through the TLB for segment SEG (KUSEG or KSEG2)
 * under access type MODE, posting exceptions through CLIENT.
 * On success, sets *CACHEABLE from the matching entry's N bit and
 * returns the physical address.  On failure, records fault state,
 * raises Mod (write to clean page) or TLBS/TLBL (miss or invalid
 * entry), and returns 0xffffffff.  Also sets the tlb_miss_user flag,
 * which selects the special user-TLB-miss refill vector. */
uint32
CPZero::tlb_translate(uint32 seg, uint32 vaddr, int mode, bool *cacheable,
	DeviceExc *client)
{
	uint32 asid = reg[EntryHi] & EntryHi_ASID_MASK;
	uint32 vpn = vaddr & EntryHi_VPN_MASK;
	int index = find_matching_tlb_entry(vpn, asid);
	TLBEntry *match = (index == -1) ? 0 : &tlb[index];
	tlb_miss_user = false;
	if (match && match->valid()) {
		if (mode == DATASTORE && !match->dirty()) {
			/* TLB Mod exception - write to page not marked "dirty" */
			load_addr_trans_excp_info(vaddr,vpn,match);
			client->exception(Mod, DATASTORE);
			return 0xffffffff;
		} else {
			/* We have a matching TLB entry which is valid. */
			*cacheable = !match->noncacheable();
			/* Physical address: PFN plus the offset bits of VADDR. */
			return match->pfn() | (vaddr & ~EntryHi_VPN_MASK);
		}
	}
	/* If we got here, then there was no matching tlb entry, or it wasn't
	 * valid. Use special refill handler vector for user TLB miss. */
	tlb_miss_user = (seg == KUSEG && !match);
	load_addr_trans_excp_info(vaddr,vpn,match);
	client->exception(mode == DATASTORE ? TLBS : TLBL, mode);
	if (machine->opt->option("excmsg")->flag) {
		/* If exception spew is on, print the fault address. It
		 * is just too handy to have. */
		fprintf(stderr, " %s TLB miss at address 0x%x\n",
			tlb_miss_user ? "User" : "Kernel", vaddr);
	}
	return 0xffffffff;
}
208
209 uint32
read_reg(const uint16 r)210 CPZero::read_reg(const uint16 r)
211 {
212 if (r == Cause) {
213 /* Update IP field of Cause register. */
214 reg[Cause] = (reg[Cause] & ~Cause_IP_MASK) | getIP();
215 }
216 /* This ensures that non-existent CP0 registers read as zero. */
217 return reg[r] & read_masks[r];
218 }
219
220 void
write_reg(const uint16 r,const uint32 data)221 CPZero::write_reg(const uint16 r, const uint32 data)
222 {
223 /* This preserves the bits which are readable but not writable, and writes
224 * the bits which are writable with new data, thus making it suitable
225 * for mtc0-type operations. If you want to write all the bits which
226 * are _connected_, use: reg[r] = new_data & write_masks[r]; . */
227 reg[r] = (reg[r] & (read_masks[r] & ~write_masks[r]))
228 | (data & write_masks[r]);
229 }
230
231 void
mfc0_emulate(uint32 instr,uint32 pc)232 CPZero::mfc0_emulate(uint32 instr, uint32 pc)
233 {
234 cpu->put_reg (CPU::rt (instr), read_reg (CPU::rd (instr)));
235 }
236
237 void
mtc0_emulate(uint32 instr,uint32 pc)238 CPZero::mtc0_emulate(uint32 instr, uint32 pc)
239 {
240 write_reg (CPU::rd (instr), cpu->get_reg (CPU::rt (instr)));
241 }
242
243 void
bc0x_emulate(uint32 instr,uint32 pc)244 CPZero::bc0x_emulate(uint32 instr, uint32 pc)
245 {
246 uint16 condition = CPU::rt (instr);
247 switch (condition) {
248 case 0: /* bc0f */ if (! cpCond ()) { cpu->branch (instr, pc); } break;
249 case 1: /* bc0t */ if (cpCond ()) { cpu->branch (instr, pc); } break;
250 case 2: /* bc0fl - not valid, but not reserved(A-17, H&K) - no-op. */ break;
251 case 3: /* bc0tl - not valid, but not reserved(A-21, H&K) - no-op. */ break;
252 default: cpu->exception (RI); break; /* reserved */
253 }
254 }
255
256 void
tlbr_emulate(uint32 instr,uint32 pc)257 CPZero::tlbr_emulate(uint32 instr, uint32 pc)
258 {
259 reg[EntryHi] = (tlb[(reg[Index] & Index_Index_MASK) >> 8].entryHi) &
260 write_masks[EntryHi];
261 reg[EntryLo] = (tlb[(reg[Index] & Index_Index_MASK) >> 8].entryLo) &
262 write_masks[EntryLo];
263 }
264
265 void
tlb_write(unsigned index)266 CPZero::tlb_write(unsigned index)
267 {
268 tlb[index].entryHi = read_reg(EntryHi);
269 tlb[index].entryLo = read_reg(EntryLo);
270 }
271
272 void
tlbwi_emulate(uint32 instr,uint32 pc)273 CPZero::tlbwi_emulate(uint32 instr, uint32 pc)
274 {
275 tlb_write ((reg[Index] & Index_Index_MASK) >> 8);
276 }
277
278 void
tlbwr_emulate(uint32 instr,uint32 pc)279 CPZero::tlbwr_emulate(uint32 instr, uint32 pc)
280 {
281 tlb_write ((reg[Random] & Random_Random_MASK) >> 8);
282 }
283
284 void
tlbp_emulate(uint32 instr,uint32 pc)285 CPZero::tlbp_emulate(uint32 instr, uint32 pc)
286 {
287 uint32 vpn = reg[EntryHi] & EntryHi_VPN_MASK;
288 uint32 asid = reg[EntryHi] & EntryHi_ASID_MASK;
289 int idx = find_matching_tlb_entry (vpn, asid);
290 if (idx != -1)
291 reg[Index] = (idx << 8);
292 else
293 reg[Index] = (1 << 31);
294 }
295
296 void
rfe_emulate(uint32 instr,uint32 pc)297 CPZero::rfe_emulate(uint32 instr, uint32 pc)
298 {
299 reg[Status] = (reg[Status] & 0xfffffff0) | ((reg[Status] >> 2) & 0x0f);
300 }
301
302 void
cpzero_emulate(uint32 instr,uint32 pc)303 CPZero::cpzero_emulate(uint32 instr, uint32 pc)
304 {
305 uint16 rs = CPU::rs (instr);
306 if (CPU::rs (instr) > 15) {
307 switch (CPU::funct (instr)) {
308 case 1: tlbr_emulate (instr, pc); break;
309 case 2: tlbwi_emulate (instr, pc); break;
310 case 6: tlbwr_emulate (instr, pc); break;
311 case 8: tlbp_emulate (instr, pc); break;
312 case 16: rfe_emulate (instr, pc); break;
313 default: cpu->exception (RI, ANY, 0); break;
314 }
315 } else {
316 switch (rs) {
317 case 0: mfc0_emulate (instr, pc); break;
318 case 2: cpu->exception (RI, ANY, 0); break; /* cfc0 - reserved */
319 case 4: mtc0_emulate (instr, pc); break;
320 case 6: cpu->exception (RI, ANY, 0); break; /* ctc0 - reserved */
321 case 8: bc0x_emulate (instr,pc); break;
322 default: cpu->exception (RI, ANY, 0); break;
323 }
324 }
325 }
326
/* Step the Random register to its next value, cycling downward through
 * the range [Random_LOWER_BOUND, Random_UPPER_BOUND] and wrapping from
 * the lower bound back to the upper bound. */
void
CPZero::adjust_random(void)
{
	/* For initial == 12, lower bound == 8, upper bound == 63, the
	 * sequence looks like this:
	 * 12 11 10 9 8 63 62 61 60 ... 12 11 10 9 8 63 ... (x)
	 * 51 52 53 54 55 0 1 2 3 ... 51 52 53 54 55 0 ... (63 - x)
	 */
	/* Random lives in bits 13..8 of the register. */
	int32 r = (int32) (reg[Random] >> 8);
	/* Decrement-with-wrap, expressed as modular arithmetic on the
	 * distance from the upper bound. */
	r = -(((Random_UPPER_BOUND - r + 1) %
	       (Random_UPPER_BOUND - Random_LOWER_BOUND + 1)) -
	      Random_UPPER_BOUND);
	reg[Random] = (uint32) (r << 8);
}
341
342 uint32
getIP(void)343 CPZero::getIP(void)
344 {
345 uint32 HwIP = 0, IP = 0;
346 if (intc != NULL) {
347 /* Check for a hardware interrupt. */
348 HwIP = intc->calculateIP();
349 }
350 IP = (reg[Cause] & Cause_IP_SW_MASK) | HwIP;
351 return IP;
352 }
353
354 void
enter_exception(uint32 pc,uint32 excCode,uint32 ce,bool dly)355 CPZero::enter_exception(uint32 pc, uint32 excCode, uint32 ce, bool dly)
356 {
357 /* Save exception PC in EPC. */
358 reg[EPC] = pc;
359 /* Disable interrupts and enter Kernel mode. */
360 reg[Status] = (reg[Status] & ~Status_KU_IE_MASK) |
361 ((reg[Status] & Status_KU_IE_MASK) << 2);
362 /* Clear Cause register BD, CE, and ExcCode fields. */
363 reg[Cause] &= ~(Cause_BD_MASK|Cause_CE_MASK|Cause_ExcCode_MASK);
364 /* Set Cause register CE field if this is a Coprocessor
365 * Unusable exception. (If we are passed ce=-1 we don't want
366 * to toggle bits in Cause.) */
367 if (excCode == CpU) {
368 reg[Cause] |= ((ce & 0x3) << 28);
369 }
370 /* Update IP, BD, ExcCode fields of Cause register. */
371 reg[Cause] &= ~Cause_IP_MASK;
372 reg[Cause] |= getIP () | (dly << 31) | (excCode << 2);
373 }
374
375 bool
use_boot_excp_address(void)376 CPZero::use_boot_excp_address(void)
377 {
378 return (reg[Status] & Status_DS_BEV_MASK);
379 }
380
381 bool
caches_isolated(void)382 CPZero::caches_isolated(void)
383 {
384 return (reg[Status] & Status_DS_IsC_MASK);
385 }
386
387 bool
caches_swapped(void)388 CPZero::caches_swapped(void)
389 {
390 return (reg[Status] & Status_DS_SwC_MASK);
391 }
392
393 bool
cop_usable(int coprocno)394 CPZero::cop_usable(int coprocno)
395 {
396 switch (coprocno) {
397 case 3: return (reg[Status] & Status_CU3_MASK);
398 case 2: return (reg[Status] & Status_CU2_MASK);
399 case 1: return (reg[Status] & Status_CU1_MASK);
400 case 0: return (reg[Status] & Status_CU0_MASK);
401 default: fatal_error ("Bad coprocno passed to CPZero::cop_usable()");
402 };
403 }
404
405 bool
interrupts_enabled(void) const406 CPZero::interrupts_enabled(void) const
407 {
408 return (reg[Status] & Status_IEc_MASK);
409 }
410
411 bool
interrupt_pending(void)412 CPZero::interrupt_pending(void)
413 {
414 if (! interrupts_enabled())
415 return false; /* Can't very well argue with IEc == 0... */
416 /* Mask IP with the interrupt mask, and return true if nonzero: */
417 return ((getIP () & (reg[Status] & Status_IM_MASK)) != 0);
418 }
419
/* Report the raw Status, BadVAddr, and Cause registers through the
 * given out-parameters (for the debugger interface).  Note that the
 * read masks are NOT applied, and Cause's IP field is not refreshed. */
void
CPZero::read_debug_info(uint32 *status, uint32 *bad, uint32 *cause)
{
	*status = reg[Status];
	*bad = reg[BadVAddr];
	*cause = reg[Cause];
}
427
/* Overwrite the raw Status, BadVAddr, and Cause registers (for the
 * debugger interface).  The write masks are NOT applied, so this can
 * set bits that mtc0 could not. */
void
CPZero::write_debug_info(uint32 status, uint32 bad, uint32 cause)
{
	reg[Status] = status;
	reg[BadVAddr] = bad;
	reg[Cause] = cause;
}
435
436 /* TLB translate VADDR without exceptions. Returns true if a valid
437 * TLB mapping is found, false otherwise. If VADDR has no valid mapping,
438 * PADDR is written with 0xffffffff, otherwise it is written with the
439 * translation.
440 */
441 bool
debug_tlb_translate(uint32 vaddr,uint32 * paddr)442 CPZero::debug_tlb_translate(uint32 vaddr, uint32 *paddr)
443 {
444 bool rv = true;
445
446 if ((!kernel_mode()) && (vaddr & KERNEL_SPACE_MASK)) {
447 *paddr = 0xffffffff;
448 rv = false;
449 } else if (kernel_mode() && (vaddr & KSEG_SELECT_MASK) == KSEG0) {
450 *paddr = vaddr - KSEG0_CONST_TRANSLATION;
451 } else if (kernel_mode() && (vaddr & KSEG_SELECT_MASK) == KSEG1) {
452 *paddr = vaddr - KSEG1_CONST_TRANSLATION;
453 } else /* KUSEG */ {
454 uint32 asid = reg[EntryHi] & EntryHi_ASID_MASK;
455 uint32 vpn = vaddr & EntryHi_VPN_MASK;
456 int index = find_matching_tlb_entry (vpn, asid);
457 TLBEntry *match = (index == -1) ? 0 : &tlb[index];
458 if (!match || !match->valid()) {
459 *paddr = 0xffffffff;
460 rv = false;
461 } else {
462 *paddr = match->pfn() | (vaddr & ~EntryHi_VPN_MASK);
463 }
464 }
465 return rv;
466 }
467