1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2 * Mupen64plus - interrupt.c *
3 * Mupen64Plus homepage: http://code.google.com/p/mupen64plus/ *
4 * Copyright (C) 2002 Hacktarux *
5 * *
6 * This program is free software; you can redistribute it and/or modify *
7 * it under the terms of the GNU General Public License as published by *
8 * the Free Software Foundation; either version 2 of the License, or *
9 * (at your option) any later version. *
10 * *
11 * This program is distributed in the hope that it will be useful, *
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
14 * GNU General Public License for more details. *
15 * *
16 * You should have received a copy of the GNU General Public License *
17 * along with this program; if not, write to the *
18 * Free Software Foundation, Inc., *
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. *
20 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
21
22 #define M64P_CORE_PROTOTYPES 1
23
24 #include "interrupt.h"
25
26 #include <stddef.h>
27 #include <stdint.h>
28 #include <string.h>
29
30 #include "ai/ai_controller.h"
31 #include "api/callbacks.h"
32 #include "api/m64p_types.h"
33 #include "pifbootrom/pifbootrom.h"
34 #include "cached_interp.h"
35 #include "cp0_private.h"
36 #include "dd/dd_controller.h"
37 #include "exception.h"
38 #include "main/main.h"
39 #include "main/device.h"
40 #include "main/savestates.h"
41 #include "mi_controller.h"
42 #include "new_dynarec/new_dynarec.h"
43 #include "pi/pi_controller.h"
44 #include "r4300.h"
45 #include "r4300_core.h"
46 #include "rdp/rdp_core.h"
47 #include "recomp.h"
48 #include "reset.h"
49 #include "rsp/rsp_core.h"
50 #include "si/si_controller.h"
51 #include "vi/vi_controller.h"
52
53 #include <boolean.h>
54
extern int retro_return(bool just_flipping);

/* When non-zero, gen_interrupt() will not run the deferred hard-reset
 * job (see the reset_hard_job check there). */
int interrupt_unsafe_state = 0;

/* One scheduled interrupt: its type (VI_INT, COMPARE_INT, ...) and the
 * CP0 Count value at which it fires. */
struct interrupt_event
{
    int type;
    unsigned int count;
};
64
65
66 /***************************************************************************
67 * Pool of Single Linked List Nodes
68 **************************************************************************/
#define POOL_CAPACITY 16

/* Singly-linked-list node carrying one interrupt event. */
struct node
{
    struct interrupt_event data;
    struct node *next;
};

/* Fixed-capacity allocator for queue nodes: `stack` holds pointers to
 * the free nodes, `index` counts how many nodes are currently in use. */
struct pool
{
    struct node nodes[POOL_CAPACITY];
    struct node* stack[POOL_CAPACITY];
    size_t index;
};
83
84 /* node allocation/deallocation on a given pool */
alloc_node(struct pool * p)85 static struct node* alloc_node(struct pool* p)
86 {
87 /* return NULL if pool is too small */
88 if (p->index >= POOL_CAPACITY)
89 return NULL;
90
91 return p->stack[p->index++];
92 }
93
free_node(struct pool * p,struct node * node)94 static void free_node(struct pool* p, struct node* node)
95 {
96 if (p->index == 0 || node == NULL)
97 return;
98
99 p->stack[--p->index] = node;
100 }
101
102 /* release all nodes */
clear_pool(struct pool * p)103 static void clear_pool(struct pool* p)
104 {
105 size_t i;
106
107 for(i = 0; i < POOL_CAPACITY; ++i)
108 p->stack[i] = &p->nodes[i];
109
110 p->index = 0;
111 }
112
113 /***************************************************************************
114 * Interrupt Queue
115 **************************************************************************/
116
/* The pending-interrupt list (sorted by firing time) plus the node
 * pool backing it. */
struct interrupt_queue
{
    struct pool pool;
    struct node* first;
};

/* The single global interrupt queue. */
static struct interrupt_queue q;
124
clear_queue(struct interrupt_queue * _q)125 static void clear_queue(struct interrupt_queue *_q)
126 {
127 _q->first = NULL;
128 clear_pool(&_q->pool);
129 }
130
/* Set once the SPECIAL_INT handler has run for the current Count
 * period; cleared again when Count is past 0x80000000
 * (see add_interrupt_event_count). */
static int SPECIAL_done = 0;

/*
 * Returns 1 when event time evt1 fires before evt2, 0 otherwise,
 * using wrap-around-safe comparisons against the current CP0 Count.
 * type2 is evt2's event type: an already-serviced SPECIAL_INT
 * (SPECIAL_done set) is treated as firing after evt1.
 */
static int before_event(unsigned int evt1, unsigned int evt2, int type2)
{
    uint32_t count = g_cp0_regs[CP0_COUNT_REG];

    /* evt1 lies in the future (within half the counter range) */
    if(evt1 - count < UINT32_C(0x80000000))
    {
        if(evt2 - count < UINT32_C(0x80000000))
        {
            /* both in the future: the nearer one fires first */
            if((evt1 - count) < (evt2 - count))
                return 1;
            return 0;
        }
        else
        {
            /* evt2 looks like it is (recently) in the past */
            if((count - evt2) < UINT32_C(0x10000000))
            {
                switch(type2)
                {
                    case SPECIAL_INT:
                        if(SPECIAL_done)
                            return 1;
                        break;
                    default:
                        break;
                }
                return 0;
            }
            return 1;
        }
    }
    else return 0;
}
165
add_interrupt_event(int type,unsigned int delay)166 void add_interrupt_event(int type, unsigned int delay)
167 {
168 add_interrupt_event_count(type, g_cp0_regs[CP0_COUNT_REG] + delay);
169 }
170
/* Insert an event of the given type, firing when CP0 Count reaches
 * `count`, keeping the queue sorted by firing time. SPECIAL_INT
 * events always sink past equal/later events; other events keep FIFO
 * order among identical counts. Updates next_interrupt when the new
 * event becomes the queue head. */
void add_interrupt_event_count(int type, unsigned int count)
{
    struct node* event;
    struct node* e;
    int special;

    special = (type == SPECIAL_INT);

    /* Count crossed the halfway point: the SPECIAL_INT for the next
     * wrap-around has not been serviced yet */
    if(g_cp0_regs[CP0_COUNT_REG] > UINT32_C(0x80000000)) SPECIAL_done = 0;

    if (get_event(type))
    {
        DebugMessage(M64MSG_WARNING, "two events of type 0x%x in interrupt queue", type);
    }

    event = alloc_node(&q.pool);
    if (event == NULL)
    {
        DebugMessage(M64MSG_ERROR, "Failed to allocate node for new interrupt event");
        return;
    }

    event->data.count = count;
    event->data.type = type;

    if (q.first == NULL)
    {
        /* empty queue: new event becomes head and next deadline */
        q.first = event;
        event->next = NULL;
        next_interrupt = q.first->data.count;
    }
    else if (before_event(count, q.first->data.count, q.first->data.type) && !special)
    {
        /* fires before the current head: push front, update deadline */
        event->next = q.first;
        q.first = event;
        next_interrupt = q.first->data.count;
    }
    else
    {
        /* walk to the insertion point; `special` forces a full walk so
         * SPECIAL_INT ends up at the tail */
        for(e = q.first;
            e->next != NULL &&
            (!before_event(count, e->next->data.count, e->next->data.type) || special);
            e = e->next);

        if (e->next == NULL)
        {
            e->next = event;
            event->next = NULL;
        }
        else
        {
            /* keep FIFO order among events scheduled at the same count */
            if (!special)
                for(; e->next != NULL && e->next->data.count == count; e = e->next);

            event->next = e->next;
            e->next = event;
        }
    }
}
230
/* Pop the head of the queue and recompute next_interrupt from the new
 * head. next_interrupt becomes 0 when the queue is empty or the new
 * head is overdue by more than half the counter range.
 * NOTE(review): assumes q.first != NULL — callers only invoke this
 * while servicing the head event. */
static void remove_interrupt_event(void)
{
    struct node* e = q.first;
    uint32_t count = g_cp0_regs[CP0_COUNT_REG];

    q.first = e->next;

    free_node(&q.pool, e);

    next_interrupt = (q.first != NULL
            && (q.first->data.count >count
            || (count - q.first->data.count) < UINT32_C(0x80000000)))
        ? q.first->data.count
        : 0;
}
246
get_event(int type)247 unsigned int get_event(int type)
248 {
249 struct node* e = q.first;
250
251 if (e == NULL)
252 return 0;
253
254 if (e->data.type == type)
255 return e->data.count;
256
257 for(; e->next != NULL && e->next->data.type != type; e = e->next);
258
259 return (e->next != NULL)
260 ? e->next->data.count
261 : 0;
262 }
263
get_next_event_type(void)264 int get_next_event_type(void)
265 {
266 return (q.first == NULL)
267 ? 0
268 : q.first->data.type;
269 }
270
remove_event(int type)271 void remove_event(int type)
272 {
273 struct node* to_del;
274 struct node* e = q.first;
275
276 if (e == NULL)
277 return;
278
279 if (e->data.type == type)
280 {
281 q.first = e->next;
282 free_node(&q.pool, e);
283 }
284 else
285 {
286 for(; e->next != NULL && e->next->data.type != type; e = e->next);
287
288 if (e->next != NULL)
289 {
290 to_del = e->next;
291 e->next = to_del->next;
292 free_node(&q.pool, to_del);
293 }
294 }
295 }
296
/* Rebase every queued event time onto a new Count value `base`
 * (used when Count is rewritten). COMPARE_INT and SPECIAL_INT are
 * dropped first and re-scheduled from the current CP0 registers. */
void translate_event_queue(unsigned int base)
{
    struct node* it;
    uint32_t count = g_cp0_regs[CP0_COUNT_REG];

    remove_event(COMPARE_INT);
    remove_event(SPECIAL_INT);

    for (it = q.first; it != NULL; it = it->next)
        it->data.count = it->data.count - count + base;

    add_interrupt_event_count(COMPARE_INT, g_cp0_regs[CP0_COMPARE_REG]);
    add_interrupt_event_count(SPECIAL_INT, 0);
}
311
save_eventqueue_infos(char * buf)312 int save_eventqueue_infos(char *buf)
313 {
314 int len;
315 struct node* e;
316
317 len = 0;
318
319 for(e = q.first; e != NULL; e = e->next)
320 {
321 memcpy(buf + len , &e->data.type , 4);
322 memcpy(buf + len + 4, &e->data.count, 4);
323 len += 8;
324 }
325
326 *((unsigned int*)&buf[len]) = 0xFFFFFFFF;
327 return len+4;
328 }
329
load_eventqueue_infos(char * buf)330 void load_eventqueue_infos(char *buf)
331 {
332 int len = 0;
333 clear_queue(&q);
334 while (*((unsigned int*)&buf[len]) != 0xFFFFFFFF)
335 {
336 int type = *((unsigned int*)&buf[len]);
337 unsigned int count = *((unsigned int*)&buf[len+4]);
338 add_interrupt_event_count(type, count);
339 len += 8;
340 }
341 }
342
init_interrupt(void)343 void init_interrupt(void)
344 {
345 SPECIAL_done = 1;
346
347
348 clear_queue(&q);
349 add_interrupt_event_count(SPECIAL_INT, 0);
350 }
351
/* Refresh the Cause IP2 bit from the MI pending/mask registers and,
 * when an unmasked interrupt is pending while interrupts are enabled,
 * push a CHECK_INT event at the current Count so it is serviced on
 * the next gen_interrupt(). */
void check_interrupt(void)
{
    struct node* event;

    /* mirror MI pending & mask state into Cause IP2 */
    if (g_dev.r4300.mi.regs[MI_INTR_REG] & g_dev.r4300.mi.regs[MI_INTR_MASK_REG])
        g_cp0_regs[CP0_CAUSE_REG] = (g_cp0_regs[CP0_CAUSE_REG] | CP0_CAUSE_IP2) & ~CP0_CAUSE_EXCCODE_MASK;
    else
        g_cp0_regs[CP0_CAUSE_REG] &= ~CP0_CAUSE_IP2;
    /* interrupts disabled or already at exception/error level: done */
    if ((g_cp0_regs[CP0_STATUS_REG] & (CP0_STATUS_IE | CP0_STATUS_EXL | CP0_STATUS_ERL)) != CP0_STATUS_IE) return;
    if (g_cp0_regs[CP0_STATUS_REG] & g_cp0_regs[CP0_CAUSE_REG] & UINT32_C(0xFF00))
    {
        event = alloc_node(&q.pool);

        if (event == NULL)
        {
            DebugMessage(M64MSG_ERROR, "Failed to allocate node for new interrupt event");
            return;
        }

        /* fire immediately: CHECK_INT goes at the head of the queue
         * and next_interrupt is pulled down to the current Count */
        event->data.count = next_interrupt = g_cp0_regs[CP0_COUNT_REG];
        event->data.type = CHECK_INT;

        if (q.first == NULL)
        {
            q.first = event;
            event->next = NULL;
        }
        else
        {
            event->next = q.first;
            q.first = event;
        }
    }
}
387
/* Raise a general exception. Under the new dynarec core the exception
 * state (EPC, EXL, BD, pcaddr) is updated by hand instead of calling
 * exception_general(). */
static void wrapped_exception_general(void)
{
#ifdef NEW_DYNAREC
    if (r4300emu == CORE_DYNAREC)
    {
        /* pcaddr bit 0 flags a delay slot; EPC then points at the branch */
        g_cp0_regs[CP0_EPC_REG] = (pcaddr&~3)-(pcaddr&1)*4;
        pcaddr = 0x80000180;
        g_cp0_regs[CP0_STATUS_REG] |= CP0_STATUS_EXL;
        /* NOTE(review): pcaddr was reassigned to 0x80000180 just above,
         * so this test is always false and CAUSE_BD is always cleared —
         * verify against upstream intent (delay-slot check probably
         * meant to use the pre-reassignment pcaddr) */
        if(pcaddr&1)
            g_cp0_regs[CP0_CAUSE_REG] |= CP0_CAUSE_BD;
        else
            g_cp0_regs[CP0_CAUSE_REG] &= ~CP0_CAUSE_BD;
        pending_exception=1;
    }
    else
    {
        exception_general();
    }
#else
    exception_general();
#endif
}
410
raise_maskable_interrupt(uint32_t cause)411 void raise_maskable_interrupt(uint32_t cause)
412 {
413 g_cp0_regs[CP0_CAUSE_REG] = (g_cp0_regs[CP0_CAUSE_REG] | cause) & ~CP0_CAUSE_EXCCODE_MASK;
414
415 if (!(g_cp0_regs[CP0_STATUS_REG] & g_cp0_regs[CP0_CAUSE_REG] & UINT32_C(0xff00)))
416 return;
417
418 if ((g_cp0_regs[CP0_STATUS_REG] & (CP0_STATUS_IE | CP0_STATUS_EXL | CP0_STATUS_ERL)) != CP0_STATUS_IE)
419 return;
420
421 wrapped_exception_general();
422 }
423
special_int_handler(void)424 static void special_int_handler(void)
425 {
426 if (g_cp0_regs[CP0_COUNT_REG] > UINT32_C(0x10000000))
427 return;
428
429 SPECIAL_done = 1;
430 remove_interrupt_event();
431 add_interrupt_event_count(SPECIAL_INT, 0);
432 }
433
/* COMPARE_INT handler: Count reached Compare. Re-schedules the next
 * compare event and raises the timer interrupt (Cause IP7). */
static void compare_int_handler(void)
{
    remove_interrupt_event();
    /* temporarily bias Count by one op while inserting the next
     * COMPARE event — presumably so the wrap-around ordering in
     * before_event() sees it correctly; do not reorder these lines */
    g_cp0_regs[CP0_COUNT_REG]+=count_per_op;
    add_interrupt_event_count(COMPARE_INT, g_cp0_regs[CP0_COMPARE_REG]);
    g_cp0_regs[CP0_COUNT_REG]-=count_per_op;

    raise_maskable_interrupt(CP0_CAUSE_IP7);
}
443
/* Hardware Interrupt 2 handler: clears SR/TS (and bit 19) in Status,
 * enables the IM4 mask, sets Cause IP4, and takes the exception. */
static void hw2_int_handler(void)
{
    /* Hardware Interrupt 2 -- remove interrupt event from queue */
    remove_interrupt_event();

    g_cp0_regs[CP0_STATUS_REG] = (g_cp0_regs[CP0_STATUS_REG] & ~(CP0_STATUS_SR | CP0_STATUS_TS | UINT32_C(0x00080000))) | CP0_STATUS_IM4;
    g_cp0_regs[CP0_CAUSE_REG] = (g_cp0_regs[CP0_CAUSE_REG] | CP0_CAUSE_IP4) & ~CP0_CAUSE_EXCCODE_MASK;

    wrapped_exception_general();
}
454
/* Non-Maskable Interrupt handler: performs a soft reset — runs the
 * HLE PIF boot code, clears counters and pending interrupts, resets
 * the recompiler state, and jumps to the reset vector. The statement
 * order mirrors the console's reset sequence; do not reorder. */
static void nmi_int_handler(void)
{
    /* Non Maskable Interrupt -- remove interrupt event from queue */
    remove_interrupt_event();
    /* setup r4300 Status flags: reset TS and SR, set BEV, ERL, and SR */
    g_cp0_regs[CP0_STATUS_REG] = (g_cp0_regs[CP0_STATUS_REG] & ~(CP0_STATUS_SR | CP0_STATUS_TS | UINT32_C(0x00080000))) | (CP0_STATUS_ERL | CP0_STATUS_BEV | CP0_STATUS_SR);
    g_cp0_regs[CP0_CAUSE_REG] = 0x00000000;
    /* simulate the soft reset code which would run from the PIF ROM */
    pifbootrom_hle_execute(&g_dev);
    /* clear all interrupts, reset interrupt counters back to 0 */
    g_cp0_regs[CP0_COUNT_REG] = 0;
    g_gs_vi_counter = 0;
    init_interrupt();

    /* re-arm the vertical interrupt with a fresh delay */
    g_dev.vi.delay = g_dev.vi.next_vi = 5000;
    add_interrupt_event_count(VI_INT, g_dev.vi.next_vi);

    /* clear the audio status register so that subsequent write_ai() calls will work properly */
    g_dev.ai.regs[AI_STATUS_REG] = 0;
    /* set ErrorEPC with the last instruction address */
    g_cp0_regs[CP0_ERROREPC_REG] = PC->addr;
    /* reset the r4300 internal state */
    if (r4300emu != CORE_PURE_INTERPRETER)
    {
        /* clear all the compiled instruction blocks and re-initialize */
        free_blocks();
        init_blocks();
    }
    /* adjust ErrorEPC if we were in a delay slot, and clear the delay_slot and dyna_interp flags */
    if(g_dev.r4300.delay_slot==1 || g_dev.r4300.delay_slot==3)
    {
        g_cp0_regs[CP0_ERROREPC_REG]-=4;
    }
    g_dev.r4300.delay_slot = 0;
    dyna_interp = 0;
    /* set next instruction address to reset vector */
    last_addr = UINT32_C(0xa4000040);
    generic_jump_to(UINT32_C(0xa4000040));

#ifdef NEW_DYNAREC
    if (r4300emu == CORE_DYNAREC)
    {
        /* point the dynarec PC at the reset vector and flush its cache */
        g_cp0_regs[CP0_ERROREPC_REG]=(pcaddr&~3)-(pcaddr&1)*4;
        pcaddr = 0xa4000040;
        pending_exception = 1;
        invalidate_all_pages();
    }
#endif
}
504
505
/* Service the interrupt event at the head of the queue (called once
 * Count reaches next_interrupt). Dispatches on the event type to the
 * per-type handlers and the device end-of-DMA / VI callbacks. Most
 * cases pop the event from the queue before handling it. */
void gen_interrupt(void)
{
    if (stop == 1)
    {
        g_gs_vi_counter = 0; /* debug */
        dyna_stop();
    }

    /* run a deferred hard reset, unless we are in an unsafe state */
    if (!interrupt_unsafe_state)
    {
        if (reset_hard_job)
        {
            reset_hard();
            reset_hard_job = 0;
            return;
        }
    }

    if (skip_jump)
    {
        /* a jump was deferred until interrupt time: take it now and
         * recompute next_interrupt without consuming any event */
        uint32_t dest = skip_jump;
        uint32_t count = g_cp0_regs[CP0_COUNT_REG];
        skip_jump = 0;

        next_interrupt = (q.first->data.count > count
                || (count - q.first->data.count) < UINT32_C(0x80000000))
            ? q.first->data.count
            : 0;

        last_addr = dest;
        generic_jump_to(dest);
        return;
    }

    switch(q.first->data.type)
    {
        case SPECIAL_INT:
            special_int_handler();
            break;

        case VI_INT:
            remove_interrupt_event();
            vi_vertical_interrupt_event(&g_dev.vi);
            /* give the libretro frontend a chance to run per-frame */
            retro_return(false);
            break;

        case COMPARE_INT:
            compare_int_handler();
            break;

        case CHECK_INT:
            /* queued by check_interrupt(): just take the exception */
            remove_interrupt_event();
            wrapped_exception_general();
            break;

        case SI_INT:
            remove_interrupt_event();
            si_end_of_dma_event(&g_dev.si);
            break;

        case PI_INT:
            remove_interrupt_event();
            pi_end_of_dma_event(&g_dev.pi);
            break;

        case AI_INT:
            remove_interrupt_event();
            ai_end_of_dma_event(&g_dev.ai);
            break;

        case SP_INT:
            remove_interrupt_event();
            rsp_interrupt_event(&g_dev.sp);
            break;

        case DP_INT:
            remove_interrupt_event();
            rdp_interrupt_event(&g_dev.dp);
            break;

        case HW2_INT:
            hw2_int_handler();
            break;

        case NMI_INT:
            nmi_int_handler();
            break;

        case CART_INT:
            g_cp0_regs[CP0_CAUSE_REG] |= 0x00000800; /* set IP3 */
            g_cp0_regs[CP0_CAUSE_REG] &= 0xFFFFFF83; /* mask out old exception code */
            remove_interrupt_event();

#if 0
            /* disabled 64DD end-of-DMA handling */
            if (dd_end_of_dma_event(&g_dd) == 1)
            {
                remove_interrupt_event();
                g_cp0_regs[CP0_CAUSE_REG] &= ~0x00000800;
            }
#endif
            break;

        default:
            DebugMessage(M64MSG_ERROR, "Unknown interrupt queue event type %.8X.", q.first->data.type);
            remove_interrupt_event();
            wrapped_exception_general();
            break;
    }
}
615
616