/* $Id: m68k-misc.c,v 1.27 2009/08/29 19:47:52 fredette Exp $ */

/* ic/m68k/m68k-misc.c - miscellaneous things for the m68k emulator: */

/*
 * Copyright (c) 2002, 2003 Matt Fredette
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Matt Fredette.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/* includes: */
#include "m68k-impl.h"

_TME_RCSID("$Id: m68k-misc.c,v 1.27 2009/08/29 19:47:52 fredette Exp $");

/* the memory buffer read and write functions: */
#if TME_M68K_SIZE_8 != 1
#error "TME_M68K_SIZE_8 must be 1"
#endif
#if TME_M68K_SIZE_16 != 2
#error "TME_M68K_SIZE_16 must be 2"
#endif
#if TME_M68K_SIZE_32 != 4
#error "TME_M68K_SIZE_32 must be 4"
#endif
const _tme_m68k_xfer_memx _tme_m68k_read_memx[5] = {
  NULL,
  tme_m68k_read_memx8,
  tme_m68k_read_memx16,
  NULL,
  tme_m68k_read_memx32
};
const _tme_m68k_xfer_memx _tme_m68k_write_memx[5] = {
  NULL,
  tme_m68k_write_memx8,
  tme_m68k_write_memx16,
  NULL,
  tme_m68k_write_memx32
};
const _tme_m68k_xfer_mem _tme_m68k_read_mem[5] = {
  NULL,
  tme_m68k_read_mem8,
  tme_m68k_read_mem16,
  NULL,
  tme_m68k_read_mem32
};
const _tme_m68k_xfer_mem _tme_m68k_write_mem[5] = {
  NULL,
  tme_m68k_write_mem8,
  tme_m68k_write_mem16,
  NULL,
  tme_m68k_write_mem32
};
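
/* NB: these transfer tables are indexed by operand size in bytes,
   which is why the TME_M68K_SIZE_ values are pinned above and why
   slots 0 and 3 are NULL.  a dispatch through them looks like this
   (a sketch of the idiom, not a line from this file):

     (*_tme_m68k_read_mem[TME_M68K_SIZE_32])(ic, TME_M68K_IREG_PC);

   which is equivalent to the direct tme_m68k_read_mem32() calls
   made elsewhere in this file: */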

/* our bus signal handler: */
static int
_tme_m68k_bus_signal(struct tme_bus_connection *conn_bus, unsigned int signal)
{
  struct tme_m68k *ic;
  unsigned int level_edge;

  /* recover our IC: */
  ic = conn_bus->tme_bus_connection.tme_connection_element->tme_element_private;

  /* take out the level and edge: */
  level_edge = signal;
  signal = TME_BUS_SIGNAL_WHICH(signal);
  level_edge ^= signal;
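
  /* NB: the signal argument packs the signal number together with
     its level/edge bits in one unsigned int: TME_BUS_SIGNAL_WHICH()
     isolates the signal number, and the XOR above then leaves only
     the level/edge bits in level_edge: */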

  /* lock the external mutex: */
  tme_mutex_lock(&ic->tme_m68k_external_mutex);

  /* on the falling edge of HALT or RESET, halt the processor: */
  if (((level_edge & TME_BUS_SIGNAL_LEVEL_MASK)
       == TME_BUS_SIGNAL_LEVEL_ASSERTED)
      && (signal == TME_BUS_SIGNAL_HALT
          || signal == TME_BUS_SIGNAL_RESET)) {
    ic->tme_m68k_external_halt = TRUE;
  }

  /* on the rising edge of RESET, reset the processor: */
  else if (signal == TME_BUS_SIGNAL_RESET
           && ((level_edge & TME_BUS_SIGNAL_LEVEL_MASK)
               == TME_BUS_SIGNAL_LEVEL_NEGATED)) {
    ic->tme_m68k_external_reset = TRUE;
  }

  /* on any other HALT or RESET, do nothing: */
  else if (signal == TME_BUS_SIGNAL_RESET
           || signal == TME_BUS_SIGNAL_HALT) {
    /* nothing */
  }

  /* anything else: */
  else {
    abort();
  }

  /* unlock the external mutex: */
  tme_mutex_unlock(&ic->tme_m68k_external_mutex);

  /* notify any threads waiting on the external condition: */
  tme_cond_notify(&ic->tme_m68k_external_cond, TRUE);
  return (TME_OK);
}

/* this enables or disables an m6888x: */
static int
_tme_m6888x_enable(struct tme_m68k_bus_connection *conn_m68k, int enabled)
{
  struct tme_m68k *ic;

  /* recover our IC: */
  ic = conn_m68k->tme_m68k_bus_connection.tme_bus_connection.tme_connection_element->tme_element_private;

  /* NB: we're lazy here and don't bother locking the external mutex: */
  if (ic->tme_m68k_fpu_type == TME_M68K_FPU_NONE) {
    return (ENXIO);
  }
  ic->tme_m68k_fpu_enabled = enabled;
  return (TME_OK);
}

/* our interrupt handler: */
static int
_tme_m68k_bus_interrupt(struct tme_m68k_bus_connection *conn_m68k, unsigned int ipl)
{
  struct tme_m68k *ic;

  /* recover our IC: */
  ic = conn_m68k->tme_m68k_bus_connection.tme_bus_connection.tme_connection_element->tme_element_private;

  /* lock the external mutex: */
  tme_mutex_lock(&ic->tme_m68k_external_mutex);

  /* set the interrupt line: */
  ic->tme_m68k_external_ipl = ipl;

  /* if the IPL has dropped below the NMI level, the next transition
     to that level will cause an NMI: */
  if (ipl < TME_M68K_IPL_NMI) {
    ic->tme_m68k_external_ipl_previous_nmi = FALSE;
  }

  /* unlock the external mutex: */
  tme_mutex_unlock(&ic->tme_m68k_external_mutex);

  /* notify any threads waiting on the external condition: */
  tme_cond_notify(&ic->tme_m68k_external_cond, TRUE);
  return (TME_OK);
}

/* this checks for external signals.  this must be called with the
   external mutex held: */
void
tme_m68k_external_check(struct tme_m68k *ic, tme_uint32_t internal_exceptions)
{
  unsigned int ipl;
  int vector;
  int rc;

  /* if an external reset has been requested, start reset exception
     processing: */
  if (ic->tme_m68k_external_reset) {
    ic->tme_m68k_external_reset = FALSE;
    tme_mutex_unlock(&ic->tme_m68k_external_mutex);
    tme_m68k_exception(ic, TME_M68K_EXCEPTION_RESET);
  }

  /* if an external halt has been requested, halt: */
  if (ic->tme_m68k_external_halt) {
    ic->tme_m68k_external_halt = FALSE;
    tme_mutex_unlock(&ic->tme_m68k_external_mutex);
    ic->_tme_m68k_mode = TME_M68K_MODE_HALT;
    TME_M68K_SEQUENCE_START;
    tme_m68k_redispatch(ic);
  }

  /* if we are not halted, and an interrupt can be serviced, start
     interrupt exception processing: */
  ipl = ic->tme_m68k_external_ipl;
  if (ic->_tme_m68k_mode != TME_M68K_MODE_HALT
      && ipl >= TME_M68K_IPL_MIN
      && ipl <= TME_M68K_IPL_MAX
      && ((ipl == TME_M68K_IPL_NMI
           && !ic->tme_m68k_external_ipl_previous_nmi)
          || ipl > TME_M68K_FLAG_IPM(ic->tme_m68k_ireg_sr))) {

    /* if this is an NMI, prevent it from being repeatedly accepted: */
    if (ipl == TME_M68K_IPL_NMI) {
      ic->tme_m68k_external_ipl_previous_nmi = TRUE;
    }

    tme_mutex_unlock(&ic->tme_m68k_external_mutex);

    /* acknowledge the interrupt and get the vector: */
    tme_m68k_callout_unlock(ic);
    rc = (*ic->_tme_m68k_bus_connection->tme_m68k_bus_connection.tme_bus_intack)
      (&ic->_tme_m68k_bus_connection->tme_m68k_bus_connection,
       ipl, &vector);
    tme_m68k_callout_relock(ic);
    if (rc == TME_EDEADLK) {
      abort();
    }

    /* if the interrupt acknowledge failed, this is a spurious interrupt: */
    if (rc == ENOENT) {
      vector = TME_M68K_VECTOR_SPURIOUS;
    }

    /* if no vector is given, use the autovector: */
    else if (vector == TME_BUS_INTERRUPT_VECTOR_UNDEF) {
      vector = TME_M68K_VECTOR_SPURIOUS + ipl;
    }
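
    /* NB: the autovector arithmetic above relies on the m68k vector
       map, where the spurious interrupt vector (24) is immediately
       followed by the level 1 through level 7 autovectors (25
       through 31), so TME_M68K_VECTOR_SPURIOUS + ipl names the
       autovector for this ipl: */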

    /* dispatch the exceptions: */
    tme_m68k_exception(ic, internal_exceptions | TME_M68K_EXCEPTION_INT(ipl, vector));
  }

  /* if there are internal exceptions to process, do so: */
  if (internal_exceptions != 0) {
    tme_mutex_unlock(&ic->tme_m68k_external_mutex);
    tme_m68k_exception(ic, internal_exceptions);
  }

  /* there are no exceptions to process: */
}

/* the idle function, used when the processor is halted or stopped: */
static void
tme_m68k_idle(struct tme_m68k *ic)
{
  /* lock the external mutex: */
  tme_mutex_lock(&ic->tme_m68k_external_mutex);

  /* loop forever: */
  for (;;) {

    /* check for any external signal: */
    tme_m68k_external_check(ic, 0);

    /* await an external condition: */
    tme_cond_wait_yield(&ic->tme_m68k_external_cond, &ic->tme_m68k_external_mutex);
  }
}

/* the m68k thread: */
static void
tme_m68k_thread(struct tme_m68k *ic)
{

  /* we use longjmp to redispatch: */
  do { } while (setjmp(ic->_tme_m68k_dispatcher));
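
  /* NB: tme_m68k_redispatch() ends with a longjmp back to this
     setjmp with a nonzero value, so every redispatch unwinds to
     this point and falls through to the mode switch below: */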

  /* we must not have a busy fast instruction TLB entry: */
  assert (ic->_tme_m68k_insn_fetch_fast_itlb == NULL);

  /* clear the group 0 hook: */
  ic->_tme_m68k_group0_hook = NULL;

  /* dispatch on the current mode: */
  switch (ic->_tme_m68k_mode) {

  case TME_M68K_MODE_EXECUTION:
    (*ic->_tme_m68k_mode_execute)(ic);
    /* NOTREACHED */

  case TME_M68K_MODE_EXCEPTION:
    (*ic->_tme_m68k_mode_exception)(ic);
    /* NOTREACHED */

  case TME_M68K_MODE_RTE:
    (*ic->_tme_m68k_mode_rte)(ic);
    /* NOTREACHED */

  case TME_M68K_MODE_STOP:
  case TME_M68K_MODE_HALT:
    tme_m68k_idle(ic);
    /* NOTREACHED */

  default:
    abort();
  }
  /* NOTREACHED */
}

/* the TLB filler for when we are on a generic bus: */
static int
_tme_m68k_generic_tlb_fill(struct tme_m68k_bus_connection *conn_m68k,
                           struct tme_m68k_tlb *tlb,
                           unsigned int function_code,
                           tme_uint32_t external_address,
                           unsigned int cycles)
{
  struct tme_m68k *ic;

  /* recover our IC: */
  ic = conn_m68k->tme_m68k_bus_connection.tme_bus_connection.tme_connection_element->tme_element_private;

  /* call the generic bus TLB filler: */
  (ic->_tme_m68k_bus_generic->tme_bus_tlb_fill)
    (ic->_tme_m68k_bus_generic,
     &tlb->tme_m68k_tlb_bus_tlb,
     external_address,
     cycles);

  /* when we're on a generic bus a TLB entry is valid for all function codes: */
  tlb->tme_m68k_tlb_function_codes_mask = -1;

  return (TME_OK);
}

/* the connection scorer: */
static int
_tme_m68k_connection_score(struct tme_connection *conn, unsigned int *_score)
{
  struct tme_m68k_bus_connection *conn_m68k;
  struct tme_bus_connection *conn_bus;
  unsigned int score;

  /* assume that this connection is useless: */
  score = 0;

  /* dispatch on the connection type: */
  conn_m68k = (struct tme_m68k_bus_connection *) conn->tme_connection_other;
  conn_bus = (struct tme_bus_connection *) conn->tme_connection_other;
  switch (conn->tme_connection_type) {

    /* this must be a bus, and not another m68k chip: */
  case TME_CONNECTION_BUS_M68K:
    if (conn_bus->tme_bus_tlb_set_add != NULL
        && conn_m68k->tme_m68k_bus_tlb_fill != NULL
        && conn_m68k->tme_m68k_bus_m6888x_enable == NULL) {
      score = 10;
    }
    break;

    /* this must be a bus, and not another chip: */
  case TME_CONNECTION_BUS_GENERIC:
    if (conn_bus->tme_bus_tlb_set_add != NULL
        && conn_bus->tme_bus_tlb_fill != NULL) {
      score = 1;
    }
    break;

  default: abort();
  }

  *_score = score;
  return (TME_OK);
}

/* this makes a new connection: */
static int
_tme_m68k_connection_make(struct tme_connection *conn, unsigned int state)
{
  struct tme_m68k *ic;
  struct tme_m68k_bus_connection *conn_m68k;
  struct tme_bus_connection *conn_bus;
  struct tme_connection *conn_other;
  struct tme_bus_tlb_set_info tlb_set_info;
  unsigned long tlb_i;
  struct tme_m68k_tlb *tlb;
  int rc;

  /* since the CPU is halted, it won't be making any connection calls,
     so we only have to do work when the connection is fully made: */
  if (state == TME_CONNECTION_FULL) {

    /* recover our IC: */
    ic = conn->tme_connection_element->tme_element_private;

    /* dispatch on the connection type: */
    conn_other = conn->tme_connection_other;
    conn_m68k = (struct tme_m68k_bus_connection *) conn_other;
    conn_bus = (struct tme_bus_connection *) conn_other;
    switch (conn->tme_connection_type) {

    case TME_CONNECTION_BUS_M68K:
      ic->_tme_m68k_bus_connection = conn_m68k;
      break;

      /* we need an adaptation layer: */
    case TME_CONNECTION_BUS_GENERIC:
      conn_m68k = tme_new0(struct tme_m68k_bus_connection, 1);
      conn_m68k->tme_m68k_bus_connection.tme_bus_connection.tme_connection_element = conn->tme_connection_element;
      conn_m68k->tme_m68k_bus_tlb_fill = _tme_m68k_generic_tlb_fill;
      ic->_tme_m68k_bus_connection = conn_m68k;
      ic->_tme_m68k_bus_generic = conn_bus;
      break;

    default: abort();
    }

    /* make the TLB set information: */
    memset(&tlb_set_info, 0, sizeof(tlb_set_info));
    tlb_set_info.tme_bus_tlb_set_info_token0 = &ic->_tme_m68k_tlb_array[0].tme_m68k_tlb_token;
    tlb_set_info.tme_bus_tlb_set_info_token_stride = sizeof(struct tme_m68k_tlb);
    tlb_set_info.tme_bus_tlb_set_info_token_count = TME_ARRAY_ELS(ic->_tme_m68k_tlb_array);
    tlb_set_info.tme_bus_tlb_set_info_bus_context = &ic->_tme_m68k_bus_context;

    /* initialize the TLBs in the set: */
    for (tlb_i = 0; tlb_i < TME_ARRAY_ELS(ic->_tme_m68k_tlb_array); tlb_i++) {
      tlb = &ic->_tme_m68k_tlb_array[tlb_i];

      /* initialize this token: */
      tme_token_init(&tlb->tme_m68k_tlb_token);

      /* connect this token with this TLB: */
      tlb->tme_m68k_tlb_bus_tlb.tme_bus_tlb_token = &tlb->tme_m68k_tlb_token;
    }

    /* add the TLB set: */
    rc = ((*ic->_tme_m68k_bus_connection->tme_m68k_bus_connection.tme_bus_tlb_set_add)
          (&ic->_tme_m68k_bus_connection->tme_m68k_bus_connection,
           &tlb_set_info));
    assert (rc == TME_OK);
  }

  /* NB: the machine needs to issue a reset to bring the CPU out of halt. */
  return (TME_OK);
}

/* this breaks a connection: */
static int
_tme_m68k_connection_break(struct tme_connection *conn, unsigned int state)
{
  abort();
  return (0);
}

/* this makes new connection sides: */
static int
_tme_m68k_connections_new(struct tme_element *element, const char * const *args, struct tme_connection **_conns, char **_output)
{
  struct tme_m68k_bus_connection *conn_m68k;
  struct tme_bus_connection *conn_bus;
  struct tme_connection *conn;

  /* if we already have a bus connection, we can take no more connections: */
  if (((struct tme_m68k *) element->tme_element_private)->_tme_m68k_bus_connection != NULL) {
    return (TME_OK);
  }

  /* create our side of an m68k bus connection: */
  conn_m68k = tme_new0(struct tme_m68k_bus_connection, 1);
  conn_bus = &conn_m68k->tme_m68k_bus_connection;
  conn = &conn_bus->tme_bus_connection;

  /* fill in the generic connection: */
  conn->tme_connection_next = *_conns;
  conn->tme_connection_type = TME_CONNECTION_BUS_M68K;
  conn->tme_connection_score = _tme_m68k_connection_score;
  conn->tme_connection_make = _tme_m68k_connection_make;
  conn->tme_connection_break = _tme_m68k_connection_break;

  /* fill in the generic bus connection: */
  conn_bus->tme_bus_signal = _tme_m68k_bus_signal;
  conn_bus->tme_bus_tlb_set_add = NULL;

  /* fill in the m68k bus connection: */
  conn_m68k->tme_m68k_bus_interrupt = _tme_m68k_bus_interrupt;
  conn_m68k->tme_m68k_bus_tlb_fill = NULL;
  conn_m68k->tme_m68k_bus_m6888x_enable = _tme_m6888x_enable;

  /* add this connection to the set of possibilities: */
  *_conns = conn;

  /* create our side of a generic bus connection: */
  conn_bus = tme_new0(struct tme_bus_connection, 1);
  conn = &conn_bus->tme_bus_connection;

  /* fill in the generic connection: */
  conn->tme_connection_next = *_conns;
  conn->tme_connection_type = TME_CONNECTION_BUS_GENERIC;
  conn->tme_connection_score = _tme_m68k_connection_score;
  conn->tme_connection_make = _tme_m68k_connection_make;
  conn->tme_connection_break = _tme_m68k_connection_break;

  /* fill in the generic bus connection: */
  conn_bus->tme_bus_signal = _tme_m68k_bus_signal;
  conn_bus->tme_bus_tlb_set_add = NULL;
  conn_bus->tme_bus_tlb_fill = NULL;

  /* add this connection to the set of possibilities: */
  *_conns = conn;

  /* done: */
  return (TME_OK);
}

/* the common m68k new function: */
int
tme_m68k_new(struct tme_m68k *ic, const char * const *args, const void *extra, char **_output)
{
  struct tme_element *element;
  int arg_i;
  int usage;

  /* check our arguments: */
  arg_i = 1;
  usage = FALSE;
  for (;;) {

    if (0) {

    }

    /* if we've run out of arguments: */
    else if (args[arg_i + 0] == NULL) {
      break;
    }

    /* this is either a bad argument or an FPU argument: */
    else {

      /* if this is not an FPU argument: */
      if (!tme_m68k_fpu_new(ic, args, &arg_i, &usage, _output)) {
        tme_output_append_error(_output,
                                "%s %s, ",
                                args[arg_i],
                                _("unexpected"));
        usage = TRUE;
      }

      if (usage) {
        break;
      }
    }
  }

  if (usage) {
    tme_output_append_error(_output,
                            "%s %s",
                            _("usage:"),
                            args[0]);
    tme_m68k_fpu_usage(_output);
    tme_free(ic);
    return (EINVAL);
  }

  /* initialize the verifier: */
  tme_m68k_verify_init();

  /* dispatch on the type: */
  switch (ic->tme_m68k_type) {
  case TME_M68K_M68000:
    ic->_tme_m68k_bus_16bit = 1;
    break;
  case TME_M68K_M68010:
    ic->_tme_m68k_bus_16bit = 1;
    break;
  case TME_M68K_M68020:
    ic->_tme_m68k_bus_16bit = 0;
    break;
  default:
    abort();
  }

  /* we have no bus connection yet: */
  ic->_tme_m68k_bus_connection = NULL;

  /* fill the element: */
  element = ic->tme_m68k_element;
  element->tme_element_private = ic;
  element->tme_element_connections_new = _tme_m68k_connections_new;

  /* calculate the instruction burst size: */
  /* XXX TBD: */
  ic->_tme_m68k_instruction_burst = 200;
  ic->_tme_m68k_instruction_burst_remaining
    = ic->_tme_m68k_instruction_burst;

  /* set the status register T bits mask: */
  ic->_tme_m68k_sr_mask_t
    = (TME_M68K_FLAG_T1
       | ((ic->tme_m68k_type >= TME_M68K_M68020)
          * TME_M68K_FLAG_T0));
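
  /* NB: the multiplication above is a branchless conditional OR:
     the comparison yields 0 or 1, so TME_M68K_FLAG_T0 joins the
     mask only on a 68020 or better, the first family member with
     the second trace bit: */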

  /* initialize the small immediates: */
  ic->tme_m68k_ireg_uint32(TME_M68K_IREG_ZERO) = 0;
  ic->tme_m68k_ireg_uint32(TME_M68K_IREG_ONE) = 1;
  ic->tme_m68k_ireg_uint32(TME_M68K_IREG_TWO) = 2;
  ic->tme_m68k_ireg_uint32(TME_M68K_IREG_THREE) = 3;
  ic->tme_m68k_ireg_uint32(TME_M68K_IREG_FOUR) = 4;
  ic->tme_m68k_ireg_uint32(TME_M68K_IREG_FIVE) = 5;
  ic->tme_m68k_ireg_uint32(TME_M68K_IREG_SIX) = 6;
  ic->tme_m68k_ireg_uint32(TME_M68K_IREG_SEVEN) = 7;
  ic->tme_m68k_ireg_uint32(TME_M68K_IREG_EIGHT) = 8;
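
  /* NB: a guess at the intent, since this file doesn't say: these
     pseudo-registers let instruction executors treat small
     immediate operands (like a quick instruction's count) as just
     another register source, so one operand path serves both
     cases: */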

  /* force the processor to be halted: */
  ic->_tme_m68k_mode = TME_M68K_MODE_HALT;
  TME_M68K_SEQUENCE_START;

  /* start the m68k thread: */
  tme_thread_create((tme_thread_t) tme_m68k_thread, ic);

  return (TME_OK);
}

/* the common m68k reset function: */
void
tme_m68k_do_reset(struct tme_m68k *ic)
{

  /* force the VBR to zero: */
  ic->tme_m68k_ireg_vbr = 0;

  /* clear the E and F bits in the CACR: */
  ic->tme_m68k_ireg_cacr = 0;

  /* force supervisor mode, interrupts disabled: */
  tme_m68k_change_sr(ic, TME_M68K_FLAG_S | (7 << 8));

  /* load the initial SSP and PC: */
  ic->_tme_m68k_ea_function_code = TME_M68K_FC_SP;
  ic->_tme_m68k_ea_address = 0;
  tme_m68k_read_mem32(ic, TME_M68K_IREG_A7);
  ic->_tme_m68k_ea_address += sizeof(ic->tme_m68k_ireg_a7);
  tme_m68k_read_mem32(ic, TME_M68K_IREG_PC);
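
  /* NB: the two reads above follow the m68k reset sequence: the
     initial supervisor stack pointer comes from vector 0 (address
     0) and the initial program counter from vector 1 (address 4): */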

  /* clear all exceptions: */
  ic->_tme_m68k_exceptions = 0;

  /* reset the FPU: */
  tme_m68k_fpu_reset(ic);

  /* start execution: */
  ic->_tme_m68k_mode = TME_M68K_MODE_EXECUTION;
  TME_M68K_SEQUENCE_START;
  tme_m68k_redispatch(ic);
}

/* this returns nonzero iff the slow instruction executor must be
   used: */
int
tme_m68k_go_slow(const struct tme_m68k *ic)
{
  tme_bus_context_t bus_context;
  const struct tme_m68k_tlb *tlb;
  tme_uint32_t linear_pc;
  const tme_shared tme_uint8_t *emulator_load;
  const tme_shared tme_uint8_t *emulator_load_last;

  bus_context = ic->_tme_m68k_bus_context;
  tlb = &ic->_tme_m68k_itlb;
  emulator_load = tlb->tme_m68k_tlb_emulator_off_read;
  emulator_load_last = emulator_load;
  if (emulator_load != TME_EMULATOR_OFF_UNDEF) {
    emulator_load += (tme_bus_addr32_t) tlb->tme_m68k_tlb_linear_first;
    emulator_load_last += (tme_bus_addr32_t) tlb->tme_m68k_tlb_linear_last;
    assert (emulator_load <= emulator_load_last);
  }
  linear_pc = ic->tme_m68k_ireg_pc;
  return (

          /* the ITLB entry must support reads from emulator memory: */
          tme_m68k_tlb_is_invalid(tlb)
          || tlb->tme_m68k_tlb_bus_context != bus_context
          || (tlb->tme_m68k_tlb_function_codes_mask
              & TME_BIT(TME_M68K_FUNCTION_CODE_PROGRAM(ic))) == 0
          || linear_pc < (tme_bus_addr32_t) tlb->tme_m68k_tlb_linear_first
          || linear_pc > (tme_bus_addr32_t) tlb->tme_m68k_tlb_linear_last
          || tlb->tme_m68k_tlb_emulator_off_read == TME_EMULATOR_OFF_UNDEF

          /* the ITLB emulator memory must be 32-bit aligned for the
             benefit of the fast instruction word fetch macros, so
             that emulator address alignment goes with linear address
             alignment: */
          || (((unsigned long) tlb->tme_m68k_tlb_emulator_off_read)
              & (sizeof(tme_uint32_t) - 1))

          /* the ITLB emulator memory must not be so low that the
             first valid pointer minus one, or the last valid pointer
             minus (sizeof(tme_uint32_t) - 1), wraps around, nor so
             high that the last valid pointer, plus one, wraps around: */
          /* NB: this enables the fast instruction word fetch macros
             to simply fetch 16 and 32 bit values until fetch_fast_next
             is greater than ic->_tme_m68k_insn_fetch_fast_last, and
             not have to do any pointer math or ever check for pointer
             wrapping: */
          || ((emulator_load
               - 1)
              >= emulator_load)
          || ((emulator_load_last
               - (sizeof(tme_uint32_t) - 1))
              >= emulator_load_last)
          || ((emulator_load_last
               + 1)
              <= emulator_load_last)

          /* the linear PC must be 16-bit aligned: */
          || (linear_pc & 1)

          /* there must be no tracing: */
          || (ic->tme_m68k_ireg_sr & ic->_tme_m68k_sr_mask_t) != 0);
}

/* this redispatches: */
void
tme_m68k_redispatch(struct tme_m68k *ic)
{
  struct tme_m68k_tlb *tlb;

  /* if we have a busy fast instruction TLB entry: */
  tlb = ic->_tme_m68k_insn_fetch_fast_itlb;
  if (__tme_predict_true(tlb != NULL)) {

    /* unbusy and forget the fast instruction TLB entry: */
    tme_m68k_tlb_unbusy(tlb);
    ic->_tme_m68k_insn_fetch_fast_itlb = NULL;
  }

  /* do the redispatch: */
#ifdef _TME_M68K_STATS
  ic->tme_m68k_stats.tme_m68k_stats_redispatches++;
#endif /* _TME_M68K_STATS */
  longjmp(ic->_tme_m68k_dispatcher, 1);
}

/* this fills a TLB entry: */
void
tme_m68k_tlb_fill(struct tme_m68k *ic, struct tme_m68k_tlb *tlb,
                  unsigned int function_code,
                  tme_uint32_t linear_address,
                  unsigned int cycles)
{
  tme_uint32_t external_address;
  struct tme_bus_tlb tlb_internal;

#ifdef _TME_M68K_STATS
  if (function_code == TME_M68K_FC_UP
      || function_code == TME_M68K_FC_SP) {
    ic->tme_m68k_stats.tme_m68k_stats_itlb_fill++;
  }
  else {
    ic->tme_m68k_stats.tme_m68k_stats_dtlb_fill++;
  }
#endif /* _TME_M68K_STATS */

  /* when emulating a CPU with a 16-bit bus, only 24 bits of address
     are external: */
  external_address = linear_address;
  if (ic->_tme_m68k_bus_16bit) {
    external_address &= 0x00ffffff;
  }
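
  /* NB: the masking above models the 68000/68010 address bus, which
     only drives 24 address bits, so a linear address such as
     0x12345678 appears externally as 0x00345678; the
     tme_bus_tlb_map() call at the end of this function reconciles
     the two views when they differ: */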

  /* unbusy the TLB entry: */
  tme_m68k_tlb_unbusy(tlb);

  /* clear any invalid token: */
  tme_token_invalid_clear(&tlb->tme_m68k_tlb_token);

  /* unlock for the callout: */
  tme_m68k_callout_unlock(ic);

  /* fill the TLB entry: */
  (*ic->_tme_m68k_bus_connection->tme_m68k_bus_tlb_fill)
    (ic->_tme_m68k_bus_connection, tlb,
     function_code,
     external_address,
     cycles);

  /* relock after the callout: */
  tme_m68k_callout_relock(ic);

  /* set the context on the TLB entry: */
  tlb->tme_m68k_tlb_bus_context = ic->_tme_m68k_bus_context;

  /* rebusy the TLB entry: */
  tme_m68k_tlb_busy(tlb);

  /* if this code isn't 32-bit clean, we have to deal: */
  if (external_address != linear_address) {
    tlb_internal.tme_bus_tlb_addr_first
      = (((tme_bus_addr32_t) tlb->tme_m68k_tlb_linear_first)
         | (linear_address ^ external_address));
    tlb_internal.tme_bus_tlb_addr_last
      = (((tme_bus_addr32_t) tlb->tme_m68k_tlb_linear_last)
         | (linear_address ^ external_address));
    tlb_internal.tme_bus_tlb_cycles_ok = tlb->tme_m68k_tlb_bus_tlb.tme_bus_tlb_cycles_ok;
    tme_bus_tlb_map(&tlb->tme_m68k_tlb_bus_tlb, external_address,
                    &tlb_internal, linear_address);
  }
}

/* this triggers exception processing: */
void
tme_m68k_exception(struct tme_m68k *ic, tme_uint32_t new_exceptions)
{
  assert(new_exceptions != 0);

  /* if the set of new exceptions includes a group zero exception: */
  if (new_exceptions &
      (TME_M68K_EXCEPTION_RESET
       | TME_M68K_EXCEPTION_AERR
       | TME_M68K_EXCEPTION_BERR)) {

    /* there must be only one exception - you cannot trigger a group 0
       exception simultaneously with any other group 0, 1, or 2
       exception: */
    assert((new_exceptions & (new_exceptions - 1)) == 0);
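
    /* NB: x & (x - 1) clears the lowest set bit of x, so the
       assertion above holds exactly when new_exceptions has at most
       one bit set - a cheap single-exception check: */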

    /* if this is a reset exception, it clears all other exceptions: */
    if (new_exceptions == TME_M68K_EXCEPTION_RESET) {
      ic->_tme_m68k_exceptions = 0;
    }

    /* otherwise, this is an address error or a bus error.  if we were
       already processing a group 0 exception, this is a
       double fault, and the processor enters the halted state: */
    else if (ic->_tme_m68k_exceptions &
             (TME_M68K_EXCEPTION_RESET
              | TME_M68K_EXCEPTION_AERR
              | TME_M68K_EXCEPTION_BERR)) {
      tme_log(TME_M68K_LOG_HANDLE(ic), 0, TME_OK,
              (TME_M68K_LOG_HANDLE(ic),
               _("double fault, processor halted")));
      ic->_tme_m68k_mode = TME_M68K_MODE_HALT;
      TME_M68K_SEQUENCE_START;
      tme_m68k_redispatch(ic);
    }
  }

  /* otherwise, exception processing must not already be happening: */
  else {
    assert(ic->_tme_m68k_exceptions == 0);
  }

  /* begin exception processing: */
  ic->_tme_m68k_exceptions |= new_exceptions;
  ic->_tme_m68k_mode = TME_M68K_MODE_EXCEPTION;
  TME_M68K_SEQUENCE_START;
  tme_m68k_redispatch(ic);
}

/* this changes SR, and swaps %a7 as needed: */
void
tme_m68k_change_sr(struct tme_m68k *ic, tme_uint16_t sr)
{
  tme_uint16_t flags_mode;

  /* only recognize the M bit on a 68020 or better: */
  flags_mode = (TME_M68K_FLAG_S
                | ((ic->tme_m68k_type >= TME_M68K_M68020)
                   * TME_M68K_FLAG_M));

  /* save %a7 in the proper stack pointer control register: */
  switch (ic->tme_m68k_ireg_sr & flags_mode) {
  case 0:
  case TME_M68K_FLAG_M:
    ic->tme_m68k_ireg_usp = ic->tme_m68k_ireg_a7;
    break;
  case TME_M68K_FLAG_S:
    ic->tme_m68k_ireg_isp = ic->tme_m68k_ireg_a7;
    break;
  case (TME_M68K_FLAG_S | TME_M68K_FLAG_M):
    ic->tme_m68k_ireg_msp = ic->tme_m68k_ireg_a7;
    break;
  }
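
  /* NB: the S and M bits select which control register %a7 shadows:
     in user mode (S clear) %a7 is the USP regardless of M, while in
     supervisor mode it is the ISP when M is clear and the MSP when
     M is set - hence this four-way switch and the matching one
     below: */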

  /* load %a7 from the proper stack pointer control register: */
  ic->tme_m68k_ireg_sr = sr;
  switch (ic->tme_m68k_ireg_sr & flags_mode) {
  case 0:
  case TME_M68K_FLAG_M:
    ic->tme_m68k_ireg_a7 = ic->tme_m68k_ireg_usp;
    break;
  case TME_M68K_FLAG_S:
    ic->tme_m68k_ireg_a7 = ic->tme_m68k_ireg_isp;
    break;
  case (TME_M68K_FLAG_S | TME_M68K_FLAG_M):
    ic->tme_m68k_ireg_a7 = ic->tme_m68k_ireg_msp;
    break;
  }
}

/* this starts processing an m68k exception: */
void
tme_m68k_exception_process_start(struct tme_m68k *ic, unsigned int ipl)
{
  tme_uint16_t sr;

  /* make an internal copy of the status register, then set S, clear
     T, and update I: */
  if (!TME_M68K_SEQUENCE_RESTARTING) {
    ic->tme_m68k_ireg_shadow_sr = ic->tme_m68k_ireg_sr;
    sr = (ic->tme_m68k_ireg_sr | TME_M68K_FLAG_S) & ~ic->_tme_m68k_sr_mask_t;
    if (ipl > TME_M68K_IPL_NONE) {
      assert(ipl == TME_M68K_IPL_NMI
             || ipl > TME_M68K_FLAG_IPM(sr));
      sr = (sr & ~(TME_M68K_IPL_MAX << 8)) | (ipl << 8);
    }
    tme_m68k_change_sr(ic, sr);
  }
}

/* this finishes processing an m68k exception: */
void
tme_m68k_exception_process_finish(struct tme_m68k *ic, tme_uint8_t format, tme_uint8_t vector)
{
  tme_uint16_t vector_offset;

  /* stack the frame format and vector offset, unless this is a 68000: */
  vector_offset = ((tme_uint16_t) vector) << 2;
  if (ic->tme_m68k_type != TME_M68K_M68000) {
    tme_m68k_push16(ic, (((tme_uint16_t) format) << 12) | vector_offset);
  }
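
  /* NB: this is the 68010+ format/offset word: the frame format
     number occupies bits 15-12 and the vector offset - the vector
     number times four, i.e. the vector's byte offset from the VBR -
     occupies bits 11-0: */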

  /* stack the program counter: */
  tme_m68k_push32(ic, ic->tme_m68k_ireg_pc);

  /* stack the internal copy of the status register: */
  tme_m68k_push16(ic, ic->tme_m68k_ireg_shadow_sr);

  /* do a bus cycle to read the vector into the program counter: */
  if (!TME_M68K_SEQUENCE_RESTARTING) {
    ic->_tme_m68k_ea_function_code = TME_M68K_FC_SD;
    ic->_tme_m68k_ea_address = ic->tme_m68k_ireg_vbr + vector_offset;
  }
  tme_m68k_read_mem32(ic, TME_M68K_IREG_PC);
}

/* common m68000 and m68010 exception processing: */
void
tme_m68000_exception_process(struct tme_m68k *ic)
{
  tme_uint32_t exceptions;
  tme_uint8_t vector;

  /* get the set of exceptions.  we must have no group 0 exceptions: */
  exceptions = ic->_tme_m68k_exceptions;
  assert((exceptions & (TME_M68K_EXCEPTION_RESET
                        | TME_M68K_EXCEPTION_AERR
                        | TME_M68K_EXCEPTION_BERR)) == 0);

  /* these if statements are ordered to implement the priority
     relationship between the different exceptions as outlined in
     the 68000 user's manual (pp 93 in my copy): */

  if (TME_M68K_EXCEPTION_IS_INST(exceptions)) {
    tme_m68k_exception_process_start(ic, 0);
    tme_m68k_exception_process_finish(ic, TME_M68K_FORMAT_0, TME_M68K_EXCEPTION_IS_INST(exceptions));
  }

  if (exceptions & TME_M68K_EXCEPTION_TRACE) {
    tme_m68k_exception_process_start(ic, 0);
    tme_m68k_exception_process_finish(ic, TME_M68K_FORMAT_0, TME_M68K_VECTOR_TRACE);
  }

  if (TME_M68K_EXCEPTION_IS_INT(exceptions)) {
    tme_m68k_exception_process_start(ic, TME_M68K_EXCEPTION_IS_INT(exceptions));
    tme_m68k_exception_process_finish(ic, TME_M68K_FORMAT_0, TME_M68K_EXCEPTION_INT_VEC(exceptions));
  }

  if (exceptions & TME_M68K_EXCEPTION_ILL) {
    if (TME_FIELD_EXTRACTU(ic->_tme_m68k_insn_opcode, 12, 4) == 0xa) {
      vector = TME_M68K_VECTOR_LINE_A;
    }
    else if (TME_FIELD_EXTRACTU(ic->_tme_m68k_insn_opcode, 12, 4) == 0xf) {
      vector = TME_M68K_VECTOR_LINE_F;
    }
    else {
      vector = TME_M68K_VECTOR_ILL;
    }
    tme_m68k_exception_process_start(ic, 0);
    tme_m68k_exception_process_finish(ic, TME_M68K_FORMAT_0, vector);
  }

  if (exceptions & TME_M68K_EXCEPTION_PRIV) {
    tme_m68k_exception_process_start(ic, 0);
    /* NB: a privilege violation must vector through the privilege
       violation vector, not the trace vector, matching the 68020
       path below: */
    tme_m68k_exception_process_finish(ic, TME_M68K_FORMAT_0, TME_M68K_VECTOR_PRIV);
  }

  /* we have processed all exceptions - resume execution: */
  ic->_tme_m68k_exceptions = 0;
  ic->_tme_m68k_mode = TME_M68K_MODE_EXECUTION;
  TME_M68K_SEQUENCE_START;
  tme_m68k_redispatch(ic);
}

/* common m68020 and later exception processing: */
void
tme_m68020_exception_process(struct tme_m68k *ic)
{
  tme_uint32_t exceptions;
  tme_uint8_t vector;
  struct {
    tme_uint16_t tme_m68k_fmt1_sr;
    tme_uint16_t tme_m68k_fmt1_pc_hi;
    tme_uint16_t tme_m68k_fmt1_pc_lo;
    tme_uint16_t tme_m68k_fmt1_vector_offset;
  } fmt1;

  /* get the set of exceptions.  we must have no group 0 or 1
     exceptions: */
  exceptions = ic->_tme_m68k_exceptions;
  assert((exceptions & (TME_M68K_EXCEPTION_RESET
                        | TME_M68K_EXCEPTION_AERR
                        | TME_M68K_EXCEPTION_BERR)) == 0);

  /* these if statements are ordered to implement the priority
     relationship between the different exceptions as outlined in
     the 68020 user's manual (pp 144 in my copy): */

  /* group 2 exceptions: */
  if (TME_M68K_EXCEPTION_IS_INST(exceptions)) {
    tme_m68k_exception_process_start(ic, 0);

    /* get the vector number: */
    vector = TME_M68K_EXCEPTION_IS_INST(exceptions);

    /* of the group 2 exceptions, only the Format Error and TRAP #N
       exceptions generate a format 0 stack frame.  the RTE mode code
       and the TRAP instruction code are expected to have left
       ic->tme_m68k_ireg_pc as the PC they want stacked: */
    if (vector == TME_M68K_VECTOR_FORMAT
        || (TME_M68K_VECTOR_TRAP_0 <= vector
            && vector < (TME_M68K_VECTOR_TRAP_0 + 16))) {
      tme_m68k_exception_process_finish(ic, TME_M68K_FORMAT_0, vector);
    }

    /* all other group 2 exceptions generate a format 2 stack frame.
       all code that can signal this exception is expected to have
       left ic->tme_m68k_ireg_pc *and* ic->tme_m68k_ireg_pc_last as
       the PCs they want stacked: */
    else {

      /* stack the program counter of the instruction that caused the exception: */
      tme_m68k_push32(ic, ic->tme_m68k_ireg_pc_last);

      /* finish with a format 2 stack frame: */
      tme_m68k_exception_process_finish(ic, TME_M68K_FORMAT_2, vector);
    }
  }

  /* group 3 exceptions: */
  if (exceptions & TME_M68K_EXCEPTION_ILL) {
    if (TME_FIELD_EXTRACTU(ic->_tme_m68k_insn_opcode, 12, 4) == 0xa) {
      vector = TME_M68K_VECTOR_LINE_A;
    }
    else if (TME_FIELD_EXTRACTU(ic->_tme_m68k_insn_opcode, 12, 4) == 0xf) {
      vector = TME_M68K_VECTOR_LINE_F;
    }
    else {
      vector = TME_M68K_VECTOR_ILL;
    }
    tme_m68k_exception_process_start(ic, 0);
    tme_m68k_exception_process_finish(ic, TME_M68K_FORMAT_0, vector);
  }
  if (exceptions & TME_M68K_EXCEPTION_PRIV) {
    tme_m68k_exception_process_start(ic, 0);
    tme_m68k_exception_process_finish(ic, TME_M68K_FORMAT_0, TME_M68K_VECTOR_PRIV);
  }

  /* group 4.1 exceptions: */
  if (exceptions & TME_M68K_EXCEPTION_TRACE) {
    tme_m68k_exception_process_start(ic, 0);
    tme_m68k_push32(ic, ic->tme_m68k_ireg_pc_last);
    tme_m68k_exception_process_finish(ic, TME_M68K_FORMAT_2, TME_M68K_VECTOR_TRACE);
  }

  /* group 4.2 exceptions: */
  if (TME_M68K_EXCEPTION_IS_INT(exceptions)) {
    tme_m68k_exception_process_start(ic, TME_M68K_EXCEPTION_IS_INT(exceptions));
    tme_m68k_exception_process_finish(ic, TME_M68K_FORMAT_0, TME_M68K_EXCEPTION_INT_VEC(exceptions));

    /* if the M-bit is set: */
    if (ic->tme_m68k_ireg_sr & TME_M68K_FLAG_M) {

      /* make the throwaway four-word stack frame (format 1): */
      fmt1.tme_m68k_fmt1_vector_offset = tme_htobe_u16((TME_M68K_FORMAT_1 << 12) | (TME_M68K_EXCEPTION_INT_VEC(exceptions) << 2));
      fmt1.tme_m68k_fmt1_pc_lo = tme_htobe_u16((ic->tme_m68k_ireg_pc >> 0) & 0xffff);
      fmt1.tme_m68k_fmt1_pc_hi = tme_htobe_u16((ic->tme_m68k_ireg_pc >> 16) & 0xffff);
      fmt1.tme_m68k_fmt1_sr = tme_htobe_u16(ic->tme_m68k_ireg_sr);
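
      /* NB: the fmt1 struct fields are declared in the frame's
         memory order (SR, then the PC high and low words, then the
         format/offset word), and each field is converted to
         big-endian, so the whole frame can be stored with the
         single tme_m68k_write_mem() call below: */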

      /* store the throwaway four-word stack frame on the interrupt stack: */
      if (!TME_M68K_SEQUENCE_RESTARTING) {
        ic->_tme_m68k_ea_function_code = TME_M68K_FC_SD;
        ic->_tme_m68k_ea_address = ic->tme_m68k_ireg_isp - sizeof(fmt1);
      }
      tme_m68k_write_mem(ic, (tme_uint8_t *) &fmt1, sizeof(fmt1));

      /* move to the interrupt stack: */
      ic->tme_m68k_ireg_isp -= sizeof(fmt1);
      tme_m68k_change_sr(ic, ic->tme_m68k_ireg_sr & ~TME_M68K_FLAG_M);
    }
  }

  /* we have processed all exceptions - resume execution: */
  ic->_tme_m68k_exceptions = 0;
  ic->_tme_m68k_mode = TME_M68K_MODE_EXECUTION;
  TME_M68K_SEQUENCE_START;
  tme_m68k_redispatch(ic);
}

/* this starts an m68k RTE: */
tme_uint16_t
tme_m68k_rte_start(struct tme_m68k *ic)
{

  /* set up to read from the stack frame: */
  if (!TME_M68K_SEQUENCE_RESTARTING) {
    ic->_tme_m68k_ea_function_code = TME_M68K_FC_SD;
    ic->_tme_m68k_ea_address = ic->tme_m68k_ireg_a7;
  }

  /* read the stacked status register: */
  tme_m68k_read_mem16(ic, TME_M68K_IREG_SHADOW_SR);
  if (!TME_M68K_SEQUENCE_RESTARTING) {
    ic->_tme_m68k_ea_address += sizeof(ic->tme_m68k_ireg_shadow_sr);
  }

  /* read the stacked PC: */
  tme_m68k_read_mem32(ic, TME_M68K_IREG_PC_NEXT);
  if (!TME_M68K_SEQUENCE_RESTARTING) {
    ic->_tme_m68k_ea_address += sizeof(ic->tme_m68k_ireg_pc_next);
  }

  /* read the stacked format/offset word, unless this is a 68000: */
  if (ic->tme_m68k_type != TME_M68K_M68000) {
    tme_m68k_read_mem16(ic, TME_M68K_IREG_FORMAT_OFFSET);
    if (!TME_M68K_SEQUENCE_RESTARTING) {
      ic->_tme_m68k_ea_address += sizeof(ic->tme_m68k_ireg_format_offset);
    }
  }
  else {
    ic->tme_m68k_ireg_format_offset = 0;
  }

  /* return the frame format: */
  return (ic->tme_m68k_ireg_format_offset >> 12);
}

/* this finishes an m68k RTE: */
void
tme_m68k_rte_finish(struct tme_m68k *ic, tme_uint32_t format_extra)
{
  tme_uint32_t frame_size;

  /* calculate the total frame size.  the 68000 doesn't have a
     format/status word: */
  frame_size = (sizeof(ic->tme_m68k_ireg_shadow_sr)
                + sizeof(ic->tme_m68k_ireg_pc_next)
                + (ic->tme_m68k_type != TME_M68K_M68000
                   ? sizeof(ic->tme_m68k_ireg_format_offset)
                   : 0)
                + format_extra);
  assert((frame_size & 1) == 0);

  /* adjust the stack: */
  ic->tme_m68k_ireg_a7 += frame_size;

  /* set the status register: */
  tme_m68k_change_sr(ic, ic->tme_m68k_ireg_shadow_sr);

  /* set the PC: */
  ic->tme_m68k_ireg_pc = ic->tme_m68k_ireg_pc_next;

  /* redispatch: */
  tme_m68k_redispatch(ic);
}

/* this stores the group 0 sequence into a region of host memory.
   this is used when preparing the state information to be stored
   on the stack for a bus or address error: */
unsigned int
tme_m68k_sequence_empty(const struct tme_m68k *ic, tme_uint8_t *raw, unsigned int raw_avail)
{
  const struct _tme_m68k_sequence *sequence;
  unsigned int raw_used;

  /* get the group 0 sequence: */
  sequence = &ic->_tme_m68k_group0_sequence;
  raw_used = 0;

  /* we use 8 bits for the mode (2 bits) and flags (6 bits): */
  raw_used += sizeof(tme_uint8_t);
  assert(raw_avail >= raw_used);
  assert(sequence->_tme_m68k_sequence_mode < TME_BIT(2));
  assert(sequence->_tme_m68k_sequence_mode_flags < TME_BIT(6));
  *(raw++) = ((sequence->_tme_m68k_sequence_mode << 6)
              | sequence->_tme_m68k_sequence_mode_flags);

  /* we use 16 bits for the faulted memory transfer ordinal
     (12 bits) and already-transferred byte count (4 bits): */
  raw_used += sizeof(tme_uint16_t);
  assert(raw_avail >= raw_used);
  assert(sequence->_tme_m68k_sequence_transfer_faulted < TME_BIT(12));
  assert(sequence->_tme_m68k_sequence_transfer_faulted_after < TME_BIT(4));
  *(raw++) = sequence->_tme_m68k_sequence_transfer_faulted >> 4;
  *(raw++) = ((sequence->_tme_m68k_sequence_transfer_faulted << 4)
              | sequence->_tme_m68k_sequence_transfer_faulted_after);
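
  /* the packed encoding so far is three bytes:

       byte 0: mode (2 bits) | mode_flags (6 bits)
       byte 1: transfer_faulted bits 11-4
       byte 2: transfer_faulted bits 3-0 | transfer_faulted_after (4 bits)

     tme_m68k_sequence_fill() below decodes exactly this layout: */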

#ifdef _TME_M68K_VERIFY
  /* we use sizeof(_tme_m68k_sequence_uid) bytes for the sequence UID: */
  raw_used += sizeof(sequence->_tme_m68k_sequence_uid);
  assert(raw_avail >= raw_used);
  memcpy(raw,
         &sequence->_tme_m68k_sequence_uid,
         sizeof(sequence->_tme_m68k_sequence_uid));
  raw += sizeof(sequence->_tme_m68k_sequence_uid);
#endif /* _TME_M68K_VERIFY */

  /* done: */
  return (raw_used);
}

/* this restores the group 0 sequence from a region of host memory.
   this is used when reading the state information stored on the
   stack for a bus or address error: */
unsigned int
tme_m68k_sequence_fill(struct tme_m68k *ic, const tme_uint8_t *raw, unsigned int raw_avail)
{
  struct _tme_m68k_sequence *sequence;
  unsigned int raw_used;

  /* get the group 0 sequence: */
  sequence = &ic->_tme_m68k_group0_sequence;
  raw_used = 0;

  /* we used 8 bits for the mode (2 bits) and flags (6 bits): */
  raw_used += sizeof(tme_uint8_t);
  if (raw_avail < raw_used) {
    return (0);
  }
  sequence->_tme_m68k_sequence_mode = *raw >> 6;
  sequence->_tme_m68k_sequence_mode_flags = (*(raw++) & (TME_BIT(6) - 1));

  /* we used 16 bits for the faulted memory transfer ordinal
     (12 bits) and already-transferred byte count (4 bits): */
  raw_used += sizeof(tme_uint16_t);
  if (raw_avail < raw_used) {
    return (0);
  }
  sequence->_tme_m68k_sequence_transfer_faulted =
    (((tme_uint16_t) raw[0]) << 4)
    | (raw[1] >> 4);
  sequence->_tme_m68k_sequence_transfer_faulted_after = raw[1] & (TME_BIT(4) - 1);
  raw += sizeof(tme_uint16_t);

#ifdef _TME_M68K_VERIFY
  /* we used sizeof(_tme_m68k_sequence_uid) bytes for the sequence UID: */
  raw_used += sizeof(sequence->_tme_m68k_sequence_uid);
  if (raw_avail < raw_used) {
    return (0);
  }
  memcpy(&sequence->_tme_m68k_sequence_uid,
         raw,
         sizeof(sequence->_tme_m68k_sequence_uid));
  raw += sizeof(sequence->_tme_m68k_sequence_uid);
#endif /* _TME_M68K_VERIFY */

  /* initialize this to one: */
  sequence->_tme_m68k_sequence_transfer_next = 1;

  /* done: */
  return (raw_used);
}

/* this empties the instruction buffer into an exception frame: */
unsigned int
tme_m68k_insn_buffer_empty(const struct tme_m68k *ic, tme_uint8_t *raw, unsigned int raw_avail)
{
  unsigned int fetch_total;

  /* get the total number of bytes in the instruction buffer: */
  fetch_total = ic->_tme_m68k_insn_fetch_slow_count_total;

  /* save the total number of bytes fetched into the instruction
     buffer, the number of bytes in the instruction buffer fetched by
     the fast executor, and then the instruction buffer itself: */
  assert ((fetch_total % sizeof(tme_uint16_t)) == 0
          && fetch_total <= (TME_M68K_INSN_WORDS_MAX * sizeof(tme_uint16_t)));
  assert ((ic->_tme_m68k_insn_fetch_slow_count_fast % sizeof(tme_uint16_t)) == 0
          && ic->_tme_m68k_insn_fetch_slow_count_fast <= fetch_total);
  assert (raw_avail >= (sizeof(tme_uint8_t) + sizeof(tme_uint8_t) + fetch_total));
  raw[0] = fetch_total;
  raw[1] = ic->_tme_m68k_insn_fetch_slow_count_fast;
  memcpy(raw + 2,
         &ic->_tme_m68k_insn_fetch_buffer[0],
         fetch_total);

  /* return the number of bytes we put in an exception frame: */
  return (sizeof(tme_uint8_t) + sizeof(tme_uint8_t) + fetch_total);
}

/* this fills the instruction buffer from an exception frame: */
unsigned int
tme_m68k_insn_buffer_fill(struct tme_m68k *ic, const tme_uint8_t *raw, unsigned int raw_avail)
{
  unsigned int fetch_total;
  unsigned int fetch_fast;

  /* there must be at least two bytes in the exception frame: */
  if (raw_avail >= (sizeof(tme_uint8_t) + sizeof(tme_uint8_t))) {

    /* restore the total number of bytes fetched into the instruction
       buffer, and the number of bytes in the instruction buffer
       fetched by the fast executor: */
    fetch_total = raw[0];
    fetch_fast = raw[1];
    if ((fetch_total % sizeof(tme_uint16_t)) == 0
        && fetch_total <= (TME_M68K_INSN_WORDS_MAX * sizeof(tme_uint16_t))
        && (fetch_fast % sizeof(tme_uint16_t)) == 0
        && fetch_fast <= fetch_total
        && raw_avail >= (sizeof(tme_uint8_t) + sizeof(tme_uint8_t) + fetch_total)) {

      /* restore the total number of bytes fetched into the instruction
         buffer, the number of bytes in the instruction buffer fetched by
         the fast executor, and then the instruction buffer itself: */
      ic->_tme_m68k_insn_fetch_slow_count_total = fetch_total;
      ic->_tme_m68k_insn_fetch_slow_count_fast = fetch_fast;
      memcpy(&ic->_tme_m68k_insn_fetch_buffer[0],
             raw + 2,
             fetch_total);

      /* return the number of bytes restored from the exception frame: */
      return ((sizeof(tme_uint8_t) + sizeof(tme_uint8_t) + fetch_total));
    }
  }

  /* this exception frame is invalid: */
  return (0);
}

/* this unlocks data structures before a callout: */
void
tme_m68k_callout_unlock(struct tme_m68k *ic)
{
  struct tme_m68k_tlb *tlb;

  assert ((ic->_tme_m68k_mode == TME_M68K_MODE_EXECUTION)
          || (ic->_tme_m68k_insn_fetch_fast_itlb == NULL));

  /* if we have a busy fast instruction TLB entry: */
  tlb = ic->_tme_m68k_insn_fetch_fast_itlb;
  if (tlb != NULL) {

    /* unbusy the fast instruction TLB entry: */
    tme_m68k_tlb_unbusy(tlb);
  }
}

/* this relocks data structures after a callout: */
void
tme_m68k_callout_relock(struct tme_m68k *ic)
{
  struct tme_m68k_tlb *tlb;
  tme_bus_context_t bus_context;
  struct tme_m68k_tlb *tlb_now;

  assert ((ic->_tme_m68k_mode == TME_M68K_MODE_EXECUTION)
          || (ic->_tme_m68k_insn_fetch_fast_itlb == NULL));

  /* if we have a busy fast instruction TLB entry: */
  tlb = ic->_tme_m68k_insn_fetch_fast_itlb;
  if (tlb != NULL) {

    /* rebusy the fast instruction TLB entry: */
    tme_m68k_tlb_busy(tlb);

    /* get the bus context: */
    bus_context = ic->_tme_m68k_bus_context;

    /* get what should be our instruction TLB entry now: */
    tlb_now = &ic->_tme_m68k_itlb;

    /* if this instruction TLB entry has changed, is for the wrong
       context, or is invalid: */
    if (__tme_predict_false(tlb_now != tlb
                            || tlb->tme_m68k_tlb_bus_context != bus_context
                            || tme_m68k_tlb_is_invalid(tlb))) {

      /* poison ic->_tme_m68k_insn_fetch_fast_last so the fast
         instruction executor fetch macros will fail: */
      assert ((ic->_tme_m68k_insn_fetch_fast_next - 1) < ic->_tme_m68k_insn_fetch_fast_next);
      ic->_tme_m68k_insn_fetch_fast_last = ic->_tme_m68k_insn_fetch_fast_next - 1;
    }
  }
}

/* this is the group 0 fault hook for the fast executor: */
void
tme_m68k_group0_hook_fast(struct tme_m68k *ic)
{
  unsigned int fetch_fast;

  /* get the number of bytes in the instruction buffer.  they have all
     been fetched by the fast executor: */
  /* NB: it's possible for this to be zero: */
  fetch_fast = (ic->_tme_m68k_insn_fetch_fast_next - ic->_tme_m68k_insn_fetch_fast_start);
  assert ((fetch_fast % sizeof(tme_uint16_t)) == 0
          && fetch_fast <= (TME_M68K_INSN_WORDS_MAX * sizeof(tme_uint16_t)));
  ic->_tme_m68k_insn_fetch_slow_count_total = fetch_fast;
  ic->_tme_m68k_insn_fetch_slow_count_fast = fetch_fast;
}

/* this starts a read/modify/write cycle: */
int
tme_m68k_rmw_start(struct tme_m68k *ic,
                   struct tme_m68k_rmw *rmw)
{
  tme_bus_context_t bus_context;
  struct tme_m68k_tlb *tlbs_all[3];
  int tlbs_busy[2];
  struct tme_m68k_tlb *tlb;
  struct tme_m68k_tlb *tlb_use;
  unsigned int tlb_i;
  unsigned int address_i;
  unsigned int address_i_fill;
  tme_uint32_t address;
  unsigned int address_cycles[2];
  unsigned int address_fills[2];
  tme_uint32_t *buffer_reg;
  int supported;

  /* if the user reran the cycle: */
  if (TME_M68K_SEQUENCE_RESTARTING
      && (ic->_tme_m68k_group0_buffer_read_softrr > 0
          || ic->_tme_m68k_group0_buffer_write_softrr > 0)) {

    /* return failure: */
    return (-1);
  }

  /* we always rerun read/modify/write cycles in their entirety: */
  ic->_tme_m68k_sequence._tme_m68k_sequence_transfer_faulted
    = ic->_tme_m68k_sequence._tme_m68k_sequence_transfer_next - 1;

  /* we only support tas and cas, which have one address, and cas2,
     which has two addresses: */
  assert (rmw->tme_m68k_rmw_address_count == 1
          || rmw->tme_m68k_rmw_address_count == 2);

  /* get the context that we will use to index TLB entries for this
     instruction.  NB that this may be different from the context in
     which the instruction eventually completes: */
  bus_context = ic->_tme_m68k_bus_context;

  /* assume that we will only consider one TLB entry, for the first
     address: */
  tlbs_all[0] = TME_M68K_DTLB_ENTRY(ic,
                                    bus_context,
                                    ic->_tme_m68k_ea_function_code,
                                    rmw->tme_m68k_rmw_addresses[0]);
  tlbs_all[1] = NULL;

  /* if there are two addresses: */
  if (rmw->tme_m68k_rmw_address_count == 2) {

    /* we will consider another TLB entry for the second address: */
    tlbs_all[1] = TME_M68K_DTLB_ENTRY(ic,
                                      bus_context,
                                      ic->_tme_m68k_ea_function_code,
                                      rmw->tme_m68k_rmw_addresses[1]);

    /* if the TLB entry for the second address collides with the TLB
       entry for the first address: */
    if (tlbs_all[1] == tlbs_all[0]) {

      /* we will instead consider an alternate TLB entry for the
         second address: */
      tlbs_all[1] = TME_M68K_DTLB_ENTRY(ic,
                                        bus_context,
                                        ic->_tme_m68k_ea_function_code,
                                        (rmw->tme_m68k_rmw_addresses[1]
                                         + TME_M68K_TLB_ADDRESS_BIAS(1)));
      assert (tlbs_all[1] != tlbs_all[0]);
    }
  }
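
  /* NB: TME_M68K_DTLB_ENTRY evidently hashes the context, function
     code, and address down to one entry in the TLB array, so two
     distinct addresses can collide on the same entry; adding
     TME_M68K_TLB_ADDRESS_BIAS(1) perturbs the second lookup to
     force a different entry.  that reading of these macros is an
     assumption from their use here, not from their definitions: */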
1498
1499 /* make sure that the list of TLB entries to consider is terminated: */
1500 tlbs_all[2] = NULL;
1501
1502 /* none of the TLB entries to consider are busy: */
1503 tlbs_busy[0] = FALSE;
1504 tlbs_busy[1] = FALSE;
1505
1506 /* the addresses aren't using any TLB entries yet: */
1507 rmw->tme_m68k_rmw_tlbs[0] = NULL;
1508 rmw->tme_m68k_rmw_tlbs[1] = NULL;
1509
1510 /* we haven't done any slow reads for any addresses yet: */
1511 rmw->tme_m68k_rmw_slow_reads[0] = FALSE;
1512 rmw->tme_m68k_rmw_slow_reads[1] = FALSE;
1513
1514 /* whenever we need to find a TLB entry to use for an address, we
1515 always prefer one that allows both reading and writing, because
1516 we hope that such a TLB entry allows both fast reading and fast
1517 writing.
1518
1519 if we can't find such a TLB entry initially, we try to fill a TLB
1520 entry for writing (you can't fill a TLB entry for both reading
1521 and writing), in the hopes that this gives us a TLB entry that
1522 allows both fast reading and fast writing. filling for writing
1523 is important with some virtual memory hardware, and may actually
1524 be required to enable writing.
1525
1526 if this fill gives us a TLB entry that doesn't allow both fast
1527 reading and fast writing, it actually might not allow reading at
1528 all. to check for this, we then try to fill a TLB entry for
1529 reading.
1530
1531 if we still don't have a TLB entry that allows both fast reading
1532 and fast writing, we must at least have a TLB entry that allows
1533 slow reading. at this point we do a slow read to start a locked
1534 read-modify-write cycle (unless this is a cas2, in which case we
1535 do a normal slow read).
1536
1537 we always want to return to the caller with a TLB entry that
1538 allows writing, so after we do a slow read we do one more TLB
1539 fill for writing.
1540
1541 the first TLB fill we do for an address will be for writing, so
1542 that is how we initialize an address' address_cycles mask: */
1543 address_cycles[0] = TME_BUS_CYCLE_WRITE;
1544 address_cycles[1] = TME_BUS_CYCLE_WRITE;
1545
1546 /* we haven't filled TLBs for any addresses yet: */
1547 address_fills[0] = 0;
1548 address_fills[1] = 0;

  /* assume that we can support this instruction on the given memory: */
  supported = TRUE;

  /* loop forever: */
  for (;;) {

    /* assume that no address needs a TLB fill: */
    address_i_fill = rmw->tme_m68k_rmw_address_count;

    /* get the bus context for this iteration: */
    bus_context = ic->_tme_m68k_bus_context;

    /* walk the addresses: */
    address_i = 0;
    do {

      /* get this address: */
      address = rmw->tme_m68k_rmw_addresses[address_i];

      /* this address isn't using a TLB entry yet: */
      tlb_use = NULL;

      /* walk the TLB entries we are considering: */
      for (tlb_i = 0;
           (tlb = tlbs_all[tlb_i]) != NULL;
           tlb_i++) {

        /* if this TLB entry isn't busy, busy it: */
        if (!tlbs_busy[tlb_i]) {
          tme_m68k_tlb_busy(tlb);
          tlbs_busy[tlb_i] = TRUE;
        }

        /* if this TLB entry is valid, applies to this context, function
           code and address, and allows at least the desired cycle(s), and
           either this address isn't already using a TLB entry, or the
           TLB entry it's using doesn't cover the entire operand, or
           this TLB entry allows more cycles or allows both fast
           reading and fast writing: */
        if (tme_m68k_tlb_is_valid(tlb)
            && tlb->tme_m68k_tlb_bus_context == bus_context
            && (tlb->tme_m68k_tlb_function_codes_mask
                & TME_BIT(ic->_tme_m68k_ea_function_code)) != 0
            && address >= (tme_bus_addr32_t) tlb->tme_m68k_tlb_linear_first
            && address <= (tme_bus_addr32_t) tlb->tme_m68k_tlb_linear_last
            && (tlb->tme_m68k_tlb_cycles_ok
                & address_cycles[address_i]) != 0
            && (tlb_use == NULL
                || (((tme_bus_addr32_t) tlb_use->tme_m68k_tlb_linear_last) - address) < rmw->tme_m68k_rmw_size
                || tlb->tme_m68k_tlb_cycles_ok > tlb_use->tme_m68k_tlb_cycles_ok
                || (tlb->tme_m68k_tlb_emulator_off_read != TME_EMULATOR_OFF_UNDEF
                    && tlb->tme_m68k_tlb_emulator_off_write != TME_EMULATOR_OFF_UNDEF))) {

          /* update the TLB entry this address is using: */
          tlb_use = tlb;
        }
      }

      /* set the TLB entry being used by this address: */
      rmw->tme_m68k_rmw_tlbs[address_i] = tlb_use;

      /* if this address is not using any TLB entry: */
      if (tlb_use == NULL) {

        /* we need to fill a TLB entry for this address: */
        address_i_fill = address_i;
      }

    } while (++address_i < rmw->tme_m68k_rmw_address_count);

    /* if we need to fill a TLB entry for an address: */
    address_i = address_i_fill;
    if (address_i < rmw->tme_m68k_rmw_address_count) {

      /* get this address: */
      address = rmw->tme_m68k_rmw_addresses[address_i];

      /* get an unused TLB entry to fill: */
      tlb_i = 0;
      tlb = tlbs_all[0];
      if (tlb == rmw->tme_m68k_rmw_tlbs[!address_i]) {
        tlb_i = 1;
        tlb = tlbs_all[1];
      }
      assert (tlb != NULL
              && tlb != rmw->tme_m68k_rmw_tlbs[!address_i]);

      /* NB: cas2 can need two TLB entries.  we may find one good TLB
         entry for one address, but need to call out to fill a TLB for
         the second address, and unfortunately we have to unbusy the
         good one while we're doing the fill.  while the good one is
         unbusy, it can be invalidated, and we'll have to fill it
         again, unbusying the good one we just filled, possibly
         leading to a vicious cycle.

         it's also possible that the TLB entry we fill here could be
         invalidated after it's been filled and before we've busied it
         again.  this is also the case for the single-TLB operations:
         normal memory reads and writes, and tas and cas, and to
         handle that we simply loop around the fill.  since these
         operations only use a single TLB entry, we assume that there
         won't be a vicious cycle - that eventually a single filled
         TLB entry will stay valid until we can busy it and use it.

         but we can't really guarantee this for two TLB entries.
         there's not much we can do about this, except put a limit on
         the number of times we will fill for each address.  this
         limit is somewhat arbitrary: */
      /* XXX FIXME - this should be a macro, or a per-m68k argument: */
      if (rmw->tme_m68k_rmw_address_count == 2
          && address_fills[address_i]++ >= 20) {

        /* we can't support this instruction on this memory: */
        supported = FALSE;
        break;
      }

      /* if the other TLB entry is busy, unbusy it: */
      if (tlbs_busy[!tlb_i]) {
        tme_m68k_tlb_unbusy(tlbs_all[!tlb_i]);
        tlbs_busy[!tlb_i] = FALSE;
      }

      /* fill this TLB entry: */
      tme_m68k_tlb_fill(ic,
                        tlb,
                        ic->_tme_m68k_ea_function_code,
                        address,
                        address_cycles[address_i]);

      /* restart: */
      continue;
    }

    /* walk the addresses: */
    address_i = 0;
    do {

      /* get this address and its TLB entry: */
      address = rmw->tme_m68k_rmw_addresses[address_i];
      tlb = rmw->tme_m68k_rmw_tlbs[address_i];

      /* if this TLB entry doesn't cover the entire operand: */
      if ((((tme_bus_addr32_t) tlb->tme_m68k_tlb_linear_last) - address) < rmw->tme_m68k_rmw_size) {

        /* we can't support this instruction on this memory, because
           we can't split an atomic operation across TLB entries.  on
           a real m68k, the CPU can do repeated bus cycles under one
           bus lock: */
        supported = FALSE;
        break;
      }

      /* if this TLB entry supports both fast reading and fast
         writing: */
      if (tlb->tme_m68k_tlb_emulator_off_read != TME_EMULATOR_OFF_UNDEF
          && tlb->tme_m68k_tlb_emulator_off_write != TME_EMULATOR_OFF_UNDEF) {

        /* if fast reading and fast writing aren't to the same memory: */
        if (tlb->tme_m68k_tlb_emulator_off_read
            != tlb->tme_m68k_tlb_emulator_off_write) {

          /* we can't support this instruction on this memory, because
             we can't split an atomic operation across two memories.
             on a real m68k, the CPU can do repeated bus cycles under
             one bus lock: */
          supported = FALSE;
          break;
        }
      }

      /* otherwise, this TLB entry does not support both fast reading
         and fast writing: */

      /* if we have already done a slow read for this address: */
      else if (rmw->tme_m68k_rmw_slow_reads[address_i]) {

        /* this TLB entry must support writing: */
        assert (tlb->tme_m68k_tlb_cycles_ok & TME_BUS_CYCLE_WRITE);

        /* nothing to do: */
      }

      /* otherwise, we have not already done a slow read for this
         address: */

      /* if this TLB entry doesn't support slow reading: */
      else if ((tlb->tme_m68k_tlb_cycles_ok & TME_BUS_CYCLE_READ) == 0) {

        /* we must fill a TLB entry for reading: */
        assert (address_cycles[address_i] == TME_BUS_CYCLE_WRITE);
        address_cycles[address_i] = TME_BUS_CYCLE_READ;

        /* restart: */
        break;
      }

      /* otherwise, this TLB entry does support slow reading: */
      else {

        /* if the other TLB entry is busy, unbusy it: */
        tlb_i = (tlb == tlbs_all[1]);
        if (tlbs_busy[!tlb_i]) {
          tme_m68k_tlb_unbusy(tlbs_all[!tlb_i]);
          tlbs_busy[!tlb_i] = FALSE;
        }

        /* this instruction can fault: */
        TME_M68K_INSN_CANFAULT;

        /* do a slow read.  if this is the first address, we start a
           slow read-modify-write cycle, otherwise we do a normal slow
           read cycle: */
        assert (rmw->tme_m68k_rmw_size <= sizeof(ic->tme_m68k_ireg_memx32));
        tme_m68k_read(ic,
                      tlb,
                      &ic->_tme_m68k_ea_function_code,
                      &rmw->tme_m68k_rmw_addresses[address_i],
                      (((tme_uint8_t *)
                        (address_i == 0
                         ? &ic->tme_m68k_ireg_memx32
                         : &ic->tme_m68k_ireg_memy32))
                       + (TME_ENDIAN_NATIVE == TME_ENDIAN_BIG
                          ? (sizeof(ic->tme_m68k_ireg_memx32)
                             - rmw->tme_m68k_rmw_size)
                          : 0)),
                      rmw->tme_m68k_rmw_size,
                      (address_i == 0
                       ? TME_M68K_BUS_CYCLE_RMW
                       : TME_M68K_BUS_CYCLE_NORMAL));
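
        /* a worked example of the buffer offset above (illustrative
           only): with rmw_size == 2, on a big-endian host the two
           operand bytes land at offsets 2 and 3 of the 32-bit
           memx/memy buffer, which are its least-significant bytes;
           on a little-endian host offset 0 is already the
           least-significant end, so no adjustment is needed: */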

        /* we have done a slow read for this address: */
        rmw->tme_m68k_rmw_slow_reads[address_i] = TRUE;

        /* now we need a TLB entry for this address that supports writing: */
        address_cycles[address_i] = TME_BUS_CYCLE_WRITE;

        /* restart: */
        break;
      }

    } while (++address_i < rmw->tme_m68k_rmw_address_count);

    /* if this instruction is not supported or we've handled all
       addresses, stop now: */
    if (!supported
        || address_i >= rmw->tme_m68k_rmw_address_count) {
      break;
    }
  }

  /* unbusy any TLB entries that aren't being used: */
  if (tlbs_busy[0]
      && (!supported
          || (tlbs_all[0] != rmw->tme_m68k_rmw_tlbs[0]
              && tlbs_all[0] != rmw->tme_m68k_rmw_tlbs[1]))) {
    tme_m68k_tlb_unbusy(tlbs_all[0]);
  }
  if (tlbs_busy[1]
      && (!supported
          || (tlbs_all[1] != rmw->tme_m68k_rmw_tlbs[0]
              && tlbs_all[1] != rmw->tme_m68k_rmw_tlbs[1]))) {
    tme_m68k_tlb_unbusy(tlbs_all[1]);
  }

  /* if this instruction is not supported on this memory: */
  if (!supported) {

    /* cause an illegal instruction exception: */
    TME_M68K_INSN_EXCEPTION(TME_M68K_EXCEPTION_ILL);
  }

  /* if this is the cas2 instruction: */
  if (rmw->tme_m68k_rmw_address_count == 2) {

    /* cas2 is a difficult instruction to emulate, since it accesses
       two different addresses during one atomic read-modify-write
       cycle.

       most host CPUs can't do this, so when threads are not
       cooperative, we're forced to suspend all other threads when
       running a cas2 instruction: */
    if (!TME_THREADS_COOPERATIVE) {
      tme_thread_suspend_others();
    }

    /* the cas2 functions also assume that we have read all operands
       into the memory buffers, which means we have to fast-read any
       addresses that we haven't already slow-read: */
    address_i = 0;
    do {

      /* skip this address if we really did slow-read it: */
      if (rmw->tme_m68k_rmw_slow_reads[address_i]) {
        continue;
      }

      /* get this address and its TLB entry: */
      address = rmw->tme_m68k_rmw_addresses[address_i];
      tlb = rmw->tme_m68k_rmw_tlbs[address_i];

      /* this TLB entry must support fast reading and fast writing: */
      assert (tlb->tme_m68k_tlb_emulator_off_read != TME_EMULATOR_OFF_UNDEF
              && tlb->tme_m68k_tlb_emulator_off_write == tlb->tme_m68k_tlb_emulator_off_read);

      /* do the fast read.  all other threads are suspended here, so
         we can do a memcpy instead of an atomic read: */
      assert (rmw->tme_m68k_rmw_size <= sizeof(ic->tme_m68k_ireg_memx32));
      buffer_reg
        = (address_i == 0
           ? &ic->tme_m68k_ireg_memx32
           : &ic->tme_m68k_ireg_memy32);
      memcpy((((tme_uint8_t *) buffer_reg)
              + (sizeof(ic->tme_m68k_ireg_memx32)
                 - rmw->tme_m68k_rmw_size)),
             (((tme_uint8_t *)
               tlb->tme_m68k_tlb_emulator_off_read)
              + address),
             rmw->tme_m68k_rmw_size);

      /* byteswap the value read: */
      *buffer_reg = tme_betoh_u32(*buffer_reg);
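
      /* an illustrative note on why this works for either host byte
         order: the operand bytes were stored at the big-endian end of
         the buffer, at offset sizeof(memx32) - rmw_size, so converting
         the whole 32-bit word from guest (big-endian) order to host
         order leaves the operand in the least-significant bytes of
         *buffer_reg.  the more-significant bytes are stale, which is
         presumably harmless since cas2 compares only rmw_size-byte
         operands: */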

    } while (++address_i < rmw->tme_m68k_rmw_address_count);
  }

  /* return success: */
  return (0);
}

/* this finishes a read/modify/write cycle: */
void
tme_m68k_rmw_finish(struct tme_m68k *ic,
                    struct tme_m68k_rmw *rmw,
                    int do_write)
{
  struct tme_m68k_tlb *tlbs_all[2];
  int tlbs_busy[2];
  struct tme_m68k_tlb *tlb;
  unsigned int tlb_i;
  unsigned int address_i;
  tme_uint32_t address;
  int supported;
  tme_uint32_t *buffer_reg;

  /* recover the tlbs_all[] array and tlbs_busy[] information: */
  tlbs_all[0] = rmw->tme_m68k_rmw_tlbs[0];
  tlbs_busy[0] = TRUE;
  if (rmw->tme_m68k_rmw_tlbs[1] != NULL
      && rmw->tme_m68k_rmw_tlbs[1] != rmw->tme_m68k_rmw_tlbs[0]) {
    tlbs_all[1] = rmw->tme_m68k_rmw_tlbs[1];
    tlbs_busy[1] = TRUE;
  }
  else {
    tlbs_all[1] = NULL;
    tlbs_busy[1] = FALSE;
  }

  /* assume that this instruction is supported: */
  supported = TRUE;

  /* loop over the addresses: */
  address_i = 0;
  do {

    /* get this address and TLB entry: */
    address = rmw->tme_m68k_rmw_addresses[address_i];
    tlb = rmw->tme_m68k_rmw_tlbs[address_i];

    /* get the buffer for this address: */
    buffer_reg
      = (address_i == 0
         ? &ic->tme_m68k_ireg_memx32
         : &ic->tme_m68k_ireg_memy32);

    /* if we did a slow read for this operand: */
    if (rmw->tme_m68k_rmw_slow_reads[address_i]) {

      /* if the other TLB entry is busy, unbusy it: */
      tlb_i = (tlb == tlbs_all[1]);
      if (tlbs_busy[!tlb_i]) {
        tme_m68k_tlb_unbusy(tlbs_all[!tlb_i]);
        tlbs_busy[!tlb_i] = FALSE;
      }

      /* do the slow write for this operand: */
      assert (rmw->tme_m68k_rmw_size <= sizeof(ic->tme_m68k_ireg_memx32));
      tme_m68k_write(ic,
                     tlb,
                     &ic->_tme_m68k_ea_function_code,
                     &rmw->tme_m68k_rmw_addresses[address_i],
                     (((tme_uint8_t *) buffer_reg)
                      + (TME_ENDIAN_NATIVE == TME_ENDIAN_BIG
                         ? (sizeof(ic->tme_m68k_ireg_memx32)
                            - rmw->tme_m68k_rmw_size)
                         : 0)),
                     rmw->tme_m68k_rmw_size,
                     (address_i == 0
                      ? TME_M68K_BUS_CYCLE_RMW
                      : TME_M68K_BUS_CYCLE_NORMAL));

      /* if this is the cas2 instruction: */
      if (rmw->tme_m68k_rmw_address_count == 2) {

        /* if a cas2 slow write doesn't fault, it has just done a slow
           write to device memory, which is actually bad because we
           can't do an atomic cas2 involving any device memory at all
           (we can't do the dual reads and dual writes all atomically).

           we tried the slow write anyway, hoping that it was really
           to write-protected memory that would fault, so that when we
           restarted, this address would point to fast-writable
           memory.

           unfortunately, we can't undo the slow write.  we do cause
           an illegal instruction exception, to make this problem
           visible: */
        supported = FALSE;
        break;
      }
    }

    /* otherwise, if this is the cas2 instruction, and we're writing: */
    else if (rmw->tme_m68k_rmw_address_count == 2
             && do_write) {

      /* this TLB entry must support fast reading and fast writing: */
      assert (tlb->tme_m68k_tlb_emulator_off_read != TME_EMULATOR_OFF_UNDEF
              && tlb->tme_m68k_tlb_emulator_off_write == tlb->tme_m68k_tlb_emulator_off_read);

      /* byteswap the value to write: */
      *buffer_reg = tme_htobe_u32(*buffer_reg);

      /* do the fast write.  all other threads are suspended here, so
         we can do a memcpy instead of an atomic write: */
      assert (rmw->tme_m68k_rmw_size <= sizeof(ic->tme_m68k_ireg_memx32));
      memcpy((((tme_uint8_t *)
               tlb->tme_m68k_tlb_emulator_off_read)
              + address),
             (((tme_uint8_t *) buffer_reg)
              + (sizeof(ic->tme_m68k_ireg_memx32)
                 - rmw->tme_m68k_rmw_size)),
             rmw->tme_m68k_rmw_size);
    }

  } while (++address_i < rmw->tme_m68k_rmw_address_count);

  /* unbusy all TLB entries: */
  if (tlbs_busy[0]) {
    tme_m68k_tlb_unbusy(tlbs_all[0]);
  }
  if (tlbs_busy[1]) {
    tme_m68k_tlb_unbusy(tlbs_all[1]);
  }

  /* cas2 is a difficult instruction to emulate, since it accesses two
     different addresses during one atomic read-modify-write cycle.
     most host CPUs can't do this, so when threads are not
     cooperative, we're forced to suspend all other threads when
     running a cas2 instruction; resume them now: */
  if (!TME_THREADS_COOPERATIVE
      && rmw->tme_m68k_rmw_address_count > 1) {
    tme_thread_resume_others();
  }

  /* if this instruction is not supported on this memory: */
  if (!supported) {

    /* cause an illegal instruction exception: */
    TME_M68K_INSN_EXCEPTION(TME_M68K_EXCEPTION_ILL);
  }
}

/* this handles a bitfield offset.  if the bitfield is in memory,
   and it hasn't already been done, this adjusts the effective
   address to point to the beginning of the bitfield.  this always
   returns a nonnegative bitfield offset: */
unsigned int
tme_m68k_bitfield_offset(struct tme_m68k *ic, int adjust)
{
  tme_int16_t specop;
  tme_int32_t bf_offset;
  tme_int32_t bf_ea_offset;

  /* get the bitfield offset from a data register or as an immediate: */
  specop = ic->_tme_m68k_insn_specop;
  bf_offset = ((specop & TME_BIT(11))
               ? ic->tme_m68k_ireg_int32(TME_M68K_IREG_D0 + TME_FIELD_EXTRACTU(specop, 6, 3))
               : (tme_int32_t) TME_FIELD_EXTRACTU(specop, 6, 5));

  /* if this bitfield is in a register (EA mode field is zero): */
  if (TME_FIELD_EXTRACTU(ic->_tme_m68k_insn_opcode, 3, 3) == 0) {

    /* adjust the bitfield offset to be nonnegative: */
    bf_offset &= 31;
  }

  /* otherwise, this bitfield is in memory: */
  else {

    /* calculate the effective address offset and adjust the bitfield
       offset to be nonnegative: */
    bf_ea_offset = ((bf_offset < 0
                     ? (bf_offset - 7)
                     : bf_offset)
                    / 8);
    bf_offset &= 7;
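
    /* a worked example (illustrative only): a bitfield offset of -5
       means the field starts five bits before the MSB of the byte at
       the effective address, i.e. at bit 3 (counting from the MSB) of
       the previous byte.  the code above computes exactly that:
       (-5 - 7) / 8 == -1 with the usual truncate-toward-zero
       division, and -5 & 7 == 3 on a two's-complement host: */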

    /* if this is our first call to this function for this instruction
       and we're not restarting, adjust the effective address: */
    if (adjust
        && !TME_M68K_SEQUENCE_RESTARTING) {
      ic->_tme_m68k_ea_address += bf_ea_offset;
    }
  }

  /* return the nonnegative bitfield offset: */
  return ((unsigned int) bf_offset);
}

/* this returns a bitfield width: */
unsigned int
tme_m68k_bitfield_width(struct tme_m68k *ic)
{
  unsigned int bf_width;
  tme_int16_t specop;

  /* get the bitfield width from a register or as an immediate.  a
     width field of zero encodes a width of 32: */
  specop = ic->_tme_m68k_insn_specop;
  if (specop & TME_BIT(5)) {
    bf_width = ic->tme_m68k_ireg_uint32(TME_M68K_IREG_D0 + TME_FIELD_EXTRACTU(specop, 0, 3));
    bf_width &= 31;
  }
  else {
    bf_width = TME_FIELD_EXTRACTU(specop, 0, 5);
  }
  if (bf_width == 0) {
    bf_width = 32;
  }
  return (bf_width);
}

/* this reads a bitfield: */
tme_uint32_t
_tme_m68k_bitfield_read(struct tme_m68k *ic, int is_signed)
{
  unsigned int bf_offset, bf_width;
  unsigned int shift;
  tme_uint8_t *bf_bytes;
  tme_uint32_t bf_value;
  int ireg;

  /* get the bitfield offset and width: */
  bf_offset = tme_m68k_bitfield_offset(ic, TRUE);
  bf_width = tme_m68k_bitfield_width(ic);

  /* if this expression is > 32, in a register this means the bitfield
     wraps, and in memory this means the bitfield covers 5 bytes: */
  shift = (bf_offset + bf_width);
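
  /* a worked example of the wrap case (illustrative only): with
     bf_offset == 30 and bf_width == 8, shift is 38.  in a register,
     the code below rotates the word left by 6 so that the 6 wrapped
     bits move to the least-significant end, and rebases bf_offset to
     24; the ordinary shift-and-mask extraction below then yields the
     8 field bits: */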

  /* if this bitfield is in a register (EA mode field is zero): */
  if (TME_FIELD_EXTRACTU(ic->_tme_m68k_insn_opcode, 3, 3) == 0) {
    ireg = (TME_M68K_IREG_D0
            + TME_FIELD_EXTRACTU(ic->_tme_m68k_insn_opcode, 0, 3));

    /* get the raw 32-bit word containing the bitfield: */
    bf_value = ic->tme_m68k_ireg_uint32(ireg);

    /* if this bitfield wraps the register, shift in the wrapped part
       on the right: */
    if (shift > 32) {
      shift -= 32;
      bf_value = (bf_value << shift) | (bf_value >> (32 - shift));
      bf_offset -= shift;
    }
  }

  /* otherwise, this bitfield is in memory: */
  else {

    /* this instruction can fault: */
    ic->_tme_m68k_mode_flags |= TME_M68K_EXECUTION_INST_CANFAULT;

    /* read in the bytes covering the bitfield: */
    bf_bytes = (tme_uint8_t *) &ic->tme_m68k_ireg_memx32;
    tme_m68k_read_mem(ic, bf_bytes, (bf_offset + bf_width + 7) / 8);

    /* get the raw 32-bit word containing the bitfield: */
    bf_value = tme_betoh_u32(ic->tme_m68k_ireg_memx32);

    /* if this bitfield covers 5 bytes, shift in the part from the fifth
       byte (actually in memy32!) on the right: */
    if (shift > 32) {
      shift -= 32;
      bf_value = (bf_value << shift) | (bf_bytes[4] >> (8 - shift));
      bf_offset -= shift;
    }
  }

  /* shift the value: */
  shift = (32 - (bf_offset + bf_width));
  bf_value >>= shift;

  /* mask the value: */
  bf_value &= (0xffffffffUL >> (32 - bf_width));

  /* if this is a signed value, sign-extend it: */
  if (is_signed
      && (bf_value & TME_BIT(bf_width - 1))) {
    bf_value |= (0xffffffffUL << (bf_width - 1));
  }

  /* all bitfield instructions that read the bitfield set the flags: */
  if (!TME_M68K_SEQUENCE_RESTARTING) {
    ic->tme_m68k_ireg_ccr = ((ic->tme_m68k_ireg_ccr & TME_M68K_FLAG_X)
                             | ((bf_value & TME_BIT(bf_width - 1))
                                ? TME_M68K_FLAG_N
                                : 0)
                             | (bf_value
                                ? 0
                                : TME_M68K_FLAG_Z));
  }

  /* return the bitfield value: */
  return (bf_value);
}
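
/* a minimal, self-contained sketch (illustrative only, not part of
   the emulator) of the no-wrap register extraction above, assuming
   bf_offset + bf_width <= 32 and bf_width >= 1.  m68k bitfield
   offsets count from the most-significant bit of the word: */
#if 0
static tme_uint32_t
_tme_m68k_bitfield_extract_example(tme_uint32_t reg,
                                   unsigned int bf_offset,
                                   unsigned int bf_width)
{
  tme_uint32_t bf_value;

  /* shift the field down so that its least-significant bit becomes
     bit zero of the word: */
  bf_value = reg >> (32 - (bf_offset + bf_width));

  /* mask off any bits above the field and return it: */
  return (bf_value & (0xffffffffUL >> (32 - bf_width)));
}
#endif /* 0 */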

/* this writes a bitfield: */
void
tme_m68k_bitfield_write_unsigned(struct tme_m68k *ic, tme_uint32_t bf_value, int set_flags)
{
  unsigned int bf_offset, bf_width;
  unsigned int shift;
  tme_uint8_t *bf_bytes;
  unsigned int count;
  int ireg;

  /* for bitfields in memory, we want to know if the memory covering
     the bitfield is already in our memory buffer, so we can avoid
     reading that memory again.  all bitfield instructions set flags
     based on a bitfield value; if set_flags is FALSE our caller
     must have tested the old bitfield value, and so the bitfield
     memory must be in our buffer, otherwise assume that this is our
     first access to the bitfield memory: */
#define first_memory set_flags

  /* get the bitfield offset and width: */
  bf_offset = tme_m68k_bitfield_offset(ic, first_memory);
  bf_width = tme_m68k_bitfield_width(ic);

  /* if this expression is > 32, in a register this means the bitfield
     wraps, and in memory this means the bitfield covers 5 bytes: */
  shift = (bf_offset + bf_width);

  /* mask the value: */
  bf_value &= (0xffffffffUL >> (32 - bf_width));

  /* if we're supposed to, set the flags: */
  if (set_flags
      && !TME_M68K_SEQUENCE_RESTARTING) {
    ic->tme_m68k_ireg_ccr = ((ic->tme_m68k_ireg_ccr & TME_M68K_FLAG_X)
                             | ((bf_value & TME_BIT(bf_width - 1))
                                ? TME_M68K_FLAG_N
                                : 0)
                             | (bf_value
                                ? 0
                                : TME_M68K_FLAG_Z));
  }

  /* if this bitfield is in a register (EA mode field is zero): */
  if (TME_FIELD_EXTRACTU(ic->_tme_m68k_insn_opcode, 3, 3) == 0) {
    ireg = (TME_M68K_IREG_D0
            + TME_FIELD_EXTRACTU(ic->_tme_m68k_insn_opcode, 0, 3));

    /* if this bitfield wraps the register, put the wrapped
       part in on the left: */
    if (shift > 32) {
      shift -= 32;
      ic->tme_m68k_ireg_uint32(ireg) = ((ic->tme_m68k_ireg_uint32(ireg)
                                         & (0xffffffffUL >> shift))
                                        | (bf_value << (32 - shift)));
      bf_value >>= shift;
      bf_width -= shift;
    }

    /* update the register: */
    shift = (32 - (bf_offset + bf_width));
    ic->tme_m68k_ireg_uint32(ireg) = ((ic->tme_m68k_ireg_uint32(ireg)
                                       & ~((0xffffffffUL >> (32 - bf_width)) << shift))
                                      | (bf_value << shift));
  }

  /* otherwise, this bitfield is in memory: */
  else {

    /* this instruction can fault: */
    ic->_tme_m68k_mode_flags |= TME_M68K_EXECUTION_INST_CANFAULT;

    /* read in the bytes covering the bitfield if we haven't yet: */
    bf_bytes = (tme_uint8_t *) &ic->tme_m68k_ireg_memx32;
    count = (bf_offset + bf_width + 7) / 8;
    if (first_memory) {
      tme_m68k_read_mem(ic, bf_bytes, count);
    }

    /* if this bitfield covers 5 bytes, put the part for the fifth
       byte (actually in memy32!) in on the left: */
    if (shift > 32) {
      shift -= 32;
      if (!TME_M68K_SEQUENCE_RESTARTING) {
        bf_bytes[4] = ((bf_bytes[4]
                        & (0xff >> shift))
                       | ((bf_value & 0xff) << (8 - shift)));
      }
      bf_value >>= shift;
      bf_width -= shift;
    }

    /* update the memory buffer: */
    if (!TME_M68K_SEQUENCE_RESTARTING) {
      shift = (32 - (bf_offset + bf_width));
      ic->tme_m68k_ireg_memx32 =
        tme_htobe_u32((tme_betoh_u32(ic->tme_m68k_ireg_memx32)
                       & ~((0xffffffffUL >> (32 - bf_width)) << shift))
                      | (bf_value << shift));
    }

    /* write out the bytes covering the bitfield to memory: */
    tme_m68k_write_mem(ic, bf_bytes, count);
  }
#undef first_memory
}

/* our global verify hook function: */
#undef tme_m68k_verify_hook
void
tme_m68k_verify_hook(void)
{
}

#if 1
#include <stdio.h>

/* this dumps out the m68k state: */
void
tme_m68k_dump(struct tme_m68k *ic)
{
  int ireg;
  int count;

  /* dump out the integer registers: */
  count = 0;
  for (ireg = TME_M68K_IREG_D0;
       ireg <= TME_M68K_IREG_A7;
       ireg++) {
    fprintf(stderr,
            "%%%c%d[%p] = 0x%08x",
            (ireg < TME_M68K_IREG_A0
             ? 'd'
             : 'a'),
            ireg - (ireg < TME_M68K_IREG_A0
                    ? TME_M68K_IREG_D0
                    : TME_M68K_IREG_A0),
            &ic->tme_m68k_ireg_uint32(ireg),
            ic->tme_m68k_ireg_uint32(ireg));
    if (++count == 2) {
      fprintf(stderr, "\n");
      count = 0;
    }
    else {
      fprintf(stderr, " ");
    }
  }

  /* dump out the PC and next PC: */
  fprintf(stderr, "%%pc = 0x%08x %%pc_next = 0x%08x\n",
          ic->tme_m68k_ireg_pc,
          ic->tme_m68k_ireg_pc_next);

  /* dump out the status register: */
  fprintf(stderr, "%%sr = 0x%04x", ic->tme_m68k_ireg_sr);
  fprintf(stderr, " flags:");
  if (ic->tme_m68k_ireg_ccr & TME_M68K_FLAG_X) {
    fprintf(stderr, " X");
  }
  if (ic->tme_m68k_ireg_ccr & TME_M68K_FLAG_N) {
    fprintf(stderr, " N");
  }
  if (ic->tme_m68k_ireg_ccr & TME_M68K_FLAG_Z) {
    fprintf(stderr, " Z");
  }
  if (ic->tme_m68k_ireg_ccr & TME_M68K_FLAG_V) {
    fprintf(stderr, " V");
  }
  if (ic->tme_m68k_ireg_ccr & TME_M68K_FLAG_C) {
    fprintf(stderr, " C");
  }
  fprintf(stderr, "\n");

  /* dump out the effective address and memory buffers: */
  fprintf(stderr, "\n");
  fprintf(stderr, "EA = %d:0x%08x\n",
          ic->_tme_m68k_ea_function_code,
          ic->_tme_m68k_ea_address);
  fprintf(stderr, "%%memx[%p] = 0x%08x %%memy[%p] = 0x%08x\n",
          &ic->tme_m68k_ireg_memx32,
          ic->tme_m68k_ireg_memx32,
          &ic->tme_m68k_ireg_memy32,
          ic->tme_m68k_ireg_memy32);

  /* dump out the control registers: */
  fprintf(stderr, "\n");
  fprintf(stderr, "%%usp = 0x%08x\n", ic->tme_m68k_ireg_usp);
  fprintf(stderr, "%%isp = 0x%08x\n", ic->tme_m68k_ireg_isp);
  fprintf(stderr, "%%msp = 0x%08x\n", ic->tme_m68k_ireg_msp);
  fprintf(stderr, "%%sfc = 0x%08x\n", ic->tme_m68k_ireg_sfc);
  fprintf(stderr, "%%dfc = 0x%08x\n", ic->tme_m68k_ireg_dfc);
  fprintf(stderr, "%%vbr = 0x%08x\n", ic->tme_m68k_ireg_vbr);

  /* dump out instruction decoding information: */
  fprintf(stderr, "\n");
  fprintf(stderr, "opcode = 0x%04x specop = 0x%04x\n",
          ic->_tme_m68k_insn_opcode,
          ic->_tme_m68k_insn_specop);
}

/* this dumps out memory as aligned rows of hex bytes: */
void
tme_m68k_dump_memory(struct tme_m68k *ic, tme_uint32_t address, tme_uint32_t resid)
{
  unsigned int saved_ea_function_code;
  tme_uint32_t saved_ea_address;
  tme_uint32_t address_display;
  tme_uint8_t buffer[16];
  tme_uint32_t count;
  tme_uint32_t byte_i;

  /* save any EA function code and address: */
  saved_ea_function_code = ic->_tme_m68k_ea_function_code;
  saved_ea_address = ic->_tme_m68k_ea_address;

  /* we always display aligned rows: */
  address_display = address & (((tme_uint32_t) 0) - sizeof(buffer));
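
  /* the mask arithmetic above (an illustrative note): with a 16-byte
     buffer, ((tme_uint32_t) 0) - sizeof(buffer) is the all-ones value
     minus 15, i.e. a mask with the low four bits clear, so an address
     of 0x00001234 displays starting at row address 0x00001230: */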

  /* while we have memory to dump: */
  for (; resid > 0; ) {

    /* read more data: */
    byte_i = address % sizeof(buffer);
    count = TME_MIN(resid, sizeof(buffer) - byte_i);
    ic->_tme_m68k_ea_function_code = TME_M68K_FUNCTION_CODE_DATA(ic);
    ic->_tme_m68k_ea_address = address;
    tme_m68k_read_mem(ic, &buffer[byte_i], count);
    count += byte_i;

    /* display the row, with blanks for any bytes in the aligned row
       that come before the dump address: */
    fprintf(stderr, "0x%08x ", address_display);
    for (byte_i = 0;
         byte_i < count;
         byte_i++, address_display++) {
      if (address_display < address) {
        fprintf(stderr, "   ");
      }
      else {
        fprintf(stderr, " %02x",
                buffer[byte_i]);
        address++;
        resid--;
      }
    }
    fputc('\n', stderr);
  }

  /* restore any EA function code and address: */
  ic->_tme_m68k_ea_function_code = saved_ea_function_code;
  ic->_tme_m68k_ea_address = saved_ea_address;
}
#endif /* 1 */