// license:BSD-3-Clause
// copyright-holders:Ryan Holtz

#include "emu.h"
#include "unsp.h"
#include "unspfe.h"
#include "unspdefs.h"

void unsp_device::invalidate_cache()
{
	m_cache_dirty = true;
}

void unsp_device::execute_run_drc()
{
	int execute_result;

	/* reset the cache if dirty */
	if (m_cache_dirty)
	{
		code_flush_cache();
		m_cache_dirty = false;
	}


	/* execute */
	do
	{
		/* run as much as we can */
		execute_result = m_drcuml->execute(*m_entry);

		/* if we need to recompile, do it */
		if (execute_result == EXECUTE_MISSING_CODE)
		{
			code_compile_block(UNSP_LPC);
		}
		else if (execute_result == EXECUTE_UNMAPPED_CODE)
		{
			fatalerror("Attempted to execute unmapped code at PC=%08X\n", UNSP_LPC);
		}
		else if (execute_result == EXECUTE_RESET_CACHE)
		{
			code_flush_cache();
		}
	} while (execute_result != EXECUTE_OUT_OF_CYCLES);
}


/***************************************************************************
    C FUNCTION CALLBACKS
***************************************************************************/

/*-------------------------------------------------
    cfunc_unimplemented - handler for
    unimplemented opcodes
-------------------------------------------------*/

inline void unsp_device::ccfunc_unimplemented()
{
	fatalerror("PC=%08X: Unimplemented op %04x\n", UNSP_LPC, (uint16_t)m_core->m_arg0);
}

static void cfunc_unimplemented(void *param)
{
	((unsp_device *)param)->ccfunc_unimplemented();
}

#if UNSP_LOG_REGS
void unsp_device::cfunc_log_write()
{
	log_write(m_core->m_arg0, m_core->m_arg1);
}

static void cfunc_log_regs(void *param)
{
	((unsp_device *)param)->log_regs();
}

static void ccfunc_log_write(void *param)
{
	((unsp_device *)param)->cfunc_log_write();
}
#endif

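/*-------------------------------------------------
    cfunc_muls - callback which decodes the MULS
    operand fields (rd, rs, operand count) from
    the opcode and dispatches to the interpreter
    helper execute_muls_ss
-------------------------------------------------*/
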
void unsp_device::cfunc_muls()
{
	const uint32_t op = m_core->m_arg0;
	const uint16_t size = ((op >> 3) & 15) ? ((op >> 3) & 15) : 16;
	const uint16_t rd = (op >> 9) & 7;
	const uint16_t rs = op & 7;
	execute_muls_ss(rd, rs, size);
}

static void ccfunc_muls(void *param)
{
	((unsp_device *)param)->cfunc_muls();
}

/***************************************************************************
    CACHE MANAGEMENT
***************************************************************************/

/*-------------------------------------------------
    flush_drc_cache - outward-facing accessor to
    code_flush_cache
-------------------------------------------------*/

void unsp_device::flush_drc_cache()
{
	if (!m_enable_drc)
		return;
	m_cache_dirty = true;
}

/*-------------------------------------------------
    code_flush_cache - flush the cache and
    regenerate static code
-------------------------------------------------*/

void unsp_device::code_flush_cache()
{
	/* empty the transient cache contents */
	m_drcuml->reset();

	try
	{
		/* generate the entry point and out-of-cycles handlers */
		static_generate_entry_point();
		static_generate_nocode_handler();
		static_generate_out_of_cycles();

		static_generate_memory_accessor(false, "read", m_mem_read);
		static_generate_memory_accessor(true, "write", m_mem_write);
		static_generate_trigger_fiq();
		static_generate_trigger_irq();
		static_generate_check_interrupts();
	}

	catch (drcuml_block::abort_compilation &)
	{
		fatalerror("Unable to generate static u'nSP code\n"); fflush(stdout);
	}
}

/*-------------------------------------------------
    code_compile_block - compile a block of the
    given mode at the specified pc
-------------------------------------------------*/

void unsp_device::code_compile_block(offs_t pc)
{
	compiler_state compiler = { 0 };
	const opcode_desc *seqhead, *seqlast;
	bool override = false;

	g_profiler.start(PROFILER_DRC_COMPILE);

	/* get a description of this sequence */
	const opcode_desc *desclist = m_drcfe->describe_code(pc);

	bool succeeded = false;
	while (!succeeded)
	{
		try
		{
			/* start the block */
			drcuml_block &block(m_drcuml->begin_block(1024*8));

			/* loop until we get through all instruction sequences */
			for (seqhead = desclist; seqhead != nullptr; seqhead = seqlast->next())
			{
				const opcode_desc *curdesc;
				uint32_t nextpc;

				/* add a code log entry */
				if (m_drcuml->logging())
					block.append_comment("-------------------------");

				/* determine the last instruction in this sequence */
				for (seqlast = seqhead; seqlast != nullptr; seqlast = seqlast->next())
					if (seqlast->flags & OPFLAG_END_SEQUENCE)
						break;
				assert(seqlast != nullptr);

				/* if we don't have a hash for this mode/pc, or if we are overriding all, add one */
				if (override || !m_drcuml->hash_exists(0, seqhead->pc))
					UML_HASH(block, 0, seqhead->pc);

				/* if we already have a hash, and this is the first sequence, assume that we */
				/* are recompiling due to being out of sync and allow future overrides */
				else if (seqhead == desclist)
				{
					override = true;
					UML_HASH(block, 0, seqhead->pc);
				}

				/* otherwise, redispatch to that fixed PC and skip the rest of the processing */
				else
				{
					UML_LABEL(block, seqhead->pc | 0x80000000);
					UML_HASHJMP(block, 0, seqhead->pc, *m_nocode);
					continue;
				}

				/* validate this code block if we're not pointing into ROM */
				if (m_program.space().get_write_ptr(seqhead->physpc) != nullptr)
					generate_checksum_block(block, compiler, seqhead, seqlast);

				/* label this instruction, if it may be jumped to locally */
				if (seqhead->flags & OPFLAG_IS_BRANCH_TARGET)
					UML_LABEL(block, seqhead->pc | 0x80000000);

				/* iterate over instructions in the sequence and compile them */
				for (curdesc = seqhead; curdesc != seqlast->next(); curdesc = curdesc->next())
				{
					generate_check_cycles(block, compiler, curdesc->pc + curdesc->length);
					generate_sequence_instruction(block, compiler, curdesc);
					UML_CALLH(block, *m_check_interrupts);
				}

				nextpc = seqlast->pc + seqlast->length;

				/* if the last instruction can change modes, use a variable mode; otherwise, assume the same mode */
				if (seqlast->next() == nullptr || seqlast->next()->pc != nextpc)
				{
					UML_HASHJMP(block, 0, nextpc, *m_nocode);          // hashjmp <mode>,nextpc,nocode
				}
			}

			/* end the sequence */
			block.end();
			g_profiler.stop();
			succeeded = true;
		}
		catch (drcuml_block::abort_compilation &)
		{
			code_flush_cache();
		}
	}
}

/***************************************************************************
    STATIC CODEGEN
***************************************************************************/

/*-------------------------------------------------
    alloc_handle - allocate a handle if not
    already allocated
-------------------------------------------------*/

static inline void alloc_handle(drcuml_state &drcuml, uml::code_handle *&handleptr, const char *name)
{
	if (!handleptr)
		handleptr = drcuml.handle_alloc(name);
}

/*-------------------------------------------------
    static_generate_entry_point - generate a
    static entry point
-------------------------------------------------*/

void unsp_device::static_generate_entry_point()
{
	/* begin generating */
	drcuml_block &block(m_drcuml->begin_block(32));

	/* forward references */
	alloc_handle(*m_drcuml, m_nocode, "nocode");

	alloc_handle(*m_drcuml, m_entry, "entry");
	UML_HANDLE(block, *m_entry);

	/* load fast integer registers */
	//load_fast_iregs(block);

	/* generate a hash jump via the current mode and PC */
	UML_ROLAND(block, I1, mem(&m_core->m_r[REG_SR]), 16, 0x003f0000);
	UML_OR(block, I1, I1, mem(&m_core->m_r[REG_PC]));
	UML_HASHJMP(block, 0, I1, *m_nocode);
	block.end();
}


/*-------------------------------------------------
    static_generate_nocode_handler - generate an
    exception handler for "out of code"
-------------------------------------------------*/

void unsp_device::static_generate_nocode_handler()
{
	/* begin generating */
	drcuml_block &block(m_drcuml->begin_block(10));

	/* generate a hash jump via the current mode and PC */
	alloc_handle(*m_drcuml, m_nocode, "nocode");
	UML_HANDLE(block, *m_nocode);
	UML_GETEXP(block, I0);

	UML_ROLINS(block, mem(&m_core->m_r[REG_SR]), I0, 16, 0x3f);
	UML_AND(block, mem(&m_core->m_r[REG_PC]), I0, 0x0000ffff);
	//save_fast_iregs(block);
	UML_EXIT(block, EXECUTE_MISSING_CODE);

	block.end();
}


/*-------------------------------------------------
    static_generate_out_of_cycles - generate an
    out of cycles exception handler
-------------------------------------------------*/

void unsp_device::static_generate_out_of_cycles()
{
	/* begin generating */
	drcuml_block &block(m_drcuml->begin_block(10));

	/* generate a hash jump via the current mode and PC */
	alloc_handle(*m_drcuml, m_out_of_cycles, "out_of_cycles");
	UML_HANDLE(block, *m_out_of_cycles);
	//save_fast_iregs(block);
	UML_EXIT(block, EXECUTE_OUT_OF_CYCLES);

	block.end();
}

/*------------------------------------------------------------------
    static_generate_memory_accessor
------------------------------------------------------------------*/

void unsp_device::static_generate_memory_accessor(bool iswrite, const char *name, uml::code_handle *&handleptr)
{
	/* on entry, address is in I0; data for writes is in I1 */
	/* on exit, read result is in I1 */

	/* begin generating */
	drcuml_block &block(m_drcuml->begin_block(32));

	/* add a global entry for this */
	alloc_handle(*m_drcuml, handleptr, name);
	UML_HANDLE(block, *handleptr);

	if (iswrite)
	{
#if UNSP_LOG_REGS
		UML_MOV(block, mem(&m_core->m_arg0), I0);
		UML_MOV(block, mem(&m_core->m_arg1), I1);
		UML_CALLC(block, ccfunc_log_write, this);
#endif
		UML_WRITE(block, I0, I1, SIZE_WORD, SPACE_PROGRAM);
	}
	else
		UML_READ(block, I1, I0, SIZE_WORD, SPACE_PROGRAM);
	UML_RET(block);

	block.end();
}



/***************************************************************************
    CODE GENERATION
***************************************************************************/

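/*------------------------------------------------------------------
    static_generate_check_interrupts - scan the pending-interrupt
    bits in m_sirq; line 0 dispatches to the FIQ handler, any other
    pending line dispatches to the IRQ handler
------------------------------------------------------------------*/
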
void unsp_device::static_generate_check_interrupts()
{
	uml::code_label test_loop = 1;
	uml::code_label found = 2;
	uml::code_label do_irq = 3;

	/* begin generating */
	drcuml_block &block(m_drcuml->begin_block(256));

	/* generate a hash jump via the current mode and PC */
	alloc_handle(*m_drcuml, m_check_interrupts, "check_interrupts");
	UML_HANDLE(block, *m_check_interrupts);

	UML_CMP(block, mem(&m_core->m_sirq), 0);
	UML_RETc(block, uml::COND_Z);

	UML_MOV(block, I2, 0);
	UML_MOV(block, I0, 1);
	UML_MOV(block, I1, mem(&m_core->m_sirq));

	UML_LABEL(block, test_loop);
	UML_TEST(block, I1, I0);
	UML_JMPc(block, uml::COND_NZ, found);
	UML_SHL(block, I0, I0, 1);
	UML_ADD(block, I2, I2, 1);
	UML_CMP(block, I0, 1 << 9);
	UML_JMPc(block, uml::COND_NE, test_loop);
	UML_RET(block);

	UML_LABEL(block, found);
	UML_CMP(block, I0, 1);
	UML_JMPc(block, uml::COND_NE, do_irq);
	UML_CALLH(block, *m_trigger_fiq);
	UML_RET(block);

	UML_LABEL(block, do_irq);
	UML_CALLH(block, *m_trigger_irq);
	UML_RET(block);

	block.end();
}

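/*------------------------------------------------------------------
    static_generate_trigger_fiq - if FIQs are enabled and one is
    not already in service, push PC and SR onto the stack and jump
    through the FIQ vector at 0xfff6
------------------------------------------------------------------*/
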
void unsp_device::static_generate_trigger_fiq()
{
	/* begin generating */
	drcuml_block &block(m_drcuml->begin_block(256));

	/* generate a hash jump via the current mode and PC */
	alloc_handle(*m_drcuml, m_trigger_fiq, "trigger_fiq");
	UML_HANDLE(block, *m_trigger_fiq);

	UML_TEST(block, mem(&m_core->m_enable_fiq), 1);
	UML_RETc(block, uml::COND_Z);

	UML_TEST(block, mem(&m_core->m_fiq), 1);
	UML_RETc(block, uml::COND_NZ);

	UML_MOV(block, mem(&m_core->m_fiq), 1);

	UML_MOV(block, I0, mem(&m_core->m_r[REG_SP]));

	UML_MOV(block, I1, mem(&m_core->m_r[REG_PC]));
	UML_CALLH(block, *m_mem_write);
	UML_SUB(block, I0, I0, 1);

	UML_MOV(block, I1, mem(&m_core->m_r[REG_SR]));
	UML_CALLH(block, *m_mem_write);
	UML_SUB(block, I0, I0, 1);

	UML_AND(block, mem(&m_core->m_r[REG_SP]), I0, 0x0000ffff);

	UML_MOV(block, I0, 0x0000fff6);
	UML_CALLH(block, *m_mem_read);
	UML_MOV(block, mem(&m_core->m_r[REG_PC]), I1);
	UML_MOV(block, mem(&m_core->m_r[REG_SR]), 0);
	UML_HASHJMP(block, 0, I1, *m_nocode);
	UML_RET(block);

	block.end();
}

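/*------------------------------------------------------------------
    static_generate_trigger_irq - check the IRQ enable/priority
    state, then push PC and SR and jump through the IRQ vector at
    0xfff7 plus the interrupt line index passed in I2 by
    check_interrupts
------------------------------------------------------------------*/
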
void unsp_device::static_generate_trigger_irq()
{
	/* begin generating */
	drcuml_block &block(m_drcuml->begin_block(256));

	uml::code_label skip_ine = 1;

	/* generate a hash jump via the current mode and PC */
	alloc_handle(*m_drcuml, m_trigger_irq, "trigger_irq");
	UML_HANDLE(block, *m_trigger_irq);

	// If INE is 0 and IRQ is 1, abort
	UML_XOR(block, I1, mem(&m_core->m_irq), 1);
	UML_AND(block, I1, I1, mem(&m_core->m_ine));
	UML_RETc(block, uml::COND_NZ);

	// If INE is 0 and IRQ is 0, we have a valid IRQ
	UML_TEST(block, mem(&m_core->m_ine), 1);
	UML_JMPc(block, uml::COND_Z, skip_ine);

	// If INE is 1 and IRQ line is < PRI, abort
	UML_CMP(block, I0, mem(&m_core->m_pri));
	UML_RETc(block, uml::COND_LE);

	// Update our current interrupt priority
	UML_MOV(block, mem(&m_core->m_pri), I0);

	UML_LABEL(block, skip_ine);

	UML_MOV(block, mem(&m_core->m_irq), 1);

	UML_TEST(block, mem(&m_core->m_enable_irq), 1);
	UML_RETc(block, uml::COND_Z);

	UML_MOV(block, I0, mem(&m_core->m_r[REG_SP]));
	UML_MOV(block, I1, mem(&m_core->m_r[REG_PC]));
	UML_CALLH(block, *m_mem_write);
	UML_SUB(block, I0, I0, 1);

	UML_MOV(block, I1, mem(&m_core->m_r[REG_SR]));
	UML_CALLH(block, *m_mem_write);
	UML_SUB(block, I0, I0, 1);

	UML_AND(block, mem(&m_core->m_r[REG_SP]), I0, 0x0000ffff);

	UML_ADD(block, I0, I2, 0x0000fff7);

	UML_CALLH(block, *m_mem_read);

	UML_MOV(block, mem(&m_core->m_r[REG_PC]), I1);
	UML_MOV(block, mem(&m_core->m_r[REG_SR]), 0);
	UML_HASHJMP(block, 0, I1, *m_nocode);
	UML_RET(block);

	block.end();
}

/*-------------------------------------------------
    generate_check_cycles - generate code to
    generate an exception if cycles are out
-------------------------------------------------*/

void unsp_device::generate_check_cycles(drcuml_block &block, compiler_state &compiler, uml::parameter param)
{
	UML_CMP(block, mem(&m_core->m_icount), 0);
	UML_EXHc(block, uml::COND_L, *m_out_of_cycles, param);
}

/*-------------------------------------------------
    generate_checksum_block - generate code to
    validate a sequence of opcodes
-------------------------------------------------*/

void unsp_device::generate_checksum_block(drcuml_block &block, compiler_state &compiler, const opcode_desc *seqhead, const opcode_desc *seqlast)
{
	const opcode_desc *curdesc;
	if (m_drcuml->logging())
	{
		block.append_comment("[Validation for %08X]", seqhead->pc);
	}

	/* full verification; sum up everything */
	void *memptr = m_program.space().get_write_ptr(seqhead->physpc);
	UML_LOAD(block, I0, memptr, 0, SIZE_WORD, SCALE_x2);
	uint32_t sum = seqhead->opptr.w[0];
	for (int i = 1; i < seqhead->length; i++)
	{
		UML_LOAD(block, I1, memptr, i, SIZE_WORD, SCALE_x2);
		UML_ADD(block, I0, I0, I1);
		sum += ((uint16_t*)memptr)[i];
	}
	for (curdesc = seqhead->next(); curdesc != seqlast->next(); curdesc = curdesc->next())
	{
		if (!(curdesc->flags & OPFLAG_VIRTUAL_NOOP))
		{
			memptr = m_program.space().get_write_ptr(curdesc->physpc);
			UML_LOAD(block, I1, memptr, 0, SIZE_WORD, SCALE_x2);
			UML_ADD(block, I0, I0, I1);
			sum += curdesc->opptr.w[0];
			for (int i = 1; i < curdesc->length; i++)
			{
				UML_LOAD(block, I1, memptr, i, SIZE_WORD, SCALE_x2);
				UML_ADD(block, I0, I0, I1);
				sum += ((uint16_t*)memptr)[i];
			}
		}
	}
	UML_CMP(block, I0, sum);
	UML_EXHc(block, COND_NE, *m_nocode, seqhead->pc);
}


/*-------------------------------------------------
    log_add_disasm_comment - add a comment
    including disassembly of an unSP instruction
-------------------------------------------------*/

void unsp_device::log_add_disasm_comment(drcuml_block &block, uint32_t pc, uint32_t op)
{
	if (m_drcuml->logging())
	{
		block.append_comment("%08X: %08x", pc, op);
	}
}


/*------------------------------------------------------------------
    generate_branch
------------------------------------------------------------------*/

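/* for dynamic targets, the destination is reassembled at run time from the low 6 bits of SR and PC into m_jmpdest */
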
void unsp_device::generate_branch(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
{
	if (desc->targetpc == BRANCH_TARGET_DYNAMIC)
	{
		UML_ROLAND(block, I0, mem(&m_core->m_r[REG_SR]), 16, 0x3f0000);
		UML_OR(block, mem(&m_core->m_jmpdest), I0, mem(&m_core->m_r[REG_PC]));
	}

	/* update the cycles and jump through the hash table to the target */
	if (desc->targetpc != BRANCH_TARGET_DYNAMIC)
	{
		UML_CALLH(block, *m_check_interrupts);
		UML_HASHJMP(block, 0, desc->targetpc, *m_nocode);
	}
	else
	{
		UML_CALLH(block, *m_check_interrupts);
		UML_HASHJMP(block, 0, mem(&m_core->m_jmpdest), *m_nocode);
	}
}


/*-------------------------------------------------
    generate_sequence_instruction - generate code
    for a single instruction in a sequence
-------------------------------------------------*/

void unsp_device::generate_sequence_instruction(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
{
	/* add an entry for the log */
	if (m_drcuml->logging() && !(desc->flags & OPFLAG_VIRTUAL_NOOP))
		log_add_disasm_comment(block, desc->pc, desc->opptr.w[0]);

	/* set the PC map variable */
	UML_MAPVAR(block, MAPVAR_PC, desc->pc);

	/* if we are debugging, call the debugger */
	if ((machine().debug_flags & DEBUG_FLAG_ENABLED) != 0)
	{
		//save_fast_iregs(block);
		UML_DEBUG(block, desc->pc);
	}

#if UNSP_LOG_REGS
	UML_CALLC(block, cfunc_log_regs, this);
#endif

	if (!(desc->flags & OPFLAG_VIRTUAL_NOOP))
	{
		/* compile the instruction */
		if (!generate_opcode(block, compiler, desc))
		{
			UML_ROLINS(block, mem(&m_core->m_r[REG_SR]), desc->pc, 16, 0x3f);
			UML_AND(block, mem(&m_core->m_r[REG_PC]), desc->pc, 0x0000ffff);
			UML_MOV(block, mem(&m_core->m_arg0), desc->opptr.w[0]);
			UML_CALLC(block, cfunc_unimplemented, this);
		}
	}
}


/*------------------------------------------------------------------
    generate_add_lpc - adds an offset to the long program counter
    comprised of PC and the low 6 bits of SR
------------------------------------------------------------------*/

void unsp_device::generate_add_lpc(drcuml_block &block, int32_t offset)
{
	UML_ROLAND(block, I0, mem(&m_core->m_r[REG_SR]), 16, 0x3f0000);
	UML_OR(block, I0, I0, mem(&m_core->m_r[REG_PC]));
	UML_ADD(block, I0, I0, offset);
	UML_ROLINS(block, mem(&m_core->m_r[REG_SR]), I0, 16, 0x3f);
	UML_AND(block, mem(&m_core->m_r[REG_PC]), I0, 0x0000ffff);
}


/*------------------------------------------------------------------
    generate_update_nzsc - perform a full flag update
------------------------------------------------------------------*/

void unsp_device::generate_update_nzsc(drcuml_block &block)
{
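	// I1 and I2 are expected to hold the ALU operands and I3 the result (with the carry in bit 16), as arranged by generate_opcode()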
	UML_XOR(block, I1, I1, I2);
	UML_TEST(block, I1, 0x8000);
	UML_SETc(block, uml::COND_NZ, I2);

	UML_TEST(block, I3, 0x8000);
	UML_SETc(block, uml::COND_NZ, I1);
	UML_ROLINS(block, mem(&m_core->m_r[REG_SR]), I1, UNSP_N_SHIFT, UNSP_N);

	UML_TEST(block, I3, 0x10000);
	UML_SETc(block, uml::COND_NZ, I1);
	UML_ROLINS(block, mem(&m_core->m_r[REG_SR]), I1, UNSP_C_SHIFT, UNSP_C);
	UML_CMP(block, I2, I1);
	UML_SETc(block, uml::COND_NE, I1);
	UML_ROLINS(block, mem(&m_core->m_r[REG_SR]), I1, UNSP_S_SHIFT, UNSP_S);

	UML_TEST(block, I3, 0x0000ffff);
	UML_SETc(block, uml::COND_Z, I1);
	UML_ROLINS(block, mem(&m_core->m_r[REG_SR]), I1, UNSP_Z_SHIFT, UNSP_Z);
}


/*------------------------------------------------------------------
    generate_update_nz - perform a partial flag update
------------------------------------------------------------------*/

void unsp_device::generate_update_nz(drcuml_block &block)
{
	UML_TEST(block, I3, 0x8000);
	UML_SETc(block, uml::COND_NZ, I1);
	UML_ROLINS(block, mem(&m_core->m_r[REG_SR]), I1, UNSP_N_SHIFT, UNSP_N);

	UML_AND(block, I2, I3, 0x0000ffff);
	UML_CMP(block, I2, 0);
	UML_SETc(block, uml::COND_E, I1);
	UML_ROLINS(block, mem(&m_core->m_r[REG_SR]), I1, UNSP_Z_SHIFT, UNSP_Z);
}


/*------------------------------------------------------------------
    generate_opcode - main handler which generates the UML for a
    single opcode
------------------------------------------------------------------*/

bool unsp_device::generate_opcode(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
{
	uint32_t op = (uint32_t)desc->opptr.w[0];

	generate_add_lpc(block, 1);

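	// decode the standard unSP instruction fields: op0 = bits 15-12, opa = bits 11-9, op1 = bits 8-6, opn = bits 5-3, opb = bits 2-0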
	const uint16_t op0 = (op >> 12) & 15;
	const uint16_t opa = (op >> 9) & 7;
	const uint16_t op1 = (op >> 6) & 7;
	const uint16_t opn = (op >> 3) & 7;
	const uint16_t opb = op & 7;

	const uint8_t lower_op = (op1 << 4) | op0;

	uml::code_label skip_branch = compiler.m_labelnum++;
	uml::code_label reti_done = compiler.m_labelnum++;
	uml::code_label mul_opa_nohi = compiler.m_labelnum++;
	uml::code_label mul_opb_nohi = compiler.m_labelnum++;
	uml::code_label shift_no_sign = compiler.m_labelnum++;
	uml::code_label no_carry = compiler.m_labelnum++;

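	// conditional branches: opa == 7 with op1 of 0 or 1 encodes a 6-bit displacement that is added to (op1 == 0) or subtracted from (op1 == 1) the long PC when the condition passes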
	if(op0 < 0xf && opa == 0x7 && op1 < 2)
	{
		const uint32_t opimm = op & 0x3f;
		switch(op0)
		{
			case 0: // JB
				UML_TEST(block, mem(&m_core->m_r[REG_SR]), UNSP_C);
				UML_JMPc(block, uml::COND_NZ, skip_branch);
				break;

			case 1: // JAE
				UML_TEST(block, mem(&m_core->m_r[REG_SR]), UNSP_C);
				UML_JMPc(block, uml::COND_Z, skip_branch);
				break;

			case 2: // JGE
				UML_TEST(block, mem(&m_core->m_r[REG_SR]), UNSP_S);
				UML_JMPc(block, uml::COND_NZ, skip_branch);
				break;

			case 3: // JL
				UML_TEST(block, mem(&m_core->m_r[REG_SR]), UNSP_S);
				UML_JMPc(block, uml::COND_Z, skip_branch);
				break;

			case 4: // JNE
				UML_TEST(block, mem(&m_core->m_r[REG_SR]), UNSP_Z);
				UML_JMPc(block, uml::COND_NZ, skip_branch);
				break;

			case 5: // JE
				UML_TEST(block, mem(&m_core->m_r[REG_SR]), UNSP_Z);
				UML_JMPc(block, uml::COND_Z, skip_branch);
				break;

			case 6: // JPL
				UML_TEST(block, mem(&m_core->m_r[REG_SR]), UNSP_N);
				UML_JMPc(block, uml::COND_NZ, skip_branch);
				break;

			case 7: // JMI
				UML_TEST(block, mem(&m_core->m_r[REG_SR]), UNSP_N);
				UML_JMPc(block, uml::COND_Z, skip_branch);
				break;

			case 8: // JBE
				UML_AND(block, I0, mem(&m_core->m_r[REG_SR]), UNSP_Z | UNSP_C);
				UML_CMP(block, I0, UNSP_C);
				UML_JMPc(block, uml::COND_E, skip_branch);
				break;

			case 9: // JA
				UML_AND(block, I0, mem(&m_core->m_r[REG_SR]), UNSP_Z | UNSP_C);
				UML_CMP(block, I0, UNSP_C);
				UML_JMPc(block, uml::COND_NE, skip_branch);
				break;

			case 10: // JLE
				UML_TEST(block, mem(&m_core->m_r[REG_SR]), UNSP_Z | UNSP_S);
				UML_JMPc(block, uml::COND_Z, skip_branch);
				break;

			case 11: // JG
				UML_TEST(block, mem(&m_core->m_r[REG_SR]), UNSP_Z);
				UML_JMPc(block, uml::COND_NZ, skip_branch);
				UML_TEST(block, mem(&m_core->m_r[REG_SR]), UNSP_S);
				UML_JMPc(block, uml::COND_NZ, skip_branch);
				break;

			case 12: // JVC
				UML_ROLAND(block, I0, mem(&m_core->m_r[REG_SR]), 32-UNSP_S_SHIFT, 1);
				UML_ROLAND(block, I1, mem(&m_core->m_r[REG_SR]), 32-UNSP_N_SHIFT, 1);
				UML_CMP(block, I0, I1);
				UML_JMPc(block, uml::COND_NE, skip_branch);
				break;

			case 13: // JVS
				UML_ROLAND(block, I0, mem(&m_core->m_r[REG_SR]), 32-UNSP_S_SHIFT, 1);
				UML_ROLAND(block, I1, mem(&m_core->m_r[REG_SR]), 32-UNSP_N_SHIFT, 1);
				UML_CMP(block, I0, I1);
				UML_JMPc(block, uml::COND_E, skip_branch);
				break;

			case 14: // JMP
				UML_SUB(block, mem(&m_core->m_icount), mem(&m_core->m_icount), 4);
				UML_MOV(block, I0, desc->targetpc);
				UML_AND(block, mem(&m_core->m_r[REG_PC]), I0, 0x0000ffff);
				generate_branch(block, compiler, desc);
				return true;

			default:
				return false;
		}
		UML_SUB(block, mem(&m_core->m_icount), mem(&m_core->m_icount), 4);
		generate_add_lpc(block, (op1 == 0) ? opimm : (0 - opimm));
		generate_branch(block, compiler, desc);
		UML_LABEL(block, skip_branch);
		UML_SUB(block, mem(&m_core->m_icount), mem(&m_core->m_icount), 2);
		return true;
	}

	UML_SUB(block, mem(&m_core->m_icount), mem(&m_core->m_icount), desc->cycles);

	if (lower_op == 0x2d) // Push
	{
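		// push opn registers, starting at opa and counting down, to the stack addressed by register opb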
		uint32_t r0 = opn;
		uint32_t r1 = opa;
		UML_MOV(block, I0, mem(&m_core->m_r[opb]));
		while (r0)
		{
			UML_MOV(block, I1, mem(&m_core->m_r[r1]));
			UML_CALLH(block, *m_mem_write);
			UML_SUB(block, I0, I0, 1);
			UML_AND(block, mem(&m_core->m_r[opb]), I0, 0x0000ffff);
			r0--;
			r1--;
		}
		return true;
	}
	else if (lower_op == 0x29)
	{
		if (op == 0x9a98) // reti
		{
			UML_ADD(block, I0, mem(&m_core->m_r[REG_SP]), 1);
			UML_CALLH(block, *m_mem_read);
			UML_MOV(block, mem(&m_core->m_r[REG_SR]), I1);

			UML_ADD(block, I0, I0, 1);
			UML_CALLH(block, *m_mem_read);
			UML_MOV(block, mem(&m_core->m_r[REG_PC]), I1);

			UML_AND(block, mem(&m_core->m_r[REG_SP]), I0, 0x0000ffff);

			UML_TEST(block, mem(&m_core->m_fiq), 1);
			UML_MOVc(block, uml::COND_NZ, mem(&m_core->m_fiq), 0);
			UML_JMPc(block, uml::COND_NZ, reti_done);

			UML_TEST(block, mem(&m_core->m_irq), 1);
			UML_MOVc(block, uml::COND_NZ, mem(&m_core->m_irq), 0);

			UML_LABEL(block, reti_done);
			generate_branch(block, compiler, desc);
		}
		else // pop
		{
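			// pop opn registers, starting at opa + 1 and counting up, from the stack addressed by register opb; popping into PC forces a branch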
			uint32_t r0 = opn;
			uint32_t r1 = opa;
			bool do_branch = false;

			UML_MOV(block, I0, mem(&m_core->m_r[opb]));
			while (r0)
			{
				r1++;
				UML_ADD(block, I0, I0, 1);
				UML_AND(block, mem(&m_core->m_r[opb]), I0, 0x0000ffff);
				UML_CALLH(block, *m_mem_read);
				UML_MOV(block, mem(&m_core->m_r[r1]), I1);
				if (r1 == REG_PC)
					do_branch = true;
				r0--;
			}
			if (do_branch)
				generate_branch(block, compiler, desc);
		}
		return true;
	}
	else if (op0 == 0xf)
	{
		switch (op1)
		{
			case 0x00: // Multiply, Unsigned * Signed
				if(opn == 1 && opa != 7)
				{
					UML_MOV(block, I0, mem(&m_core->m_r[opa]));
					UML_MOV(block, I1, mem(&m_core->m_r[opb]));
					UML_MULU(block, I2, I2, I0, I1);

					UML_TEST(block, I1, 0x00008000);
					UML_JMPc(block, uml::COND_Z, mul_opb_nohi);
					UML_SHL(block, I0, I0, 16);
					UML_SUB(block, I2, I2, I0);

					UML_LABEL(block, mul_opb_nohi);
					UML_SHR(block, mem(&m_core->m_r[REG_R4]), I2, 16);
					UML_AND(block, mem(&m_core->m_r[REG_R3]), I2, 0x0000ffff);
					return true;
				}
				return false;

			case 0x01: // Call
				if(!(opa & 1))
				{
					generate_add_lpc(block, 1);

					UML_MOV(block, I0, mem(&m_core->m_r[REG_SP]));

					UML_MOV(block, I1, mem(&m_core->m_r[REG_PC]));
					UML_CALLH(block, *m_mem_write);
					UML_SUB(block, I0, I0, 1);

					UML_MOV(block, I1, mem(&m_core->m_r[REG_SR]));
					UML_CALLH(block, *m_mem_write);
					UML_SUB(block, I0, I0, 1);

					UML_AND(block, mem(&m_core->m_r[REG_SP]), I0, 0x0000ffff);

					UML_MOV(block, I0, desc->targetpc);
					UML_AND(block, mem(&m_core->m_r[REG_PC]), I0, 0x0000ffff);
					UML_ROLINS(block, mem(&m_core->m_r[REG_SR]), I0, 16, 0x3f);
					generate_branch(block, compiler, desc);
					return true;
				}
				return false;

			case 0x02: // Far Jump
				if (opa == 7)
				{
					UML_MOV(block, I0, desc->targetpc);
					UML_AND(block, mem(&m_core->m_r[REG_PC]), I0, 0x0000ffff);
					UML_ROLINS(block, mem(&m_core->m_r[REG_SR]), I0, 16, 0x3f);
					generate_branch(block, compiler, desc);
					return true;
				}
				return false;

			case 0x04: // Multiply, Signed * Signed
				if(opn == 1 && opa != 7)
				{
					UML_MOV(block, I0, mem(&m_core->m_r[opa]));
					UML_MOV(block, I1, mem(&m_core->m_r[opb]));
					UML_MULU(block, I2, I2, I0, I1);

					UML_TEST(block, I1, 0x00008000);
					UML_JMPc(block, uml::COND_Z, mul_opb_nohi);
					UML_SHL(block, I3, I0, 16);
					UML_SUB(block, I2, I2, I3);

					UML_LABEL(block, mul_opb_nohi);
					UML_TEST(block, I0, 0x00008000);
					UML_JMPc(block, uml::COND_Z, mul_opa_nohi);
					UML_SHL(block, I3, I1, 16);
					UML_SUB(block, I2, I2, I3);

					UML_LABEL(block, mul_opa_nohi);
					UML_SHR(block, mem(&m_core->m_r[REG_R4]), I2, 16);
					UML_AND(block, mem(&m_core->m_r[REG_R3]), I2, 0x0000ffff);
					return true;
				}
				return false;

			case 0x05: // Interrupt flags
				switch(op & 0x3f)
				{
					case 0:
						UML_MOV(block, mem(&m_core->m_enable_irq), 0);
						UML_MOV(block, mem(&m_core->m_enable_fiq), 0);
						break;

					case 1:
						UML_MOV(block, mem(&m_core->m_enable_irq), 1);
						UML_MOV(block, mem(&m_core->m_enable_fiq), 0);
						break;

					case 2:
						UML_MOV(block, mem(&m_core->m_enable_irq), 0);
						UML_MOV(block, mem(&m_core->m_enable_fiq), 1);
						break;

					case 3:
						UML_MOV(block, mem(&m_core->m_enable_irq), 1);
						UML_MOV(block, mem(&m_core->m_enable_fiq), 1);
						break;

					case 4:
						UML_MOV(block, mem(&m_core->m_fir_move), 1);
						break;

					case 5:
						UML_MOV(block, mem(&m_core->m_fir_move), 0);
						break;

					case 8:
						UML_MOV(block, mem(&m_core->m_enable_irq), 0);
						break;

					case 9:
						UML_MOV(block, mem(&m_core->m_enable_irq), 1);
						break;

					case 12:
						UML_MOV(block, mem(&m_core->m_enable_fiq), 0);
						break;

					case 14:
						UML_MOV(block, mem(&m_core->m_enable_fiq), 1);
						break;

					case 37:
						// nop
						break;

					default:
						logerror("unsp drc interrupt flags %02x\n", op & 0x3f);
						return false;
				}
				return true;

			case 0x06:
			case 0x07:
				if (opa == 7)
					return false;

				// MULS
				UML_MOV(block, mem(&m_core->m_arg0), desc->opptr.w[0]);
				UML_CALLC(block, ccfunc_muls, this);
				return true;

			default:
				return false;
		}
	}

	// At this point, we should be dealing solely with ALU ops.
	UML_MOV(block, I2, mem(&m_core->m_r[opa]));

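	// fetch the source operand according to the op1 addressing mode; the operand ends up in I1 and, for memory forms, its address in I0 (reused by the store opcode)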
	switch (op1)
	{
		case 0x00: // r, [bp+imm6]
			UML_ADD(block, I0, mem(&m_core->m_r[REG_BP]), op & 0x3f);
			UML_AND(block, I0, I0, 0x0000ffff);
			if (op0 != 0x0d)
				UML_CALLH(block, *m_mem_read);
			break;

		case 0x01: // r, imm6
			UML_MOV(block, I1, op & 0x3f);
			break;

		case 0x03: // Indirect
		{
			const uint8_t lsbits = opn & 3;
			if (opn & 4)
			{
				switch (lsbits)
				{
					case 0: // r, [<ds:>r]
						UML_ROLAND(block, I0, mem(&m_core->m_r[REG_SR]), 6, 0x3f0000);
						UML_OR(block, I0, I0, mem(&m_core->m_r[opb]));
						if (op0 != 0x0d)
							UML_CALLH(block, *m_mem_read);
						break;

					case 1: // r, [<ds:>r--]
						UML_ROLAND(block, I0, mem(&m_core->m_r[REG_SR]), 6, 0x3f0000);
						UML_OR(block, I0, I0, mem(&m_core->m_r[opb]));
						if (op0 != 0x0d)
							UML_CALLH(block, *m_mem_read);
						UML_SUB(block, I3, I0, 1);
						UML_ROLINS(block, mem(&m_core->m_r[REG_SR]), I3, 32-6, 0x0000fc00);
						UML_AND(block, mem(&m_core->m_r[opb]), I3, 0x0000ffff);
						break;

					case 2: // r, [<ds:>r++]
						UML_ROLAND(block, I0, mem(&m_core->m_r[REG_SR]), 6, 0x3f0000);
						UML_OR(block, I0, I0, mem(&m_core->m_r[opb]));
						if (op0 != 0x0d)
							UML_CALLH(block, *m_mem_read);
						UML_ADD(block, I3, I0, 1);
						UML_ROLINS(block, mem(&m_core->m_r[REG_SR]), I3, 32-6, 0x0000fc00);
						UML_AND(block, mem(&m_core->m_r[opb]), I3, 0x0000ffff);
						break;

					case 3: // r, [<ds:>++r]
						UML_ROLAND(block, I0, mem(&m_core->m_r[REG_SR]), 6, 0x3f0000);
						UML_OR(block, I0, I0, mem(&m_core->m_r[opb]));
						UML_ADD(block, I0, I0, 1);
						UML_ROLINS(block, mem(&m_core->m_r[REG_SR]), I0, 32-6, 0x0000fc00);
						UML_AND(block, mem(&m_core->m_r[opb]), I0, 0x0000ffff);
						if (op0 != 0x0d)
							UML_CALLH(block, *m_mem_read);
						break;

					default:
						// can't happen
						break;
				}
			}
			else
			{
				switch (lsbits)
				{
					case 0: // r, [r]
						UML_MOV(block, I0, mem(&m_core->m_r[opb]));
						if (op0 != 0x0d)
							UML_CALLH(block, *m_mem_read);
						break;

					case 1: // r, [r--]
						UML_MOV(block, I0, mem(&m_core->m_r[opb]));
						if (op0 != 0x0d)
							UML_CALLH(block, *m_mem_read);
						UML_SUB(block, I3, I0, 1);
						UML_AND(block, mem(&m_core->m_r[opb]), I3, 0x0000ffff);
						break;

					case 2: // r, [r++]
						UML_MOV(block, I0, mem(&m_core->m_r[opb]));
						if (op0 != 0x0d)
							UML_CALLH(block, *m_mem_read);
						UML_ADD(block, I3, I0, 1);
						UML_AND(block, mem(&m_core->m_r[opb]), I3, 0x0000ffff);
						break;

					case 3: // r, [++r]
						UML_ADD(block, I0, mem(&m_core->m_r[opb]), 1);
						UML_AND(block, I0, I0, 0x0000ffff);
						UML_MOV(block, mem(&m_core->m_r[opb]), I0);
						if (op0 != 0x0d)
							UML_CALLH(block, *m_mem_read);
						break;
					default:
						// can't happen
						break;
				}
			}
			break;
		}

		case 0x04: // 16-bit ops
			switch (opn)
			{
				case 0x00: // r
					UML_MOV(block, I1, mem(&m_core->m_r[opb]));
					break;

				case 0x01: // imm16
				{
					UML_MOV(block, I2, mem(&m_core->m_r[opb]));
					const uint16_t r1 = m_cache.read_word(desc->pc + 1);
					generate_add_lpc(block, 1);
					UML_MOV(block, I1, r1);
					break;
				}

				case 0x02: // [imm16]
				{
					UML_MOV(block, I2, mem(&m_core->m_r[opb]));
					const uint16_t r1 = m_cache.read_word(desc->pc + 1);
					generate_add_lpc(block, 1);
					UML_MOV(block, I0, r1);
					if (op0 != 0x0d)
						UML_CALLH(block, *m_mem_read);
					break;
				}

				case 0x03: // store [imm16], r
				{
					UML_MOV(block, I1, I2);
					UML_MOV(block, I2, mem(&m_core->m_r[opb]));
					const uint16_t r2 = m_cache.read_word(desc->pc + 1);
					generate_add_lpc(block, 1);
					UML_MOV(block, I0, r2);
					break;
				}

				default: // Shifted ops
				{
					UML_SHL(block, I0, mem(&m_core->m_r[opb]), 4);
					UML_OR(block, I0, I0, mem(&m_core->m_sb));
					UML_TEST(block, I0, 0x80000);
					UML_JMPc(block, uml::COND_Z, shift_no_sign);
					UML_OR(block, I0, I0, 0xf00000);

					UML_LABEL(block, shift_no_sign);
					UML_SHR(block, I0, I0, opn - 3);
					UML_AND(block, mem(&m_core->m_sb), I0, 0xf);
					UML_SHR(block, I1, I0, 4);
					UML_AND(block, I1, I1, 0x0000ffff);
					break;
				}
			}
			break;

		case 0x05: // More shifted ops
			if (opn & 4) // Shift right
			{
				UML_SHL(block, I0, mem(&m_core->m_r[opb]), 4);
				UML_OR(block, I0, I0, mem(&m_core->m_sb));
				UML_SHR(block, I0, I0, opn - 3);
				UML_AND(block, mem(&m_core->m_sb), I0, 0xf);
				UML_SHR(block, I1, I0, 4);
				UML_AND(block, I1, I1, 0x0000ffff);
			}
			else // Shift left
			{
				UML_SHL(block, I0, mem(&m_core->m_sb), 16);
				UML_OR(block, I0, I0, mem(&m_core->m_r[opb]));
				UML_SHL(block, I0, I0, opn + 1);
				UML_SHR(block, I1, I0, 16);
				UML_AND(block, mem(&m_core->m_sb), I1, 0xf);
				UML_AND(block, I1, I0, 0x0000ffff);
			}
			break;

		case 0x06: // Rotated ops
		{
			UML_SHL(block, I0, mem(&m_core->m_sb), 16);
			UML_OR(block, I0, I0, mem(&m_core->m_r[opb]));
			UML_SHL(block, I0, I0, 4);
			UML_OR(block, I0, I0, mem(&m_core->m_sb));
			if (opn & 4) // Rotate right
			{
				UML_SHR(block, I0, I0, opn - 3);
				UML_AND(block, mem(&m_core->m_sb), I0, 0xf);
			}
			else // Rotate left
			{
				UML_SHL(block, I0, I0, opn + 1);
				UML_SHR(block, I1, I0, 20);
				UML_AND(block, mem(&m_core->m_sb), I1, 0xf);
			}
			UML_SHR(block, I1, I0, 4);
			UML_AND(block, I1, I1, 0x0000ffff);
			break;
		}

		case 0x07: // Direct 8
			UML_MOV(block, I0, op & 0x3f);
			UML_CALLH(block, *m_mem_read);
			break;

		default:
			break;
	}

	switch (op0)
	{
		case 0x00: // Add
			UML_ADD(block, I3, I2, I1);
			if (opa != 7)
				generate_update_nzsc(block);
			break;

		case 0x01: // Add w/ carry
			UML_ROLAND(block, I3, mem(&m_core->m_r[REG_SR]), 32-UNSP_C_SHIFT, 1);
			UML_ADD(block, I3, I3, I2);
			UML_ADD(block, I3, I3, I1);
			if (opa != 7)
				generate_update_nzsc(block);
			break;

		case 0x02: // Subtract
			UML_XOR(block, I1, I1, 0x0000ffff);
			UML_ADD(block, I3, I1, I2);
			UML_ADD(block, I3, I3, 1);
			if (opa != 7)
				generate_update_nzsc(block);
			break;

		case 0x03: // Subtract w/ carry
			UML_XOR(block, I1, I1, 0x0000ffff);
			UML_ADD(block, I3, I1, I2);
			UML_TEST(block, mem(&m_core->m_r[REG_SR]), UNSP_C);
			UML_JMPc(block, uml::COND_Z, no_carry);
			UML_ADD(block, I3, I3, 1);

			UML_LABEL(block, no_carry);
			if (opa != 7)
				generate_update_nzsc(block);
			break;

		case 0x04: // Compare
			UML_XOR(block, I1, I1, 0x0000ffff);
			UML_ADD(block, I3, I1, I2);
			UML_ADD(block, I3, I3, 1);
			if (opa != 7)
				generate_update_nzsc(block);
			return true;

		case 0x06: // Negate
			UML_SUB(block, I3, 0, I1);
			if (opa != 7)
				generate_update_nz(block);
			break;

		case 0x08: // XOR
			UML_XOR(block, I3, I2, I1);
			if (opa != 7)
				generate_update_nz(block);
			break;

		case 0x09: // Load
			UML_MOV(block, I3, I1);
			if (opa != 7)
				generate_update_nz(block);
			break;

		case 0x0a: // OR
			UML_OR(block, I3, I2, I1);
			if (opa != 7)
				generate_update_nz(block);
			break;

		case 0x0b: // AND
			UML_AND(block, I3, I2, I1);
			if (opa != 7)
				generate_update_nz(block);
			break;

		case 0x0c: // Test
			UML_AND(block, I3, I2, I1);
			if (opa != 7)
				generate_update_nz(block);
			return true;

		case 0x0d: // Store
			UML_MOV(block, I1, I2);
			UML_CALLH(block, *m_mem_write);
			return true;

		default:
			return false;
	}

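	// the store [imm16], r form writes the ALU result back to the address captured in I0 rather than to the destination register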
	if (op1 == 0x04 && opn == 0x03)
	{
		UML_MOV(block, I1, I3);
		UML_CALLH(block, *m_mem_write);
		return true;
	}

	UML_AND(block, mem(&m_core->m_r[opa]), I3, 0x0000ffff);
	if (opa == REG_PC)
		generate_branch(block, compiler, desc);
	return true;
}