1 // license:BSD-3-Clause
2 // copyright-holders:Ryan Holtz
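// Hyperstone CPU dynamic recompiler: UML code-generation helpers for individual instructions.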
3 
4 #include "e132xs.h"
5 
6 constexpr uint32_t WRITE_ONLY_REGMASK = (1 << BCR_REGISTER) | (1 << TPR_REGISTER) | (1 << FCR_REGISTER) | (1 << MCR_REGISTER);
7 
8 void hyperstone_device::generate_check_delay_pc(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
9 {
10 	/* if PC is referenced by the instruction in the delay slot, the delayed PC should be used */
11 	UML_TEST(block, mem(&m_core->delay_slot), 1);
12 	UML_MOVc(block, uml::COND_NZ, DRC_PC, mem(&m_core->delay_pc));
13 	UML_MOVc(block, uml::COND_NZ, mem(&m_core->delay_slot), 0);
14 	UML_SETc(block, uml::COND_NZ, mem(&m_core->delay_slot_taken));
15 }
16 
17 void hyperstone_device::generate_decode_const(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
18 {
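	// Decode the variable-length constant following the opcode: bit 15 of the first
	// extension word selects a two-word or one-word form, bit 14 supplies the sign
	// extension. The decoded value is left in I1 and PC is advanced past the words consumed.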
19 	const uint16_t imm_1 = m_pr16(desc->pc + 2);
20 
21 	if (imm_1 & 0x8000)
22 	{
23 		const uint16_t imm_2 = m_pr16(desc->pc + 4);
24 
25 		uint32_t imm = imm_2;
26 		imm |= ((imm_1 & 0x3fff) << 16);
27 
28 		if (imm_1 & 0x4000)
29 			imm |= 0xc0000000;
30 
31 		UML_ADD(block, DRC_PC, DRC_PC, 4);
32 		UML_MOV(block, I1, imm);
33 	}
34 	else
35 	{
36 		uint32_t imm = imm_1 & 0x3fff;
37 
38 		if (imm_1 & 0x4000)
39 			imm |= 0xffffc000;
40 
41 		UML_ADD(block, DRC_PC, DRC_PC, 2);
42 		UML_MOV(block, I1, imm);
43 	}
44 }
45 
46 void hyperstone_device::generate_decode_immediate_s(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
47 {
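	// Decode the short immediate selected by the low nibble of the opcode into I1:
	// n=0 yields 16, n=1 a full 32-bit extension, n=2 a zero-extended and n=3 a
	// one-extended 16-bit word; other values come from the s_immediate_values table.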
48 	uint16_t op = desc->opptr.w[0];
49 
50 	switch (op & 0xf)
51 	{
52 		case 0:
53 			UML_MOV(block, I1, 16);
54 			return;
55 		case 1:
56 		{
57 			uint32_t extra_u = (m_pr16(desc->pc + 2) << 16) | m_pr16(desc->pc + 4);
58 			UML_ADD(block, DRC_PC, DRC_PC, 4);
59 			UML_MOV(block, I1, extra_u);
60 			return;
61 		}
62 		case 2:
63 		{
64 			uint32_t extra_u = m_pr16(desc->pc + 2);
65 			UML_ADD(block, DRC_PC, DRC_PC, 2);
66 			UML_MOV(block, I1, extra_u);
67 			return;
68 		}
69 		case 3:
70 		{
71 			uint32_t extra_u = 0xffff0000 | m_pr16(desc->pc + 2);
72 			UML_ADD(block, DRC_PC, DRC_PC, 2);
73 			UML_MOV(block, I1, extra_u);
74 			return;
75 		}
76 		default:
77 			UML_MOV(block, I1, s_immediate_values[op & 0xf]);
78 			return;
79 	}
80 }
81 
82 void hyperstone_device::generate_ignore_immediate_s(drcuml_block &block, const opcode_desc *desc)
83 {
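	// Skip the short immediate without decoding it: advance PC by the number of
	// extension bytes implied by the low nibble of the opcode.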
84 	uint16_t op = desc->opptr.w[0];
85 
86 	static const uint32_t offsets[16] = { 0, 4, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
87 	const uint8_t nybble = op & 0x0f;
88 
89 	UML_ADD(block, DRC_PC, DRC_PC, offsets[nybble]);
90 }
91 
92 void hyperstone_device::generate_decode_pcrel(drcuml_block &block, const opcode_desc *desc)
93 {
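	// Decode a PC-relative displacement into I1: bit 7 of the opcode selects the
	// long (two-word) or short (one-word) form, and the low bit of the last word
	// supplies the sign.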
94 	uint16_t op = desc->opptr.w[0];
95 
96 	int32_t offset;
97 	if (op & 0x80)
98 	{
99 		uint16_t next = m_pr16(desc->pc + 2);
100 
101 		offset = (op & 0x7f) << 16;
102 		offset |= (next & 0xfffe);
103 
104 		if (next & 1)
105 			offset |= 0xff800000;
106 
107 		UML_ADD(block, DRC_PC, DRC_PC, 2);
108 	}
109 	else
110 	{
111 		offset = op & 0x7e;
112 
113 		if (op & 1)
114 			offset |= 0xffffff80;
115 	}
116 
117 	UML_MOV(block, I1, offset);
118 }
119 
120 void hyperstone_device::generate_ignore_pcrel(drcuml_block &block, const opcode_desc *desc)
121 {
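	// Skip a PC-relative displacement: only the long (two-word) form consumes an
	// extra extension word.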
122 	uint16_t op = desc->opptr.w[0];
123 
124 	if (op & 0x80)
125 	{
126 		UML_ADD(block, DRC_PC, DRC_PC, 2);
127 	}
128 }
129 
130 void hyperstone_device::generate_set_global_register(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
131 {
132 	// Expects register index in I4, value in I5, clobbers I6
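	// PC (G0), SR (G1) and the extended globals (G16-G31) need special handling;
	// every other index falls through to a plain store into m_core->global_regs.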
133 	int extended;
134 	UML_CMP(block, I4, 16);
135 	UML_JMPc(block, uml::COND_AE, extended = compiler.m_labelnum++);
136 
137 	int generic_store, set_sr, done;
138 	UML_CMP(block, I4, 1);
139 	UML_JMPc(block, uml::COND_A, generic_store = compiler.m_labelnum++);
140 	UML_JMPc(block, uml::COND_E, set_sr = compiler.m_labelnum++);
141 	UML_AND(block, DRC_PC, I5, ~1);
142 	UML_JMP(block, done = compiler.m_labelnum++);
143 
144 	UML_LABEL(block, set_sr);
145 	UML_ROLINS(block, DRC_SR, I5, 0, 0x0000ffff);
146 	UML_AND(block, DRC_SR, DRC_SR, ~0x40);
147 	UML_TEST(block, mem(&m_core->intblock), ~0);
148 	UML_MOVc(block, uml::COND_Z, mem(&m_core->intblock), 1);
149 	UML_JMP(block, done);
150 
151 	UML_LABEL(block, generic_store);
152 	UML_STORE(block, (void *)m_core->global_regs, I4, I5, SIZE_DWORD, SCALE_x4);
153 	UML_JMP(block, done);
154 
155 	int above_bcr;
156 	UML_LABEL(block, extended);
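	// Extended globals: flush the cycles accumulated in I7 into icount first,
	// presumably so the timer-related C callbacks below see an up-to-date count.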
157 	UML_SUB(block, mem(&m_core->icount), mem(&m_core->icount), I7);
158 	UML_MOV(block, I7, 0);
159 	UML_CMP(block, I4, 17);
160 	UML_JMPc(block, uml::COND_BE, generic_store);
161 	UML_CMP(block, I4, BCR_REGISTER);
162 	UML_JMPc(block, uml::COND_A, above_bcr = compiler.m_labelnum++);
163 	UML_JMPc(block, uml::COND_E, generic_store);
164 
165 	// SP or UB
166 	UML_AND(block, I5, I5, ~3);
167 	UML_JMP(block, generic_store);
168 
169 	int set_tpr, set_tcr, set_tr, set_fcr;
170 	UML_LABEL(block, above_bcr);
171 	UML_CMP(block, I4, TCR_REGISTER);
172 	UML_JMPc(block, uml::COND_B, set_tpr = compiler.m_labelnum++);
173 	UML_JMPc(block, uml::COND_E, set_tcr = compiler.m_labelnum++);
174 	// Above TCR
175 	UML_CMP(block, I4, WCR_REGISTER);
176 	UML_JMPc(block, uml::COND_B, set_tr = compiler.m_labelnum++);
177 	UML_JMPc(block, uml::COND_E, generic_store); // WCR
178 	// Above WCR
179 	UML_CMP(block, I4, FCR_REGISTER);
180 	UML_JMPc(block, uml::COND_B, done); // ISR - read only
181 	UML_JMPc(block, uml::COND_E, set_fcr = compiler.m_labelnum++);
182 	UML_CMP(block, I4, MCR_REGISTER);
183 	UML_JMPc(block, uml::COND_A, generic_store); // regs 28..31
184 	// Set MCR
185 	UML_ROLAND(block, I6, I5, 20, 0x7);
186 	UML_LOAD(block, I6, (void *)s_trap_entries, I6, SIZE_DWORD, SCALE_x4);
187 	UML_MOV(block, mem(&m_core->trap_entry), I6);
188 	UML_JMP(block, generic_store);
189 
190 	int skip_compute_tr;
191 	UML_LABEL(block, set_tpr);
192 	UML_STORE(block, (void *)m_core->global_regs, I4, I5, SIZE_DWORD, SCALE_x4);
193 	UML_TEST(block, I5, 0x80000000);
194 	UML_JMPc(block, uml::COND_NZ, skip_compute_tr = compiler.m_labelnum++);
195 	UML_CALLC(block, cfunc_compute_tr, this);
196 	UML_CALLC(block, cfunc_update_timer_prescale, this);
197 	UML_LABEL(block, skip_compute_tr);
198 	UML_CALLC(block, cfunc_adjust_timer_interrupt, this);
199 	UML_JMP(block, done);
200 
201 	UML_LABEL(block, set_tcr);
202 	UML_LOAD(block, I6, (void *)m_core->global_regs, I4, SIZE_DWORD, SCALE_x4);
203 	UML_CMP(block, I6, I5);
204 	UML_JMPc(block, uml::COND_E, done);
205 	UML_STORE(block, (void *)m_core->global_regs, I4, I5, SIZE_DWORD, SCALE_x4);
206 	UML_CALLC(block, cfunc_adjust_timer_interrupt, this);
207 	UML_CMP(block, mem(&m_core->intblock), 1);
208 	UML_MOVc(block, uml::COND_L, mem(&m_core->intblock), 1);
209 	UML_JMP(block, done);
210 
211 	UML_LABEL(block, set_tr);
212 	UML_STORE(block, (void *)m_core->global_regs, I4, I5, SIZE_DWORD, SCALE_x4);
213 	UML_MOV(block, mem(&m_core->tr_base_value), I5);
214 	UML_CALLC(block, cfunc_total_cycles, this);
215 	UML_DMOV(block, mem(&m_core->tr_base_cycles), mem(&m_core->numcycles));
216 	UML_CALLC(block, cfunc_adjust_timer_interrupt, this);
217 	UML_JMP(block, done);
218 
219 	int skip_adjust_timer;
220 	UML_LABEL(block, set_fcr);
221 	UML_LOAD(block, I6, (void *)m_core->global_regs, I4, SIZE_DWORD, SCALE_x4);
222 	UML_XOR(block, I6, I6, I5);
223 	UML_TEST(block, I6, 0x80000000);
224 	UML_JMPc(block, uml::COND_Z, skip_adjust_timer = compiler.m_labelnum++);
225 	UML_CALLC(block, cfunc_adjust_timer_interrupt, this);
226 	UML_LABEL(block, skip_adjust_timer);
227 	UML_STORE(block, (void *)m_core->global_regs, I4, I5, SIZE_DWORD, SCALE_x4);
228 	UML_CMP(block, mem(&m_core->intblock), 1);
229 	UML_MOVc(block, uml::COND_L, mem(&m_core->intblock), 1);
230 	// Fall through to done
231 
232 	UML_LABEL(block, done);
233 }
234 
235 template <hyperstone_device::trap_exception_or_int TYPE>
236 void hyperstone_device::generate_trap_exception_or_int(drcuml_block &block)
237 {
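	// Common entry sequence for traps, exceptions and interrupts: open a new register
	// frame at FP+FL, save the return PC (with S in bit 0) and the old SR into it,
	// point FP at the new frame with FL=2 (6 for traps), set L and S (plus I for
	// interrupts), clear M and T, then branch to the handler address expected in I0.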
238 	UML_ADD(block, I7, I7, mem(&m_core->clock_cycles_2));
239 
240 	UML_MOV(block, I4, DRC_SR);
241 
242 	UML_ROLAND(block, I1, DRC_SR, 7, 0x7f);
243 	UML_ROLAND(block, I2, DRC_SR, 11, 0xf);
244 	UML_TEST(block, I2, 0xf);
245 	UML_MOVc(block, uml::COND_Z, I2, 16);
246 	UML_ADD(block, I3, I1, I2);
247 
248 	if (TYPE != IS_TRAP)
249 		UML_ROLINS(block, DRC_SR,  2, 21, 0x01e00000);
250 	else
251 		UML_ROLINS(block, DRC_SR,  6, 21, 0x01e00000);
252 	UML_ROLINS(block, DRC_SR, I3, 25, 0xfe000000);
253 
254 	UML_AND(block, I1, I3, 0x3f);
255 	UML_AND(block, I2, DRC_PC, ~1);
256 	UML_ROLINS(block, I2, DRC_SR, 32-S_SHIFT, 1);
257 	UML_STORE(block, (void *)m_core->local_regs, I1, I2, SIZE_DWORD, SCALE_x4);
258 	UML_ADD(block, I2, I1, 1);
259 	UML_AND(block, I3, I2, 0x3f);
260 	UML_STORE(block, (void *)m_core->local_regs, I3, I4, SIZE_DWORD, SCALE_x4);
261 
262 	UML_AND(block, DRC_SR, DRC_SR, ~(M_MASK | T_MASK));
263 	if (TYPE == IS_INT)
264 		UML_OR(block, DRC_SR, DRC_SR, (L_MASK | S_MASK | I_MASK));
265 	else
266 		UML_OR(block, DRC_SR, DRC_SR, (L_MASK | S_MASK));
267 
268 	UML_MOV(block, DRC_PC, I0);
269 	generate_branch(block, DRC_PC, nullptr, true);
270 }
271 
272 void hyperstone_device::generate_int(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc, uint32_t addr)
273 {
274 	printf("Unimplemented: generate_int (%08x)\n", desc->pc);
275 	fflush(stdout);
276 	fatalerror(" ");
277 }
278 
279 void hyperstone_device::generate_exception(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc, uint32_t addr)
280 {
281 	printf("Unimplemented: generate_exception (%08x)\n", desc->pc);
282 	fflush(stdout);
283 	fatalerror(" ");
284 }
285 
286 void hyperstone_device::generate_software(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
287 {
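	// Software trap dispatch: derive the handler address from trap_entry and the opcode
	// number, push the destination's stack address, the source register pair, the return
	// PC and the old SR into a new register frame, then set FL=6 and L, clear M and T,
	// and jump to the handler.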
288 	UML_MOV(block, I7, mem(&m_core->clock_cycles_6));
289 
290 	uint16_t op = desc->opptr.w[0];
291 
292 	const uint32_t src_code = op & 0xf;
293 	const uint32_t srcf_code = src_code + 1;
294 	const uint32_t dst_code = (op & 0xf0) >> 4;
295 
296 	generate_check_delay_pc(block, compiler, desc);
297 
298 	UML_ROLAND(block, I3, DRC_SR, 7, 0x7f); // I3 = FP
299 
300 	UML_ADD(block, I2, I3, src_code);
301 	UML_AND(block, I4, I2, 0x3f);
302 	UML_LOAD(block, I0, (void *)m_core->local_regs, I4, SIZE_DWORD, SCALE_x4); // I0 = sreg
303 	UML_ADD(block, I2, I3, srcf_code);
304 	UML_AND(block, I4, I2, 0x3f);
305 	UML_LOAD(block, I1, (void *)m_core->local_regs, I4, SIZE_DWORD, SCALE_x4); // I1 = sregf
306 
307 	UML_ROLINS(block, DRC_SR, 1, 19, 0x00180000);
308 
309 	uint32_t num = op >> 8;
310 	int mem3 = compiler.m_labelnum++;
311 	int have_code_addr = compiler.m_labelnum++;
312 	UML_MOV(block, I4, mem(&m_core->trap_entry));
313 	UML_CMP(block, I4, 0xffffff00);
314 	UML_JMPc(block, uml::COND_E, mem3);
315 	UML_OR(block, I5, I4, (0x10c | ((0xcf - num) << 4)));
316 	UML_JMP(block, have_code_addr);
317 
318 	UML_LABEL(block, mem3);
319 	UML_SUB(block, I5, I4, 0x100);
320 	UML_OR(block, I5, I5, ((num & 0xf) << 4)); // I5 = addr
321 
322 	UML_LABEL(block, have_code_addr);
323 
324 	UML_ROLAND(block, I2, DRC_SR, 11, 0xf);
325 	UML_TEST(block, I2, 0xf);
326 	UML_MOVc(block, uml::COND_Z, I2, 16);
327 	UML_ADD(block, I4, I2, I3); // I4 = reg
328 
329 	UML_AND(block, I2, mem(&SP), 0xffffff00);
330 	UML_ADD(block, I6, I2, 0x100); // I6 = (SP & ~0xff) + 0x100
331 	UML_ADD(block, I2, I3, dst_code);
332 	UML_AND(block, I2, I2, 0x3f);
333 	UML_SHL(block, I2, I2, 2); // I2 = (((fp + DST_CODE) & 0x3f) << 2)
334 	UML_ADD(block, I6, I6, I2); // I6 = (SP & ~0xff) + 0x100 + (((fp + DST_CODE) & 0x3f) << 2)
335 
336 	UML_AND(block, I2, I4, 0x3f);
337 	UML_STORE(block, (void *)m_core->local_regs, I2, I6, SIZE_DWORD, SCALE_x4); // m_core->local_regs[(reg + 0) & 0x3f] = stack_of_dst;
338 	UML_ADD(block, I6, I2, 1);
339 	UML_AND(block, I2, I6, 0x3f);
340 	UML_STORE(block, (void *)m_core->local_regs, I2, I0, SIZE_DWORD, SCALE_x4); // m_core->local_regs[(reg + 1) & 0x3f] = sreg;
341 	UML_ADD(block, I6, I2, 1);
342 	UML_AND(block, I2, I6, 0x3f);
343 	UML_STORE(block, (void *)m_core->local_regs, I2, I1, SIZE_DWORD, SCALE_x4); // m_core->local_regs[(reg + 2) & 0x3f] = sregf;
344 
345 	UML_AND(block, I0, DRC_PC, ~1);
346 	UML_ROLINS(block, I0, DRC_SR, 32-S_SHIFT, 1);
347 	UML_ADD(block, I6, I2, 1);
348 	UML_AND(block, I2, I6, 0x3f);
349 	UML_STORE(block, (void *)m_core->local_regs, I2, I0, SIZE_DWORD, SCALE_x4); // m_core->local_regs[(reg + 3) & 0x3f] = (PC & ~1) | GET_S;
350 
351 	UML_ADD(block, I6, I2, 1);
352 	UML_AND(block, I2, I6, 0x3f);
353 	UML_STORE(block, (void *)m_core->local_regs, I2, DRC_SR, SIZE_DWORD, SCALE_x4); // m_core->local_regs[(reg + 4) & 0x3f] = oldSR;
354 
355 	UML_MOV(block, DRC_PC, I5); // PC = addr
356 
357 	UML_MOV(block, I0, DRC_SR);
358 	UML_ROLINS(block, I0, 0x00c08000, 0, 0x01e08000); // SET_FL(6), SR |= L_MASK
359 	UML_ROLINS(block, I0, I4, 25, 0xfe000000); // SET_FP(reg)
360 	UML_AND(block, DRC_SR, I0, ~(M_MASK | T_MASK));
361 
362 	generate_branch(block, desc->targetpc, desc);
363 }
364 
365 
366 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::reg_bank SRC_GLOBAL>
367 void hyperstone_device::generate_chk(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
368 {
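	// CHK: raise a range-error exception when the destination register violates the
	// bound given by the source (dst > src; dst >= src when the source is PC;
	// dst == 0 when the source is SR).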
369 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
370 
371 	uint16_t op = desc->opptr.w[0];
372 
373 	generate_check_delay_pc(block, compiler, desc);
374 
375 	if (!DST_GLOBAL || !SRC_GLOBAL)
376 		UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
377 
378 	const uint32_t src_code = op & 0xf;
379 	const uint32_t dst_code = (op & 0xf0) >> 4;
380 
381 	if (DST_GLOBAL)
382 	{
383 		UML_LOAD(block, I1, (void *)m_core->global_regs, dst_code, SIZE_DWORD, SCALE_x4);
384 	}
385 	else
386 	{
387 		UML_ADD(block, I2, I3, dst_code);
388 		UML_AND(block, I4, I2, 0x3f);
389 		UML_LOAD(block, I1, (void *)m_core->local_regs, I4, SIZE_DWORD, SCALE_x4);
390 	}
391 
392 	if (SRC_GLOBAL)
393 	{
394 		if (src_code == SR_REGISTER)
395 		{
396 			UML_TEST(block, I1, ~0);
397 			UML_EXHc(block, uml::COND_Z, *m_exception[EXCEPTION_RANGE_ERROR], 0);
398 		}
399 		else
400 		{
401 			UML_LOAD(block, I0, (void *)m_core->global_regs, src_code, SIZE_DWORD, SCALE_x4);
402 			UML_CMP(block, I1, I0);
403 			if (src_code == PC_REGISTER)
404 				UML_EXHc(block, uml::COND_AE, *m_exception[EXCEPTION_RANGE_ERROR], 0);
405 			else
406 				UML_EXHc(block, uml::COND_A, *m_exception[EXCEPTION_RANGE_ERROR], 0);
407 		}
408 	}
409 	else
410 	{
411 		UML_ADD(block, I2, I3, src_code);
412 		UML_AND(block, I4, I2, 0x3f);
413 		UML_LOAD(block, I0, (void *)m_core->local_regs, I4, SIZE_DWORD, SCALE_x4);
414 
415 		UML_CMP(block, I1, I0);
416 		UML_EXHc(block, uml::COND_A, *m_exception[EXCEPTION_RANGE_ERROR], 0);
417 	}
418 }
419 
420 
421 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::reg_bank SRC_GLOBAL>
422 void hyperstone_device::generate_movd(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
423 {
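	// MOVD: three cases follow - a PC destination implements the RET sequence (restore
	// PC/SR, check privilege transitions, pop back any frames the return unwinds), an
	// SR source clears the destination pair and sets Z, and the remaining case is a
	// plain 64-bit register-pair move with Z/N updated.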
424 	UML_MOV(block, I7, mem(&m_core->clock_cycles_2));
425 
426 	uint16_t op = desc->opptr.w[0];
427 
428 	generate_check_delay_pc(block, compiler, desc);
429 
430 	const uint32_t src_code = op & 0xf;
431 	const uint32_t srcf_code = src_code + 1;
432 	const uint32_t dst_code = (op & 0xf0) >> 4;
433 	const uint32_t dstf_code = dst_code + 1;
434 
435 	if (DST_GLOBAL && (dst_code == PC_REGISTER))
436 	{
437 		if (SRC_GLOBAL && src_code < 2)
438 		{
439 			printf("Denoted PC or SR in RET instruction. PC = %08X\n", desc->pc);
440 			return;
441 		}
442 
443 		UML_AND(block, I1, DRC_SR, (S_MASK | L_MASK));
444 		if (SRC_GLOBAL)
445 		{
446 			UML_LOAD(block, I2, (void *)m_core->global_regs, src_code, SIZE_DWORD, SCALE_x4);
447 			UML_LOAD(block, I3, (void *)m_core->global_regs, srcf_code, SIZE_DWORD, SCALE_x4);
448 		}
449 		else
450 		{
451 			UML_ROLAND(block, I5, DRC_SR, 7, 0x7f);
452 			UML_ADD(block, I3, I5, src_code);
453 			UML_AND(block, I4, I3, 0x3f);
454 			UML_LOAD(block, I2, (void *)m_core->local_regs, I4, SIZE_DWORD, SCALE_x4);
455 
456 			UML_ADD(block, I6, I5, srcf_code);
457 			UML_AND(block, I5, I6, 0x3f);
458 			UML_LOAD(block, I3, (void *)m_core->local_regs, I5, SIZE_DWORD, SCALE_x4);
459 		}
460 
461 		UML_AND(block, DRC_PC, I2, ~1);
462 
463 		UML_AND(block, DRC_SR, I3, 0xffe3ffff);
464 		UML_ROLINS(block, DRC_SR, I2, S_SHIFT, S_MASK);
465 
466 		UML_TEST(block, mem(&m_core->intblock), ~0);
467 		UML_MOVc(block, uml::COND_Z, mem(&m_core->intblock), 1);
468 
469 		int no_exception;
470 		UML_AND(block, I2, DRC_SR, (S_MASK | L_MASK));
471 		UML_AND(block, I3, I1, I2);
472 		UML_TEST(block, I3, S_MASK);
473 		UML_JMPc(block, uml::COND_NZ, no_exception = compiler.m_labelnum++); // If S is set and unchanged, there won't be an exception.
474 
475 		UML_XOR(block, I3, I1, I2);
476 		UML_AND(block, I4, I3, I2);
477 		UML_TEST(block, I4, S_MASK);
478 		UML_EXHc(block, uml::COND_NZ, *m_exception[EXCEPTION_PRIVILEGE_ERROR], 0); // If S is newly set, it's a privilege error.
479 
480 		UML_TEST(block, I3, L_MASK);
481 		UML_JMPc(block, uml::COND_Z, no_exception); // If L is unchanged, there won't be an exception.
482 		UML_TEST(block, I1, L_MASK);
483 		UML_JMPc(block, uml::COND_NZ, no_exception); // If L was previously set, there won't be an exception.
484 		UML_TEST(block, I2, S_MASK);
485 		UML_EXHc(block, uml::COND_Z, *m_exception[EXCEPTION_PRIVILEGE_ERROR], 0); // If L is newly set and we are not in Supervisor mode, it's a privilege error.
486 
487 		int diff_in_range, done_ret;
488 		UML_LABEL(block, no_exception);
489 		UML_MOV(block, I0, mem(&SP));
490 		UML_ROLAND(block, I1, I0, 30, 0x7f);
491 		UML_ROLAND(block, I2, DRC_SR, 7, 0x7f);
492 		UML_SUB(block, I3, I2, I1);
493 		UML_CMP(block, I3, -64);
494 		UML_JMPc(block, uml::COND_L, done_ret = compiler.m_labelnum++);
495 		UML_CMP(block, I3, 64);
496 		UML_JMPc(block, uml::COND_L, diff_in_range = compiler.m_labelnum++);
497 		UML_OR(block, I3, I3, 0x80);
498 		UML_SEXT(block, I3, I3, SIZE_BYTE);
499 		UML_LABEL(block, diff_in_range);
500 
501 		int pop_next;
502 		UML_LABEL(block, pop_next = compiler.m_labelnum++);
503 		UML_CMP(block, I3, 0);
504 		UML_JMPc(block, uml::COND_GE, done_ret);
505 		UML_SUB(block, I0, I0, 4);
506 		UML_CALLH(block, *m_mem_read32);
507 		UML_ROLAND(block, I2, I0, 30, 0x3f);
508 		UML_STORE(block, (void *)m_core->local_regs, I2, I1, SIZE_DWORD, SCALE_x4);
509 		UML_ADD(block, I3, I3, 1);
510 		UML_TEST(block, I3, ~0);
511 		UML_JMP(block, pop_next);
512 
513 		UML_LABEL(block, done_ret);
514 		UML_MOV(block, mem(&SP), I0);
515 		generate_branch(block, desc->targetpc, desc);
516 		return;
517 	}
518 	else if (SRC_GLOBAL && (src_code == SR_REGISTER)) // Rd doesn't denote PC and Rs denotes SR
519 	{
520 		UML_OR(block, DRC_SR, DRC_SR, Z_MASK);
521 		UML_AND(block, DRC_SR, DRC_SR, ~N_MASK);
522 		if (DST_GLOBAL)
523 		{
524 			UML_MOV(block, I4, dst_code);
525 			UML_MOV(block, I5, 0);
526 			generate_set_global_register(block, compiler, desc);
527 			UML_MOV(block, I4, dstf_code);
528 			UML_MOV(block, I5, 0);
529 			generate_set_global_register(block, compiler, desc);
530 			if (dst_code == PC_REGISTER || dstf_code == PC_REGISTER)
531 				generate_branch(block, desc->targetpc, desc);
532 		}
533 		else
534 		{
535 			UML_ROLAND(block, I0, DRC_SR, 7, 0x7f);
536 			UML_ADD(block, I0, I0, dst_code);
537 			UML_AND(block, I0, I0, 0x3f);
538 			UML_STORE(block, (void *)m_core->local_regs, I0, 0, SIZE_DWORD, SCALE_x4);
539 			UML_ADD(block, I0, I0, 1);
540 			UML_AND(block, I0, I0, 0x3f);
541 			UML_STORE(block, (void *)m_core->local_regs, I0, 0, SIZE_DWORD, SCALE_x4);
542 		}
543 	}
544 	else // Rd doesn't denote PC and Rs doesn't denote SR
545 	{
546 		if (!SRC_GLOBAL || !DST_GLOBAL)
547 		{
548 			UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
549 		}
550 
551 		if (SRC_GLOBAL)
552 		{
553 			UML_LOAD(block, I0, (void *)m_core->global_regs, src_code, SIZE_DWORD, SCALE_x4);
554 			UML_LOAD(block, I1, (void *)m_core->global_regs, srcf_code, SIZE_DWORD, SCALE_x4);
555 		}
556 		else
557 		{
558 			UML_ADD(block, I0, I3, src_code);
559 			UML_AND(block, I0, I0, 0x3f);
560 			UML_LOAD(block, I0, (void *)m_core->local_regs, I0, SIZE_DWORD, SCALE_x4);
561 			UML_ADD(block, I1, I3, srcf_code);
562 			UML_AND(block, I1, I1, 0x3f);
563 			UML_LOAD(block, I1, (void *)m_core->local_regs, I1, SIZE_DWORD, SCALE_x4);
564 		}
565 
566 		UML_AND(block, DRC_SR, DRC_SR, ~(Z_MASK | N_MASK));
567 
568 		UML_OR(block, I2, I0, I1);
569 		UML_TEST(block, I2, ~0);
570 		UML_SETc(block, uml::COND_Z, I2);
571 		UML_ROLINS(block, DRC_SR, I2, Z_SHIFT, Z_MASK);
572 
573 		UML_TEST(block, I0, 0x80000000);
574 		UML_SETc(block, uml::COND_NZ, I2);
575 		UML_ROLINS(block, DRC_SR, I2, N_SHIFT, N_MASK);
576 
577 		if (DST_GLOBAL)
578 		{
579 			UML_MOV(block, I4, dst_code);
580 			UML_MOV(block, I5, I0);
581 			generate_set_global_register(block, compiler, desc);
582 			UML_MOV(block, I4, dstf_code);
583 			UML_MOV(block, I5, I1);
584 			generate_set_global_register(block, compiler, desc);
585 			if (dst_code == PC_REGISTER || dstf_code == PC_REGISTER)
586 				generate_branch(block, desc->targetpc, desc);
587 		}
588 		else
589 		{
590 			UML_ADD(block, I2, I3, dst_code);
591 			UML_AND(block, I2, I2, 0x3f);
592 			UML_STORE(block, (void *)m_core->local_regs, I2, I0, SIZE_DWORD, SCALE_x4);
593 			UML_ADD(block, I2, I3, dstf_code);
594 			UML_AND(block, I2, I2, 0x3f);
595 			UML_STORE(block, (void *)m_core->local_regs, I2, I1, SIZE_DWORD, SCALE_x4);
596 		}
597 	}
598 }
599 
600 
601 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::reg_bank SRC_GLOBAL, hyperstone_device::sign_mode SIGNED>
602 void hyperstone_device::generate_divsu(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
603 {
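	// DIVU/DIVS: divide the 64-bit pair dst:dstf by the source. A zero divisor (or a
	// negative dividend in the signed case) sets V and raises a range-error exception;
	// otherwise dst receives the remainder and dstf the quotient.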
604 	UML_MOV(block, I7, mem(&m_core->clock_cycles_36));
605 
606 	uint16_t op = desc->opptr.w[0];
607 
608 	const uint32_t dst_code = (op & 0xf0) >> 4;
609 	const uint32_t dstf_code = dst_code + 1;
610 	const uint32_t src_code = op & 0xf;
611 
612 	if ((SRC_GLOBAL == DST_GLOBAL && (src_code == dst_code || src_code == dstf_code)) || (SRC_GLOBAL && src_code < 2))
613 	{
614 		printf("Denoted the same register code or PC/SR as source in generate_divsu. PC = %08X\n", desc->pc);
615 		return;
616 	}
617 
618 	if (!SRC_GLOBAL || !DST_GLOBAL)
619 		UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
620 
621 	if (SRC_GLOBAL)
622 		UML_LOAD(block, I0, (void *)m_core->global_regs, src_code, SIZE_DWORD, SCALE_x4);
623 	else
624 	{
625 		UML_ADD(block, I2, I3, src_code);
626 		UML_AND(block, I4, I2, 0x3f);
627 		UML_LOAD(block, I0, (void *)m_core->local_regs, I4, SIZE_DWORD, SCALE_x4);
628 	}
629 
630 #ifndef PTR64
631 	UML_DAND(block, I0, I0, 0x00000000ffffffffULL);
632 #endif
633 
634 	if (DST_GLOBAL)
635 	{
636 		UML_LOAD(block, I1, (void *)m_core->global_regs, dst_code, SIZE_DWORD, SCALE_x4);
637 		UML_LOAD(block, I2, (void *)m_core->global_regs, dstf_code, SIZE_DWORD, SCALE_x4);
638 	}
639 	else
640 	{
641 		UML_ADD(block, I2, I3, dst_code);
642 		UML_AND(block, I5, I2, 0x3f);
643 		UML_LOAD(block, I1, (void *)m_core->local_regs, I5, SIZE_DWORD, SCALE_x4);
644 		UML_ADD(block, I4, I3, dstf_code);
645 		UML_AND(block, I6, I4, 0x3f);
646 		UML_LOAD(block, I2, (void *)m_core->local_regs, I6, SIZE_DWORD, SCALE_x4);
647 	}
648 
649 #ifndef PTR64
650 	UML_DAND(block, I2, I2, 0x00000000ffffffffULL);
651 #endif
652 
653 	UML_DSHL(block, I1, I1, 32);
654 	UML_DOR(block, I1, I1, I2);
655 
656 	int no_result = compiler.m_labelnum++;
657 	int done = compiler.m_labelnum++;
658 	UML_TEST(block, I0, ~0);
659 	UML_JMPc(block, uml::COND_Z, no_result);
660 	if (SIGNED)
661 	{
662 		UML_DTEST(block, I1, 0x8000000000000000LL);
663 		UML_JMPc(block, uml::COND_NZ, no_result);
664 	}
665 
666 	if (SIGNED)
667 		UML_DDIVS(block, I2, I4, I1, I0);
668 	else
669 		UML_DDIVU(block, I2, I4, I1, I0);
670 
671 	UML_AND(block, I3, DRC_SR, ~(V_MASK | Z_MASK | N_MASK));
672 	UML_TEST(block, I2, ~0);
673 	UML_MOVc(block, uml::COND_Z, I0, Z_MASK);
674 	UML_MOVc(block, uml::COND_NZ, I0, 0);
675 	UML_ROLINS(block, I0, I2, 3, N_MASK);
676 	UML_OR(block, DRC_SR, I3, I0);
677 
678 	if (DST_GLOBAL)
679 	{
680 		UML_STORE(block, (void *)m_core->global_regs, dst_code, I4, SIZE_DWORD, SCALE_x4);
681 		UML_STORE(block, (void *)m_core->global_regs, dstf_code, I2, SIZE_DWORD, SCALE_x4);
682 	}
683 	else
684 	{
685 		UML_STORE(block, (void *)m_core->local_regs, I5, I4, SIZE_DWORD, SCALE_x4);
686 		UML_STORE(block, (void *)m_core->local_regs, I6, I2, SIZE_DWORD, SCALE_x4);
687 	}
688 
689 	UML_JMP(block, done);
690 
691 	UML_LABEL(block, no_result);
692 	UML_OR(block, DRC_SR, DRC_SR, V_MASK);
693 	UML_EXH(block, *m_exception[EXCEPTION_RANGE_ERROR], 0);
694 
695 	UML_LABEL(block, done);
696 }
697 
698 
699 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::reg_bank SRC_GLOBAL>
700 void hyperstone_device::generate_xm(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
701 {
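	// XM: shift the source left by the amount encoded in the sub-type; for sub-types
	// 0-3 the unshifted value is first bounds-checked against the immediate and a
	// range-error exception is raised when it is out of range.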
702 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
703 
704 	uint16_t op = desc->opptr.w[0];
705 	const uint32_t dst_code = (op & 0xf0) >> 4;
706 	const uint32_t src_code = op & 0xf;
707 
708 	const uint32_t next = m_pr16(desc->pc + 2);
709 	const uint8_t sub_type = (next & 0x7000) >> 12;
710 
711 	uint32_t extra_u = next & 0xfff;
712 	if (next & 0x8000)
713 	{
714 		extra_u = ((extra_u & 0xfff) << 16) | m_pr16(desc->pc + 4);
715 		UML_ADD(block, DRC_PC, DRC_PC, 4);
716 	}
717 	else
718 	{
719 		UML_ADD(block, DRC_PC, DRC_PC, 2);
720 	}
721 
722 	UML_MOV(block, I1, extra_u);
723 
724 	generate_check_delay_pc(block, compiler, desc);
725 
726 	if (!SRC_GLOBAL || !DST_GLOBAL)
727 		UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
728 
729 	if (SRC_GLOBAL)
730 	{
731 		if (src_code == SR_REGISTER)
732 			UML_AND(block, I0, DRC_SR, C_MASK);
733 		else
734 			UML_LOAD(block, I0, (void *)m_core->global_regs, src_code, SIZE_DWORD, SCALE_x4);
735 	}
736 	else
737 	{
738 		UML_ADD(block, I2, I3, src_code);
739 		UML_AND(block, I4, I2, 0x3f);
740 		UML_LOAD(block, I0, (void *)m_core->local_regs, I4, SIZE_DWORD, SCALE_x4);
741 	}
742 
743 	if ((SRC_GLOBAL && (src_code == SR_REGISTER)) || (DST_GLOBAL && (dst_code < 2)))
744 	{
745 		return;
746 	}
747 
748 	if (sub_type < 4)
749 	{
750 		UML_CMP(block, I0, extra_u);
751 		int skip, done;
752 		if (SRC_GLOBAL && (src_code == PC_REGISTER))
753 		{
754 			UML_JMPc(block, uml::COND_B, skip = compiler.m_labelnum++);
755 			UML_EXH(block, *m_exception[EXCEPTION_RANGE_ERROR], 0);
756 			UML_JMP(block, done = compiler.m_labelnum++);
757 		}
758 		else
759 		{
760 			UML_JMPc(block, uml::COND_BE, skip = compiler.m_labelnum++);
761 			UML_EXH(block, *m_exception[EXCEPTION_RANGE_ERROR], 0);
762 			UML_JMP(block, done = compiler.m_labelnum++);
763 		}
764 
765 		UML_LABEL(block, skip);
766 		UML_SHL(block, I5, I0, sub_type);
767 
768 		UML_LABEL(block, done);
769 	}
770 	else
771 	{
772 		UML_SHL(block, I5, I0, sub_type - 4);
773 	}
774 
775 	if (DST_GLOBAL)
776 	{
777 		UML_STORE(block, (void *)m_core->global_regs, dst_code, I5, SIZE_DWORD, SCALE_x4);
778 	}
779 	else
780 	{
781 		UML_ADD(block, I6, I3, dst_code);
782 		UML_AND(block, I4, I6, 0x3f);
783 		UML_STORE(block, (void *)m_core->local_regs, I4, I5, SIZE_DWORD, SCALE_x4);
784 	}
785 }
786 
787 
788 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::reg_bank SRC_GLOBAL>
789 void hyperstone_device::generate_mask(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
790 {
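	// MASK: dst = src AND const, updating only the Z flag.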
791 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
792 
793 	uint16_t op = desc->opptr.w[0];
794 	const uint32_t src_code = op & 0xf;
795 	const uint32_t dst_code = (op & 0xf0) >> 4;
796 
797 	generate_decode_const(block, compiler, desc);
798 	generate_check_delay_pc(block, compiler, desc);
799 
800 	if (!SRC_GLOBAL || !DST_GLOBAL)
801 		UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
802 
803 	if (SRC_GLOBAL)
804 	{
805 		UML_LOAD(block, I2, (void *)m_core->global_regs, src_code, SIZE_DWORD, SCALE_x4);
806 	}
807 	else
808 	{
809 		UML_ADD(block, I2, I3, src_code);
810 		UML_AND(block, I2, I2, 0x3f);
811 		UML_LOAD(block, I2, (void *)m_core->local_regs, I2, SIZE_DWORD, SCALE_x4);
812 	}
813 
814 	UML_AND(block, I1, I1, I2);
815 
816 	int skip_mask;
817 	UML_AND(block, DRC_SR, DRC_SR, ~Z_MASK);
818 	UML_TEST(block, I1, ~0);
819 	UML_JMPc(block, uml::COND_NZ, skip_mask = compiler.m_labelnum++);
820 	UML_OR(block, DRC_SR, DRC_SR, Z_MASK);
821 	UML_LABEL(block, skip_mask);
822 
823 	if (DST_GLOBAL)
824 	{
825 		if (dst_code < 2)
826 		{
827 			UML_MOV(block, I4, dst_code);
828 			UML_MOV(block, I5, I1);
829 			generate_set_global_register(block, compiler, desc);
830 			if (dst_code == PC_REGISTER)
831 				generate_branch(block, desc->targetpc, desc);
832 		}
833 		else
834 		{
835 			UML_STORE(block, (void *)m_core->global_regs, dst_code, I1, SIZE_DWORD, SCALE_x4);
836 		}
837 	}
838 	else
839 	{
840 		UML_ADD(block, I0, I3, dst_code);
841 		UML_AND(block, I0, I0, 0x3f);
842 		UML_STORE(block, (void *)m_core->local_regs, I0, I1, SIZE_DWORD, SCALE_x4);
843 	}
844 }
845 
846 
847 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::reg_bank SRC_GLOBAL>
848 void hyperstone_device::generate_sum(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
849 {
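	// SUM: dst = src + const. The 64-bit add below exposes the carry out of bit 31,
	// and overflow is derived from ((res ^ src) & (res ^ const)) & 0x80000000.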
850 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
851 
852 	uint16_t op = desc->opptr.w[0];
853 	const uint32_t src_code = op & 0xf;
854 	const uint32_t dst_code = (op & 0xf0) >> 4;
855 
856 	generate_decode_const(block, compiler, desc);
857 	generate_check_delay_pc(block, compiler, desc);
858 
859 	if (!SRC_GLOBAL || !DST_GLOBAL)
860 	{
861 		UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
862 	}
863 
864 	if (SRC_GLOBAL)
865 	{
866 		UML_LOAD(block, I2, (void *)m_core->global_regs, src_code, SIZE_DWORD, SCALE_x4);
867 	}
868 	else
869 	{
870 		UML_ADD(block, I2, I3, src_code);
871 		UML_AND(block, I2, I2, 0x3f);
872 		UML_LOAD(block, I2, (void *)m_core->local_regs, I2, SIZE_DWORD, SCALE_x4);
873 	}
874 
875 #ifndef PTR64
876 	UML_DAND(block, I1, I1, 0x00000000ffffffffULL);
877 	UML_DAND(block, I2, I2, 0x00000000ffffffffULL);
878 #endif
879 
880 	UML_DADD(block, I5, I1, I2);
881 
882 	UML_AND(block, DRC_SR, DRC_SR, ~(C_MASK | V_MASK | Z_MASK | N_MASK));
883 	UML_DTEST(block, I5, 0x100000000ULL);
884 	UML_SETc(block, uml::COND_NZ, I6);
885 	UML_ROLINS(block, DRC_SR, I6, C_SHIFT, C_MASK);
886 
887 	UML_XOR(block, I6, I5, I1);
888 	UML_XOR(block, I1, I5, I2);
889 	UML_AND(block, I1, I1, I6);
890 	UML_AND(block, I1, I1, 0x80000000);
891 	UML_ROLINS(block, DRC_SR, I1, 4, V_MASK);
892 
893 	UML_TEST(block, I5, ~0);
894 	UML_SETc(block, uml::COND_Z, I6);
895 	UML_ROLINS(block, DRC_SR, I6, Z_SHIFT, Z_MASK);
896 
897 	UML_ROLINS(block, DRC_SR, I5, 3, N_MASK);
898 
899 	if (DST_GLOBAL)
900 	{
901 		if (dst_code < 2)
902 		{
903 			UML_MOV(block, I4, dst_code);
904 			generate_set_global_register(block, compiler, desc);
905 			if (dst_code == PC_REGISTER)
906 				generate_branch(block, desc->targetpc, desc);
907 		}
908 		else
909 		{
910 			UML_STORE(block, (void *)m_core->global_regs, dst_code, I5, SIZE_DWORD, SCALE_x4);
911 		}
912 	}
913 	else
914 	{
915 		UML_ADD(block, I0, I3, dst_code);
916 		UML_AND(block, I0, I0, 0x3f);
917 		UML_STORE(block, (void *)m_core->local_regs, I0, I5, SIZE_DWORD, SCALE_x4);
918 	}
919 }
920 
921 
922 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::reg_bank SRC_GLOBAL>
923 void hyperstone_device::generate_sums(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
924 {
925 	printf("Unimplemented: generate_sums (%08x)\n", desc->pc);
926 	fflush(stdout);
927 	fatalerror(" ");
928 }
929 
930 
931 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::reg_bank SRC_GLOBAL>
932 void hyperstone_device::generate_cmp(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
933 {
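	// CMP: compute dst - src purely for the flags; C/Z/N come from unsigned and
	// signed comparisons, V from the usual signed-overflow test on the difference.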
934 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
935 
936 	uint16_t op = desc->opptr.w[0];
937 	const uint32_t src_code = op & 0xf;
938 	const uint32_t dst_code = (op & 0xf0) >> 4;
939 
940 	generate_check_delay_pc(block, compiler, desc);
941 
942 	if (!SRC_GLOBAL || !DST_GLOBAL)
943 		UML_ROLAND(block, I2, DRC_SR, 7, 0x7f);
944 
945 	if (SRC_GLOBAL)
946 	{
947 		if (src_code == SR_REGISTER)
948 			UML_AND(block, I0, DRC_SR, C_MASK);
949 		else
950 			UML_LOAD(block, I0, (void *)m_core->global_regs, src_code, SIZE_DWORD, SCALE_x4);
951 	}
952 	else
953 	{
954 		UML_ADD(block, I1, I2, src_code);
955 		UML_AND(block, I1, I1, 0x3f);
956 		UML_LOAD(block, I0, (void *)m_core->local_regs, I1, SIZE_DWORD, SCALE_x4);
957 	}
958 
959 	if (DST_GLOBAL)
960 	{
961 		UML_LOAD(block, I1, (void *)m_core->global_regs, dst_code, SIZE_DWORD, SCALE_x4);
962 	}
963 	else
964 	{
965 		UML_ADD(block, I2, I2, dst_code);
966 		UML_AND(block, I2, I2, 0x3f);
967 		UML_LOAD(block, I1, (void *)m_core->local_regs, I2, SIZE_DWORD, SCALE_x4);
968 	}
969 
970 #ifndef PTR64
971 	UML_DAND(block, I0, I0, 0x00000000ffffffffULL);
972 	UML_DAND(block, I1, I1, 0x00000000ffffffffULL);
973 #endif
974 
975 	UML_DSUB(block, I2, I1, I0); // tmp
976 
977 	UML_XOR(block, I2, I2, I1);
978 	UML_XOR(block, I3, I1, I0);
979 	UML_AND(block, I3, I3, I2);
980 	UML_ROLINS(block, I3, I3, 4, V_MASK);
981 
982 	UML_CMP(block, I1, I0);
983 	UML_SETc(block, uml::COND_B, I2);
984 	UML_ROLINS(block, I3, I2, C_SHIFT, C_MASK);
985 
986 	UML_CMP(block, I1, I0);
987 	UML_SETc(block, uml::COND_E, I2);
988 	UML_ROLINS(block, I3, I2, Z_SHIFT, Z_MASK);
989 
990 	UML_CMP(block, I1, I0);
991 	UML_SETc(block, uml::COND_L, I2);
992 	UML_ROLINS(block, I3, I2, N_SHIFT, N_MASK);
993 
994 	UML_ROLINS(block, DRC_SR, I3, 0, (V_MASK | N_MASK | Z_MASK | C_MASK));
995 }
996 
997 void hyperstone_device::generate_get_global_register(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
998 {
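	// Read global register src (src+16 when the H flag is set) into I5. TR is
	// special-cased so the timer value is computed on demand; write-only registers
	// (BCR/TPR/FCR/MCR) read back as zero.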
999 	uint16_t op = desc->opptr.w[0];
1000 	const uint32_t src_code = op & 0xf;
1001 
1002 	int regular_load = compiler.m_labelnum++;
1003 	int done = compiler.m_labelnum++;
1004 	UML_TEST(block, DRC_SR, H_MASK);
1005 	UML_MOVc(block, uml::COND_NZ, I1, 16 + src_code);
1006 	UML_MOVc(block, uml::COND_Z, I1, src_code);
1007 	UML_CMP(block, I1, TR_REGISTER);
1008 	UML_JMPc(block, uml::COND_NE, regular_load);
1009 
1010 	UML_SHR(block, I2, mem(&m_core->tr_clocks_per_tick), 1);
1011 	UML_SUB(block, mem(&m_core->icount), mem(&m_core->icount), I7);
1012 	UML_MOV(block, I7, 0);
1013 	UML_CMP(block, mem(&m_core->icount), I2);
1014 	UML_MOVc(block, uml::COND_BE, I2, 0);
1015 	UML_SUB(block, mem(&m_core->icount), mem(&m_core->icount), I2);
1016 	UML_CALLC(block, cfunc_compute_tr, this);
1017 	UML_MOV(block, I5, mem(&m_core->tr_result));
1018 	UML_JMP(block, done);
1019 
1020 	UML_LABEL(block, regular_load);
1021 	UML_LOAD(block, I5, (void *)m_core->global_regs, I1, SIZE_DWORD, SCALE_x4);
1022 	UML_SHL(block, I2, 1, I1);
1023 	UML_TEST(block, I2, WRITE_ONLY_REGMASK);
1024 	UML_MOVc(block, uml::COND_NZ, I5, 0);
1025 
1026 	UML_LABEL(block, done);
1027 }
1028 
1029 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::reg_bank SRC_GLOBAL>
1030 void hyperstone_device::generate_mov(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
1031 {
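	// MOV: with a global destination and H set, the write targets the high globals
	// (G16-G31) and requires supervisor mode; the source value is fetched into I5
	// and Z/N are updated before the store.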
1032 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
1033 
1034 	uint16_t op = desc->opptr.w[0];
1035 	const uint32_t src_code = op & 0xf;
1036 	const uint32_t dst_code = (op & 0xf0) >> 4;
1037 
1038 	generate_check_delay_pc(block, compiler, desc);
1039 
1040 	if (DST_GLOBAL)
1041 	{
1042 		int no_exception;
1043 		UML_TEST(block, DRC_SR, H_MASK);
1044 		UML_JMPc(block, uml::COND_Z, no_exception = compiler.m_labelnum++);
1045 		UML_TEST(block, DRC_SR, S_MASK);
1046 		UML_JMPc(block, uml::COND_NZ, no_exception);
1047 		UML_EXH(block, *m_exception[EXCEPTION_PRIVILEGE_ERROR], 0);
1048 		UML_LABEL(block, no_exception);
1049 	}
1050 
1051 	if (SRC_GLOBAL)
1052 	{
1053 		generate_get_global_register(block, compiler, desc);
1054 		if (!DST_GLOBAL)
1055 			UML_ROLAND(block, I1, DRC_SR, 7, 0x7f);
1056 	}
1057 	else
1058 	{
1059 		UML_ROLAND(block, I1, DRC_SR, 7, 0x7f);
1060 		UML_ADD(block, I2, I1, src_code);
1061 		UML_AND(block, I2, I2, 0x3f);
1062 		UML_LOAD(block, I5, (void *)m_core->local_regs, I2, SIZE_DWORD, SCALE_x4);
1063 	}
1064 
1065 	UML_AND(block, DRC_SR, DRC_SR, ~(Z_MASK | N_MASK));
1066 	UML_TEST(block, I5, ~0);
1067 	UML_SETc(block, uml::COND_Z, I2);
1068 	UML_ROLINS(block, DRC_SR, I2, Z_SHIFT, Z_MASK);
1069 	UML_ROLINS(block, DRC_SR, I5, 3, N_MASK);
1070 
1071 	int done = compiler.m_labelnum++;
1072 	if (DST_GLOBAL)
1073 	{
1074 		UML_TEST(block, DRC_SR, H_MASK);
1075 		UML_MOVc(block, uml::COND_NZ, I4, 16 + dst_code);
1076 		UML_MOVc(block, uml::COND_Z, I4, dst_code);
1077 		UML_AND(block, DRC_SR, DRC_SR, ~H_MASK);
1078 		UML_MOV(block, I3, I4);
1079 		generate_set_global_register(block, compiler, desc);
1080 		UML_CMP(block, I3, 0);
1081 		UML_JMPc(block, uml::COND_NE, done);
1082 		generate_branch(block, desc->targetpc, desc);
1083 	}
1084 	else
1085 	{
1086 		UML_AND(block, DRC_SR, DRC_SR, ~H_MASK);
1087 		UML_ADD(block, I2, I1, dst_code);
1088 		UML_AND(block, I2, I2, 0x3f);
1089 		UML_STORE(block, (void *)m_core->local_regs, I2, I5, SIZE_DWORD, SCALE_x4);
1090 	}
1091 
1092 	UML_LABEL(block, done);
1093 }
1094 
1095 
1096 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::reg_bank SRC_GLOBAL>
1097 void hyperstone_device::generate_add(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
1098 {
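	// ADD: dst = dst + src (SR as source contributes only its C bit); C/V/Z/N are
	// recomputed, and a PC destination clears M and branches.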
1099 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
1100 
1101 	uint16_t op = desc->opptr.w[0];
1102 	const uint32_t src_code = op & 0xf;
1103 	const uint32_t dst_code = (op & 0xf0) >> 4;
1104 
1105 	generate_check_delay_pc(block, compiler, desc);
1106 
1107 	if (!SRC_GLOBAL || !DST_GLOBAL)
1108 		UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
1109 
1110 	if (SRC_GLOBAL)
1111 	{
1112 		if (src_code == SR_REGISTER)
1113 			UML_AND(block, I0, DRC_SR, 1);
1114 		else
1115 			UML_LOAD(block, I0, (void *)m_core->global_regs, src_code, SIZE_DWORD, SCALE_x4);
1116 	}
1117 	else
1118 	{
1119 		UML_ADD(block, I2, I3, src_code);
1120 		UML_AND(block, I2, I2, 0x3f);
1121 		UML_LOAD(block, I0, (void *)m_core->local_regs, I2, SIZE_DWORD, SCALE_x4);
1122 	}
1123 
1124 	if (DST_GLOBAL)
1125 	{
1126 		UML_LOAD(block, I1, (void *)m_core->global_regs, dst_code, SIZE_DWORD, SCALE_x4);
1127 	}
1128 	else
1129 	{
1130 		UML_ADD(block, I3, I3, dst_code);
1131 		UML_AND(block, I3, I3, 0x3f);
1132 		UML_LOAD(block, I1, (void *)m_core->local_regs, I3, SIZE_DWORD, SCALE_x4);
1133 	}
1134 
1135 	UML_ADD(block, I2, I0, I1);
1136 
1137 	UML_SETc(block, uml::COND_C, I6);
1138 
1139 	UML_XOR(block, I4, I0, I2);
1140 	UML_XOR(block, I5, I1, I2);
1141 	UML_AND(block, I4, I4, I5);
1142 	UML_ROLINS(block, I6, I4, 4, V_MASK);
1143 
1144 	UML_TEST(block, I2, ~0);
1145 	UML_SETc(block, uml::COND_Z, I4);
1146 	UML_ROLINS(block, I6, I4, Z_SHIFT, Z_MASK);
1147 	UML_ROLINS(block, I6, I2, 3, N_MASK);
1148 	UML_ROLINS(block, DRC_SR, I6, 0, (V_MASK | N_MASK | Z_MASK | C_MASK));
1149 
1150 	if (DST_GLOBAL)
1151 	{
1152 		if (dst_code < 2)
1153 		{
1154 			UML_MOV(block, I4, dst_code);
1155 			UML_MOV(block, I5, I2);
1156 			generate_set_global_register(block, compiler, desc);
1157 
1158 			if (dst_code == PC_REGISTER)
1159 			{
1160 				UML_AND(block, DRC_SR, DRC_SR, ~M_MASK);
1161 				generate_branch(block, desc->targetpc, desc);
1162 			}
1163 		}
1164 		else
1165 		{
1166 			UML_STORE(block, (void *)m_core->global_regs, dst_code, I2, SIZE_DWORD, SCALE_x4);
1167 		}
1168 	}
1169 	else
1170 	{
1171 		UML_STORE(block, (void *)m_core->local_regs, I3, I2, SIZE_DWORD, SCALE_x4);
1172 	}
1173 }
1174 
1175 
1176 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::reg_bank SRC_GLOBAL>
1177 void hyperstone_device::generate_adds(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
1178 {
1179 	printf("Unimplemented: generate_adds (%08x)\n", desc->pc);
1180 	fflush(stdout);
1181 	fatalerror(" ");
1182 }
1183 
1184 
1185 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::reg_bank SRC_GLOBAL>
1186 void hyperstone_device::generate_cmpb(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
1187 {
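	// CMPB: bitwise test of dst against src, updating only the Z flag.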
1188 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
1189 
1190 	uint16_t op = desc->opptr.w[0];
1191 	const uint32_t src_code = op & 0xf;
1192 	const uint32_t dst_code = (op & 0xf0) >> 4;
1193 
1194 	generate_check_delay_pc(block, compiler, desc);
1195 
1196 	if (!SRC_GLOBAL || !DST_GLOBAL)
1197 		UML_ROLAND(block, I2, DRC_SR, 7, 0x7f);
1198 
1199 	if (SRC_GLOBAL)
1200 	{
1201 		UML_LOAD(block, I0, (void *)m_core->global_regs, src_code, SIZE_DWORD, SCALE_x4);
1202 	}
1203 	else
1204 	{
1205 		UML_ADD(block, I1, I2, src_code);
1206 		UML_AND(block, I1, I1, 0x3f);
1207 		UML_LOAD(block, I0, (void *)m_core->local_regs, I1, SIZE_DWORD, SCALE_x4);
1208 	}
1209 
1210 	if (DST_GLOBAL)
1211 	{
1212 		UML_LOAD(block, I1, (void *)m_core->global_regs, dst_code, SIZE_DWORD, SCALE_x4);
1213 	}
1214 	else
1215 	{
1216 		UML_ADD(block, I2, I2, dst_code);
1217 		UML_AND(block, I2, I2, 0x3f);
1218 		UML_LOAD(block, I1, (void *)m_core->local_regs, I2, SIZE_DWORD, SCALE_x4);
1219 	}
1220 
1221 	UML_TEST(block, I1, I0);
1222 	UML_SETc(block, uml::COND_Z, I0);
1223 	UML_ROLINS(block, DRC_SR, I0, Z_SHIFT, Z_MASK);
1224 }
1225 
1226 
1227 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::reg_bank SRC_GLOBAL>
1228 void hyperstone_device::generate_subc(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
1229 {
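	// SUBC: subtract the source plus carry (just the carry when the source is SR)
	// from the destination; Z is only left set when it was already set and the new
	// result is zero.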
1230 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
1231 
1232 	uint16_t op = desc->opptr.w[0];
1233 	const uint32_t src_code = op & 0xf;
1234 	const uint32_t dst_code = (op & 0xf0) >> 4;
1235 
1236 	generate_check_delay_pc(block, compiler, desc);
1237 
1238 	if (!SRC_GLOBAL || !DST_GLOBAL)
1239 		UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
1240 
1241 	UML_AND(block, I0, DRC_SR, C_MASK);
1242 #ifndef PTR64
1243 	UML_DAND(block, I0, I0, 0x00000000ffffffffULL);
1244 #endif
1245 
1246 	if (SRC_GLOBAL)
1247 	{
1248 		if (src_code != SR_REGISTER)
1249 		{
1250 			UML_LOAD(block, I2, (void *)m_core->global_regs, src_code, SIZE_DWORD, SCALE_x4);
1251 #ifndef PTR64
1252 			UML_DAND(block, I2, I2, 0x00000000ffffffffULL);
1253 #endif
1254 			UML_DADD(block, I0, I2, I0);
1255 		}
1256 	}
1257 	else
1258 	{
1259 		UML_ADD(block, I2, I3, src_code);
1260 		UML_AND(block, I2, I2, 0x3f);
1261 		UML_LOAD(block, I2, (void *)m_core->local_regs, I2, SIZE_DWORD, SCALE_x4);
1262 #ifndef PTR64
1263 		UML_DAND(block, I2, I2, 0x00000000ffffffffULL);
1264 #endif
1265 		UML_DADD(block, I0, I2, I0);
1266 	}
1267 
1268 	if (DST_GLOBAL)
1269 	{
1270 		UML_LOAD(block, I1, (void *)m_core->global_regs, dst_code, SIZE_DWORD, SCALE_x4);
1271 	}
1272 	else
1273 	{
1274 		UML_ADD(block, I2, I3, dst_code);
1275 		UML_AND(block, I4, I2, 0x3f);
1276 		UML_LOAD(block, I1, (void *)m_core->local_regs, I4, SIZE_DWORD, SCALE_x4);
1277 	}
1278 
1279 #ifndef PTR64
1280 	UML_DAND(block, I1, I1, 0x00000000ffffffffULL);
1281 #endif
1282 
1283 	UML_AND(block, I6, DRC_SR, Z_MASK);
1284 	UML_AND(block, I5, DRC_SR, ~(C_MASK | V_MASK | Z_MASK | N_MASK));
1285 
1286 	UML_DSUB(block, I2, I1, I0);
1287 	UML_DROLINS(block, I5, I2, 32, C_MASK);
1288 	UML_XOR(block, I2, I2, I1); // tmp ^ dreg
1289 	UML_XOR(block, I3, I1, I0); // dreg ^ sreg_c
1290 	UML_AND(block, I3, I2, I3); // (tmp ^ dreg) ^ (dreg ^ sreg_c)
1291 	UML_ROLINS(block, I5, I3, 4, V_MASK);
1292 
1293 	UML_SUB(block, I1, I1, I0);
1294 	UML_TEST(block, I1, ~0);
1295 	UML_MOVc(block, uml::COND_Z, I2, Z_MASK);
1296 	UML_MOVc(block, uml::COND_NZ, I2, 0);
1297 	UML_AND(block, I2, I2, I6); // old_z && dreg == 0
1298 	UML_OR(block, I5, I5, I2);
1299 	UML_ROLINS(block, I5, I1, 3, N_MASK);
1300 	UML_ROLINS(block, DRC_SR, I5, 0, (C_MASK | V_MASK | Z_MASK | N_MASK));
1301 
1302 	if (DST_GLOBAL)
1303 	{
1304 		if (dst_code < 2)
1305 		{
1306 			UML_MOV(block, I4, dst_code);
1307 			UML_MOV(block, I5, I1);
1308 			generate_set_global_register(block, compiler, desc);
1309 
1310 			if (dst_code == PC_REGISTER)
1311 				generate_branch(block, desc->targetpc, desc);
1312 		}
1313 		else
1314 		{
1315 			UML_STORE(block, (void *)m_core->global_regs, dst_code, I1, SIZE_DWORD, SCALE_x4);
1316 		}
1317 	}
1318 	else
1319 	{
1320 		UML_STORE(block, (void *)m_core->local_regs, I4, I1, SIZE_DWORD, SCALE_x4);
1321 	}
1322 }
1323 
1324 
1325 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::reg_bank SRC_GLOBAL>
1326 void hyperstone_device::generate_sub(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
1327 {
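	// SUB: dst = dst - src (SR as source contributes only its C bit); the 64-bit
	// subtraction provides the borrow for C, and V/Z/N are recomputed.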
1328 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
1329 
1330 	uint16_t op = desc->opptr.w[0];
1331 	const uint32_t src_code = op & 0xf;
1332 	const uint32_t dst_code = (op & 0xf0) >> 4;
1333 
1334 	generate_check_delay_pc(block, compiler, desc);
1335 
1336 	if (!SRC_GLOBAL || !DST_GLOBAL)
1337 		UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
1338 
1339 	if (SRC_GLOBAL)
1340 	{
1341 		if (src_code == SR_REGISTER)
1342 			UML_AND(block, I0, DRC_SR, 1);
1343 		else
1344 			UML_LOAD(block, I0, (void *)m_core->global_regs, src_code, SIZE_DWORD, SCALE_x4);
1345 	}
1346 	else
1347 	{
1348 		UML_ADD(block, I2, I3, src_code);
1349 		UML_AND(block, I4, I2, 0x3f);
1350 		UML_LOAD(block, I0, (void *)m_core->local_regs, I4, SIZE_DWORD, SCALE_x4);
1351 	}
1352 
1353 	if (DST_GLOBAL)
1354 	{
1355 		UML_LOAD(block, I1, (void *)m_core->global_regs, dst_code, SIZE_DWORD, SCALE_x4);
1356 	}
1357 	else
1358 	{
1359 		UML_ADD(block, I2, I3, dst_code);
1360 		UML_AND(block, I4, I2, 0x3f);
1361 		UML_LOAD(block, I1, (void *)m_core->local_regs, I4, SIZE_DWORD, SCALE_x4);
1362 	}
1363 
1364 #ifndef PTR64
1365 	UML_DAND(block, I0, I0, 0x00000000ffffffffULL);
1366 	UML_DAND(block, I1, I1, 0x00000000ffffffffULL);
1367 #endif
1368 
1369 	UML_DSUB(block, I2, I1, I0);
1370 
1371 	UML_AND(block, DRC_SR, DRC_SR, ~(C_MASK | V_MASK | Z_MASK | N_MASK));
1372 
1373 	UML_DTEST(block, I2, 0x100000000ULL);
1374 	UML_SETc(block, uml::COND_NZ, I4);
1375 	UML_ROLINS(block, DRC_SR, I4, 0, C_MASK);
1376 
1377 	UML_XOR(block, I4, I1, I2);
1378 	UML_XOR(block, I5, I0, I1);
1379 	UML_AND(block, I6, I4, I5);
1380 	UML_ROLINS(block, DRC_SR, I6, 4, V_MASK);
1381 
1382 	UML_SUB(block, I2, I1, I0);
1383 
1384 	UML_TEST(block, I2, ~0);
1385 	UML_SETc(block, uml::COND_Z, I4);
1386 	UML_ROLINS(block, DRC_SR, I4, Z_SHIFT, Z_MASK);
1387 	UML_ROLINS(block, DRC_SR, I2, 3, N_MASK);
1388 
1389 	if (DST_GLOBAL)
1390 	{
1391 		if (dst_code < 2)
1392 		{
1393 			UML_MOV(block, I4, dst_code);
1394 			UML_MOV(block, I5, I2);
1395 			generate_set_global_register(block, compiler, desc);
1396 
1397 			if (dst_code == PC_REGISTER)
1398 			{
1399 				UML_AND(block, DRC_SR, DRC_SR, ~M_MASK);
1400 				generate_branch(block, desc->targetpc, desc);
1401 			}
1402 		}
1403 		else
1404 		{
1405 			UML_STORE(block, (void *)m_core->global_regs, dst_code, I2, SIZE_DWORD, SCALE_x4);
1406 		}
1407 	}
1408 	else
1409 	{
1410 		UML_ADD(block, I4, I3, dst_code);
1411 		UML_AND(block, I5, I4, 0x3f);
1412 		UML_STORE(block, (void *)m_core->local_regs, I5, I2, SIZE_DWORD, SCALE_x4);
1413 	}
1414 }
1415 
1416 
1417 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::reg_bank SRC_GLOBAL>
1418 void hyperstone_device::generate_subs(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
1419 {
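	// SUBS: signed subtraction; when the result overflows (V set), ILC is updated
	// and an exception is generated via generate_trap_exception_or_int.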
1420 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
1421 
1422 	uint16_t op = desc->opptr.w[0];
1423 	const uint32_t src_code = op & 0xf;
1424 	const uint32_t dst_code = (op & 0xf0) >> 4;
1425 
1426 	generate_check_delay_pc(block, compiler, desc);
1427 
1428 	if (!SRC_GLOBAL || !DST_GLOBAL)
1429 		UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
1430 
1431 	if (SRC_GLOBAL)
1432 	{
1433 		if (src_code == SR_REGISTER)
1434 			UML_AND(block, I0, DRC_SR, 1);
1435 		else
1436 			UML_LOAD(block, I0, (void *)m_core->global_regs, src_code, SIZE_DWORD, SCALE_x4);
1437 	}
1438 	else
1439 	{
1440 		UML_ADD(block, I2, I3, src_code);
1441 		UML_AND(block, I2, I2, 0x3f);
1442 		UML_LOAD(block, I0, (void *)m_core->local_regs, I2, SIZE_DWORD, SCALE_x4);
1443 	}
1444 
1445 	if (DST_GLOBAL)
1446 	{
1447 		UML_LOAD(block, I1, (void *)m_core->global_regs, dst_code, SIZE_DWORD, SCALE_x4);
1448 	}
1449 	else
1450 	{
1451 		UML_ADD(block, I2, I3, dst_code);
1452 		UML_AND(block, I2, I2, 0x3f);
1453 		UML_LOAD(block, I1, (void *)m_core->local_regs, I2, SIZE_DWORD, SCALE_x4);
1454 	}
1455 
1456 	UML_DSEXT(block, I0, I0, SIZE_DWORD);
1457 	UML_DSEXT(block, I1, I1, SIZE_DWORD);
1458 	UML_DSUB(block, I2, I1, I0);
1459 
1460 	UML_AND(block, DRC_SR, DRC_SR, ~(V_MASK | Z_MASK | N_MASK));
1461 
1462 	UML_XOR(block, I4, I1, I2);
1463 	UML_XOR(block, I5, I0, I1);
1464 	UML_AND(block, I4, I4, I5);
1465 	UML_ROLINS(block, DRC_SR, I4, 4, V_MASK);
1466 
1467 	UML_TEST(block, I2, ~0);
1468 	UML_SETc(block, uml::COND_Z, I4);
1469 	UML_ROLINS(block, DRC_SR, I4, Z_SHIFT, Z_MASK);
1470 	UML_ROLINS(block, DRC_SR, I2, 3, N_MASK);
1471 
1472 	if (DST_GLOBAL)
1473 	{
1474 		if (dst_code < 2)
1475 		{
1476 			UML_MOV(block, I4, dst_code);
1477 			UML_MOV(block, I5, I2);
1478 			generate_set_global_register(block, compiler, desc);
1479 
1480 			if (dst_code == PC_REGISTER)
1481 			{
1482 				generate_branch(block, desc->targetpc, desc);
1483 			}
1484 		}
1485 		else
1486 		{
1487 			UML_STORE(block, (void *)m_core->global_regs, dst_code, I2, SIZE_DWORD, SCALE_x4);
1488 		}
1489 	}
1490 	else
1491 	{
1492 		UML_ADD(block, I4, I3, dst_code);
1493 		UML_AND(block, I5, I4, 0x3f);
1494 		UML_STORE(block, (void *)m_core->local_regs, I5, I2, SIZE_DWORD, SCALE_x4);
1495 	}
1496 
1497 	int no_exception = compiler.m_labelnum++;
1498 	UML_TEST(block, DRC_SR, V_MASK);
1499 	UML_JMPc(block, uml::COND_Z, no_exception);
1500 	UML_ROLINS(block, DRC_SR, ((desc->length >> 1) << ILC_SHIFT), 0, ILC_MASK);
1501 	generate_trap_exception_or_int<IS_EXCEPTION>(block);
1502 	UML_LABEL(block, no_exception);
1503 }
1504 
1505 
1506 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::reg_bank SRC_GLOBAL>
1507 void hyperstone_device::generate_addc(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
1508 {
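	// ADDC: dst = dst + src + C (just C when the source is SR); Z is only left set
	// when it was already set and the result is zero.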
1509 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
1510 
1511 	uint16_t op = desc->opptr.w[0];
1512 	const uint32_t src_code = op & 0xf;
1513 	const uint32_t dst_code = (op & 0xf0) >> 4;
1514 
1515 	generate_check_delay_pc(block, compiler, desc);
1516 
1517 	if (!SRC_GLOBAL || !DST_GLOBAL)
1518 		UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
1519 
1520 	if (SRC_GLOBAL)
1521 	{
1522 		if (src_code == SR_REGISTER)
1523 		{
1524 			UML_AND(block, I0, DRC_SR, 1);
1525 		}
1526 		else
1527 		{
1528 			UML_LOAD(block, I0, (void *)m_core->global_regs, src_code, SIZE_DWORD, SCALE_x4);
1529 			UML_AND(block, I1, DRC_SR, 1);
1530 #ifndef PTR64
1531 			UML_DAND(block, I0, I0, 0x00000000ffffffffULL);
1532 			UML_DAND(block, I1, I1, 0x00000000ffffffffULL);
1533 #endif
1534 			UML_DADD(block, I0, I0, I1);
1535 		}
1536 	}
1537 	else
1538 	{
1539 		UML_ADD(block, I1, I3, src_code);
1540 		UML_AND(block, I1, I1, 0x3f);
1541 		UML_LOAD(block, I0, (void *)m_core->local_regs, I1, SIZE_DWORD, SCALE_x4);
1542 		UML_AND(block, I1, DRC_SR, 1);
1543 #ifndef PTR64
1544 		UML_DAND(block, I0, I0, 0x00000000ffffffffULL);
1545 		UML_DAND(block, I1, I1, 0x00000000ffffffffULL);
1546 #endif
1547 		UML_DADD(block, I0, I0, I1);
1548 	}
1549 
1550 	if (DST_GLOBAL)
1551 	{
1552 		UML_LOAD(block, I1, (void *)m_core->global_regs, dst_code, SIZE_DWORD, SCALE_x4);
1553 	}
1554 	else
1555 	{
1556 		UML_ADD(block, I3, I3, dst_code);
1557 		UML_AND(block, I3, I3, 0x3f);
1558 		UML_LOAD(block, I1, (void *)m_core->local_regs, I3, SIZE_DWORD, SCALE_x4);
1559 	}
1560 
1561 #ifndef PTR64
1562 	UML_DAND(block, I1, I1, 0x00000000ffffffffULL);
1563 #endif
1564 
1565 	UML_DADD(block, I2, I0, I1);
1566 
1567 	UML_XOR(block, I4, I0, I2);
1568 	UML_XOR(block, I5, I1, I2);
1569 	UML_AND(block, I4, I4, I5);
1570 	UML_AND(block, I4, I4, I2);
1571 	UML_ROLAND(block, I4, I4, 4, V_MASK);
1572 	UML_DROLINS(block, I4, I2, 32, C_MASK);
1573 
1574 	UML_ADD(block, I0, I0, I1);
1575 	UML_SHR(block, I1, DRC_SR, Z_SHIFT);
1576 	UML_AND(block, I1, I1, 1);
1577 	UML_TEST(block, I0, ~0);
1578 	UML_SETc(block, uml::COND_Z, I2);
1579 	UML_AND(block, I1, I1, I2);
1580 	UML_ROLINS(block, I4, I1, Z_SHIFT, Z_MASK);
1581 	UML_ROLINS(block, I4, I0, 3, N_MASK);
1582 
1583 	UML_ROLINS(block, DRC_SR, I4, 0, (C_MASK | V_MASK | Z_MASK | N_MASK));
1584 
1585 	if (DST_GLOBAL)
1586 	{
1587 		if (dst_code < 2)
1588 		{
1589 			UML_MOV(block, I4, dst_code);
1590 			UML_MOV(block, I5, I0);
1591 			generate_set_global_register(block, compiler, desc);
1592 		}
1593 		else
1594 		{
1595 			UML_STORE(block, (void *)m_core->global_regs, dst_code, I0, SIZE_DWORD, SCALE_x4);
1596 		}
1597 	}
1598 	else
1599 	{
1600 		UML_STORE(block, (void *)m_core->local_regs, I3, I0, SIZE_DWORD, SCALE_x4);
1601 	}
1602 }
1603 
1604 
1605 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::reg_bank SRC_GLOBAL>
1606 void hyperstone_device::generate_neg(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
1607 {
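	// NEG: dst = 0 - src, with C taken from the 64-bit borrow and V flagging the
	// overflow case of negating 0x80000000.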
1608 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
1609 
1610 	uint16_t op = desc->opptr.w[0];
1611 
1612 	generate_check_delay_pc(block, compiler, desc);
1613 
1614 	const uint32_t dst_code = (op & 0xf0) >> 4;
1615 	const uint32_t src_code = op & 0xf;
1616 
1617 	if (!SRC_GLOBAL || !DST_GLOBAL)
1618 		UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
1619 
1620 	if (SRC_GLOBAL)
1621 	{
1622 		UML_LOAD(block, I0, (void *)m_core->global_regs, src_code, SIZE_DWORD, SCALE_x4);
1623 	}
1624 	else
1625 	{
1626 		UML_ADD(block, I2, I3, src_code);
1627 		UML_AND(block, I1, I2, 0x3f);
1628 		UML_LOAD(block, I0, (void *)m_core->local_regs, I1, SIZE_DWORD, SCALE_x4);
1629 	}
1630 
1631 #ifndef PTR64
1632 	UML_DAND(block, I0, I0, 0x00000000ffffffffULL);
1633 #endif
1634 
1635 	UML_DSUB(block, I4, 0, I0);
1636 	UML_SUB(block, I2, 0, I0);
1637 
1638 	UML_TEST(block, I2, ~0);
1639 	UML_MOVc(block, uml::COND_Z, I6, Z_MASK);
1640 	UML_MOVc(block, uml::COND_NZ, I6, 0);
1641 
1642 	UML_AND(block, I5, DRC_SR, ~(C_MASK | V_MASK | Z_MASK | N_MASK));
1643 	UML_DROLINS(block, I6, I4, 32, C_MASK);
1644 	UML_AND(block, I1, I4, I0);
1645 	UML_ROLINS(block, I6, I1, 4, V_MASK);
1646 	UML_ROLINS(block, I6, I2, 3, N_MASK);
1647 	UML_OR(block, DRC_SR, I5, I6);
1648 
1649 	if (DST_GLOBAL)
1650 	{
1651 		if (dst_code < 2)
1652 		{
1653 			UML_MOV(block, I4, dst_code);
1654 			UML_MOV(block, I5, I2);
1655 			generate_set_global_register(block, compiler, desc);
1656 		}
1657 		else
1658 		{
1659 			UML_STORE(block, (void *)m_core->global_regs, dst_code, I2, SIZE_DWORD, SCALE_x4);
1660 		}
1661 	}
1662 	else
1663 	{
1664 		UML_ADD(block, I1, I3, dst_code);
1665 		UML_AND(block, I4, I1, 0x3f);
1666 		UML_STORE(block, (void *)m_core->local_regs, I4, I2, SIZE_DWORD, SCALE_x4);
1667 	}
1668 }
1669 
1670 
1671 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::reg_bank SRC_GLOBAL>
1672 void hyperstone_device::generate_negs(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
1673 {
1674 	printf("Unimplemented: generate_negs (%08x)\n", desc->pc);
1675 	fflush(stdout);
1676 	fatalerror(" ");
1677 }
1678 
1679 
1680 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::reg_bank SRC_GLOBAL>
1681 void hyperstone_device::generate_and(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
1682 {
1683 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
1684 
1685 	uint16_t op = desc->opptr.w[0];
1686 
1687 	generate_check_delay_pc(block, compiler, desc);
1688 
1689 	const uint32_t dst_code = (op & 0xf0) >> 4;
1690 	const uint32_t src_code = op & 0xf;
1691 
1692 	if (!SRC_GLOBAL || !DST_GLOBAL)
1693 		UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
1694 
1695 	if (SRC_GLOBAL)
1696 	{
1697 		UML_LOAD(block, I0, (void *)m_core->global_regs, src_code, SIZE_DWORD, SCALE_x4);
1698 	}
1699 	else
1700 	{
1701 		UML_ADD(block, I2, I3, src_code);
1702 		UML_AND(block, I1, I2, 0x3f);
1703 		UML_LOAD(block, I0, (void *)m_core->local_regs, I1, SIZE_DWORD, SCALE_x4);
1704 	}
1705 
1706 	if (DST_GLOBAL)
1707 	{
1708 		UML_LOAD(block, I1, (void *)m_core->global_regs, dst_code, SIZE_DWORD, SCALE_x4);
1709 	}
1710 	else
1711 	{
1712 		UML_ADD(block, I2, I3, dst_code);
1713 		UML_AND(block, I4, I2, 0x3f);
1714 		UML_LOAD(block, I1, (void *)m_core->local_regs, I4, SIZE_DWORD, SCALE_x4);
1715 	}
1716 
1717 	UML_AND(block, I5, I1, I0);
1718 
1719 	UML_TEST(block, I5, ~0);
1720 	UML_MOVc(block, uml::COND_Z, I1, Z_MASK);
1721 	UML_MOVc(block, uml::COND_NZ, I1, 0);
1722 	UML_ROLINS(block, DRC_SR, I1, 0, Z_MASK);
1723 
1724 	if (DST_GLOBAL)
1725 	{
1726 		if (dst_code < 2)
1727 		{
1728 			UML_MOV(block, I4, dst_code);
1729 			generate_set_global_register(block, compiler, desc);
1730 		}
1731 		else
1732 		{
1733 			UML_STORE(block, (void *)m_core->global_regs, dst_code, I5, SIZE_DWORD, SCALE_x4);
1734 		}
1735 	}
1736 	else
1737 	{
1738 		UML_STORE(block, (void *)m_core->local_regs, I4, I5, SIZE_DWORD, SCALE_x4);
1739 	}
1740 }
1741 
1742 
1743 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::reg_bank SRC_GLOBAL>
1744 void hyperstone_device::generate_andn(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
1745 {
1746 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
1747 
1748 	uint16_t op = desc->opptr.w[0];
1749 
1750 	generate_check_delay_pc(block, compiler, desc);
1751 
1752 	const uint32_t dst_code = (op & 0xf0) >> 4;
1753 	const uint32_t src_code = op & 0xf;
1754 
1755 	if (!SRC_GLOBAL || !DST_GLOBAL)
1756 		UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
1757 
1758 	if (SRC_GLOBAL)
1759 	{
1760 		UML_LOAD(block, I0, (void *)m_core->global_regs, src_code, SIZE_DWORD, SCALE_x4);
1761 	}
1762 	else
1763 	{
1764 		UML_ADD(block, I2, I3, src_code);
1765 		UML_AND(block, I1, I2, 0x3f);
1766 		UML_LOAD(block, I0, (void *)m_core->local_regs, I1, SIZE_DWORD, SCALE_x4);
1767 	}
1768 
1769 	if (DST_GLOBAL)
1770 	{
1771 		UML_LOAD(block, I1, (void *)m_core->global_regs, dst_code, SIZE_DWORD, SCALE_x4);
1772 	}
1773 	else
1774 	{
1775 		UML_ADD(block, I2, I3, dst_code);
1776 		UML_AND(block, I4, I2, 0x3f);
1777 		UML_LOAD(block, I1, (void *)m_core->local_regs, I4, SIZE_DWORD, SCALE_x4);
1778 	}
1779 
1780 	UML_XOR(block, I2, I0, ~0);
1781 	UML_AND(block, I5, I1, I2);
1782 
1783 	UML_TEST(block, I5, ~0);
1784 	UML_MOVc(block, uml::COND_Z, I1, Z_MASK);
1785 	UML_MOVc(block, uml::COND_NZ, I1, 0);
1786 	UML_ROLINS(block, DRC_SR, I1, 0, Z_MASK);
1787 
1788 	if (DST_GLOBAL)
1789 	{
1790 		if (dst_code < 2)
1791 		{
1792 			UML_MOV(block, I4, dst_code);
1793 			generate_set_global_register(block, compiler, desc);
1794 		}
1795 		else
1796 		{
1797 			UML_STORE(block, (void *)m_core->global_regs, dst_code, I5, SIZE_DWORD, SCALE_x4);
1798 		}
1799 	}
1800 	else
1801 	{
1802 		UML_STORE(block, (void *)m_core->local_regs, I4, I5, SIZE_DWORD, SCALE_x4);
1803 	}
1804 }
1805 
1806 
1807 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::reg_bank SRC_GLOBAL>
1808 void hyperstone_device::generate_or(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
1809 {
1810 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
1811 
1812 	uint16_t op = desc->opptr.w[0];
1813 
1814 	generate_check_delay_pc(block, compiler, desc);
1815 
1816 	const uint32_t dst_code = (op & 0xf0) >> 4;
1817 	const uint32_t src_code = op & 0xf;
1818 
1819 	if (!SRC_GLOBAL || !DST_GLOBAL)
1820 		UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
1821 
1822 	if (SRC_GLOBAL)
1823 	{
1824 		UML_LOAD(block, I0, (void *)m_core->global_regs, src_code, SIZE_DWORD, SCALE_x4);
1825 	}
1826 	else
1827 	{
1828 		UML_ADD(block, I2, I3, src_code);
1829 		UML_AND(block, I1, I2, 0x3f);
1830 		UML_LOAD(block, I0, (void *)m_core->local_regs, I1, SIZE_DWORD, SCALE_x4);
1831 	}
1832 
1833 	if (DST_GLOBAL)
1834 	{
1835 		UML_LOAD(block, I1, (void *)m_core->global_regs, dst_code, SIZE_DWORD, SCALE_x4);
1836 	}
1837 	else
1838 	{
1839 		UML_ADD(block, I2, I3, dst_code);
1840 		UML_AND(block, I4, I2, 0x3f);
1841 		UML_LOAD(block, I1, (void *)m_core->local_regs, I4, SIZE_DWORD, SCALE_x4);
1842 	}
1843 
1844 	UML_OR(block, I5, I1, I0);
1845 
1846 	UML_TEST(block, I5, ~0);
1847 	UML_MOVc(block, uml::COND_Z, I1, Z_MASK);
1848 	UML_MOVc(block, uml::COND_NZ, I1, 0);
1849 	UML_ROLINS(block, DRC_SR, I1, 0, Z_MASK);
1850 
1851 	if (DST_GLOBAL)
1852 	{
1853 		if (dst_code < 2)
1854 		{
1855 			UML_MOV(block, I4, dst_code);
1856 			generate_set_global_register(block, compiler, desc);
1857 		}
1858 		else
1859 		{
1860 			UML_STORE(block, (void *)m_core->global_regs, dst_code, I5, SIZE_DWORD, SCALE_x4);
1861 		}
1862 	}
1863 	else
1864 	{
1865 		UML_STORE(block, (void *)m_core->local_regs, I4, I5, SIZE_DWORD, SCALE_x4);
1866 	}
1867 }
1868 
1869 
1870 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::reg_bank SRC_GLOBAL>
1871 void hyperstone_device::generate_xor(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
1872 {
1873 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
1874 
1875 	uint16_t op = desc->opptr.w[0];
1876 
1877 	generate_check_delay_pc(block, compiler, desc);
1878 
1879 	const uint32_t dst_code = (op & 0xf0) >> 4;
1880 	const uint32_t src_code = op & 0xf;
1881 
1882 	if (!SRC_GLOBAL || !DST_GLOBAL)
1883 		UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
1884 
1885 	if (SRC_GLOBAL)
1886 	{
1887 		UML_LOAD(block, I0, (void *)m_core->global_regs, src_code, SIZE_DWORD, SCALE_x4);
1888 	}
1889 	else
1890 	{
1891 		UML_ADD(block, I2, I3, src_code);
1892 		UML_AND(block, I1, I2, 0x3f);
1893 		UML_LOAD(block, I0, (void *)m_core->local_regs, I1, SIZE_DWORD, SCALE_x4);
1894 	}
1895 
1896 	if (DST_GLOBAL)
1897 	{
1898 		UML_LOAD(block, I1, (void *)m_core->global_regs, dst_code, SIZE_DWORD, SCALE_x4);
1899 	}
1900 	else
1901 	{
1902 		UML_ADD(block, I2, I3, dst_code);
1903 		UML_AND(block, I4, I2, 0x3f);
1904 		UML_LOAD(block, I1, (void *)m_core->local_regs, I4, SIZE_DWORD, SCALE_x4);
1905 	}
1906 
1907 	UML_XOR(block, I5, I1, I0);
1908 
1909 	UML_TEST(block, I5, ~0);
1910 	UML_MOVc(block, uml::COND_Z, I1, Z_MASK);
1911 	UML_MOVc(block, uml::COND_NZ, I1, 0);
1912 	UML_ROLINS(block, DRC_SR, I1, 0, Z_MASK);
1913 
1914 	if (DST_GLOBAL)
1915 	{
1916 		if (dst_code < 2)
1917 		{
1918 			UML_MOV(block, I4, dst_code);
1919 			generate_set_global_register(block, compiler, desc);
1920 		}
1921 		else
1922 		{
1923 			UML_STORE(block, (void *)m_core->global_regs, dst_code, I5, SIZE_DWORD, SCALE_x4);
1924 		}
1925 	}
1926 	else
1927 	{
1928 		UML_STORE(block, (void *)m_core->local_regs, I4, I5, SIZE_DWORD, SCALE_x4);
1929 	}
1930 }
1931 
1932 
1933 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::reg_bank SRC_GLOBAL>
1934 void hyperstone_device::generate_not(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
1935 {
1936 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
1937 
1938 	uint16_t op = desc->opptr.w[0];
1939 
1940 	generate_check_delay_pc(block, compiler, desc);
1941 
1942 	const uint32_t dst_code = (op & 0xf0) >> 4;
1943 	const uint32_t src_code = op & 0xf;
1944 
1945 	if (!SRC_GLOBAL || !DST_GLOBAL)
1946 		UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
1947 
1948 	if (SRC_GLOBAL)
1949 		UML_LOAD(block, I0, (void *)m_core->global_regs, src_code, SIZE_DWORD, SCALE_x4);
1950 	else
1951 	{
1952 		UML_ADD(block, I2, I3, src_code);
1953 		UML_AND(block, I1, I2, 0x3f);
1954 		UML_LOAD(block, I0, (void *)m_core->local_regs, I1, SIZE_DWORD, SCALE_x4);
1955 	}
1956 
1957 	UML_XOR(block, I5, I0, ~0);
1958 
1959 	UML_TEST(block, I5, ~0);
1960 	UML_MOVc(block, uml::COND_Z, I1, Z_MASK);
1961 	UML_MOVc(block, uml::COND_NZ, I1, 0);
1962 	UML_ROLINS(block, DRC_SR, I1, 0, Z_MASK);
1963 
1964 	if (DST_GLOBAL)
1965 	{
1966 		if (dst_code < 2)
1967 		{
1968 			UML_MOV(block, I4, dst_code);
1969 			generate_set_global_register(block, compiler, desc);
1970 		}
1971 		else
1972 		{
1973 			UML_STORE(block, (void *)m_core->global_regs, dst_code, I5, SIZE_DWORD, SCALE_x4);
1974 		}
1975 	}
1976 	else
1977 	{
1978 		UML_ADD(block, I2, I3, dst_code);
1979 		UML_AND(block, I4, I2, 0x3f);
1980 		UML_STORE(block, (void *)m_core->local_regs, I4, I5, SIZE_DWORD, SCALE_x4);
1981 	}
1982 }
1983 
1984 
1985 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::imm_size IMM_LONG>
1986 void hyperstone_device::generate_cmpi(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
1987 {
1988 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
1989 
1990 	uint16_t op = desc->opptr.w[0];
1991 	const uint32_t dst_code = (op & 0xf0) >> 4;
1992 
1993 	if (IMM_LONG)
1994 	{
1995 		generate_decode_immediate_s(block, compiler, desc); // I1 <-- imm32
1996 	}
1997 	else
1998 	{
1999 		UML_MOV(block, I1, op & 0xf);
2000 	}
2001 
2002 	generate_check_delay_pc(block, compiler, desc);
2003 
2004 	if (DST_GLOBAL)
2005 	{
2006 		UML_LOAD(block, I2, (void *)m_core->global_regs, dst_code, SIZE_DWORD, SCALE_x4);
2007 	}
2008 	else
2009 	{
2010 		UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
2011 		UML_ADD(block, I2, I3, dst_code);
2012 		UML_AND(block, I4, I2, 0x3f);
2013 		UML_LOAD(block, I2, (void *)m_core->local_regs, I4, SIZE_DWORD, SCALE_x4);
2014 	}
2015 
2016 	UML_AND(block, DRC_SR, DRC_SR, ~(V_MASK | Z_MASK | N_MASK | C_MASK));
2017 
2018 #ifndef PTR64
2019 	UML_DAND(block, I1, I1, 0x00000000ffffffffULL);
2020 	UML_DAND(block, I2, I2, 0x00000000ffffffffULL);
2021 #endif
2022 
2023 	UML_DSUB(block, I0, I2, I1);
2024 
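	// Overflow for the compare is (dst ^ imm) & (dst ^ (dst - imm)) evaluated at the sign bit;
	// Z and C then come from an unsigned compare (equal / below) and N from the signed
	// comparison result rather than from the sign bit of the difference.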
2025 	int no_v;
2026 	UML_XOR(block, I0, I0, I2);
2027 	UML_XOR(block, I3, I1, I2);
2028 	UML_AND(block, I0, I0, I3);
2029 	UML_TEST(block, I0, 0x80000000);
2030 	UML_JMPc(block, uml::COND_Z, no_v = compiler.m_labelnum++);
2031 	UML_OR(block, DRC_SR, DRC_SR, V_MASK);
2032 	UML_LABEL(block, no_v);
2033 
2034 	int no_n;
2035 	UML_MOV(block, I3, 0);
2036 	UML_CMP(block, I2, I1);
2037 	UML_MOVc(block, uml::COND_E, I3, Z_MASK);
2038 	UML_MOVc(block, uml::COND_B, I3, C_MASK);
2039 	UML_JMPc(block, uml::COND_GE, no_n = compiler.m_labelnum++);
2040 	UML_OR(block, I3, I3, N_MASK);
2041 	UML_LABEL(block, no_n);
2042 
2043 	UML_OR(block, DRC_SR, DRC_SR, I3);
2044 }
2045 
2046 
2047 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::imm_size IMM_LONG>
2048 void hyperstone_device::generate_movi(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
2049 {
2050 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
2051 
2052 	uint16_t op = desc->opptr.w[0];
2053 	const uint32_t dst_code = (op & 0xf0) >> 4;
2054 	const uint32_t src_code = op & 0xf;
2055 
2056 	if (IMM_LONG)
2057 		generate_decode_immediate_s(block, compiler, desc);
2058 	else
2059 		UML_MOV(block, I1, src_code);
2060 
2061 	generate_check_delay_pc(block, compiler, desc);
2062 
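	// For a global destination, a set H flag with the supervisor bit (S) clear raises a
	// privilege-error exception instead of performing the write; otherwise execution falls
	// through to the normal flag update below.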
2063 	int done, no_exception;
2064 	if (DST_GLOBAL)
2065 	{
2066 		UML_TEST(block, DRC_SR, H_MASK);
2067 		UML_JMPc(block, uml::COND_Z, no_exception = compiler.m_labelnum++);
2068 		UML_TEST(block, DRC_SR, S_MASK);
2069 		UML_JMPc(block, uml::COND_NZ, no_exception);
2070 		UML_EXH(block, *m_exception[EXCEPTION_PRIVILEGE_ERROR], 0);
2071 		UML_JMP(block, done = compiler.m_labelnum++);
2072 		UML_LABEL(block, no_exception);
2073 	}
2074 
2075 	UML_AND(block, DRC_SR, DRC_SR, ~(Z_MASK | N_MASK));
2076 
2077 	int no_z;
2078 	UML_TEST(block, I1, ~0);
2079 	UML_JMPc(block, uml::COND_NZ, no_z = compiler.m_labelnum++);
2080 	UML_OR(block, DRC_SR, DRC_SR, Z_MASK);
2081 	UML_LABEL(block, no_z);
2082 
2083 	int no_n;
2084 	UML_TEST(block, I1, 0x80000000);
2085 	UML_JMPc(block, uml::COND_Z, no_n = compiler.m_labelnum++);
2086 	UML_OR(block, DRC_SR, DRC_SR, N_MASK);
2087 	UML_LABEL(block, no_n);
2088 
2089 #if MISSIONCRAFT_FLAGS
2090 	UML_AND(block, DRC_SR, DRC_SR, ~V_MASK);
2091 #endif
2092 
2093 	if (DST_GLOBAL)
2094 	{
2095 		UML_TEST(block, DRC_SR, H_MASK);
2096 		UML_MOVc(block, uml::COND_NZ, I4, dst_code + 16);
2097 		UML_MOVc(block, uml::COND_Z, I4, dst_code);
2098 		UML_AND(block, DRC_SR, DRC_SR, ~H_MASK);
2099 		UML_MOV(block, I5, I1);
2100 		generate_set_global_register(block, compiler, desc);
2101 
2102 		UML_TEST(block, op, 0xf0);
2103 		UML_JMPc(block, uml::COND_NZ, done);
2104 		UML_AND(block, DRC_SR, DRC_SR, ~M_MASK);
2105 		generate_branch(block, desc->targetpc, desc);
2106 
2107 		UML_LABEL(block, done);
2108 	}
2109 	else
2110 	{
2111 		UML_AND(block, DRC_SR, DRC_SR, ~H_MASK);
2112 		UML_ROLAND(block, I2, DRC_SR, 7, 0x7f);
2113 		UML_ADD(block, I0, I2, dst_code);
2114 		UML_AND(block, I0, I0, 0x3f);
2115 		UML_STORE(block, (void *)m_core->local_regs, I0, I1, SIZE_DWORD, SCALE_x4);
2116 	}
2117 }
2118 
2119 
2120 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::imm_size IMM_LONG>
2121 void hyperstone_device::generate_addi(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
2122 {
2123 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
2124 
2125 	uint16_t op = desc->opptr.w[0];
2126 	const uint32_t dst_code = (op & 0xf0) >> 4;
2127 	const uint32_t src_code = op & 0xf;
2128 
2129 	if (IMM_LONG)
2130 	{
2131 		generate_decode_immediate_s(block, compiler, desc); // I1 <-- imm32
2132 	}
2133 	else
2134 	{
2135 		UML_MOV(block, I1, src_code);
2136 	}
2137 
2138 	generate_check_delay_pc(block, compiler, desc);
2139 
2140 	if (DST_GLOBAL)
2141 	{
2142 		UML_LOAD(block, I0, (void *)m_core->global_regs, dst_code, SIZE_DWORD, SCALE_x4);
2143 	}
2144 	else
2145 	{
2146 		UML_ROLAND(block, I2, DRC_SR, 7, 0x7f);
2147 		UML_ADD(block, I2, I2, dst_code);
2148 		UML_AND(block, I2, I2, 0x3f);
2149 		UML_LOAD(block, I0, (void *)m_core->local_regs, I2, SIZE_DWORD, SCALE_x4);
2150 	}
2151 
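	// When the immediate field is zero (op & 0x10f == 0), the value added is taken from the
	// carry flag instead: C is passed through only if Z is clear or bit 0 of the destination is
	// set, which appears to implement the special add-with-carry form of ADDI.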
2152 	if (!(op & 0x10f))
2153 	{
2154 		UML_ROLAND(block, I3, DRC_SR, 32-Z_SHIFT, 1);
2155 		UML_XOR(block, I3, I3, 1);
2156 		UML_AND(block, I4, I0, 1);
2157 		UML_OR(block, I3, I3, I4);
2158 		UML_AND(block, I1, DRC_SR, I3);
2159 	}
2160 
2161 #ifndef PTR64
2162 	UML_DAND(block, I0, I0, 0x00000000ffffffffULL);
2163 	UML_DAND(block, I1, I1, 0x00000000ffffffffULL);
2164 #endif
2165 
2166 	UML_DADD(block, I3, I0, I1);
2167 
2168 	UML_DROLAND(block, I6, I3, 32, C_MASK);
2169 
2170 	UML_XOR(block, I4, I0, I3);
2171 	UML_XOR(block, I5, I1, I3);
2172 	UML_AND(block, I4, I4, I5);
2173 	UML_ROLINS(block, I6, I4, 4, V_MASK);
2174 
2175 	UML_TEST(block, I3, ~0);
2176 	UML_SETc(block, uml::COND_Z, I4);
2177 	UML_ROLINS(block, I6, I4, Z_SHIFT, Z_MASK);
2178 	UML_ROLINS(block, I6, I3, 3, N_MASK);
2179 	UML_ROLINS(block, DRC_SR, I6, 0, (V_MASK | N_MASK | Z_MASK | C_MASK));
2180 
2181 	if (DST_GLOBAL)
2182 	{
2183 		if (dst_code < 2)
2184 		{
2185 			UML_MOV(block, I4, dst_code);
2186 			UML_MOV(block, I5, I3);
2187 			generate_set_global_register(block, compiler, desc);
2188 
2189 			if (dst_code == PC_REGISTER)
2190 			{
2191 				UML_AND(block, DRC_SR, DRC_SR, ~M_MASK);
2192 				generate_branch(block, desc->targetpc, desc);
2193 			}
2194 		}
2195 		else
2196 		{
2197 			UML_STORE(block, (void *)m_core->global_regs, dst_code, I3, SIZE_DWORD, SCALE_x4);
2198 		}
2199 	}
2200 	else
2201 	{
2202 		UML_STORE(block, (void *)m_core->local_regs, I2, I3, SIZE_DWORD, SCALE_x4);
2203 	}
2204 }
2205 
2206 
2207 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::imm_size IMM_LONG>
2208 void hyperstone_device::generate_addsi(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
2209 {
2210 	printf("Unimplemented: generate_addsi (%08x)\n", desc->pc);
2211 	fflush(stdout);
2212 	fatalerror(" ");
2213 }
2214 
2215 
2216 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::imm_size IMM_LONG>
2217 void hyperstone_device::generate_cmpbi(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
2218 {
2219 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
2220 
2221 	uint16_t op = desc->opptr.w[0];
2222 	const uint32_t dst_code = (op & 0xf0) >> 4;
2223 
2224 	if (!IMM_LONG)
2225 		generate_check_delay_pc(block, compiler, desc);
2226 
2227 	if (DST_GLOBAL)
2228 	{
2229 		UML_LOAD(block, I2, (void *)m_core->global_regs, dst_code, SIZE_DWORD, SCALE_x4);
2230 	}
2231 	else
2232 	{
2233 		UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
2234 		UML_ADD(block, I4, I3, dst_code);
2235 		UML_AND(block, I5, I4, 0x3f);
2236 		UML_LOAD(block, I2, (void *)m_core->local_regs, I5, SIZE_DWORD, SCALE_x4);
2237 	}
2238 
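	// The mask-width field n is rebuilt from bit 8 and the low nibble of the opcode. n == 31
	// tests the destination against the fixed mask 0x7fffffff, any other non-zero n uses the
	// decoded immediate, and n == 0 is the special "any byte zero" form handled in the else
	// branch below.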
2239 	const uint32_t n = ((op & 0x100) >> 4) | (op & 0x0f);
2240 	if (n)
2241 	{
2242 		if (n == 31)
2243 		{
2244 			if (IMM_LONG)
2245 			{
2246 				generate_ignore_immediate_s(block, desc);
2247 				generate_check_delay_pc(block, compiler, desc);
2248 			}
2249 			UML_MOV(block, I1, 0x7fffffff);
2250 		}
2251 		else
2252 		{
2253 			if (IMM_LONG)
2254 			{
2255 				generate_decode_immediate_s(block, compiler, desc);
2256 				generate_check_delay_pc(block, compiler, desc);
2257 			}
2258 			else
2259 			{
2260 				UML_MOV(block, I1, op & 0xf);
2261 			}
2262 		}
2263 
2264 		UML_AND(block, DRC_SR, DRC_SR, ~Z_MASK);
2265 		UML_TEST(block, I2, I1);
2266 		UML_SETc(block, uml::COND_Z, I3);
2267 		UML_ROLINS(block, DRC_SR, I3, Z_SHIFT, Z_MASK);
2268 	}
2269 	else
2270 	{
2271 		if (IMM_LONG)
2272 		{
2273 			generate_ignore_immediate_s(block, desc);
2274 			generate_check_delay_pc(block, compiler, desc);
2275 		}
2276 
2277 		int or_mask, done;
2278 		UML_TEST(block, I2, 0xff000000);
2279 		UML_JMPc(block, uml::COND_Z, or_mask = compiler.m_labelnum++);
2280 		UML_TEST(block, I2, 0x00ff0000);
2281 		UML_JMPc(block, uml::COND_Z, or_mask);
2282 		UML_TEST(block, I2, 0x0000ff00);
2283 		UML_JMPc(block, uml::COND_Z, or_mask);
2284 		UML_TEST(block, I2, 0x000000ff);
2285 		UML_JMPc(block, uml::COND_Z, or_mask);
2286 		UML_AND(block, DRC_SR, DRC_SR, ~Z_MASK);
2287 		UML_JMP(block, done = compiler.m_labelnum++);
2288 
2289 		UML_LABEL(block, or_mask);
2290 		UML_OR(block, DRC_SR, DRC_SR, Z_MASK);
2291 
2292 		UML_LABEL(block, done);
2293 	}
2294 }
2295 
2296 
2297 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::imm_size IMM_LONG>
2298 void hyperstone_device::generate_andni(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
2299 {
2300 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
2301 
2302 	uint16_t op = desc->opptr.w[0];
2303 	const uint32_t dst_code = (op & 0xf0) >> 4;
2304 
2305 	if (DRC_N_OP_MASK == 0x10f)
2306 		UML_MOV(block, I1, 0x7fffffff);
2307 	else if (IMM_LONG)
2308 		generate_decode_immediate_s(block, compiler, desc); // I1 <-- imm32
2309 	else
2310 		UML_MOV(block, I1, op & 0xf);
2311 
2312 	generate_check_delay_pc(block, compiler, desc);
2313 
2314 	if (DST_GLOBAL)
2315 	{
2316 		UML_LOAD(block, I2, (void *)m_core->global_regs, dst_code, SIZE_DWORD, SCALE_x4);
2317 	}
2318 	else
2319 	{
2320 		UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
2321 		UML_ADD(block, I2, I3, dst_code);
2322 		UML_AND(block, I4, I2, 0x3f);
2323 		UML_LOAD(block, I2, (void *)m_core->local_regs, I4, SIZE_DWORD, SCALE_x4);
2324 	}
2325 
2326 	UML_XOR(block, I1, I1, ~0);
2327 	UML_AND(block, I5, I2, I1);
2328 
2329 	UML_TEST(block, I5, ~0);
2330 	UML_MOVc(block, uml::COND_Z, I2, Z_MASK);
2331 	UML_MOVc(block, uml::COND_NZ, I2, 0);
2332 	UML_ROLINS(block, DRC_SR, I2, 0, Z_MASK);
2333 
2334 	if (DST_GLOBAL)
2335 	{
2336 		if (dst_code < 2)
2337 		{
2338 			UML_MOV(block, I4, dst_code);
2339 			generate_set_global_register(block, compiler, desc);
2340 			if (dst_code == PC_REGISTER)
2341 			{
2342 				generate_branch(block, desc->targetpc, desc);
2343 			}
2344 		}
2345 		else
2346 		{
2347 			UML_STORE(block, (void *)m_core->global_regs, dst_code, I5, SIZE_DWORD, SCALE_x4);
2348 		}
2349 	}
2350 	else
2351 	{
2352 		UML_ADD(block, I4, I3, dst_code);
2353 		UML_AND(block, I4, I4, 0x3f);
2354 		UML_STORE(block, (void *)m_core->local_regs, I4, I5, SIZE_DWORD, SCALE_x4);
2355 	}
2356 }
2357 
2358 
2359 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::imm_size IMM_LONG>
2360 void hyperstone_device::generate_ori(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
2361 {
2362 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
2363 
2364 	uint16_t op = desc->opptr.w[0];
2365 	const uint32_t dst_code = (op & 0xf0) >> 4;
2366 
2367 	if (IMM_LONG)
2368 	{
2369 		generate_decode_immediate_s(block, compiler, desc); // I1 <-- imm32
2370 	}
2371 	else
2372 	{
2373 		UML_MOV(block, I1, op & 0xf);
2374 	}
2375 
2376 	generate_check_delay_pc(block, compiler, desc);
2377 
2378 	if (DST_GLOBAL)
2379 	{
2380 		UML_LOAD(block, I2, (void *)m_core->global_regs, dst_code, SIZE_DWORD, SCALE_x4);
2381 	}
2382 	else
2383 	{
2384 		UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
2385 		UML_ADD(block, I2, I3, dst_code);
2386 		UML_AND(block, I4, I2, 0x3f);
2387 		UML_LOAD(block, I2, (void *)m_core->local_regs, I4, SIZE_DWORD, SCALE_x4);
2388 	}
2389 
2390 	UML_OR(block, I5, I2, I1);
2391 
2392 	UML_TEST(block, I5, ~0);
2393 	UML_MOVc(block, uml::COND_Z, I2, Z_MASK);
2394 	UML_MOVc(block, uml::COND_NZ, I2, 0);
2395 	UML_ROLINS(block, DRC_SR, I2, 0, Z_MASK);
2396 
2397 	if (DST_GLOBAL)
2398 	{
2399 		if (dst_code < 2)
2400 		{
2401 			UML_MOV(block, I4, dst_code);
2402 			generate_set_global_register(block, compiler, desc);
2403 			if (dst_code == PC_REGISTER)
2404 			{
2405 				generate_branch(block, desc->targetpc, desc);
2406 			}
2407 		}
2408 		else
2409 		{
2410 			UML_STORE(block, (void *)m_core->global_regs, dst_code, I5, SIZE_DWORD, SCALE_x4);
2411 		}
2412 	}
2413 	else
2414 	{
2415 		UML_ADD(block, I4, I3, dst_code);
2416 		UML_AND(block, I4, I4, 0x3f);
2417 		UML_STORE(block, (void *)m_core->local_regs, I4, I5, SIZE_DWORD, SCALE_x4);
2418 	}
2419 }
2420 
2421 
2422 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::imm_size IMM_LONG>
2423 void hyperstone_device::generate_xori(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
2424 {
2425 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
2426 
2427 	uint16_t op = desc->opptr.w[0];
2428 	const uint32_t dst_code = (op & 0xf0) >> 4;
2429 
2430 	if (IMM_LONG)
2431 	{
2432 		generate_decode_immediate_s(block, compiler, desc); // I1 <-- imm32
2433 	}
2434 	else
2435 	{
2436 		UML_MOV(block, I1, op & 0xf);
2437 	}
2438 
2439 	generate_check_delay_pc(block, compiler, desc);
2440 
2441 	if (DST_GLOBAL)
2442 	{
2443 		UML_LOAD(block, I2, (void *)m_core->global_regs, dst_code, SIZE_DWORD, SCALE_x4);
2444 	}
2445 	else
2446 	{
2447 		UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
2448 		UML_ADD(block, I2, I3, dst_code);
2449 		UML_AND(block, I4, I2, 0x3f);
2450 		UML_LOAD(block, I2, (void *)m_core->local_regs, I4, SIZE_DWORD, SCALE_x4);
2451 	}
2452 
2453 	UML_XOR(block, I5, I2, I1);
2454 
2455 	UML_TEST(block, I5, ~0);
2456 	UML_MOVc(block, uml::COND_Z, I2, Z_MASK);
2457 	UML_MOVc(block, uml::COND_NZ, I2, 0);
2458 	UML_ROLINS(block, DRC_SR, I2, 0, Z_MASK);
2459 
2460 	if (DST_GLOBAL)
2461 	{
2462 		if (dst_code < 2)
2463 		{
2464 			UML_MOV(block, I4, dst_code);
2465 			generate_set_global_register(block, compiler, desc);
2466 			if (dst_code == PC_REGISTER)
2467 			{
2468 				generate_branch(block, desc->targetpc, desc);
2469 			}
2470 		}
2471 		else
2472 		{
2473 			UML_STORE(block, (void *)m_core->global_regs, dst_code, I5, SIZE_DWORD, SCALE_x4);
2474 		}
2475 	}
2476 	else
2477 	{
2478 		UML_ADD(block, I4, I3, dst_code);
2479 		UML_AND(block, I4, I4, 0x3f);
2480 		UML_STORE(block, (void *)m_core->local_regs, I4, I5, SIZE_DWORD, SCALE_x4);
2481 	}
2482 }
2483 
2484 
2485 template <hyperstone_device::shift_type HI_N>
2486 void hyperstone_device::generate_shrdi(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
2487 {
2488 	UML_MOV(block, I7, mem(&m_core->clock_cycles_2));
2489 
2490 	uint16_t op = desc->opptr.w[0];
2491 
2492 	const uint32_t dst_code = (op & 0xf0) >> 4;
2493 
2494 	generate_check_delay_pc(block, compiler, desc);
2495 
2496 	UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
2497 
2498 	UML_ADD(block, I2, I3, dst_code);
2499 	UML_AND(block, I1, I2, 0x3f);
2500 	UML_LOAD(block, I0, (void *)m_core->local_regs, I1, SIZE_DWORD, SCALE_x4); // I0 = sreg
2501 
2502 	UML_ADD(block, I2, I3, dst_code + 1);
2503 	UML_AND(block, I6, I2, 0x3f);
2504 	UML_LOAD(block, I2, (void *)m_core->local_regs, I6, SIZE_DWORD, SCALE_x4); // I2 = sregf
2505 
2506 #ifndef PTR64
2507 	UML_DAND(block, I2, I2, 0x00000000ffffffffULL);
2508 #endif
2509 
2510 	UML_DSHL(block, I0, I0, 32);
2511 	UML_DOR(block, I2, I2, I0);
2512 
2513 	UML_AND(block, I4, DRC_SR, ~(C_MASK | Z_MASK | N_MASK));
2514 
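	// For a non-zero shift count the last bit shifted out (bit n-1 of the 64-bit pair) becomes
	// the new carry before the pair is shifted right; Z and N are then rebuilt from the full
	// 64-bit result.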
2515 	const uint32_t n = HI_N ? (0x10 | (op & 0xf)) : (op & 0xf);
2516 	if (HI_N || n)
2517 	{
2518 		int no_carry = compiler.m_labelnum++;
2519 		UML_DTEST(block, I2, (1 << (n - 1)));
2520 		UML_JMPc(block, uml::COND_Z, no_carry);
2521 		UML_OR(block, I4, I4, 1);
2522 		UML_LABEL(block, no_carry);
2523 
2524 		UML_DSHR(block, I2, I2, n);
2525 	}
2526 
2527 	UML_DTEST(block, I2, ~0ULL);
2528 	UML_MOVc(block, uml::COND_Z, I5, Z_MASK);
2529 	UML_MOVc(block, uml::COND_NZ, I5, 0);
2530 	UML_DROLINS(block, I5, I2, 3, N_MASK);
2531 	UML_OR(block, DRC_SR, I4, I5);
2532 
2533 	UML_STORE(block, (void *)m_core->local_regs, I6, I2, SIZE_DWORD, SCALE_x4);
2534 	UML_DSHR(block, I0, I2, 32);
2535 	UML_STORE(block, (void *)m_core->local_regs, I1, I0, SIZE_DWORD, SCALE_x4);
2536 }
2537 
2538 
2539 void hyperstone_device::generate_shrd(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
2540 {
2541 	UML_MOV(block, I7, mem(&m_core->clock_cycles_2));
2542 
2543 	uint16_t op = desc->opptr.w[0];
2544 
2545 	const uint32_t src_code = op & 0xf;
2546 	const uint32_t dst_code = (op & 0xf0) >> 4;
2547 	const uint32_t dstf_code = dst_code + 1;
2548 
2549 	generate_check_delay_pc(block, compiler, desc);
2550 
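	// If the source register overlaps the destination pair, the recompiler emits nothing beyond
	// the delay-slot bookkeeping above; the result of such an encoding is presumably undefined,
	// so no shift or flag update is generated.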
2551 	if (src_code == dst_code || src_code == dstf_code)
2552 	{
2553 		return;
2554 	}
2555 
2556 	UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
2557 
2558 	UML_ADD(block, I2, I3, dst_code);
2559 	UML_AND(block, I4, I2, 0x3f);
2560 	UML_LOAD(block, I0, (void *)m_core->local_regs, I4, SIZE_DWORD, SCALE_x4);
2561 
2562 	UML_ADD(block, I2, I3, dstf_code);
2563 	UML_AND(block, I5, I2, 0x3f);
2564 	UML_LOAD(block, I1, (void *)m_core->local_regs, I5, SIZE_DWORD, SCALE_x4);
2565 
2566 #ifndef PTR64
2567 	UML_DAND(block, I1, I1, 0x00000000ffffffffULL);
2568 #endif
2569 
2570 	UML_DSHL(block, I2, I0, 32);
2571 	UML_DOR(block, I0, I1, I2);
2572 
2573 	UML_ADD(block, I2, I3, src_code);
2574 	UML_AND(block, I6, I2, 0x3f);
2575 	UML_LOAD(block, I2, (void *)m_core->local_regs, I6, SIZE_DWORD, SCALE_x4);
2576 	UML_AND(block, I6, I2, 0x1f);
2577 
2578 	int no_shift = compiler.m_labelnum++;
2579 	UML_TEST(block, I6, ~0);
2580 	UML_JMPc(block, uml::COND_Z, no_shift);
2581 
2582 	UML_SUB(block, I2, I6, 1);
2583 	UML_DSHR(block, I3, I0, I2);
2584 	UML_AND(block, I2, I3, 1);
2585 	UML_DSHR(block, I0, I0, I6);
2586 
2587 	UML_LABEL(block, no_shift);
2588 	UML_DCMP(block, I0, 0ULL);
2589 	UML_MOVc(block, uml::COND_E, I3, Z_MASK);
2590 	UML_MOVc(block, uml::COND_NE, I3, 0);
2591 	UML_OR(block, I1, I2, I3);
2592 	UML_DROLINS(block, I1, I0, 3, N_MASK);
2593 	UML_ROLINS(block, DRC_SR, I1, 0, (C_MASK | Z_MASK | N_MASK));
2594 
2595 	UML_STORE(block, (void *)m_core->local_regs, I5, I0, SIZE_DWORD, SCALE_x4);
2596 	UML_DSHR(block, I1, I0, 32);
2597 	UML_STORE(block, (void *)m_core->local_regs, I4, I1, SIZE_DWORD, SCALE_x4);
2598 }
2599 
2600 
2601 void hyperstone_device::generate_shr(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
2602 {
2603 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
2604 
2605 	uint16_t op = desc->opptr.w[0];
2606 
2607 	const uint32_t dst_code = (op & 0xf0) >> 4;
2608 	const uint32_t src_code = op & 0xf;
2609 
2610 	generate_check_delay_pc(block, compiler, desc);
2611 
2612 	UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
2613 
2614 	UML_ADD(block, I2, I3, dst_code);
2615 	UML_AND(block, I4, I2, 0x3f);
2616 	UML_LOAD(block, I0, (void *)m_core->local_regs, I4, SIZE_DWORD, SCALE_x4); // I0 = dreg
2617 
2618 	UML_ADD(block, I2, I3, src_code);
2619 	UML_AND(block, I1, I2, 0x3f);
2620 	UML_LOAD(block, I5, (void *)m_core->local_regs, I1, SIZE_DWORD, SCALE_x4);
2621 	UML_AND(block, I1, I5, 0x1f); // I1 = sreg & 0x1f
2622 
2623 	UML_AND(block, I6, DRC_SR, ~(C_MASK | Z_MASK | N_MASK));
2624 
2625 	int no_shift = compiler.m_labelnum++;
2626 	int no_carry = compiler.m_labelnum++;
2627 	UML_CMP(block, I1, 0);
2628 	UML_JMPc(block, uml::COND_E, no_shift);
2629 	UML_SUB(block, I2, I1, 1);
2630 	UML_SHL(block, I2, 1, I2);
2631 	UML_TEST(block, I0, I2);
2632 	UML_JMPc(block, uml::COND_Z, no_carry);
2633 	UML_OR(block, I6, I6, C_MASK);
2634 
2635 	UML_LABEL(block, no_carry);
2636 	UML_SHR(block, I0, I0, I1);
2637 
2638 	UML_LABEL(block, no_shift);
2639 	UML_TEST(block, I0, ~0);
2640 	UML_MOVc(block, uml::COND_Z, I5, Z_MASK);
2641 	UML_MOVc(block, uml::COND_NZ, I5, 0);
2642 	UML_ROLINS(block, I5, I0, 3, N_MASK);
2643 
2644 	UML_OR(block, DRC_SR, I5, I6);
2645 	UML_STORE(block, (void *)m_core->local_regs, I4, I0, SIZE_DWORD, SCALE_x4);
2646 }
2647 
2648 
2649 template <hyperstone_device::shift_type HI_N, hyperstone_device::reg_bank DST_GLOBAL>
2650 void hyperstone_device::generate_shri(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
2651 {
2652 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
2653 
2654 	uint16_t op = desc->opptr.w[0];
2655 
2656 	const uint32_t dst_code = (op & 0xf0) >> 4;
2657 
2658 	generate_check_delay_pc(block, compiler, desc);
2659 
2660 	if (DST_GLOBAL)
2661 	{
2662 		UML_LOAD(block, I4, (void *)m_core->global_regs, dst_code, SIZE_DWORD, SCALE_x4);
2663 	}
2664 	else
2665 	{
2666 		UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
2667 		UML_ADD(block, I2, I3, dst_code);
2668 		UML_AND(block, I6, I2, 0x3f);
2669 		UML_LOAD(block, I4, (void *)m_core->local_regs, I6, SIZE_DWORD, SCALE_x4);
2670 	}
2671 
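	// For a non-zero count the last bit shifted out (bit n-1 of the original value) is rotated
	// into the carry position, then the 32-bit shift is performed and Z/N are rebuilt from the
	// shifted result.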
2672 	UML_AND(block, I1, DRC_SR, ~(C_MASK | Z_MASK | N_MASK));
2673 	const uint32_t n = HI_N ? (0x10 | (op & 0xf)) : (op & 0xf);
2674 	if (HI_N || n)
2675 		UML_ROLINS(block, I1, I4, 32 - (n - 1), 1);
2676 
2677 	UML_SHR(block, I5, I4, n);
2678 
2679 	UML_TEST(block, I5, ~0);
2680 	UML_MOVc(block, uml::COND_Z, I2, Z_MASK);
2681 	UML_MOVc(block, uml::COND_NZ, I2, 0);
2682 	UML_ROLINS(block, I2, I5, 3, N_MASK);
2683 	UML_OR(block, DRC_SR, I1, I2);
2684 
2685 	if (DST_GLOBAL)
2686 	{
2687 		if (dst_code < 2)
2688 		{
2689 			UML_MOV(block, I4, dst_code);
2690 			generate_set_global_register(block, compiler, desc);
2691 		}
2692 		else
2693 		{
2694 			UML_STORE(block, (void *)m_core->global_regs, dst_code, I5, SIZE_DWORD, SCALE_x4);
2695 		}
2696 	}
2697 	else
2698 	{
2699 		UML_STORE(block, (void *)m_core->local_regs, I6, I5, SIZE_DWORD, SCALE_x4);
2700 	}
2701 }
2702 
2703 
2704 template <hyperstone_device::shift_type HI_N>
2705 void hyperstone_device::generate_sardi(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
2706 {
2707 	printf("Unimplemented: generate_sardi (%08x)\n", desc->pc);
2708 	fflush(stdout);
2709 	fatalerror(" ");
2710 }
2711 
2712 
2713 void hyperstone_device::generate_sard(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
2714 {
2715 	UML_MOV(block, I7, mem(&m_core->clock_cycles_2));
2716 
2717 	uint16_t op = desc->opptr.w[0];
2718 
2719 	const uint32_t src_code = op & 0xf;
2720 	const uint32_t dst_code = (op & 0xf0) >> 4;
2721 	const uint32_t dstf_code = dst_code + 1;
2722 
2723 	generate_check_delay_pc(block, compiler, desc);
2724 
2725 	if (src_code == dst_code || src_code == dstf_code)
2726 	{
2727 		return;
2728 	}
2729 
2730 	UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
2731 
2732 	UML_ADD(block, I2, I3, dst_code);
2733 	UML_AND(block, I4, I2, 0x3f);
2734 	UML_LOAD(block, I0, (void *)m_core->local_regs, I4, SIZE_DWORD, SCALE_x4);
2735 
2736 	UML_ADD(block, I2, I3, dstf_code);
2737 	UML_AND(block, I5, I2, 0x3f);
2738 	UML_LOAD(block, I1, (void *)m_core->local_regs, I5, SIZE_DWORD, SCALE_x4);
2739 
2740 #ifndef PTR64
2741 	UML_DAND(block, I1, I1, 0x00000000ffffffffULL);
2742 #endif
2743 
2744 	UML_DSHL(block, I2, I0, 32);
2745 	UML_DOR(block, I0, I1, I2);
2746 
2747 	UML_ADD(block, I2, I3, src_code);
2748 	UML_AND(block, I6, I2, 0x3f);
2749 	UML_LOAD(block, I2, (void *)m_core->local_regs, I6, SIZE_DWORD, SCALE_x4);
2750 	UML_AND(block, I6, I2, 0x1f);
2751 
2752 	int no_shift = compiler.m_labelnum++;
2753 	UML_TEST(block, I6, ~0);
2754 	UML_JMPc(block, uml::COND_Z, no_shift);
2755 
2756 	UML_SUB(block, I2, I6, 1);
2757 	UML_DSAR(block, I3, I0, I2);
2758 	UML_AND(block, I2, I3, 1);
2759 	UML_DSAR(block, I0, I0, I6);
2760 
2761 	UML_LABEL(block, no_shift);
2762 	UML_DCMP(block, I0, 0ULL);
2763 	UML_MOVc(block, uml::COND_E, I3, Z_MASK);
2764 	UML_MOVc(block, uml::COND_NE, I3, 0);
2765 	UML_OR(block, I1, I2, I3);
2766 	UML_DROLINS(block, I1, I0, 3, N_MASK);
2767 	UML_ROLINS(block, DRC_SR, I1, 0, (C_MASK | Z_MASK | N_MASK));
2768 
2769 	UML_STORE(block, (void *)m_core->local_regs, I5, I0, SIZE_DWORD, SCALE_x4);
2770 	UML_DSHR(block, I1, I0, 32);
2771 	UML_STORE(block, (void *)m_core->local_regs, I4, I1, SIZE_DWORD, SCALE_x4);
2772 }
2773 
2774 
2775 void hyperstone_device::generate_sar(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
2776 {
2777 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
2778 
2779 	uint16_t op = desc->opptr.w[0];
2780 
2781 	const uint32_t dst_code = (op & 0xf0) >> 4;
2782 	const uint32_t src_code = op & 0xf;
2783 
2784 	generate_check_delay_pc(block, compiler, desc);
2785 
2786 	UML_ROLAND(block, I1, DRC_SR, 7, 0x7f);
2787 	UML_ADD(block, I2, I1, dst_code);
2788 	UML_AND(block, I2, I2, 0x3f);
2789 	UML_LOAD(block, I0, (void *)m_core->local_regs, I2, SIZE_DWORD, SCALE_x4);
2790 
2791 	UML_ADD(block, I1, I1, src_code);
2792 	UML_AND(block, I1, I1, 0x3f);
2793 	UML_LOAD(block, I1, (void *)m_core->local_regs, I1, SIZE_DWORD, SCALE_x4);
2794 	UML_AND(block, I1, I1, 0x1f);
2795 
2796 	int no_shift = compiler.m_labelnum++;
2797 	UML_MOV(block, I3, 0);
2798 	UML_CMP(block, I1, 0);
2799 	UML_JMPc(block, uml::COND_E, no_shift);
2800 	UML_SUB(block, I4, I1, 1);
2801 	UML_SHR(block, I4, I0, I4);
2802 	UML_AND(block, I3, I4, 1);
2803 	UML_SAR(block, I0, I0, I1);
2804 	UML_LABEL(block, no_shift);
2805 
2806 	UML_TEST(block, I0, ~0);
2807 	UML_SETc(block, uml::COND_Z, I1);
2808 	UML_ROLINS(block, I3, I1, Z_SHIFT, Z_MASK);
2809 	UML_ROLINS(block, I3, I0, 3, N_MASK);
2810 	UML_ROLINS(block, DRC_SR, I3, 0, (N_MASK | Z_MASK | C_MASK));
2811 
2812 	UML_STORE(block, (void *)m_core->local_regs, I2, I0, SIZE_DWORD, SCALE_x4);
2813 }
2814 
2815 
2816 template <hyperstone_device::shift_type HI_N, hyperstone_device::reg_bank DST_GLOBAL>
2817 void hyperstone_device::generate_sari(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
2818 {
2819 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
2820 
2821 	uint16_t op = desc->opptr.w[0];
2822 
2823 	const uint32_t dst_code = (op & 0xf0) >> 4;
2824 
2825 	generate_check_delay_pc(block, compiler, desc);
2826 
2827 	if (DST_GLOBAL)
2828 	{
2829 		UML_LOAD(block, I0, (void *)m_core->global_regs, dst_code, SIZE_DWORD, SCALE_x4);
2830 	}
2831 	else
2832 	{
2833 		UML_ROLAND(block, I1, DRC_SR, 7, 0x7f);
2834 		UML_ADD(block, I1, I1, dst_code);
2835 		UML_AND(block, I1, I1, 0x3f);
2836 		UML_LOAD(block, I0, (void *)m_core->local_regs, I1, SIZE_DWORD, SCALE_x4);
2837 	}
2838 
2839 	const uint32_t n = HI_N ? DRC_HI_N_VALUE : DRC_LO_N_VALUE;
2840 
2841 	UML_MOV(block, I2, 0);
2842 	if (HI_N || n)
2843 	{
2845 		UML_ROLINS(block, I2, I0, 32 - (n - 1), 1);
2846 		UML_SAR(block, I0, I0, n);
2847 	}
2848 
2849 	UML_TEST(block, I0, ~0);
2850 	UML_MOVc(block, uml::COND_Z, I3, Z_MASK);
2851 	UML_MOVc(block, uml::COND_NZ, I3, 0);
2852 	UML_OR(block, I2, I2, I3);
2853 	UML_ROLINS(block, I2, I0, 3, N_MASK);
2854 	UML_ROLINS(block, DRC_SR, I2, 0, (C_MASK | Z_MASK | N_MASK));
2855 
2856 	if (DST_GLOBAL)
2857 	{
2858 		if (dst_code < 2)
2859 		{
2860 			UML_MOV(block, I4, dst_code);
2861 			UML_MOV(block, I5, I0);
2862 			generate_set_global_register(block, compiler, desc);
2863 		}
2864 		else
2865 		{
2866 			UML_STORE(block, (void *)m_core->global_regs, dst_code, I0, SIZE_DWORD, SCALE_x4);
2867 		}
2868 	}
2869 	else
2870 	{
2871 		UML_STORE(block, (void *)m_core->local_regs, I1, I0, SIZE_DWORD, SCALE_x4);
2872 	}
2873 }
2874 
2875 
2876 template <hyperstone_device::shift_type HI_N>
2877 void hyperstone_device::generate_shldi(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
2878 {
2879 	UML_MOV(block, I7, mem(&m_core->clock_cycles_2));
2880 
2881 	uint16_t op = desc->opptr.w[0];
2882 
2883 	const uint32_t dst_code = (op & 0xf0) >> 4;
2884 	const uint32_t dstf_code = dst_code + 1;
2885 
2886 	generate_check_delay_pc(block, compiler, desc);
2887 
2888 	UML_ROLAND(block, I4, DRC_SR, 7, 0x7f); // I4: FP
2889 
2890 	UML_ADD(block, I2, I4, dst_code);
2891 	UML_AND(block, I2, I2, 0x3f); // I2: dst_code
2892 	UML_LOAD(block, I6, (void *)m_core->local_regs, I2, SIZE_DWORD, SCALE_x4); // I6: high_order
2893 
2894 	UML_ADD(block, I3, I4, dstf_code);
2895 	UML_AND(block, I3, I3, 0x3f); // I3: dstf_code
2896 	UML_LOAD(block, I1, (void *)m_core->local_regs, I3, SIZE_DWORD, SCALE_x4); // I1: low_order
2897 
2898 #ifndef PTR64
2899 	UML_DAND(block, I1, I1, 0x00000000ffffffffULL);
2900 #endif
2901 
2902 	UML_DSHL(block, I0, I6, 32);
2903 	UML_DOR(block, I0, I0, I1); // I0: val, I1 free after this point
2904 
2905 	UML_MOV(block, I4, HI_N ? (0x10 | (op & 0xf)) : (op & 0xf));
2906 
2907 	UML_DSHR(block, I1, 0xffffffff00000000ULL, I4); // I1: mask
2908 
2909 	UML_AND(block, DRC_SR, DRC_SR, ~C_MASK);
2910 
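	// A non-zero count rotates the 64-bit value left by n, dropping the last bit shifted out of
	// bit 63 into C. The mask built above (the top n bits of the high-order word) is used
	// further down to decide whether the discarded bits force the V flag.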
2911 	int no_carry = compiler.m_labelnum++;
2912 	UML_TEST(block, I4, 0x1f);
2913 	UML_JMPc(block, uml::COND_Z, no_carry);
2914 	UML_DROLINS(block, DRC_SR, I0, I4, 1); // Insert carry flag
2915 	UML_LABEL(block, no_carry);
2916 
2917 	int no_hi_bit = compiler.m_labelnum++;
2918 	int no_overflow = compiler.m_labelnum++;
2919 	UML_AND(block, I5, I6, I1); // I5: high_order & mask
2920 	UML_DSHL(block, I0, I0, I4); // I0: val << n
2921 
2922 	UML_MOV(block, I4, 0);
2923 	UML_DTEST(block, I0, 0x8000000000000000ULL);
2924 	UML_JMPc(block, uml::COND_Z, no_hi_bit);
2925 	UML_XOR(block, I5, I5, I1); // I5: (high_order & mask) ^ mask
2926 	UML_LABEL(block, no_hi_bit);
2927 	UML_TEST(block, I5, ~0);
2928 	UML_JMPc(block, uml::COND_Z, no_overflow);
2929 	UML_OR(block, I4, I4, V_MASK);
2930 	UML_LABEL(block, no_overflow);
2931 
2932 	UML_DTEST(block, I0, ~0ULL);
2933 	UML_MOVc(block, uml::COND_Z, I1, Z_MASK);
2934 	UML_MOVc(block, uml::COND_NZ, I1, 0);
2935 	UML_DROLINS(block, I1, I0, 3, N_MASK);
2936 	UML_OR(block, I1, I1, I4);
2937 	UML_ROLINS(block, DRC_SR, I1, 0, (N_MASK | Z_MASK | V_MASK));
2938 
2939 	UML_STORE(block, (void *)m_core->local_regs, I3, I0, SIZE_DWORD, SCALE_x4);
2940 	UML_DSHR(block, I0, I0, 32);
2941 	UML_STORE(block, (void *)m_core->local_regs, I2, I0, SIZE_DWORD, SCALE_x4);
2942 }
2943 
2944 
2945 void hyperstone_device::generate_shld(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
2946 {
2947 	UML_MOV(block, I7, mem(&m_core->clock_cycles_2));
2948 
2949 	uint16_t op = desc->opptr.w[0];
2950 
2951 	const uint32_t src_code = op & 0xf;
2952 	const uint32_t dst_code = (op & 0xf0) >> 4;
2953 	const uint32_t dstf_code = dst_code + 1;
2954 
2955 	generate_check_delay_pc(block, compiler, desc);
2956 
2957 	if (src_code == dst_code || src_code == dstf_code)
2958 	{
2959 		return;
2960 	}
2961 
2962 	UML_ROLAND(block, I4, DRC_SR, 7, 0x7f); // I4: FP
2963 
2964 	UML_ADD(block, I2, I4, dst_code);
2965 	UML_AND(block, I2, I2, 0x3f); // I2: dst_code
2966 	UML_LOAD(block, I6, (void *)m_core->local_regs, I2, SIZE_DWORD, SCALE_x4); // I6: high_order
2967 
2968 	UML_ADD(block, I3, I4, dstf_code);
2969 	UML_AND(block, I3, I3, 0x3f); // I3: dstf_code
2970 	UML_LOAD(block, I1, (void *)m_core->local_regs, I3, SIZE_DWORD, SCALE_x4); // I1: low_order
2971 
2972 #ifndef PTR64
2973 	UML_DAND(block, I1, I1, 0x00000000ffffffffULL);
2974 #endif
2975 
2976 	UML_DSHL(block, I0, I6, 32);
2977 	UML_DOR(block, I0, I0, I1); // I0: val, I1 free after this point
2978 
2979 	UML_ADD(block, I4, I4, src_code);
2980 	UML_AND(block, I4, I4, 0x3f);
2981 	UML_LOAD(block, I4, (void *)m_core->local_regs, I4, SIZE_DWORD, SCALE_x4);
2982 	UML_AND(block, I4, I4, 0x1f); // I4: n
2983 
2984 	UML_DSHR(block, I1, 0xffffffff00000000ULL, I4); // I1: mask
2985 
2986 	UML_AND(block, DRC_SR, DRC_SR, ~C_MASK);
2987 
2988 	int no_carry = compiler.m_labelnum++;
2989 	UML_TEST(block, I4, 0x1f);
2990 	UML_JMPc(block, uml::COND_Z, no_carry);
2991 	UML_DROLINS(block, DRC_SR, I0, I4, 1); // Insert carry flag
2992 	UML_LABEL(block, no_carry);
2993 
2994 	int no_hi_bit = compiler.m_labelnum++;
2995 	int no_overflow = compiler.m_labelnum++;
2996 	UML_AND(block, I5, I6, I1); // I5: high_order & mask
2997 	UML_DSHL(block, I0, I0, I4); // I0: val << n
2998 
2999 	UML_MOV(block, I4, 0);
3000 	UML_DTEST(block, I0, 0x8000000000000000ULL);
3001 	UML_JMPc(block, uml::COND_Z, no_hi_bit);
3002 	UML_XOR(block, I5, I5, I1); // I5: (high_order & mask) ^ mask
3003 	UML_LABEL(block, no_hi_bit);
3004 	UML_TEST(block, I5, ~0);
3005 	UML_JMPc(block, uml::COND_Z, no_overflow);
3006 	UML_OR(block, I4, I4, V_MASK);
3007 	UML_LABEL(block, no_overflow);
3008 
3009 	UML_DTEST(block, I0, ~0ULL);
3010 	UML_MOVc(block, uml::COND_Z, I1, Z_MASK);
3011 	UML_MOVc(block, uml::COND_NZ, I1, 0);
3012 	UML_DROLINS(block, I1, I0, 3, N_MASK);
3013 	UML_OR(block, I1, I1, I4);
3014 	UML_ROLINS(block, DRC_SR, I1, 0, (N_MASK | Z_MASK | V_MASK));
3015 
3016 	UML_STORE(block, (void *)m_core->local_regs, I3, I0, SIZE_DWORD, SCALE_x4);
3017 	UML_DSHR(block, I0, I0, 32);
3018 	UML_STORE(block, (void *)m_core->local_regs, I2, I0, SIZE_DWORD, SCALE_x4);
3019 }
3020 
3021 
3022 void hyperstone_device::generate_shl(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
3023 {
3024 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
3025 
3026 	uint16_t op = desc->opptr.w[0];
3027 
3028 	const uint32_t dst_code = (op & 0xf0) >> 4;
3029 	const uint32_t src_code = op & 0xf;
3030 
3031 	generate_check_delay_pc(block, compiler, desc);
3032 
3033 	UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
3034 
3035 	UML_ADD(block, I2, I3, dst_code);
3036 	UML_AND(block, I4, I2, 0x3f);
3037 	UML_LOAD(block, I0, (void *)m_core->local_regs, I4, SIZE_DWORD, SCALE_x4); // I0 = dreg
3038 
3039 	UML_ADD(block, I2, I3, src_code);
3040 	UML_AND(block, I1, I2, 0x3f);
3041 	UML_LOAD(block, I5, (void *)m_core->local_regs, I1, SIZE_DWORD, SCALE_x4);
3042 	UML_AND(block, I1, I5, 0x1f); // I1 = sreg & 0x1f
3043 
3044 	UML_AND(block, I6, DRC_SR, ~(C_MASK | Z_MASK | N_MASK | V_MASK));
3045 
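	// C receives the last bit shifted out of bit 31 (found by testing the sign of
	// dreg << (count - 1)); V is set when the bits about to be discarded are not a plain
	// sign-extension of the shifted result, i.e. when the shift changes the signed value.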
3046 	int done_shift = compiler.m_labelnum++;
3047 	int no_carry = compiler.m_labelnum++;
3048 	UML_CMP(block, I1, 0);
3049 	UML_JMPc(block, uml::COND_E, done_shift);
3050 	UML_SUB(block, I2, I1, 1);
3051 	UML_SHL(block, I2, I0, I2);
3052 	UML_TEST(block, I2, 0x80000000);
3053 	UML_JMPc(block, uml::COND_Z, no_carry);
3054 	UML_OR(block, I6, I6, C_MASK);
3055 
3056 	UML_LABEL(block, no_carry);
3057 	UML_DSHR(block, I5, 0xffffffff00000000ULL, I1);
3058 	UML_AND(block, I3, I0, I5);
3059 
3060 	UML_SHL(block, I0, I0, I1);
3061 
3062 	int no_hi_bit = compiler.m_labelnum++;
3063 	UML_TEST(block, I0, 0x80000000);
3064 	UML_JMPc(block, uml::COND_Z, no_hi_bit);
3065 
3066 	UML_XOR(block, I3, I3, I5);
3067 
3068 	UML_LABEL(block, no_hi_bit);
3069 	UML_TEST(block, I3, ~0);
3070 	UML_JMPc(block, uml::COND_Z, done_shift);
3071 	UML_OR(block, I6, I6, V_MASK);
3072 
3073 	UML_LABEL(block, done_shift);
3074 	UML_TEST(block, I0, ~0);
3075 	UML_MOVc(block, uml::COND_Z, I5, Z_MASK);
3076 	UML_MOVc(block, uml::COND_NZ, I5, 0);
3077 	UML_ROLINS(block, I5, I0, 3, N_MASK);
3078 
3079 	UML_OR(block, DRC_SR, I5, I6);
3080 	UML_STORE(block, (void *)m_core->local_regs, I4, I0, SIZE_DWORD, SCALE_x4);
3081 }
3082 
3083 
3084 template <hyperstone_device::shift_type HI_N, hyperstone_device::reg_bank DST_GLOBAL>
3085 void hyperstone_device::generate_shli(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
3086 {
3087 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
3088 
3089 	uint16_t op = desc->opptr.w[0];
3090 
3091 	const uint32_t dst_code = (op & 0xf0) >> 4;
3092 
3093 	generate_check_delay_pc(block, compiler, desc);
3094 
3095 	if (DST_GLOBAL)
3096 	{
3097 		UML_LOAD(block, I4, (void *)m_core->global_regs, dst_code, SIZE_DWORD, SCALE_x4);
3098 	}
3099 	else
3100 	{
3101 		UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
3102 		UML_ADD(block, I2, I3, dst_code);
3103 		UML_AND(block, I6, I2, 0x3f);
3104 		UML_LOAD(block, I4, (void *)m_core->local_regs, I6, SIZE_DWORD, SCALE_x4);
3105 	}
3106 
3107 	UML_AND(block, I1, DRC_SR, ~(C_MASK | Z_MASK | N_MASK | V_MASK));
3108 	const uint32_t n = HI_N ? (0x10 | (op & 0xf)) : (op & 0xf);
3109 
3110 	if (HI_N || n)
3111 	{
3112 		int skip_c = compiler.m_labelnum++;
3113 		UML_TEST(block, I4, (0x80000000 >> (n - 1)));
3114 		UML_JMPc(block, uml::COND_Z, skip_c);
3115 		UML_OR(block, I1, I1, 1);
3116 		UML_LABEL(block, skip_c);
3117 	}
3118 
3119 	UML_SHL(block, I5, I4, n);
3120 
3121 	int done_v = compiler.m_labelnum++;
3122 	uint32_t mask = (uint32_t)(0xffffffff00000000ULL >> n);
3123 
3124 	int no_high_bit = compiler.m_labelnum++;
3125 	UML_TEST(block, I5, 0x80000000);
3126 	UML_JMPc(block, uml::COND_Z, no_high_bit);
3127 
3128 	UML_AND(block, I4, I4, mask);
3129 	UML_XOR(block, I4, I4, mask);
3130 	UML_TEST(block, I4, ~0);
3131 	UML_JMPc(block, uml::COND_Z, done_v);
3132 	UML_OR(block, I1, I1, V_MASK);
3133 	UML_JMP(block, done_v);
3134 
3135 	UML_LABEL(block, no_high_bit);
3136 	UML_TEST(block, I4, mask);
3137 	UML_JMPc(block, uml::COND_Z, done_v);
3138 	UML_OR(block, I1, I1, V_MASK);
3139 	UML_LABEL(block, done_v);
3140 
3141 	UML_TEST(block, I5, ~0);
3142 	UML_MOVc(block, uml::COND_Z, I2, Z_MASK);
3143 	UML_MOVc(block, uml::COND_NZ, I2, 0);
3144 	UML_ROLINS(block, I2, I5, 3, N_MASK);
3145 	UML_OR(block, DRC_SR, I1, I2);
3146 
3147 	if (DST_GLOBAL)
3148 	{
3149 		if (dst_code < 2)
3150 		{
3151 			UML_MOV(block, I4, dst_code);
3152 			generate_set_global_register(block, compiler, desc);
3153 		}
3154 		else
3155 		{
3156 			UML_STORE(block, (void *)m_core->global_regs, dst_code, I5, SIZE_DWORD, SCALE_x4);
3157 		}
3158 	}
3159 	else
3160 	{
3161 		UML_STORE(block, (void *)m_core->local_regs, I6, I5, SIZE_DWORD, SCALE_x4);
3162 	}
3163 }
3164 
3165 
3166 void hyperstone_device::generate_testlz(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
3167 {
3168 	UML_MOV(block, I7, mem(&m_core->clock_cycles_2));
3169 
3170 	uint16_t op = desc->opptr.w[0];
3171 
3172 	const uint32_t dst_code = (op & 0xf0) >> 4;
3173 	const uint32_t src_code = op & 0xf;
3174 
3175 	generate_check_delay_pc(block, compiler, desc);
3176 
3177 	UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
3178 	UML_ADD(block, I2, I3, src_code);
3179 	UML_AND(block, I1, I2, 0x3f);
3180 	UML_LOAD(block, I0, (void *)m_core->local_regs, I1, SIZE_DWORD, SCALE_x4);
3181 
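	// TESTLZ: count the leading zeros of the source register and write the count to the
	// destination local register; no status flags are touched here.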
3182 	UML_LZCNT(block, I4, I0);
3183 
3184 	UML_ADD(block, I2, I3, dst_code);
3185 	UML_AND(block, I1, I2, 0x3f);
3186 	UML_STORE(block, (void *)m_core->local_regs, I1, I4, SIZE_DWORD, SCALE_x4);
3187 }
3188 
3189 
3190 void hyperstone_device::generate_rol(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
3191 {
3192 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
3193 
3194 	uint16_t op = desc->opptr.w[0];
3195 
3196 	const uint32_t dst_code = (op & 0xf0) >> 4;
3197 	const uint32_t src_code = op & 0xf;
3198 
3199 	generate_check_delay_pc(block, compiler, desc);
3200 
3201 	UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
3202 
3203 	UML_ADD(block, I2, I3, dst_code);
3204 	UML_AND(block, I4, I2, 0x3f);
3205 	UML_LOAD(block, I0, (void *)m_core->local_regs, I4, SIZE_DWORD, SCALE_x4); // I0 = dreg
3206 
3207 	UML_ADD(block, I2, I3, src_code);
3208 	UML_AND(block, I1, I2, 0x3f);
3209 	UML_LOAD(block, I5, (void *)m_core->local_regs, I1, SIZE_DWORD, SCALE_x4);
3210 	UML_AND(block, I1, I5, 0x1f); // I1 = sreg & 0x1f
3211 
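	// The rotate itself only happens for a non-zero count; V is then set when the top bits of
	// the original value that wrap around are not all copies of its sign bit (the rotate would
	// change the signed value). Z and N come from the rotated result, and C ends up cleared by
	// the final flag insert.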
3212 	int no_shift = compiler.m_labelnum++;
3213 	UML_CMP(block, I1, 0);
3214 	UML_JMPc(block, uml::COND_E, no_shift);
3215 	UML_ROL(block, I2, I0, I1);
3216 	UML_LABEL(block, no_shift);
3217 
3218 	UML_DSHR(block, I5, 0xffffffff00000000ULL, I1);
3219 	UML_AND(block, I3, I0, I5);
3220 
3221 	UML_MOV(block, I6, 0);
3222 
3223 	int no_hi_bit = compiler.m_labelnum++;
3224 	UML_TEST(block, I0, 0x80000000);
3225 	UML_JMPc(block, uml::COND_Z, no_hi_bit);
3226 
3227 	UML_XOR(block, I3, I3, I5);
3228 
3229 	int done_shift = compiler.m_labelnum++;
3230 	UML_LABEL(block, no_hi_bit);
3231 	UML_TEST(block, I3, ~0);
3232 	UML_JMPc(block, uml::COND_Z, done_shift);
3233 	UML_OR(block, I6, I6, V_MASK);
3234 
3235 	UML_LABEL(block, done_shift);
3236 	UML_TEST(block, I2, ~0);
3237 	UML_MOVc(block, uml::COND_Z, I5, Z_MASK);
3238 	UML_MOVc(block, uml::COND_NZ, I5, 0);
3239 	UML_ROLINS(block, I5, I2, 3, N_MASK);
3240 
3241 	UML_OR(block, I5, I5, I6);
3242 	UML_ROLINS(block, DRC_SR, I5, 0, (V_MASK | N_MASK | Z_MASK | C_MASK));
3243 	UML_STORE(block, (void *)m_core->local_regs, I4, I2, SIZE_DWORD, SCALE_x4);
3244 }
3245 
3246 
3247 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::reg_bank SRC_GLOBAL>
3248 void hyperstone_device::generate_ldxx1(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
3249 {
3250 	const uint16_t op = desc->opptr.w[0];
3251 	uint16_t next_1 = m_pr16(desc->pc + 2);
3252 	const uint16_t sub_type = (next_1 & 0x3000) >> 12;
3253 
3254 	uint32_t extra_s;
3255 	if (next_1 & 0x8000)
3256 	{
3257 		const uint16_t next_2 = m_pr16(desc->pc + 4);
3258 
3259 		extra_s = next_2;
3260 		extra_s |= ((next_1 & 0xfff) << 16);
3261 
3262 		if (next_1 & 0x4000)
3263 			extra_s |= 0xf0000000;
3264 
3265 		UML_ADD(block, DRC_PC, DRC_PC, 4);
3266 	}
3267 	else
3268 	{
3269 		extra_s = next_1 & 0xfff;
3270 
3271 		if (next_1 & 0x4000)
3272 			extra_s |= 0xfffff000;
3273 
3274 		UML_ADD(block, DRC_PC, DRC_PC, 2);
3275 	}
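	// Displacement decode for LDxx1: a set bit 15 in the next word selects the long form, where
	// a second word extends the offset (sign-extended from bit 14 of the first extension word);
	// otherwise only the low 12 bits are used, again sign-extended from bit 14. The low bits of
	// extra_s also select the addressing sub-mode handled in the switch below.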
3276 
3277 	generate_check_delay_pc(block, compiler, desc);
3278 
3279 	const uint32_t src_code = op & 0xf;
3280 	const uint32_t srcf_code = src_code + 1;
3281 	const uint32_t dst_code = (op & 0xf0) >> 4;
3282 
3283 	if (!DST_GLOBAL || !SRC_GLOBAL)
3284 		UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
3285 
3286 	if (DST_GLOBAL)
3287 	{
3288 		if (dst_code == SR_REGISTER)
3289 			UML_MOV(block, I4, 0);
3290 		else
3291 			UML_LOAD(block, I4, (void *)m_core->global_regs, dst_code, SIZE_DWORD, SCALE_x4);
3292 	}
3293 	else
3294 	{
3295 		UML_ADD(block, I2, I3, dst_code);
3296 		UML_AND(block, I5, I2, 0x3f);
3297 		UML_LOAD(block, I4, (void *)m_core->local_regs, I5, SIZE_DWORD, SCALE_x4);
3298 	}
3299 
3300 	switch (sub_type)
3301 	{
3302 		case 0: // LDBS.A
3303 			UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
3304 			UML_ADD(block, I0, I4, extra_s);
3305 			UML_CALLH(block, *m_mem_read8);
3306 			UML_SEXT(block, I5, I1, SIZE_BYTE);
3307 
3308 			if (SRC_GLOBAL)
3309 			{
3310 				UML_MOV(block, I4, src_code);
3311 				generate_set_global_register(block, compiler, desc);
3312 				if (src_code == PC_REGISTER)
3313 					generate_branch(block, desc->targetpc, desc);
3314 			}
3315 			else
3316 			{
3317 				UML_ADD(block, I2, I3, src_code);
3318 				UML_AND(block, I2, I2, 0x3f);
3319 				UML_STORE(block, (void *)m_core->local_regs, I2, I5, SIZE_DWORD, SCALE_x4);
3320 			}
3321 			break;
3322 
3323 		case 1: // LDBU.A
3324 			UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
3325 			UML_ADD(block, I0, I4, extra_s);
3326 			UML_CALLH(block, *m_mem_read8);
3327 
3328 			if (SRC_GLOBAL)
3329 			{
3330 				UML_MOV(block, I4, src_code);
3331 				UML_MOV(block, I5, I1);
3332 				generate_set_global_register(block, compiler, desc);
3333 				if (src_code == PC_REGISTER)
3334 					generate_branch(block, desc->targetpc, desc);
3335 			}
3336 			else
3337 			{
3338 				UML_ADD(block, I2, I3, src_code);
3339 				UML_AND(block, I2, I2, 0x3f);
3340 				UML_STORE(block, (void *)m_core->local_regs, I2, I1, SIZE_DWORD, SCALE_x4);
3341 			}
3342 			break;
3343 
3344 		case 2:
3345 			UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
3346 			UML_ADD(block, I0, I4, extra_s & ~1);
3347 			UML_AND(block, I0, I0, ~1);
3348 			UML_CALLH(block, *m_mem_read16);
3349 
3350 			if (SRC_GLOBAL)
3351 			{
3352 				UML_MOV(block, I4, src_code);
3353 				if (extra_s & 1) // LDHS.A
3354 					UML_SEXT(block, I5, I1, SIZE_WORD);
3355 				else // LDHU.A
3356 					UML_MOV(block, I5, I1);
3357 
3358 				generate_set_global_register(block, compiler, desc);
3359 
3360 				if (src_code == PC_REGISTER)
3361 					generate_branch(block, desc->targetpc, desc);
3362 			}
3363 			else
3364 			{
3365 				UML_ADD(block, I2, I3, src_code);
3366 				UML_AND(block, I2, I2, 0x3f);
3367 				if (extra_s & 1)
3368 				{
3369 					UML_SEXT(block, I5, I1, SIZE_WORD);
3370 					UML_STORE(block, (void *)m_core->local_regs, I2, I5, SIZE_DWORD, SCALE_x4);
3371 				}
3372 				else
3373 				{
3374 					UML_STORE(block, (void *)m_core->local_regs, I2, I1, SIZE_DWORD, SCALE_x4);
3375 				}
3376 			}
3377 			break;
3378 
3379 		case 3:
3380 		{
3381 			uint32_t switch_val = extra_s & 3;
3382 			extra_s &= ~3;
3383 			UML_ADD(block, I0, I4, extra_s);
3384 			UML_AND(block, I0, I0, ~3);
3385 			switch (switch_val)
3386 			{
3387 				case 0: // LDW.A/D
3388 					UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
3389 					UML_CALLH(block, *m_mem_read32);
3390 
3391 					if (SRC_GLOBAL)
3392 					{
3393 						UML_MOV(block, I5, I1);
3394 						UML_MOV(block, I4, src_code);
3395 						generate_set_global_register(block, compiler, desc);
3396 						if (src_code == PC_REGISTER)
3397 							generate_branch(block, desc->targetpc, desc);
3398 					}
3399 					else
3400 					{
3401 						UML_ADD(block, I2, I3, src_code);
3402 						UML_AND(block, I2, I2, 0x3f);
3403 						UML_STORE(block, (void *)m_core->local_regs, I2, I1, SIZE_DWORD, SCALE_x4);
3404 					}
3405 					break;
3406 				case 1: // LDD.A
3407 					UML_MOV(block, I7, mem(&m_core->clock_cycles_2));
3408 					UML_CALLH(block, *m_mem_read32);
3409 
3410 					if (SRC_GLOBAL)
3411 					{
3412 						UML_MOV(block, I5, I1);
3413 						UML_MOV(block, I4, src_code);
3414 						generate_set_global_register(block, compiler, desc);
3415 						if (src_code == PC_REGISTER)
3416 							generate_branch(block, desc->targetpc, desc);
3417 					}
3418 					else
3419 					{
3420 						UML_ADD(block, I2, I3, src_code);
3421 						UML_AND(block, I2, I2, 0x3f);
3422 						UML_STORE(block, (void *)m_core->local_regs, I2, I1, SIZE_DWORD, SCALE_x4);
3423 					}
3424 
3425 					UML_ADD(block, I0, I0, 4);
3426 					UML_CALLH(block, *m_mem_read32);
3427 
3428 					if (SRC_GLOBAL)
3429 					{
3430 						UML_MOV(block, I5, I1);
3431 						UML_MOV(block, I4, srcf_code);
3432 						generate_set_global_register(block, compiler, desc);
3433 					}
3434 					else
3435 					{
3436 						UML_ADD(block, I2, I3, srcf_code);
3437 						UML_AND(block, I2, I2, 0x3f);
3438 						UML_STORE(block, (void *)m_core->local_regs, I2, I1, SIZE_DWORD, SCALE_x4);
3439 					}
3440 					break;
3441 				case 2: // LDW.IOD
3442 					UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
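					// fold the I/O address: bits 25..13 of the computed address become the
					// word-aligned offset (bits 14..2) handed to the I/O read handler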
3443 					UML_ROLAND(block, I0, I0, 21, 0x7ffc);
3444 					UML_CALLH(block, *m_io_read32);
3445 
3446 					if (SRC_GLOBAL)
3447 					{
3448 						UML_MOV(block, I5, I1);
3449 						UML_MOV(block, I4, src_code);
3450 						generate_set_global_register(block, compiler, desc);
3451 						if (src_code == PC_REGISTER)
3452 							generate_branch(block, desc->targetpc, desc);
3453 					}
3454 					else
3455 					{
3456 						UML_ADD(block, I2, I3, src_code);
3457 						UML_AND(block, I2, I2, 0x3f);
3458 						UML_STORE(block, (void *)m_core->local_regs, I2, I1, SIZE_DWORD, SCALE_x4);
3459 					}
3460 					break;
3461 
3462 				case 3: // LDD.IOD
3463 					UML_MOV(block, I7, mem(&m_core->clock_cycles_2));
3464 					UML_ROLAND(block, I0, I0, 21, 0x7ffc);
3465 					UML_CALLH(block, *m_io_read32);
3466 
3467 					if (SRC_GLOBAL)
3468 					{
3469 						UML_MOV(block, I5, I1);
3470 						UML_MOV(block, I4, src_code);
3471 						generate_set_global_register(block, compiler, desc);
3472 						if (src_code == PC_REGISTER)
3473 							generate_branch(block, desc->targetpc, desc);
3474 					}
3475 					else
3476 					{
3477 						UML_ADD(block, I2, I3, src_code);
3478 						UML_AND(block, I2, I2, 0x3f);
3479 						UML_STORE(block, (void *)m_core->local_regs, I2, I1, SIZE_DWORD, SCALE_x4);
3480 					}
3481 
3482 					UML_ADD(block, I0, I0, 4);
3483 					UML_CALLH(block, *m_io_read32);
3484 
3485 					if (SRC_GLOBAL)
3486 					{
3487 						UML_MOV(block, I5, I1);
3488 						UML_MOV(block, I4, srcf_code);
3489 						generate_set_global_register(block, compiler, desc);
3490 					}
3491 					else
3492 					{
3493 						UML_ADD(block, I2, I3, srcf_code);
3494 						UML_AND(block, I2, I2, 0x3f);
3495 						UML_STORE(block, (void *)m_core->local_regs, I2, I1, SIZE_DWORD, SCALE_x4);
3496 					}
3497 					break;
3498 			}
3499 			break;
3500 		}
3501 	}
3502 }
3503 
3504 
3505 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::reg_bank SRC_GLOBAL>
3506 void hyperstone_device::generate_ldxx2(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
3507 {
3508 	const uint16_t op = desc->opptr.w[0];
3509 	uint16_t next_1 = m_pr16(desc->pc + 2);
3510 	const uint16_t sub_type = (next_1 & 0x3000) >> 12;
3511 
3512 	uint32_t extra_s;
3513 	if (next_1 & 0x8000)
3514 	{
3515 		const uint16_t next_2 = m_pr16(desc->pc + 4);
3516 
3517 		extra_s = next_2;
3518 		extra_s |= ((next_1 & 0xfff) << 16);
3519 
3520 		if (next_1 & 0x4000)
3521 			extra_s |= 0xf0000000;
3522 
3523 		UML_ADD(block, DRC_PC, DRC_PC, 4);
3524 	}
3525 	else
3526 	{
3527 		extra_s = next_1 & 0xfff;
3528 
3529 		if (next_1 & 0x4000)
3530 			extra_s |= 0xfffff000;
3531 
3532 		UML_ADD(block, DRC_PC, DRC_PC, 2);
3533 	}
3534 
3535 	generate_check_delay_pc(block, compiler, desc);
3536 
3537 	const uint32_t src_code = op & 0xf;
3538 	const uint32_t srcf_code = src_code + 1;
3539 	const uint32_t dst_code = (op & 0xf0) >> 4;
3540 
3541 	if (DST_GLOBAL && dst_code < 2)
3542 	{
3543 		printf("Denoted PC or SR in hyperstone_ldxx2. PC = %08X\n", desc->pc);
3544 		return;
3545 	}
3546 
3547 	if (!DST_GLOBAL || !SRC_GLOBAL)
3548 		UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
3549 
3550 	if (DST_GLOBAL)
3551 	{
3552 		UML_LOAD(block, I6, (void *)m_core->global_regs, dst_code, SIZE_DWORD, SCALE_x4);
3553 	}
3554 	else
3555 	{
3556 		UML_ADD(block, I2, I3, dst_code);
3557 		UML_AND(block, I5, I2, 0x3f);
3558 		UML_LOAD(block, I6, (void *)m_core->local_regs, I5, SIZE_DWORD, SCALE_x4);
3559 	}
3560 
3561 	switch (sub_type)
3562 	{
3563 		case 0: // LDBS.N
3564 		case 1: // LDBU.N
3565 		case 2: // LDHS.N, LDHU.N
3566 		{
3567 			UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
3568 			if (sub_type == 0)
3569 			{
3570 				UML_MOV(block, I0, I6);
3571 				UML_CALLH(block, *m_mem_read8);
3572 				UML_SEXT(block, I5, I1, SIZE_BYTE);
3573 			}
3574 			else if (sub_type == 2)
3575 			{
3576 				UML_AND(block, I0, I6, ~1);
3577 				UML_CALLH(block, *m_mem_read16);
3578 				if (extra_s & 1)
3579 					UML_SEXT(block, I5, I1, SIZE_WORD);
3580 				else
3581 					UML_MOV(block, I5, I1);
3582 			}
3583 			else
3584 			{
3585 				UML_MOV(block, I0, I6);
3586 				UML_CALLH(block, *m_mem_read8);
3587 				UML_MOV(block, I5, I1);
3588 			}
3589 
3590 			if (SRC_GLOBAL)
3591 			{
3592 				UML_MOV(block, I4, src_code);
3593 				generate_set_global_register(block, compiler, desc);
3594 			}
3595 			else
3596 			{
3597 				UML_ADD(block, I2, I3, src_code);
3598 				UML_AND(block, I4, I2, 0x3f);
3599 				UML_STORE(block, (void *)m_core->local_regs, I4, I5, SIZE_DWORD, SCALE_x4);
3600 			}
3601 
3602 			if (DST_GLOBAL != SRC_GLOBAL || src_code != dst_code)
3603 			{
3604 				if (sub_type == 2)
3605 					UML_ADD(block, I4, I6, extra_s & ~1);
3606 				else
3607 					UML_ADD(block, I4, I6, extra_s);
3608 
3609 				if (DST_GLOBAL)
3610 				{
3611 					UML_STORE(block, (void *)m_core->global_regs, dst_code, I4, SIZE_DWORD, SCALE_x4);
3612 				}
3613 				else
3614 				{
3615 					UML_ADD(block, I2, I3, dst_code);
3616 					UML_AND(block, I5, I2, 0x3f);
3617 					UML_STORE(block, (void *)m_core->local_regs, I5, I4, SIZE_DWORD, SCALE_x4);
3618 				}
3619 			}
3620 			break;
3621 		}
3622 		case 3:
3623 			switch (extra_s & 3)
3624 			{
3625 				case 0: // LDW.N
3626 				{
3627 					UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
3628 					UML_AND(block, I0, I6, ~3);
3629 					UML_CALLH(block, *m_mem_read32);
3630 
3631 					if (SRC_GLOBAL)
3632 					{
3633 						UML_MOV(block, I4, src_code);
3634 						UML_MOV(block, I5, I1);
3635 						generate_set_global_register(block, compiler, desc);
3636 					}
3637 					else
3638 					{
3639 						UML_ADD(block, I2, I3, src_code);
3640 						UML_AND(block, I4, I2, 0x3f);
3641 						UML_STORE(block, (void *)m_core->local_regs, I4, I1, SIZE_DWORD, SCALE_x4);
3642 					}
3643 
3644 					if (DST_GLOBAL != SRC_GLOBAL || src_code != dst_code)
3645 					{
3646 						UML_ADD(block, I4, I6, extra_s);
3647 
3648 						if (DST_GLOBAL)
3649 						{
3650 							UML_STORE(block, (void *)m_core->global_regs, dst_code, I4, SIZE_DWORD, SCALE_x4);
3651 						}
3652 						else
3653 						{
3654 							UML_ADD(block, I2, I3, dst_code);
3655 							UML_AND(block, I5, I2, 0x3f);
3656 							UML_STORE(block, (void *)m_core->local_regs, I5, I4, SIZE_DWORD, SCALE_x4);
3657 						}
3658 					}
3659 					break;
3660 				}
3661 				case 1: // LDD.N
3662 				{
3663 					UML_MOV(block, I7, mem(&m_core->clock_cycles_2));
3664 					UML_AND(block, I0, I6, ~3);
3665 					UML_CALLH(block, *m_mem_read32);
3666 
3667 					if (SRC_GLOBAL)
3668 					{
3669 						UML_MOV(block, I4, src_code);
3670 						UML_MOV(block, I5, I1);
3671 						generate_set_global_register(block, compiler, desc);
3672 
3673 						UML_ADD(block, I0, I0, 4);
3674 						UML_CALLH(block, *m_mem_read32);
3675 
3676 						UML_MOV(block, I4, src_code);
3677 						UML_MOV(block, I5, I1);
3678 						generate_set_global_register(block, compiler, desc);
3679 					}
3680 					else
3681 					{
3682 						UML_ADD(block, I2, I3, src_code);
3683 						UML_AND(block, I4, I2, 0x3f);
3684 						UML_STORE(block, (void *)m_core->local_regs, I4, I1, SIZE_DWORD, SCALE_x4);
3685 
3686 						UML_ADD(block, I0, I0, 4);
3687 						UML_CALLH(block, *m_mem_read32);
3688 
3689 						UML_ADD(block, I2, I3, srcf_code);
3690 						UML_AND(block, I4, I2, 0x3f);
3691 						UML_STORE(block, (void *)m_core->local_regs, I4, I1, SIZE_DWORD, SCALE_x4);
3692 					}
3693 
3694 					if (DST_GLOBAL != SRC_GLOBAL || src_code != dst_code)
3695 					{
3696 						UML_ADD(block, I4, I6, extra_s & ~1);
3697 
3698 						if (DST_GLOBAL)
3699 						{
3700 							UML_STORE(block, (void *)m_core->global_regs, dst_code, I4, SIZE_DWORD, SCALE_x4);
3701 						}
3702 						else
3703 						{
3704 							UML_ADD(block, I2, I3, dst_code);
3705 							UML_AND(block, I5, I2, 0x3f);
3706 							UML_STORE(block, (void *)m_core->local_regs, I5, I4, SIZE_DWORD, SCALE_x4);
3707 						}
3708 					}
3709 					break;
3710 				}
3711 				case 2: // Reserved
3712 					printf("Reserved instruction in generate_ldxx2. PC = %08X\n", desc->pc);
3713 					break;
3714 				case 3: // LDW.S
3715 				{
3716 					UML_MOV(block, I7, mem(&m_core->clock_cycles_3));
3717 
3718 					int below_sp = compiler.m_labelnum++;
3719 					int done = compiler.m_labelnum++;
3720 
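					// LDW.S: an address at or above SP still lives in the on-chip register
					// part of the stack, so it is read straight from local_regs; an address
					// below SP has been spilled and is fetched through m_mem_read32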
3721 					UML_MOV(block, I2, mem(&m_core->global_regs[SP_REGISTER]));
3722 					UML_CMP(block, I6, I2);
3723 					UML_JMPc(block, uml::COND_B, below_sp);
3724 
3725 					UML_ROLAND(block, I0, I6, 30, 0x3f);
3726 					UML_LOAD(block, I1, (void *)m_core->local_regs, I0, SIZE_DWORD, SCALE_x4);
3727 					UML_JMP(block, done);
3728 
3729 					UML_LABEL(block, below_sp);
3730 					UML_AND(block, I0, I6, ~3);
3731 					UML_CALLH(block, *m_mem_read32);
3732 
3733 					UML_LABEL(block, done);
3734 
3735 					if (SRC_GLOBAL)
3736 					{
3737 						UML_MOV(block, I4, src_code);
3738 						UML_MOV(block, I5, I1);
3739 						generate_set_global_register(block, compiler, desc);
3740 					}
3741 					else
3742 					{
3743 						UML_ADD(block, I2, I3, src_code);
3744 						UML_AND(block, I4, I2, 0x3f);
3745 						UML_STORE(block, (void *)m_core->local_regs, I4, I1, SIZE_DWORD, SCALE_x4);
3746 					}
3747 
3748 					if (DST_GLOBAL != SRC_GLOBAL || src_code != dst_code)
3749 					{
3750 						UML_ADD(block, I4, I6, extra_s & ~3);
3751 
3752 						if (DST_GLOBAL)
3753 						{
3754 							UML_STORE(block, (void *)m_core->global_regs, dst_code, I4, SIZE_DWORD, SCALE_x4);
3755 						}
3756 						else
3757 						{
3758 							UML_ADD(block, I2, I3, dst_code);
3759 							UML_AND(block, I5, I2, 0x3f);
3760 							UML_STORE(block, (void *)m_core->local_regs, I5, I4, SIZE_DWORD, SCALE_x4);
3761 						}
3762 					}
3763 					break;
3764 				}
3765 			}
3766 			break;
3767 	}
3768 }
3769 
3770 
3771 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::reg_bank SRC_GLOBAL>
3772 void hyperstone_device::generate_stxx1(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
3773 {
3774 	const uint16_t op = desc->opptr.w[0];
3775 
3776 	uint16_t next_1 = m_pr16(desc->pc + 2);
3777 	const uint16_t sub_type = (next_1 & 0x3000) >> 12;
3778 
3779 	uint32_t extra_s;
3780 	if (next_1 & 0x8000)
3781 	{
3782 		const uint16_t next_2 = m_pr16(desc->pc + 4);
3783 
3784 		extra_s = next_2;
3785 		extra_s |= ((next_1 & 0xfff) << 16);
3786 
3787 		if (next_1 & 0x4000)
3788 			extra_s |= 0xf0000000;
3789 
3790 		UML_ADD(block, DRC_PC, DRC_PC, 4);
3791 	}
3792 	else
3793 	{
3794 		extra_s = next_1 & 0xfff;
3795 
3796 		if (next_1 & 0x4000)
3797 			extra_s |= 0xfffff000;
3798 
3799 		UML_ADD(block, DRC_PC, DRC_PC, 2);
3800 	}
3801 
3802 	generate_check_delay_pc(block, compiler, desc);
3803 
3804 	const uint32_t src_code = op & 0xf;
3805 	const uint32_t dst_code = (op & 0xf0) >> 4;
3806 
3807 	UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
3808 
3809 	if (DST_GLOBAL)
3810 	{
3811 		if (dst_code == SR_REGISTER)
3812 			UML_MOV(block, I0, 0);
3813 		else
3814 			UML_LOAD(block, I0, (void *)m_core->global_regs, dst_code, SIZE_DWORD, SCALE_x4);
3815 	}
3816 	else
3817 	{
3818 		UML_ADD(block, I1, I3, dst_code);
3819 		UML_AND(block, I1, I1, 0x3f);
3820 		UML_LOAD(block, I0, (void *)m_core->local_regs, I1, SIZE_DWORD, SCALE_x4);
3821 	}
3822 
3823 	if (SRC_GLOBAL)
3824 	{
3825 		if (src_code == SR_REGISTER)
3826 			UML_MOV(block, I1, 0);
3827 		else
3828 			UML_LOAD(block, I1, (void *)m_core->global_regs, src_code, SIZE_DWORD, SCALE_x4);
3829 	}
3830 	else
3831 	{
3832 		UML_ADD(block, I1, I3, src_code);
3833 		UML_AND(block, I1, I1, 0x3f);
3834 		UML_LOAD(block, I1, (void *)m_core->local_regs, I1, SIZE_DWORD, SCALE_x4);
3835 	}
3836 
3837 	switch (sub_type)
3838 	{
3839 		case 0: // STBS.D
3840 		case 1: // STBU.D
3841 			// TODO: missing trap on range error for STBS.D
3842 			UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
3843 			UML_ADD(block, I0, I0, extra_s);
3844 			UML_CALLH(block, *m_mem_write8);
3845 			break;
3846 
3847 		case 2: // STHS.D, STHU.D
3848 			// TODO: missing trap on range error with STHS.D
3849 			UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
3850 			UML_ADD(block, I0, I0, extra_s);
3851 			UML_AND(block, I0, I0, ~1);
3852 			UML_CALLH(block, *m_mem_write16);
3853 			break;
3854 
3855 		case 3:
3856 			switch (extra_s & 3)
3857 			{
3858 				case 0: // STW.D
3859 					UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
3860 					UML_ADD(block, I0, I0, extra_s & ~1);
3861 					UML_AND(block, I0, I0, ~3);
3862 					UML_CALLH(block, *m_mem_write32);
3863 					break;
3864 				case 1: // STD.D
3865 				{
3866 					UML_MOV(block, I7, mem(&m_core->clock_cycles_2));
3867 					UML_ADD(block, I0, I0, extra_s & ~1);
3868 					UML_AND(block, I0, I0, ~3);
3869 					UML_CALLH(block, *m_mem_write32);
3870 
3871 					if (SRC_GLOBAL)
3872 					{
3873 						if (src_code == SR_REGISTER)
3874 							UML_MOV(block, I1, 0);
3875 						else
3876 							UML_LOAD(block, I1, (void *)m_core->global_regs, src_code + 1, SIZE_DWORD, SCALE_x4);
3877 					}
3878 					else
3879 					{
3880 						UML_ADD(block, I1, I3, src_code + 1);
3881 						UML_AND(block, I1, I1, 0x3f);
3882 						UML_LOAD(block, I1, (void *)m_core->local_regs, I1, SIZE_DWORD, SCALE_x4);
3883 					}
3884 
3885 					UML_ADD(block, I0, I0, 4);
3886 					UML_CALLH(block, *m_mem_write32);
3887 					break;
3888 				}
3889 				case 2: // STW.IOD
3890 					UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
3891 					UML_ADD(block, I0, I0, extra_s & ~3);
3892 					UML_ROLAND(block, I0, I0, 21, 0x7ffc);
3893 					UML_CALLH(block, *m_io_write32);
3894 					break;
3895 				case 3: // STD.IOD
3896 				{
3897 					UML_MOV(block, I7, mem(&m_core->clock_cycles_2));
3898 					UML_ADD(block, I0, I0, extra_s & ~1); // Is this correct?
3899 					UML_ROLAND(block, I0, I0, 21, 0x7ffc);
3900 					UML_CALLH(block, *m_io_write32);
3901 
3902 					if (SRC_GLOBAL)
3903 					{
3904 						if (src_code == SR_REGISTER)
3905 							UML_MOV(block, I1, 0);
3906 						else
3907 							UML_LOAD(block, I1, (void *)m_core->global_regs, src_code + 1, SIZE_DWORD, SCALE_x4);
3908 					}
3909 					else
3910 					{
3911 						UML_ADD(block, I1, I3, src_code + 1);
3912 						UML_AND(block, I1, I1, 0x3f);
3913 						UML_LOAD(block, I1, (void *)m_core->local_regs, I1, SIZE_DWORD, SCALE_x4);
3914 					}
3915 
3916 					UML_ADD(block, I0, I0, 4);
3917 					UML_CALLH(block, *m_io_write32);
3918 					break;
3919 				}
3920 			}
3921 			break;
3922 	}
3923 }
3924 
3925 
3926 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::reg_bank SRC_GLOBAL>
3927 void hyperstone_device::generate_stxx2(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
3928 {
3929 	const uint16_t op = desc->opptr.w[0];
3930 
3931 	uint16_t next_1 = m_pr16(desc->pc + 2);
3932 	const uint16_t sub_type = (next_1 & 0x3000) >> 12;
3933 
3934 	uint32_t extra_s;
3935 	if (next_1 & 0x8000)
3936 	{
3937 		const uint16_t next_2 = m_pr16(desc->pc + 4);
3938 
3939 		extra_s = next_2;
3940 		extra_s |= ((next_1 & 0xfff) << 16);
3941 
3942 		if (next_1 & 0x4000)
3943 			extra_s |= 0xf0000000;
3944 
3945 		UML_ADD(block, DRC_PC, DRC_PC, 4);
3946 	}
3947 	else
3948 	{
3949 		extra_s = next_1 & 0xfff;
3950 
3951 		if (next_1 & 0x4000)
3952 			extra_s |= 0xfffff000;
3953 
3954 		UML_ADD(block, DRC_PC, DRC_PC, 2);
3955 	}
3956 
3957 	generate_check_delay_pc(block, compiler, desc);
3958 
3959 	const uint32_t src_code = op & 0xf;
3960 	const uint32_t dst_code = (op & 0xf0) >> 4;
3961 
3962 	if (DST_GLOBAL && dst_code < 2)
3963 	{
3964 		printf("Denoted PC or SR in hyperstone_stxx2. PC = %08X\n", desc->pc);
3965 		UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
3966 		return;
3967 	}
3968 
3969 	if (!DST_GLOBAL || !SRC_GLOBAL)
3970 		UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
3971 
3972 	if (DST_GLOBAL)
3973 	{
3974 		UML_LOAD(block, I0, (void *)m_core->global_regs, dst_code, SIZE_DWORD, SCALE_x4);
3975 	}
3976 	else
3977 	{
3978 		UML_ADD(block, I2, I3, dst_code);
3979 		UML_AND(block, I6, I2, 0x3f);
3980 		UML_LOAD(block, I0, (void *)m_core->local_regs, I6, SIZE_DWORD, SCALE_x4);
3981 	}
3982 
3983 	if (SRC_GLOBAL)
3984 	{
3985 		if (src_code == SR_REGISTER)
3986 			UML_MOV(block, I1, 0);
3987 		else
3988 			UML_LOAD(block, I1, (void *)m_core->global_regs, src_code, SIZE_DWORD, SCALE_x4);
3989 	}
3990 	else
3991 	{
3992 		UML_ADD(block, I2, I3, src_code);
3993 		UML_AND(block, I5, I2, 0x3f);
3994 		UML_LOAD(block, I1, (void *)m_core->local_regs, I5, SIZE_DWORD, SCALE_x4);
3995 	}
3996 
3997 	switch (sub_type)
3998 	{
3999 		case 0: // STBS.N
4000 		case 1: // STBU.N
4001 			// TODO: missing trap on range error with STBS.N
4002 			UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
4003 			UML_CALLH(block, *m_mem_write8);
4004 			UML_ADD(block, I0, I0, extra_s);
4005 
4006 			if (DST_GLOBAL)
4007 				UML_STORE(block, (void *)m_core->global_regs, dst_code, I0, SIZE_DWORD, SCALE_x4);
4008 			else
4009 				UML_STORE(block, (void *)m_core->local_regs, I6, I0, SIZE_DWORD, SCALE_x4);
4010 			break;
4011 		case 2: // STHS.N, STHU.N
4012 			// TODO: missing trap on range error with STHS.N
4013 			UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
4014 			UML_MOV(block, I5, I0);
4015 			UML_AND(block, I0, I0, ~1);
4016 			UML_CALLH(block, *m_mem_write16);
4017 			UML_ADD(block, I5, I5, extra_s & ~1);
4018 
4019 			if (DST_GLOBAL)
4020 				UML_STORE(block, (void *)m_core->global_regs, dst_code, I5, SIZE_DWORD, SCALE_x4);
4021 			else
4022 				UML_STORE(block, (void *)m_core->local_regs, I6, I5, SIZE_DWORD, SCALE_x4);
4023 			break;
4024 		case 3:
4025 			switch (extra_s & 3)
4026 			{
4027 				case 0: // STW.N
4028 					UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
4029 					UML_MOV(block, I5, I0);
4030 					UML_AND(block, I0, I0, ~3);
4031 					UML_CALLH(block, *m_mem_write32);
4032 					UML_ADD(block, I5, I5, extra_s);
4033 
4034 					if (DST_GLOBAL)
4035 						UML_STORE(block, (void *)m_core->global_regs, dst_code, I5, SIZE_DWORD, SCALE_x4);
4036 					else
4037 						UML_STORE(block, (void *)m_core->local_regs, I6, I5, SIZE_DWORD, SCALE_x4);
4038 					break;
4039 				case 1: // STD.N
4040 					UML_MOV(block, I7, mem(&m_core->clock_cycles_2));
4041 					UML_MOV(block, I5, I0);
4042 					UML_AND(block, I0, I0, ~3);
4043 					UML_CALLH(block, *m_mem_write32);
4044 
4045 					UML_ADD(block, I5, I5, extra_s & ~1);
4046 					if (DST_GLOBAL)
4047 						UML_STORE(block, (void *)m_core->global_regs, dst_code, I5, SIZE_DWORD, SCALE_x4);
4048 					else
4049 						UML_STORE(block, (void *)m_core->local_regs, I6, I5, SIZE_DWORD, SCALE_x4);
4050 
4051 					if (SRC_GLOBAL)
4052 					{
4053 						UML_LOAD(block, I1, (void *)m_core->global_regs, src_code + 1, SIZE_DWORD, SCALE_x4);
4054 					}
4055 					else
4056 					{
4057 						UML_ADD(block, I2, I3, src_code + 1);
4058 						UML_AND(block, I4, I2, 0x3f);
4059 						UML_LOAD(block, I1, (void *)m_core->local_regs, I4, SIZE_DWORD, SCALE_x4);
4060 					}
4061 
4062 					UML_ADD(block, I0, I0, 4);
4063 					UML_CALLH(block, *m_mem_write32);
4064 					break;
4065 				case 2: // Reserved
4066 					printf("Executed Reserved instruction in hyperstone_stxx2. PC = %08X\n", desc->pc);
4067 					break;
4068 				case 3: // STW.S
4069 				{
4070 					UML_MOV(block, I7, mem(&m_core->clock_cycles_3));
4071 
4072 					int less_than_sp = compiler.m_labelnum++;
4073 					int store_done = compiler.m_labelnum++;
4074 
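					// STW.S: mirror of LDW.S - addresses at or above SP update the on-chip
					// register part of the stack directly, addresses below SP are written
					// through to memory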
4075 					UML_MOV(block, I5, I0);
4076 					UML_CMP(block, I5, mem(&SP));
4077 					UML_JMPc(block, uml::COND_B, less_than_sp);
4078 
4079 					UML_ROLAND(block, I4, I0, 30, 0x3f);
4080 					UML_STORE(block, (void *)m_core->local_regs, I4, I1, SIZE_DWORD, SCALE_x4);
4081 					UML_JMP(block, store_done);
4082 
4083 					UML_LABEL(block, less_than_sp);
4084 					UML_AND(block, I0, I0, ~3);
4085 					UML_CALLH(block, *m_mem_write32);
4086 
4087 					UML_LABEL(block, store_done);
4088 					UML_ADD(block, I5, I5, extra_s & ~3);
4089 					if (DST_GLOBAL)
4090 						UML_STORE(block, (void *)m_core->global_regs, dst_code, I5, SIZE_DWORD, SCALE_x4);
4091 					else
4092 						UML_STORE(block, (void *)m_core->local_regs, I6, I5, SIZE_DWORD, SCALE_x4);
4093 					break;
4094 				}
4095 			}
4096 			break;
4097 	}
4098 }
4099 
4100 
4101 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::reg_bank SRC_GLOBAL, hyperstone_device::sign_mode SIGNED>
4102 void hyperstone_device::generate_mulsu(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
4103 {
4104 	UML_MOV(block, I7, mem(&m_core->clock_cycles_36));
4105 
4106 	uint16_t op = desc->opptr.w[0];
4107 
4108 	const uint32_t dst_code = (op & 0xf0) >> 4;
4109 	const uint32_t dstf_code = dst_code + 1;
4110 	const uint32_t src_code = op & 0xf;
4111 
4112 	if ((SRC_GLOBAL && src_code < 2) || (DST_GLOBAL && dst_code < 2))
4113 	{
4114 		printf("Denoted PC or SR in hyperstone_muls/u instruction. PC = %08X\n", desc->pc);
4115 		return;
4116 	}
4117 
4118 	if (!SRC_GLOBAL || !DST_GLOBAL)
4119 		UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
4120 
4121 	if (SRC_GLOBAL)
4122 		UML_LOAD(block, I0, (void *)m_core->global_regs, src_code, SIZE_DWORD, SCALE_x4);
4123 	else
4124 	{
4125 		UML_ADD(block, I2, I3, src_code);
4126 		UML_AND(block, I4, I2, 0x3f);
4127 		UML_LOAD(block, I0, (void *)m_core->local_regs, I4, SIZE_DWORD, SCALE_x4);
4128 	}
4129 
4130 	if (DST_GLOBAL)
4131 	{
4132 		UML_LOAD(block, I1, (void *)m_core->global_regs, dst_code, SIZE_DWORD, SCALE_x4);
4133 	}
4134 	else
4135 	{
4136 		UML_ADD(block, I2, I3, dst_code);
4137 		UML_AND(block, I6, I2, 0x3f);
4138 		UML_LOAD(block, I1, (void *)m_core->local_regs, I6, SIZE_DWORD, SCALE_x4);
4139 	}
4140 
4141 	if (SIGNED == IS_SIGNED)
4142 		UML_MULS(block, I4, I5, I0, I1);
4143 	else
4144 		UML_MULU(block, I4, I5, I0, I1);
4145 
4146 	UML_OR(block, I2, I4, I5);
4147 	UML_TEST(block, I2, ~0);
4148 	UML_MOVc(block, uml::COND_NZ, I2, 0);
4149 	UML_MOVc(block, uml::COND_Z, I2, Z_MASK);
4150 	UML_ROLINS(block, I2, I5, 3, N_MASK);
4151 	UML_ROLINS(block, DRC_SR, I2, 0, (N_MASK | Z_MASK));
4152 
4153 	if (DST_GLOBAL)
4154 	{
4155 		UML_STORE(block, (void *)m_core->global_regs, dst_code, I5, SIZE_DWORD, SCALE_x4);
4156 		UML_STORE(block, (void *)m_core->global_regs, dstf_code, I4, SIZE_DWORD, SCALE_x4);
4157 	}
4158 	else
4159 	{
4160 		UML_STORE(block, (void *)m_core->local_regs, I6, I5, SIZE_DWORD, SCALE_x4);
4161 		UML_ADD(block, I2, I3, dstf_code);
4162 		UML_AND(block, I5, I2, 0x3f);
4163 		UML_STORE(block, (void *)m_core->local_regs, I5, I4, SIZE_DWORD, SCALE_x4);
4164 	}
4165 
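	// Timing: charge clock_cycles_6, reduced by clock_cycles_2 when both operands fit in
	// 16 bits (signed range for MULS, unsigned range for MULU)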
4166 	int done = compiler.m_labelnum++;
4167 	UML_MOV(block, I7, mem(&m_core->clock_cycles_6));
4168 	if (SIGNED == IS_SIGNED)
4169 	{
4170 		UML_CMP(block, I0, 0xffff8000);
4171 		UML_JMPc(block, uml::COND_B, done);
4172 		UML_CMP(block, I0, 0x00008000);
4173 		UML_JMPc(block, uml::COND_AE, done);
4174 		UML_CMP(block, I1, 0xffff8000);
4175 		UML_JMPc(block, uml::COND_B, done);
4176 		UML_CMP(block, I1, 0x00008000);
4177 		UML_JMPc(block, uml::COND_AE, done);
4178 	}
4179 	else
4180 	{
4181 		UML_CMP(block, I0, 0x0000ffff);
4182 		UML_JMPc(block, uml::COND_A, done);
4183 		UML_CMP(block, I1, 0x0000ffff);
4184 		UML_JMPc(block, uml::COND_A, done);
4185 	}
4186 	UML_SUB(block, I7, I7, mem(&m_core->clock_cycles_2));
4187 
4188 	UML_LABEL(block, done);
4189 }
4190 
4191 
4192 template <hyperstone_device::reg_bank DST_GLOBAL, hyperstone_device::reg_bank SRC_GLOBAL>
4193 void hyperstone_device::generate_mul(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
4194 {
4195 	const uint16_t op = desc->opptr.w[0];
4196 
4197 	generate_check_delay_pc(block, compiler, desc);
4198 
4199 	const uint32_t src_code = op & 0xf;
4200 	const uint32_t dst_code = (op & 0xf0) >> 4;
4201 
4202 	if ((SRC_GLOBAL && src_code < 2) || (DST_GLOBAL && dst_code < 2))
4203 	{
4204 		printf("Denoted PC or SR in hyperstone_mul instruction. PC = %08X\n", desc->pc);
4205 		return;
4206 	}
4207 
4208 	if (!SRC_GLOBAL || !DST_GLOBAL)
4209 		UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
4210 
4211 	if (SRC_GLOBAL)
4212 		UML_LOAD(block, I0, (void *)m_core->global_regs, src_code, SIZE_DWORD, SCALE_x4);
4213 	else
4214 	{
4215 		UML_ADD(block, I2, I3, src_code);
4216 		UML_AND(block, I1, I2, 0x3f);
4217 		UML_LOAD(block, I0, (void *)m_core->local_regs, I1, SIZE_DWORD, SCALE_x4);
4218 	}
4219 
4220 	if (DST_GLOBAL)
4221 		UML_LOAD(block, I1, (void *)m_core->global_regs, dst_code, SIZE_DWORD, SCALE_x4);
4222 	else
4223 	{
4224 		UML_ADD(block, I2, I3, dst_code);
4225 		UML_AND(block, I6, I2, 0x3f);
4226 		UML_LOAD(block, I1, (void *)m_core->local_regs, I6, SIZE_DWORD, SCALE_x4);
4227 	}
4228 
4229 	UML_MULU(block, I2, I3, I0, I1);
4230 
4231 	UML_AND(block, I4, DRC_SR, ~(Z_MASK | N_MASK));
4232 	UML_TEST(block, I2, ~0);
4233 	UML_MOVc(block, uml::COND_Z, I5, Z_MASK);
4234 	UML_MOVc(block, uml::COND_NZ, I5, 0);
4235 	UML_ROLINS(block, I5, I2, 3, N_MASK);
4236 	UML_ROLINS(block, DRC_SR, I5, 0, (Z_MASK | N_MASK));
4237 
4238 	if (DST_GLOBAL)
4239 		UML_STORE(block, (void *)m_core->global_regs, dst_code, I2, SIZE_DWORD, SCALE_x4);
4240 	else
4241 		UML_STORE(block, (void *)m_core->local_regs, I6, I2, SIZE_DWORD, SCALE_x4);
4242 
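	// Timing: clock_cycles_3 base, with clock_cycles_2 added for operands outside the
	// 16-bit range (see the TODO below)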
4243 	UML_MOV(block, I7, mem(&m_core->clock_cycles_3));
4244 	int add_cycles = compiler.m_labelnum++;
4245 	int done = compiler.m_labelnum++;
4246 	UML_CMP(block, I0, 0xffff8000);
4247 	UML_JMPc(block, uml::COND_B, add_cycles);
4248 	UML_CMP(block, I0, 0x8000);
4249 	UML_JMPc(block, uml::COND_AE, add_cycles);
4250 	UML_CMP(block, I1, 0xffff8000);
4251 	UML_JMPc(block, uml::COND_B, add_cycles);
4252 	UML_CMP(block, I1, 0x8000);
4253 	UML_JMPc(block, uml::COND_AE, add_cycles);
4254 	UML_JMP(block, done);
4255 
4256 	UML_LABEL(block, add_cycles);
4257 	UML_ADD(block, I7, I7, mem(&m_core->clock_cycles_2));
4258 
4259 	UML_LABEL(block, done);
4260 	// TODO: proper cycle counts
4261 }
4262 
4263 
4264 template <hyperstone_device::shift_type HI_N, hyperstone_device::reg_bank DST_GLOBAL>
4265 void hyperstone_device::generate_set(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
4266 {
4267 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
4268 
4269 	const uint16_t op = desc->opptr.w[0];
4270 
4271 	generate_check_delay_pc(block, compiler, desc);
4272 
4273 	const uint32_t dst_code = (op & 0xf0) >> 4;
4274 	const uint32_t n = op & 0xf;
4275 
4276 	if (DST_GLOBAL && dst_code < 2)
4277 	{
4278 		return;
4279 	}
4280 
4281 	if (HI_N)
4282 	{
4283 		if (n >= 4 || n == 2)
4284 		{
4285 			static const uint32_t   set_result[16] = { 0, 0, 0,          0, 0xffffffff,  0, 0xffffffff,  0, 0xffffffff,  0, 0xffffffff,  0, 0xffffffff,  0, 0xffffffff,  0 };
4286 			static const uint32_t unset_result[16] = { 0, 0, 0xffffffff, 0,  0, 0xffffffff,  0, 0xffffffff,  0, 0xffffffff,  0, 0xffffffff,  0, 0xffffffff,  0, 0xffffffff };
4287 			static const uint32_t mask[16] = { 0, 0, 0, 0, (N_MASK | Z_MASK), (N_MASK | Z_MASK), N_MASK, N_MASK,
4288 				(C_MASK | Z_MASK), (C_MASK | Z_MASK), C_MASK, C_MASK, Z_MASK, Z_MASK, V_MASK, V_MASK };
4289 
4290 			UML_TEST(block, DRC_SR, mask[n]);
4291 			UML_MOVc(block, uml::COND_NZ, I0, set_result[n]);
4292 			UML_MOVc(block, uml::COND_Z, I0, unset_result[n]);
4293 		}
4294 		else
4295 		{
4296 			printf("Used reserved N value (%d) in hyperstone_set. PC = %08X\n", n, desc->pc);
4297 			return;
4298 		}
4299 	}
4300 	else
4301 	{
4302 		if (n == 0)
4303 		{
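			// n == 0: build the result from SP and the FP field of SR - (SP & ~0x1ff)
			// with FP << 2 inserted into bits 8..2; bit 0 is set when SP bit 8 is set
			// and SR bit 31 is clear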
4304 			int no_low_bit = compiler.m_labelnum++;
4305 			UML_MOV(block, I1, mem(&m_core->global_regs[SP_REGISTER]));
4306 			UML_AND(block, I0, I1, 0xfffffe00);
4307 			UML_ROLINS(block, I0, DRC_SR, 9, 0x000001fc);
4308 			UML_TEST(block, I1, 0x100);
4309 			UML_JMPc(block, uml::COND_Z, no_low_bit);
4310 			UML_TEST(block, DRC_SR, 0x80000000);
4311 			UML_JMPc(block, uml::COND_NZ, no_low_bit);
4312 			UML_OR(block, I0, I0, 1);
4313 			UML_LABEL(block, no_low_bit);
4314 		}
4315 		else if (n >= 2)
4316 		{
4317 			static const uint32_t   set_result[16] = { 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0 };
4318 			static const uint32_t unset_result[16] = { 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 };
4319 			static const uint32_t mask[16] = { 0, 0, 0, 0, (N_MASK | Z_MASK), (N_MASK | Z_MASK), N_MASK, N_MASK,
4320 				(C_MASK | Z_MASK), (C_MASK | Z_MASK), C_MASK, C_MASK, Z_MASK, Z_MASK, V_MASK, V_MASK };
4321 
4322 			UML_TEST(block, DRC_SR, mask[n]);
4323 			UML_MOVc(block, uml::COND_NZ, I0, set_result[n]);
4324 			UML_MOVc(block, uml::COND_Z, I0, unset_result[n]);
4325 		}
4326 		else
4327 		{
4328 			printf("Used reserved N value (%d) in hyperstone_set. PC = %08X\n", n, desc->pc);
4329 			return;
4330 		}
4331 	}
4332 
4333 	if (DST_GLOBAL)
4334 	{
4335 		UML_STORE(block, (void *)m_core->global_regs, dst_code, I0, SIZE_DWORD, SCALE_x4);
4336 	}
4337 	else
4338 	{
4339 		UML_ROLAND(block, I1, DRC_SR, 7, 0x7f);
4340 		UML_ADD(block, I2, I1, dst_code);
4341 		UML_AND(block, I3, I2, 0x3f);
4342 		UML_STORE(block, (void *)m_core->local_regs, I3, I0, SIZE_DWORD, SCALE_x4);
4343 	}
4344 }
4345 
4346 
4347 template <hyperstone_device::reg_bank SRC_GLOBAL>
4348 void hyperstone_device::generate_ldwr(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
4349 {
4350 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
4351 
4352 	const uint16_t op = desc->opptr.w[0];
4353 
4354 	generate_check_delay_pc(block, compiler, desc);
4355 
4356 	const uint32_t src_code = op & 0xf;
4357 	const uint32_t dst_code = (op & 0xf0) >> 4;
4358 
4359 	UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
4360 	UML_ADD(block, I2, I3, dst_code);
4361 	UML_AND(block, I4, I2, 0x3f);
4362 	UML_LOAD(block, I0, (void *)m_core->local_regs, I4, SIZE_DWORD, SCALE_x4);
4363 	UML_AND(block, I0, I0, ~3);
4364 	UML_CALLH(block, *m_mem_read32);
4365 
4366 	if (SRC_GLOBAL)
4367 	{
4368 		if (src_code < 2)
4369 		{
4370 			UML_MOV(block, I4, src_code);
4371 			UML_MOV(block, I5, I1);
4372 			generate_set_global_register(block, compiler, desc);
4373 			if (src_code == PC_REGISTER)
4374 			{
4375 				generate_branch(block, desc->targetpc, desc);
4376 			}
4377 		}
4378 		else
4379 		{
4380 			UML_STORE(block, (void *)m_core->global_regs, src_code, I1, SIZE_DWORD, SCALE_x4);
4381 		}
4382 	}
4383 	else
4384 	{
4385 		UML_ADD(block, I2, I3, src_code);
4386 		UML_AND(block, I4, I2, 0x3f);
4387 		UML_STORE(block, (void *)m_core->local_regs, I4, I1, SIZE_DWORD, SCALE_x4);
4388 	}
4389 }
4390 
4391 
4392 template <hyperstone_device::reg_bank SRC_GLOBAL>
4393 void hyperstone_device::generate_lddr(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
4394 {
4395 	UML_MOV(block, I7, mem(&m_core->clock_cycles_2));
4396 
4397 	uint16_t op = desc->opptr.w[0];
4398 
4399 	generate_check_delay_pc(block, compiler, desc);
4400 
4401 	const uint32_t src_code = op & 0xf;
4402 	const uint32_t dst_code = (op & 0xf0) >> 4;
4403 
4404 	UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
4405 	UML_ADD(block, I2, I3, dst_code);
4406 	UML_AND(block, I1, I2, 0x3f);
4407 	UML_LOAD(block, I0, (void *)m_core->local_regs, I1, SIZE_DWORD, SCALE_x4);
4408 	UML_AND(block, I0, I0, ~3);
4409 	UML_CALLH(block, *m_mem_read32);
4410 
4411 	if (SRC_GLOBAL)
4412 	{
4413 		UML_MOV(block, I4, src_code);
4414 		UML_MOV(block, I5, I1);
4415 		generate_set_global_register(block, compiler, desc);
4416 
4417 		UML_ADD(block, I0, I0, 4);
4418 		UML_CALLH(block, *m_mem_read32);
4419 
4420 		UML_MOV(block, I4, src_code + 1);
4421 		UML_MOV(block, I5, I1);
4422 		generate_set_global_register(block, compiler, desc);
4423 	}
4424 	else
4425 	{
4426 		UML_ADD(block, I2, I3, src_code);
4427 		UML_AND(block, I4, I2, 0x3f);
4428 		UML_STORE(block, (void *)m_core->local_regs, I4, I1, SIZE_DWORD, SCALE_x4);
4429 
4430 		UML_ADD(block, I0, I0, 4);
4431 		UML_CALLH(block, *m_mem_read32);
4432 
4433 		UML_ADD(block, I2, I3, src_code + 1);
4434 		UML_AND(block, I4, I2, 0x3f);
4435 		UML_STORE(block, (void *)m_core->local_regs, I4, I1, SIZE_DWORD, SCALE_x4);
4436 	}
4437 }
4438 
4439 
4440 template <hyperstone_device::reg_bank SRC_GLOBAL>
4441 void hyperstone_device::generate_ldwp(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
4442 {
4443 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
4444 
4445 	const uint16_t op = desc->opptr.w[0];
4446 
4447 	generate_check_delay_pc(block, compiler, desc);
4448 
4449 	const uint32_t src_code = op & 0xf;
4450 	const uint32_t dst_code = (op & 0xf0) >> 4;
4451 
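	// LDW.P: load through the address held in the destination local register, then write
	// the post-incremented address (kept in I3) back to that register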
4452 	UML_ROLAND(block, I0, DRC_SR, 7, 0x7f);
4453 	UML_ADD(block, I1, I0, dst_code);
4454 	UML_AND(block, I2, I1, 0x3f);
4455 	UML_LOAD(block, I0, (void *)m_core->local_regs, I2, SIZE_DWORD, SCALE_x4);
4456 
4457 	UML_ADD(block, I3, I0, 4);
4458 	UML_AND(block, I0, I0, ~3);
4459 	UML_CALLH(block, *m_mem_read32);
4460 
4461 	if (SRC_GLOBAL)
4462 	{
4463 		UML_MOV(block, I4, src_code);
4464 		UML_MOV(block, I5, I1);
4465 		generate_set_global_register(block, compiler, desc);
4466 
4467 		UML_ROLAND(block, I0, DRC_SR, 7, 0x7f);
4468 		UML_ADD(block, I1, I0, dst_code);
4469 		UML_AND(block, I2, I1, 0x3f);
4470 		UML_STORE(block, (void *)m_core->local_regs, I2, I3, SIZE_DWORD, SCALE_x4);
4471 
4472 		if (src_code == PC_REGISTER)
4473 			generate_branch(block, desc->targetpc, desc);
4474 	}
4475 	else
4476 	{
4477 		UML_ROLAND(block, I0, DRC_SR, 7, 0x7f);
4478 		UML_ADD(block, I4, I0, src_code);
4479 		UML_AND(block, I5, I4, 0x3f);
4480 		UML_STORE(block, (void *)m_core->local_regs, I5, I1, SIZE_DWORD, SCALE_x4);
4481 
4482 		if (src_code != dst_code)
4483 		{
4484 			UML_ADD(block, I4, I0, dst_code);
4485 			UML_AND(block, I5, I4, 0x3f);
4486 			UML_STORE(block, (void *)m_core->local_regs, I5, I3, SIZE_DWORD, SCALE_x4);
4487 		}
4488 	}
4489 }
4490 
4491 
4492 template <hyperstone_device::reg_bank SRC_GLOBAL>
4493 void hyperstone_device::generate_lddp(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
4494 {
4495 	UML_MOV(block, I7, mem(&m_core->clock_cycles_2));
4496 
4497 	const uint16_t op = desc->opptr.w[0];
4498 
4499 	generate_check_delay_pc(block, compiler, desc);
4500 
4501 	const uint32_t src_code = op & 0xf;
4502 	const uint32_t dst_code = (op & 0xf0) >> 4;
4503 
4504 	UML_ROLAND(block, I0, DRC_SR, 7, 0x7f);
4505 	UML_ADD(block, I1, I0, dst_code);
4506 	UML_AND(block, I2, I1, 0x3f);
4507 	UML_LOAD(block, I0, (void *)m_core->local_regs, I2, SIZE_DWORD, SCALE_x4);
4508 
4509 	UML_ADD(block, I3, I0, 8);
4510 	UML_AND(block, I0, I0, ~3);
4511 	UML_CALLH(block, *m_mem_read32);
4512 	UML_MOV(block, I2, I1);             // I2: dreg[0]
4513 	UML_ADD(block, I0, I0, 4);
4514 	UML_CALLH(block, *m_mem_read32);    // I1: dreg[4]
4515 
4516 	if (SRC_GLOBAL)
4517 	{
4518 		UML_MOV(block, I4, src_code);
4519 		UML_MOV(block, I5, I2);
4520 		generate_set_global_register(block, compiler, desc);
4521 		UML_MOV(block, I4, src_code + 1);
4522 		UML_MOV(block, I5, I1);
4523 		generate_set_global_register(block, compiler, desc);
4524 
4525 		UML_ROLAND(block, I0, DRC_SR, 7, 0x7f);
4526 		UML_ADD(block, I1, I0, dst_code);
4527 		UML_AND(block, I2, I1, 0x3f);
4528 		UML_STORE(block, (void *)m_core->local_regs, I2, I3, SIZE_DWORD, SCALE_x4);
4529 
4530 		if (src_code == PC_REGISTER || (src_code + 1) == PC_REGISTER)
4531 			generate_branch(block, desc->targetpc, desc);
4532 	}
4533 	else
4534 	{
4535 		UML_ROLAND(block, I0, DRC_SR, 7, 0x7f);
4536 		UML_ADD(block, I4, I0, src_code);
4537 		UML_AND(block, I5, I4, 0x3f);
4538 		UML_STORE(block, (void *)m_core->local_regs, I5, I2, SIZE_DWORD, SCALE_x4);
4539 		UML_ADD(block, I4, I0, src_code + 1);
4540 		UML_AND(block, I5, I4, 0x3f);
4541 		UML_STORE(block, (void *)m_core->local_regs, I5, I1, SIZE_DWORD, SCALE_x4);
4542 
4543 		if (src_code != dst_code && (src_code + 1) != dst_code)
4544 		{
4545 			UML_ADD(block, I4, I0, dst_code);
4546 			UML_AND(block, I5, I4, 0x3f);
4547 			UML_STORE(block, (void *)m_core->local_regs, I5, I3, SIZE_DWORD, SCALE_x4);
4548 		}
4549 	}
4550 }
4551 
4552 
4553 template <hyperstone_device::reg_bank SRC_GLOBAL>
4554 void hyperstone_device::generate_stwr(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
4555 {
4556 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
4557 
4558 	const uint16_t op = desc->opptr.w[0];
4559 
4560 	generate_check_delay_pc(block, compiler, desc);
4561 
4562 	const uint32_t src_code = op & 0xf;
4563 	const uint32_t dst_code = (op & 0xf0) >> 4;
4564 
4565 	UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
4566 
4567 	if (SRC_GLOBAL)
4568 	{
4569 		if (src_code == SR_REGISTER)
4570 			UML_MOV(block, I1, 0);
4571 		else
4572 			UML_LOAD(block, I1, (void *)m_core->global_regs, src_code, SIZE_DWORD, SCALE_x4);
4573 	}
4574 	else
4575 	{
4576 		UML_ADD(block, I2, I3, src_code);
4577 		UML_AND(block, I2, I2, 0x3f);
4578 		UML_LOAD(block, I1, (void *)m_core->local_regs, I2, SIZE_DWORD, SCALE_x4);
4579 	}
4580 
4581 	UML_ADD(block, I2, I3, dst_code);
4582 	UML_AND(block, I4, I2, 0x3f);
4583 	UML_LOAD(block, I0, (void *)m_core->local_regs, I4, SIZE_DWORD, SCALE_x4);
4584 	UML_AND(block, I0, I0, ~3);
4585 	UML_CALLH(block, *m_mem_write32);
4586 }
4587 
4588 
4589 template <hyperstone_device::reg_bank SRC_GLOBAL>
4590 void hyperstone_device::generate_stdr(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
4591 {
4592 	UML_MOV(block, I7, mem(&m_core->clock_cycles_2));
4593 
4594 	const uint16_t op = desc->opptr.w[0];
4595 
4596 	generate_check_delay_pc(block, compiler, desc);
4597 
4598 	const uint32_t src_code = op & 0xf;
4599 	const uint32_t dst_code = (op & 0xf0) >> 4;
4600 
4601 	UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
4602 
4603 	if (SRC_GLOBAL)
4604 	{
4605 		if (src_code == SR_REGISTER)
4606 			UML_MOV(block, I1, 0);
4607 		else
4608 			UML_LOAD(block, I1, (void *)m_core->global_regs, src_code, SIZE_DWORD, SCALE_x4);
4609 
4610 		if ((src_code + 1) == SR_REGISTER)
4611 			UML_MOV(block, I2, 0);
4612 		else
4613 			UML_LOAD(block, I2, (void *)m_core->global_regs, src_code + 1, SIZE_DWORD, SCALE_x4);
4614 	}
4615 	else
4616 	{
4617 		UML_ADD(block, I4, I3, src_code);
4618 		UML_AND(block, I5, I4, 0x3f);
4619 		UML_LOAD(block, I1, (void *)m_core->local_regs, I5, SIZE_DWORD, SCALE_x4);
4620 		UML_ADD(block, I4, I3, src_code + 1);
4621 		UML_AND(block, I5, I4, 0x3f);
4622 		UML_LOAD(block, I2, (void *)m_core->local_regs, I5, SIZE_DWORD, SCALE_x4);
4623 	}
4624 
4625 	UML_ADD(block, I4, I3, dst_code);
4626 	UML_AND(block, I5, I4, 0x3f);
4627 	UML_LOAD(block, I0, (void *)m_core->local_regs, I5, SIZE_DWORD, SCALE_x4);
4628 	UML_AND(block, I0, I0, ~3);
4629 
4630 	UML_CALLH(block, *m_mem_write32);
4631 	UML_ADD(block, I0, I0, 4);
4632 	UML_MOV(block, I1, I2);
4633 	UML_CALLH(block, *m_mem_write32);
4634 }
4635 
4636 
4637 template <hyperstone_device::reg_bank SRC_GLOBAL>
4638 void hyperstone_device::generate_stwp(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
4639 {
4640 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
4641 
4642 	const uint16_t op = desc->opptr.w[0];
4643 
4644 	generate_check_delay_pc(block, compiler, desc);
4645 
4646 	const uint32_t src_code = op & 0xf;
4647 	const uint32_t dst_code = (op & 0xf0) >> 4;
4648 
4649 	UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
4650 
4651 	if (SRC_GLOBAL)
4652 	{
4653 		if (src_code == SR_REGISTER)
4654 			UML_MOV(block, I1, 0);
4655 		else
4656 			UML_LOAD(block, I1, (void *)m_core->global_regs, src_code, SIZE_DWORD, SCALE_x4);
4657 	}
4658 	else
4659 	{
4660 		UML_ADD(block, I2, I3, src_code);
4661 		UML_AND(block, I0, I2, 0x3f);
4662 		UML_LOAD(block, I1, (void *)m_core->local_regs, I0, SIZE_DWORD, SCALE_x4);
4663 	}
4664 
4665 	UML_ADD(block, I2, I3, dst_code);
4666 	UML_AND(block, I4, I2, 0x3f);
4667 	UML_LOAD(block, I5, (void *)m_core->local_regs, I4, SIZE_DWORD, SCALE_x4);
4668 	UML_AND(block, I0, I5, ~3);
4669 	UML_CALLH(block, *m_mem_write32);
4670 	UML_ADD(block, I2, I5, 4);
4671 	UML_STORE(block, (void *)m_core->local_regs, I4, I2, SIZE_DWORD, SCALE_x4);
4672 }
4673 
4674 
4675 template <hyperstone_device::reg_bank SRC_GLOBAL>
4676 void hyperstone_device::generate_stdp(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
4677 {
4678 	UML_MOV(block, I7, mem(&m_core->clock_cycles_2));
4679 
4680 	const uint16_t op = desc->opptr.w[0];
4681 
4682 	generate_check_delay_pc(block, compiler, desc);
4683 
4684 	const uint32_t src_code = op & 0xf;
4685 	const uint32_t dst_code = (op & 0xf0) >> 4;
4686 
4687 	UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
4688 	UML_ADD(block, I2, I3, dst_code);
4689 	UML_AND(block, I4, I2, 0x3f); // I4 = dst_code
4690 	UML_LOAD(block, I0, (void *)m_core->local_regs, I4, SIZE_DWORD, SCALE_x4); // I0 = dreg
4691 
4692 	if (SRC_GLOBAL)
4693 	{
4694 		if (src_code == SR_REGISTER)
4695 			UML_MOV(block, I1, 0);
4696 		else
4697 			UML_LOAD(block, I1, (void *)m_core->global_regs, src_code, SIZE_DWORD, SCALE_x4);
4698 
4699 		UML_CALLH(block, *m_mem_write32);
4700 
4701 		if ((src_code + 1) == SR_REGISTER)
4702 			UML_MOV(block, I1, 0);
4703 		else
4704 			UML_LOAD(block, I1, (void *)m_core->global_regs, src_code + 1, SIZE_DWORD, SCALE_x4);
4705 
4706 		UML_ADD(block, I2, I0, 4);
4707 		UML_AND(block, I0, I2, ~3);
4708 		UML_CALLH(block, *m_mem_write32);
4709 		UML_ADD(block, I2, I2, 4);
4710 		UML_STORE(block, (void *)m_core->local_regs, I4, I2, SIZE_DWORD, SCALE_x4);
4711 	}
4712 	else
4713 	{
4714 		int srcf_dst_equal = compiler.m_labelnum++;
4715 		int done = compiler.m_labelnum++;
4716 
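		// The pointer write-back (dreg + 8) is stored into Ld before the second source
		// word is read, so when Lsf aliases Ld the updated pointer value is what gets
		// written out as the second word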
4717 		UML_MOV(block, I6, I0);
4718 		UML_AND(block, I0, I0, ~3);
4719 		UML_ADD(block, I2, I3, src_code);
4720 		UML_AND(block, I5, I2, 0x3f);
4721 		UML_LOAD(block, I1, (void *)m_core->local_regs, I5, SIZE_DWORD, SCALE_x4);
4722 		UML_CALLH(block, *m_mem_write32);
4723 
4724 		UML_ADD(block, I2, I5, 1);
4725 		UML_AND(block, I5, I2, 0x3f);
4726 		UML_ADD(block, I1, I6, 8);
4727 		UML_CMP(block, I4, I5);
4728 		UML_JMPc(block, uml::COND_E, srcf_dst_equal);
4729 
4730 		UML_STORE(block, (void *)m_core->local_regs, I4, I1, SIZE_DWORD, SCALE_x4);
4731 		UML_LOAD(block, I1, (void *)m_core->local_regs, I5, SIZE_DWORD, SCALE_x4);
4732 		UML_JMP(block, done);
4733 
4734 		UML_LABEL(block, srcf_dst_equal);
4735 		UML_STORE(block, (void *)m_core->local_regs, I4, I1, SIZE_DWORD, SCALE_x4);
4736 
4737 		UML_LABEL(block, done);
4738 		UML_ADD(block, I0, I0, 4);
4739 		UML_CALLH(block, *m_mem_write32);
4740 	}
4741 }
4742 
4743 
4744 template <hyperstone_device::branch_condition CONDITION, hyperstone_device::condition_set COND_SET>
4745 void hyperstone_device::generate_b(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
4746 {
4747 	static const uint32_t condition_masks[6] = { V_MASK, Z_MASK, C_MASK, C_MASK | Z_MASK, N_MASK, N_MASK | Z_MASK };
4748 
4749 	int done = compiler.m_labelnum++;
4750 	uml::condition_t condition = COND_SET ? uml::COND_Z : uml::COND_NZ;
4751 
4752 	int skip;
4753 	UML_TEST(block, DRC_SR, condition_masks[CONDITION]);
4754 	UML_JMPc(block, condition, skip = compiler.m_labelnum++);
4755 	generate_br(block, compiler, desc);
4756 
4757 	UML_JMP(block, done);
4758 
4759 	UML_LABEL(block, skip);
4760 	generate_ignore_pcrel(block, desc);
4761 	generate_check_delay_pc(block, compiler, desc);
4762 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
4763 
4764 	UML_LABEL(block, done);
4765 }
4766 
4767 
4768 void hyperstone_device::generate_br(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
4769 {
4770 	UML_MOV(block, I7, mem(&m_core->clock_cycles_2));
4771 
4772 	generate_decode_pcrel(block, desc);
4773 	generate_check_delay_pc(block, compiler, desc);
4774 
4775 	UML_ADD(block, DRC_PC, DRC_PC, I1);
4776 	UML_AND(block, DRC_SR, DRC_SR, ~M_MASK);
4777 
4778 	generate_branch(block, desc->targetpc, desc);
4779 	// TODO: correct cycle count
4780 }
4781 
4782 
4783 template <hyperstone_device::branch_condition CONDITION, hyperstone_device::condition_set COND_SET>
4784 void hyperstone_device::generate_db(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
4785 {
4786 	static const uint32_t condition_masks[6] = { V_MASK, Z_MASK, C_MASK, C_MASK | Z_MASK, N_MASK, N_MASK | Z_MASK };
4787 	int skip_jump = compiler.m_labelnum++;
4788 	int done = compiler.m_labelnum++;
4789 
4790 	UML_TEST(block, DRC_SR, condition_masks[CONDITION]);
4791 	if (COND_SET)
4792 		UML_JMPc(block, uml::COND_Z, skip_jump);
4793 	else
4794 		UML_JMPc(block, uml::COND_NZ, skip_jump);
4795 
4796 	generate_dbr(block, compiler, desc);
4797 	UML_JMP(block, done);
4798 
4799 	UML_LABEL(block, skip_jump);
4800 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
4801 	generate_ignore_pcrel(block, desc);
4802 	generate_check_delay_pc(block, compiler, desc);
4803 
4804 	UML_LABEL(block, done);
4805 }
4806 
4807 
4808 void hyperstone_device::generate_dbr(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
4809 {
4810 	UML_MOV(block, I7, mem(&m_core->clock_cycles_2));
4811 
4812 	generate_decode_pcrel(block, desc);
4813 	generate_check_delay_pc(block, compiler, desc);
4814 
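	// Delayed branch: arm the delay slot so the next instruction's generate_check_delay_pc
	// substitutes delay_pc, and raise intblock to defer interrupt checks across the slot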
4815 	UML_MOV(block, mem(&m_core->delay_slot), 1);
4816 	UML_ADD(block, mem(&m_core->delay_pc), DRC_PC, I1);
4817 	UML_MOV(block, mem(&m_core->intblock), 3);
4818 }
4819 
4820 
4821 void hyperstone_device::generate_frame(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
4822 {
4823 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
4824 
4825 	const uint16_t op = desc->opptr.w[0];
4826 
4827 	generate_check_delay_pc(block, compiler, desc);
4828 
4829 	UML_ROLAND(block, I1, DRC_SR, 7, 0x7f);
4830 	UML_SUB(block, I1, I1, op & 0xf);
4831 	UML_ROLINS(block, DRC_SR, I1, 25, 0xfe000000);  // SET_FP(GET_FP - SRC_CODE)
4832 	UML_ROLINS(block, DRC_SR, op, 17, 0x01e00000);  // SET_FL(DST_CODE)
4833 	UML_AND(block, DRC_SR, DRC_SR, ~M_MASK);        // SET_M(0)
4834 
4835 	UML_MOV(block, I0, mem(&SP));
4836 	UML_MOV(block, I6, I0);
4837 	UML_AND(block, I0, I0, ~3);
4838 	const uint32_t dst_code = (op & 0xf0) >> 4;
4839 	UML_ADD(block, I1, I1, dst_code ? dst_code : 16);
4840 	UML_ROLAND(block, I2, I0, 30, 0x7f);
4841 	UML_ADD(block, I2, I2, (64 - 10));
4842 	UML_SUB(block, I3, I2, I1);
4843 	UML_SEXT(block, I3, I3, SIZE_BYTE);             // difference = ((SP & 0x1fc) >> 2) + (64 - 10) - ((GET_FP - SRC_CODE) + GET_FL)
4844 
4845 	int diff_in_range, done;
4846 	UML_CMP(block, I3, -64);
4847 	UML_JMPc(block, uml::COND_L, done = compiler.m_labelnum++);
4848 	UML_CMP(block, I3, 64);
4849 	UML_JMPc(block, uml::COND_L, diff_in_range = compiler.m_labelnum++);
4850 	UML_OR(block, I3, I3, 0xffffff80);
4851 	UML_LABEL(block, diff_in_range);
4852 
4853 	UML_CMP(block, I0, mem(&UB));
4854 	UML_SETc(block, uml::COND_AE, I4);
4855 	UML_CMP(block, I3, 0);
4856 	UML_JMPc(block, uml::COND_GE, done);
4857 
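	// Spill local registers to the memory stack a word at a time until the negative
	// difference reaches zero, then update SP and raise a frame error if the stack
	// pointer had already reached the upper bound (UB)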
4858 	int push_next;
4859 	UML_LABEL(block, push_next = compiler.m_labelnum++);
4860 	UML_ROLAND(block, I2, I0, 30, 0x3f);
4861 	UML_LOAD(block, I1, (void *)m_core->local_regs, I2, SIZE_DWORD, SCALE_x4);
4862 	UML_CALLH(block, *m_mem_write32);
4863 	UML_ADD(block, I0, I0, 4);
4864 	UML_ADD(block, I6, I6, 4);
4865 	UML_ADD(block, I3, I3, 1);
4866 
4867 	UML_TEST(block, I3, ~0);
4868 	UML_JMPc(block, uml::COND_NZ, push_next);
4869 
4870 	UML_MOV(block, mem(&SP), I6);
4871 
4872 	UML_TEST(block, I4, ~0);
4873 	UML_EXHc(block, uml::COND_NZ, *m_exception[EXCEPTION_FRAME_ERROR], 0);
4874 
4875 	UML_LABEL(block, done);
4876 }
4877 
4878 template <hyperstone_device::reg_bank SRC_GLOBAL>
4879 void hyperstone_device::generate_call(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
4880 {
4881 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
4882 	UML_ROLINS(block, DRC_SR, ((desc->length >> 1) << ILC_SHIFT), 0, ILC_MASK);
4883 
4884 	uint16_t op = desc->opptr.w[0];
4885 	uint16_t imm_1 = m_pr16(desc->pc + 2);
4886 
4887 	int32_t extra_s = 0;
4888 
4889 	if (imm_1 & 0x8000)
4890 	{
4891 		uint16_t imm_2 = m_pr16(desc->pc + 4);
4892 
4893 		extra_s = imm_2;
4894 		extra_s |= ((imm_1 & 0x3fff) << 16);
4895 
4896 		if (imm_1 & 0x4000)
4897 			extra_s |= 0xc0000000;
4898 
4899 		UML_ADD(block, DRC_PC, DRC_PC, 4);
4900 	}
4901 	else
4902 	{
4903 		extra_s = imm_1 & 0x3fff;
4904 
4905 		if (imm_1 & 0x4000)
4906 			extra_s |= 0xffffc000;
4907 
4908 		UML_ADD(block, DRC_PC, DRC_PC, 2);
4909 	}
4910 
4911 	UML_MOV(block, I1, extra_s);
4912 
4913 	generate_check_delay_pc(block, compiler, desc);
4914 
4915 	const uint32_t src_code = op & 0xf;
4916 	uint32_t dst_code = (op & 0xf0) >> 4;
4917 
4918 	if (!dst_code)
4919 		dst_code = 16;
4920 
4921 	UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
4922 
4923 	if (SRC_GLOBAL)
4924 	{
4925 		if (src_code == SR_REGISTER)
4926 			UML_MOV(block, I2, 0);
4927 		else
4928 			UML_LOAD(block, I2, (void *)m_core->global_regs, src_code, SIZE_DWORD, SCALE_x4);
4929 	}
4930 	else
4931 	{
4932 		UML_ADD(block, I4, I3, src_code);
4933 		UML_AND(block, I5, I4, 0x3f);
4934 		UML_LOAD(block, I2, (void *)m_core->local_regs, I5, SIZE_DWORD, SCALE_x4);
4935 	}
4936 
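	// Save the return state into the new frame: Ld receives the return PC with the S flag
	// in bit 0, Ld + 1 receives SR; FP is then advanced to the new frame, FL set to 6,
	// M cleared, and execution continues at the call target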
4937 	UML_AND(block, I4, DRC_PC, ~1);
4938 	UML_ROLINS(block, I4, DRC_SR, 32-S_SHIFT, 1);
4939 
4940 	UML_ADD(block, I1, I3, dst_code);
4941 	UML_AND(block, I6, I1, 0x3f);
4942 	UML_STORE(block, (void *)m_core->local_regs, I6, I4, SIZE_DWORD, SCALE_x4);
4943 
4944 	UML_ADD(block, I4, I6, 1);
4945 	UML_AND(block, I5, I4, 0x3f);
4946 	UML_STORE(block, (void *)m_core->local_regs, I5, DRC_SR, SIZE_DWORD, SCALE_x4);
4947 
4948 	UML_ROLINS(block, DRC_SR, I1, 25, 0xfe000000);
4949 	UML_ROLINS(block, DRC_SR, 6, 21, 0x01e00000);
4950 	UML_AND(block, DRC_SR, DRC_SR, ~M_MASK);
4951 
4952 	UML_ADD(block, DRC_PC, I2, extra_s & ~1);
4953 
4954 	UML_MOV(block, mem(&m_core->intblock), 2);
4955 
4956 	generate_branch(block, desc->targetpc, nullptr);
4957 	// TODO: add interrupt locks, errors, ...
4958 }
4959 
4960 
4961 
4962 void hyperstone_device::generate_trap_op(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
4963 {
4964 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1)); // TODO: the cycle count can vary with latency
4965 
4966 	static const uint32_t conditions[16] = {
4967 		0, 0, 0, 0, N_MASK | Z_MASK, N_MASK | Z_MASK, N_MASK, N_MASK, C_MASK | Z_MASK, C_MASK | Z_MASK, C_MASK, C_MASK, Z_MASK, Z_MASK, V_MASK, 0
4968 	};
4969 	static const bool trap_if_set[16] = {
4970 		false, false, false, false, true, false, true, false, true, false, true, false, true, false, true, false
4971 	};
4972 
4973 	uint16_t op = desc->opptr.w[0];
4974 
4975 	generate_check_delay_pc(block, compiler, desc);
4976 
4977 	const uint8_t trapno = (op & 0xfc) >> 2;
4978 	const uint8_t code = ((op & 0x300) >> 6) | (op & 0x03);
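	// 'code' (opcode bits 9..8 and 1..0) indexes the condition tables above; entries with
	// a zero mask never take the skip branch, giving the unconditional trap forms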
4979 
4980 	UML_TEST(block, DRC_SR, conditions[code]);
4981 
4982 	int skip_trap = compiler.m_labelnum++;
4983 	if (trap_if_set[code])
4984 		UML_JMPc(block, uml::COND_Z, skip_trap);
4985 	else
4986 		UML_JMPc(block, uml::COND_NZ, skip_trap);
4987 
4988 	UML_ROLINS(block, DRC_SR, ((desc->length >> 1) << ILC_SHIFT), 0, ILC_MASK);
4989 	generate_get_trap_addr(block, compiler.m_labelnum, trapno);
4990 	generate_trap_exception_or_int<IS_TRAP>(block);
4991 
4992 	UML_LABEL(block, skip_trap);
4993 }
4994 
4995 void hyperstone_device::generate_extend(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
4996 {
4997 	UML_MOV(block, I7, mem(&m_core->clock_cycles_1));
4998 
4999 	uint16_t op = desc->opptr.w[0];
5000 
5001 	uint16_t func = m_pr16(desc->pc + 2);
5002 	UML_ADD(block, DRC_PC, DRC_PC, 2);
5003 
5004 	generate_check_delay_pc(block, compiler, desc);
5005 
5006 	const uint32_t src_code = op & 0xf;
5007 	const uint32_t dst_code = (op & 0xf0) >> 4;
5008 
5009 	UML_ROLAND(block, I3, DRC_SR, 7, 0x7f);
5010 
5011 	UML_ADD(block, I2, I3, src_code);
5012 	UML_AND(block, I2, I2, 0x3f);
5013 	UML_LOAD(block, I0, (void *)m_core->local_regs, I2, SIZE_DWORD, SCALE_x4); // I0: vals
5014 
5015 	UML_ADD(block, I2, I3, dst_code);
5016 	UML_AND(block, I2, I2, 0x3f);
5017 	UML_LOAD(block, I1, (void *)m_core->local_regs, I2, SIZE_DWORD, SCALE_x4); // I1: vald
5018 
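	// Extended DSP operations: double-word results use the global register pair G14 (high
	// word) and G15 (low word); single-word results go to G15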
5019 	switch (func)
5020 	{
5021 		// signed or unsigned multiplication, single word product
5022 		case EMUL:
5023 		case EMUL_N: // used in "N" type cpu
5024 		{
5025 			UML_MULU(block, I2, I3, I0, I1);
5026 			UML_STORE(block, (void *)m_core->global_regs, 15, I2, SIZE_DWORD, SCALE_x4);
5027 			break;
5028 		}

		case EMULU: // unsigned multiplication, double word product
		case EMULS: // signed multiplication, double word product
		{
			if (func == EMULU)
				UML_MULU(block, I2, I3, I0, I1);
			else
				UML_MULS(block, I2, I3, I0, I1);
			UML_STORE(block, (void *)m_core->global_regs, 14, I3, SIZE_DWORD, SCALE_x4);
			UML_STORE(block, (void *)m_core->global_regs, 15, I2, SIZE_DWORD, SCALE_x4);
			break;
		}
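		// for the double-word forms the high word of the product (I3) lands in G14 and the
		// low word (I2) in G15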

		case EMAC:  // signed multiply/add, single word product sum
		case EMSUB: // signed multiply/subtract, single word product difference
		{
			UML_MULS(block, I2, I3, I0, I1);
			UML_LOAD(block, I3, (void *)m_core->global_regs, 15, SIZE_DWORD, SCALE_x4);
			if (func == EMAC)
				UML_ADD(block, I3, I3, I2);
			else
				UML_SUB(block, I3, I3, I2);
			UML_STORE(block, (void *)m_core->global_regs, 15, I3, SIZE_DWORD, SCALE_x4);
			break;
		}
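		// G15 serves as a 32-bit accumulator here: G15 = G15 +/- low32(Ls * Ld)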

		case EMACD:  // signed multiply/add, double word product sum
		case EMSUBD: // signed multiply/subtract, double word product difference
		{
			UML_DSEXT(block, I0, I0, SIZE_DWORD);
			UML_DSEXT(block, I1, I1, SIZE_DWORD);
			UML_DMULS(block, I2, I3, I0, I1);
			UML_LOAD(block, I3, (void *)m_core->global_regs, 14, SIZE_DWORD, SCALE_x4);
			UML_LOAD(block, I4, (void *)m_core->global_regs, 15, SIZE_DWORD, SCALE_x4);
			UML_DSHL(block, I3, I3, 32);
			UML_DOR(block, I3, I3, I4);
			if (func == EMACD)
				UML_DADD(block, I3, I3, I2);
			else
				UML_DSUB(block, I3, I3, I2);
			UML_STORE(block, (void *)m_core->global_regs, 15, I3, SIZE_DWORD, SCALE_x4);
			UML_DSHR(block, I3, I3, 32);
			UML_STORE(block, (void *)m_core->global_regs, 14, I3, SIZE_DWORD, SCALE_x4);
			break;
		}
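		// the G14:G15 pair is treated as a single 64-bit accumulator: it is assembled, updated
		// with the full signed 64-bit product, then written back with the low word in G15 and
		// the high word in G14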

		// signed half-word multiply/add, single word product sum
		case EHMAC:
		{
			UML_AND(block, I2, I0, 0x0000ffff);
			UML_AND(block, I3, I1, 0x0000ffff);
			UML_MULS(block, I2, I3, I2, I3);
			UML_SHR(block, I0, I0, 16);
			UML_SHR(block, I1, I1, 16);
			UML_MULS(block, I0, I1, I0, I1);
			UML_ADD(block, I0, I0, I2);
			UML_LOAD(block, I1, (void *)m_core->global_regs, 15, SIZE_DWORD, SCALE_x4);
			UML_ADD(block, I0, I0, I1);
			UML_STORE(block, (void *)m_core->global_regs, 15, I0, SIZE_DWORD, SCALE_x4);
			break;
		}
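		// G15 accumulates the sum of the two 16x16 half-word products (low halves and high
		// halves); note that the halves are masked/shifted without sign extension as written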

		// signed half-word multiply/add, double word product sum
		case EHMACD:
		{
			printf("Unimplemented extended opcode, EHMACD, PC = %08x\n", desc->pc);
			fatalerror(" ");
			break;
		}

		// half-word complex multiply
		case EHCMULD:
		{
			printf("Unimplemented extended opcode, EHCMULD, PC = %08x\n", desc->pc);
			fatalerror(" ");
			break;
		}

		// half-word complex multiply/add
		case EHCMACD:
		{
			printf("Unimplemented extended opcode, EHCMACD, PC = %08x\n", desc->pc);
			fatalerror(" ");
			break;
		}

		// half-word (complex) add/subtract
		// Ls is not used and should denote the same register as Ld
		case EHCSUMD:
		{
			printf("Unimplemented extended opcode, EHCSUMD, PC = %08x\n", desc->pc);
			fatalerror(" ");
			break;
		}

		// half-word (complex) add/subtract with fixed point adjustment
		// Ls is not used and should denote the same register as Ld
		case EHCFFTD:
		{
			printf("Unimplemented extended opcode, EHCFFTD, PC = %08x\n", desc->pc);
			fatalerror(" ");
			break;
		}

		// half-word (complex) add/subtract with fixed point adjustment and shift
		// Ls is not used and should denote the same register as Ld
		case EHCFFTSD:
		{
			printf("Unimplemented extended opcode, EHCFFTSD, PC = %08x\n", desc->pc);
			fatalerror(" ");
			break;
		}

		default:
			printf("Unknown extended opcode (%04x), PC = %08x\n", func, desc->pc);
			fatalerror(" ");
			break;
	}
}


void hyperstone_device::generate_reserved(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
{
	printf("Unimplemented: generate_reserved (%08x)\n", desc->pc);
	fflush(stdout);
	fatalerror(" ");
}

void hyperstone_device::generate_do(drcuml_block &block, compiler_state &compiler, const opcode_desc *desc)
{
	printf("Unimplemented: generate_do (%08x)\n", desc->pc);
	fflush(stdout);
	fatalerror(" ");
}