xref: /qemu/target/m68k/translate.c (revision b097ba37)
1 /*
2  *  m68k translation
3  *
4  *  Copyright (c) 2005-2007 CodeSourcery
5  *  Written by Paul Brook
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg-op.h"
26 #include "qemu/log.h"
27 #include "qemu/qemu-print.h"
28 #include "exec/cpu_ldst.h"
29 #include "exec/translator.h"
30 
31 #include "exec/helper-proto.h"
32 #include "exec/helper-gen.h"
33 
34 #include "trace-tcg.h"
35 #include "exec/log.h"
36 #include "fpu/softfloat.h"
37 
38 
39 //#define DEBUG_DISPATCH 1
40 
/*
 * Declare one TCG global per entry in qregs.def (QREG_PC, QREG_CC_*, ...).
 * Only the variables are declared here; they are bound to CPUM68KState
 * fields in m68k_tcg_init() below.
 */
#define DEFO32(name, offset) static TCGv QREG_##name;
#define DEFO64(name, offset) static TCGv_i64 QREG_##name;
#include "qregs.def"
#undef DEFO32
#undef DEFO64

/* Mirrors of CPUState.halted / CPUState.exception_index (bound in init).  */
static TCGv_i32 cpu_halted;
static TCGv_i32 cpu_exception_index;

/* Backing storage for register names: 8 x "Dn" + 8 x "An" at 3 bytes each,
   plus 4 x "ACCn" at 5 bytes each.  Filled in by m68k_tcg_init().  */
static char cpu_reg_names[2 * 8 * 3 + 5 * 4];
static TCGv cpu_dregs[8];     /* data registers D0-D7 */
static TCGv cpu_aregs[8];     /* address registers A0-A7 */
static TCGv_i64 cpu_macc[4];  /* MAC accumulators ACC0-ACC3 */

/* Extract the 3-bit register field starting at bit POS of INSN.  */
#define REG(insn, pos)  (((insn) >> (pos)) & 7)
#define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
#define AREG(insn, pos) get_areg(s, REG(insn, pos))
#define MACREG(acc)     cpu_macc[acc]
#define QREG_SP         get_areg(s, 7)

/* Sentinel value used to signal an invalid addressing mode.  */
static TCGv NULL_QREG;
#define IS_NULL_QREG(t) (t == NULL_QREG)
/* Used to distinguish stores from bad addressing modes.  */
static TCGv store_dummy;
65 
66 #include "exec/gen-icount.h"
67 
/* Allocate all TCG globals used by the m68k translator.  Called once
   during CPU class initialization.  */
void m68k_tcg_init(void)
{
    char *p;
    int i;

/* Bind each qregs.def entry to its CPUM68KState field.  */
#define DEFO32(name, offset) \
    QREG_##name = tcg_global_mem_new_i32(cpu_env, \
        offsetof(CPUM68KState, offset), #name);
#define DEFO64(name, offset) \
    QREG_##name = tcg_global_mem_new_i64(cpu_env, \
        offsetof(CPUM68KState, offset), #name);
#include "qregs.def"
#undef DEFO32
#undef DEFO64

    /* cpu_env points at the env member inside M68kCPU; fields of the
       enclosing CPUState are reached via a negative offset.  */
    cpu_halted = tcg_global_mem_new_i32(cpu_env,
                                        -offsetof(M68kCPU, env) +
                                        offsetof(CPUState, halted), "HALTED");
    cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
                                                 -offsetof(M68kCPU, env) +
                                                 offsetof(CPUState, exception_index),
                                                 "EXCEPTION");

    /* Carve the "Dn"/"An"/"ACCn" name strings out of cpu_reg_names.  */
    p = cpu_reg_names;
    for (i = 0; i < 8; i++) {
        sprintf(p, "D%d", i);
        cpu_dregs[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUM68KState, dregs[i]), p);
        p += 3;
        sprintf(p, "A%d", i);
        cpu_aregs[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUM68KState, aregs[i]), p);
        p += 3;
    }
    for (i = 0; i < 4; i++) {
        sprintf(p, "ACC%d", i);
        cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
                                         offsetof(CPUM68KState, macc[i]), p);
        p += 5;
    }

    /* Sentinels only: the bogus negative offsets are never dereferenced.  */
    NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
    store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
}
112 
/* internal defines */
/* Per-translation-block disassembly state for the m68k front end.  */
typedef struct DisasContext {
    DisasContextBase base;
    CPUM68KState *env;
    target_ulong pc;            /* address of the next instruction word */
    CCOp cc_op; /* Current CC operation */
    int cc_op_synced;           /* nonzero if env->cc_op matches cc_op */
    TCGv_i64 mactmp;            /* scratch used by MAC instructions */
    int done_mac;
    int writeback_mask;         /* bitmask of An regs with delayed writeback */
    TCGv writeback[8];          /* pending values, see delay_set_areg() */
#define MAX_TO_RELEASE 8
    int release_count;
    TCGv release[MAX_TO_RELEASE];   /* temps freed at end of insn */
} DisasContext;
128 
129 static void init_release_array(DisasContext *s)
130 {
131 #ifdef CONFIG_DEBUG_TCG
132     memset(s->release, 0, sizeof(s->release));
133 #endif
134     s->release_count = 0;
135 }
136 
137 static void do_release(DisasContext *s)
138 {
139     int i;
140     for (i = 0; i < s->release_count; i++) {
141         tcg_temp_free(s->release[i]);
142     }
143     init_release_array(s);
144 }
145 
146 static TCGv mark_to_release(DisasContext *s, TCGv tmp)
147 {
148     g_assert(s->release_count < MAX_TO_RELEASE);
149     return s->release[s->release_count++] = tmp;
150 }
151 
152 static TCGv get_areg(DisasContext *s, unsigned regno)
153 {
154     if (s->writeback_mask & (1 << regno)) {
155         return s->writeback[regno];
156     } else {
157         return cpu_aregs[regno];
158     }
159 }
160 
161 static void delay_set_areg(DisasContext *s, unsigned regno,
162                            TCGv val, bool give_temp)
163 {
164     if (s->writeback_mask & (1 << regno)) {
165         if (give_temp) {
166             tcg_temp_free(s->writeback[regno]);
167             s->writeback[regno] = val;
168         } else {
169             tcg_gen_mov_i32(s->writeback[regno], val);
170         }
171     } else {
172         s->writeback_mask |= 1 << regno;
173         if (give_temp) {
174             s->writeback[regno] = val;
175         } else {
176             TCGv tmp = tcg_temp_new();
177             s->writeback[regno] = tmp;
178             tcg_gen_mov_i32(tmp, val);
179         }
180     }
181 }
182 
183 static void do_writebacks(DisasContext *s)
184 {
185     unsigned mask = s->writeback_mask;
186     if (mask) {
187         s->writeback_mask = 0;
188         do {
189             unsigned regno = ctz32(mask);
190             tcg_gen_mov_i32(cpu_aregs[regno], s->writeback[regno]);
191             tcg_temp_free(s->writeback[regno]);
192             mask &= mask - 1;
193         } while (mask);
194     }
195 }
196 
/* is_jmp field values */
#define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_EXIT      DISAS_TARGET_1 /* cpu state was modified dynamically */

/* In user-only builds the CPU always runs unprivileged; otherwise the
   supervisor state and function-code MMU indexes come from the TB flags.  */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s)   (!(s->base.tb->flags & TB_FLAGS_MSR_S))
#define SFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_SFC_S) ? \
                      MMU_KERNEL_IDX : MMU_USER_IDX)
#define DFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_DFC_S) ? \
                      MMU_KERNEL_IDX : MMU_USER_IDX)
#endif

/* Signature of the per-instruction translation routines.  */
typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);

/* DISAS_INSN declares a translation routine; with DEBUG_DISPATCH it also
   emits a logging wrapper around the real implementation.  */
#ifdef DEBUG_DISPATCH
#define DISAS_INSN(name)                                                \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
                                  uint16_t insn);                       \
    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
                             uint16_t insn)                             \
    {                                                                   \
        qemu_log("Dispatch " #name "\n");                               \
        real_disas_##name(env, s, insn);                                \
    }                                                                   \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
                                  uint16_t insn)
#else
#define DISAS_INSN(name)                                                \
    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
                             uint16_t insn)
#endif
230 
/* For each CC_OP, the set of flags whose cached QREG_CC_* values carry
   live data; set_cc_op() discards anything not listed here when the
   operation changes.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_ADDB ... CC_OP_ADDL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_SUBB ... CC_OP_SUBL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_CMPB ... CC_OP_CMPL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_LOGIC] = CCF_X | CCF_N
};
239 
240 static void set_cc_op(DisasContext *s, CCOp op)
241 {
242     CCOp old_op = s->cc_op;
243     int dead;
244 
245     if (old_op == op) {
246         return;
247     }
248     s->cc_op = op;
249     s->cc_op_synced = 0;
250 
251     /* Discard CC computation that will no longer be used.
252        Note that X and N are never dead.  */
253     dead = cc_op_live[old_op] & ~cc_op_live[op];
254     if (dead & CCF_C) {
255         tcg_gen_discard_i32(QREG_CC_C);
256     }
257     if (dead & CCF_Z) {
258         tcg_gen_discard_i32(QREG_CC_Z);
259     }
260     if (dead & CCF_V) {
261         tcg_gen_discard_i32(QREG_CC_V);
262     }
263 }
264 
265 /* Update the CPU env CC_OP state.  */
266 static void update_cc_op(DisasContext *s)
267 {
268     if (!s->cc_op_synced) {
269         s->cc_op_synced = 1;
270         tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
271     }
272 }
273 
274 /* Generate a jump to an immediate address.  */
275 static void gen_jmp_im(DisasContext *s, uint32_t dest)
276 {
277     update_cc_op(s);
278     tcg_gen_movi_i32(QREG_PC, dest);
279     s->base.is_jmp = DISAS_JUMP;
280 }
281 
282 /* Generate a jump to the address in qreg DEST.  */
283 static void gen_jmp(DisasContext *s, TCGv dest)
284 {
285     update_cc_op(s);
286     tcg_gen_mov_i32(QREG_PC, dest);
287     s->base.is_jmp = DISAS_JUMP;
288 }
289 
290 static void gen_exception(DisasContext *s, uint32_t dest, int nr)
291 {
292     TCGv_i32 tmp;
293 
294     update_cc_op(s);
295     tcg_gen_movi_i32(QREG_PC, dest);
296 
297     tmp = tcg_const_i32(nr);
298     gen_helper_raise_exception(cpu_env, tmp);
299     tcg_temp_free_i32(tmp);
300 
301     s->base.is_jmp = DISAS_NORETURN;
302 }
303 
/* Raise an address error exception at the current instruction.  */
static inline void gen_addr_fault(DisasContext *s)
{
    gen_exception(s, s->base.pc_next, EXCP_ADDRESS);
}
308 
309 /* Generate a load from the specified address.  Narrow values are
310    sign extended to full register width.  */
311 static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr,
312                             int sign, int index)
313 {
314     TCGv tmp;
315     tmp = tcg_temp_new_i32();
316     switch(opsize) {
317     case OS_BYTE:
318         if (sign)
319             tcg_gen_qemu_ld8s(tmp, addr, index);
320         else
321             tcg_gen_qemu_ld8u(tmp, addr, index);
322         break;
323     case OS_WORD:
324         if (sign)
325             tcg_gen_qemu_ld16s(tmp, addr, index);
326         else
327             tcg_gen_qemu_ld16u(tmp, addr, index);
328         break;
329     case OS_LONG:
330         tcg_gen_qemu_ld32u(tmp, addr, index);
331         break;
332     default:
333         g_assert_not_reached();
334     }
335     return tmp;
336 }
337 
338 /* Generate a store.  */
339 static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val,
340                              int index)
341 {
342     switch(opsize) {
343     case OS_BYTE:
344         tcg_gen_qemu_st8(val, addr, index);
345         break;
346     case OS_WORD:
347         tcg_gen_qemu_st16(val, addr, index);
348         break;
349     case OS_LONG:
350         tcg_gen_qemu_st32(val, addr, index);
351         break;
352     default:
353         g_assert_not_reached();
354     }
355 }
356 
/* Operation selector for gen_ldst() and gen_ea_mode().  */
typedef enum {
    EA_STORE,   /* write VAL to the effective address */
    EA_LOADU,   /* zero-extending load */
    EA_LOADS    /* sign-extending load */
} ea_what;
362 
363 /* Generate an unsigned load if VAL is 0 a signed load if val is -1,
364    otherwise generate a store.  */
365 static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
366                      ea_what what, int index)
367 {
368     if (what == EA_STORE) {
369         gen_store(s, opsize, addr, val, index);
370         return store_dummy;
371     } else {
372         return mark_to_release(s, gen_load(s, opsize, addr,
373                                            what == EA_LOADS, index));
374     }
375 }
376 
377 /* Read a 16-bit immediate constant */
378 static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
379 {
380     uint16_t im;
381     im = cpu_lduw_code(env, s->pc);
382     s->pc += 2;
383     return im;
384 }
385 
/* Read an 8-bit immediate constant */
static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
{
    /* Byte immediates occupy a full 16-bit extension word; the value is
       in the low byte (truncated by the return type).  */
    return read_im16(env, s);
}
391 
392 /* Read a 32-bit immediate constant.  */
393 static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
394 {
395     uint32_t im;
396     im = read_im16(env, s) << 16;
397     im |= 0xffff & read_im16(env, s);
398     return im;
399 }
400 
401 /* Read a 64-bit immediate constant.  */
402 static inline uint64_t read_im64(CPUM68KState *env, DisasContext *s)
403 {
404     uint64_t im;
405     im = (uint64_t)read_im32(env, s) << 32;
406     im |= (uint64_t)read_im32(env, s);
407     return im;
408 }
409 
410 /* Calculate and address index.  */
411 static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp)
412 {
413     TCGv add;
414     int scale;
415 
416     add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
417     if ((ext & 0x800) == 0) {
418         tcg_gen_ext16s_i32(tmp, add);
419         add = tmp;
420     }
421     scale = (ext >> 9) & 3;
422     if (scale != 0) {
423         tcg_gen_shli_i32(tmp, add, scale);
424         add = tmp;
425     }
426     return add;
427 }
428 
/* Handle a base + index + displacement effective address.
   A NULL_QREG base means pc-relative.  Returns NULL_QREG for addressing
   modes not supported by the current CPU feature set.  */
static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
{
    uint32_t offset;
    uint16_t ext;
    TCGv add;
    TCGv tmp;
    uint32_t bd, od;

    /* Remember the extension word's address for pc-relative bases.  */
    offset = s->pc;
    ext = read_im16(env, s);

    /* A word-sized index requires M68K_FEATURE_WORD_INDEX.  */
    if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
        return NULL_QREG;

    if (m68k_feature(s->env, M68K_FEATURE_M68000) &&
        !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
        /* Force the scale field to 1 on CPUs without scaled indexing.  */
        ext &= ~(3 << 9);
    }

    if (ext & 0x100) {
        /* full extension word format */
        if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
            return NULL_QREG;

        if ((ext & 0x30) > 0x10) {
            /* base displacement */
            if ((ext & 0x30) == 0x20) {
                bd = (int16_t)read_im16(env, s);
            } else {
                bd = read_im32(env, s);
            }
        } else {
            bd = 0;
        }
        tmp = mark_to_release(s, tcg_temp_new());
        if ((ext & 0x44) == 0) {
            /* pre-index */
            add = gen_addr_index(s, ext, tmp);
        } else {
            add = NULL_QREG;
        }
        if ((ext & 0x80) == 0) {
            /* base not suppressed */
            if (IS_NULL_QREG(base)) {
                /* pc-relative: fold the displacement into the constant.  */
                base = mark_to_release(s, tcg_const_i32(offset + bd));
                bd = 0;
            }
            if (!IS_NULL_QREG(add)) {
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
        }
        if (!IS_NULL_QREG(add)) {
            if (bd != 0) {
                tcg_gen_addi_i32(tmp, add, bd);
                add = tmp;
            }
        } else {
            add = mark_to_release(s, tcg_const_i32(bd));
        }
        if ((ext & 3) != 0) {
            /* memory indirect */
            base = mark_to_release(s, gen_load(s, OS_LONG, add, 0, IS_USER(s)));
            if ((ext & 0x44) == 4) {
                /* post-index: index is added after the indirection.  */
                add = gen_addr_index(s, ext, tmp);
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
            if ((ext & 3) > 1) {
                /* outer displacement */
                if ((ext & 3) == 2) {
                    od = (int16_t)read_im16(env, s);
                } else {
                    od = read_im32(env, s);
                }
            } else {
                od = 0;
            }
            if (od != 0) {
                tcg_gen_addi_i32(tmp, add, od);
                add = tmp;
            }
        }
    } else {
        /* brief extension word format */
        tmp = mark_to_release(s, tcg_temp_new());
        add = gen_addr_index(s, ext, tmp);
        if (!IS_NULL_QREG(base)) {
            tcg_gen_add_i32(tmp, add, base);
            /* The 8-bit displacement lives in the low byte of EXT.  */
            if ((int8_t)ext)
                tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
        } else {
            tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
        }
        add = tmp;
    }
    return add;
}
533 
534 /* Sign or zero extend a value.  */
535 
536 static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
537 {
538     switch (opsize) {
539     case OS_BYTE:
540         if (sign) {
541             tcg_gen_ext8s_i32(res, val);
542         } else {
543             tcg_gen_ext8u_i32(res, val);
544         }
545         break;
546     case OS_WORD:
547         if (sign) {
548             tcg_gen_ext16s_i32(res, val);
549         } else {
550             tcg_gen_ext16u_i32(res, val);
551         }
552         break;
553     case OS_LONG:
554         tcg_gen_mov_i32(res, val);
555         break;
556     default:
557         g_assert_not_reached();
558     }
559 }
560 
/* Evaluate all the CC flags.  */

/* Materialize the lazily-tracked condition codes into QREG_CC_{C,V,Z,N}
   and switch to CC_OP_FLAGS.  */
static void gen_flush_flags(DisasContext *s)
{
    TCGv t0, t1;

    switch (s->cc_op) {
    case CC_OP_FLAGS:
        /* Flags are already fully materialized.  */
        return;

    case CC_OP_ADDB:
    case CC_OP_ADDW:
    case CC_OP_ADDL:
        /* Here N holds the (unextended) result and V holds the second
           operand (see gen_update_cc_add).  */
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for addition.  */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        /* t0 = first operand = result - src, re-extended to size.  */
        tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V);
        gen_ext(t0, t0, s->cc_op - CC_OP_ADDB, 1);
        /* V := (res ^ src) & ~(src ^ op1): overflow iff operands share a
           sign that the result does not.  */
        tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_temp_free(t0);
        tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
        tcg_temp_free(t1);
        break;

    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
        /* Here N holds the result and V holds the subtrahend.  */
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for subtraction.  */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        /* t0 = minuend = result + src, re-extended to size.  */
        tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V);
        gen_ext(t0, t0, s->cc_op - CC_OP_SUBB, 1);
        tcg_gen_xor_i32(t1, QREG_CC_N, t0);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_temp_free(t0);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
        tcg_temp_free(t1);
        break;

    case CC_OP_CMPB:
    case CC_OP_CMPW:
    case CC_OP_CMPL:
        /* Here N holds the destination and V the source operand; the
           comparison result is recomputed as N - V.  */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
        tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
        gen_ext(QREG_CC_Z, QREG_CC_Z, s->cc_op - CC_OP_CMPB, 1);
        /* Compute signed overflow for subtraction.  */
        t0 = tcg_temp_new();
        tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_temp_free(t0);
        tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
        break;

    case CC_OP_LOGIC:
        /* N already holds the sign-extended result; C and V are clear.  */
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        tcg_gen_movi_i32(QREG_CC_C, 0);
        tcg_gen_movi_i32(QREG_CC_V, 0);
        break;

    case CC_OP_DYNAMIC:
        /* cc_op is only known at runtime; defer to the helper.  */
        gen_helper_flush_flags(cpu_env, QREG_CC_OP);
        s->cc_op_synced = 1;
        break;

    default:
        t0 = tcg_const_i32(s->cc_op);
        gen_helper_flush_flags(cpu_env, t0);
        tcg_temp_free(t0);
        s->cc_op_synced = 1;
        break;
    }

    /* Note that flush_flags also assigned to env->cc_op.  */
    s->cc_op = CC_OP_FLAGS;
}
642 
643 static inline TCGv gen_extend(DisasContext *s, TCGv val, int opsize, int sign)
644 {
645     TCGv tmp;
646 
647     if (opsize == OS_LONG) {
648         tmp = val;
649     } else {
650         tmp = mark_to_release(s, tcg_temp_new());
651         gen_ext(tmp, val, opsize, sign);
652     }
653 
654     return tmp;
655 }
656 
/* Set the flags for a logic-class result: N caches the sign-extended
   value; Z/C/V are derived lazily via CC_OP_LOGIC.  */
static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
{
    gen_ext(QREG_CC_N, val, opsize, 1);
    set_cc_op(s, CC_OP_LOGIC);
}
662 
/* Record a compare for lazy flag evaluation: N caches DEST, V caches SRC;
   gen_flush_flags() recomputes the flags from them.  */
static void gen_update_cc_cmp(DisasContext *s, TCGv dest, TCGv src, int opsize)
{
    tcg_gen_mov_i32(QREG_CC_N, dest);
    tcg_gen_mov_i32(QREG_CC_V, src);
    set_cc_op(s, CC_OP_CMPB + opsize);
}
669 
/* Record an add/sub for lazy flag evaluation: N caches the sign-extended
   result, V caches the source operand.  Caller sets the cc_op.  */
static void gen_update_cc_add(TCGv dest, TCGv src, int opsize)
{
    gen_ext(QREG_CC_N, dest, opsize, 1);
    tcg_gen_mov_i32(QREG_CC_V, src);
}
675 
676 static inline int opsize_bytes(int opsize)
677 {
678     switch (opsize) {
679     case OS_BYTE: return 1;
680     case OS_WORD: return 2;
681     case OS_LONG: return 4;
682     case OS_SINGLE: return 4;
683     case OS_DOUBLE: return 8;
684     case OS_EXTENDED: return 12;
685     case OS_PACKED: return 12;
686     default:
687         g_assert_not_reached();
688     }
689 }
690 
691 static inline int insn_opsize(int insn)
692 {
693     switch ((insn >> 6) & 3) {
694     case 0: return OS_BYTE;
695     case 1: return OS_WORD;
696     case 2: return OS_LONG;
697     default:
698         g_assert_not_reached();
699     }
700 }
701 
702 static inline int ext_opsize(int ext, int pos)
703 {
704     switch ((ext >> pos) & 7) {
705     case 0: return OS_LONG;
706     case 1: return OS_SINGLE;
707     case 2: return OS_EXTENDED;
708     case 3: return OS_PACKED;
709     case 4: return OS_WORD;
710     case 5: return OS_DOUBLE;
711     case 6: return OS_BYTE;
712     default:
713         g_assert_not_reached();
714     }
715 }
716 
/* Assign value to a register.  If the width is less than the register width
   only the low part of the register is set.  */
static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
{
    TCGv tmp;
    switch (opsize) {
    case OS_BYTE:
        /* Replace only bits 0-7 of REG.  */
        tcg_gen_andi_i32(reg, reg, 0xffffff00);
        tmp = tcg_temp_new();
        tcg_gen_ext8u_i32(tmp, val);
        tcg_gen_or_i32(reg, reg, tmp);
        tcg_temp_free(tmp);
        break;
    case OS_WORD:
        /* Replace only bits 0-15 of REG.  */
        tcg_gen_andi_i32(reg, reg, 0xffff0000);
        tmp = tcg_temp_new();
        tcg_gen_ext16u_i32(tmp, val);
        tcg_gen_or_i32(reg, reg, tmp);
        tcg_temp_free(tmp);
        break;
    case OS_LONG:
    case OS_SINGLE:
        /* Full-width assignment.  */
        tcg_gen_mov_i32(reg, val);
        break;
    default:
        g_assert_not_reached();
    }
}
745 
/* Generate code for an "effective address".  Does not adjust the base
   register for autoincrement addressing modes.  Returns NULL_QREG for
   modes that have no address (register direct, immediate).  */
static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
                         int mode, int reg0, int opsize)
{
    TCGv reg;
    TCGv tmp;
    uint16_t ext;
    uint32_t offset;

    switch (mode) {
    case 0: /* Data register direct.  */
    case 1: /* Address register direct.  */
        /* Register direct modes have no memory address.  */
        return NULL_QREG;
    case 3: /* Indirect postincrement.  */
        if (opsize == OS_UNSIZED) {
            return NULL_QREG;
        }
        /* fallthru */
    case 2: /* Indirect register */
        return get_areg(s, reg0);
    case 4: /* Indirect predecrement.  */
        if (opsize == OS_UNSIZED) {
            return NULL_QREG;
        }
        reg = get_areg(s, reg0);
        tmp = mark_to_release(s, tcg_temp_new());
        if (reg0 == 7 && opsize == OS_BYTE &&
            m68k_feature(s->env, M68K_FEATURE_M68000)) {
            /* Byte push on A7 decrements by 2 to keep SP word-aligned.  */
            tcg_gen_subi_i32(tmp, reg, 2);
        } else {
            tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
        }
        return tmp;
    case 5: /* Indirect displacement.  */
        reg = get_areg(s, reg0);
        tmp = mark_to_release(s, tcg_temp_new());
        ext = read_im16(env, s);
        tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
        return tmp;
    case 6: /* Indirect index + displacement.  */
        reg = get_areg(s, reg0);
        return gen_lea_indexed(env, s, reg);
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short.  */
            offset = (int16_t)read_im16(env, s);
            return mark_to_release(s, tcg_const_i32(offset));
        case 1: /* Absolute long.  */
            offset = read_im32(env, s);
            return mark_to_release(s, tcg_const_i32(offset));
        case 2: /* pc displacement  */
            offset = s->pc;
            offset += (int16_t)read_im16(env, s);
            return mark_to_release(s, tcg_const_i32(offset));
        case 3: /* pc index+displacement.  */
            return gen_lea_indexed(env, s, NULL_QREG);
        case 4: /* Immediate.  */
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen.  */
    return NULL_QREG;
}
811 
812 static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
813                     int opsize)
814 {
815     int mode = extract32(insn, 3, 3);
816     int reg0 = REG(insn, 0);
817     return gen_lea_mode(env, s, mode, reg0, opsize);
818 }
819 
/* Generate code to load/store a value from/into an EA.  WHAT selects a
   store (EA_STORE), a zero-extending load (EA_LOADU) or a sign-extending
   load (EA_LOADS).  ADDRP is non-null for readwrite operands: the address
   computed on the read pass is saved there and reused on the write pass.  */
static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
                        int opsize, TCGv val, TCGv *addrp, ea_what what,
                        int index)
{
    TCGv reg, tmp, result;
    int32_t offset;

    switch (mode) {
    case 0: /* Data register direct.  */
        reg = cpu_dregs[reg0];
        if (what == EA_STORE) {
            gen_partset_reg(opsize, reg, val);
            return store_dummy;
        } else {
            return gen_extend(s, reg, opsize, what == EA_LOADS);
        }
    case 1: /* Address register direct.  */
        reg = get_areg(s, reg0);
        if (what == EA_STORE) {
            tcg_gen_mov_i32(reg, val);
            return store_dummy;
        } else {
            return gen_extend(s, reg, opsize, what == EA_LOADS);
        }
    case 2: /* Indirect register */
        reg = get_areg(s, reg0);
        return gen_ldst(s, opsize, reg, val, what, index);
    case 3: /* Indirect postincrement.  */
        reg = get_areg(s, reg0);
        result = gen_ldst(s, opsize, reg, val, what, index);
        /* Only increment on the final access of a readwrite operand.  */
        if (what == EA_STORE || !addrp) {
            TCGv tmp = tcg_temp_new();
            if (reg0 == 7 && opsize == OS_BYTE &&
                m68k_feature(s->env, M68K_FEATURE_M68000)) {
                /* Byte access on A7 bumps by 2 to keep SP word-aligned.  */
                tcg_gen_addi_i32(tmp, reg, 2);
            } else {
                tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize));
            }
            delay_set_areg(s, reg0, tmp, true);
        }
        return result;
    case 4: /* Indirect predecrement.  */
        if (addrp && what == EA_STORE) {
            /* Write pass of a readwrite operand: reuse the saved address. */
            tmp = *addrp;
        } else {
            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
            if (IS_NULL_QREG(tmp)) {
                return tmp;
            }
            if (addrp) {
                *addrp = tmp;
            }
        }
        result = gen_ldst(s, opsize, tmp, val, what, index);
        /* Only decrement on the final access of a readwrite operand.  */
        if (what == EA_STORE || !addrp) {
            delay_set_areg(s, reg0, tmp, false);
        }
        return result;
    case 5: /* Indirect displacement.  */
    case 6: /* Indirect index + displacement.  */
    do_indirect:
        if (addrp && what == EA_STORE) {
            tmp = *addrp;
        } else {
            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
            if (IS_NULL_QREG(tmp)) {
                return tmp;
            }
            if (addrp) {
                *addrp = tmp;
            }
        }
        return gen_ldst(s, opsize, tmp, val, what, index);
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short.  */
        case 1: /* Absolute long.  */
        case 2: /* pc displacement  */
        case 3: /* pc index+displacement.  */
            goto do_indirect;
        case 4: /* Immediate.  */
            /* Sign extend values for consistency.  */
            switch (opsize) {
            case OS_BYTE:
                if (what == EA_LOADS) {
                    offset = (int8_t)read_im8(env, s);
                } else {
                    offset = read_im8(env, s);
                }
                break;
            case OS_WORD:
                if (what == EA_LOADS) {
                    offset = (int16_t)read_im16(env, s);
                } else {
                    offset = read_im16(env, s);
                }
                break;
            case OS_LONG:
                offset = read_im32(env, s);
                break;
            default:
                g_assert_not_reached();
            }
            return mark_to_release(s, tcg_const_i32(offset));
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen.  */
    return NULL_QREG;
}
934 
935 static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
936                    int opsize, TCGv val, TCGv *addrp, ea_what what, int index)
937 {
938     int mode = extract32(insn, 3, 3);
939     int reg0 = REG(insn, 0);
940     return gen_ea_mode(env, s, mode, reg0, opsize, val, addrp, what, index);
941 }
942 
/* Return a pointer temp addressing FP register FREG within env.
   Caller frees the temp.  */
static TCGv_ptr gen_fp_ptr(int freg)
{
    TCGv_ptr fp = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fregs[freg]));
    return fp;
}
949 
/* Return a pointer temp addressing env->fp_result.  Caller frees it.  */
static TCGv_ptr gen_fp_result_ptr(void)
{
    TCGv_ptr fp = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fp_result));
    return fp;
}
956 
/* Copy one FPReg to another: the 16-bit sign/exponent word (l.upper) and
   the 64-bit mantissa (l.lower) are moved separately.  */
static void gen_fp_move(TCGv_ptr dest, TCGv_ptr src)
{
    TCGv t32;
    TCGv_i64 t64;

    t32 = tcg_temp_new();
    tcg_gen_ld16u_i32(t32, src, offsetof(FPReg, l.upper));
    tcg_gen_st16_i32(t32, dest, offsetof(FPReg, l.upper));
    tcg_temp_free(t32);

    t64 = tcg_temp_new_i64();
    tcg_gen_ld_i64(t64, src, offsetof(FPReg, l.lower));
    tcg_gen_st_i64(t64, dest, offsetof(FPReg, l.lower));
    tcg_temp_free_i64(t64);
}
972 
/* Load a value of format OPSIZE from ADDR and convert it into the FP
   register pointed to by FP, via the extNN helpers.  */
static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
                        int index)
{
    TCGv tmp;
    TCGv_i64 t64;

    t64 = tcg_temp_new_i64();
    tmp = tcg_temp_new();
    switch (opsize) {
    case OS_BYTE:
        /* Integer formats are sign-extended then converted by exts32.  */
        tcg_gen_qemu_ld8s(tmp, addr, index);
        gen_helper_exts32(cpu_env, fp, tmp);
        break;
    case OS_WORD:
        tcg_gen_qemu_ld16s(tmp, addr, index);
        gen_helper_exts32(cpu_env, fp, tmp);
        break;
    case OS_LONG:
        tcg_gen_qemu_ld32u(tmp, addr, index);
        gen_helper_exts32(cpu_env, fp, tmp);
        break;
    case OS_SINGLE:
        tcg_gen_qemu_ld32u(tmp, addr, index);
        gen_helper_extf32(cpu_env, fp, tmp);
        break;
    case OS_DOUBLE:
        tcg_gen_qemu_ld64(t64, addr, index);
        gen_helper_extf64(cpu_env, fp, t64);
        break;
    case OS_EXTENDED:
        /* ColdFire FPUs have no 96-bit extended format.  */
        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
            gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
            break;
        }
        /* Sign/exponent word is in the top half of the first long.  */
        tcg_gen_qemu_ld32u(tmp, addr, index);
        tcg_gen_shri_i32(tmp, tmp, 16);
        tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
        tcg_gen_addi_i32(tmp, addr, 4);
        tcg_gen_qemu_ld64(t64, tmp, index);
        tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
        break;
    case OS_PACKED:
        /* unimplemented data type on 68040/ColdFire
         * FIXME if needed for another FPU
         */
        gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free(tmp);
    tcg_temp_free_i64(t64);
}
1026 
/*
 * Store the FP register pointed to by FP to memory at ADDR, converting
 * from the internal representation to the OPSIZE memory format via the
 * reds32/redf32/redf64 helpers.  INDEX is the MMU index to use for the
 * memory access.
 */
static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
                         int index)
{
    TCGv tmp;
    TCGv_i64 t64;

    t64 = tcg_temp_new_i64();
    tmp = tcg_temp_new();
    switch (opsize) {
    case OS_BYTE:
        gen_helper_reds32(tmp, cpu_env, fp);
        tcg_gen_qemu_st8(tmp, addr, index);
        break;
    case OS_WORD:
        gen_helper_reds32(tmp, cpu_env, fp);
        tcg_gen_qemu_st16(tmp, addr, index);
        break;
    case OS_LONG:
        gen_helper_reds32(tmp, cpu_env, fp);
        tcg_gen_qemu_st32(tmp, addr, index);
        break;
    case OS_SINGLE:
        gen_helper_redf32(tmp, cpu_env, fp);
        tcg_gen_qemu_st32(tmp, addr, index);
        break;
    case OS_DOUBLE:
        gen_helper_redf64(t64, cpu_env, fp);
        tcg_gen_qemu_st64(t64, addr, index);
        break;
    case OS_EXTENDED:
        /* ColdFire FPUs do not support the extended format in memory.  */
        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
            gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
            break;
        }
        /* The 16-bit upper word goes in the high half of the first
           longword (low half stored as zero), followed by the 64-bit
           lower part.  */
        tcg_gen_ld16u_i32(tmp, fp, offsetof(FPReg, l.upper));
        tcg_gen_shli_i32(tmp, tmp, 16);
        tcg_gen_qemu_st32(tmp, addr, index);
        tcg_gen_addi_i32(tmp, addr, 4);
        tcg_gen_ld_i64(t64, fp, offsetof(FPReg, l.lower));
        tcg_gen_qemu_st64(t64, tmp, index);
        break;
    case OS_PACKED:
        /* unimplemented data type on 68040/ColdFire
         * FIXME if needed for another FPU
         */
        gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free(tmp);
    tcg_temp_free_i64(t64);
}
1080 
1081 static void gen_ldst_fp(DisasContext *s, int opsize, TCGv addr,
1082                         TCGv_ptr fp, ea_what what, int index)
1083 {
1084     if (what == EA_STORE) {
1085         gen_store_fp(s, opsize, addr, fp, index);
1086     } else {
1087         gen_load_fp(s, opsize, addr, fp, index);
1088     }
1089 }
1090 
/*
 * Generate code for an FP operand with effective-address mode MODE and
 * register number REG0.  For EA_STORE the register *FP is written to
 * the EA; otherwise the EA is loaded into *FP.  Returns 0 on success,
 * or -1 for an invalid addressing mode (the caller raises the fault).
 */
static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode,
                          int reg0, int opsize, TCGv_ptr fp, ea_what what,
                          int index)
{
    TCGv reg, addr, tmp;
    TCGv_i64 t64;

    switch (mode) {
    case 0: /* Data register direct.  */
        reg = cpu_dregs[reg0];
        if (what == EA_STORE) {
            /* Convert the FP value to an integer/single in Dn.  */
            switch (opsize) {
            case OS_BYTE:
            case OS_WORD:
            case OS_LONG:
                gen_helper_reds32(reg, cpu_env, fp);
                break;
            case OS_SINGLE:
                gen_helper_redf32(reg, cpu_env, fp);
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /* Sign-extend Dn as needed and convert into the FP reg.  */
            tmp = tcg_temp_new();
            switch (opsize) {
            case OS_BYTE:
                tcg_gen_ext8s_i32(tmp, reg);
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_WORD:
                tcg_gen_ext16s_i32(tmp, reg);
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_LONG:
                gen_helper_exts32(cpu_env, fp, reg);
                break;
            case OS_SINGLE:
                gen_helper_extf32(cpu_env, fp, reg);
                break;
            default:
                g_assert_not_reached();
            }
            tcg_temp_free(tmp);
        }
        return 0;
    case 1: /* Address register direct.  */
        return -1;
    case 2: /* Indirect register */
        addr = get_areg(s, reg0);
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        return 0;
    case 3: /* Indirect postincrement.  */
        addr = cpu_aregs[reg0];
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        tcg_gen_addi_i32(addr, addr, opsize_bytes(opsize));
        return 0;
    case 4: /* Indirect predecrement.  */
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        /* Commit the decremented address back to An.  */
        tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        return 0;
    case 5: /* Indirect displacement.  */
    case 6: /* Indirect index + displacement.  */
    do_indirect:
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        return 0;
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short.  */
        case 1: /* Absolute long.  */
        case 2: /* pc displacement  */
        case 3: /* pc index+displacement.  */
            goto do_indirect;
        case 4: /* Immediate.  */
            /* Immediates can only be read, never stored to.  */
            if (what == EA_STORE) {
                return -1;
            }
            switch (opsize) {
            case OS_BYTE:
                tmp = tcg_const_i32((int8_t)read_im8(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                tcg_temp_free(tmp);
                break;
            case OS_WORD:
                tmp = tcg_const_i32((int16_t)read_im16(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                tcg_temp_free(tmp);
                break;
            case OS_LONG:
                tmp = tcg_const_i32(read_im32(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                tcg_temp_free(tmp);
                break;
            case OS_SINGLE:
                tmp = tcg_const_i32(read_im32(env, s));
                gen_helper_extf32(cpu_env, fp, tmp);
                tcg_temp_free(tmp);
                break;
            case OS_DOUBLE:
                t64 = tcg_const_i64(read_im64(env, s));
                gen_helper_extf64(cpu_env, fp, t64);
                tcg_temp_free_i64(t64);
                break;
            case OS_EXTENDED:
                if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
                    gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
                    break;
                }
                /* Upper word in the first longword, lower 64 bits next.  */
                tmp = tcg_const_i32(read_im32(env, s) >> 16);
                tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
                tcg_temp_free(tmp);
                t64 = tcg_const_i64(read_im64(env, s));
                tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
                tcg_temp_free_i64(t64);
                break;
            case OS_PACKED:
                /* unimplemented data type on 68040/ColdFire
                 * FIXME if needed for another FPU
                 */
                gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
                break;
            default:
                g_assert_not_reached();
            }
            return 0;
        default:
            return -1;
        }
    }
    return -1;
}
1230 
1231 static int gen_ea_fp(CPUM68KState *env, DisasContext *s, uint16_t insn,
1232                        int opsize, TCGv_ptr fp, ea_what what, int index)
1233 {
1234     int mode = extract32(insn, 3, 3);
1235     int reg0 = REG(insn, 0);
1236     return gen_ea_mode_fp(env, s, mode, reg0, opsize, fp, what, index);
1237 }
1238 
/* A comparison distilled from the current condition-code state:
   "v1 tcond v2" holds when the guest condition is true.  */
typedef struct {
    TCGCond tcond;  /* TCG condition applied to v1 vs v2 */
    bool g1;        /* v1 is a global temp; do not free it */
    bool g2;        /* v2 is a global temp; do not free it */
    TCGv v1;
    TCGv v2;
} DisasCompare;
1246 
/*
 * Fill *C with a TCG comparison equivalent to the m68k condition code
 * COND (0..15), exploiting the current lazy cc_op where possible so the
 * full flag set need not be materialized.  Even condition numbers are
 * the complements of the following odd numbers: tcond is computed for
 * the odd form and inverted at the end when COND is even.  The caller
 * must release *C with free_cond().
 */
static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv tmp, tmp2;
    TCGCond tcond;
    CCOp op = s->cc_op;

    /* The CC_OP_CMP form can handle most normal comparisons directly.  */
    if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) {
        c->g1 = c->g2 = 1;
        c->v1 = QREG_CC_N;
        c->v2 = QREG_CC_V;
        switch (cond) {
        case 2: /* HI */
        case 3: /* LS */
            tcond = TCG_COND_LEU;
            goto done;
        case 4: /* CC */
        case 5: /* CS */
            tcond = TCG_COND_LTU;
            goto done;
        case 6: /* NE */
        case 7: /* EQ */
            tcond = TCG_COND_EQ;
            goto done;
        case 10: /* PL */
        case 11: /* MI */
            c->g1 = c->g2 = 0;
            c->v2 = tcg_const_i32(0);
            c->v1 = tmp = tcg_temp_new();
            tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
            gen_ext(tmp, tmp, op - CC_OP_CMPB, 1);
            /* fallthru */
        case 12: /* GE */
        case 13: /* LT */
            tcond = TCG_COND_LT;
            goto done;
        case 14: /* GT */
        case 15: /* LE */
            tcond = TCG_COND_LE;
            goto done;
        }
    }

    /* Default: compare some value against zero.  */
    c->g1 = 1;
    c->g2 = 0;
    c->v2 = tcg_const_i32(0);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
        c->v1 = c->v2;
        tcond = TCG_COND_NEVER;
        goto done;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        /* Logic operations clear V, which simplifies LE to (Z || N),
           and since Z and N are co-located, this becomes a normal
           comparison vs N.  */
        if (op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LE;
            goto done;
        }
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        /* Logic operations clear V, which simplifies this to N.  */
        if (op != CC_OP_LOGIC) {
            break;
        }
        /* fallthru */
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        /* Several cases represent N normally.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LT;
            goto done;
        }
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        /* Some cases fold Z into N.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            tcond = TCG_COND_EQ;
            c->v1 = QREG_CC_N;
            goto done;
        }
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        /* Some cases fold C into X.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL) {
            tcond = TCG_COND_NE;
            c->v1 = QREG_CC_X;
            goto done;
        }
        /* fallthru */
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        /* Logic operations clear V and C.  */
        if (op == CC_OP_LOGIC) {
            tcond = TCG_COND_NEVER;
            c->v1 = c->v2;
            goto done;
        }
        break;
    }

    /* Otherwise, flush flag state to CC_OP_FLAGS.  */
    gen_flush_flags(s);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
    default:
        /* Invalid, or handled above.  */
        abort();
    case 2: /* HI (!C && !Z) -> !(C || Z)*/
    case 3: /* LS (C || Z) */
        c->v1 = tmp = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
        tcond = TCG_COND_NE;
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        c->v1 = QREG_CC_C;
        tcond = TCG_COND_NE;
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        c->v1 = QREG_CC_Z;
        tcond = TCG_COND_EQ;
        break;
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        c->v1 = QREG_CC_V;
        tcond = TCG_COND_LT;
        break;
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        c->v1 = QREG_CC_N;
        tcond = TCG_COND_LT;
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        c->v1 = tmp = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
        tcond = TCG_COND_LT;
        break;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        c->v1 = tmp = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_neg_i32(tmp, tmp);
        tmp2 = tcg_temp_new();
        tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
        tcg_gen_or_i32(tmp, tmp, tmp2);
        tcg_temp_free(tmp2);
        tcond = TCG_COND_LT;
        break;
    }

 done:
    /* Even condition codes are the inverse of the odd ones.  */
    if ((cond & 1) == 0) {
        tcond = tcg_invert_cond(tcond);
    }
    c->tcond = tcond;
}
1425 
1426 static void free_cond(DisasCompare *c)
1427 {
1428     if (!c->g1) {
1429         tcg_temp_free(c->v1);
1430     }
1431     if (!c->g2) {
1432         tcg_temp_free(c->v2);
1433     }
1434 }
1435 
1436 static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
1437 {
1438   DisasCompare c;
1439 
1440   gen_cc_cond(&c, s, cond);
1441   update_cc_op(s);
1442   tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
1443   free_cond(&c);
1444 }
1445 
/* Force a TB lookup after an instruction that changes the CPU state.
   Flushes the lazy cc_op, writes the next insn address back to QREG_PC
   and marks the TB for a DISAS_EXIT so the new state takes effect.  */
static void gen_exit_tb(DisasContext *s)
{
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, s->pc);
    s->base.is_jmp = DISAS_EXIT;
}
1453 
/*
 * Evaluate the <EA> operand of INSN as a source of size OPSIZE into
 * RESULT (sign-extended when OP_SIGN is set).  On an invalid addressing
 * mode this raises an address fault and returns from the enclosing
 * disas function.
 */
#define SRC_EA(env, result, opsize, op_sign, addrp) do {                \
        result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp,         \
                        op_sign ? EA_LOADS : EA_LOADU, IS_USER(s));     \
        if (IS_NULL_QREG(result)) {                                     \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)
1462 
/*
 * Store VAL of size OPSIZE to the <EA> destination of INSN.  On an
 * invalid addressing mode this raises an address fault and returns
 * from the enclosing disas function.
 */
#define DEST_EA(env, insn, opsize, val, addrp) do {                     \
        TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp,       \
                                EA_STORE, IS_USER(s));                  \
        if (IS_NULL_QREG(ea_result)) {                                  \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)
1471 
1472 static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
1473 {
1474 #ifndef CONFIG_USER_ONLY
1475     return (s->base.pc_first & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)
1476         || (s->base.pc_next & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
1477 #else
1478     return true;
1479 #endif
1480 }
1481 
/* Generate a jump to an immediate address.  N is the goto_tb slot to
   use when direct TB chaining is possible.  */
static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
{
    if (unlikely(s->base.singlestep_enabled)) {
        /* Single-stepping: raise a debug exception instead of jumping.  */
        gen_exception(s, dest, EXCP_DEBUG);
    } else if (use_goto_tb(s, dest)) {
        /* Chain directly to the destination TB.  */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(QREG_PC, dest);
        tcg_gen_exit_tb(s->base.tb, n);
    } else {
        /* Fall back to an indirect exit through the TB lookup.  */
        gen_jmp_im(s, dest);
        tcg_gen_exit_tb(NULL, 0);
    }
    s->base.is_jmp = DISAS_NORETURN;
}
1497 
/* Scc <EA>: set the destination byte to 0xff if the condition holds,
   0x00 otherwise.  */
DISAS_INSN(scc)
{
    DisasCompare c;
    int cond;
    TCGv tmp;

    cond = (insn >> 8) & 0xf;
    gen_cc_cond(&c, s, cond);

    tmp = tcg_temp_new();
    tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
    free_cond(&c);

    /* setcond yields 0/1; negate to get 0x00000000 / 0xffffffff.  */
    tcg_gen_neg_i32(tmp, tmp);
    DEST_EA(env, insn, OS_BYTE, tmp, NULL);
    tcg_temp_free(tmp);
}
1515 
/* DBcc Dn,<disp>: if the condition is true fall through; otherwise
   decrement the low word of Dn and branch back unless the counter
   reaches -1.  */
DISAS_INSN(dbcc)
{
    TCGLabel *l1;
    TCGv reg;
    TCGv tmp;
    int16_t offset;
    uint32_t base;

    reg = DREG(insn, 0);
    base = s->pc;   /* branch target is relative to the displacement word */
    offset = (int16_t)read_im16(env, s);
    l1 = gen_new_label();
    gen_jmpcc(s, (insn >> 8) & 0xf, l1);

    /* Condition false: decrement only the low 16 bits of Dn.  */
    tmp = tcg_temp_new();
    tcg_gen_ext16s_i32(tmp, reg);
    tcg_gen_addi_i32(tmp, tmp, -1);
    gen_partset_reg(OS_WORD, reg, tmp);
    tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, -1, l1);
    gen_jmp_tb(s, 1, base + offset);
    gen_set_label(l1);
    gen_jmp_tb(s, 0, s->pc);
}
1539 
/* Undefined MAC instruction: raise the line-A exception.  */
DISAS_INSN(undef_mac)
{
    gen_exception(s, s->base.pc_next, EXCP_LINEA);
}
1544 
/* Undefined FPU instruction: raise the line-F exception.  */
DISAS_INSN(undef_fpu)
{
    gen_exception(s, s->base.pc_next, EXCP_LINEF);
}
1549 
/* Illegal instruction: log it and raise the illegal-instruction trap.  */
DISAS_INSN(undef)
{
    /* ??? This is both instructions that are as yet unimplemented
       for the 680x0 series, as well as those that are implemented
       but actually illegal for CPU32 or pre-68020.  */
    qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %08x\n",
                  insn, s->base.pc_next);
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
}
1559 
1560 DISAS_INSN(mulw)
1561 {
1562     TCGv reg;
1563     TCGv tmp;
1564     TCGv src;
1565     int sign;
1566 
1567     sign = (insn & 0x100) != 0;
1568     reg = DREG(insn, 9);
1569     tmp = tcg_temp_new();
1570     if (sign)
1571         tcg_gen_ext16s_i32(tmp, reg);
1572     else
1573         tcg_gen_ext16u_i32(tmp, reg);
1574     SRC_EA(env, src, OS_WORD, sign, NULL);
1575     tcg_gen_mul_i32(tmp, tmp, src);
1576     tcg_gen_mov_i32(reg, tmp);
1577     gen_logic_cc(s, tmp, OS_LONG);
1578     tcg_temp_free(tmp);
1579 }
1580 
1581 DISAS_INSN(divw)
1582 {
1583     int sign;
1584     TCGv src;
1585     TCGv destr;
1586 
1587     /* divX.w <EA>,Dn    32/16 -> 16r:16q */
1588 
1589     sign = (insn & 0x100) != 0;
1590 
1591     /* dest.l / src.w */
1592 
1593     SRC_EA(env, src, OS_WORD, sign, NULL);
1594     destr = tcg_const_i32(REG(insn, 9));
1595     if (sign) {
1596         gen_helper_divsw(cpu_env, destr, src);
1597     } else {
1598         gen_helper_divuw(cpu_env, destr, src);
1599     }
1600     tcg_temp_free(destr);
1601 
1602     set_cc_op(s, CC_OP_FLAGS);
1603 }
1604 
/*
 * divs.l / divu.l and the 64/32 quad variants.  The extension word
 * supplies the quotient register Dq (bits 12-14) and remainder register
 * Dr (bits 0-2); bit 11 selects signed division and bit 10 selects the
 * 64-bit dividend form, which requires the QUAD_MULDIV feature.  Flags
 * are computed by the helpers (CC_OP_FLAGS).
 */
DISAS_INSN(divl)
{
    TCGv num, reg, den;
    int sign;
    uint16_t ext;

    ext = read_im16(env, s);

    sign = (ext & 0x0800) != 0;

    if (ext & 0x400) {
        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }

        /* divX.l <EA>, Dr:Dq    64/32 -> 32r:32q */

        SRC_EA(env, den, OS_LONG, 0, NULL);
        num = tcg_const_i32(REG(ext, 12));
        reg = tcg_const_i32(REG(ext, 0));
        if (sign) {
            gen_helper_divsll(cpu_env, num, reg, den);
        } else {
            gen_helper_divull(cpu_env, num, reg, den);
        }
        tcg_temp_free(reg);
        tcg_temp_free(num);
        set_cc_op(s, CC_OP_FLAGS);
        return;
    }

    /* divX.l <EA>, Dq        32/32 -> 32q     */
    /* divXl.l <EA>, Dr:Dq    32/32 -> 32r:32q */

    SRC_EA(env, den, OS_LONG, 0, NULL);
    num = tcg_const_i32(REG(ext, 12));
    reg = tcg_const_i32(REG(ext, 0));
    if (sign) {
        gen_helper_divsl(cpu_env, num, reg, den);
    } else {
        gen_helper_divul(cpu_env, num, reg, den);
    }
    tcg_temp_free(reg);
    tcg_temp_free(num);

    set_cc_op(s, CC_OP_FLAGS);
}
1653 
/* Two-digit BCD addition: dest10 = dest10 + src10 + X, leaving the raw
   binary sum (carry in bit 8) in DEST for bcd_flags to inspect.  */
static void bcd_add(TCGv dest, TCGv src)
{
    TCGv t0, t1;

    /*  dest10 = dest10 + src10 + X
     *
     *        t1 = src
     *        t2 = t1 + 0x066
     *        t3 = t2 + dest + X
     *        t4 = t2 ^ dest
     *        t5 = t3 ^ t4
     *        t6 = ~t5 & 0x110
     *        t7 = (t6 >> 2) | (t6 >> 3)
     *        return t3 - t7
     */

    /* t1 = (src + 0x066) + dest + X
     *    = result with some possible exceeding 0x6
     */

    t0 = tcg_const_i32(0x066);
    tcg_gen_add_i32(t0, t0, src);

    t1 = tcg_temp_new();
    tcg_gen_add_i32(t1, t0, dest);
    tcg_gen_add_i32(t1, t1, QREG_CC_X);

    /* we will remove exceeding 0x6 where there is no carry */

    /* t0 = (src + 0x0066) ^ dest
     *    = t1 without carries
     */

    tcg_gen_xor_i32(t0, t0, dest);

    /* extract the carries
     * t0 = t0 ^ t1
     *    = only the carries
     */

    tcg_gen_xor_i32(t0, t0, t1);

    /* generate 0x1 where there is no carry
     * and for each 0x10, generate a 0x6
     */

    tcg_gen_shri_i32(t0, t0, 3);
    tcg_gen_not_i32(t0, t0);
    tcg_gen_andi_i32(t0, t0, 0x22);
    /* dest = 3 * t0, i.e. 0x6 per digit that did not carry */
    tcg_gen_add_i32(dest, t0, t0);
    tcg_gen_add_i32(dest, dest, t0);
    tcg_temp_free(t0);

    /* remove the exceeding 0x6
     * for digits that have not generated a carry
     */

    tcg_gen_sub_i32(dest, t1, dest);
    tcg_temp_free(t1);
}
1714 
/* Two-digit BCD subtraction: dest10 = dest10 - src10 - X, implemented
   as a BCD addition of the ten's complement; bit 8 of DEST carries the
   borrow for bcd_flags.  */
static void bcd_sub(TCGv dest, TCGv src)
{
    TCGv t0, t1, t2;

    /*  dest10 = dest10 - src10 - X
     *         = bcd_add(dest + 1 - X, 0x199 - src)
     */

    /* t0 = 0x066 + (0x199 - src) */

    t0 = tcg_temp_new();
    tcg_gen_subfi_i32(t0, 0x1ff, src);

    /* t1 = t0 + dest + 1 - X*/

    t1 = tcg_temp_new();
    tcg_gen_add_i32(t1, t0, dest);
    tcg_gen_addi_i32(t1, t1, 1);
    tcg_gen_sub_i32(t1, t1, QREG_CC_X);

    /* t2 = t0 ^ dest */

    t2 = tcg_temp_new();
    tcg_gen_xor_i32(t2, t0, dest);

    /* t0 = t1 ^ t2 */

    tcg_gen_xor_i32(t0, t1, t2);

    /* t2 = ~t0 & 0x110
     * t0 = (t2 >> 2) | (t2 >> 3)
     *
     * to fit on 8bit operands, changed in:
     *
     * t2 = ~(t0 >> 3) & 0x22
     * t0 = t2 + t2
     * t0 = t0 + t2
     */

    tcg_gen_shri_i32(t2, t0, 3);
    tcg_gen_not_i32(t2, t2);
    tcg_gen_andi_i32(t2, t2, 0x22);
    tcg_gen_add_i32(t0, t2, t2);
    tcg_gen_add_i32(t0, t0, t2);
    tcg_temp_free(t2);

    /* return t1 - t0 */

    tcg_gen_sub_i32(dest, t1, t0);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
}
1767 
/* Update C/X/Z from a bcd_add/bcd_sub result: C and X come from the
   carry in bit 8, and any nonzero low byte is ORed into CC_Z so that Z
   stays sticky across a chain of BCD operations.  */
static void bcd_flags(TCGv val)
{
    tcg_gen_andi_i32(QREG_CC_C, val, 0x0ff);
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_C);

    tcg_gen_extract_i32(QREG_CC_C, val, 8, 1);

    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
}
1777 
1778 DISAS_INSN(abcd_reg)
1779 {
1780     TCGv src;
1781     TCGv dest;
1782 
1783     gen_flush_flags(s); /* !Z is sticky */
1784 
1785     src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
1786     dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);
1787     bcd_add(dest, src);
1788     gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
1789 
1790     bcd_flags(dest);
1791 }
1792 
1793 DISAS_INSN(abcd_mem)
1794 {
1795     TCGv src, dest, addr;
1796 
1797     gen_flush_flags(s); /* !Z is sticky */
1798 
1799     /* Indirect pre-decrement load (mode 4) */
1800 
1801     src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
1802                       NULL_QREG, NULL, EA_LOADU, IS_USER(s));
1803     dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
1804                        NULL_QREG, &addr, EA_LOADU, IS_USER(s));
1805 
1806     bcd_add(dest, src);
1807 
1808     gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
1809                 EA_STORE, IS_USER(s));
1810 
1811     bcd_flags(dest);
1812 }
1813 
1814 DISAS_INSN(sbcd_reg)
1815 {
1816     TCGv src, dest;
1817 
1818     gen_flush_flags(s); /* !Z is sticky */
1819 
1820     src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
1821     dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);
1822 
1823     bcd_sub(dest, src);
1824 
1825     gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
1826 
1827     bcd_flags(dest);
1828 }
1829 
1830 DISAS_INSN(sbcd_mem)
1831 {
1832     TCGv src, dest, addr;
1833 
1834     gen_flush_flags(s); /* !Z is sticky */
1835 
1836     /* Indirect pre-decrement load (mode 4) */
1837 
1838     src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
1839                       NULL_QREG, NULL, EA_LOADU, IS_USER(s));
1840     dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
1841                        NULL_QREG, &addr, EA_LOADU, IS_USER(s));
1842 
1843     bcd_sub(dest, src);
1844 
1845     gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
1846                 EA_STORE, IS_USER(s));
1847 
1848     bcd_flags(dest);
1849 }
1850 
1851 DISAS_INSN(nbcd)
1852 {
1853     TCGv src, dest;
1854     TCGv addr;
1855 
1856     gen_flush_flags(s); /* !Z is sticky */
1857 
1858     SRC_EA(env, src, OS_BYTE, 0, &addr);
1859 
1860     dest = tcg_const_i32(0);
1861     bcd_sub(dest, src);
1862 
1863     DEST_EA(env, insn, OS_BYTE, dest, &addr);
1864 
1865     bcd_flags(dest);
1866 
1867     tcg_temp_free(dest);
1868 }
1869 
/*
 * ADD/SUB in both directions.  Opcode bit 14 selects add vs sub and
 * bit 8 selects the direction: set means Dn op <EA> -> <EA>, clear
 * means <EA> op Dn -> Dn.  X is computed with an unsigned compare
 * (carry/borrow); N/Z/V come from the lazy CC_OP_ADD*/CC_OP_SUB* state
 * recorded via gen_update_cc_add.
 */
DISAS_INSN(addsub)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv tmp;
    TCGv addr;
    int add;
    int opsize;

    add = (insn & 0x4000) != 0;
    opsize = insn_opsize(insn);
    reg = gen_extend(s, DREG(insn, 9), opsize, 1);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        /* Dn op <EA> -> <EA>: keep the address for the write-back.  */
        SRC_EA(env, tmp, opsize, 1, &addr);
        src = reg;
    } else {
        /* <EA> op Dn -> Dn.  */
        tmp = reg;
        SRC_EA(env, src, opsize, 1, NULL);
    }
    if (add) {
        tcg_gen_add_i32(dest, tmp, src);
        /* Carry out: the sum wrapped below one of its addends.  */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
        set_cc_op(s, CC_OP_ADDB + opsize);
    } else {
        /* Borrow: minuend unsigned-less-than subtrahend.  */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
        tcg_gen_sub_i32(dest, tmp, src);
        set_cc_op(s, CC_OP_SUBB + opsize);
    }
    gen_update_cc_add(dest, src, opsize);
    if (insn & 0x100) {
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        gen_partset_reg(opsize, DREG(insn, 9), dest);
    }
    tcg_temp_free(dest);
}
1908 
1909 /* Reverse the order of the bits in REG.  */
1910 DISAS_INSN(bitrev)
1911 {
1912     TCGv reg;
1913     reg = DREG(insn, 0);
1914     gen_helper_bitrev(reg, reg);
1915 }
1916 
/*
 * Dynamic bit operations BTST/BCHG/BCLR/BSET with the bit number taken
 * from a data register.  A register destination operates on the full
 * long (bit number modulo 32); a memory destination is byte-sized (bit
 * number modulo 8).  CC_Z is set from the tested bit; BTST (op == 0)
 * writes nothing back.
 */
DISAS_INSN(bitop_reg)
{
    int opsize;
    int op;
    TCGv src1;
    TCGv src2;
    TCGv tmp;
    TCGv addr;
    TCGv dest;

    if ((insn & 0x38) != 0)
        opsize = OS_BYTE;
    else
        opsize = OS_LONG;
    op = (insn >> 6) & 3;
    SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);

    gen_flush_flags(s);
    src2 = tcg_temp_new();
    /* Reduce the bit number modulo the operand width.  */
    if (opsize == OS_BYTE)
        tcg_gen_andi_i32(src2, DREG(insn, 9), 7);
    else
        tcg_gen_andi_i32(src2, DREG(insn, 9), 31);

    /* tmp = mask with only the selected bit set.  */
    tmp = tcg_const_i32(1);
    tcg_gen_shl_i32(tmp, tmp, src2);
    tcg_temp_free(src2);

    /* Z reflects the state of the tested bit.  */
    tcg_gen_and_i32(QREG_CC_Z, src1, tmp);

    dest = tcg_temp_new();
    switch (op) {
    case 1: /* bchg */
        tcg_gen_xor_i32(dest, src1, tmp);
        break;
    case 2: /* bclr */
        tcg_gen_andc_i32(dest, src1, tmp);
        break;
    case 3: /* bset */
        tcg_gen_or_i32(dest, src1, tmp);
        break;
    default: /* btst */
        break;
    }
    tcg_temp_free(tmp);
    if (op) {
        DEST_EA(env, insn, opsize, dest, &addr);
    }
    tcg_temp_free(dest);
}
1967 
1968 DISAS_INSN(sats)
1969 {
1970     TCGv reg;
1971     reg = DREG(insn, 0);
1972     gen_flush_flags(s);
1973     gen_helper_sats(reg, reg, QREG_CC_V);
1974     gen_logic_cc(s, reg, OS_LONG);
1975 }
1976 
1977 static void gen_push(DisasContext *s, TCGv val)
1978 {
1979     TCGv tmp;
1980 
1981     tmp = tcg_temp_new();
1982     tcg_gen_subi_i32(tmp, QREG_SP, 4);
1983     gen_store(s, OS_LONG, tmp, val, IS_USER(s));
1984     tcg_gen_mov_i32(QREG_SP, tmp);
1985     tcg_temp_free(tmp);
1986 }
1987 
1988 static TCGv mreg(int reg)
1989 {
1990     if (reg < 8) {
1991         /* Dx */
1992         return cpu_dregs[reg];
1993     }
1994     /* Ax */
1995     return cpu_aregs[reg & 7];
1996 }
1997 
/*
 * movem: move multiple registers to/from memory.  The 16-bit mask in
 * the extension word selects D0-D7 (bits 0-7) and A0-A7 (bits 8-15);
 * bit 10 of the insn selects load vs store, bit 6 long vs word size.
 * For the pre-decrement form the hardware bit-reverses the mask, which
 * the (mask << i) & 0x8000 test below accounts for.
 */
DISAS_INSN(movem)
{
    TCGv addr, incr, tmp, r[16];
    int is_load = (insn & 0x0400) != 0;
    int opsize = (insn & 0x40) != 0 ? OS_LONG : OS_WORD;
    uint16_t mask = read_im16(env, s);
    int mode = extract32(insn, 3, 3);
    int reg0 = REG(insn, 0);
    int i;

    tmp = cpu_aregs[reg0];

    switch (mode) {
    case 0: /* data register direct */
    case 1: /* addr register direct */
    do_addr_fault:
        gen_addr_fault(s);
        return;

    case 2: /* indirect */
        break;

    case 3: /* indirect post-increment */
        if (!is_load) {
            /* post-increment is not allowed */
            goto do_addr_fault;
        }
        break;

    case 4: /* indirect pre-decrement */
        if (is_load) {
            /* pre-decrement is not allowed */
            goto do_addr_fault;
        }
        /* We want a bare copy of the address reg, without any pre-decrement
           adjustment, as gen_lea would provide.  */
        break;

    default:
        tmp = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(tmp)) {
            goto do_addr_fault;
        }
        break;
    }

    /* Work on a private copy of the base address.  */
    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);
    incr = tcg_const_i32(opsize_bytes(opsize));

    if (is_load) {
        /* memory to register */
        /* All values are loaded into temporaries first and only then
           committed to the registers — presumably so that a fault
           mid-sequence leaves the registers untouched (TODO confirm).  */
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                r[i] = gen_load(s, opsize, addr, 1, IS_USER(s));
                tcg_gen_add_i32(addr, addr, incr);
            }
        }
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                tcg_gen_mov_i32(mreg(i), r[i]);
                tcg_temp_free(r[i]);
            }
        }
        if (mode == 3) {
            /* post-increment: movem (An)+,X */
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        }
    } else {
        /* register to memory */
        if (mode == 4) {
            /* pre-decrement: movem X,-(An) */
            for (i = 15; i >= 0; i--) {
                if ((mask << i) & 0x8000) {
                    tcg_gen_sub_i32(addr, addr, incr);
                    if (reg0 + 8 == i &&
                        m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) {
                        /* M68020+: if the addressing register is the
                         * register moved to memory, the value written
                         * is the initial value decremented by the size of
                         * the operation, regardless of how many actual
                         * stores have been performed until this point.
                         * M68000/M68010: the value is the initial value.
                         */
                        tmp = tcg_temp_new();
                        tcg_gen_sub_i32(tmp, cpu_aregs[reg0], incr);
                        gen_store(s, opsize, addr, tmp, IS_USER(s));
                        tcg_temp_free(tmp);
                    } else {
                        gen_store(s, opsize, addr, mreg(i), IS_USER(s));
                    }
                }
            }
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        } else {
            for (i = 0; i < 16; i++) {
                if (mask & (1 << i)) {
                    gen_store(s, opsize, addr, mreg(i), IS_USER(s));
                    tcg_gen_add_i32(addr, addr, incr);
                }
            }
        }
    }

    tcg_temp_free(incr);
    tcg_temp_free(addr);
}
2105 
/*
 * movep: move peripheral data.  Transfers 2 or 4 bytes (insn bit 6)
 * between a data register and alternate memory bytes (every other
 * address, stepping by 2), most-significant byte first.  Bit 7 selects
 * direction: set = register to memory, clear = memory to register.
 */
DISAS_INSN(movep)
{
    uint8_t i;
    int16_t displ;
    TCGv reg;
    TCGv addr;
    TCGv abuf;
    TCGv dbuf;

    displ = read_im16(env, s);

    addr = AREG(insn, 0);
    reg = DREG(insn, 9);

    /* abuf walks the memory addresses, dbuf shuttles single bytes.  */
    abuf = tcg_temp_new();
    tcg_gen_addi_i32(abuf, addr, displ);
    dbuf = tcg_temp_new();

    /* Byte count: long (4) or word (2) transfer.  */
    if (insn & 0x40) {
        i = 4;
    } else {
        i = 2;
    }

    if (insn & 0x80) {
        /* Register to memory: emit bytes high-to-low.  */
        for ( ; i > 0 ; i--) {
            tcg_gen_shri_i32(dbuf, reg, (i - 1) * 8);
            tcg_gen_qemu_st8(dbuf, abuf, IS_USER(s));
            if (i > 1) {
                tcg_gen_addi_i32(abuf, abuf, 2);
            }
        }
    } else {
        /* Memory to register: deposit bytes into reg high-to-low.  */
        for ( ; i > 0 ; i--) {
            tcg_gen_qemu_ld8u(dbuf, abuf, IS_USER(s));
            tcg_gen_deposit_i32(reg, reg, dbuf, (i - 1) * 8, 8);
            if (i > 1) {
                tcg_gen_addi_i32(abuf, abuf, 2);
            }
        }
    }
    tcg_temp_free(abuf);
    tcg_temp_free(dbuf);
}
2150 
2151 DISAS_INSN(bitop_im)
2152 {
2153     int opsize;
2154     int op;
2155     TCGv src1;
2156     uint32_t mask;
2157     int bitnum;
2158     TCGv tmp;
2159     TCGv addr;
2160 
2161     if ((insn & 0x38) != 0)
2162         opsize = OS_BYTE;
2163     else
2164         opsize = OS_LONG;
2165     op = (insn >> 6) & 3;
2166 
2167     bitnum = read_im16(env, s);
2168     if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
2169         if (bitnum & 0xfe00) {
2170             disas_undef(env, s, insn);
2171             return;
2172         }
2173     } else {
2174         if (bitnum & 0xff00) {
2175             disas_undef(env, s, insn);
2176             return;
2177         }
2178     }
2179 
2180     SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
2181 
2182     gen_flush_flags(s);
2183     if (opsize == OS_BYTE)
2184         bitnum &= 7;
2185     else
2186         bitnum &= 31;
2187     mask = 1 << bitnum;
2188 
2189    tcg_gen_andi_i32(QREG_CC_Z, src1, mask);
2190 
2191     if (op) {
2192         tmp = tcg_temp_new();
2193         switch (op) {
2194         case 1: /* bchg */
2195             tcg_gen_xori_i32(tmp, src1, mask);
2196             break;
2197         case 2: /* bclr */
2198             tcg_gen_andi_i32(tmp, src1, ~mask);
2199             break;
2200         case 3: /* bset */
2201             tcg_gen_ori_i32(tmp, src1, mask);
2202             break;
2203         default: /* btst */
2204             break;
2205         }
2206         DEST_EA(env, insn, opsize, tmp, &addr);
2207         tcg_temp_free(tmp);
2208     }
2209 }
2210 
2211 static TCGv gen_get_ccr(DisasContext *s)
2212 {
2213     TCGv dest;
2214 
2215     update_cc_op(s);
2216     dest = tcg_temp_new();
2217     gen_helper_get_ccr(dest, cpu_env);
2218     return dest;
2219 }
2220 
2221 static TCGv gen_get_sr(DisasContext *s)
2222 {
2223     TCGv ccr;
2224     TCGv sr;
2225 
2226     ccr = gen_get_ccr(s);
2227     sr = tcg_temp_new();
2228     tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
2229     tcg_gen_or_i32(sr, sr, ccr);
2230     return sr;
2231 }
2232 
/* Load an immediate value into the CCR (ccr_only) or the whole SR.  */
static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
{
    if (ccr_only) {
        /* Set the flags in their canonical CC_OP_FLAGS encoding:
           C and X are 0/1, V and N are 0/-1, and Z is inverted
           (QREG_CC_Z == 0 means the Z flag is set).  */
        tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
        tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
        tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
        tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
        tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
    } else {
        /* Full SR write goes through the helper.  */
        TCGv sr = tcg_const_i32(val);
        gen_helper_set_sr(cpu_env, sr);
        tcg_temp_free(sr);
    }
    set_cc_op(s, CC_OP_FLAGS);
}
2248 
2249 static void gen_set_sr(DisasContext *s, TCGv val, int ccr_only)
2250 {
2251     if (ccr_only) {
2252         gen_helper_set_ccr(cpu_env, val);
2253     } else {
2254         gen_helper_set_sr(cpu_env, val);
2255     }
2256     set_cc_op(s, CC_OP_FLAGS);
2257 }
2258 
2259 static void gen_move_to_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
2260                            bool ccr_only)
2261 {
2262     if ((insn & 0x3f) == 0x3c) {
2263         uint16_t val;
2264         val = read_im16(env, s);
2265         gen_set_sr_im(s, val, ccr_only);
2266     } else {
2267         TCGv src;
2268         SRC_EA(env, src, OS_WORD, 0, NULL);
2269         gen_set_sr(s, src, ccr_only);
2270     }
2271 }
2272 
/*
 * Immediate arithmetic/logic group: ori/andi/subi/addi/eori/cmpi,
 * selected by bits 11:9.  For the logical ops an EA field of 0x3c
 * targets CCR (byte size) or SR (word size, privileged) instead of
 * a normal operand.
 */
DISAS_INSN(arith_im)
{
    int op;
    TCGv im;
    TCGv src1;
    TCGv dest;
    TCGv addr;
    int opsize;
    bool with_SR = ((insn & 0x3f) == 0x3c);

    op = (insn >> 9) & 7;
    opsize = insn_opsize(insn);
    /* Fetch the immediate, sign-extended to 32 bits.  */
    switch (opsize) {
    case OS_BYTE:
        im = tcg_const_i32((int8_t)read_im8(env, s));
        break;
    case OS_WORD:
        im = tcg_const_i32((int16_t)read_im16(env, s));
        break;
    case OS_LONG:
        im = tcg_const_i32(read_im32(env, s));
        break;
    default:
        g_assert_not_reached();
    }

    if (with_SR) {
        /* SR/CCR can only be used with andi/eori/ori */
        if (op == 2 || op == 3 || op == 6) {
            disas_undef(env, s, insn);
            return;
        }
        switch (opsize) {
        case OS_BYTE:
            src1 = gen_get_ccr(s);
            break;
        case OS_WORD:
            /* SR writes are privileged.  */
            if (IS_USER(s)) {
                gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
                return;
            }
            src1 = gen_get_sr(s);
            break;
        default:
            /* OS_LONG; others already g_assert_not_reached.  */
            disas_undef(env, s, insn);
            return;
        }
    } else {
        /* cmpi never writes back, so it needs no EA address.  */
        SRC_EA(env, src1, opsize, 1, (op == 6) ? NULL : &addr);
    }
    dest = tcg_temp_new();
    switch (op) {
    case 0: /* ori */
        tcg_gen_or_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 1: /* andi */
        tcg_gen_and_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 2: /* subi */
        /* Borrow (hence X) when the subtrahend exceeds the operand.  */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, src1, im);
        tcg_gen_sub_i32(dest, src1, im);
        gen_update_cc_add(dest, im, opsize);
        set_cc_op(s, CC_OP_SUBB + opsize);
        DEST_EA(env, insn, opsize, dest, &addr);
        break;
    case 3: /* addi */
        tcg_gen_add_i32(dest, src1, im);
        gen_update_cc_add(dest, im, opsize);
        /* Carry (hence X) when the sum wrapped below the addend.  */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
        set_cc_op(s, CC_OP_ADDB + opsize);
        DEST_EA(env, insn, opsize, dest, &addr);
        break;
    case 5: /* eori */
        tcg_gen_xor_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 6: /* cmpi */
        gen_update_cc_cmp(s, src1, im, opsize);
        break;
    default:
        abort();
    }
    tcg_temp_free(im);
    tcg_temp_free(dest);
}
2376 
/*
 * cas: single compare-and-swap, implemented with the TCG atomic
 * cmpxchg op.  The compare value Dc is sign-extended to the operand
 * size before the comparison.
 */
DISAS_INSN(cas)
{
    int opsize;
    TCGv addr;
    uint16_t ext;
    TCGv load;
    TCGv cmp;
    TCGMemOp opc;

    /* Size in bits 10:9; also fixes the memory access type.  */
    switch ((insn >> 9) & 3) {
    case 1:
        opsize = OS_BYTE;
        opc = MO_SB;
        break;
    case 2:
        opsize = OS_WORD;
        opc = MO_TESW;
        break;
    case 3:
        opsize = OS_LONG;
        opc = MO_TESL;
        break;
    default:
        g_assert_not_reached();
    }

    ext = read_im16(env, s);

    /* cas Dc,Du,<EA> */

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    cmp = gen_extend(s, DREG(ext, 0), opsize, 1);

    /* if  <EA> == Dc then
     *     <EA> = Du
     *     Dc = <EA> (because <EA> == Dc)
     * else
     *     Dc = <EA>
     */

    load = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_i32(load, addr, cmp, DREG(ext, 6),
                               IS_USER(s), opc);
    /* update flags before setting cmp to load */
    gen_update_cc_cmp(s, load, cmp, opsize);
    gen_partset_reg(opsize, DREG(ext, 0), load);

    tcg_temp_free(load);

    /* Complete the post-increment / pre-decrement side effect on An.  */
    switch (extract32(insn, 3, 3)) {
    case 3: /* Indirect postincrement.  */
        tcg_gen_addi_i32(AREG(insn, 0), addr, opsize_bytes(opsize));
        break;
    case 4: /* Indirect predecrememnt.  */
        tcg_gen_mov_i32(AREG(insn, 0), addr);
        break;
    }
}
2440 
/*
 * cas2w: double compare-and-swap, word size.  Unlike cas2l below there
 * is no parallel helper for the word variant, so under CF_PARALLEL we
 * bail out through gen_helper_exit_atomic instead.
 */
DISAS_INSN(cas2w)
{
    uint16_t ext1, ext2;
    TCGv addr1, addr2;
    TCGv regs;

    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */

    ext1 = read_im16(env, s);

    /* Bit 15 of each extension word selects An vs Dn as the address.  */
    if (ext1 & 0x8000) {
        /* Address Register */
        addr1 = AREG(ext1, 12);
    } else {
        /* Data Register */
        addr1 = DREG(ext1, 12);
    }

    ext2 = read_im16(env, s);
    if (ext2 & 0x8000) {
        /* Address Register */
        addr2 = AREG(ext2, 12);
    } else {
        /* Data Register */
        addr2 = DREG(ext2, 12);
    }

    /* if (R1) == Dc1 && (R2) == Dc2 then
     *     (R1) = Du1
     *     (R2) = Du2
     * else
     *     Dc1 = (R1)
     *     Dc2 = (R2)
     */

    /* Pack the four register numbers into one immediate for the helper.  */
    regs = tcg_const_i32(REG(ext2, 6) |
                         (REG(ext1, 6) << 3) |
                         (REG(ext2, 0) << 6) |
                         (REG(ext1, 0) << 9));
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_exit_atomic(cpu_env);
    } else {
        gen_helper_cas2w(cpu_env, regs, addr1, addr2);
    }
    tcg_temp_free(regs);

    /* Note that cas2w also assigned to env->cc_op.  */
    s->cc_op = CC_OP_CMPW;
    s->cc_op_synced = 1;
}
2491 
/*
 * cas2l: double compare-and-swap, long size.  A dedicated parallel
 * helper exists, so both single- and multi-threaded modes go through
 * a helper call.
 */
DISAS_INSN(cas2l)
{
    uint16_t ext1, ext2;
    TCGv addr1, addr2, regs;

    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */

    ext1 = read_im16(env, s);

    /* Bit 15 of each extension word selects An vs Dn as the address.  */
    if (ext1 & 0x8000) {
        /* Address Register */
        addr1 = AREG(ext1, 12);
    } else {
        /* Data Register */
        addr1 = DREG(ext1, 12);
    }

    ext2 = read_im16(env, s);
    if (ext2 & 0x8000) {
        /* Address Register */
        addr2 = AREG(ext2, 12);
    } else {
        /* Data Register */
        addr2 = DREG(ext2, 12);
    }

    /* if (R1) == Dc1 && (R2) == Dc2 then
     *     (R1) = Du1
     *     (R2) = Du2
     * else
     *     Dc1 = (R1)
     *     Dc2 = (R2)
     */

    /* Pack the four register numbers into one immediate for the helper.  */
    regs = tcg_const_i32(REG(ext2, 6) |
                         (REG(ext1, 6) << 3) |
                         (REG(ext2, 0) << 6) |
                         (REG(ext1, 0) << 9));
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_cas2l_parallel(cpu_env, regs, addr1, addr2);
    } else {
        gen_helper_cas2l(cpu_env, regs, addr1, addr2);
    }
    tcg_temp_free(regs);

    /* Note that cas2l also assigned to env->cc_op.  */
    s->cc_op = CC_OP_CMPL;
    s->cc_op_synced = 1;
}
2541 
2542 DISAS_INSN(byterev)
2543 {
2544     TCGv reg;
2545 
2546     reg = DREG(insn, 0);
2547     tcg_gen_bswap32_i32(reg, reg);
2548 }
2549 
2550 DISAS_INSN(move)
2551 {
2552     TCGv src;
2553     TCGv dest;
2554     int op;
2555     int opsize;
2556 
2557     switch (insn >> 12) {
2558     case 1: /* move.b */
2559         opsize = OS_BYTE;
2560         break;
2561     case 2: /* move.l */
2562         opsize = OS_LONG;
2563         break;
2564     case 3: /* move.w */
2565         opsize = OS_WORD;
2566         break;
2567     default:
2568         abort();
2569     }
2570     SRC_EA(env, src, opsize, 1, NULL);
2571     op = (insn >> 6) & 7;
2572     if (op == 1) {
2573         /* movea */
2574         /* The value will already have been sign extended.  */
2575         dest = AREG(insn, 9);
2576         tcg_gen_mov_i32(dest, src);
2577     } else {
2578         /* normal move */
2579         uint16_t dest_ea;
2580         dest_ea = ((insn >> 9) & 7) | (op << 3);
2581         DEST_EA(env, dest_ea, opsize, src, NULL);
2582         /* This will be correct because loads sign extend.  */
2583         gen_logic_cc(s, src, opsize);
2584     }
2585 }
2586 
/* negx: negate with extend — dest = 0 - (src + X), updating X, N, V,
   C and the sticky Z flag.  */
DISAS_INSN(negx)
{
    TCGv z;
    TCGv src;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src, opsize, 1, &addr);

    gen_flush_flags(s); /* compute old Z */

    /* Perform subtract with borrow.
     * (X, N) =  -(src + X);
     */

    z = tcg_const_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
    tcg_temp_free(z);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);

    /* The borrow-out is in bit 0 of the high half.  */
    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);

    /* Compute signed-overflow for negation.  The normal formula for
     * subtraction is (res ^ src) & (src ^ dest), but with dest==0
     * this simplifies to res & src.
     */

    tcg_gen_and_i32(QREG_CC_V, QREG_CC_N, src);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */

    DEST_EA(env, insn, opsize, QREG_CC_N, &addr);
}
2628 
2629 DISAS_INSN(lea)
2630 {
2631     TCGv reg;
2632     TCGv tmp;
2633 
2634     reg = AREG(insn, 9);
2635     tmp = gen_lea(env, s, insn, OS_LONG);
2636     if (IS_NULL_QREG(tmp)) {
2637         gen_addr_fault(s);
2638         return;
2639     }
2640     tcg_gen_mov_i32(reg, tmp);
2641 }
2642 
2643 DISAS_INSN(clr)
2644 {
2645     int opsize;
2646     TCGv zero;
2647 
2648     zero = tcg_const_i32(0);
2649 
2650     opsize = insn_opsize(insn);
2651     DEST_EA(env, insn, opsize, zero, NULL);
2652     gen_logic_cc(s, zero, opsize);
2653     tcg_temp_free(zero);
2654 }
2655 
2656 DISAS_INSN(move_from_ccr)
2657 {
2658     TCGv ccr;
2659 
2660     ccr = gen_get_ccr(s);
2661     DEST_EA(env, insn, OS_WORD, ccr, NULL);
2662 }
2663 
/* neg: dest = 0 - src, with full flag computation.  */
DISAS_INSN(neg)
{
    TCGv src1;
    TCGv dest;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src1, opsize, 1, &addr);
    dest = tcg_temp_new();
    tcg_gen_neg_i32(dest, src1);
    set_cc_op(s, CC_OP_SUBB + opsize);
    gen_update_cc_add(dest, src1, opsize);
    /* C (and X) are set whenever the result is non-zero.  */
    tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, dest, 0);
    DEST_EA(env, insn, opsize, dest, &addr);
    tcg_temp_free(dest);
}
2681 
/* move <ea>,CCR: shares its implementation with move-to-SR.  */
DISAS_INSN(move_to_ccr)
{
    gen_move_to_sr(env, s, insn, true);
}
2686 
2687 DISAS_INSN(not)
2688 {
2689     TCGv src1;
2690     TCGv dest;
2691     TCGv addr;
2692     int opsize;
2693 
2694     opsize = insn_opsize(insn);
2695     SRC_EA(env, src1, opsize, 1, &addr);
2696     dest = tcg_temp_new();
2697     tcg_gen_not_i32(dest, src1);
2698     DEST_EA(env, insn, opsize, dest, &addr);
2699     gen_logic_cc(s, dest, opsize);
2700 }
2701 
2702 DISAS_INSN(swap)
2703 {
2704     TCGv src1;
2705     TCGv src2;
2706     TCGv reg;
2707 
2708     src1 = tcg_temp_new();
2709     src2 = tcg_temp_new();
2710     reg = DREG(insn, 0);
2711     tcg_gen_shli_i32(src1, reg, 16);
2712     tcg_gen_shri_i32(src2, reg, 16);
2713     tcg_gen_or_i32(reg, src1, src2);
2714     tcg_temp_free(src2);
2715     tcg_temp_free(src1);
2716     gen_logic_cc(s, reg, OS_LONG);
2717 }
2718 
/* bkpt: raise a debug exception at the current insn.  */
DISAS_INSN(bkpt)
{
    gen_exception(s, s->base.pc_next, EXCP_DEBUG);
}
2723 
2724 DISAS_INSN(pea)
2725 {
2726     TCGv tmp;
2727 
2728     tmp = gen_lea(env, s, insn, OS_LONG);
2729     if (IS_NULL_QREG(tmp)) {
2730         gen_addr_fault(s);
2731         return;
2732     }
2733     gen_push(s, tmp);
2734 }
2735 
2736 DISAS_INSN(ext)
2737 {
2738     int op;
2739     TCGv reg;
2740     TCGv tmp;
2741 
2742     reg = DREG(insn, 0);
2743     op = (insn >> 6) & 7;
2744     tmp = tcg_temp_new();
2745     if (op == 3)
2746         tcg_gen_ext16s_i32(tmp, reg);
2747     else
2748         tcg_gen_ext8s_i32(tmp, reg);
2749     if (op == 2)
2750         gen_partset_reg(OS_WORD, reg, tmp);
2751     else
2752         tcg_gen_mov_i32(reg, tmp);
2753     gen_logic_cc(s, tmp, OS_LONG);
2754     tcg_temp_free(tmp);
2755 }
2756 
2757 DISAS_INSN(tst)
2758 {
2759     int opsize;
2760     TCGv tmp;
2761 
2762     opsize = insn_opsize(insn);
2763     SRC_EA(env, tmp, opsize, 1, NULL);
2764     gen_logic_cc(s, tmp, opsize);
2765 }
2766 
DISAS_INSN(pulse)
{
    /* Implemented as a NOP.  */
}
2771 
/* illegal: raise an illegal-instruction exception.  */
DISAS_INSN(illegal)
{
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
}
2776 
2777 /* ??? This should be atomic.  */
/* tas: test-and-set — set flags from the byte at <ea>, then write it
   back with bit 7 set.  As noted above, this read-modify-write is not
   emitted atomically here.  */
DISAS_INSN(tas)
{
    TCGv dest;
    TCGv src1;
    TCGv addr;

    dest = tcg_temp_new();
    SRC_EA(env, src1, OS_BYTE, 1, &addr);
    gen_logic_cc(s, src1, OS_BYTE);
    /* Set the most significant bit of the operand.  */
    tcg_gen_ori_i32(dest, src1, 0x80);
    DEST_EA(env, insn, OS_BYTE, dest, &addr);
    tcg_temp_free(dest);
}
2791 
/*
 * mulu.l/muls.l: 32x32 multiply.  In the extension word, bit 11
 * selects signed, bit 10 the 64-bit (quad) form — which requires the
 * QUAD_MULDIV feature — with Dl in bits 14:12 and Dh in bits 2:0.
 */
DISAS_INSN(mull)
{
    uint16_t ext;
    TCGv src1;
    int sign;

    ext = read_im16(env, s);

    sign = ext & 0x800;

    if (ext & 0x400) {
        /* 64-bit form: full product into Dh:Dl.  */
        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }

        SRC_EA(env, src1, OS_LONG, 0, NULL);

        if (sign) {
            tcg_gen_muls2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
        } else {
            tcg_gen_mulu2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
        }
        /* if Dl == Dh, 68040 returns low word */
        tcg_gen_mov_i32(DREG(ext, 0), QREG_CC_N);
        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_Z);
        /* Z is computed from the whole 64-bit product.  */
        tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N);

        tcg_gen_movi_i32(QREG_CC_V, 0);
        tcg_gen_movi_i32(QREG_CC_C, 0);

        set_cc_op(s, CC_OP_FLAGS);
        return;
    }
    SRC_EA(env, src1, OS_LONG, 0, NULL);
    if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
        /* 32-bit result, but V must indicate overflow of the full
           product into 32 bits.  */
        tcg_gen_movi_i32(QREG_CC_C, 0);
        if (sign) {
            tcg_gen_muls2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
            /* QREG_CC_V is -(QREG_CC_V != (QREG_CC_N >> 31)) */
            tcg_gen_sari_i32(QREG_CC_Z, QREG_CC_N, 31);
            tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_Z);
        } else {
            tcg_gen_mulu2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
            /* QREG_CC_V is -(QREG_CC_V != 0), use QREG_CC_C as 0 */
            tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_C);
        }
        tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_N);

        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);

        set_cc_op(s, CC_OP_FLAGS);
    } else {
        /* The upper 32 bits of the product are discarded, so
           muls.l and mulu.l are functionally equivalent.  */
        tcg_gen_mul_i32(DREG(ext, 12), src1, DREG(ext, 12));
        gen_logic_cc(s, DREG(ext, 12), OS_LONG);
    }
}
2852 
/* Common body of link/linkl: push An, make An the new frame pointer
   (old SP), then add OFFSET to SP.  */
static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
{
    TCGv reg;
    TCGv tmp;

    reg = AREG(insn, 0);
    tmp = tcg_temp_new();
    tcg_gen_subi_i32(tmp, QREG_SP, 4);
    gen_store(s, OS_LONG, tmp, reg, IS_USER(s));
    if ((insn & 7) != 7) {
        /* Skip the frame-pointer update when An is A7 (SP) itself;
           the SP assignment below already covers it.  */
        tcg_gen_mov_i32(reg, tmp);
    }
    tcg_gen_addi_i32(QREG_SP, tmp, offset);
    tcg_temp_free(tmp);
}
2868 
2869 DISAS_INSN(link)
2870 {
2871     int16_t offset;
2872 
2873     offset = read_im16(env, s);
2874     gen_link(s, insn, offset);
2875 }
2876 
2877 DISAS_INSN(linkl)
2878 {
2879     int32_t offset;
2880 
2881     offset = read_im32(env, s);
2882     gen_link(s, insn, offset);
2883 }
2884 
/* unlk: tear down a stack frame — reload An from the saved frame
   pointer and set SP just above it.  */
DISAS_INSN(unlk)
{
    TCGv src;
    TCGv reg;
    TCGv tmp;

    src = tcg_temp_new();
    reg = AREG(insn, 0);
    /* Copy An first so the SP update below uses the original value
       even when An is A7 itself.  */
    tcg_gen_mov_i32(src, reg);
    tmp = gen_load(s, OS_LONG, src, 0, IS_USER(s));
    tcg_gen_mov_i32(reg, tmp);
    tcg_gen_addi_i32(QREG_SP, src, 4);
    tcg_temp_free(src);
    tcg_temp_free(tmp);
}
2900 
2901 #if defined(CONFIG_SOFTMMU)
/* reset: privileged; the actual work is done in the helper.  */
DISAS_INSN(reset)
{
    if (IS_USER(s)) {
        /* Privileged instruction: fault in user mode.  */
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    gen_helper_reset(cpu_env);
}
2911 #endif
2912 
/* nop: nothing to generate.  */
DISAS_INSN(nop)
{
}
2916 
2917 DISAS_INSN(rtd)
2918 {
2919     TCGv tmp;
2920     int16_t offset = read_im16(env, s);
2921 
2922     tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
2923     tcg_gen_addi_i32(QREG_SP, QREG_SP, offset + 4);
2924     gen_jmp(s, tmp);
2925 }
2926 
2927 DISAS_INSN(rts)
2928 {
2929     TCGv tmp;
2930 
2931     tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
2932     tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
2933     gen_jmp(s, tmp);
2934 }
2935 
2936 DISAS_INSN(jump)
2937 {
2938     TCGv tmp;
2939 
2940     /* Load the target address first to ensure correct exception
2941        behavior.  */
2942     tmp = gen_lea(env, s, insn, OS_LONG);
2943     if (IS_NULL_QREG(tmp)) {
2944         gen_addr_fault(s);
2945         return;
2946     }
2947     if ((insn & 0x40) == 0) {
2948         /* jsr */
2949         gen_push(s, tcg_const_i32(s->pc));
2950     }
2951     gen_jmp(s, tmp);
2952 }
2953 
/*
 * addq/subq: add or subtract (bit 8) a quick immediate 1..8 — a field
 * value of 0 encodes 8.  An address-register destination is always
 * long-sized and leaves the flags untouched.
 */
DISAS_INSN(addsubq)
{
    TCGv src;
    TCGv dest;
    TCGv val;
    int imm;
    TCGv addr;
    int opsize;

    if ((insn & 070) == 010) {
        /* Operation on address register is always long.  */
        opsize = OS_LONG;
    } else {
        opsize = insn_opsize(insn);
    }
    SRC_EA(env, src, opsize, 1, &addr);
    imm = (insn >> 9) & 7;
    if (imm == 0) {
        imm = 8;
    }
    val = tcg_const_i32(imm);
    dest = tcg_temp_new();
    tcg_gen_mov_i32(dest, src);
    if ((insn & 0x38) == 0x08) {
        /* Don't update condition codes if the destination is an
           address register.  */
        if (insn & 0x0100) {
            tcg_gen_sub_i32(dest, dest, val);
        } else {
            tcg_gen_add_i32(dest, dest, val);
        }
    } else {
        if (insn & 0x0100) {
            /* X = borrow out of the subtraction.  */
            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
            tcg_gen_sub_i32(dest, dest, val);
            set_cc_op(s, CC_OP_SUBB + opsize);
        } else {
            tcg_gen_add_i32(dest, dest, val);
            /* X = carry out of the addition.  */
            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
            set_cc_op(s, CC_OP_ADDB + opsize);
        }
        gen_update_cc_add(dest, val, opsize);
    }
    tcg_temp_free(val);
    DEST_EA(env, insn, opsize, dest, &addr);
    tcg_temp_free(dest);
}
3001 
3002 DISAS_INSN(tpf)
3003 {
3004     switch (insn & 7) {
3005     case 2: /* One extension word.  */
3006         s->pc += 2;
3007         break;
3008     case 3: /* Two extension words.  */
3009         s->pc += 4;
3010         break;
3011     case 4: /* No extension words.  */
3012         break;
3013     default:
3014         disas_undef(env, s, insn);
3015     }
3016 }
3017 
3018 DISAS_INSN(branch)
3019 {
3020     int32_t offset;
3021     uint32_t base;
3022     int op;
3023     TCGLabel *l1;
3024 
3025     base = s->pc;
3026     op = (insn >> 8) & 0xf;
3027     offset = (int8_t)insn;
3028     if (offset == 0) {
3029         offset = (int16_t)read_im16(env, s);
3030     } else if (offset == -1) {
3031         offset = read_im32(env, s);
3032     }
3033     if (op == 1) {
3034         /* bsr */
3035         gen_push(s, tcg_const_i32(s->pc));
3036     }
3037     if (op > 1) {
3038         /* Bcc */
3039         l1 = gen_new_label();
3040         gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
3041         gen_jmp_tb(s, 1, base + offset);
3042         gen_set_label(l1);
3043         gen_jmp_tb(s, 0, s->pc);
3044     } else {
3045         /* Unconditional branch.  */
3046         update_cc_op(s);
3047         gen_jmp_tb(s, 0, base + offset);
3048     }
3049 }
3050 
3051 DISAS_INSN(moveq)
3052 {
3053     tcg_gen_movi_i32(DREG(insn, 9), (int8_t)insn);
3054     gen_logic_cc(s, DREG(insn, 9), OS_LONG);
3055 }
3056 
3057 DISAS_INSN(mvzs)
3058 {
3059     int opsize;
3060     TCGv src;
3061     TCGv reg;
3062 
3063     if (insn & 0x40)
3064         opsize = OS_WORD;
3065     else
3066         opsize = OS_BYTE;
3067     SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
3068     reg = DREG(insn, 9);
3069     tcg_gen_mov_i32(reg, src);
3070     gen_logic_cc(s, src, opsize);
3071 }
3072 
/* or: Dn | <ea>.  Bit 8 selects direction — set writes the result to
   <ea>, clear writes it (partially, at the operand size) to Dn.  */
DISAS_INSN(or)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    reg = gen_extend(s, DREG(insn, 9), opsize, 0);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        SRC_EA(env, src, opsize, 0, &addr);
        tcg_gen_or_i32(dest, src, reg);
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        SRC_EA(env, src, opsize, 0, NULL);
        tcg_gen_or_i32(dest, src, reg);
        gen_partset_reg(opsize, DREG(insn, 9), dest);
    }
    gen_logic_cc(s, dest, opsize);
    tcg_temp_free(dest);
}
3096 
3097 DISAS_INSN(suba)
3098 {
3099     TCGv src;
3100     TCGv reg;
3101 
3102     SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3103     reg = AREG(insn, 9);
3104     tcg_gen_sub_i32(reg, reg, src);
3105 }
3106 
/* Emit subtract-with-extend: QREG_CC_N = dest - (src + X), updating
   X, N, V, C and the sticky Z.  The caller writes the result back
   from QREG_CC_N.  */
static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
    TCGv tmp;

    gen_flush_flags(s); /* compute old Z */

    /* Perform subtract with borrow.
     * (X, N) = dest - (src + X);
     */

    tmp = tcg_const_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, tmp, QREG_CC_X, tmp);
    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, tmp, QREG_CC_N, QREG_CC_X);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    /* The borrow-out is in bit 0 of the high half.  */
    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);

    /* Compute signed-overflow for subtract.  */

    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest);
    tcg_gen_xor_i32(tmp, dest, src);
    tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp);
    tcg_temp_free(tmp);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */
}
3138 
/* SUBX Dy,Dx: register-to-register subtract with extend.  Operands are
 * sign-extended to 32 bits, gen_subx leaves the result in QREG_CC_N,
 * and only the opsize-wide part of Dx is written back.
 */
DISAS_INSN(subx_reg)
{
    TCGv dest;
    TCGv src;
    int opsize;

    opsize = insn_opsize(insn);

    src = gen_extend(s, DREG(insn, 0), opsize, 1);
    dest = gen_extend(s, DREG(insn, 9), opsize, 1);

    gen_subx(s, src, dest, opsize);

    gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
}
3154 
/* SUBX -(Ay),-(Ax): memory-to-memory subtract with extend using
 * predecrement addressing.  Both address registers are decremented
 * before the loads, and the result is stored back to -(Ax).
 */
DISAS_INSN(subx_mem)
{
    TCGv src;
    TCGv addr_src;
    TCGv dest;
    TCGv addr_dest;
    int opsize;

    opsize = insn_opsize(insn);

    /* Predecrement the source address register, then load the operand.  */
    addr_src = AREG(insn, 0);
    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
    src = gen_load(s, opsize, addr_src, 1, IS_USER(s));

    /* Same for the destination side.  */
    addr_dest = AREG(insn, 9);
    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
    dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));

    gen_subx(s, src, dest, opsize);

    gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));

    tcg_temp_free(dest);
    tcg_temp_free(src);
}
3180 
3181 DISAS_INSN(mov3q)
3182 {
3183     TCGv src;
3184     int val;
3185 
3186     val = (insn >> 9) & 7;
3187     if (val == 0)
3188         val = -1;
3189     src = tcg_const_i32(val);
3190     gen_logic_cc(s, src, OS_LONG);
3191     DEST_EA(env, insn, OS_LONG, src, NULL);
3192     tcg_temp_free(src);
3193 }
3194 
/* CMP: compare <ea> against Dx at the encoded operand size; only the
 * condition codes are updated (gen_update_cc_cmp stores no result).
 */
DISAS_INSN(cmp)
{
    TCGv src;
    TCGv reg;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src, opsize, 1, NULL);
    reg = gen_extend(s, DREG(insn, 9), opsize, 1);
    gen_update_cc_cmp(s, reg, src, opsize);
}
3206 
/* CMPA: compare <ea> against address register Ax.  The word-form source
 * is sign-extended by SRC_EA, and the comparison itself is always done
 * at long size regardless of the encoded operand size.
 */
DISAS_INSN(cmpa)
{
    int opsize;
    TCGv src;
    TCGv reg;

    if (insn & 0x100) {
        opsize = OS_LONG;
    } else {
        opsize = OS_WORD;
    }
    SRC_EA(env, src, opsize, 1, NULL);
    reg = AREG(insn, 9);
    gen_update_cc_cmp(s, reg, src, OS_LONG);
}
3222 
/* CMPM (Ay)+,(Ax)+: compare two memory operands with postincrement
 * addressing; both address registers advance by the operand size.
 */
DISAS_INSN(cmpm)
{
    int opsize = insn_opsize(insn);
    TCGv src, dst;

    /* Post-increment load (mode 3) from Ay.  */
    src = gen_ea_mode(env, s, 3, REG(insn, 0), opsize,
                      NULL_QREG, NULL, EA_LOADS, IS_USER(s));
    /* Post-increment load (mode 3) from Ax.  */
    dst = gen_ea_mode(env, s, 3, REG(insn, 9), opsize,
                      NULL_QREG, NULL, EA_LOADS, IS_USER(s));

    gen_update_cc_cmp(s, dst, src, opsize);
}
3237 
/* EOR: Dx ^ <ea> -> <ea> (EOR only supports the register-to-memory
 * direction).  Sets the logic condition codes on the result.
 */
DISAS_INSN(eor)
{
    TCGv src;
    TCGv dest;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);

    SRC_EA(env, src, opsize, 0, &addr);
    dest = tcg_temp_new();
    tcg_gen_xor_i32(dest, src, DREG(insn, 9));
    gen_logic_cc(s, dest, opsize);
    DEST_EA(env, insn, opsize, dest, &addr);
    tcg_temp_free(dest);
}
3254 
3255 static void do_exg(TCGv reg1, TCGv reg2)
3256 {
3257     TCGv temp = tcg_temp_new();
3258     tcg_gen_mov_i32(temp, reg1);
3259     tcg_gen_mov_i32(reg1, reg2);
3260     tcg_gen_mov_i32(reg2, temp);
3261     tcg_temp_free(temp);
3262 }
3263 
/* EXG Dx,Dy: exchange two data registers; condition codes unaffected.  */
DISAS_INSN(exg_dd)
{
    /* exchange Dx and Dy */
    do_exg(DREG(insn, 9), DREG(insn, 0));
}
3269 
/* EXG Ax,Ay: exchange two address registers; condition codes unaffected.  */
DISAS_INSN(exg_aa)
{
    /* exchange Ax and Ay */
    do_exg(AREG(insn, 9), AREG(insn, 0));
}
3275 
/* EXG Dx,Ay: exchange a data and an address register; cc unaffected.  */
DISAS_INSN(exg_da)
{
    /* exchange Dx and Ay */
    do_exg(DREG(insn, 9), AREG(insn, 0));
}
3281 
/* AND: bitwise-and of Dx and <ea>.  Bit 8 of the opcode selects the
 * direction: set means Dx & <ea> -> <ea>, clear means <ea> & Dx -> Dx.
 * Sets the logic condition codes on the result.
 */
DISAS_INSN(and)
{
    TCGv src;
    TCGv reg;
    TCGv dest;
    TCGv addr;
    int opsize;

    dest = tcg_temp_new();

    opsize = insn_opsize(insn);
    reg = DREG(insn, 9);
    if (insn & 0x100) {
        /* Dn & <ea> -> <ea>: read-modify-write through the same address.  */
        SRC_EA(env, src, opsize, 0, &addr);
        tcg_gen_and_i32(dest, src, reg);
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        /* <ea> & Dn -> Dn: only the opsize-wide part of Dn is updated.  */
        SRC_EA(env, src, opsize, 0, NULL);
        tcg_gen_and_i32(dest, src, reg);
        gen_partset_reg(opsize, reg, dest);
    }
    gen_logic_cc(s, dest, opsize);
    tcg_temp_free(dest);
}
3306 
/* ADDA: add <ea> to address register Ax.  The source is sign-extended
 * (word form) and the full 32-bit register is updated; condition codes
 * are not affected, matching the absence of any cc update here.
 */
DISAS_INSN(adda)
{
    TCGv src;
    TCGv reg;

    SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
    reg = AREG(insn, 9);
    tcg_gen_add_i32(reg, reg, src);
}
3316 
/* Emit an add-with-extend: QREG_CC_N = src + dest + X, updating X, N,
 * sticky Z, V and C.  The result is left in QREG_CC_N for the caller
 * to write back.
 */
static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
    TCGv tmp;

    gen_flush_flags(s); /* compute old Z */

    /* Perform addition with carry.
     * (X, N) = src + dest + X;
     */

    tmp = tcg_const_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, tmp, dest, tmp);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, tmp);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);

    /* Compute signed-overflow for addition: V = (N ^ src) & ~(dest ^ src).  */

    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
    tcg_gen_xor_i32(tmp, dest, src);
    tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp);
    tcg_temp_free(tmp);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */
}
3347 
/* ADDX Dy,Dx: register-to-register add with extend.  Operands are
 * sign-extended to 32 bits, gen_addx leaves the result in QREG_CC_N,
 * and only the opsize-wide part of Dx is written back.
 */
DISAS_INSN(addx_reg)
{
    TCGv dest;
    TCGv src;
    int opsize;

    opsize = insn_opsize(insn);

    dest = gen_extend(s, DREG(insn, 9), opsize, 1);
    src = gen_extend(s, DREG(insn, 0), opsize, 1);

    gen_addx(s, src, dest, opsize);

    gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
}
3363 
/* ADDX -(Ay),-(Ax): memory-to-memory add with extend using
 * predecrement addressing.  Both address registers are decremented
 * before the loads, and the result is stored back to -(Ax).
 */
DISAS_INSN(addx_mem)
{
    TCGv src;
    TCGv addr_src;
    TCGv dest;
    TCGv addr_dest;
    int opsize;

    opsize = insn_opsize(insn);

    /* Predecrement the source address register, then load the operand.  */
    addr_src = AREG(insn, 0);
    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
    src = gen_load(s, opsize, addr_src, 1, IS_USER(s));

    /* Same for the destination side.  */
    addr_dest = AREG(insn, 9);
    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
    dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));

    gen_addx(s, src, dest, opsize);

    gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));

    tcg_temp_free(dest);
    tcg_temp_free(src);
}
3389 
/* Immediate-count shift (ASL/LSL/ASR/LSR #imm,Dx).  The 3-bit count
 * field encodes 1..8 (0 means 8).  Bit 3 of the opcode selects logical
 * vs arithmetic, bit 8 selects left vs right.  Flags: C/X get the last
 * bit shifted out, N/Z follow the result, and V is 0 except for
 * M68000-family ASL, where it is set if the sign bit changes at any
 * point during the shift.
 */
static inline void shift_im(DisasContext *s, uint16_t insn, int opsize)
{
    int count = (insn >> 9) & 7;
    int logical = insn & 8;
    int left = insn & 0x100;
    int bits = opsize_bytes(opsize) * 8;
    TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);

    if (count == 0) {
        count = 8;
    }

    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        /* C = the last bit shifted out of the top of the operand.  */
        tcg_gen_shri_i32(QREG_CC_C, reg, bits - count);
        tcg_gen_shli_i32(QREG_CC_N, reg, count);

        /* Note that ColdFire always clears V (done above),
           while M68000 sets if the most significant bit is changed at
           any time during the shift operation */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
            /* if shift count >= bits, V is (reg != 0) */
            if (count >= bits) {
                tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V);
            } else {
                /* V set iff the top (count+1) bits were not all equal.  */
                TCGv t0 = tcg_temp_new();
                tcg_gen_sari_i32(QREG_CC_V, reg, bits - 1);
                tcg_gen_sari_i32(t0, reg, bits - count - 1);
                tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0);
                tcg_temp_free(t0);
            }
            /* Turn the 0/1 setcond result into the 0/-1 flag form.  */
            tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
        }
    } else {
        /* C = the last bit shifted out of the bottom of the operand.  */
        tcg_gen_shri_i32(QREG_CC_C, reg, count - 1);
        if (logical) {
            tcg_gen_shri_i32(QREG_CC_N, reg, count);
        } else {
            tcg_gen_sari_i32(QREG_CC_N, reg, count);
        }
    }

    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);

    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
    set_cc_op(s, CC_OP_FLAGS);
}
3440 
/* Register-count shift (ASL/LSL/ASR/LSR Dy,Dx).  The count comes from
 * Dy modulo 64; the shift is carried out in 64 bits so that the last
 * bit shifted out (the carry) is always recoverable.  X is only
 * updated when the count is non-zero; V handling matches shift_im.
 */
static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize)
{
    int logical = insn & 8;
    int left = insn & 0x100;
    int bits = opsize_bytes(opsize) * 8;
    TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);
    TCGv s32;
    TCGv_i64 t64, s64;

    t64 = tcg_temp_new_i64();
    s64 = tcg_temp_new_i64();
    s32 = tcg_temp_new();

    /* Note that m68k truncates the shift count modulo 64, not 32.
       In addition, a 64-bit shift makes it easy to find "the last
       bit shifted out", for the carry flag.  */
    tcg_gen_andi_i32(s32, DREG(insn, 9), 63);
    tcg_gen_extu_i32_i64(s64, s32);
    tcg_gen_extu_i32_i64(t64, reg);

    /* Optimistically set V=0.  Also used as a zero source below.  */
    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        tcg_gen_shl_i64(t64, t64, s64);

        if (opsize == OS_LONG) {
            /* For a 32-bit operand the carry is simply the high half.  */
            tcg_gen_extr_i64_i32(QREG_CC_N, QREG_CC_C, t64);
            /* Note that C=0 if shift count is 0, and we get that for free.  */
        } else {
            /* Narrower operand: the carry sits just above the result
               bits, but must be forced to 0 when the count is 0.  */
            TCGv zero = tcg_const_i32(0);
            tcg_gen_extrl_i64_i32(QREG_CC_N, t64);
            tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits);
            tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                                s32, zero, zero, QREG_CC_C);
            tcg_temp_free(zero);
        }
        tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);

        /* X = C, but only if the shift count was non-zero.  */
        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
                            QREG_CC_C, QREG_CC_X);

        /* M68000 sets V if the most significant bit is changed at
         * any time during the shift operation.  Do this via creating
         * an extension of the sign bit, comparing, and discarding
         * the bits below the sign bit.  I.e.
         *     int64_t s = (intN_t)reg;
         *     int64_t t = (int64_t)(intN_t)reg << count;
         *     V = ((s ^ t) & (-1 << (bits - 1))) != 0
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
            TCGv_i64 tt = tcg_const_i64(32);
            /* if shift is greater than 32, use 32 */
            tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64);
            tcg_temp_free_i64(tt);
            /* Sign extend the input to 64 bits; re-do the shift.  */
            tcg_gen_ext_i32_i64(t64, reg);
            tcg_gen_shl_i64(s64, t64, s64);
            /* Clear all bits that are unchanged.  */
            tcg_gen_xor_i64(t64, t64, s64);
            /* Ignore the bits below the sign bit.  */
            tcg_gen_andi_i64(t64, t64, -1ULL << (bits - 1));
            /* If any bits remain set, we have overflow.  */
            tcg_gen_setcondi_i64(TCG_COND_NE, t64, t64, 0);
            tcg_gen_extrl_i64_i32(QREG_CC_V, t64);
            /* Turn the 0/1 setcond result into the 0/-1 flag form.  */
            tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
        }
    } else {
        /* Right shift: pre-shift the operand into the high half so the
           last bit shifted out lands in the low half (the carry).  */
        tcg_gen_shli_i64(t64, t64, 32);
        if (logical) {
            tcg_gen_shr_i64(t64, t64, s64);
        } else {
            tcg_gen_sar_i64(t64, t64, s64);
        }
        tcg_gen_extr_i64_i32(QREG_CC_C, QREG_CC_N, t64);

        /* Note that C=0 if shift count is 0, and we get that for free.  */
        tcg_gen_shri_i32(QREG_CC_C, QREG_CC_C, 31);

        /* X = C, but only if the shift count was non-zero.  */
        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
                            QREG_CC_C, QREG_CC_X);
    }
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);

    tcg_temp_free(s32);
    tcg_temp_free_i64(s64);
    tcg_temp_free_i64(t64);

    /* Write back the result.  */
    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
    set_cc_op(s, CC_OP_FLAGS);
}
3535 
/* Byte-sized immediate-count shift.  */
DISAS_INSN(shift8_im)
{
    shift_im(s, insn, OS_BYTE);
}
3540 
/* Word-sized immediate-count shift.  */
DISAS_INSN(shift16_im)
{
    shift_im(s, insn, OS_WORD);
}
3545 
/* Long-sized immediate-count shift.  */
DISAS_INSN(shift_im)
{
    shift_im(s, insn, OS_LONG);
}
3550 
/* Byte-sized register-count shift.  */
DISAS_INSN(shift8_reg)
{
    shift_reg(s, insn, OS_BYTE);
}
3555 
/* Word-sized register-count shift.  */
DISAS_INSN(shift16_reg)
{
    shift_reg(s, insn, OS_WORD);
}
3560 
/* Long-sized register-count shift.  */
DISAS_INSN(shift_reg)
{
    shift_reg(s, insn, OS_LONG);
}
3565 
/* Memory shift: word-sized shift of <ea> by exactly one bit.  Bit 3
 * selects logical vs arithmetic, bit 8 selects left vs right.
 */
DISAS_INSN(shift_mem)
{
    int logical = insn & 8;
    int left = insn & 0x100;
    TCGv src;
    TCGv addr;

    SRC_EA(env, src, OS_WORD, !logical, &addr);
    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        /* C = the bit shifted out of the top of the 16-bit operand.  */
        tcg_gen_shri_i32(QREG_CC_C, src, 15);
        tcg_gen_shli_i32(QREG_CC_N, src, 1);

        /* Note that ColdFire always clears V,
           while M68000 sets if the most significant bit is changed at
           any time during the shift operation */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
            src = gen_extend(s, src, OS_WORD, 1);
            tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
        }
    } else {
        /* C = bit 0 of the operand (masked below).  */
        tcg_gen_mov_i32(QREG_CC_C, src);
        if (logical) {
            tcg_gen_shri_i32(QREG_CC_N, src, 1);
        } else {
            tcg_gen_sari_i32(QREG_CC_N, src, 1);
        }
    }

    gen_ext(QREG_CC_N, QREG_CC_N, OS_WORD, 1);
    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);

    DEST_EA(env, insn, OS_WORD, QREG_CC_N, &addr);
    set_cc_op(s, CC_OP_FLAGS);
}
3603 
/* Emit a plain rotate (ROL/ROR) of 'reg' by 'shift' bits at the given
 * operand size (8/16/32), updating N, Z, C and V (V always cleared,
 * X untouched).  Sub-long operands are replicated across 32 bits so
 * that a single 32-bit rotate produces the right result, then
 * sign-extended back.
 */
static void rotate(TCGv reg, TCGv shift, int left, int size)
{
    switch (size) {
    case 8:
        /* Replicate the 8-bit input so that a 32-bit rotate works.  */
        tcg_gen_ext8u_i32(reg, reg);
        tcg_gen_muli_i32(reg, reg, 0x01010101);
        goto do_long;
    case 16:
        /* Replicate the 16-bit input so that a 32-bit rotate works.  */
        tcg_gen_deposit_i32(reg, reg, reg, 16, 16);
        goto do_long;
    do_long:
    default:
        if (left) {
            tcg_gen_rotl_i32(reg, reg, shift);
        } else {
            tcg_gen_rotr_i32(reg, reg, shift);
        }
    }

    /* compute flags */

    switch (size) {
    case 8:
        tcg_gen_ext8s_i32(reg, reg);
        break;
    case 16:
        tcg_gen_ext16s_i32(reg, reg);
        break;
    default:
        break;
    }

    /* QREG_CC_X is not affected */

    tcg_gen_mov_i32(QREG_CC_N, reg);
    tcg_gen_mov_i32(QREG_CC_Z, reg);

    /* C = the last bit rotated: bit 0 for left, the sign bit for right.  */
    if (left) {
        tcg_gen_andi_i32(QREG_CC_C, reg, 1);
    } else {
        tcg_gen_shri_i32(QREG_CC_C, reg, 31);
    }

    tcg_gen_movi_i32(QREG_CC_V, 0); /* always cleared */
}
3651 
/* Set the flags after a rotate-through-X (ROXL/ROXR): sign-extend the
 * result to the operand size, set N/Z on it, copy the new extend bit
 * into both X and C, and clear V.
 */
static void rotate_x_flags(TCGv reg, TCGv X, int size)
{
    switch (size) {
    case 8:
        tcg_gen_ext8s_i32(reg, reg);
        break;
    case 16:
        tcg_gen_ext16s_i32(reg, reg);
        break;
    default:
        break;
    }
    tcg_gen_mov_i32(QREG_CC_N, reg);
    tcg_gen_mov_i32(QREG_CC_Z, reg);
    tcg_gen_mov_i32(QREG_CC_X, X);
    tcg_gen_mov_i32(QREG_CC_C, X);
    tcg_gen_movi_i32(QREG_CC_V, 0);
}
3670 
/* Rotate 'reg' through the X bit (8/16-bit ROXL/ROXR) by 'shift' bits.
 * The (size+1)-bit rotation is built from three shifts of the value
 * plus one shift of X, OR'd together.  Returns the new X bit as a
 * fresh temporary the caller must free.
 * Result of rotate_x() is valid if 0 <= shift <= size */
static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size)
{
    TCGv X, shl, shr, shx, sz, zero;

    sz = tcg_const_i32(size);

    shr = tcg_temp_new();
    shl = tcg_temp_new();
    shx = tcg_temp_new();
    if (left) {
        tcg_gen_mov_i32(shl, shift);      /* shl = shift */
        tcg_gen_movi_i32(shr, size + 1);
        tcg_gen_sub_i32(shr, shr, shift); /* shr = size + 1 - shift */
        tcg_gen_subi_i32(shx, shift, 1);  /* shx = shift - 1 */
        /* shx = shx < 0 ? size : shx; */
        zero = tcg_const_i32(0);
        tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx);
        tcg_temp_free(zero);
    } else {
        tcg_gen_mov_i32(shr, shift);      /* shr = shift */
        tcg_gen_movi_i32(shl, size + 1);
        tcg_gen_sub_i32(shl, shl, shift); /* shl = size + 1 - shift */
        tcg_gen_sub_i32(shx, sz, shift); /* shx = size - shift */
    }

    /* reg = (reg << shl) | (reg >> shr) | (x << shx); */

    tcg_gen_shl_i32(shl, reg, shl);
    tcg_gen_shr_i32(shr, reg, shr);
    tcg_gen_or_i32(reg, shl, shr);
    tcg_temp_free(shl);
    tcg_temp_free(shr);
    tcg_gen_shl_i32(shx, QREG_CC_X, shx);
    tcg_gen_or_i32(reg, reg, shx);
    tcg_temp_free(shx);

    /* X = (reg >> size) & 1 */

    X = tcg_temp_new();
    tcg_gen_shr_i32(X, reg, sz);
    tcg_gen_andi_i32(X, X, 1);
    tcg_temp_free(sz);

    return X;
}
3717 
/* Rotate a 32-bit value through the X bit (long ROXL/ROXR) using a
 * 64-bit rotate of [value:X(:padding)].  Returns the new X bit as a
 * fresh temporary the caller must free.  When shift == 0 both the
 * register and X are left unchanged (handled by the movconds at the
 * end).
 * Result of rotate32_x() is valid if 0 <= shift < 33 */
static TCGv rotate32_x(TCGv reg, TCGv shift, int left)
{
    TCGv_i64 t0, shift64;
    TCGv X, lo, hi, zero;

    shift64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(shift64, shift);

    t0 = tcg_temp_new_i64();

    X = tcg_temp_new();
    lo = tcg_temp_new();
    hi = tcg_temp_new();

    if (left) {
        /* create [reg:X:..] */

        tcg_gen_shli_i32(lo, QREG_CC_X, 31);
        tcg_gen_concat_i32_i64(t0, lo, reg);

        /* rotate */

        tcg_gen_rotl_i64(t0, t0, shift64);
        tcg_temp_free_i64(shift64);

        /* result is [reg:..:reg:X] */

        tcg_gen_extr_i64_i32(lo, hi, t0);
        tcg_gen_andi_i32(X, lo, 1);

        tcg_gen_shri_i32(lo, lo, 1);
    } else {
        /* create [..:X:reg] */

        tcg_gen_concat_i32_i64(t0, reg, QREG_CC_X);

        tcg_gen_rotr_i64(t0, t0, shift64);
        tcg_temp_free_i64(shift64);

        /* result is value: [X:reg:..:reg] */

        tcg_gen_extr_i64_i32(lo, hi, t0);

        /* extract X */

        tcg_gen_shri_i32(X, hi, 31);

        /* extract result */

        tcg_gen_shli_i32(hi, hi, 1);
    }
    tcg_temp_free_i64(t0);
    tcg_gen_or_i32(lo, lo, hi);
    tcg_temp_free(hi);

    /* if shift == 0, register and X are not affected */

    zero = tcg_const_i32(0);
    tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X);
    tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo);
    tcg_temp_free(zero);
    tcg_temp_free(lo);

    return X;
}
3784 
/* Long-sized rotate with immediate count (ROL/ROR/ROXL/ROXR #imm,Dx).
 * The 3-bit count encodes 1..8 (0 means 8); bit 3 of the opcode
 * selects plain rotate vs rotate-through-X.
 */
DISAS_INSN(rotate_im)
{
    TCGv shift;
    int tmp;
    int left = (insn & 0x100);

    tmp = (insn >> 9) & 7;
    if (tmp == 0) {
        tmp = 8;
    }

    shift = tcg_const_i32(tmp);
    if (insn & 8) {
        rotate(DREG(insn, 0), shift, left, 32);
    } else {
        TCGv X = rotate32_x(DREG(insn, 0), shift, left);
        rotate_x_flags(DREG(insn, 0), X, 32);
        tcg_temp_free(X);
    }
    tcg_temp_free(shift);

    set_cc_op(s, CC_OP_FLAGS);
}
3808 
/* Byte-sized rotate with immediate count; see rotate_im for encoding.
 * Only the low byte of Dx is written back.
 */
DISAS_INSN(rotate8_im)
{
    int left = (insn & 0x100);
    TCGv reg;
    TCGv shift;
    int tmp;

    reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);

    tmp = (insn >> 9) & 7;
    if (tmp == 0) {
        tmp = 8;
    }

    shift = tcg_const_i32(tmp);
    if (insn & 8) {
        rotate(reg, shift, left, 8);
    } else {
        TCGv X = rotate_x(reg, shift, left, 8);
        rotate_x_flags(reg, X, 8);
        tcg_temp_free(X);
    }
    tcg_temp_free(shift);
    gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3835 
/* Word-sized rotate with immediate count; see rotate_im for encoding.
 * Only the low word of Dx is written back.
 */
DISAS_INSN(rotate16_im)
{
    int left = (insn & 0x100);
    TCGv reg;
    TCGv shift;
    int tmp;

    reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
    tmp = (insn >> 9) & 7;
    if (tmp == 0) {
        tmp = 8;
    }

    shift = tcg_const_i32(tmp);
    if (insn & 8) {
        rotate(reg, shift, left, 16);
    } else {
        TCGv X = rotate_x(reg, shift, left, 16);
        rotate_x_flags(reg, X, 16);
        tcg_temp_free(X);
    }
    tcg_temp_free(shift);
    gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3861 
/* Long-sized rotate with register count (ROL/ROR/ROXL/ROXR Dy,Dx).
 * The raw count is taken modulo 64; the effective count is modulo 32
 * for plain rotates and modulo 33 for rotate-through-X.  For plain
 * rotates, C is forced to 0 when the (mod-64) count is zero.
 */
DISAS_INSN(rotate_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = DREG(insn, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        tcg_gen_andi_i32(t1, src, 31);
        rotate(reg, t1, left, 32);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 33 */
        tcg_gen_movi_i32(t1, 33);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate32_x(DREG(insn, 0), t1, left);
        rotate_x_flags(DREG(insn, 0), X, 32);
        tcg_temp_free(X);
    }
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    set_cc_op(s, CC_OP_FLAGS);
}
3895 
/* Byte-sized rotate with register count; effective count is modulo 8
 * for plain rotates and modulo 9 for rotate-through-X.  Only the low
 * byte of Dx is written back.
 */
DISAS_INSN(rotate8_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        tcg_gen_andi_i32(t1, src, 7);
        rotate(reg, t1, left, 8);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 9 */
        tcg_gen_movi_i32(t1, 9);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate_x(reg, t1, left, 8);
        rotate_x_flags(reg, X, 8);
        tcg_temp_free(X);
    }
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3930 
/* Word-sized rotate with register count; effective count is modulo 16
 * for plain rotates and modulo 17 for rotate-through-X.  Only the low
 * word of Dx is written back.
 */
DISAS_INSN(rotate16_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        tcg_gen_andi_i32(t1, src, 15);
        rotate(reg, t1, left, 16);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 17 */
        tcg_gen_movi_i32(t1, 17);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate_x(reg, t1, left, 16);
        rotate_x_flags(reg, X, 16);
        tcg_temp_free(X);
    }
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3965 
/* Memory rotate: word-sized rotate of <ea> by exactly one bit.
 * Bit 9 selects plain rotate vs rotate-through-X.
 */
DISAS_INSN(rotate_mem)
{
    TCGv src;
    TCGv addr;
    TCGv shift;
    int left = (insn & 0x100);

    SRC_EA(env, src, OS_WORD, 0, &addr);

    /* Memory rotates are always by a single bit.  */
    shift = tcg_const_i32(1);
    if (insn & 0x0200) {
        rotate(src, shift, left, 16);
    } else {
        TCGv X = rotate_x(src, shift, left, 16);
        rotate_x_flags(src, X, 16);
        tcg_temp_free(X);
    }
    tcg_temp_free(shift);
    DEST_EA(env, insn, OS_WORD, src, &addr);
    set_cc_op(s, CC_OP_FLAGS);
}
3987 
/* BFEXTU/BFEXTS Dn{offset:width},Dx: extract a bit field from a data
 * register into Dx, zero- (BFEXTU) or sign- (BFEXTS) extended.  The
 * sign-extended field is always left in QREG_CC_N for the flags.
 * Offset and width each come either from the extension word (immediate)
 * or from a data register (variable), selected by ext bits 11 and 5.
 */
DISAS_INSN(bfext_reg)
{
    int ext = read_im16(env, s);
    int is_sign = insn & 0x200;
    TCGv src = DREG(insn, 0);
    TCGv dst = DREG(ext, 12);
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    int pos = 32 - ofs - len;        /* little bit-endian */
    TCGv tmp = tcg_temp_new();
    TCGv shift;

    /* In general, we're going to rotate the field so that it's at the
       top of the word and then right-shift by the complement of the
       width to extend the field.  */
    if (ext & 0x20) {
        /* Variable width.  */
        if (ext & 0x800) {
            /* Variable offset.  */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
        } else {
            tcg_gen_rotli_i32(tmp, src, ofs);
        }

        /* shift = 32 - width (mod 32): a width of 0 encodes 32.  */
        shift = tcg_temp_new();
        tcg_gen_neg_i32(shift, DREG(ext, 0));
        tcg_gen_andi_i32(shift, shift, 31);
        tcg_gen_sar_i32(QREG_CC_N, tmp, shift);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_shr_i32(dst, tmp, shift);
        }
        tcg_temp_free(shift);
    } else {
        /* Immediate width.  */
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
            src = tmp;
            pos = 32 - len;
        } else {
            /* Immediate offset.  If the field doesn't wrap around the
               end of the word, rely on (s)extract completely.  */
            if (pos < 0) {
                tcg_gen_rotli_i32(tmp, src, ofs);
                src = tmp;
                pos = 32 - len;
            }
        }

        tcg_gen_sextract_i32(QREG_CC_N, src, pos, len);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_extract_i32(dst, src, pos, len);
        }
    }

    tcg_temp_free(tmp);
    set_cc_op(s, CC_OP_LOGIC);
}
4052 
/* BFEXTU/BFEXTS <ea>{offset:width},Dx: extract a bit field from memory
 * via the bfexts/bfextu helpers.  The unsigned helper returns a 64-bit
 * pair (result plus the sign-extended field for the flags in QREG_CC_N).
 */
DISAS_INSN(bfext_mem)
{
    int ext = read_im16(env, s);
    int is_sign = insn & 0x200;
    TCGv dest = DREG(ext, 12);
    TCGv addr, len, ofs;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Width and offset: register (variable) or extension-word immediate.  */
    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_const_i32(extract32(ext, 0, 5));
    }
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_const_i32(extract32(ext, 6, 5));
    }

    if (is_sign) {
        gen_helper_bfexts_mem(dest, cpu_env, addr, ofs, len);
        tcg_gen_mov_i32(QREG_CC_N, dest);
    } else {
        TCGv_i64 tmp = tcg_temp_new_i64();
        gen_helper_bfextu_mem(tmp, cpu_env, addr, ofs, len);
        tcg_gen_extr_i64_i32(dest, QREG_CC_N, tmp);
        tcg_temp_free_i64(tmp);
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* Only free the temporaries we allocated (immediate forms).  */
    if (!(ext & 0x20)) {
        tcg_temp_free(len);
    }
    if (!(ext & 0x800)) {
        tcg_temp_free(ofs);
    }
}
4095 
/* BFCHG/BFCLR/BFFFO/BFSET/BFTST Dn{offset:width}: bit-field operations
 * on a data register.  The field is rotated to the top of the word in
 * QREG_CC_N (for the flags), and 'mask' is built with zeros over the
 * field and ones elsewhere, rotated into the field's position, so the
 * update is a single and/or/eqv against the register.  For bfffo the
 * effective offset and length are also materialized in tofs/tlen for
 * the helper.
 */
DISAS_INSN(bfop_reg)
{
    int ext = read_im16(env, s);
    TCGv src = DREG(insn, 0);
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    TCGv mask, tofs, tlen;

    tofs = NULL;
    tlen = NULL;
    if ((insn & 0x0f00) == 0x0d00) { /* bfffo */
        tofs = tcg_temp_new();
        tlen = tcg_temp_new();
    }

    if ((ext & 0x820) == 0) {
        /* Immediate width and offset.  */
        uint32_t maski = 0x7fffffffu >> (len - 1);
        if (ofs + len <= 32) {
            tcg_gen_shli_i32(QREG_CC_N, src, ofs);
        } else {
            /* Field wraps around bit 0; rotate instead of shift.  */
            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
        }
        tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski);
        mask = tcg_const_i32(ror32(maski, ofs));
        if (tofs) {
            tcg_gen_movi_i32(tofs, ofs);
            tcg_gen_movi_i32(tlen, len);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        if (ext & 0x20) {
            /* Variable width */
            tcg_gen_subi_i32(tmp, DREG(ext, 0), 1);
            tcg_gen_andi_i32(tmp, tmp, 31);
            mask = tcg_const_i32(0x7fffffffu);
            tcg_gen_shr_i32(mask, mask, tmp);
            if (tlen) {
                tcg_gen_addi_i32(tlen, tmp, 1);
            }
        } else {
            /* Immediate width */
            mask = tcg_const_i32(0x7fffffffu >> (len - 1));
            if (tlen) {
                tcg_gen_movi_i32(tlen, len);
            }
        }
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(QREG_CC_N, src, tmp);
            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
            tcg_gen_rotr_i32(mask, mask, tmp);
            if (tofs) {
                tcg_gen_mov_i32(tofs, tmp);
            }
        } else {
            /* Immediate offset (and variable width) */
            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
            tcg_gen_rotri_i32(mask, mask, ofs);
            if (tofs) {
                tcg_gen_movi_i32(tofs, ofs);
            }
        }
        tcg_temp_free(tmp);
    }
    set_cc_op(s, CC_OP_LOGIC);

    switch (insn & 0x0f00) {
    case 0x0a00: /* bfchg */
        tcg_gen_eqv_i32(src, src, mask);
        break;
    case 0x0c00: /* bfclr */
        tcg_gen_and_i32(src, src, mask);
        break;
    case 0x0d00: /* bfffo */
        gen_helper_bfffo_reg(DREG(ext, 12), QREG_CC_N, tofs, tlen);
        tcg_temp_free(tlen);
        tcg_temp_free(tofs);
        break;
    case 0x0e00: /* bfset */
        tcg_gen_orc_i32(src, src, mask);
        break;
    case 0x0800: /* bftst */
        /* flags already set; no other work to do.  */
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free(mask);
}
4188 
/*
 * Bitfield ops on a memory operand: BFTST, BFCHG, BFCLR, BFSET, BFFFO.
 * The memory forms are done entirely in helpers; offset and width are
 * passed either as immediates from the extension word or directly from
 * the selected data registers.
 */
DISAS_INSN(bfop_mem)
{
    int ext = read_im16(env, s);
    TCGv addr, len, ofs;
    TCGv_i64 t64;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Ext bit 5: width in Dn, else a 5-bit immediate.  */
    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_const_i32(extract32(ext, 0, 5));
    }
    /* Ext bit 11: offset in Dn, else a 5-bit immediate.  */
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_const_i32(extract32(ext, 6, 5));
    }

    switch (insn & 0x0f00) {
    case 0x0a00: /* bfchg */
        gen_helper_bfchg_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0c00: /* bfclr */
        gen_helper_bfclr_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0d00: /* bfffo */
        /* Helper packs two 32-bit results in one i64: the low half
           goes to Dn, the high half provides CC_N.  */
        t64 = tcg_temp_new_i64();
        gen_helper_bfffo_mem(t64, cpu_env, addr, ofs, len);
        tcg_gen_extr_i64_i32(DREG(ext, 12), QREG_CC_N, t64);
        tcg_temp_free_i64(t64);
        break;
    case 0x0e00: /* bfset */
        gen_helper_bfset_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0800: /* bftst */
        gen_helper_bfexts_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    default:
        g_assert_not_reached();
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* Free only the immediates we materialized above; the register
       forms alias live CPU registers.  */
    if (!(ext & 0x20)) {
        tcg_temp_free(len);
    }
    if (!(ext & 0x800)) {
        tcg_temp_free(ofs);
    }
}
4243 
/*
 * BFINS into a data register: insert the low "len" bits of the source
 * register at the given big-bit-endian offset in Dn.  The inserted
 * value, left-justified into QREG_CC_N, also provides the flags.
 */
DISAS_INSN(bfins_reg)
{
    int ext = read_im16(env, s);
    TCGv dst = DREG(insn, 0);
    TCGv src = DREG(ext, 12);
    /* A width encoding of 0 means 32 bits.  */
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    int pos = 32 - ofs - len;        /* little bit-endian */
    TCGv tmp;

    tmp = tcg_temp_new();

    /* Compute the flags first: shift the source field up to bit 31.  */
    if (ext & 0x20) {
        /* Variable width */
        tcg_gen_neg_i32(tmp, DREG(ext, 0));
        tcg_gen_andi_i32(tmp, tmp, 31);
        tcg_gen_shl_i32(QREG_CC_N, src, tmp);
    } else {
        /* Immediate width */
        tcg_gen_shli_i32(QREG_CC_N, src, 32 - len);
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* Immediate width and offset */
    if ((ext & 0x820) == 0) {
        /* Check for suitability for deposit.  */
        if (pos >= 0) {
            tcg_gen_deposit_i32(dst, dst, src, pos, len);
        } else {
            /* Field wraps around bit 0: rotate the masked source
               into place and merge by hand.  */
            uint32_t maski = -2U << (len - 1);
            uint32_t roti = (ofs + len) & 31;
            tcg_gen_andi_i32(tmp, src, ~maski);
            tcg_gen_rotri_i32(tmp, tmp, roti);
            tcg_gen_andi_i32(dst, dst, ror32(maski, roti));
            tcg_gen_or_i32(dst, dst, tmp);
        }
    } else {
        TCGv mask = tcg_temp_new();
        TCGv rot = tcg_temp_new();

        if (ext & 0x20) {
            /* Variable width */
            tcg_gen_subi_i32(rot, DREG(ext, 0), 1);
            tcg_gen_andi_i32(rot, rot, 31);
            /* mask = -2 << (width - 1): ones above the field.  */
            tcg_gen_movi_i32(mask, -2);
            tcg_gen_shl_i32(mask, mask, rot);
            tcg_gen_mov_i32(rot, DREG(ext, 0));
            tcg_gen_andc_i32(tmp, src, mask);
        } else {
            /* Immediate width (variable offset) */
            uint32_t maski = -2U << (len - 1);
            tcg_gen_andi_i32(tmp, src, ~maski);
            tcg_gen_movi_i32(mask, maski);
            tcg_gen_movi_i32(rot, len & 31);
        }
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_add_i32(rot, rot, DREG(ext, 6));
        } else {
            /* Immediate offset (variable width) */
            tcg_gen_addi_i32(rot, rot, ofs);
        }
        /* Rotate mask and value into final position, then merge.  */
        tcg_gen_andi_i32(rot, rot, 31);
        tcg_gen_rotr_i32(mask, mask, rot);
        tcg_gen_rotr_i32(tmp, tmp, rot);
        tcg_gen_and_i32(dst, dst, mask);
        tcg_gen_or_i32(dst, dst, tmp);

        tcg_temp_free(rot);
        tcg_temp_free(mask);
    }
    tcg_temp_free(tmp);
}
4317 
4318 DISAS_INSN(bfins_mem)
4319 {
4320     int ext = read_im16(env, s);
4321     TCGv src = DREG(ext, 12);
4322     TCGv addr, len, ofs;
4323 
4324     addr = gen_lea(env, s, insn, OS_UNSIZED);
4325     if (IS_NULL_QREG(addr)) {
4326         gen_addr_fault(s);
4327         return;
4328     }
4329 
4330     if (ext & 0x20) {
4331         len = DREG(ext, 0);
4332     } else {
4333         len = tcg_const_i32(extract32(ext, 0, 5));
4334     }
4335     if (ext & 0x800) {
4336         ofs = DREG(ext, 6);
4337     } else {
4338         ofs = tcg_const_i32(extract32(ext, 6, 5));
4339     }
4340 
4341     gen_helper_bfins_mem(QREG_CC_N, cpu_env, addr, src, ofs, len);
4342     set_cc_op(s, CC_OP_LOGIC);
4343 
4344     if (!(ext & 0x20)) {
4345         tcg_temp_free(len);
4346     }
4347     if (!(ext & 0x800)) {
4348         tcg_temp_free(ofs);
4349     }
4350 }
4351 
4352 DISAS_INSN(ff1)
4353 {
4354     TCGv reg;
4355     reg = DREG(insn, 0);
4356     gen_logic_cc(s, reg, OS_LONG);
4357     gen_helper_ff1(reg, reg);
4358 }
4359 
4360 DISAS_INSN(chk)
4361 {
4362     TCGv src, reg;
4363     int opsize;
4364 
4365     switch ((insn >> 7) & 3) {
4366     case 3:
4367         opsize = OS_WORD;
4368         break;
4369     case 2:
4370         if (m68k_feature(env, M68K_FEATURE_CHK2)) {
4371             opsize = OS_LONG;
4372             break;
4373         }
4374         /* fallthru */
4375     default:
4376         gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4377         return;
4378     }
4379     SRC_EA(env, src, opsize, 1, NULL);
4380     reg = gen_extend(s, DREG(insn, 9), opsize, 1);
4381 
4382     gen_flush_flags(s);
4383     gen_helper_chk(cpu_env, reg, src);
4384 }
4385 
4386 DISAS_INSN(chk2)
4387 {
4388     uint16_t ext;
4389     TCGv addr1, addr2, bound1, bound2, reg;
4390     int opsize;
4391 
4392     switch ((insn >> 9) & 3) {
4393     case 0:
4394         opsize = OS_BYTE;
4395         break;
4396     case 1:
4397         opsize = OS_WORD;
4398         break;
4399     case 2:
4400         opsize = OS_LONG;
4401         break;
4402     default:
4403         gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4404         return;
4405     }
4406 
4407     ext = read_im16(env, s);
4408     if ((ext & 0x0800) == 0) {
4409         gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4410         return;
4411     }
4412 
4413     addr1 = gen_lea(env, s, insn, OS_UNSIZED);
4414     addr2 = tcg_temp_new();
4415     tcg_gen_addi_i32(addr2, addr1, opsize_bytes(opsize));
4416 
4417     bound1 = gen_load(s, opsize, addr1, 1, IS_USER(s));
4418     tcg_temp_free(addr1);
4419     bound2 = gen_load(s, opsize, addr2, 1, IS_USER(s));
4420     tcg_temp_free(addr2);
4421 
4422     reg = tcg_temp_new();
4423     if (ext & 0x8000) {
4424         tcg_gen_mov_i32(reg, AREG(ext, 12));
4425     } else {
4426         gen_ext(reg, DREG(ext, 12), opsize, 1);
4427     }
4428 
4429     gen_flush_flags(s);
4430     gen_helper_chk2(cpu_env, reg, bound1, bound2);
4431     tcg_temp_free(reg);
4432     tcg_temp_free(bound1);
4433     tcg_temp_free(bound2);
4434 }
4435 
4436 static void m68k_copy_line(TCGv dst, TCGv src, int index)
4437 {
4438     TCGv addr;
4439     TCGv_i64 t0, t1;
4440 
4441     addr = tcg_temp_new();
4442 
4443     t0 = tcg_temp_new_i64();
4444     t1 = tcg_temp_new_i64();
4445 
4446     tcg_gen_andi_i32(addr, src, ~15);
4447     tcg_gen_qemu_ld64(t0, addr, index);
4448     tcg_gen_addi_i32(addr, addr, 8);
4449     tcg_gen_qemu_ld64(t1, addr, index);
4450 
4451     tcg_gen_andi_i32(addr, dst, ~15);
4452     tcg_gen_qemu_st64(t0, addr, index);
4453     tcg_gen_addi_i32(addr, addr, 8);
4454     tcg_gen_qemu_st64(t1, addr, index);
4455 
4456     tcg_temp_free_i64(t0);
4457     tcg_temp_free_i64(t1);
4458     tcg_temp_free(addr);
4459 }
4460 
4461 DISAS_INSN(move16_reg)
4462 {
4463     int index = IS_USER(s);
4464     TCGv tmp;
4465     uint16_t ext;
4466 
4467     ext = read_im16(env, s);
4468     if ((ext & (1 << 15)) == 0) {
4469         gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4470     }
4471 
4472     m68k_copy_line(AREG(ext, 12), AREG(insn, 0), index);
4473 
4474     /* Ax can be Ay, so save Ay before incrementing Ax */
4475     tmp = tcg_temp_new();
4476     tcg_gen_mov_i32(tmp, AREG(ext, 12));
4477     tcg_gen_addi_i32(AREG(insn, 0), AREG(insn, 0), 16);
4478     tcg_gen_addi_i32(AREG(ext, 12), tmp, 16);
4479     tcg_temp_free(tmp);
4480 }
4481 
4482 DISAS_INSN(move16_mem)
4483 {
4484     int index = IS_USER(s);
4485     TCGv reg, addr;
4486 
4487     reg = AREG(insn, 0);
4488     addr = tcg_const_i32(read_im32(env, s));
4489 
4490     if ((insn >> 3) & 1) {
4491         /* MOVE16 (xxx).L, (Ay) */
4492         m68k_copy_line(reg, addr, index);
4493     } else {
4494         /* MOVE16 (Ay), (xxx).L */
4495         m68k_copy_line(addr, reg, index);
4496     }
4497 
4498     tcg_temp_free(addr);
4499 
4500     if (((insn >> 3) & 2) == 0) {
4501         /* (Ay)+ */
4502         tcg_gen_addi_i32(reg, reg, 16);
4503     }
4504 }
4505 
4506 DISAS_INSN(strldsr)
4507 {
4508     uint16_t ext;
4509     uint32_t addr;
4510 
4511     addr = s->pc - 2;
4512     ext = read_im16(env, s);
4513     if (ext != 0x46FC) {
4514         gen_exception(s, addr, EXCP_ILLEGAL);
4515         return;
4516     }
4517     ext = read_im16(env, s);
4518     if (IS_USER(s) || (ext & SR_S) == 0) {
4519         gen_exception(s, addr, EXCP_PRIVILEGE);
4520         return;
4521     }
4522     gen_push(s, gen_get_sr(s));
4523     gen_set_sr_im(s, ext, 0);
4524 }
4525 
4526 DISAS_INSN(move_from_sr)
4527 {
4528     TCGv sr;
4529 
4530     if (IS_USER(s) && !m68k_feature(env, M68K_FEATURE_M68000)) {
4531         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4532         return;
4533     }
4534     sr = gen_get_sr(s);
4535     DEST_EA(env, insn, OS_WORD, sr, NULL);
4536 }
4537 
4538 #if defined(CONFIG_SOFTMMU)
/*
 * MOVES: move to/from an alternate address space, using the SFC/DFC
 * MMU indices for the memory access.  Privileged.
 */
DISAS_INSN(moves)
{
    int opsize;
    uint16_t ext;
    TCGv reg;
    TCGv addr;
    int extend;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    opsize = insn_opsize(insn);

    if (ext & 0x8000) {
        /* address register */
        reg = AREG(ext, 12);
        extend = 1;
    } else {
        /* data register */
        reg = DREG(ext, 12);
        extend = 0;
    }

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    if (ext & 0x0800) {
        /* from reg to ea */
        gen_store(s, opsize, addr, reg, DFC_INDEX(s));
    } else {
        /* from ea to reg */
        TCGv tmp = gen_load(s, opsize, addr, 0, SFC_INDEX(s));
        if (extend) {
            /* Loads into An are sign-extended to 32 bits.  */
            gen_ext(reg, tmp, opsize, 1);
        } else {
            /* Loads into Dn replace only the sized subfield.  */
            gen_partset_reg(opsize, reg, tmp);
        }
        tcg_temp_free(tmp);
    }
    /* gen_lea does not update An for (An)+ / -(An); do it here.  */
    switch (extract32(insn, 3, 3)) {
    case 3: /* Indirect postincrement.  */
        /* A byte access through A7 moves SP by 2 to keep it even.  */
        tcg_gen_addi_i32(AREG(insn, 0), addr,
                         REG(insn, 0) == 7 && opsize == OS_BYTE
                         ? 2
                         : opsize_bytes(opsize));
        break;
    case 4: /* Indirect predecrememnt.  */
        tcg_gen_mov_i32(AREG(insn, 0), addr);
        break;
    }
}
4597 
DISAS_INSN(move_to_sr)
{
    /* Privileged.  */
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    gen_move_to_sr(env, s, insn, false);
    /* End the TB so execution continues with the new SR state.  */
    gen_exit_tb(s);
}
4607 
DISAS_INSN(move_from_usp)
{
    /* Privileged.  */
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* An = USP (the saved user stack pointer).  */
    tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
                   offsetof(CPUM68KState, sp[M68K_USP]));
}
4617 
DISAS_INSN(move_to_usp)
{
    /* Privileged.  */
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* USP = An.  */
    tcg_gen_st_i32(AREG(insn, 0), cpu_env,
                   offsetof(CPUM68KState, sp[M68K_USP]));
}
4627 
DISAS_INSN(halt)
{
    /* Privileged.  */
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    /* Raise the halt pseudo-exception at the following pc.  */
    gen_exception(s, s->pc, EXCP_HALT_INSN);
}
4637 
4638 DISAS_INSN(stop)
4639 {
4640     uint16_t ext;
4641 
4642     if (IS_USER(s)) {
4643         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4644         return;
4645     }
4646 
4647     ext = read_im16(env, s);
4648 
4649     gen_set_sr_im(s, ext, 0);
4650     tcg_gen_movi_i32(cpu_halted, 1);
4651     gen_exception(s, s->pc, EXCP_HLT);
4652 }
4653 
DISAS_INSN(rte)
{
    /* Privileged.  */
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* The actual return-from-exception work is done by the
       EXCP_RTE handler.  */
    gen_exception(s, s->base.pc_next, EXCP_RTE);
}
4662 
4663 DISAS_INSN(cf_movec)
4664 {
4665     uint16_t ext;
4666     TCGv reg;
4667 
4668     if (IS_USER(s)) {
4669         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4670         return;
4671     }
4672 
4673     ext = read_im16(env, s);
4674 
4675     if (ext & 0x8000) {
4676         reg = AREG(ext, 12);
4677     } else {
4678         reg = DREG(ext, 12);
4679     }
4680     gen_helper_cf_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg);
4681     gen_exit_tb(s);
4682 }
4683 
4684 DISAS_INSN(m68k_movec)
4685 {
4686     uint16_t ext;
4687     TCGv reg;
4688 
4689     if (IS_USER(s)) {
4690         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4691         return;
4692     }
4693 
4694     ext = read_im16(env, s);
4695 
4696     if (ext & 0x8000) {
4697         reg = AREG(ext, 12);
4698     } else {
4699         reg = DREG(ext, 12);
4700     }
4701     if (insn & 1) {
4702         gen_helper_m68k_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg);
4703     } else {
4704         gen_helper_m68k_movec_from(reg, cpu_env, tcg_const_i32(ext & 0xfff));
4705     }
4706     gen_exit_tb(s);
4707 }
4708 
DISAS_INSN(intouch)
{
    /* Privileged.  */
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* ICache fetch.  Implement as no-op.  */
}
4717 
DISAS_INSN(cpushl)
{
    /* Privileged.  */
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* Cache push/invalidate.  Implement as no-op.  */
}
4726 
DISAS_INSN(cpush)
{
    /* Privileged.  */
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* Cache push/invalidate.  Implement as no-op.  */
}
4735 
DISAS_INSN(cinv)
{
    /* Privileged.  */
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* Invalidate cache line.  Implement as no-op.  */
}
4744 
4745 #if defined(CONFIG_SOFTMMU)
4746 DISAS_INSN(pflush)
4747 {
4748     TCGv opmode;
4749 
4750     if (IS_USER(s)) {
4751         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4752         return;
4753     }
4754 
4755     opmode = tcg_const_i32((insn >> 3) & 3);
4756     gen_helper_pflush(cpu_env, AREG(insn, 0), opmode);
4757     tcg_temp_free(opmode);
4758 }
4759 
4760 DISAS_INSN(ptest)
4761 {
4762     TCGv is_read;
4763 
4764     if (IS_USER(s)) {
4765         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4766         return;
4767     }
4768     is_read = tcg_const_i32((insn >> 5) & 1);
4769     gen_helper_ptest(cpu_env, AREG(insn, 0), is_read);
4770     tcg_temp_free(is_read);
4771 }
4772 #endif
4773 
DISAS_INSN(wddata)
{
    /* Always raises a privilege violation.  */
    gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
}
4778 
DISAS_INSN(wdebug)
{
    M68kCPU *cpu = m68k_env_get_cpu(env);

    /* Privileged.  */
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* TODO: Implement wdebug.  */
    cpu_abort(CPU(cpu), "WDEBUG not implemented");
}
4790 #endif
4791 
4792 DISAS_INSN(trap)
4793 {
4794     gen_exception(s, s->base.pc_next, EXCP_TRAP0 + (insn & 0xf));
4795 }
4796 
4797 static void gen_load_fcr(DisasContext *s, TCGv res, int reg)
4798 {
4799     switch (reg) {
4800     case M68K_FPIAR:
4801         tcg_gen_movi_i32(res, 0);
4802         break;
4803     case M68K_FPSR:
4804         tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpsr));
4805         break;
4806     case M68K_FPCR:
4807         tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpcr));
4808         break;
4809     }
4810 }
4811 
4812 static void gen_store_fcr(DisasContext *s, TCGv val, int reg)
4813 {
4814     switch (reg) {
4815     case M68K_FPIAR:
4816         break;
4817     case M68K_FPSR:
4818         tcg_gen_st_i32(val, cpu_env, offsetof(CPUM68KState, fpsr));
4819         break;
4820     case M68K_FPCR:
4821         gen_helper_set_fpcr(cpu_env, val);
4822         break;
4823     }
4824 }
4825 
4826 static void gen_qemu_store_fcr(DisasContext *s, TCGv addr, int reg)
4827 {
4828     int index = IS_USER(s);
4829     TCGv tmp;
4830 
4831     tmp = tcg_temp_new();
4832     gen_load_fcr(s, tmp, reg);
4833     tcg_gen_qemu_st32(tmp, addr, index);
4834     tcg_temp_free(tmp);
4835 }
4836 
4837 static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg)
4838 {
4839     int index = IS_USER(s);
4840     TCGv tmp;
4841 
4842     tmp = tcg_temp_new();
4843     tcg_gen_qemu_ld32u(tmp, addr, index);
4844     gen_store_fcr(s, tmp, reg);
4845     tcg_temp_free(tmp);
4846 }
4847 
4848 
/*
 * FMOVE/FMOVEM to or from the FP control registers (FPCR, FPSR,
 * FPIAR), decoded from the FPU extension word.  "mask" selects which
 * of the three registers take part; is_write set means the control
 * register(s) are read out to the destination.
 */
static void gen_op_fmove_fcr(CPUM68KState *env, DisasContext *s,
                             uint32_t insn, uint32_t ext)
{
    int mask = (ext >> 10) & 7;
    int is_write = (ext >> 13) & 1;
    int mode = extract32(insn, 3, 3);
    int i;
    TCGv addr, tmp;

    switch (mode) {
    case 0: /* Dn */
        /* Exactly one register may be transferred via Dn.  */
        if (mask != M68K_FPIAR && mask != M68K_FPSR && mask != M68K_FPCR) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }
        if (is_write) {
            gen_load_fcr(s, DREG(insn, 0), mask);
        } else {
            gen_store_fcr(s, DREG(insn, 0), mask);
        }
        return;
    case 1: /* An, only with FPIAR */
        if (mask != M68K_FPIAR) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }
        if (is_write) {
            gen_load_fcr(s, AREG(insn, 0), mask);
        } else {
            gen_store_fcr(s, AREG(insn, 0), mask);
        }
        return;
    default:
        break;
    }

    /* Memory operand: copy the address so we can step through it.  */
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }

    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);

    /* mask:
     *
     * 0b100 Floating-Point Control Register
     * 0b010 Floating-Point Status Register
     * 0b001 Floating-Point Instruction Address Register
     *
     */

    if (is_write && mode == 4) {
        /* Predecrement store: step the address downward between
           stores and write the final address back to An.  */
        for (i = 2; i >= 0; i--, mask >>= 1) {
            if (mask & 1) {
                gen_qemu_store_fcr(s, addr, 1 << i);
                if (mask != 1) {
                    tcg_gen_subi_i32(addr, addr, opsize_bytes(OS_LONG));
                }
            }
       }
       tcg_gen_mov_i32(AREG(insn, 0), addr);
    } else {
        for (i = 0; i < 3; i++, mask >>= 1) {
            if (mask & 1) {
                if (is_write) {
                    gen_qemu_store_fcr(s, addr, 1 << i);
                } else {
                    gen_qemu_load_fcr(s, addr, 1 << i);
                }
                if (mask != 1 || mode == 3) {
                    tcg_gen_addi_i32(addr, addr, opsize_bytes(OS_LONG));
                }
            }
        }
        /* Postincrement: write the final address back to An.  */
        if (mode == 3) {
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
    }
    tcg_temp_free_i32(addr);
}
4931 
/*
 * FMOVEM: move multiple FP data registers to or from memory.  The
 * register list (static, or dynamic from a data register) and the
 * direction come from the extension word; the copying is done by
 * helpers which return the final address for the An writeback.
 */
static void gen_op_fmovem(CPUM68KState *env, DisasContext *s,
                          uint32_t insn, uint32_t ext)
{
    int opsize;
    TCGv addr, tmp;
    int mode = (ext >> 11) & 0x3;
    int is_load = ((ext & 0x2000) == 0);

    if (m68k_feature(s->env, M68K_FEATURE_FPU)) {
        opsize = OS_EXTENDED;
    } else {
        opsize = OS_DOUBLE;  /* FIXME */
    }

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    tmp = tcg_temp_new();
    if (mode & 0x1) {
        /* Dynamic register list */
        tcg_gen_ext8u_i32(tmp, DREG(ext, 4));
    } else {
        /* Static register list */
        tcg_gen_movi_i32(tmp, ext & 0xff);
    }

    if (!is_load && (mode & 2) == 0) {
        /* predecrement addressing mode
         * only available to store register to memory
         */
        if (opsize == OS_EXTENDED) {
            gen_helper_fmovemx_st_predec(tmp, cpu_env, addr, tmp);
        } else {
            gen_helper_fmovemd_st_predec(tmp, cpu_env, addr, tmp);
        }
    } else {
        /* postincrement addressing mode */
        if (opsize == OS_EXTENDED) {
            if (is_load) {
                gen_helper_fmovemx_ld_postinc(tmp, cpu_env, addr, tmp);
            } else {
                gen_helper_fmovemx_st_postinc(tmp, cpu_env, addr, tmp);
            }
        } else {
            if (is_load) {
                gen_helper_fmovemd_ld_postinc(tmp, cpu_env, addr, tmp);
            } else {
                gen_helper_fmovemd_st_postinc(tmp, cpu_env, addr, tmp);
            }
        }
    }
    /* Update An for the (An)+ and -(An) addressing modes.  */
    if ((insn & 070) == 030 || (insn & 070) == 040) {
        tcg_gen_mov_i32(AREG(insn, 0), tmp);
    }
    tcg_temp_free(tmp);
}
4991 
4992 /* ??? FP exceptions are not implemented.  Most exceptions are deferred until
4993    immediately before the next FP instruction is executed.  */
/*
 * Main FPU dispatch: decodes the FPU extension word into fmovecr,
 * fmove-out, control-register moves, fmovem, or the arithmetic
 * opmodes below.  Arithmetic results also set the FP condition
 * codes via ftst.
 */
DISAS_INSN(fpu)
{
    uint16_t ext;
    int opmode;
    int opsize;
    TCGv_ptr cpu_src, cpu_dest;

    ext = read_im16(env, s);
    opmode = ext & 0x7f;
    /* Bits 15:13 of the extension word pick the general form.  */
    switch ((ext >> 13) & 7) {
    case 0:
        break;
    case 1:
        goto undef;
    case 2:
        if (insn == 0xf200 && (ext & 0xfc00) == 0x5c00) {
            /* fmovecr */
            TCGv rom_offset = tcg_const_i32(opmode);
            cpu_dest = gen_fp_ptr(REG(ext, 7));
            gen_helper_fconst(cpu_env, cpu_dest, rom_offset);
            tcg_temp_free_ptr(cpu_dest);
            tcg_temp_free(rom_offset);
            return;
        }
        break;
    case 3: /* fmove out */
        cpu_src = gen_fp_ptr(REG(ext, 7));
        opsize = ext_opsize(ext, 10);
        if (gen_ea_fp(env, s, insn, opsize, cpu_src,
                      EA_STORE, IS_USER(s)) == -1) {
            gen_addr_fault(s);
        }
        gen_helper_ftst(cpu_env, cpu_src);
        tcg_temp_free_ptr(cpu_src);
        return;
    case 4: /* fmove to control register.  */
    case 5: /* fmove from control register.  */
        gen_op_fmove_fcr(env, s, insn, ext);
        return;
    case 6: /* fmovem */
    case 7:
        if ((ext & 0x1000) == 0 && !m68k_feature(s->env, M68K_FEATURE_FPU)) {
            goto undef;
        }
        gen_op_fmovem(env, s, insn, ext);
        return;
    }
    if (ext & (1 << 14)) {
        /* Source effective address.  */
        opsize = ext_opsize(ext, 10);
        cpu_src = gen_fp_result_ptr();
        if (gen_ea_fp(env, s, insn, opsize, cpu_src,
                      EA_LOADS, IS_USER(s)) == -1) {
            gen_addr_fault(s);
            return;
        }
    } else {
        /* Source register.  */
        opsize = OS_EXTENDED;
        cpu_src = gen_fp_ptr(REG(ext, 10));
    }
    cpu_dest = gen_fp_ptr(REG(ext, 7));
    switch (opmode) {
    case 0: /* fmove */
        gen_fp_move(cpu_dest, cpu_src);
        break;
    case 0x40: /* fsmove */
        gen_helper_fsround(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x44: /* fdmove */
        gen_helper_fdround(cpu_env, cpu_dest, cpu_src);
        break;
    case 1: /* fint */
        gen_helper_firound(cpu_env, cpu_dest, cpu_src);
        break;
    case 2: /* fsinh */
        gen_helper_fsinh(cpu_env, cpu_dest, cpu_src);
        break;
    case 3: /* fintrz */
        gen_helper_fitrunc(cpu_env, cpu_dest, cpu_src);
        break;
    case 4: /* fsqrt */
        gen_helper_fsqrt(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x41: /* fssqrt */
        gen_helper_fssqrt(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x45: /* fdsqrt */
        gen_helper_fdsqrt(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x06: /* flognp1 */
        gen_helper_flognp1(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x09: /* ftanh */
        gen_helper_ftanh(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x0a: /* fatan */
        gen_helper_fatan(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x0c: /* fasin */
        gen_helper_fasin(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x0d: /* fatanh */
        gen_helper_fatanh(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x0e: /* fsin */
        gen_helper_fsin(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x0f: /* ftan */
        gen_helper_ftan(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x10: /* fetox */
        gen_helper_fetox(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x11: /* ftwotox */
        gen_helper_ftwotox(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x12: /* ftentox */
        gen_helper_ftentox(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x14: /* flogn */
        gen_helper_flogn(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x15: /* flog10 */
        gen_helper_flog10(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x16: /* flog2 */
        gen_helper_flog2(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x18: /* fabs */
        gen_helper_fabs(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x58: /* fsabs */
        gen_helper_fsabs(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x5c: /* fdabs */
        gen_helper_fdabs(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x19: /* fcosh */
        gen_helper_fcosh(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1a: /* fneg */
        gen_helper_fneg(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x5a: /* fsneg */
        gen_helper_fsneg(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x5e: /* fdneg */
        gen_helper_fdneg(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1c: /* facos */
        gen_helper_facos(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1d: /* fcos */
        gen_helper_fcos(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1e: /* fgetexp */
        gen_helper_fgetexp(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1f: /* fgetman */
        gen_helper_fgetman(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x20: /* fdiv */
        gen_helper_fdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x60: /* fsdiv */
        gen_helper_fsdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x64: /* fddiv */
        gen_helper_fddiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x21: /* fmod */
        gen_helper_fmod(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x22: /* fadd */
        gen_helper_fadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x62: /* fsadd */
        gen_helper_fsadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x66: /* fdadd */
        gen_helper_fdadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x23: /* fmul */
        gen_helper_fmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x63: /* fsmul */
        gen_helper_fsmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x67: /* fdmul */
        gen_helper_fdmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x24: /* fsgldiv */
        gen_helper_fsgldiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x25: /* frem */
        gen_helper_frem(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x26: /* fscale */
        gen_helper_fscale(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x27: /* fsglmul */
        gen_helper_fsglmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x28: /* fsub */
        gen_helper_fsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x68: /* fssub */
        gen_helper_fssub(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x6c: /* fdsub */
        gen_helper_fdsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x30: case 0x31: case 0x32:
    case 0x33: case 0x34: case 0x35:
    case 0x36: case 0x37: {
            TCGv_ptr cpu_dest2 = gen_fp_ptr(REG(ext, 0));
            gen_helper_fsincos(cpu_env, cpu_dest, cpu_dest2, cpu_src);
            tcg_temp_free_ptr(cpu_dest2);
        }
        break;
    case 0x38: /* fcmp */
        gen_helper_fcmp(cpu_env, cpu_src, cpu_dest);
        return;
    case 0x3a: /* ftst */
        gen_helper_ftst(cpu_env, cpu_src);
        return;
    default:
        goto undef;
    }
    tcg_temp_free_ptr(cpu_src);
    /* Set the FP condition codes from the result.  */
    gen_helper_ftst(cpu_env, cpu_dest);
    tcg_temp_free_ptr(cpu_dest);
    return;
undef:
    /* FIXME: Is this right for offset addressing modes?  */
    s->pc -= 2;
    disas_undef_fpu(env, s, insn);
}
5233 
/*
 * Build a DisasCompare (c->v1 <tcond> c->v2) equivalent to FPU condition
 * code 'cond' (the low 6 bits of the predicate field).  c->v2 is always
 * the constant 0, so each case reduces the test to "v1 ==/!= 0".
 * Conditions 16-31 are the IEEE "signaling" variants of 0-15; they are
 * handled identically here because BSUN is not raised (see TODO below).
 * The FPSR condition bits tested are Z, N and A (A is the bit the
 * "Unordered" predicates test -- presumably the NaN flag).
 */
static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv fpsr;

    c->g1 = 1;
    c->v2 = tcg_const_i32(0);
    c->g2 = 0;
    /* TODO: Raise BSUN exception.  */
    fpsr = tcg_temp_new();
    gen_load_fcr(s, fpsr, M68K_FPSR);
    switch (cond) {
    case 0:  /* False */
    case 16: /* Signaling False */
        c->v1 = c->v2;
        c->tcond = TCG_COND_NEVER;
        break;
    case 1:  /* EQual Z */
    case 17: /* Signaling EQual Z */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        c->tcond = TCG_COND_NE;
        break;
    case 2:  /* Ordered Greater Than !(A || Z || N) */
    case 18: /* Greater Than !(A || Z || N) */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_andi_i32(c->v1, fpsr,
                         FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
        c->tcond = TCG_COND_EQ;
        break;
    case 3:  /* Ordered Greater than or Equal Z || !(A || N) */
    case 19: /* Greater than or Equal Z || !(A || N) */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        /* Shift the A bit up into the N bit position so that A and N can
           be combined with a single OR, then invert N: the result is
           nonzero iff Z || !(A || N).  */
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
        tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_Z | FPSR_CC_N);
        tcg_gen_or_i32(c->v1, c->v1, fpsr);
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 4:  /* Ordered Less Than !(!N || A || Z); */
    case 20: /* Less Than !(!N || A || Z); */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        /* Flip N, then require N==1 and A==Z==0 via a single mask test. */
        tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N);
        tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z);
        c->tcond = TCG_COND_EQ;
        break;
    case 5:  /* Ordered Less than or Equal Z || (N && !A) */
    case 21: /* Less than or Equal Z || (N && !A) */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        /* Move A into the N position and clear N when A is set ("N && !A"),
           then test Z || N on the result.  */
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
        tcg_gen_andc_i32(c->v1, fpsr, c->v1);
        tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_Z | FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 6:  /* Ordered Greater or Less than !(A || Z) */
    case 22: /* Greater or Less than !(A || Z) */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
        c->tcond = TCG_COND_EQ;
        break;
    case 7:  /* Ordered !A */
    case 23: /* Greater, Less or Equal !A */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        c->tcond = TCG_COND_EQ;
        break;
    case 8:  /* Unordered A */
    case 24: /* Not Greater, Less or Equal A */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        c->tcond = TCG_COND_NE;
        break;
    case 9:  /* Unordered or Equal A || Z */
    case 25: /* Not Greater or Less then A || Z */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
        c->tcond = TCG_COND_NE;
        break;
    case 10: /* Unordered or Greater Than A || !(N || Z)) */
    case 26: /* Not Less or Equal A || !(N || Z)) */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        /* Same alignment trick as case 3, with Z shifted into the N
           position: nonzero iff A || !(N || Z).  */
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
        tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_A | FPSR_CC_N);
        tcg_gen_or_i32(c->v1, c->v1, fpsr);
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 11: /* Unordered or Greater or Equal A || Z || !N */
    case 27: /* Not Less Than A || Z || !N */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 12: /* Unordered or Less Than A || (N && !Z) */
    case 28: /* Not Greater than or Equal A || (N && !Z) */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        /* Move Z into the N position and clear N when Z is set ("N && !Z"),
           then test A || N on the result.  */
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
        tcg_gen_andc_i32(c->v1, fpsr, c->v1);
        tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_A | FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 13: /* Unordered or Less or Equal A || Z || N */
    case 29: /* Not Greater Than A || Z || N */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 14: /* Not Equal !Z */
    case 30: /* Signaling Not Equal !Z */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        c->tcond = TCG_COND_EQ;
        break;
    case 15: /* True */
    case 31: /* Signaling True */
        c->v1 = c->v2;
        c->tcond = TCG_COND_ALWAYS;
        break;
    }
    tcg_temp_free(fpsr);
}
5373 
5374 static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1)
5375 {
5376     DisasCompare c;
5377 
5378     gen_fcc_cond(&c, s, cond);
5379     update_cc_op(s);
5380     tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
5381     free_cond(&c);
5382 }
5383 
5384 DISAS_INSN(fbcc)
5385 {
5386     uint32_t offset;
5387     uint32_t base;
5388     TCGLabel *l1;
5389 
5390     base = s->pc;
5391     offset = (int16_t)read_im16(env, s);
5392     if (insn & (1 << 6)) {
5393         offset = (offset << 16) | read_im16(env, s);
5394     }
5395 
5396     l1 = gen_new_label();
5397     update_cc_op(s);
5398     gen_fjmpcc(s, insn & 0x3f, l1);
5399     gen_jmp_tb(s, 0, s->pc);
5400     gen_set_label(l1);
5401     gen_jmp_tb(s, 1, base + offset);
5402 }
5403 
5404 DISAS_INSN(fscc)
5405 {
5406     DisasCompare c;
5407     int cond;
5408     TCGv tmp;
5409     uint16_t ext;
5410 
5411     ext = read_im16(env, s);
5412     cond = ext & 0x3f;
5413     gen_fcc_cond(&c, s, cond);
5414 
5415     tmp = tcg_temp_new();
5416     tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
5417     free_cond(&c);
5418 
5419     tcg_gen_neg_i32(tmp, tmp);
5420     DEST_EA(env, insn, OS_BYTE, tmp, NULL);
5421     tcg_temp_free(tmp);
5422 }
5423 
5424 #if defined(CONFIG_SOFTMMU)
5425 DISAS_INSN(frestore)
5426 {
5427     TCGv addr;
5428 
5429     if (IS_USER(s)) {
5430         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
5431         return;
5432     }
5433     if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5434         SRC_EA(env, addr, OS_LONG, 0, NULL);
5435         /* FIXME: check the state frame */
5436     } else {
5437         disas_undef(env, s, insn);
5438     }
5439 }
5440 
5441 DISAS_INSN(fsave)
5442 {
5443     if (IS_USER(s)) {
5444         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
5445         return;
5446     }
5447 
5448     if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5449         /* always write IDLE */
5450         TCGv idle = tcg_const_i32(0x41000000);
5451         DEST_EA(env, insn, OS_LONG, idle, NULL);
5452         tcg_temp_free(idle);
5453     } else {
5454         disas_undef(env, s, insn);
5455     }
5456 }
5457 #endif
5458 
5459 static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
5460 {
5461     TCGv tmp = tcg_temp_new();
5462     if (s->env->macsr & MACSR_FI) {
5463         if (upper)
5464             tcg_gen_andi_i32(tmp, val, 0xffff0000);
5465         else
5466             tcg_gen_shli_i32(tmp, val, 16);
5467     } else if (s->env->macsr & MACSR_SU) {
5468         if (upper)
5469             tcg_gen_sari_i32(tmp, val, 16);
5470         else
5471             tcg_gen_ext16s_i32(tmp, val);
5472     } else {
5473         if (upper)
5474             tcg_gen_shri_i32(tmp, val, 16);
5475         else
5476             tcg_gen_ext16u_i32(tmp, val);
5477     }
5478     return tmp;
5479 }
5480 
5481 static void gen_mac_clear_flags(void)
5482 {
5483     tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
5484                      ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
5485 }
5486 
/*
 * MAC/EMAC multiply-accumulate, optionally with a parallel load ("MAC with
 * load", insn bits 4-5 nonzero) and optionally dual-accumulate (EMAC_B).
 * The multiply result is built in s->mactmp (a per-TB i64 temp), then
 * added to or subtracted from the selected accumulator, with saturation
 * handled by mode-specific helpers.
 */
DISAS_INSN(mac)
{
    TCGv rx;
    TCGv ry;
    uint16_t ext;
    int acc;
    TCGv tmp;
    TCGv addr;
    TCGv loadval;
    int dual;
    TCGv saved_flags;

    /* Lazily allocate the shared i64 scratch, once per TB. */
    if (!s->done_mac) {
        s->mactmp = tcg_temp_new_i64();
        s->done_mac = 1;
    }

    ext = read_im16(env, s);

    /* Accumulator index: insn bit 7 is bit 0, ext bit 4 is bit 1. */
    acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
    dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
    if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
        disas_undef(env, s, insn);
        return;
    }
    if (insn & 0x30) {
        /* MAC with load.  */
        tmp = gen_lea(env, s, insn, OS_LONG);
        addr = tcg_temp_new();
        tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
        /* Load the value now to ensure correct exception behavior.
           Perform writeback after reading the MAC inputs.  */
        loadval = gen_load(s, OS_LONG, addr, 0, IS_USER(s));

        acc ^= 1;
        /* Multiplier operands come from the extension word here. */
        rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
        ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
    } else {
        loadval = addr = NULL_QREG;
        rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    }

    gen_mac_clear_flags();
#if 0
    l1 = -1;
    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
        /* Skip the multiply if we know we will ignore it.  */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    if ((ext & 0x0800) == 0) {
        /* Word.  */
        rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
        ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
    }
    if (s->env->macsr & MACSR_FI) {
        gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
    } else {
        if (s->env->macsr & MACSR_SU)
            gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
        else
            gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
        /* ext bits 9-10 select a scale factor: 1 = shift left,
           3 = shift right; 0/2 leave the product unscaled.  */
        switch ((ext >> 9) & 3) {
        case 1:
            tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
            break;
        case 3:
            tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
            break;
        }
    }

    if (dual) {
        /* Save the overflow flag from the multiply.  */
        saved_flags = tcg_temp_new();
        tcg_gen_mov_i32(saved_flags, QREG_MACSR);
    } else {
        saved_flags = NULL_QREG;
    }

#if 0
    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
        /* Skip the accumulate if the value is already saturated.  */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    /* insn bit 8 selects subtract (MSAC) vs add (MAC). */
    if (insn & 0x100)
        tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
    else
        tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);

    /* Saturate the accumulator according to the current mode. */
    if (s->env->macsr & MACSR_FI)
        gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
    else if (s->env->macsr & MACSR_SU)
        gen_helper_macsats(cpu_env, tcg_const_i32(acc));
    else
        gen_helper_macsatu(cpu_env, tcg_const_i32(acc));

#if 0
    /* Disabled because conditional branches clobber temporary vars.  */
    if (l1 != -1)
        gen_set_label(l1);
#endif

    if (dual) {
        /* Dual accumulate variant.  */
        acc = (ext >> 2) & 3;
        /* Restore the overflow flag from the multiplier.  */
        tcg_gen_mov_i32(QREG_MACSR, saved_flags);
#if 0
        /* Disabled because conditional branches clobber temporary vars.  */
        if ((s->env->macsr & MACSR_OMC) != 0) {
            /* Skip the accumulate if the value is already saturated.  */
            l1 = gen_new_label();
            tmp = tcg_temp_new();
            gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
            gen_op_jmp_nz32(tmp, l1);
        }
#endif
        /* Second accumulate into the accumulator named by ext bits 2-3. */
        if (ext & 2)
            tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
        else
            tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
        if (s->env->macsr & MACSR_FI)
            gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
        else if (s->env->macsr & MACSR_SU)
            gen_helper_macsats(cpu_env, tcg_const_i32(acc));
        else
            gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
#if 0
        /* Disabled because conditional branches clobber temporary vars.  */
        if (l1 != -1)
            gen_set_label(l1);
#endif
    }
    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));

    if (insn & 0x30) {
        /* Complete the parallel load: write the loaded value into the
           destination register and perform address-register writeback. */
        TCGv rw;
        rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        tcg_gen_mov_i32(rw, loadval);
        /* FIXME: Should address writeback happen with the masked or
           unmasked value?  */
        switch ((insn >> 3) & 7) {
        case 3: /* Post-increment.  */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
            break;
        case 4: /* Pre-decrement.  */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
        tcg_temp_free(loadval);
    }
}
5651 
5652 DISAS_INSN(from_mac)
5653 {
5654     TCGv rx;
5655     TCGv_i64 acc;
5656     int accnum;
5657 
5658     rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5659     accnum = (insn >> 9) & 3;
5660     acc = MACREG(accnum);
5661     if (s->env->macsr & MACSR_FI) {
5662         gen_helper_get_macf(rx, cpu_env, acc);
5663     } else if ((s->env->macsr & MACSR_OMC) == 0) {
5664         tcg_gen_extrl_i64_i32(rx, acc);
5665     } else if (s->env->macsr & MACSR_SU) {
5666         gen_helper_get_macs(rx, acc);
5667     } else {
5668         gen_helper_get_macu(rx, acc);
5669     }
5670     if (insn & 0x40) {
5671         tcg_gen_movi_i64(acc, 0);
5672         tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
5673     }
5674 }
5675 
5676 DISAS_INSN(move_mac)
5677 {
5678     /* FIXME: This can be done without a helper.  */
5679     int src;
5680     TCGv dest;
5681     src = insn & 3;
5682     dest = tcg_const_i32((insn >> 9) & 3);
5683     gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
5684     gen_mac_clear_flags();
5685     gen_helper_mac_set_flags(cpu_env, dest);
5686 }
5687 
5688 DISAS_INSN(from_macsr)
5689 {
5690     TCGv reg;
5691 
5692     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5693     tcg_gen_mov_i32(reg, QREG_MACSR);
5694 }
5695 
5696 DISAS_INSN(from_mask)
5697 {
5698     TCGv reg;
5699     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5700     tcg_gen_mov_i32(reg, QREG_MAC_MASK);
5701 }
5702 
5703 DISAS_INSN(from_mext)
5704 {
5705     TCGv reg;
5706     TCGv acc;
5707     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5708     acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
5709     if (s->env->macsr & MACSR_FI)
5710         gen_helper_get_mac_extf(reg, cpu_env, acc);
5711     else
5712         gen_helper_get_mac_exti(reg, cpu_env, acc);
5713 }
5714 
5715 DISAS_INSN(macsr_to_ccr)
5716 {
5717     TCGv tmp = tcg_temp_new();
5718     tcg_gen_andi_i32(tmp, QREG_MACSR, 0xf);
5719     gen_helper_set_sr(cpu_env, tmp);
5720     tcg_temp_free(tmp);
5721     set_cc_op(s, CC_OP_FLAGS);
5722 }
5723 
5724 DISAS_INSN(to_mac)
5725 {
5726     TCGv_i64 acc;
5727     TCGv val;
5728     int accnum;
5729     accnum = (insn >> 9) & 3;
5730     acc = MACREG(accnum);
5731     SRC_EA(env, val, OS_LONG, 0, NULL);
5732     if (s->env->macsr & MACSR_FI) {
5733         tcg_gen_ext_i32_i64(acc, val);
5734         tcg_gen_shli_i64(acc, acc, 8);
5735     } else if (s->env->macsr & MACSR_SU) {
5736         tcg_gen_ext_i32_i64(acc, val);
5737     } else {
5738         tcg_gen_extu_i32_i64(acc, val);
5739     }
5740     tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
5741     gen_mac_clear_flags();
5742     gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
5743 }
5744 
5745 DISAS_INSN(to_macsr)
5746 {
5747     TCGv val;
5748     SRC_EA(env, val, OS_LONG, 0, NULL);
5749     gen_helper_set_macsr(cpu_env, val);
5750     gen_exit_tb(s);
5751 }
5752 
5753 DISAS_INSN(to_mask)
5754 {
5755     TCGv val;
5756     SRC_EA(env, val, OS_LONG, 0, NULL);
5757     tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
5758 }
5759 
5760 DISAS_INSN(to_mext)
5761 {
5762     TCGv val;
5763     TCGv acc;
5764     SRC_EA(env, val, OS_LONG, 0, NULL);
5765     acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
5766     if (s->env->macsr & MACSR_FI)
5767         gen_helper_set_mac_extf(cpu_env, val, acc);
5768     else if (s->env->macsr & MACSR_SU)
5769         gen_helper_set_mac_exts(cpu_env, val, acc);
5770     else
5771         gen_helper_set_mac_extu(cpu_env, val, acc);
5772 }
5773 
5774 static disas_proc opcode_table[65536];
5775 
5776 static void
5777 register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
5778 {
5779   int i;
5780   int from;
5781   int to;
5782 
5783   /* Sanity check.  All set bits must be included in the mask.  */
5784   if (opcode & ~mask) {
5785       fprintf(stderr,
5786               "qemu internal error: bogus opcode definition %04x/%04x\n",
5787               opcode, mask);
5788       abort();
5789   }
5790   /* This could probably be cleverer.  For now just optimize the case where
5791      the top bits are known.  */
5792   /* Find the first zero bit in the mask.  */
5793   i = 0x8000;
5794   while ((i & mask) != 0)
5795       i >>= 1;
5796   /* Iterate over all combinations of this and lower bits.  */
5797   if (i == 0)
5798       i = 1;
5799   else
5800       i <<= 1;
5801   from = opcode & ~(i - 1);
5802   to = from + i;
5803   for (i = from; i < to; i++) {
5804       if ((i & mask) == opcode)
5805           opcode_table[i] = proc;
5806   }
5807 }
5808 
5809 /* Register m68k opcode handlers.  Order is important.
5810    Later insn override earlier ones.  */
void register_m68k_insns (CPUM68KState *env)
{
    /* Build the opcode table only once to avoid
       multithreading issues. */
    if (opcode_table[0] != NULL) {
        return;
    }

    /* use BASE() for instruction available
     * for CF_ISA_A and M68000.
     */
#define BASE(name, opcode, mask) \
    register_opcode(disas_##name, 0x##opcode, 0x##mask)
#define INSN(name, opcode, mask, feature) do { \
    if (m68k_feature(env, M68K_FEATURE_##feature)) \
        BASE(name, opcode, mask); \
    } while(0)
    /* Immediate arithmetic, bit operations, CAS (opcode group 0). */
    BASE(undef,     0000, 0000);
    INSN(arith_im,  0080, fff8, CF_ISA_A);
    INSN(arith_im,  0000, ff00, M68000);
    INSN(chk2,      00c0, f9c0, CHK2);
    INSN(bitrev,    00c0, fff8, CF_ISA_APLUSC);
    BASE(bitop_reg, 0100, f1c0);
    BASE(bitop_reg, 0140, f1c0);
    BASE(bitop_reg, 0180, f1c0);
    BASE(bitop_reg, 01c0, f1c0);
    INSN(movep,     0108, f138, MOVEP);
    INSN(arith_im,  0280, fff8, CF_ISA_A);
    INSN(arith_im,  0200, ff00, M68000);
    INSN(undef,     02c0, ffc0, M68000);
    INSN(byterev,   02c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im,  0480, fff8, CF_ISA_A);
    INSN(arith_im,  0400, ff00, M68000);
    INSN(undef,     04c0, ffc0, M68000);
    INSN(arith_im,  0600, ff00, M68000);
    INSN(undef,     06c0, ffc0, M68000);
    INSN(ff1,       04c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im,  0680, fff8, CF_ISA_A);
    INSN(arith_im,  0c00, ff38, CF_ISA_A);
    INSN(arith_im,  0c00, ff00, M68000);
    BASE(bitop_im,  0800, ffc0);
    BASE(bitop_im,  0840, ffc0);
    BASE(bitop_im,  0880, ffc0);
    BASE(bitop_im,  08c0, ffc0);
    INSN(arith_im,  0a80, fff8, CF_ISA_A);
    INSN(arith_im,  0a00, ff00, M68000);
#if defined(CONFIG_SOFTMMU)
    INSN(moves,     0e00, ff00, M68000);
#endif
    INSN(cas,       0ac0, ffc0, CAS);
    INSN(cas,       0cc0, ffc0, CAS);
    INSN(cas,       0ec0, ffc0, CAS);
    INSN(cas2w,     0cfc, ffff, CAS);
    INSN(cas2l,     0efc, ffff, CAS);
    /* MOVE.B/.L/.W (groups 1-3). */
    BASE(move,      1000, f000);
    BASE(move,      2000, f000);
    BASE(move,      3000, f000);
    /* Miscellaneous (group 4). */
    INSN(chk,       4000, f040, M68000);
    INSN(strldsr,   40e7, ffff, CF_ISA_APLUSC);
    INSN(negx,      4080, fff8, CF_ISA_A);
    INSN(negx,      4000, ff00, M68000);
    INSN(undef,     40c0, ffc0, M68000);
    INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
    INSN(move_from_sr, 40c0, ffc0, M68000);
    BASE(lea,       41c0, f1c0);
    BASE(clr,       4200, ff00);
    BASE(undef,     42c0, ffc0);
    INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
    INSN(move_from_ccr, 42c0, ffc0, M68000);
    INSN(neg,       4480, fff8, CF_ISA_A);
    INSN(neg,       4400, ff00, M68000);
    INSN(undef,     44c0, ffc0, M68000);
    BASE(move_to_ccr, 44c0, ffc0);
    INSN(not,       4680, fff8, CF_ISA_A);
    INSN(not,       4600, ff00, M68000);
#if defined(CONFIG_SOFTMMU)
    BASE(move_to_sr, 46c0, ffc0);
#endif
    INSN(nbcd,      4800, ffc0, M68000);
    INSN(linkl,     4808, fff8, M68000);
    BASE(pea,       4840, ffc0);
    BASE(swap,      4840, fff8);
    INSN(bkpt,      4848, fff8, BKPT);
    INSN(movem,     48d0, fbf8, CF_ISA_A);
    INSN(movem,     48e8, fbf8, CF_ISA_A);
    INSN(movem,     4880, fb80, M68000);
    BASE(ext,       4880, fff8);
    BASE(ext,       48c0, fff8);
    BASE(ext,       49c0, fff8);
    BASE(tst,       4a00, ff00);
    INSN(tas,       4ac0, ffc0, CF_ISA_B);
    INSN(tas,       4ac0, ffc0, M68000);
#if defined(CONFIG_SOFTMMU)
    INSN(halt,      4ac8, ffff, CF_ISA_A);
#endif
    INSN(pulse,     4acc, ffff, CF_ISA_A);
    BASE(illegal,   4afc, ffff);
    INSN(mull,      4c00, ffc0, CF_ISA_A);
    INSN(mull,      4c00, ffc0, LONG_MULDIV);
    INSN(divl,      4c40, ffc0, CF_ISA_A);
    INSN(divl,      4c40, ffc0, LONG_MULDIV);
    INSN(sats,      4c80, fff8, CF_ISA_B);
    BASE(trap,      4e40, fff0);
    BASE(link,      4e50, fff8);
    BASE(unlk,      4e58, fff8);
#if defined(CONFIG_SOFTMMU)
    INSN(move_to_usp, 4e60, fff8, USP);
    INSN(move_from_usp, 4e68, fff8, USP);
    INSN(reset,     4e70, ffff, M68000);
    BASE(stop,      4e72, ffff);
    BASE(rte,       4e73, ffff);
    INSN(cf_movec,  4e7b, ffff, CF_ISA_A);
    INSN(m68k_movec, 4e7a, fffe, M68000);
#endif
    BASE(nop,       4e71, ffff);
    INSN(rtd,       4e74, ffff, RTD);
    BASE(rts,       4e75, ffff);
    BASE(jump,      4e80, ffc0);
    BASE(jump,      4ec0, ffc0);
    INSN(addsubq,   5000, f080, M68000);
    BASE(addsubq,   5080, f0c0);
    INSN(scc,       50c0, f0f8, CF_ISA_A); /* Scc.B Dx   */
    INSN(scc,       50c0, f0c0, M68000);   /* Scc.B <EA> */
    INSN(dbcc,      50c8, f0f8, M68000);
    INSN(tpf,       51f8, fff8, CF_ISA_A);

    /* Branch instructions.  */
    BASE(branch,    6000, f000);
    /* Disable long branch instructions, then add back the ones we want.  */
    BASE(undef,     60ff, f0ff); /* All long branches.  */
    INSN(branch,    60ff, f0ff, CF_ISA_B);
    INSN(undef,     60ff, ffff, CF_ISA_B); /* bra.l */
    INSN(branch,    60ff, ffff, BRAL);
    INSN(branch,    60ff, f0ff, BCCL);

    /* Groups 7-9: MOVEQ, OR/DIV/SBCD, SUB/SUBX/SUBA. */
    BASE(moveq,     7000, f100);
    INSN(mvzs,      7100, f100, CF_ISA_B);
    BASE(or,        8000, f000);
    BASE(divw,      80c0, f0c0);
    INSN(sbcd_reg,  8100, f1f8, M68000);
    INSN(sbcd_mem,  8108, f1f8, M68000);
    BASE(addsub,    9000, f000);
    INSN(undef,     90c0, f0c0, CF_ISA_A);
    INSN(subx_reg,  9180, f1f8, CF_ISA_A);
    INSN(subx_reg,  9100, f138, M68000);
    INSN(subx_mem,  9108, f138, M68000);
    INSN(suba,      91c0, f1c0, CF_ISA_A);
    INSN(suba,      90c0, f0c0, M68000);

    /* Group a: ColdFire MAC/EMAC unit. */
    BASE(undef_mac, a000, f000);
    INSN(mac,       a000, f100, CF_EMAC);
    INSN(from_mac,  a180, f9b0, CF_EMAC);
    INSN(move_mac,  a110, f9fc, CF_EMAC);
    INSN(from_macsr,a980, f9f0, CF_EMAC);
    INSN(from_mask, ad80, fff0, CF_EMAC);
    INSN(from_mext, ab80, fbf0, CF_EMAC);
    INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
    INSN(to_mac,    a100, f9c0, CF_EMAC);
    INSN(to_macsr,  a900, ffc0, CF_EMAC);
    INSN(to_mext,   ab00, fbc0, CF_EMAC);
    INSN(to_mask,   ad00, ffc0, CF_EMAC);

    INSN(mov3q,     a140, f1c0, CF_ISA_B);
    INSN(cmp,       b000, f1c0, CF_ISA_B); /* cmp.b */
    INSN(cmp,       b040, f1c0, CF_ISA_B); /* cmp.w */
    INSN(cmpa,      b0c0, f1c0, CF_ISA_B); /* cmpa.w */
    INSN(cmp,       b080, f1c0, CF_ISA_A);
    INSN(cmpa,      b1c0, f1c0, CF_ISA_A);
    INSN(cmp,       b000, f100, M68000);
    INSN(eor,       b100, f100, M68000);
    INSN(cmpm,      b108, f138, M68000);
    INSN(cmpa,      b0c0, f0c0, M68000);
    INSN(eor,       b180, f1c0, CF_ISA_A);
    BASE(and,       c000, f000);
    INSN(exg_dd,    c140, f1f8, M68000);
    INSN(exg_aa,    c148, f1f8, M68000);
    INSN(exg_da,    c188, f1f8, M68000);
    BASE(mulw,      c0c0, f0c0);
    INSN(abcd_reg,  c100, f1f8, M68000);
    INSN(abcd_mem,  c108, f1f8, M68000);
    BASE(addsub,    d000, f000);
    INSN(undef,     d0c0, f0c0, CF_ISA_A);
    INSN(addx_reg,      d180, f1f8, CF_ISA_A);
    INSN(addx_reg,  d100, f138, M68000);
    INSN(addx_mem,  d108, f138, M68000);
    INSN(adda,      d1c0, f1c0, CF_ISA_A);
    INSN(adda,      d0c0, f0c0, M68000);
    /* Group e: shifts, rotates and bitfields. */
    INSN(shift_im,  e080, f0f0, CF_ISA_A);
    INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
    INSN(shift8_im, e000, f0f0, M68000);
    INSN(shift16_im, e040, f0f0, M68000);
    INSN(shift_im,  e080, f0f0, M68000);
    INSN(shift8_reg, e020, f0f0, M68000);
    INSN(shift16_reg, e060, f0f0, M68000);
    INSN(shift_reg, e0a0, f0f0, M68000);
    INSN(shift_mem, e0c0, fcc0, M68000);
    INSN(rotate_im, e090, f0f0, M68000);
    INSN(rotate8_im, e010, f0f0, M68000);
    INSN(rotate16_im, e050, f0f0, M68000);
    INSN(rotate_reg, e0b0, f0f0, M68000);
    INSN(rotate8_reg, e030, f0f0, M68000);
    INSN(rotate16_reg, e070, f0f0, M68000);
    INSN(rotate_mem, e4c0, fcc0, M68000);
    INSN(bfext_mem, e9c0, fdc0, BITFIELD);  /* bfextu & bfexts */
    INSN(bfext_reg, e9c0, fdf8, BITFIELD);
    INSN(bfins_mem, efc0, ffc0, BITFIELD);
    INSN(bfins_reg, efc0, fff8, BITFIELD);
    INSN(bfop_mem, eac0, ffc0, BITFIELD);   /* bfchg */
    INSN(bfop_reg, eac0, fff8, BITFIELD);   /* bfchg */
    INSN(bfop_mem, ecc0, ffc0, BITFIELD);   /* bfclr */
    INSN(bfop_reg, ecc0, fff8, BITFIELD);   /* bfclr */
    INSN(bfop_mem, edc0, ffc0, BITFIELD);   /* bfffo */
    INSN(bfop_reg, edc0, fff8, BITFIELD);   /* bfffo */
    INSN(bfop_mem, eec0, ffc0, BITFIELD);   /* bfset */
    INSN(bfop_reg, eec0, fff8, BITFIELD);   /* bfset */
    INSN(bfop_mem, e8c0, ffc0, BITFIELD);   /* bftst */
    INSN(bfop_reg, e8c0, fff8, BITFIELD);   /* bftst */
    /* Group f: coprocessor (FPU) and cache/MMU control. */
    BASE(undef_fpu, f000, f000);
    INSN(fpu,       f200, ffc0, CF_FPU);
    INSN(fbcc,      f280, ffc0, CF_FPU);
    INSN(fpu,       f200, ffc0, FPU);
    INSN(fscc,      f240, ffc0, FPU);
    INSN(fbcc,      f280, ff80, FPU);
#if defined(CONFIG_SOFTMMU)
    INSN(frestore,  f340, ffc0, CF_FPU);
    INSN(fsave,     f300, ffc0, CF_FPU);
    INSN(frestore,  f340, ffc0, FPU);
    INSN(fsave,     f300, ffc0, FPU);
    INSN(intouch,   f340, ffc0, CF_ISA_A);
    INSN(cpushl,    f428, ff38, CF_ISA_A);
    INSN(cpush,     f420, ff20, M68040);
    INSN(cinv,      f400, ff20, M68040);
    INSN(pflush,    f500, ffe0, M68040);
    INSN(ptest,     f548, ffd8, M68040);
    INSN(wddata,    fb00, ff00, CF_ISA_A);
    INSN(wdebug,    fbc0, ffc0, CF_ISA_A);
#endif
    INSN(move16_mem, f600, ffe0, M68040);
    INSN(move16_reg, f620, fff8, M68040);
#undef INSN
}
6052 
6053 static void m68k_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
6054 {
6055     DisasContext *dc = container_of(dcbase, DisasContext, base);
6056     CPUM68KState *env = cpu->env_ptr;
6057 
6058     dc->env = env;
6059     dc->pc = dc->base.pc_first;
6060     dc->cc_op = CC_OP_DYNAMIC;
6061     dc->cc_op_synced = 1;
6062     dc->done_mac = 0;
6063     dc->writeback_mask = 0;
6064     init_release_array(dc);
6065 }
6066 
static void m68k_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
{
    /* Translator hook: nothing to do at TB start for m68k. */
}
6070 
/* Record per-insn metadata (PC and current cc_op) so that
   restore_state_to_opc can recover them after an exception.  */
static void m68k_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
}
6076 
6077 static bool m68k_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
6078                                      const CPUBreakpoint *bp)
6079 {
6080     DisasContext *dc = container_of(dcbase, DisasContext, base);
6081 
6082     gen_exception(dc, dc->base.pc_next, EXCP_DEBUG);
6083     /* The address covered by the breakpoint must be included in
6084        [tb->pc, tb->pc + tb->size) in order to for it to be
6085        properly cleared -- thus we increment the PC here so that
6086        the logic setting tb->size below does the right thing.  */
6087     dc->base.pc_next += 2;
6088 
6089     return true;
6090 }
6091 
6092 static void m68k_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
6093 {
6094     DisasContext *dc = container_of(dcbase, DisasContext, base);
6095     CPUM68KState *env = cpu->env_ptr;
6096     uint16_t insn = read_im16(env, dc);
6097 
6098     opcode_table[insn](env, dc, insn);
6099     do_writebacks(dc);
6100     do_release(dc);
6101 
6102     dc->base.pc_next = dc->pc;
6103 
6104     if (dc->base.is_jmp == DISAS_NEXT) {
6105         /* Stop translation when the next insn might touch a new page.
6106          * This ensures that prefetch aborts at the right place.
6107          *
6108          * We cannot determine the size of the next insn without
6109          * completely decoding it.  However, the maximum insn size
6110          * is 32 bytes, so end if we do not have that much remaining.
6111          * This may produce several small TBs at the end of each page,
6112          * but they will all be linked with goto_tb.
6113          *
6114          * ??? ColdFire maximum is 4 bytes; MC68000's maximum is also
6115          * smaller than MC68020's.
6116          */
6117         target_ulong start_page_offset
6118             = dc->pc - (dc->base.pc_first & TARGET_PAGE_MASK);
6119 
6120         if (start_page_offset >= TARGET_PAGE_SIZE - 32) {
6121             dc->base.is_jmp = DISAS_TOO_MANY;
6122         }
6123     }
6124 }
6125 
/* Finish the TB: emit the appropriate exit sequence for the reason
   translation stopped (is_jmp), honoring gdb single-step mode.  */
static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* An exception-raising insn has already terminated the TB.  */
    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    if (dc->base.singlestep_enabled) {
        /* Under gdb single-step, raise EXCP_DEBUG instead of chaining
           to the next TB.  */
        gen_helper_raise_exception(cpu_env, tcg_const_i32(EXCP_DEBUG));
        return;
    }

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        /* Translation stopped mid-stream; sync cc_op and chain to the
           TB starting at dc->pc.  */
        update_cc_op(dc);
        gen_jmp_tb(dc, 0, dc->pc);
        break;
    case DISAS_JUMP:
        /* We updated CC_OP and PC in gen_jmp/gen_jmp_im.  */
        tcg_gen_lookup_and_goto_ptr();
        break;
    case DISAS_EXIT:
        /* We updated CC_OP and PC in gen_exit_tb, but also modified
           other state that may require returning to the main loop.  */
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}
6156 
/* Log the symbol (if any) and disassembly of the translated guest code. */
static void m68k_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}
6162 
/* Hook table consumed by the generic translator_loop.  */
static const TranslatorOps m68k_tr_ops = {
    .init_disas_context = m68k_tr_init_disas_context,
    .tb_start           = m68k_tr_tb_start,
    .insn_start         = m68k_tr_insn_start,
    .breakpoint_check   = m68k_tr_breakpoint_check,
    .translate_insn     = m68k_tr_translate_insn,
    .tb_stop            = m68k_tr_tb_stop,
    .disas_log          = m68k_tr_disas_log,
};
6172 
6173 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
6174 {
6175     DisasContext dc;
6176     translator_loop(&m68k_tr_ops, &dc.base, cpu, tb, max_insns);
6177 }
6178 
6179 static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low)
6180 {
6181     floatx80 a = { .high = high, .low = low };
6182     union {
6183         float64 f64;
6184         double d;
6185     } u;
6186 
6187     u.f64 = floatx80_to_float64(a, &env->fp_status);
6188     return u.d;
6189 }
6190 
/* Dump the CPU register state (data/address/FP registers, SR, FPSR,
   FPCR and, with softmmu, the supervisor stack pointers and MMU
   registers) to stream f, e.g. for the monitor "info registers".  */
void m68k_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    M68kCPU *cpu = M68K_CPU(cs);
    CPUM68KState *env = &cpu->env;
    int i;
    uint16_t sr;
    /* One line per register index: Dn, An and Fn (raw 80-bit image plus
       a host-double approximation for readability).  */
    for (i = 0; i < 8; i++) {
        qemu_fprintf(f, "D%d = %08x   A%d = %08x   "
                     "F%d = %04x %016"PRIx64"  (%12g)\n",
                     i, env->dregs[i], i, env->aregs[i],
                     i, env->fregs[i].l.upper, env->fregs[i].l.lower,
                     floatx80_to_double(env, env->fregs[i].l.upper,
                                        env->fregs[i].l.lower));
    }
    qemu_fprintf(f, "PC = %08x   ", env->pc);
    /* The CC bits live outside env->sr between insns; merge them in.  */
    sr = env->sr | cpu_m68k_get_ccr(env);
    qemu_fprintf(f, "SR = %04x T:%x I:%x %c%c %c%c%c%c%c\n",
                 sr, (sr & SR_T) >> SR_T_SHIFT, (sr & SR_I) >> SR_I_SHIFT,
                 (sr & SR_S) ? 'S' : 'U', (sr & SR_M) ? '%' : 'I',
                 (sr & CCF_X) ? 'X' : '-', (sr & CCF_N) ? 'N' : '-',
                 (sr & CCF_Z) ? 'Z' : '-', (sr & CCF_V) ? 'V' : '-',
                 (sr & CCF_C) ? 'C' : '-');
    qemu_fprintf(f, "FPSR = %08x %c%c%c%c ", env->fpsr,
                 (env->fpsr & FPSR_CC_A) ? 'A' : '-',
                 (env->fpsr & FPSR_CC_I) ? 'I' : '-',
                 (env->fpsr & FPSR_CC_Z) ? 'Z' : '-',
                 (env->fpsr & FPSR_CC_N) ? 'N' : '-');
    qemu_fprintf(f, "\n                                "
                 "FPCR =     %04x ", env->fpcr);
    /* Decode the FPCR precision field into its mnemonic letter.  */
    switch (env->fpcr & FPCR_PREC_MASK) {
    case FPCR_PREC_X:
        qemu_fprintf(f, "X ");
        break;
    case FPCR_PREC_S:
        qemu_fprintf(f, "S ");
        break;
    case FPCR_PREC_D:
        qemu_fprintf(f, "D ");
        break;
    }
    /* Decode the FPCR rounding-mode field.  */
    switch (env->fpcr & FPCR_RND_MASK) {
    case FPCR_RND_N:
        qemu_fprintf(f, "RN ");
        break;
    case FPCR_RND_Z:
        qemu_fprintf(f, "RZ ");
        break;
    case FPCR_RND_M:
        qemu_fprintf(f, "RM ");
        break;
    case FPCR_RND_P:
        qemu_fprintf(f, "RP ");
        break;
    }
    qemu_fprintf(f, "\n");
#ifdef CONFIG_SOFTMMU
    /* System-mode extras: the three A7 banks (arrow marks the one
       currently mapped as A7) and the MMU/control registers.  */
    qemu_fprintf(f, "%sA7(MSP) = %08x %sA7(USP) = %08x %sA7(ISP) = %08x\n",
                 env->current_sp == M68K_SSP ? "->" : "  ", env->sp[M68K_SSP],
                 env->current_sp == M68K_USP ? "->" : "  ", env->sp[M68K_USP],
                 env->current_sp == M68K_ISP ? "->" : "  ", env->sp[M68K_ISP]);
    qemu_fprintf(f, "VBR = 0x%08x\n", env->vbr);
    qemu_fprintf(f, "SFC = %x DFC %x\n", env->sfc, env->dfc);
    qemu_fprintf(f, "SSW %08x TCR %08x URP %08x SRP %08x\n",
                 env->mmu.ssw, env->mmu.tcr, env->mmu.urp, env->mmu.srp);
    qemu_fprintf(f, "DTTR0/1: %08x/%08x ITTR0/1: %08x/%08x\n",
                 env->mmu.ttr[M68K_DTTR0], env->mmu.ttr[M68K_DTTR1],
                 env->mmu.ttr[M68K_ITTR0], env->mmu.ttr[M68K_ITTR1]);
    qemu_fprintf(f, "MMUSR %08x, fault at %08x\n",
                 env->mmu.mmusr, env->mmu.ar);
#endif
}
6262 
6263 void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb,
6264                           target_ulong *data)
6265 {
6266     int cc_op = data[1];
6267     env->pc = data[0];
6268     if (cc_op != CC_OP_DYNAMIC) {
6269         env->cc_op = cc_op;
6270     }
6271 }
6272