/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif

#define D(x)

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
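
/*
 * Example: with the field positions used by decode() below,
 * EXTRACT_FIELD(ir, 26, 31) yields the 6-bit major opcode from the top
 * of the instruction word, and EXTRACT_FIELD(ir, 0, 15) yields the
 * 16-bit immediate.
 */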

/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */

static TCGv env_debug;
static TCGv cpu_R[32];
static TCGv cpu_SR[18];
static TCGv env_imm;
static TCGv env_btaken;
static TCGv env_btarget;
static TCGv env_iflags;
static TCGv env_res_addr;
static TCGv env_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    target_ulong pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};

static inline void t_sync_flags(DisasContext *dc)
{
    /* Sync the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)dc->tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}
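
/*
 * Note: direct TB chaining via tcg_gen_goto_tb() is only safe when the
 * destination lies on the same guest page as the current TB (or always,
 * for user-mode emulation), since a cross-page chain could bypass an
 * MMU mapping change; that is what use_goto_tb() checks above. The
 * "+ n" in the exit_tb value tells the epilogue which of the two chain
 * slots of this TB to patch.
 */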

static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}

/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}
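
/*
 * The shift-left/arithmetic-shift-right pair above replicates bit 0 of
 * v across the whole word; masking the result with (MSR_C | MSR_CC)
 * then sets or clears both copies of the carry flag together. MSR_CC
 * is the copy kept at bit 31, which is why read_carry() can fetch it
 * with a plain shift.
 */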

static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_movi_tl(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free(t0);
}

/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}

static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}
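
/*
 * Operand b selection in one place: for a type A (register-register)
 * insn this simply hands back rB. For a type B insn the 16-bit
 * immediate is sign-extended, unless an imm prefix is pending
 * (IMM_FLAG), in which case the low 16 bits are ORed into the value
 * the imm insn already left in env_imm.
 */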

static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}

static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
             k ? "k" : "",  c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new();
    na = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_tl(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
    tcg_temp_free(na);
}

static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_pcmp_instr) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_EQ, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_NE, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}
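
/*
 * The pattern-compare ops share major opcodes with and/or/xor; bit 10
 * of the instruction word routes register-form insns here (see the
 * checks at the top of dec_and/dec_or/dec_xor below).
 */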

static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv v)
{
    TCGv t;

    t = tcg_temp_new();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_andi_tl(t, v, ~MSR_PVR);
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free(t);
}

static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!dc->cpu->cfg.use_msr_instr) {
            /* nop??? */
            return;
        }

        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
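
        /*
         * Note the carve-out above: imm values 0 and 4 only touch the
         * carry flag (MSR_C is bit 2, i.e. the value 4), so msrclr and
         * msrset on the carry alone are allowed even from user mode.
         */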

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (to) {
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 0x7:
                tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                break;
            case 0x800:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
            case 0x7:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                break;
            case 0xb:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            case 0x800:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000:
            case 0x2001:
            case 0x2002:
            case 0x2003:
            case 0x2004:
            case 0x2005:
            case 0x2006:
            case 0x2007:
            case 0x2008:
            case 0x2009:
            case 0x200a:
            case 0x200b:
            case 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_tl(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[0], 0);
    }
}

/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv tmp;
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
         && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
         && !dc->cpu->cfg.use_hw_mul) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        return;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
        /* nop??? */
    }

    tmp = tcg_temp_new();
    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_muls2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulsu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
    tcg_temp_free(tmp);
}

/* Div unit.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_div) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
}

static void dec_barrel(DisasContext *dc)
{
    TCGv t0;
    unsigned int imm_w, imm_s;
    bool s, t, e = false, i = false;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_barrel) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    if (dc->type_b) {
        /* Insert and extract are only available in immediate mode.  */
        i = extract32(dc->imm, 15, 1);
        e = extract32(dc->imm, 14, 1);
    }
    s = extract32(dc->imm, 10, 1);
    t = extract32(dc->imm, 9, 1);
    imm_w = extract32(dc->imm, 6, 5);
    imm_s = extract32(dc->imm, 0, 5);
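
    /*
     * Field layout (from the extracts above): s selects shift left vs
     * right, t selects arithmetic vs logical for right shifts, and in
     * immediate form e/i select the bsefi extract and bsifi insert
     * variants, with imm_w/imm_s as the bit-field width and start
     * position.
     */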

    LOG_DIS("bs%s%s%s r%d r%d r%d\n",
            e ? "e" : "",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    if (e) {
        if (imm_w + imm_s > 32 || imm_w == 0) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
        }
    } else if (i) {
        int width = imm_w - imm_s + 1;

        if (imm_w < imm_s) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
                                imm_s, width);
        }
    } else {
        t0 = tcg_temp_new();

        tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
        tcg_gen_andi_tl(t0, t0, 31);

        if (s) {
            tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        } else {
            if (t) {
                tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            } else {
                tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            }
        }
        tcg_temp_free(t0);
    }
}

static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0;
    unsigned int op;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl.  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0xe0:
            if ((dc->tb_flags & MSR_EE_FLAG)
                && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
                && !dc->cpu->cfg.use_pcmp_instr) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
            }
            if (dc->cpu->cfg.use_pcmp_instr) {
                tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
            }
            break;
        case 0x1e0:
            /* swapb */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit pc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}

static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_tl(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}

static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}
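
/*
 * Example of the imm prefix mechanism: "imm 0x1234" stores 0x12340000
 * in env_imm and sets IMM_FLAG; a following type B insn such as
 * "addi rD, rA, 0x5678" then ORs its low 16 bits in (see dec_alu_op_b)
 * and operates on the full 32-bit value 0x12345678.
 */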

static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to one if r1 is used by load/stores.  */
    int stackprot = 0;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = 1;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other.  */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = 1;
        }

        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, *t);
        }
        return t;
    }
    /* Immediate.  */
    if (!extimm) {
        if (dc->imm == 0) {
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    if (stackprot) {
        gen_helper_stackprot(cpu_env, *t);
    }
    return t;
}

static void dec_load(DisasContext *dc)
{
    TCGv t, v, *addr;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");

    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        /* Force addr into the temp.  */
        if (addr != &t) {
            t = tcg_temp_new();
            tcg_gen_mov_tl(t, *addr);
            addr = &t;
        }
        tcg_gen_andi_tl(t, t, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new();
    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size - 1));
    }

    if (ex) {
        tcg_gen_mov_tl(env_res_addr, *addr);
        tcg_gen_mov_tl(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_tl(cpu_R[dc->rd], v);
    }
    tcg_temp_free(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    if (addr == &t)
        tcg_temp_free(t);
}

static void dec_store(DisasContext *dc)
{
    TCGv t, *addr, swx_addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    swx_addr = tcg_temp_local_new();
    if (ex) { /* swx */
        TCGv tval;

        /* Force addr into the swx_addr. */
        tcg_gen_mov_tl(swx_addr, *addr);
        addr = &swx_addr;
        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(swx_addr, swx_addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new();
        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
                           MO_TEUL);
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }
    tcg_temp_free(swx_addr);

    if (addr == &t)
        tcg_temp_free(t);
}

static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv d, TCGv a, TCGv b)
{
    switch (cc) {
        case CC_EQ:
            tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
            break;
        case CC_NE:
            tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
            break;
        case CC_LT:
            tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
            break;
        case CC_LE:
            tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
            break;
        case CC_GE:
            tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
            break;
        case CC_GT:
            tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
            break;
    }
}

static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    TCGLabel *l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}

static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}
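
/*
 * Delay-slot bookkeeping: delayed_branch is set to 2 when the branch
 * has a delay slot, and the main translation loop decrements it once
 * per insn, so the branch actually resolves right after the delay-slot
 * insn has been translated. The bimm store records whether the branch
 * insn itself carries an imm prefix; the exception entry code uses
 * this when computing re-execution addresses.
 */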

static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep; the mbar immediate is encoded
           in the rd field.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}

static inline void do_rti(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}

static inline void do_rtb(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}
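
/*
 * The shift-right-by-one in the three helpers above moves the saved
 * copies of the user-mode and virtual-mode bits (UMS/VMS, which sit
 * one bit above UM/VM in the MSR) down into the live UM/VM positions;
 * in addition, rti re-enables IE, rtb clears BIP, and rte re-enables
 * EE and clears EIP.
 */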

static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static int dec_check_fpuv2(DisasContext *dc)
{
    if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    /* Nonzero when FPU v2 is configured, so callers can bail out early
       on cores without it.  */
    return (dc->cpu->cfg.use_fpu == 2) ? PVR2_USE_FPU2_MASK : 0;
}

static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_fpu) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}

static void dec_null(DisasContext *dc)
{
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    t_id = tcg_temp_new();
    if (dc->type_b) {
        tcg_gen_movi_tl(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_tl(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free(t_id);
    tcg_temp_free(t_ctrl);
}

static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};
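
/*
 * Each DEC_* entry provides a {bits, mask} pair matched against the
 * major opcode; decode() below walks the table in order and dispatches
 * to the first match. The {0, 0} sentinel matches anything, so unknown
 * opcodes fall through to dec_null.
 */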

static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4) {
            cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
        }
    }
    /* Bit 29 (bit 2 in MicroBlaze's MSB-first numbering) indicates the
       insn type; type B insns carry an immediate.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);
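
    /*
     * Instruction word layout, in the little-endian bit numbering used
     * by EXTRACT_FIELD: opcode in bits 31:26, rd in 25:21, ra in 20:16,
     * rb in 15:11 and the type B immediate in 15:0.
     */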

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}
1629 
1630 /* generate intermediate code for basic block 'tb'.  */
1631 void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
1632 {
1633     CPUMBState *env = cs->env_ptr;
1634     MicroBlazeCPU *cpu = mb_env_get_cpu(env);
1635     uint32_t pc_start;
1636     struct DisasContext ctx;
1637     struct DisasContext *dc = &ctx;
1638     uint32_t page_start, org_flags;
1639     target_ulong npc;
1640     int num_insns;
1641     int max_insns;
1642 
1643     pc_start = tb->pc;
1644     dc->cpu = cpu;
1645     dc->tb = tb;
1646     org_flags = dc->synced_flags = dc->tb_flags = tb->flags;
1647 
1648     dc->is_jmp = DISAS_NEXT;
1649     dc->jmp = 0;
1650     dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
1651     if (dc->delayed_branch) {
1652         dc->jmp = JMP_INDIRECT;
1653     }
1654     dc->pc = pc_start;
1655     dc->singlestep_enabled = cs->singlestep_enabled;
1656     dc->cpustate_changed = 0;
1657     dc->abort_at_next_insn = 0;
1658     dc->nr_nops = 0;
1659 
1660     if (pc_start & 3) {
1661         cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
1662     }
1663 
1664     page_start = pc_start & TARGET_PAGE_MASK;
1665     num_insns = 0;
1666     max_insns = tb_cflags(tb) & CF_COUNT_MASK;
1667     if (max_insns == 0) {
1668         max_insns = CF_COUNT_MASK;
1669     }
1670     if (max_insns > TCG_MAX_INSNS) {
1671         max_insns = TCG_MAX_INSNS;
1672     }
1673 
1674     gen_tb_start(tb);
1675     do {
1677         tcg_gen_insn_start(dc->pc);
1678         num_insns++;
1679 
1680 #if SIM_COMPAT
1681         if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1682             tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
1683             gen_helper_debug();
1684         }
1685 #endif
1686 
1687         if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
1688             t_gen_raise_exception(dc, EXCP_DEBUG);
1689             dc->is_jmp = DISAS_UPDATE;
1690             /* The address covered by the breakpoint must be included in
1691                [tb->pc, tb->pc + tb->size) in order for it to be
1692                properly cleared -- thus we increment the PC here so that
1693                the logic setting tb->size below does the right thing.  */
1694             dc->pc += 4;
1695             break;
1696         }
1697 
1698         /* Pretty disas: log the address of each translated insn.  */
1699         LOG_DIS("%8.8x:\t", dc->pc);
1700 
1701         if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
1702             gen_io_start();
1703         }
1704 
1705         dc->clear_imm = 1;
1706         decode(dc, cpu_ldl_code(env, dc->pc));
1707         if (dc->clear_imm)
1708             dc->tb_flags &= ~IMM_FLAG;
1709         dc->pc += 4;
1710 
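        /*
         * Branches execute their delay slot insn before transferring
         * control: the branch decoders set delayed_branch = 2, the
         * decrement below runs once for the branch itself (2 -> 1) and
         * once for the slot insn (1 -> 0).  Only then do we run any
         * deferred rti/rtb/rte state switch and emit the actual jump;
         * e.g. for "brlid r15, target; addik r1, r1, -4" the addik is
         * emitted before the control transfer.
         */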
1711         if (dc->delayed_branch) {
1712             dc->delayed_branch--;
1713             if (!dc->delayed_branch) {
1714                 if (dc->tb_flags & DRTI_FLAG)
1715                     do_rti(dc);
1716                 if (dc->tb_flags & DRTB_FLAG)
1717                     do_rtb(dc);
1718                 if (dc->tb_flags & DRTE_FLAG)
1719                     do_rte(dc);
1720                 /* Clear the delay slot flag.  */
1721                 dc->tb_flags &= ~D_FLAG;
1722                 /* Direct jumps can be chained; indirect targets are compared at run time.  */
1723                 if (dc->jmp == JMP_INDIRECT) {
1724                     eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
1725                     dc->is_jmp = DISAS_JUMP;
1726                 } else if (dc->jmp == JMP_DIRECT) {
1727                     t_sync_flags(dc);
1728                     gen_goto_tb(dc, 0, dc->jmp_pc);
1729                     dc->is_jmp = DISAS_TB_JUMP;
1730                 } else if (dc->jmp == JMP_DIRECT_CC) {
1731                     TCGLabel *l1 = gen_new_label();
1732                     t_sync_flags(dc);
1733                     /* Conditional jmp.  */
1734                     tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
1735                     gen_goto_tb(dc, 1, dc->pc);
1736                     gen_set_label(l1);
1737                     gen_goto_tb(dc, 0, dc->jmp_pc);
1738 
1739                     dc->is_jmp = DISAS_TB_JUMP;
1740                 }
1741                 break;
1742             }
1743         }
1744         if (cs->singlestep_enabled) {
1745             break;
1746         }
1747     } while (!dc->is_jmp && !dc->cpustate_changed
1748              && !tcg_op_buf_full()
1749              && !singlestep
1750              && (dc->pc - page_start < TARGET_PAGE_SIZE)
1751              && num_insns < max_insns);
1752 
1753     npc = dc->pc;
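    /*
     * If we stopped with a direct branch still pending: when the delay
     * slot was never reached (D_FLAG still set) we cannot chain, so
     * store the slot address and take a dynamic exit; otherwise the
     * branch has resolved and npc becomes its static target.
     */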
1754     if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
1755         if (dc->tb_flags & D_FLAG) {
1756             dc->is_jmp = DISAS_UPDATE;
1757             tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1758             sync_jmpstate(dc);
1759         } else
1760             npc = dc->jmp_pc;
1761     }
1762 
1763     if (tb_cflags(tb) & CF_LAST_IO)
1764         gen_io_end();
1765     /* Force an update if the per-tb cpu state has changed.  */
1766     if (dc->is_jmp == DISAS_NEXT
1767         && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
1768         dc->is_jmp = DISAS_UPDATE;
1769         tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1770     }
1771     t_sync_flags(dc);
1772 
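    /*
     * With gdbstub single-stepping, every TB is one insn long and must
     * end by raising EXCP_DEBUG so the debugger regains control.
     */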
1773     if (unlikely(cs->singlestep_enabled)) {
1774         TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
1775 
1776         if (dc->is_jmp != DISAS_JUMP) {
1777             tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1778         }
1779         gen_helper_raise_exception(cpu_env, tmp);
1780         tcg_temp_free_i32(tmp);
1781     } else {
1782         switch (dc->is_jmp) {
1783             case DISAS_NEXT:
1784                 gen_goto_tb(dc, 1, npc);
1785                 break;
1786             default:
1787             case DISAS_JUMP:
1788             case DISAS_UPDATE:
1789                 /* indicate that the hash table must be used
1790                    to find the next TB */
1791                 tcg_gen_exit_tb(0);
1792                 break;
1793             case DISAS_TB_JUMP:
1794                 /* nothing more to generate */
1795                 break;
1796         }
1797     }
1798     gen_tb_end(tb, num_insns);
1799 
1800     tb->size = dc->pc - pc_start;
1801     tb->icount = num_insns;
1802 
1803 #ifdef DEBUG_DISAS
1804 #if !SIM_COMPAT
1805     if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
1806         && qemu_log_in_addr_range(pc_start)) {
1807         qemu_log_lock();
1808         qemu_log("--------------\n");
1809         log_target_disas(cs, pc_start, dc->pc - pc_start);
1810         qemu_log_unlock();
1811     }
1812 #endif
1813 #endif
1814     assert(!dc->abort_at_next_insn);
1815 }
1816 
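/* Dump the architectural state, e.g. for "-d cpu" logging or the
   monitor's "info registers" command.  */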
1817 void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
1818                        int flags)
1819 {
1820     MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1821     CPUMBState *env = &cpu->env;
1822     int i;
1823 
1824     if (!env || !f)
1825         return;
1826 
1827     cpu_fprintf(f, "IN: PC=%x %s\n",
1828                 env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
1829     cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
1830              env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
1831              env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
1832     cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
1833              env->btaken, env->btarget,
1834              (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
1835              (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
1836              (env->sregs[SR_MSR] & MSR_EIP),
1837              (env->sregs[SR_MSR] & MSR_IE));
1838 
1839     for (i = 0; i < 32; i++) {
1840         cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
1841         if ((i + 1) % 4 == 0)
1842             cpu_fprintf(f, "\n");
1843     }
1844     cpu_fprintf(f, "\n\n");
1845 }
1846 
1847 void mb_tcg_init(void)
1848 {
1849     int i;
1850 
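    /*
     * Create TCG globals backed by fields of CPUMBState; translated code
     * then refers to guest registers through these handles instead of
     * explicit loads and stores from env.
     */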
1851     env_debug = tcg_global_mem_new(cpu_env,
1852                     offsetof(CPUMBState, debug),
1853                     "debug0");
1854     env_iflags = tcg_global_mem_new(cpu_env,
1855                     offsetof(CPUMBState, iflags),
1856                     "iflags");
1857     env_imm = tcg_global_mem_new(cpu_env,
1858                     offsetof(CPUMBState, imm),
1859                     "imm");
1860     env_btarget = tcg_global_mem_new(cpu_env,
1861                      offsetof(CPUMBState, btarget),
1862                      "btarget");
1863     env_btaken = tcg_global_mem_new(cpu_env,
1864                      offsetof(CPUMBState, btaken),
1865                      "btaken");
1866     env_res_addr = tcg_global_mem_new(cpu_env,
1867                      offsetof(CPUMBState, res_addr),
1868                      "res_addr");
1869     env_res_val = tcg_global_mem_new(cpu_env,
1870                      offsetof(CPUMBState, res_val),
1871                      "res_val");
1872     for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
1873         cpu_R[i] = tcg_global_mem_new(cpu_env,
1874                           offsetof(CPUMBState, regs[i]),
1875                           regnames[i]);
1876     }
1877     for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
1878         cpu_SR[i] = tcg_global_mem_new(cpu_env,
1879                           offsetof(CPUMBState, sregs[i]),
1880                           special_regnames[i]);
1881     }
1882 }
1883 
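/*
 * Called when unwinding after a fault: data[] holds the per-insn values
 * recorded by tcg_gen_insn_start() above, so data[0] is the guest PC of
 * the insn that was executing.
 */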
1884 void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
1885                           target_ulong *data)
1886 {
1887     env->sregs[SR_PC] = data[0];
1888 }
1889