/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif

#define D(x)

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
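
/*
 * Worked example (illustrative): with ir = 0x30630004 (addik r3, r3, 4),
 * EXTRACT_FIELD(ir, 26, 31) == 0x0c gives the major opcode and
 * EXTRACT_FIELD(ir, 0, 15) == 0x0004 gives the immediate field.
 */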

/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */

static TCGv env_debug;
static TCGv_env cpu_env;
static TCGv cpu_R[32];
static TCGv cpu_SR[18];
static TCGv env_imm;
static TCGv env_btaken;
static TCGv env_btarget;
static TCGv env_iflags;
static TCGv env_res_addr;
static TCGv env_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    target_ulong pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};

static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)dc->tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}

static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}

/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}
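
/*
 * Note on the shift pair above: moving bit 0 of v up to the sign bit and
 * arithmetic-shifting it back down replicates it across the whole word,
 * e.g. v = 1 yields t0 = 0xffffffff, so the subsequent andi leaves exactly
 * MSR_C | MSR_CC set (or both clear when v's LSB is 0).
 */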

static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_movi_tl(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free(t0);
}

/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}

static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}

static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}

static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
             k ? "k" : "",  c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new();
    na = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_tl(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
    tcg_temp_free(na);
}
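
/*
 * The subtraction above relies on the two's complement identity
 * b - a == b + ~a + 1, which is why cf defaults to 1 when no carry is
 * chained in.  E.g. 5 - 3 == 5 + 0xfffffffc + 1 == 2 (mod 2^32), and the
 * unsigned overflow out of bit 31 becomes the new carry flag.
 */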

static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_pcmp_instr) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_EQ, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_NE, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}

static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}
static inline void msr_write(DisasContext *dc, TCGv v)
{
    TCGv t;

    t = tcg_temp_new();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_andi_tl(t, v, ~MSR_PVR);
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free(t);
}

static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!dc->cpu->cfg.use_msr_instr) {
            /* nop??? */
            return;
        }

        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (to) {
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 0x7:
                tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                break;
            case 0x800:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
            case 0x7:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                break;
            case 0xb:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            case 0x800:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000:
            case 0x2001:
            case 0x2002:
            case 0x2003:
            case 0x2004:
            case 0x2005:
            case 0x2006:
            case 0x2007:
            case 0x2008:
            case 0x2009:
            case 0x200a:
            case 0x200b:
            case 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_tl(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[0], 0);
    }
}

/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv tmp;
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
         && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
         && !dc->cpu->cfg.use_hw_mul) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        return;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
        /* nop??? */
    }

    tmp = tcg_temp_new();
    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_muls2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulsu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
    tcg_temp_free(tmp);
}

/* Div unit.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_div) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
}

static void dec_barrel(DisasContext *dc)
{
    TCGv t0;
    unsigned int imm_w, imm_s;
    bool s, t, e = false, i = false;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_barrel) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    if (dc->type_b) {
        /* Insert and extract are only available in immediate mode.  */
        i = extract32(dc->imm, 15, 1);
        e = extract32(dc->imm, 14, 1);
    }
    s = extract32(dc->imm, 10, 1);
    t = extract32(dc->imm, 9, 1);
    imm_w = extract32(dc->imm, 6, 5);
    imm_s = extract32(dc->imm, 0, 5);

    LOG_DIS("bs%s%s%s r%d r%d r%d\n",
            e ? "e" : "",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    if (e) {
        if (imm_w + imm_s > 32 || imm_w == 0) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
        }
    } else if (i) {
        int width = imm_w - imm_s + 1;

        if (imm_w < imm_s) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
                                imm_s, width);
        }
    } else {
        t0 = tcg_temp_new();

        tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
        tcg_gen_andi_tl(t0, t0, 31);

        if (s) {
            tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        } else {
            if (t) {
                tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            } else {
                tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            }
        }
        tcg_temp_free(t0);
    }
}
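
/*
 * Worked example (illustrative): a bsefi with imm_s = 4 and imm_w = 8
 * extracts an 8-bit field starting at bit 4, so ra = 0xdeadbeef yields
 * (0xdeadbeef >> 4) & 0xff = 0xee.  Note the asymmetry: bsefi's imm_w is
 * the field width, while bsifi's imm_w names the field's last bit, hence
 * the width = imm_w - imm_s + 1 computation for the deposit.
 */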

static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0;
    unsigned int op;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl.  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0xe0:
            if ((dc->tb_flags & MSR_EE_FLAG)
                && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
                && !dc->cpu->cfg.use_pcmp_instr) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
            }
            if (dc->cpu->cfg.use_pcmp_instr) {
                tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
            }
            break;
        case 0x1e0:
            /* swapb */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}

static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_tl(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}

static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}
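
/*
 * Example of the imm prefix at work: for the pair
 *     imm   0x1234
 *     addik r3, r3, 0x5678
 * dec_imm() latches 0x12340000 into env_imm and sets IMM_FLAG, so
 * dec_alu_op_b() ORs in the low 16 bits and the addik operates on the
 * full 0x12345678.  Without the prefix, the 16-bit immediate is
 * sign-extended instead.
 */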

static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to one if r1 is used by loadstores.  */
    int stackprot = 0;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = 1;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other.  */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = 1;
        }

        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, *t);
        }
        return t;
    }
    /* Immediate.  */
    if (!extimm) {
        if (dc->imm == 0) {
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    if (stackprot) {
        gen_helper_stackprot(cpu_env, *t);
    }
    return t;
}

static void dec_load(DisasContext *dc)
{
    TCGv t, v, *addr;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");

    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
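    /*
     * Worked example: for byte accesses the transform below computes
     * addr = (addr & ~3) | (3 - (addr & 3)), mirroring the byte lanes of
     * the word (offsets 0,1,2,3 map to 3,2,1,0).  Halfword accesses only
     * need the XOR with 2.
     */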
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        /* Force addr into the temp.  */
        if (addr != &t) {
            t = tcg_temp_new();
            tcg_gen_mov_tl(t, *addr);
            addr = &t;
        }
        tcg_gen_andi_tl(t, t, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new();
    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size - 1));
    }

    if (ex) {
        tcg_gen_mov_tl(env_res_addr, *addr);
        tcg_gen_mov_tl(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_tl(cpu_R[dc->rd], v);
    }
    tcg_temp_free(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    if (addr == &t)
        tcg_temp_free(t);
}

static void dec_store(DisasContext *dc)
{
    TCGv t, *addr, swx_addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    swx_addr = tcg_temp_local_new();
    if (ex) { /* swx */
        TCGv tval;

        /* Force addr into the swx_addr. */
        tcg_gen_mov_tl(swx_addr, *addr);
        addr = &swx_addr;
        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(swx_addr, swx_addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new();
        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
                           MO_TEUL);
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr,
                       cpu_mmu_index(&dc->cpu->env, false), mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }
    tcg_temp_free(swx_addr);

    if (addr == &t)
        tcg_temp_free(t);
}

static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv d, TCGv a, TCGv b)
{
    switch (cc) {
        case CC_EQ:
            tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
            break;
        case CC_NE:
            tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
            break;
        case CC_LT:
            tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
            break;
        case CC_LE:
            tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
            break;
        case CC_GE:
            tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
            break;
        case CC_GT:
            tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
            break;
    }
}

static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    TCGLabel *l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}

static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}
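
/*
 * Example (illustrative): "beqid r5, -16" takes the small-immediate path
 * above with jmp = JMP_DIRECT_CC and jmp_pc = pc - 16, sets
 * delayed_branch = 2 so the slot insn is translated first, and leaves the
 * evaluated condition (r5 == 0) in env_btaken for the eventual branch.
 */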

static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}

static inline void do_rti(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}

static inline void do_rtb(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}
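
/*
 * All three return helpers above perform the same bit shuffle: MSR keeps
 * saved copies of UM and VM one bit to the left of the live bits (UMS and
 * VMS), so shifting the register right by one and masking with
 * MSR_VM | MSR_UM restores the pre-exception user/virtual mode.  They
 * differ only in which status bit (IE, BIP, or EE/EIP) they rewrite.
 */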

static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static int dec_check_fpuv2(DisasContext *dc)
{
    if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    /* Return nonzero iff FPU v2 is present, so callers can bail out.  */
    return (dc->cpu->cfg.use_fpu == 2) ? PVR2_USE_FPU2_MASK : 0;
}

static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && (dc->cpu->cfg.use_fpu != 1)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}

static void dec_null(DisasContext *dc)
{
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n",
                  dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    t_id = tcg_temp_new();
    if (dc->type_b) {
        tcg_gen_movi_tl(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_tl(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free(t_id);
    tcg_temp_free(t_ctrl);
}

static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};

static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4) {
            cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
        }
    }
    /* Bit 2 in MicroBlaze's MSB-first numbering (bit 29 here) selects the
       type B (immediate) insn format.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}
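
/*
 * Worked decode example (illustrative): ir = 0x30630004 yields opcode
 * 0x0c (addik), rd = ra = 3, imm = 4, with bit 29 set marking the type B
 * format; the decinfo scan above then dispatches it to dec_add().
 */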
1632 
/* Generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    CPUMBState *env = cs->env_ptr;
    MicroBlazeCPU *cpu = mb_env_get_cpu(env);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

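    /* gen_tb_start() emits the TB prologue; with icount enabled this
       includes the budget check that can exit before the first insn.  */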
    gen_tb_start(tb);
    do {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be properly
               cleared -- thus we increment the PC here so that the logic
               setting tb->size below does the right thing.  */
            dc->pc += 4;
            break;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

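        /* For icount, an insn that might perform I/O must be the last
           one in the TB; bracket it with gen_io_start()/gen_io_end().  */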
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

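        /* The imm insn supplies the upper 16 bits of the next insn's
           immediate; dec_imm keeps IMM_FLAG alive by zeroing clear_imm,
           while every other insn lets the flag drop here.  */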
        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm) {
            dc->tb_flags &= ~IMM_FLAG;
        }
        dc->pc += 4;

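        /* A branch insn with a delay slot sets delayed_branch to 2; the
           counter reaches zero after the delay slot insn above, at which
           point the branch itself is resolved.  */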
        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG) {
                    do_rti(dc);
                }
                if (dc->tb_flags & DRTB_FLAG) {
                    do_rtb(dc);
                }
                if (dc->tb_flags & DRTE_FLAG) {
                    do_rte(dc);
                }
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* Resolve the branch: indirect jumps go through
                   env_btarget; direct jumps can be chained with
                   goto_tb.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc < next_page_start)
             && num_insns < max_insns);

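    /* If translation stopped between a branch and its delay slot, D_FLAG
       is still set and we must take a dynamic exit so the next TB resumes
       in delay-slot state; otherwise a resolved direct jump lets the
       fallthrough exit chain straight to jmp_pc.  */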
    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else {
            npc = dc->jmp_pc;
        }
    }

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

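    /* Under gdb single-stepping every TB is one insn long; raise
       EXCP_DEBUG so the gdbstub regains control after it.  */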
    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, npc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* Indicate that the hash table must be used
               to find the next TB.  */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* Nothing more to generate.  */
            break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("--------------\n");
#if DISAS_GNU
        log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
#endif
        qemu_log("\nisize=%d osize=%d\n",
                 dc->pc - pc_start, tcg_op_buf_count());
        qemu_log_unlock();
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}

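/* Dump the architectural state, e.g. for the 'info registers' monitor
   command or -d cpu logging.  */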
void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                       int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env || !f) {
        return;
    }

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
                env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
                env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
                env->btaken, env->btarget,
                (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_EIP),
                (env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0) {
            cpu_fprintf(f, "\n");
        }
    }
    cpu_fprintf(f, "\n\n");
}

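/* Create the TCG globals that back the guest-visible state: one global
   per GPR and special register, plus the translator's helper fields
   (imm, btaken/btarget, reservation address/value) in CPUMBState.  */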
void mb_tcg_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    env_debug = tcg_global_mem_new(cpu_env,
                    offsetof(CPUMBState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new(cpu_env,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new(cpu_env,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    env_res_addr = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(cpu_env,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new(cpu_env,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}

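/* Recover the guest PC from the values recorded by tcg_gen_insn_start()
   when an exception or interrupt unwinds in the middle of a TB.  */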
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->sregs[SR_PC] = data[0];
}