/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "qemu/qemu-print.h"

#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
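
/*
 * For example, EXTRACT_FIELD(ir, 0, 4) yields the low five bits of IR,
 * and EXTRACT_FIELD(ir, 21, 25) yields bits [25:21].
 */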

/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_EXIT    DISAS_TARGET_1 /* all cpu state modified dynamically */

/* cpu state besides pc was modified dynamically; update pc to next */
#define DISAS_EXIT_NEXT DISAS_TARGET_2
/* cpu state besides pc was modified dynamically; update pc to btarget */
#define DISAS_EXIT_JUMP DISAS_TARGET_3

static TCGv_i32 cpu_R[32];
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_msr;
static TCGv_i32 cpu_msr_c;
static TCGv_i32 cpu_imm;
static TCGv_i32 cpu_bvalue;
static TCGv_i32 cpu_btarget;
static TCGv_i32 cpu_iflags;
static TCGv cpu_res_addr;
static TCGv_i32 cpu_res_val;

/* This is the state at translation time.  */
typedef struct DisasContext {
    DisasContextBase base;
    const MicroBlazeCPUConfig *cfg;

    TCGv_i32 r0;
    bool r0_set;

    /* Decoder.  */
    uint32_t ext_imm;
    unsigned int tb_flags;
    unsigned int tb_flags_to_set;
    int mem_index;

    /* Condition under which to jump, including NEVER and ALWAYS. */
    TCGCond jmp_cond;

    /* Immediate branch-taken destination, or -1 for indirect. */
    uint32_t jmp_dest;
} DisasContext;

static int typeb_imm(DisasContext *dc, int x)
{
    if (dc->tb_flags & IMM_FLAG) {
        return deposit32(dc->ext_imm, 0, 16, x);
    }
    return x;
}
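
/*
 * For example, the guest sequence
 *     imm   0x1234
 *     addik r5, r0, 0x5678
 * reaches the addik with IMM_FLAG set and ext_imm = 0x12340000, so the
 * effective immediate is deposit32(0x12340000, 0, 16, 0x5678) = 0x12345678.
 */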

/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

static void t_sync_flags(DisasContext *dc)
{
    /* Sync the TB-dependent flags between translator and runtime.  */
    if ((dc->tb_flags ^ dc->base.tb->flags) & IFLAGS_TB_MASK) {
        tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & IFLAGS_TB_MASK);
    }
}

static void gen_raise_exception(DisasContext *dc, uint32_t index)
{
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(index));
    dc->base.is_jmp = DISAS_NORETURN;
}

static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_raise_exception(dc, index);
}

static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
{
    TCGv_i32 tmp = tcg_constant_i32(esr_ec);
    tcg_gen_st_i32(tmp, tcg_env, offsetof(CPUMBState, esr));

    gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&dc->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    dc->base.is_jmp = DISAS_NORETURN;
}

/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE)
        && dc->cfg->illegal_opcode_exception) {
        gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
    }
    return cond;
}

/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    bool cond_user = cond && dc->mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE)) {
        gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
    }
    return cond_user;
}

/*
 * Return true, and log an error, if the current insn is
 * within a delay slot.
 */
static bool invalid_delay_slot(DisasContext *dc, const char *insn_type)
{
    if (dc->tb_flags & D_FLAG) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid insn in delay slot: %s at %08x\n",
                      insn_type, (uint32_t)dc->base.pc_next);
        return true;
    }
    return false;
}

static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (!dc->r0_set) {
        if (dc->r0 == NULL) {
            dc->r0 = tcg_temp_new_i32();
        }
        tcg_gen_movi_i32(dc->r0, 0);
        dc->r0_set = true;
    }
    return dc->r0;
}

static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (dc->r0 == NULL) {
        dc->r0 = tcg_temp_new_i32();
    }
    return dc->r0;
}

static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
                     void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, rb;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    rb = reg_for_read(dc, arg->rb);
    fn(rd, ra, rb);
    return true;
}

static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
                      void (*fn)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fn(rd, ra);
    return true;
}

static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fni(rd, ra, arg->imm);
    return true;
}

static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, imm;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    imm = tcg_constant_i32(arg->imm);

    fn(rd, ra, imm);
    return true;
}

#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cfg->CFG && do_typea(dc, a, SE, FN); }

#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cfg->CFG && do_typea0(dc, a, SE, FN); }

#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cfg->CFG && do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }

#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, tcg_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, tcg_env, ina, inb); }

/* No input carry, but output carry. */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);

    tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);
}

/* Input and output carry. */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
}
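
/*
 * The two add2 steps in gen_addc compute ina + inb + carry-in while
 * tracking the 33rd bit: the first folds the old carry into ina (at
 * most one of the two partial carries can be set), and the second
 * adds inb and leaves the combined carry-out in cpu_msr_c.
 */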

/* Input carry, but no output carry. */
static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_add_i32(out, ina, inb);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(add, true, gen_add)
DO_TYPEA(addc, true, gen_addc)
DO_TYPEA(addk, false, tcg_gen_add_i32)
DO_TYPEA(addkc, true, gen_addkc)

DO_TYPEBV(addi, true, gen_add)
DO_TYPEBV(addic, true, gen_addc)
DO_TYPEBI(addik, false, tcg_gen_addi_i32)
DO_TYPEBV(addikc, true, gen_addkc)

static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    tcg_gen_andi_i32(out, ina, ~imm);
}

DO_TYPEA(and, false, tcg_gen_and_i32)
DO_TYPEBI(andi, false, tcg_gen_andi_i32)
DO_TYPEA(andn, false, tcg_gen_andc_i32)
DO_TYPEBI(andni, false, gen_andni)

static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_sar_i32(out, ina, tmp);
}

static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shr_i32(out, ina, tmp);
}

static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shl_i32(out, ina, tmp);
}

static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);

    if (imm_w + imm_s > 32 || imm_w == 0) {
        /* These inputs have undefined behavior.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_extract_i32(out, ina, imm_s, imm_w);
    }
}
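
/*
 * Example for the extraction above: imm_w = 8, imm_s = 4 copies
 * bits [11:4] of rA into the low bits of rD, zero-extended.
 */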

static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);
    int width = imm_w - imm_s + 1;

    if (imm_w < imm_s) {
        /* These inputs have undefined behavior.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_deposit_i32(out, out, ina, imm_s, width);
    }
}
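
/*
 * Example for the insertion above: imm_s = 4, imm_w = 11 gives
 * width = 8, so the low 8 bits of rA replace bits [11:4] of rD
 * while the other bits of rD are preserved.
 */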

DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)

DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)

DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)

static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_clzi_i32(out, ina, 32);
}

DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)

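/*
 * cmp/cmpu compute rD = rB - rA, but with bit 31 of the result forced
 * to the signed (cmp) or unsigned (cmpu) evaluation of rB < rA, so the
 * MSB of rD directly reflects the comparison.
 */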
static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

DO_TYPEA(cmp, false, gen_cmp)
DO_TYPEA(cmpu, false, gen_cmpu)

ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)

DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)

ENV_WRAPPER2(gen_flt, gen_helper_flt)
ENV_WRAPPER2(gen_fint, gen_helper_fint)
ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)

DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)

/* Does not use ENV_WRAPPER3, because arguments are swapped as well. */
static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divs(out, tcg_env, inb, ina);
}

static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divu(out, tcg_env, inb, ina);
}

DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)

static bool trans_imm(DisasContext *dc, arg_imm *arg)
{
    if (invalid_delay_slot(dc, "imm")) {
        return true;
    }
    dc->ext_imm = arg->imm << 16;
    tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
    dc->tb_flags_to_set = IMM_FLAG;
    return true;
}

static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_muls2_i32(tmp, out, ina, inb);
}

static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulu2_i32(tmp, out, ina, inb);
}

static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulsu2_i32(tmp, out, ina, inb);
}

DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)

DO_TYPEA(or, false, tcg_gen_or_i32)
DO_TYPEBI(ori, false, tcg_gen_ori_i32)

static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
}

static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
}

DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)

/* No input carry, but output carry. */
static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
}
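
/*
 * As in the hardware, the subtract carry is the borrow complement:
 * MSR[C] is set when no borrow occurs, i.e. when rB >= rA unsigned.
 */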

/* Input and output carry. */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_not_i32(tmp, ina);
    tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
}

/* No input or output carry. */
static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input carry, no output carry. */
static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 nota = tcg_temp_new_i32();

    tcg_gen_not_i32(nota, ina);
    tcg_gen_add_i32(out, inb, nota);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(rsub, true, gen_rsub)
DO_TYPEA(rsubc, true, gen_rsubc)
DO_TYPEA(rsubk, false, gen_rsubk)
DO_TYPEA(rsubkc, true, gen_rsubkc)

DO_TYPEBV(rsubi, true, gen_rsub)
DO_TYPEBV(rsubic, true, gen_rsubc)
DO_TYPEBV(rsubik, false, gen_rsubk)
DO_TYPEBV(rsubikc, true, gen_rsubkc)

DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)

static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_sari_i32(out, ina, 1);
}

static void gen_src(TCGv_i32 out, TCGv_i32 ina)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_mov_i32(tmp, cpu_msr_c);
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_extract2_i32(out, ina, tmp, 1);
}
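
/*
 * The extract2 above computes out = (old_carry:ina) >> 1, i.e. a 1-bit
 * right shift through carry: the previous MSR[C] enters at bit 31 and
 * bit 0 of rA becomes the new carry.
 */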

static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_shri_i32(out, ina, 1);
}

DO_TYPEA0(sra, false, gen_sra)
DO_TYPEA0(src, false, gen_src)
DO_TYPEA0(srl, false, gen_srl)

static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_rotri_i32(out, ina, 16);
}

DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
DO_TYPEA0(swaph, false, gen_swaph)

static bool trans_wdic(DisasContext *dc, arg_wdic *a)
{
    /* Cache operations are nops: only check for supervisor mode.  */
    trap_userspace(dc, true);
    return true;
}

DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)

static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
    TCGv ret = tcg_temp_new();

    /* If either reg is r0, set ret to the value of the other reg.  */
    if (ra && rb) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
        tcg_gen_extu_i32_tl(ret, tmp);
    } else if (ra) {
        tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
    } else if (rb) {
        tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
    } else {
        tcg_gen_movi_tl(ret, 0);
    }

    if ((ra == 1 || rb == 1) && dc->cfg->stackprot) {
        gen_helper_stackprot(tcg_env, ret);
    }
    return ret;
}

static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
    TCGv ret = tcg_temp_new();

    /* If ra is r0, the address is just the immediate.  */
    if (ra) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
        tcg_gen_extu_i32_tl(ret, tmp);
    } else {
        tcg_gen_movi_tl(ret, (uint32_t)imm);
    }

    if (ra == 1 && dc->cfg->stackprot) {
        gen_helper_stackprot(tcg_env, ret);
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
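/*
 * Extended-address accesses (lbuea/lwea/swea etc.) form an address of
 * up to 64 bits, with rA supplying the high word and rB the low word.
 * For a 32-bit address-size configuration, or when rA is r0, only the
 * low word is used.
 */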
static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
    int addr_size = dc->cfg->addr_size;
    TCGv ret = tcg_temp_new();

    if (addr_size == 32 || ra == 0) {
        if (rb) {
            tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
        } else {
            tcg_gen_movi_tl(ret, 0);
        }
    } else {
        if (rb) {
            tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
        } else {
            tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
            tcg_gen_shli_tl(ret, ret, 32);
        }
        if (addr_size < 64) {
            /* Mask off out of range bits.  */
            tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
        }
    }
    return ret;
}
#endif

#ifndef CONFIG_USER_ONLY
static void record_unaligned_ess(DisasContext *dc, int rd,
                                 MemOp size, bool store)
{
    uint32_t iflags = tcg_get_insn_start_param(dc->base.insn_start, 1);

    iflags |= ESR_ESS_FLAG;
    iflags |= rd << 5;
    iflags |= store * ESR_S;
    iflags |= (size == MO_32) * ESR_W;

    tcg_set_insn_start_param(dc->base.insn_start, 1, iflags);
}
#endif

static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                    int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
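    /*
     * E.g. for a 16-bit reverse access (size == MO_16), the xor below
     * flips bit 1 of the address, selecting the other halfword within
     * the naturally aligned word; byte accesses flip bits 0 and 1.
     */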
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, false);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);
    return true;
}

static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_lwea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);

    /* lwx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL);
    tcg_gen_mov_tl(cpu_res_addr, addr);

    if (arg->rd) {
        tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
    }

    /* No support for AXI exclusive so always clear C */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    return true;
}

static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way out of the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, true);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);
    return true;
}

static bool trans_sb(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sbr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_sbea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sh(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_shr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_shea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_shi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_sw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_swr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_swea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_swi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    TCGLabel *swx_done = gen_new_label();
    TCGLabel *swx_fail = gen_new_label();
    TCGv_i32 tval;

    /* swx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    /*
     * Compare the address vs the one we used during lwx.
     * On mismatch, the operation fails.  On match, addr dies at the
     * branch, but we know we can use the equal version in the global.
     * In either case, addr is no longer needed.
     */
    tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);

    /*
     * Compare the value loaded during lwx with current contents of
     * the reserved location.
     */
    tval = tcg_temp_new_i32();

    tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
                               reg_for_write(dc, arg->rd),
                               dc->mem_index, MO_TEUL);

    tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);

    /* Success */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    tcg_gen_br(swx_done);

    /* Failure */
    gen_set_label(swx_fail);
    tcg_gen_movi_i32(cpu_msr_c, 1);

    gen_set_label(swx_done);

    /*
     * Prevent the saved address from working again without another ldx.
     * Akin to the pseudocode setting reservation = 0.
     */
    tcg_gen_movi_tl(cpu_res_addr, -1);
    return true;
}

static void setup_dslot(DisasContext *dc, bool type_b)
{
    dc->tb_flags_to_set |= D_FLAG;
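    /*
     * For a type B branch, also record whether the branch itself was
     * IMM-prefixed; the exception paths that consume BIMM_FLAG use this
     * to account for the extra prefix word when unwinding.
     */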
    if (type_b && (dc->tb_flags & IMM_FLAG)) {
        dc->tb_flags_to_set |= BIMM_FLAG;
    }
}

static bool do_branch(DisasContext *dc, int dest_rb, int dest_imm,
                      bool delay, bool abs, int link)
{
    uint32_t add_pc;

    if (invalid_delay_slot(dc, "branch")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    if (link) {
        tcg_gen_movi_i32(cpu_R[link], dc->base.pc_next);
    }

    /* Store the branch taken destination into btarget.  */
    add_pc = abs ? 0 : dc->base.pc_next;
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], add_pc);
    } else {
        dc->jmp_dest = add_pc + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }
    dc->jmp_cond = TCG_COND_ALWAYS;
    return true;
}

#define DO_BR(NAME, NAMEI, DELAY, ABS, LINK)                               \
    static bool trans_##NAME(DisasContext *dc, arg_typea_br *arg)          \
    { return do_branch(dc, arg->rb, 0, DELAY, ABS, LINK ? arg->rd : 0); }  \
    static bool trans_##NAMEI(DisasContext *dc, arg_typeb_br *arg)         \
    { return do_branch(dc, -1, arg->imm, DELAY, ABS, LINK ? arg->rd : 0); }

DO_BR(br, bri, false, false, false)
DO_BR(bra, brai, false, true, false)
DO_BR(brd, brid, true, false, false)
DO_BR(brad, braid, true, true, false)
DO_BR(brld, brlid, true, false, true)
DO_BR(brald, bralid, true, true, true)

static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm,
                   TCGCond cond, int ra, bool delay)
{
    TCGv_i32 zero, next;

    if (invalid_delay_slot(dc, "bcc")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

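    /*
     * MicroBlaze conditional branches always test rA against zero;
     * the movcond below picks between the taken target and the
     * fall-through address accordingly.
     */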
    dc->jmp_cond = cond;

    /* Cache the condition register in cpu_bvalue across any delay slot.  */
    tcg_gen_mov_i32(cpu_bvalue, reg_for_read(dc, ra));

    /* Store the branch taken destination into btarget.  */
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], dc->base.pc_next);
    } else {
        dc->jmp_dest = dc->base.pc_next + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }

    /* Compute the final destination into btarget.  */
    zero = tcg_constant_i32(0);
    next = tcg_constant_i32(dc->base.pc_next + (delay + 1) * 4);
    tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget,
                        reg_for_read(dc, ra), zero,
                        cpu_btarget, next);

    return true;
}

#define DO_BCC(NAME, COND)                                              \
    static bool trans_##NAME(DisasContext *dc, arg_typea_bc *arg)       \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, false); }            \
    static bool trans_##NAME##d(DisasContext *dc, arg_typea_bc *arg)    \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, true); }             \
    static bool trans_##NAME##i(DisasContext *dc, arg_typeb_bc *arg)    \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, false); }          \
    static bool trans_##NAME##id(DisasContext *dc, arg_typeb_bc *arg)   \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, true); }

DO_BCC(beq, TCG_COND_EQ)
DO_BCC(bge, TCG_COND_GE)
DO_BCC(bgt, TCG_COND_GT)
DO_BCC(ble, TCG_COND_LE)
DO_BCC(blt, TCG_COND_LT)
DO_BCC(bne, TCG_COND_NE)

static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brk")) {
        return true;
    }

    tcg_gen_mov_i32(cpu_pc, reg_for_read(dc, arg->rb));
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
    tcg_gen_movi_tl(cpu_res_addr, -1);

    dc->base.is_jmp = DISAS_EXIT;
    return true;
}

static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != 0x8 && imm != 0x18)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brki")) {
        return true;
    }

    tcg_gen_movi_i32(cpu_pc, imm);
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_movi_tl(cpu_res_addr, -1);

#ifdef CONFIG_USER_ONLY
    switch (imm) {
    case 0x8:  /* syscall trap */
        gen_raise_exception_sync(dc, EXCP_SYSCALL);
        break;
    case 0x18: /* debug trap */
        gen_raise_exception_sync(dc, EXCP_DEBUG);
        break;
    default:   /* eliminated with trap_userspace check */
        g_assert_not_reached();
    }
#else
    uint32_t msr_to_set = 0;

    if (imm != 0x18) {
        msr_to_set |= MSR_BIP;
    }
    if (imm == 0x8 || imm == 0x18) {
        /* MSR_UM and MSR_VM are in tb_flags, so we know their value. */
        msr_to_set |= (dc->tb_flags & (MSR_UM | MSR_VM)) << 1;
        tcg_gen_andi_i32(cpu_msr, cpu_msr,
                         ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM));
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, msr_to_set);
    dc->base.is_jmp = DISAS_EXIT;
#endif

    return true;
}

static bool trans_mbar(DisasContext *dc, arg_mbar *arg)
{
    int mbar_imm = arg->imm;

    /* Note that mbar is a specialized branch instruction. */
    if (invalid_delay_slot(dc, "mbar")) {
        return true;
    }

    /* Data access memory barrier.  */
    if ((mbar_imm & 2) == 0) {
        tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
    }

    /* Sleep. */
    if (mbar_imm & 16) {
        if (trap_userspace(dc, true)) {
            /* Sleep is a privileged instruction.  */
            return true;
        }

        t_sync_flags(dc);

        tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                       -offsetof(MicroBlazeCPU, env)
                       +offsetof(CPUState, halted));

        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);

        gen_raise_exception(dc, EXCP_HLT);
    }

    /*
     * If !(mbar_imm & 1), this is an instruction access memory barrier
     * and we need to end the TB so that we recognize self-modified
     * code immediately.
     *
     * However, there are some data mbars that need the TB break
     * (and return to main loop) to recognize interrupts right away.
     * E.g. recognizing a change to an interrupt controller register.
     *
     * Therefore, choose to end the TB always.
     */
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
}

static bool do_rts(DisasContext *dc, arg_typeb_bc *arg, int to_set)
{
    if (trap_userspace(dc, to_set)) {
        return true;
    }
    if (invalid_delay_slot(dc, "rts")) {
        return true;
    }

    dc->tb_flags_to_set |= to_set;
    setup_dslot(dc, true);

    dc->jmp_cond = TCG_COND_ALWAYS;
    dc->jmp_dest = -1;
    tcg_gen_addi_i32(cpu_btarget, reg_for_read(dc, arg->ra), arg->imm);
    return true;
}

#define DO_RTS(NAME, IFLAG) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_rts(dc, arg, IFLAG); }

DO_RTS(rtbd, DRTB_FLAG)
DO_RTS(rtid, DRTI_FLAG)
DO_RTS(rted, DRTE_FLAG)
DO_RTS(rtsd, 0)

static bool trans_zero(DisasContext *dc, arg_zero *arg)
{
    /* If opcode_0_illegal, trap.  */
    if (dc->cfg->opcode_0_illegal) {
        trap_illegal(dc, true);
        return true;
    }
    /*
     * Otherwise, this is "add r0, r0, r0".
     * Continue to trans_add so that MSR[C] gets cleared.
     */
    return false;
}

static void msr_read(DisasContext *dc, TCGv_i32 d)
{
    TCGv_i32 t;

    /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
    t = tcg_temp_new_i32();
    tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
    tcg_gen_or_i32(d, cpu_msr, t);
}

static bool do_msrclrset(DisasContext *dc, arg_type_msr *arg, bool set)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != MSR_C)) {
        return true;
    }

    if (arg->rd) {
        msr_read(dc, cpu_R[arg->rd]);
    }

    /*
     * Handle the carry bit separately.
     * This is the only bit that userspace can modify.
     */
    if (imm & MSR_C) {
        tcg_gen_movi_i32(cpu_msr_c, set);
    }

    /*
     * MSR_C and MSR_CC set above.
     * MSR_PVR is not writable, and is always clear.
     */
    imm &= ~(MSR_C | MSR_CC | MSR_PVR);

    if (imm != 0) {
        if (set) {
            tcg_gen_ori_i32(cpu_msr, cpu_msr, imm);
        } else {
            tcg_gen_andi_i32(cpu_msr, cpu_msr, ~imm);
        }
        dc->base.is_jmp = DISAS_EXIT_NEXT;
    }
    return true;
}

static bool trans_msrclr(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, false);
}

static bool trans_msrset(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, true);
}

static bool trans_mts(DisasContext *dc, arg_mts *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }

#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    if (arg->e && arg->rs != 0x1003) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid extended mts reg 0x%x\n", arg->rs);
        return true;
    }

    TCGv_i32 src = reg_for_read(dc, arg->ra);
    switch (arg->rs) {
    case SR_MSR:
        /* Install MSR_C.  */
        tcg_gen_extract_i32(cpu_msr_c, src, 2, 1);
        /*
         * Clear MSR_C and MSR_CC;
         * MSR_PVR is not writable, and is always clear.
         */
        tcg_gen_andi_i32(cpu_msr, src, ~(MSR_C | MSR_CC | MSR_PVR));
        break;
    case SR_FSR:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, fsr));
        break;
    case 0x800:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, shr));
        break;

    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_write(tcg_env, tmp_ext, tmp_reg, src);
        }
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mts reg 0x%x\n", arg->rs);
        return true;
    }
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
#endif
}

static bool trans_mfs(DisasContext *dc, arg_mfs *arg)
{
    TCGv_i32 dest = reg_for_write(dc, arg->rd);

    if (arg->e) {
        switch (arg->rs) {
        case SR_EAR:
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_ld_i64(t64, tcg_env, offsetof(CPUMBState, ear));
                tcg_gen_extrh_i64_i32(dest, t64);
            }
            return true;
#ifndef CONFIG_USER_ONLY
        case 0x1003: /* TLBLO */
            /* Handled below. */
            break;
#endif
        case 0x2006 ... 0x2009:
            /* High bits of PVR6-9 not implemented. */
            tcg_gen_movi_i32(dest, 0);
            return true;
        default:
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid extended mfs reg 0x%x\n", arg->rs);
            return true;
        }
    }

    switch (arg->rs) {
    case SR_PC:
        tcg_gen_movi_i32(dest, dc->base.pc_next);
        break;
    case SR_MSR:
        msr_read(dc, dest);
        break;
    case SR_EAR:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();
            tcg_gen_ld_i64(t64, tcg_env, offsetof(CPUMBState, ear));
            tcg_gen_extrl_i64_i32(dest, t64);
        }
        break;
    case SR_ESR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, esr));
        break;
    case SR_FSR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, fsr));
        break;
    case SR_BTR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, btr));
        break;
    case SR_EDR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, edr));
        break;
    case 0x800:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, shr));
        break;

#ifndef CONFIG_USER_ONLY
    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_read(dest, tcg_env, tmp_ext, tmp_reg);
        }
        break;
#endif

    case 0x2000 ... 0x200c:
        tcg_gen_ld_i32(dest, tcg_env,
                       offsetof(MicroBlazeCPU, cfg.pvr_regs[arg->rs - 0x2000])
                       - offsetof(MicroBlazeCPU, env));
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mfs reg 0x%x\n", arg->rs);
        break;
    }
    return true;
}

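/*
 * The return-from-interrupt/break/exception updates below rely on the
 * MSR layout: MSR[UMS]/MSR[VMS] sit one bit above MSR[UM]/MSR[VM], so
 * shifting MSR right by 1 moves the saved copies back into place.
 */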
static void do_rti(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_IE);
    tcg_gen_andi_i32(tmp, tmp, MSR_VM | MSR_UM);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}

static void do_rtb(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_BIP));
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}

static void do_rte(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_EE);
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_EIP));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}

/* Insns connected to FSL or AXI stream attached devices.  */
static bool do_get(DisasContext *dc, int rd, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_constant_i32(ctrl);
    gen_helper_get(reg_for_write(dc, rd), t_id, t_ctrl);
    return true;
}

static bool trans_get(DisasContext *dc, arg_get *arg)
{
    return do_get(dc, arg->rd, 0, arg->imm, arg->ctrl);
}

static bool trans_getd(DisasContext *dc, arg_getd *arg)
{
    return do_get(dc, arg->rd, arg->rb, 0, arg->ctrl);
}

static bool do_put(DisasContext *dc, int ra, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_constant_i32(ctrl);
    gen_helper_put(t_id, t_ctrl, reg_for_read(dc, ra));
    return true;
}

static bool trans_put(DisasContext *dc, arg_put *arg)
{
    return do_put(dc, arg->ra, 0, arg->imm, arg->ctrl);
}

static bool trans_putd(DisasContext *dc, arg_putd *arg)
{
    return do_put(dc, arg->ra, arg->rb, 0, arg->ctrl);
}

static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    int bound;

    dc->cfg = &cpu->cfg;
    dc->tb_flags = dc->base.tb->flags;
    dc->ext_imm = dc->base.tb->cs_base;
    dc->r0 = NULL;
    dc->r0_set = false;
    dc->mem_index = cpu_mmu_index(cs, false);
    dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
    dc->jmp_dest = -1;

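    /*
     * Bound the number of insns to those remaining on the page, e.g.
     * 4 insns when pc_first is 16 bytes below a page boundary.
     */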
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
{
}

static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK);
}

static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    uint32_t ir;

    /* TODO: This should raise an exception, not terminate qemu. */
    if (dc->base.pc_next & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
                  (uint32_t)dc->base.pc_next);
    }

    dc->tb_flags_to_set = 0;

    ir = cpu_ldl_code(cpu_env(cs), dc->base.pc_next);
    if (!decode(dc, ir)) {
        trap_illegal(dc, true);
    }

    if (dc->r0) {
        dc->r0 = NULL;
        dc->r0_set = false;
    }

    /* Discard the imm global when its contents cannot be used. */
    if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
        tcg_gen_discard_i32(cpu_imm);
    }

    dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);
    dc->tb_flags |= dc->tb_flags_to_set;
    dc->base.pc_next += 4;

    if (dc->jmp_cond != TCG_COND_NEVER && !(dc->tb_flags & D_FLAG)) {
        /*
         * Finish any pending return-from-interrupt/break/exception branch.
         */
        uint32_t rt_ibe = dc->tb_flags & (DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
        if (unlikely(rt_ibe != 0)) {
            dc->tb_flags &= ~(DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
            if (rt_ibe & DRTI_FLAG) {
                do_rti(dc);
            } else if (rt_ibe & DRTB_FLAG) {
                do_rtb(dc);
            } else {
                do_rte(dc);
            }
        }

        /* Complete the branch, ending the TB. */
        switch (dc->base.is_jmp) {
        case DISAS_NORETURN:
            /*
             * E.g. illegal insn in a delay slot.  We've already exited
             * and will handle D_FLAG in mb_cpu_do_interrupt.
             */
            break;
        case DISAS_NEXT:
            /*
             * Normal insn in a delay slot.
             * However, the return-from-exception type insns should
             * return to the main loop, as they have adjusted MSR.
             */
            dc->base.is_jmp = (rt_ibe ? DISAS_EXIT_JUMP : DISAS_JUMP);
            break;
        case DISAS_EXIT_NEXT:
            /*
             * E.g. mts insn in a delay slot.  Continue with btarget,
             * but still return to the main loop.
             */
            dc->base.is_jmp = DISAS_EXIT_JUMP;
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        /* We have already exited the TB. */
        return;
    }

    t_sync_flags(dc);

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(dc, 0, dc->base.pc_next);
        return;

    case DISAS_EXIT:
        break;
    case DISAS_EXIT_NEXT:
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
        break;
    case DISAS_EXIT_JUMP:
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        break;

    case DISAS_JUMP:
        if (dc->jmp_dest != -1 && !(tb_cflags(dc->base.tb) & CF_NO_GOTO_TB)) {
            /* Direct jump. */
            tcg_gen_discard_i32(cpu_btarget);

            if (dc->jmp_cond != TCG_COND_ALWAYS) {
                /* Conditional direct jump. */
                TCGLabel *taken = gen_new_label();
                TCGv_i32 tmp = tcg_temp_new_i32();

                /*
                 * Copy bvalue to a temp now, so we can discard bvalue.
                 * This can avoid writing bvalue to memory when the
                 * delay slot cannot raise an exception.
                 */
                tcg_gen_mov_i32(tmp, cpu_bvalue);
                tcg_gen_discard_i32(cpu_bvalue);

                tcg_gen_brcondi_i32(dc->jmp_cond, tmp, 0, taken);
                gen_goto_tb(dc, 1, dc->base.pc_next);
                gen_set_label(taken);
            }
            gen_goto_tb(dc, 0, dc->jmp_dest);
            return;
        }

        /* Indirect jump (or direct jump w/ goto_tb disabled) */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        tcg_gen_lookup_and_goto_ptr();
        return;

    default:
        g_assert_not_reached();
    }

    /* Finish DISAS_EXIT_* */
    if (unlikely(cs->singlestep_enabled)) {
        gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
}

static void mb_tr_disas_log(const DisasContextBase *dcb,
                            CPUState *cs, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcb->pc_first));
    target_disas(logfile, cs, dcb->pc_first, dcb->tb->size);
}

static const TranslatorOps mb_tr_ops = {
    .init_disas_context = mb_tr_init_disas_context,
    .tb_start           = mb_tr_tb_start,
    .insn_start         = mb_tr_insn_start,
    .translate_insn     = mb_tr_translate_insn,
    .tb_stop            = mb_tr_tb_stop,
    .disas_log          = mb_tr_disas_log,
};

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc;
    translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    CPUMBState *env = cpu_env(cs);
    uint32_t iflags;
    int i;

    qemu_fprintf(f, "pc=0x%08x msr=0x%05x mode=%s(saved=%s) eip=%d ie=%d\n",
                 env->pc, env->msr,
                 (env->msr & MSR_UM) ? "user" : "kernel",
                 (env->msr & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->msr & MSR_EIP),
                 (bool)(env->msr & MSR_IE));

    iflags = env->iflags;
    qemu_fprintf(f, "iflags: 0x%08x", iflags);
    if (iflags & IMM_FLAG) {
        qemu_fprintf(f, " IMM(0x%08x)", env->imm);
    }
    if (iflags & BIMM_FLAG) {
        qemu_fprintf(f, " BIMM");
    }
    if (iflags & D_FLAG) {
        qemu_fprintf(f, " D(btarget=0x%08x)", env->btarget);
    }
    if (iflags & DRTI_FLAG) {
        qemu_fprintf(f, " DRTI");
    }
    if (iflags & DRTE_FLAG) {
        qemu_fprintf(f, " DRTE");
    }
    if (iflags & DRTB_FLAG) {
        qemu_fprintf(f, " DRTB");
    }
    if (iflags & ESR_ESS_FLAG) {
        qemu_fprintf(f, " ESR_ESS(0x%04x)", iflags & ESR_ESS_MASK);
    }

    qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
                 "ear=0x" TARGET_FMT_lx " slr=0x%x shr=0x%x\n",
                 env->esr, env->fsr, env->btr, env->edr,
                 env->ear, env->slr, env->shr);

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%08x%c",
                     i, env->regs[i], i % 4 == 3 ? '\n' : ' ');
    }
    qemu_fprintf(f, "\n");
}

void mb_tcg_init(void)
{
#define R(X)  { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }

    static const struct {
        TCGv_i32 *var; int ofs; char name[8];
    } i32s[] = {
        /*
         * Note that r0 is handled specially in reg_for_read
         * and reg_for_write.  Nothing should touch cpu_R[0].
         * Leave that element NULL, which will assert quickly
         * inside the tcg generator functions.
         */
               R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
        R(8),  R(9),  R(10), R(11), R(12), R(13), R(14), R(15),
        R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
        R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),

        SP(pc),
        SP(msr),
        SP(msr_c),
        SP(imm),
        SP(iflags),
        SP(bvalue),
        SP(btarget),
        SP(res_val),
    };

#undef R
#undef SP

    for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
        *i32s[i].var =
          tcg_global_mem_new_i32(tcg_env, i32s[i].ofs, i32s[i].name);
    }

    cpu_res_addr =
        tcg_global_mem_new(tcg_env, offsetof(CPUMBState, res_addr), "res_addr");
}
1885