xref: /qemu/target/microblaze/translate.c (revision 4abc8923)
1 /*
2  *  Xilinx MicroBlaze emulation for qemu: main translation routines.
3  *
4  *  Copyright (c) 2009 Edgar E. Iglesias.
5  *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "exec/cpu_ldst.h"
26 #include "tcg/tcg-op.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
29 #include "exec/cpu_ldst.h"
30 #include "exec/translator.h"
31 #include "qemu/qemu-print.h"
32 
33 #include "exec/log.h"
34 
35 #define HELPER_H "helper.h"
36 #include "exec/helper-info.c.inc"
37 #undef  HELPER_H
38 
/*
 * Extract the inclusive bit field [start, end] from src.
 * All arguments are fully parenthesized so that expression
 * arguments (e.g. "1 + 1") expand correctly.
 */
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> (start)) & ((1 << ((end) - (start) + 1)) - 1))
41 
/*
 * is_jmp field values.  These extend the generic DISAS_TARGET_* codes to
 * record how much cpu state has already been written back dynamically
 * when the translation block ends.
 */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_EXIT    DISAS_TARGET_1 /* all cpu state modified dynamically */

/* cpu state besides pc was modified dynamically; update pc to next */
#define DISAS_EXIT_NEXT DISAS_TARGET_2
/* cpu state besides pc was modified dynamically; update pc to btarget */
#define DISAS_EXIT_JUMP DISAS_TARGET_3
50 
/* TCG globals holding the guest-visible MicroBlaze cpu state. */
static TCGv_i32 cpu_R[32];     /* general registers; r0 reads as 0 (see reg_for_read) */
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_msr;
static TCGv_i32 cpu_msr_c;     /* MSR carry bit, tracked separately from cpu_msr */
static TCGv_i32 cpu_imm;       /* high half recorded by the imm prefix insn */
static TCGv_i32 cpu_bvalue;
static TCGv_i32 cpu_btarget;
static TCGv_i32 cpu_iflags;    /* runtime copy of the IFLAGS_TB_MASK bits */
static TCGv cpu_res_addr;      /* lwx reservation address; -1 after swx */
static TCGv_i32 cpu_res_val;   /* value loaded by lwx, compared by swx */
61 
/* This is the state at translation time.  */
typedef struct DisasContext {
    DisasContextBase base;
    const MicroBlazeCPUConfig *cfg;

    /* Scratch temp standing in for register r0; NULL until first needed. */
    TCGv_i32 r0;
    /* True once r0 has been zeroed for a read of register 0. */
    bool r0_set;

    /* Decoder.  */
    uint32_t ext_imm;             /* high 16 bits saved by an imm prefix insn */
    unsigned int tb_flags;        /* iflags state while translating */
    unsigned int tb_flags_to_set; /* flags to take effect for the next insn */
    int mem_index;                /* mmu index for memory accesses */

    /* Condition under which to jump, including NEVER and ALWAYS. */
    TCGCond jmp_cond;

    /* Immediate branch-taken destination, or -1 for indirect. */
    uint32_t jmp_dest;
} DisasContext;
82 
83 static int typeb_imm(DisasContext *dc, int x)
84 {
85     if (dc->tb_flags & IMM_FLAG) {
86         return deposit32(dc->ext_imm, 0, 16, x);
87     }
88     return x;
89 }
90 
91 /* Include the auto-generated decoder.  */
92 #include "decode-insns.c.inc"
93 
94 static void t_sync_flags(DisasContext *dc)
95 {
96     /* Synch the tb dependent flags between translator and runtime.  */
97     if ((dc->tb_flags ^ dc->base.tb->flags) & IFLAGS_TB_MASK) {
98         tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & IFLAGS_TB_MASK);
99     }
100 }
101 
102 static void gen_raise_exception(DisasContext *dc, uint32_t index)
103 {
104     gen_helper_raise_exception(tcg_env, tcg_constant_i32(index));
105     dc->base.is_jmp = DISAS_NORETURN;
106 }
107 
/*
 * Raise an exception with iflags and pc synchronized to the current
 * instruction, so the handler observes precise state.
 */
static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_raise_exception(dc, index);
}
114 
115 static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
116 {
117     TCGv_i32 tmp = tcg_constant_i32(esr_ec);
118     tcg_gen_st_i32(tmp, tcg_env, offsetof(CPUMBState, esr));
119 
120     gen_raise_exception_sync(dc, EXCP_HW_EXCP);
121 }
122 
/*
 * Emit a direct jump to dest, using tb chaining when allowed,
 * otherwise an indirect lookup.  Ends the current block.
 */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&dc->base, dest)) {
        /* Chained exit: jump stub first, then pc update, then exit. */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        /* Indirect exit via the tb lookup cache. */
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    dc->base.is_jmp = DISAS_NORETURN;
}
135 
136 /*
137  * Returns true if the insn an illegal operation.
138  * If exceptions are enabled, an exception is raised.
139  */
140 static bool trap_illegal(DisasContext *dc, bool cond)
141 {
142     if (cond && (dc->tb_flags & MSR_EE)
143         && dc->cfg->illegal_opcode_exception) {
144         gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
145     }
146     return cond;
147 }
148 
149 /*
150  * Returns true if the insn is illegal in userspace.
151  * If exceptions are enabled, an exception is raised.
152  */
153 static bool trap_userspace(DisasContext *dc, bool cond)
154 {
155     bool cond_user = cond && dc->mem_index == MMU_USER_IDX;
156 
157     if (cond_user && (dc->tb_flags & MSR_EE)) {
158         gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
159     }
160     return cond_user;
161 }
162 
163 /*
164  * Return true, and log an error, if the current insn is
165  * within a delay slot.
166  */
167 static bool invalid_delay_slot(DisasContext *dc, const char *insn_type)
168 {
169     if (dc->tb_flags & D_FLAG) {
170         qemu_log_mask(LOG_GUEST_ERROR,
171                       "Invalid insn in delay slot: %s at %08x\n",
172                       insn_type, (uint32_t)dc->base.pc_next);
173         return true;
174     }
175     return false;
176 }
177 
/*
 * Return the TCG value for reading guest register reg.
 * Register 0 always reads as zero: a shared scratch temp is created
 * on demand and lazily zeroed (tracked by r0_set).
 */
static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (!dc->r0_set) {
        if (dc->r0 == NULL) {
            dc->r0 = tcg_temp_new_i32();
        }
        tcg_gen_movi_i32(dc->r0, 0);
        dc->r0_set = true;
    }
    return dc->r0;
}
192 
/*
 * Return the TCG value for writing guest register reg.
 * Writes to register 0 are redirected into the scratch temp so the
 * architectural r0 is never modified.  Note that r0_set is NOT set
 * here: a later read of r0 must still zero the temp.
 */
static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (dc->r0 == NULL) {
        dc->r0 = tcg_temp_new_i32();
    }
    return dc->r0;
}
203 
204 static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
205                      void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
206 {
207     TCGv_i32 rd, ra, rb;
208 
209     if (arg->rd == 0 && !side_effects) {
210         return true;
211     }
212 
213     rd = reg_for_write(dc, arg->rd);
214     ra = reg_for_read(dc, arg->ra);
215     rb = reg_for_read(dc, arg->rb);
216     fn(rd, ra, rb);
217     return true;
218 }
219 
220 static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
221                       void (*fn)(TCGv_i32, TCGv_i32))
222 {
223     TCGv_i32 rd, ra;
224 
225     if (arg->rd == 0 && !side_effects) {
226         return true;
227     }
228 
229     rd = reg_for_write(dc, arg->rd);
230     ra = reg_for_read(dc, arg->ra);
231     fn(rd, ra);
232     return true;
233 }
234 
235 static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
236                          void (*fni)(TCGv_i32, TCGv_i32, int32_t))
237 {
238     TCGv_i32 rd, ra;
239 
240     if (arg->rd == 0 && !side_effects) {
241         return true;
242     }
243 
244     rd = reg_for_write(dc, arg->rd);
245     ra = reg_for_read(dc, arg->ra);
246     fni(rd, ra, arg->imm);
247     return true;
248 }
249 
250 static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
251                          void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
252 {
253     TCGv_i32 rd, ra, imm;
254 
255     if (arg->rd == 0 && !side_effects) {
256         return true;
257     }
258 
259     rd = reg_for_write(dc, arg->rd);
260     ra = reg_for_read(dc, arg->ra);
261     imm = tcg_constant_i32(arg->imm);
262 
263     fn(rd, ra, imm);
264     return true;
265 }
266 
/*
 * Boilerplate generating the decodetree trans_* entry points:
 *  - DO_TYPEA*:    register-register forms
 *  - DO_TYPEBI*:   register-immediate forms, immediate passed directly
 *  - DO_TYPEBV:    register-immediate forms, immediate as TCG constant
 *  - *_CFG:        additionally gated on a MicroBlazeCPUConfig field
 *  - ENV_WRAPPERn: adapt helpers that take tcg_env as first argument
 * SE is the side_effects flag forwarded to the do_type* workers.
 */
#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cfg->CFG && do_typea(dc, a, SE, FN); }

#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cfg->CFG && do_typea0(dc, a, SE, FN); }

#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cfg->CFG && do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }

#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, tcg_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, tcg_env, ina, inb); }
302 
/* No input carry, but output carry. */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);

    /* Double-word add of zero-extended operands; high word is the carry. */
    tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);
}
310 
/* Input and output carry. */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* tmp:carry = ina + carry-in; then out:carry = tmp + carry + inb. */
    tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
}
320 
/* Input carry, but no output carry. */
static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    /* Sum the operands first so that out may alias ina or inb. */
    tcg_gen_add_i32(out, ina, inb);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}
327 
/* Register and immediate add forms, with and without carry in/out. */
DO_TYPEA(add, true, gen_add)
DO_TYPEA(addc, true, gen_addc)
DO_TYPEA(addk, false, tcg_gen_add_i32)
DO_TYPEA(addkc, true, gen_addkc)

DO_TYPEBV(addi, true, gen_add)
DO_TYPEBV(addic, true, gen_addc)
DO_TYPEBI(addik, false, tcg_gen_addi_i32)
DO_TYPEBV(addikc, true, gen_addkc)
337 
/* andni: bitwise and with the complement of the immediate. */
static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    tcg_gen_andi_i32(out, ina, ~imm);
}

DO_TYPEA(and, false, tcg_gen_and_i32)
DO_TYPEBI(andi, false, tcg_gen_andi_i32)
DO_TYPEA(andn, false, tcg_gen_andc_i32)
DO_TYPEBI(andni, false, gen_andni)
347 
348 static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
349 {
350     TCGv_i32 tmp = tcg_temp_new_i32();
351     tcg_gen_andi_i32(tmp, inb, 31);
352     tcg_gen_sar_i32(out, ina, tmp);
353 }
354 
355 static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
356 {
357     TCGv_i32 tmp = tcg_temp_new_i32();
358     tcg_gen_andi_i32(tmp, inb, 31);
359     tcg_gen_shr_i32(out, ina, tmp);
360 }
361 
362 static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
363 {
364     TCGv_i32 tmp = tcg_temp_new_i32();
365     tcg_gen_andi_i32(tmp, inb, 31);
366     tcg_gen_shl_i32(out, ina, tmp);
367 }
368 
369 static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
370 {
371     /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
372     int imm_w = extract32(imm, 5, 5);
373     int imm_s = extract32(imm, 0, 5);
374 
375     if (imm_w + imm_s > 32 || imm_w == 0) {
376         /* These inputs have an undefined behavior.  */
377         qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
378                       imm_w, imm_s);
379     } else {
380         tcg_gen_extract_i32(out, ina, imm_s, imm_w);
381     }
382 }
383 
/* Bit-field insert: deposit ina into bits [imm_s, imm_w] of out. */
static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);
    int width = imm_w - imm_s + 1;

    if (imm_w < imm_s) {
        /* These inputs have an undefined behavior.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        /* out serves as both background and destination of the deposit. */
        tcg_gen_deposit_i32(out, out, ina, imm_s, width);
    }
}
399 
/* Barrel-shifter insns, present only when cfg->use_barrel is set. */
DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)

DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)

DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)

/* Count leading zeros; a zero input yields 32. */
static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_clzi_i32(out, ina, 32);
}

DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)
417 
/*
 * cmp/cmpu: out = inb - ina, with the sign bit replaced by the true
 * signed/unsigned "inb < ina" result, so bit 31 is correct even when
 * the subtraction overflows.
 */
static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

DO_TYPEA(cmp, false, gen_cmp)
DO_TYPEA(cmpu, false, gen_cmpu)
438 
/*
 * Floating point insns all route through tcg_env-taking helpers.
 * The three-operand group requires use_fpu; conversions and sqrt
 * require a full fpu (use_fpu >= 2).
 */
ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)

DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)

ENV_WRAPPER2(gen_flt, gen_helper_flt)
ENV_WRAPPER2(gen_fint, gen_helper_fint)
ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)

DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)
470 
/* Does not use ENV_WRAPPER3, because arguments are swapped as well. */
/* idiv rd, ra, rb computes rb / ra, hence the (inb, ina) order. */
static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divs(out, tcg_env, inb, ina);
}

static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divu(out, tcg_env, inb, ina);
}

DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)
484 
485 static bool trans_imm(DisasContext *dc, arg_imm *arg)
486 {
487     if (invalid_delay_slot(dc, "imm")) {
488         return true;
489     }
490     dc->ext_imm = arg->imm << 16;
491     tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
492     dc->tb_flags_to_set = IMM_FLAG;
493     return true;
494 }
495 
/* High-half multiplies: tmp receives the discarded low 32 bits. */
static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_muls2_i32(tmp, out, ina, inb);
}

static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulu2_i32(tmp, out, ina, inb);
}

static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulsu2_i32(tmp, out, ina, inb);
}

/* The high-half forms require the extended (use_hw_mul >= 2) multiplier. */
DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)
519 
DO_TYPEA(or, false, tcg_gen_or_i32)
DO_TYPEBI(ori, false, tcg_gen_ori_i32)

/* pcmpeq/pcmpne: set out to the 0/1 result of the comparison. */
static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
}

static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
}

DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)
536 
/* No input carry, but output carry. */
static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    /* Carry is the inverse of borrow: set when inb >= ina (unsigned). */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input and output carry. */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* inb - ina - !C computed as inb + ~ina + C via two add2 steps. */
    tcg_gen_not_i32(tmp, ina);
    tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
}

/* No input or output carry. */
static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input carry, no output carry. */
static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 nota = tcg_temp_new_i32();

    /* out = inb + ~ina + C, discarding the carry out. */
    tcg_gen_not_i32(nota, ina);
    tcg_gen_add_i32(out, inb, nota);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(rsub, true, gen_rsub)
DO_TYPEA(rsubc, true, gen_rsubc)
DO_TYPEA(rsubk, false, gen_rsubk)
DO_TYPEA(rsubkc, true, gen_rsubkc)

DO_TYPEBV(rsubi, true, gen_rsub)
DO_TYPEBV(rsubic, true, gen_rsubc)
DO_TYPEBV(rsubik, false, gen_rsubk)
DO_TYPEBV(rsubikc, true, gen_rsubkc)
580 
DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)

/* Shift right arithmetic by one; bit 0 becomes the carry. */
static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_sari_i32(out, ina, 1);
}

/* Shift right through carry: old carry enters bit 31, bit 0 exits. */
static void gen_src(TCGv_i32 out, TCGv_i32 ina)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Save the incoming carry before it is overwritten below. */
    tcg_gen_mov_i32(tmp, cpu_msr_c);
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_extract2_i32(out, ina, tmp, 1);
}

/* Shift right logical by one; bit 0 becomes the carry. */
static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_shri_i32(out, ina, 1);
}

DO_TYPEA0(sra, false, gen_sra)
DO_TYPEA0(src, false, gen_src)
DO_TYPEA0(srl, false, gen_srl)
608 
/* Swap the two halfwords, i.e. rotate by 16 bits. */
static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_rotri_i32(out, ina, 16);
}

DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
DO_TYPEA0(swaph, false, gen_swaph)

static bool trans_wdic(DisasContext *dc, arg_wdic *a)
{
    /* Cache operations are nops: only check for supervisor mode.  */
    trap_userspace(dc, true);
    return true;
}

DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)
626 
627 static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
628 {
629     TCGv ret = tcg_temp_new();
630 
631     /* If any of the regs is r0, set t to the value of the other reg.  */
632     if (ra && rb) {
633         TCGv_i32 tmp = tcg_temp_new_i32();
634         tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
635         tcg_gen_extu_i32_tl(ret, tmp);
636     } else if (ra) {
637         tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
638     } else if (rb) {
639         tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
640     } else {
641         tcg_gen_movi_tl(ret, 0);
642     }
643 
644     if ((ra == 1 || rb == 1) && dc->cfg->stackprot) {
645         gen_helper_stackprot(tcg_env, ret);
646     }
647     return ret;
648 }
649 
650 static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
651 {
652     TCGv ret = tcg_temp_new();
653 
654     /* If any of the regs is r0, set t to the value of the other reg.  */
655     if (ra) {
656         TCGv_i32 tmp = tcg_temp_new_i32();
657         tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
658         tcg_gen_extu_i32_tl(ret, tmp);
659     } else {
660         tcg_gen_movi_tl(ret, (uint32_t)imm);
661     }
662 
663     if (ra == 1 && dc->cfg->stackprot) {
664         gen_helper_stackprot(tcg_env, ret);
665     }
666     return ret;
667 }
668 
#ifndef CONFIG_USER_ONLY
/*
 * Extended address for the *ea insns: with more than 32 address bits
 * configured, ra:rb form a 64-bit pair (ra is the high word);
 * otherwise only rb contributes.  r0 reads as zero.
 */
static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
    int addr_size = dc->cfg->addr_size;
    TCGv ret = tcg_temp_new();

    if (addr_size == 32 || ra == 0) {
        if (rb) {
            tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
        } else {
            tcg_gen_movi_tl(ret, 0);
        }
    } else {
        if (rb) {
            tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
        } else {
            tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
            tcg_gen_shli_tl(ret, ret, 32);
        }
        if (addr_size < 64) {
            /* Mask off out of range bits.  */
            tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
        }
    }
    return ret;
}
#endif
696 
#ifndef CONFIG_USER_ONLY
/*
 * Record the details needed for an unaligned-access fault (ESR_ESS
 * flag, destination register, store and word-size bits) into
 * insn_start parameter 1 for this instruction.
 */
static void record_unaligned_ess(DisasContext *dc, int rd,
                                 MemOp size, bool store)
{
    uint32_t iflags = tcg_get_insn_start_param(dc->base.insn_start, 1);

    iflags |= ESR_ESS_FLAG;
    iflags |= rd << 5;
    iflags |= store * ESR_S;
    iflags |= (size == MO_32) * ESR_W;

    tcg_set_insn_start_param(dc->base.insn_start, 1, iflags);
}
#endif
711 
/*
 * Emit a load of the given size/endianness into register rd.
 * @rev selects the byte-reversed variants: the address is adjusted
 * wrt endianness and the data lanes are byteswapped.
 */
static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                    int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, false);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);
    return true;
}
749 
/*
 * Byte and halfword loads.  Each trans_* computes the effective
 * address for its addressing mode, then defers to do_load.  The "r"
 * variants are byte-reversed; the "ea" variants are privileged
 * extended-address accesses using the no-mmu index.
 */
static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}
811 
/* Word loads, including the exclusive lwx. */
static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_lwea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

/* lwx: load word and record a reservation for a later swx. */
static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);

    /* lwx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL);
    tcg_gen_mov_tl(cpu_res_addr, addr);

    if (arg->rd) {
        tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
    }

    /* No support for AXI exclusive so always clear C */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    return true;
}
861 
/*
 * Emit a store of register rd with the given size/endianness.
 * @rev selects the byte-reversed variants: the address is adjusted
 * wrt endianness and the data lanes are byteswapped.
 */
static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, true);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);
    return true;
}
899 
/*
 * Byte and halfword stores, mirroring the load expanders: plain,
 * byte-reversed ("r"), privileged extended-address ("ea"), and
 * immediate-offset ("i") forms.
 */
static bool trans_sb(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sbr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_sbea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sh(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_shr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_shea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_shi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}
961 
/* Word stores (the conditional swx follows separately). */
static bool trans_sw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_swr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_swea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_swi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}
992 
/*
 * swx: store word conditional.  Succeeds (clearing MSR[C]) only when
 * the address matches the lwx reservation and memory still holds the
 * reserved value; the compare-and-swap performs the actual store.
 */
static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    TCGLabel *swx_done = gen_new_label();
    TCGLabel *swx_fail = gen_new_label();
    TCGv_i32 tval;

    /* swx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    /*
     * Compare the address vs the one we used during lwx.
     * On mismatch, the operation fails.  On match, addr dies at the
     * branch, but we know we can use the equal version in the global.
     * In either case, addr is no longer needed.
     */
    tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);

    /*
     * Compare the value loaded during lwx with current contents of
     * the reserved location.
     */
    tval = tcg_temp_new_i32();

    tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
                               reg_for_write(dc, arg->rd),
                               dc->mem_index, MO_TEUL);

    tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);

    /* Success */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    tcg_gen_br(swx_done);

    /* Failure */
    gen_set_label(swx_fail);
    tcg_gen_movi_i32(cpu_msr_c, 1);

    gen_set_label(swx_done);

    /*
     * Prevent the saved address from working again without another ldx.
     * Akin to the pseudocode setting reservation = 0.
     */
    tcg_gen_movi_tl(cpu_res_addr, -1);
    return true;
}
1040 
1041 static void setup_dslot(DisasContext *dc, bool type_b)
1042 {
1043     dc->tb_flags_to_set |= D_FLAG;
1044     if (type_b && (dc->tb_flags & IMM_FLAG)) {
1045         dc->tb_flags_to_set |= BIMM_FLAG;
1046     }
1047 }
1048 
/*
 * Common helper for unconditional branches.
 * dest_rb >= 0 selects the register form (target from rB); dest_rb < 0
 * selects the immediate form (target from dest_imm).  abs selects an
 * absolute target rather than pc-relative; link != 0 names the register
 * that receives the branch's own address.
 */
static bool do_branch(DisasContext *dc, int dest_rb, int dest_imm,
                      bool delay, bool abs, int link)
{
    uint32_t add_pc;

    if (invalid_delay_slot(dc, "branch")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    if (link) {
        /* pc_next has not been advanced yet: this is the branch's address. */
        tcg_gen_movi_i32(cpu_R[link], dc->base.pc_next);
    }

    /* Store the branch taken destination into btarget.  */
    add_pc = abs ? 0 : dc->base.pc_next;
    if (dest_rb > 0) {
        /* Dynamic target: jmp_dest = -1 marks it unknown at translate time. */
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], add_pc);
    } else {
        /*
         * Immediate form, or register form with rB == r0 (which always
         * reads as zero and so folds into the constant path with
         * dest_imm == 0).  The target is known at translate time.
         */
        dc->jmp_dest = add_pc + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }
    dc->jmp_cond = TCG_COND_ALWAYS;
    return true;
}
1077 
/*
 * Unconditional branch variants.  Each DO_BR use expands to a
 * register-form handler (NAME, type A) and an immediate-form handler
 * (NAMEI, type B).  DELAY: has a delay slot; ABS: absolute rather than
 * pc-relative target; LINK: store the branch's address into rD.
 */
#define DO_BR(NAME, NAMEI, DELAY, ABS, LINK)                               \
    static bool trans_##NAME(DisasContext *dc, arg_typea_br *arg)          \
    { return do_branch(dc, arg->rb, 0, DELAY, ABS, LINK ? arg->rd : 0); }  \
    static bool trans_##NAMEI(DisasContext *dc, arg_typeb_br *arg)         \
    { return do_branch(dc, -1, arg->imm, DELAY, ABS, LINK ? arg->rd : 0); }

DO_BR(br, bri, false, false, false)
DO_BR(bra, brai, false, true, false)
DO_BR(brd, brid, true, false, false)
DO_BR(brad, braid, true, true, false)
DO_BR(brld, brlid, true, false, true)
DO_BR(brald, bralid, true, true, true)
1090 
/*
 * Common helper for conditional branches: branch taken when
 * (rA <cond> 0) holds.  The final (taken or fall-through) destination
 * is computed into cpu_btarget via movcond, so the delay slot and the
 * TB epilogue need only consult btarget.
 */
static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm,
                   TCGCond cond, int ra, bool delay)
{
    TCGv_i32 zero, next;

    if (invalid_delay_slot(dc, "bcc")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    dc->jmp_cond = cond;

    /* Cache the condition register in cpu_bvalue across any delay slot.  */
    tcg_gen_mov_i32(cpu_bvalue, reg_for_read(dc, ra));

    /* Store the branch taken destination into btarget.  */
    if (dest_rb > 0) {
        /* Dynamic target; jmp_dest = -1 marks it unknown at translate time. */
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], dc->base.pc_next);
    } else {
        /* Immediate form (or rB == r0): pc-relative constant target. */
        dc->jmp_dest = dc->base.pc_next + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }

    /* Compute the final destination into btarget.  */
    zero = tcg_constant_i32(0);
    /* Fall-through address: skip the branch and its delay slot, if any. */
    next = tcg_constant_i32(dc->base.pc_next + (delay + 1) * 4);
    tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget,
                        reg_for_read(dc, ra), zero,
                        cpu_btarget, next);

    return true;
}
1126 
/*
 * Conditional branch variants: plain, delay-slot (d), immediate (i),
 * and immediate-with-delay-slot (id) forms for each condition on rA.
 */
#define DO_BCC(NAME, COND)                                              \
    static bool trans_##NAME(DisasContext *dc, arg_typea_bc *arg)       \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, false); }            \
    static bool trans_##NAME##d(DisasContext *dc, arg_typea_bc *arg)    \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, true); }             \
    static bool trans_##NAME##i(DisasContext *dc, arg_typeb_bc *arg)    \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, false); }          \
    static bool trans_##NAME##id(DisasContext *dc, arg_typeb_bc *arg)   \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, true); }

DO_BCC(beq, TCG_COND_EQ)
DO_BCC(bge, TCG_COND_GE)
DO_BCC(bgt, TCG_COND_GT)
DO_BCC(ble, TCG_COND_LE)
DO_BCC(blt, TCG_COND_LT)
DO_BCC(bne, TCG_COND_NE)
1143 
/*
 * brk: hardware break (privileged).  Jumps to the address in rB,
 * optionally links the break's own address into rD, and sets MSR[BIP].
 */
static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brk")) {
        return true;
    }

    /* Read rB before possibly overwriting rD — the two may coincide. */
    tcg_gen_mov_i32(cpu_pc, reg_for_read(dc, arg->rb));
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
    /* Break cancels any outstanding lwx reservation. */
    tcg_gen_movi_tl(cpu_res_addr, -1);

    /* MSR changed dynamically: return to the main loop. */
    dc->base.is_jmp = DISAS_EXIT;
    return true;
}
1163 
/*
 * brki: break to immediate vector (privileged, except for the syscall
 * vector 0x8 and the debug vector 0x18, which userspace may reach).
 */
static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
{
    uint32_t imm = arg->imm;

    /* Only vectors 0x8 and 0x18 are permitted from user mode. */
    if (trap_userspace(dc, imm != 0x8 && imm != 0x18)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brki")) {
        return true;
    }

    tcg_gen_movi_i32(cpu_pc, imm);
    if (arg->rd) {
        /* Link the address of the brki insn itself. */
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    /* Entering a handler cancels any outstanding lwx reservation. */
    tcg_gen_movi_tl(cpu_res_addr, -1);

#ifdef CONFIG_USER_ONLY
    switch (imm) {
    case 0x8:  /* syscall trap */
        gen_raise_exception_sync(dc, EXCP_SYSCALL);
        break;
    case 0x18: /* debug trap */
        gen_raise_exception_sync(dc, EXCP_DEBUG);
        break;
    default:   /* eliminated with trap_userspace check */
        g_assert_not_reached();
    }
#else
    uint32_t msr_to_set = 0;

    /* All vectors except the debug trap set MSR[BIP]. */
    if (imm != 0x18) {
        msr_to_set |= MSR_BIP;
    }
    if (imm == 0x8 || imm == 0x18) {
        /* MSR_UM and MSR_VM are in tb_flags, so we know their value. */
        /* Save UM/VM into the UMS/VMS "saved" bits (one bit higher). */
        msr_to_set |= (dc->tb_flags & (MSR_UM | MSR_VM)) << 1;
        tcg_gen_andi_i32(cpu_msr, cpu_msr,
                         ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM));
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, msr_to_set);
    dc->base.is_jmp = DISAS_EXIT;
#endif

    return true;
}
1210 
/*
 * mbar: memory barrier / sleep.  The immediate selects the variant:
 * bit 1 clear = data barrier; bit 4 set = sleep (privileged); bit 0
 * clear = instruction barrier (see comment at the end).
 */
static bool trans_mbar(DisasContext *dc, arg_mbar *arg)
{
    int mbar_imm = arg->imm;

    /* Note that mbar is a specialized branch instruction. */
    if (invalid_delay_slot(dc, "mbar")) {
        return true;
    }

    /* Data access memory barrier.  */
    if ((mbar_imm & 2) == 0) {
        tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
    }

    /* Sleep. */
    if (mbar_imm & 16) {
        if (trap_userspace(dc, true)) {
            /* Sleep is a privileged instruction.  */
            return true;
        }

        t_sync_flags(dc);

        /*
         * Set cs->halted = 1.  tcg_env points at MicroBlazeCPU.env, so
         * subtracting offsetof(MicroBlazeCPU, env) rewinds to the CPU
         * object, then we index into the embedded CPUState.
         */
        tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                       -offsetof(MicroBlazeCPU, env)
                       +offsetof(CPUState, halted));

        /* Resume execution at the insn following mbar after wakeup. */
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);

        gen_raise_exception(dc, EXCP_HLT);
    }

    /*
     * If !(mbar_imm & 1), this is an instruction access memory barrier
     * and we need to end the TB so that we recognize self-modified
     * code immediately.
     *
     * However, there are some data mbars that need the TB break
     * (and return to main loop) to recognize interrupts right away.
     * E.g. recognizing a change to an interrupt controller register.
     *
     * Therefore, choose to end the TB always.
     */
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
}
1257 
/*
 * Common helper for the return instructions (rtsd/rtid/rtbd/rted).
 * to_set carries the DRT?_FLAG that tells the delay-slot epilogue
 * which MSR restore (do_rti/do_rtb/do_rte) to perform; it is zero for
 * plain rtsd, which is the only unprivileged variant.
 */
static bool do_rts(DisasContext *dc, arg_typeb_bc *arg, int to_set)
{
    if (trap_userspace(dc, to_set)) {
        return true;
    }
    if (invalid_delay_slot(dc, "rts")) {
        return true;
    }

    dc->tb_flags_to_set |= to_set;
    /* All return insns have a mandatory delay slot. */
    setup_dslot(dc, true);

    dc->jmp_cond = TCG_COND_ALWAYS;
    /* Dynamic target: rA + imm. */
    dc->jmp_dest = -1;
    tcg_gen_addi_i32(cpu_btarget, reg_for_read(dc, arg->ra), arg->imm);
    return true;
}
1275 
/* Return-from-{break,interrupt,exception,subroutine} trampolines. */
#define DO_RTS(NAME, IFLAG) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_rts(dc, arg, IFLAG); }

DO_RTS(rtbd, DRTB_FLAG)
DO_RTS(rtid, DRTI_FLAG)
DO_RTS(rted, DRTE_FLAG)
DO_RTS(rtsd, 0)
1284 
1285 static bool trans_zero(DisasContext *dc, arg_zero *arg)
1286 {
1287     /* If opcode_0_illegal, trap.  */
1288     if (dc->cfg->opcode_0_illegal) {
1289         trap_illegal(dc, true);
1290         return true;
1291     }
1292     /*
1293      * Otherwise, this is "add r0, r0, r0".
1294      * Continue to trans_add so that MSR[C] gets cleared.
1295      */
1296     return false;
1297 }
1298 
1299 static void msr_read(DisasContext *dc, TCGv_i32 d)
1300 {
1301     TCGv_i32 t;
1302 
1303     /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
1304     t = tcg_temp_new_i32();
1305     tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
1306     tcg_gen_or_i32(d, cpu_msr, t);
1307 }
1308 
1309 static bool do_msrclrset(DisasContext *dc, arg_type_msr *arg, bool set)
1310 {
1311     uint32_t imm = arg->imm;
1312 
1313     if (trap_userspace(dc, imm != MSR_C)) {
1314         return true;
1315     }
1316 
1317     if (arg->rd) {
1318         msr_read(dc, cpu_R[arg->rd]);
1319     }
1320 
1321     /*
1322      * Handle the carry bit separately.
1323      * This is the only bit that userspace can modify.
1324      */
1325     if (imm & MSR_C) {
1326         tcg_gen_movi_i32(cpu_msr_c, set);
1327     }
1328 
1329     /*
1330      * MSR_C and MSR_CC set above.
1331      * MSR_PVR is not writable, and is always clear.
1332      */
1333     imm &= ~(MSR_C | MSR_CC | MSR_PVR);
1334 
1335     if (imm != 0) {
1336         if (set) {
1337             tcg_gen_ori_i32(cpu_msr, cpu_msr, imm);
1338         } else {
1339             tcg_gen_andi_i32(cpu_msr, cpu_msr, ~imm);
1340         }
1341         dc->base.is_jmp = DISAS_EXIT_NEXT;
1342     }
1343     return true;
1344 }
1345 
/* msrclr: clear the MSR bits selected by the immediate. */
static bool trans_msrclr(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, false);
}
1350 
/* msrset: set the MSR bits selected by the immediate. */
static bool trans_msrset(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, true);
}
1355 
/*
 * mts: move rA to a special register (privileged).
 * arg->e selects the extended register space, which is only valid for
 * TLBLO (0x1003).  Unknown registers are logged and ignored.
 */
static bool trans_mts(DisasContext *dc, arg_mts *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }

#ifdef CONFIG_USER_ONLY
    /* trap_userspace(dc, true) always traps for user-only builds. */
    g_assert_not_reached();
#else
    if (arg->e && arg->rs != 0x1003) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid extended mts reg 0x%x\n", arg->rs);
        return true;
    }

    TCGv_i32 src = reg_for_read(dc, arg->ra);
    switch (arg->rs) {
    case SR_MSR:
        /* Install MSR_C.  */
        tcg_gen_extract_i32(cpu_msr_c, src, 2, 1);
        /*
         * Clear MSR_C and MSR_CC;
         * MSR_PVR is not writable, and is always clear.
         */
        tcg_gen_andi_i32(cpu_msr, src, ~(MSR_C | MSR_CC | MSR_PVR));
        break;
    case SR_FSR:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, fsr));
        break;
    case 0x800:
        /* SLR: stack low register (stack protection). */
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        /* SHR: stack high register (stack protection). */
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, shr));
        break;

    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            /* MMU registers are handled out of line by a helper. */
            gen_helper_mmu_write(tcg_env, tmp_ext, tmp_reg, src);
        }
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mts reg 0x%x\n", arg->rs);
        return true;
    }
    /* System state changed: return to the main loop after this insn. */
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
#endif
}
1414 
/*
 * mfs: move a special register into rD.
 * arg->e selects the extended register space: the high half of EAR,
 * TLBLO (system only), and the high halves of PVR6-9 (unimplemented,
 * read as zero).  Unknown registers are logged and ignored.
 */
static bool trans_mfs(DisasContext *dc, arg_mfs *arg)
{
    TCGv_i32 dest = reg_for_write(dc, arg->rd);

    if (arg->e) {
        switch (arg->rs) {
        case SR_EAR:
            {
                /* EAR is 64 bits; the extended form reads the high half. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_ld_i64(t64, tcg_env, offsetof(CPUMBState, ear));
                tcg_gen_extrh_i64_i32(dest, t64);
            }
            return true;
#ifndef CONFIG_USER_ONLY
        case 0x1003: /* TLBLO */
            /* Handled below. */
            break;
#endif
        case 0x2006 ... 0x2009:
            /* High bits of PVR6-9 not implemented. */
            tcg_gen_movi_i32(dest, 0);
            return true;
        default:
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid extended mfs reg 0x%x\n", arg->rs);
            return true;
        }
    }

    switch (arg->rs) {
    case SR_PC:
        tcg_gen_movi_i32(dest, dc->base.pc_next);
        break;
    case SR_MSR:
        /* Reassemble MSR_C/MSR_CC into the architectural value. */
        msr_read(dc, dest);
        break;
    case SR_EAR:
        {
            /* Non-extended form reads the low half of the 64-bit EAR. */
            TCGv_i64 t64 = tcg_temp_new_i64();
            tcg_gen_ld_i64(t64, tcg_env, offsetof(CPUMBState, ear));
            tcg_gen_extrl_i64_i32(dest, t64);
        }
        break;
    case SR_ESR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, esr));
        break;
    case SR_FSR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, fsr));
        break;
    case SR_BTR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, btr));
        break;
    case SR_EDR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, edr));
        break;
    case 0x800:
        /* SLR: stack low register. */
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        /* SHR: stack high register. */
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, shr));
        break;

#ifndef CONFIG_USER_ONLY
    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            /* MMU registers are handled out of line by a helper. */
            gen_helper_mmu_read(dest, tcg_env, tmp_ext, tmp_reg);
        }
        break;
#endif

    case 0x2000 ... 0x200c:
        /* PVR0-12 live in the CPU config, outside CPUMBState. */
        tcg_gen_ld_i32(dest, tcg_env,
                       offsetof(MicroBlazeCPU, cfg.pvr_regs[arg->rs - 0x2000])
                       - offsetof(MicroBlazeCPU, env));
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mfs reg 0x%x\n", arg->rs);
        break;
    }
    return true;
}
1504 
/*
 * Return-from-interrupt MSR update: re-enable interrupts and restore
 * VM/UM from their saved copies VMS/UMS (which sit one bit higher,
 * hence the shift right by 1).
 */
static void do_rti(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Capture the pre-update MSR before modifying it below. */
    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_IE);
    tcg_gen_andi_i32(tmp, tmp, MSR_VM | MSR_UM);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}
1515 
/*
 * Return-from-break MSR update: clear break-in-progress (BIP) and
 * restore VM/UM from the saved VMS/UMS bits (one bit higher).
 */
static void do_rtb(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Capture the pre-update MSR before modifying it below. */
    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_BIP));
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}
1525 
/*
 * Return-from-exception MSR update: re-enable exceptions (EE), clear
 * exception-in-progress (EIP), and restore VM/UM from the saved
 * VMS/UMS bits (one bit higher).
 */
static void do_rte(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Capture the pre-update MSR before modifying it below. */
    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_EE);
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_EIP));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}
1536 
1537 /* Insns connected to FSL or AXI stream attached devices.  */
1538 static bool do_get(DisasContext *dc, int rd, int rb, int imm, int ctrl)
1539 {
1540     TCGv_i32 t_id, t_ctrl;
1541 
1542     if (trap_userspace(dc, true)) {
1543         return true;
1544     }
1545 
1546     t_id = tcg_temp_new_i32();
1547     if (rb) {
1548         tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
1549     } else {
1550         tcg_gen_movi_i32(t_id, imm);
1551     }
1552 
1553     t_ctrl = tcg_constant_i32(ctrl);
1554     gen_helper_get(reg_for_write(dc, rd), t_id, t_ctrl);
1555     return true;
1556 }
1557 
/* get: static port number taken from the immediate field. */
static bool trans_get(DisasContext *dc, arg_get *arg)
{
    return do_get(dc, arg->rd, 0, arg->imm, arg->ctrl);
}
1562 
/* getd: dynamic port number taken from rB. */
static bool trans_getd(DisasContext *dc, arg_getd *arg)
{
    return do_get(dc, arg->rd, arg->rb, 0, arg->ctrl);
}
1567 
1568 static bool do_put(DisasContext *dc, int ra, int rb, int imm, int ctrl)
1569 {
1570     TCGv_i32 t_id, t_ctrl;
1571 
1572     if (trap_userspace(dc, true)) {
1573         return true;
1574     }
1575 
1576     t_id = tcg_temp_new_i32();
1577     if (rb) {
1578         tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
1579     } else {
1580         tcg_gen_movi_i32(t_id, imm);
1581     }
1582 
1583     t_ctrl = tcg_constant_i32(ctrl);
1584     gen_helper_put(t_id, t_ctrl, reg_for_read(dc, ra));
1585     return true;
1586 }
1587 
/* put: static port number taken from the immediate field. */
static bool trans_put(DisasContext *dc, arg_put *arg)
{
    return do_put(dc, arg->ra, 0, arg->imm, arg->ctrl);
}
1592 
/* putd: dynamic port number taken from rB. */
static bool trans_putd(DisasContext *dc, arg_putd *arg)
{
    return do_put(dc, arg->ra, arg->rb, 0, arg->ctrl);
}
1597 
/* Per-TB translator setup: seed DisasContext from the TB and CPU. */
static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    int bound;

    dc->cfg = &cpu->cfg;
    dc->tb_flags = dc->base.tb->flags;
    /* The imm-prefix value travels in cs_base. */
    dc->ext_imm = dc->base.tb->cs_base;
    dc->r0 = NULL;
    dc->r0_set = false;
    dc->mem_index = cpu_mmu_index(cs, false);
    /* A TB starting inside a delay slot must resolve the branch. */
    dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
    dc->jmp_dest = -1;

    /* Limit insns to the end of the current page (4 bytes per insn). */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
1616 
/* No per-TB prologue needed for MicroBlaze. */
static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
{
}
1620 
/*
 * Record pc and iflags for this insn so the state can be restored on
 * an exception.  The MSR bits mirrored in tb_flags are masked out;
 * they are not part of iflags proper.
 */
static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK);
}
1627 
/*
 * Translate one insn: decode it, maintain the imm-prefix and
 * delay-slot flags, and — when this insn was a delay slot — finish
 * the pending branch (including any rti/rtb/rte MSR restore).
 */
static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    uint32_t ir;

    /* TODO: This should raise an exception, not terminate qemu. */
    if (dc->base.pc_next & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
                  (uint32_t)dc->base.pc_next);
    }

    /* Flags the current insn wants active for the NEXT insn. */
    dc->tb_flags_to_set = 0;

    ir = cpu_ldl_code(cpu_env(cs), dc->base.pc_next);
    if (!decode(dc, ir)) {
        trap_illegal(dc, true);
    }

    /* Forget any cached r0 constant; it is only valid per-insn. */
    if (dc->r0) {
        dc->r0 = NULL;
        dc->r0_set = false;
    }

    /* Discard the imm global when its contents cannot be used. */
    if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
        tcg_gen_discard_i32(cpu_imm);
    }

    /* Single-insn flags expire now; install what this insn requested. */
    dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);
    dc->tb_flags |= dc->tb_flags_to_set;
    dc->base.pc_next += 4;

    if (dc->jmp_cond != TCG_COND_NEVER && !(dc->tb_flags & D_FLAG)) {
        /*
         * Finish any return-from branch.
         */
        uint32_t rt_ibe = dc->tb_flags & (DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
        if (unlikely(rt_ibe != 0)) {
            dc->tb_flags &= ~(DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
            if (rt_ibe & DRTI_FLAG) {
                do_rti(dc);
            } else if (rt_ibe & DRTB_FLAG) {
                do_rtb(dc);
            } else {
                do_rte(dc);
            }
        }

        /* Complete the branch, ending the TB. */
        switch (dc->base.is_jmp) {
        case DISAS_NORETURN:
            /*
             * E.g. illegal insn in a delay slot.  We've already exited
             * and will handle D_FLAG in mb_cpu_do_interrupt.
             */
            break;
        case DISAS_NEXT:
            /*
             * Normal insn in a delay slot.
             * However, the return-from-exception type insns should
             * return to the main loop, as they have adjusted MSR.
             */
            dc->base.is_jmp = (rt_ibe ? DISAS_EXIT_JUMP : DISAS_JUMP);
            break;
        case DISAS_EXIT_NEXT:
            /*
             * E.g. mts insn in a delay slot.  Continue with btarget,
             * but still return to the main loop.
             */
            dc->base.is_jmp = DISAS_EXIT_JUMP;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
1704 
/*
 * Emit the TB epilogue: chain to the next TB when the destination is
 * known, otherwise update pc from btarget and return to the main loop
 * or the TB lookup helper, as the is_jmp state demands.
 */
static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        /* We have already exited the TB. */
        return;
    }

    /* Write back any pending iflags before leaving the TB. */
    t_sync_flags(dc);

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        /* Straight-line code: chain to the next insn's TB. */
        gen_goto_tb(dc, 0, dc->base.pc_next);
        return;

    case DISAS_EXIT:
        /* pc already updated dynamically. */
        break;
    case DISAS_EXIT_NEXT:
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
        break;
    case DISAS_EXIT_JUMP:
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        break;

    case DISAS_JUMP:
        if (dc->jmp_dest != -1 && !(tb_cflags(dc->base.tb) & CF_NO_GOTO_TB)) {
            /* Direct jump. */
            tcg_gen_discard_i32(cpu_btarget);

            if (dc->jmp_cond != TCG_COND_ALWAYS) {
                /* Conditional direct jump. */
                TCGLabel *taken = gen_new_label();
                TCGv_i32 tmp = tcg_temp_new_i32();

                /*
                 * Copy bvalue to a temp now, so we can discard bvalue.
                 * This can avoid writing bvalue to memory when the
                 * delay slot cannot raise an exception.
                 */
                tcg_gen_mov_i32(tmp, cpu_bvalue);
                tcg_gen_discard_i32(cpu_bvalue);

                tcg_gen_brcondi_i32(dc->jmp_cond, tmp, 0, taken);
                gen_goto_tb(dc, 1, dc->base.pc_next);
                gen_set_label(taken);
            }
            gen_goto_tb(dc, 0, dc->jmp_dest);
            return;
        }

        /* Indirect jump (or direct jump w/ goto_tb disabled) */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        tcg_gen_lookup_and_goto_ptr();
        return;

    default:
        g_assert_not_reached();
    }

    /* Finish DISAS_EXIT_* */
    if (unlikely(cs->singlestep_enabled)) {
        gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
}
1774 
/* Log the guest disassembly of the translated block. */
static void mb_tr_disas_log(const DisasContextBase *dcb,
                            CPUState *cs, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcb->pc_first));
    target_disas(logfile, cs, dcb->pc_first, dcb->tb->size);
}
1781 
/* Hooks the generic translator_loop invokes for MicroBlaze. */
static const TranslatorOps mb_tr_ops = {
    .init_disas_context = mb_tr_init_disas_context,
    .tb_start           = mb_tr_tb_start,
    .insn_start         = mb_tr_insn_start,
    .translate_insn     = mb_tr_translate_insn,
    .tb_stop            = mb_tr_tb_stop,
    .disas_log          = mb_tr_disas_log,
};
1790 
/* Target entry point: translate one TB via the generic translator loop. */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc;
    translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
}
1797 
/* Dump architectural state (pc, msr, iflags, SPRs, GPRs) for debugging. */
void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    CPUMBState *env = cpu_env(cs);
    uint32_t iflags;
    int i;

    qemu_fprintf(f, "pc=0x%08x msr=0x%05x mode=%s(saved=%s) eip=%d ie=%d\n",
                 env->pc, env->msr,
                 (env->msr & MSR_UM) ? "user" : "kernel",
                 (env->msr & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->msr & MSR_EIP),
                 (bool)(env->msr & MSR_IE));

    /* Decode the per-insn flag bits symbolically. */
    iflags = env->iflags;
    qemu_fprintf(f, "iflags: 0x%08x", iflags);
    if (iflags & IMM_FLAG) {
        qemu_fprintf(f, " IMM(0x%08x)", env->imm);
    }
    if (iflags & BIMM_FLAG) {
        qemu_fprintf(f, " BIMM");
    }
    if (iflags & D_FLAG) {
        qemu_fprintf(f, " D(btarget=0x%08x)", env->btarget);
    }
    if (iflags & DRTI_FLAG) {
        qemu_fprintf(f, " DRTI");
    }
    if (iflags & DRTE_FLAG) {
        qemu_fprintf(f, " DRTE");
    }
    if (iflags & DRTB_FLAG) {
        qemu_fprintf(f, " DRTB");
    }
    if (iflags & ESR_ESS_FLAG) {
        qemu_fprintf(f, " ESR_ESS(0x%04x)", iflags & ESR_ESS_MASK);
    }

    qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
                 "ear=0x" TARGET_FMT_lx " slr=0x%x shr=0x%x\n",
                 env->esr, env->fsr, env->btr, env->edr,
                 env->ear, env->slr, env->shr);

    /* General registers, four per line. */
    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%08x%c",
                     i, env->regs[i], i % 4 == 3 ? '\n' : ' ');
    }
    qemu_fprintf(f, "\n");
}
1846 
/*
 * Create the TCG globals backing the CPU state: the GPR array and the
 * scalar i32 state fields, plus the target_ulong-sized res_addr which
 * is created separately with tcg_global_mem_new.
 */
void mb_tcg_init(void)
{
#define R(X)  { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }

    static const struct {
        TCGv_i32 *var; int ofs; char name[8];
    } i32s[] = {
        /*
         * Note that r0 is handled specially in reg_for_read
         * and reg_for_write.  Nothing should touch cpu_R[0].
         * Leave that element NULL, which will assert quickly
         * inside the tcg generator functions.
         */
               R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
        R(8),  R(9),  R(10), R(11), R(12), R(13), R(14), R(15),
        R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
        R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),

        SP(pc),
        SP(msr),
        SP(msr_c),
        SP(imm),
        SP(iflags),
        SP(bvalue),
        SP(btarget),
        SP(res_val),
    };

#undef R
#undef SP

    for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
        *i32s[i].var =
          tcg_global_mem_new_i32(tcg_env, i32s[i].ofs, i32s[i].name);
    }

    cpu_res_addr =
        tcg_global_mem_new(tcg_env, offsetof(CPUMBState, res_addr), "res_addr");
}
1887