/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "qemu/qemu-print.h"

#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
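
/*
 * E.g. EXTRACT_FIELD(0x0000abcd, 4, 7) selects bits [7:4] of the
 * source: (0xabcd >> 4) & 0xf == 0xc.
 */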

/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_EXIT    DISAS_TARGET_1 /* all cpu state modified dynamically */

/* cpu state besides pc was modified dynamically; update pc to next */
#define DISAS_EXIT_NEXT DISAS_TARGET_2
/* cpu state besides pc was modified dynamically; update pc to btarget */
#define DISAS_EXIT_JUMP DISAS_TARGET_3

static TCGv_i32 cpu_R[32];
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_msr;
static TCGv_i32 cpu_msr_c;
static TCGv_i32 cpu_imm;
static TCGv_i32 cpu_bvalue;
static TCGv_i32 cpu_btarget;
static TCGv_i32 cpu_iflags;
static TCGv cpu_res_addr;
static TCGv_i32 cpu_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    DisasContextBase base;
    const MicroBlazeCPUConfig *cfg;

    /* TCG op of the current insn_start.  */
    TCGOp *insn_start;

    TCGv_i32 r0;
    bool r0_set;

    /* Decoder.  */
    uint32_t ext_imm;
    unsigned int tb_flags;
    unsigned int tb_flags_to_set;
    int mem_index;

    /* Condition under which to jump, including NEVER and ALWAYS. */
    TCGCond jmp_cond;

    /* Immediate branch-taken destination, or -1 for indirect. */
    uint32_t jmp_dest;
} DisasContext;

static int typeb_imm(DisasContext *dc, int x)
{
    if (dc->tb_flags & IMM_FLAG) {
        return deposit32(dc->ext_imm, 0, 16, x);
    }
    return x;
}
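
/*
 * E.g. an "imm 0x1234" prefix followed by "addik rd, ra, 0x5678"
 * resolves here to the 32-bit immediate 0x12345678: trans_imm stashes
 * 0x1234 << 16 in ext_imm, and deposit32() fills in the low half.
 */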

/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

static void t_sync_flags(DisasContext *dc)
{
    /* Sync the TB-dependent flags between translator and runtime.  */
    if ((dc->tb_flags ^ dc->base.tb->flags) & IFLAGS_TB_MASK) {
        tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & IFLAGS_TB_MASK);
    }
}

static void gen_raise_exception(DisasContext *dc, uint32_t index)
{
    gen_helper_raise_exception(cpu_env, tcg_constant_i32(index));
    dc->base.is_jmp = DISAS_NORETURN;
}

static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_raise_exception(dc, index);
}

static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
{
    TCGv_i32 tmp = tcg_constant_i32(esr_ec);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, esr));

    gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&dc->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    dc->base.is_jmp = DISAS_NORETURN;
}

/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE)
        && dc->cfg->illegal_opcode_exception) {
        gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
    }
    return cond;
}

/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    bool cond_user = cond && dc->mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE)) {
        gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
    }
    return cond_user;
}

/*
 * Return true, and log an error, if the current insn is
 * within a delay slot.
 */
static bool invalid_delay_slot(DisasContext *dc, const char *insn_type)
{
    if (dc->tb_flags & D_FLAG) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid insn in delay slot: %s at %08x\n",
                      insn_type, (uint32_t)dc->base.pc_next);
        return true;
    }
    return false;
}

static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (!dc->r0_set) {
        if (dc->r0 == NULL) {
            dc->r0 = tcg_temp_new_i32();
        }
        tcg_gen_movi_i32(dc->r0, 0);
        dc->r0_set = true;
    }
    return dc->r0;
}

static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (dc->r0 == NULL) {
        dc->r0 = tcg_temp_new_i32();
    }
    return dc->r0;
}

static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
                     void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, rb;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    rb = reg_for_read(dc, arg->rb);
    fn(rd, ra, rb);
    return true;
}

static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
                      void (*fn)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fn(rd, ra);
    return true;
}

static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fni(rd, ra, arg->imm);
    return true;
}

static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, imm;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    imm = tcg_constant_i32(arg->imm);

    fn(rd, ra, imm);
    return true;
}

#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cfg->CFG && do_typea(dc, a, SE, FN); }

#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cfg->CFG && do_typea0(dc, a, SE, FN); }

#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cfg->CFG && do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }

#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, cpu_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, cpu_env, ina, inb); }

/* No input carry, but output carry. */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);

    tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);
}

/* Input and output carry. */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
}
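
/*
 * The two add2 steps above compute ina + inb + carry-in with a correct
 * carry-out: the first leaves ina + carry-in in tmp and its carry in
 * cpu_msr_c, the second adds inb and accumulates the final carry in
 * the high half.  The two partial carries can never both be set, so
 * cpu_msr_c remains a clean 0/1 boolean.
 */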

/* Input carry, but no output carry. */
static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_add_i32(out, ina, inb);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(add, true, gen_add)
DO_TYPEA(addc, true, gen_addc)
DO_TYPEA(addk, false, tcg_gen_add_i32)
DO_TYPEA(addkc, true, gen_addkc)

DO_TYPEBV(addi, true, gen_add)
DO_TYPEBV(addic, true, gen_addc)
DO_TYPEBI(addik, false, tcg_gen_addi_i32)
DO_TYPEBV(addikc, true, gen_addkc)

static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    tcg_gen_andi_i32(out, ina, ~imm);
}

DO_TYPEA(and, false, tcg_gen_and_i32)
DO_TYPEBI(andi, false, tcg_gen_andi_i32)
DO_TYPEA(andn, false, tcg_gen_andc_i32)
DO_TYPEBI(andni, false, gen_andni)

static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_sar_i32(out, ina, tmp);
}

static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shr_i32(out, ina, tmp);
}

static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shl_i32(out, ina, tmp);
}

static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);

    if (imm_w + imm_s > 32 || imm_w == 0) {
        /* These inputs have undefined behavior.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_extract_i32(out, ina, imm_s, imm_w);
    }
}

static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);
    int width = imm_w - imm_s + 1;

    if (imm_w < imm_s) {
        /* These inputs have undefined behavior.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_deposit_i32(out, out, ina, imm_s, width);
    }
}

DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)

DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)

DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)

static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_clzi_i32(out, ina, 32);
}

DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)

static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}
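
/*
 * Both flavours compute rd = rb - ra, then overwrite bit 31 with the
 * comparison result, so that e.g. cmpu with ra > rb (unsigned) yields
 * a result with the MSB set.
 */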

DO_TYPEA(cmp, false, gen_cmp)
DO_TYPEA(cmpu, false, gen_cmpu)

ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)

DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)

ENV_WRAPPER2(gen_flt, gen_helper_flt)
ENV_WRAPPER2(gen_fint, gen_helper_fint)
ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)

DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)

/* Does not use ENV_WRAPPER3, because arguments are swapped as well. */
static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divs(out, cpu_env, inb, ina);
}

static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divu(out, cpu_env, inb, ina);
}

DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)

static bool trans_imm(DisasContext *dc, arg_imm *arg)
{
    if (invalid_delay_slot(dc, "imm")) {
        return true;
    }
    dc->ext_imm = arg->imm << 16;
    tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
    dc->tb_flags_to_set = IMM_FLAG;
    return true;
}

static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_muls2_i32(tmp, out, ina, inb);
}

static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulu2_i32(tmp, out, ina, inb);
}

static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulsu2_i32(tmp, out, ina, inb);
}

DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)

DO_TYPEA(or, false, tcg_gen_or_i32)
DO_TYPEBI(ori, false, tcg_gen_ori_i32)

static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
}

static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
}

DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)

/* No input carry, but output carry. */
static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input and output carry. */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_not_i32(tmp, ina);
    tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
}

/* No input or output carry. */
static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input carry, no output carry. */
static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 nota = tcg_temp_new_i32();

    tcg_gen_not_i32(nota, ina);
    tcg_gen_add_i32(out, inb, nota);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(rsub, true, gen_rsub)
DO_TYPEA(rsubc, true, gen_rsubc)
DO_TYPEA(rsubk, false, gen_rsubk)
DO_TYPEA(rsubkc, true, gen_rsubkc)

DO_TYPEBV(rsubi, true, gen_rsub)
DO_TYPEBV(rsubic, true, gen_rsubc)
DO_TYPEBV(rsubik, false, gen_rsubk)
DO_TYPEBV(rsubikc, true, gen_rsubkc)

DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)

static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_sari_i32(out, ina, 1);
}

static void gen_src(TCGv_i32 out, TCGv_i32 ina)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_mov_i32(tmp, cpu_msr_c);
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_extract2_i32(out, ina, tmp, 1);
}
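
/*
 * In gen_src above, extract2 shifts the 64-bit pair {carry-in:ina}
 * right by one, i.e. out = (carry_in << 31) | (ina >> 1), after the
 * low bit of ina has been saved as the new carry.
 */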

static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_shri_i32(out, ina, 1);
}

DO_TYPEA0(sra, false, gen_sra)
DO_TYPEA0(src, false, gen_src)
DO_TYPEA0(srl, false, gen_srl)

static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_rotri_i32(out, ina, 16);
}

DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
DO_TYPEA0(swaph, false, gen_swaph)

static bool trans_wdic(DisasContext *dc, arg_wdic *a)
{
    /* Cache operations are nops: only check for supervisor mode.  */
    trap_userspace(dc, true);
    return true;
}

DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)

static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
    TCGv ret = tcg_temp_new();

    /* If any of the regs is r0, set t to the value of the other reg.  */
    if (ra && rb) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
        tcg_gen_extu_i32_tl(ret, tmp);
    } else if (ra) {
        tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
    } else if (rb) {
        tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
    } else {
        tcg_gen_movi_tl(ret, 0);
    }

    if ((ra == 1 || rb == 1) && dc->cfg->stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}

static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
    TCGv ret = tcg_temp_new();

    /* If ra is r0, the address is just the immediate.  */
    if (ra) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
        tcg_gen_extu_i32_tl(ret, tmp);
    } else {
        tcg_gen_movi_tl(ret, (uint32_t)imm);
    }

    if (ra == 1 && dc->cfg->stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
    int addr_size = dc->cfg->addr_size;
    TCGv ret = tcg_temp_new();

    if (addr_size == 32 || ra == 0) {
        if (rb) {
            tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
        } else {
            tcg_gen_movi_tl(ret, 0);
        }
    } else {
        if (rb) {
            tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
        } else {
            tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
            tcg_gen_shli_tl(ret, ret, 32);
        }
        if (addr_size < 64) {
            /* Mask off out of range bits.  */
            tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
        }
    }
    return ret;
}
#endif
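
/*
 * With 32 < addr_size <= 64, the extended-address forms above treat
 * the {ra:rb} register pair as a single wide pointer: ra supplies the
 * high word and rb the low word.
 */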

#ifndef CONFIG_USER_ONLY
static void record_unaligned_ess(DisasContext *dc, int rd,
                                 MemOp size, bool store)
{
    uint32_t iflags = tcg_get_insn_start_param(dc->insn_start, 1);

    iflags |= ESR_ESS_FLAG;
    iflags |= rd << 5;
    iflags |= store * ESR_S;
    iflags |= (size == MO_32) * ESR_W;

    tcg_set_insn_start_param(dc->insn_start, 1, iflags);
}
#endif

static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                    int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, false);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);
    return true;
}

static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_lwea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);

    /* lwx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL);
    tcg_gen_mov_tl(cpu_res_addr, addr);

    if (arg->rd) {
        tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
    }

    /* No support for AXI exclusive so always clear C */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    return true;
}

static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way out of the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, true);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);
    return true;
}

static bool trans_sb(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sbr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_sbea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sh(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_shr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_shea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_shi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_sw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_swr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_swea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_swi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    TCGLabel *swx_done = gen_new_label();
    TCGLabel *swx_fail = gen_new_label();
    TCGv_i32 tval;

    /* swx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    /*
     * Compare the address vs the one we used during lwx.
     * On mismatch, the operation fails.  On match, addr dies at the
     * branch, but we know we can use the equal version in the global.
     * In either case, addr is no longer needed.
     */
    tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);

    /*
     * Compare the value loaded during lwx with current contents of
     * the reserved location.
     */
    tval = tcg_temp_new_i32();

    tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
                               reg_for_write(dc, arg->rd),
                               dc->mem_index, MO_TEUL);

    tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);

    /* Success */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    tcg_gen_br(swx_done);

    /* Failure */
    gen_set_label(swx_fail);
    tcg_gen_movi_i32(cpu_msr_c, 1);

    gen_set_label(swx_done);

    /*
     * Prevent the saved address from working again without another ldx.
     * Akin to the pseudocode setting reservation = 0.
     */
    tcg_gen_movi_tl(cpu_res_addr, -1);
    return true;
}
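
/*
 * Note that lwx/swx is thus modelled on a host compare-and-swap: the
 * store succeeds if the word still holds the value observed by lwx.
 * That is slightly weaker than a true reservation (an A-B-A change of
 * the word goes undetected), which is the usual compromise when
 * emulating load-locked/store-conditional with atomics.
 */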

static void setup_dslot(DisasContext *dc, bool type_b)
{
    dc->tb_flags_to_set |= D_FLAG;
    if (type_b && (dc->tb_flags & IMM_FLAG)) {
        dc->tb_flags_to_set |= BIMM_FLAG;
    }
}

static bool do_branch(DisasContext *dc, int dest_rb, int dest_imm,
                      bool delay, bool abs, int link)
{
    uint32_t add_pc;

    if (invalid_delay_slot(dc, "branch")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    if (link) {
        tcg_gen_movi_i32(cpu_R[link], dc->base.pc_next);
    }

    /* Store the branch taken destination into btarget.  */
    add_pc = abs ? 0 : dc->base.pc_next;
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], add_pc);
    } else {
        dc->jmp_dest = add_pc + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }
    dc->jmp_cond = TCG_COND_ALWAYS;
    return true;
}

#define DO_BR(NAME, NAMEI, DELAY, ABS, LINK)                               \
    static bool trans_##NAME(DisasContext *dc, arg_typea_br *arg)          \
    { return do_branch(dc, arg->rb, 0, DELAY, ABS, LINK ? arg->rd : 0); }  \
    static bool trans_##NAMEI(DisasContext *dc, arg_typeb_br *arg)         \
    { return do_branch(dc, -1, arg->imm, DELAY, ABS, LINK ? arg->rd : 0); }

DO_BR(br, bri, false, false, false)
DO_BR(bra, brai, false, true, false)
DO_BR(brd, brid, true, false, false)
DO_BR(brad, braid, true, true, false)
DO_BR(brld, brlid, true, false, true)
DO_BR(brald, bralid, true, true, true)

static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm,
                   TCGCond cond, int ra, bool delay)
{
    TCGv_i32 zero, next;

    if (invalid_delay_slot(dc, "bcc")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    dc->jmp_cond = cond;

    /* Cache the condition register in cpu_bvalue across any delay slot.  */
    tcg_gen_mov_i32(cpu_bvalue, reg_for_read(dc, ra));

    /* Store the branch taken destination into btarget.  */
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], dc->base.pc_next);
    } else {
        dc->jmp_dest = dc->base.pc_next + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }

    /* Compute the final destination into btarget.  */
    zero = tcg_constant_i32(0);
    next = tcg_constant_i32(dc->base.pc_next + (delay + 1) * 4);
    tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget,
                        reg_for_read(dc, ra), zero,
                        cpu_btarget, next);

    return true;
}

#define DO_BCC(NAME, COND)                                              \
    static bool trans_##NAME(DisasContext *dc, arg_typea_bc *arg)       \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, false); }            \
    static bool trans_##NAME##d(DisasContext *dc, arg_typea_bc *arg)    \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, true); }             \
    static bool trans_##NAME##i(DisasContext *dc, arg_typeb_bc *arg)    \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, false); }          \
    static bool trans_##NAME##id(DisasContext *dc, arg_typeb_bc *arg)   \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, true); }

DO_BCC(beq, TCG_COND_EQ)
DO_BCC(bge, TCG_COND_GE)
DO_BCC(bgt, TCG_COND_GT)
DO_BCC(ble, TCG_COND_LE)
DO_BCC(blt, TCG_COND_LT)
DO_BCC(bne, TCG_COND_NE)

static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brk")) {
        return true;
    }

    tcg_gen_mov_i32(cpu_pc, reg_for_read(dc, arg->rb));
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
    tcg_gen_movi_tl(cpu_res_addr, -1);

    dc->base.is_jmp = DISAS_EXIT;
    return true;
}

static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != 0x8 && imm != 0x18)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brki")) {
        return true;
    }

    tcg_gen_movi_i32(cpu_pc, imm);
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_movi_tl(cpu_res_addr, -1);

#ifdef CONFIG_USER_ONLY
    switch (imm) {
    case 0x8:  /* syscall trap */
        gen_raise_exception_sync(dc, EXCP_SYSCALL);
        break;
    case 0x18: /* debug trap */
        gen_raise_exception_sync(dc, EXCP_DEBUG);
        break;
    default:   /* eliminated with trap_userspace check */
        g_assert_not_reached();
    }
#else
    uint32_t msr_to_set = 0;

    if (imm != 0x18) {
        msr_to_set |= MSR_BIP;
    }
    if (imm == 0x8 || imm == 0x18) {
        /* MSR_UM and MSR_VM are in tb_flags, so we know their value. */
        msr_to_set |= (dc->tb_flags & (MSR_UM | MSR_VM)) << 1;
        tcg_gen_andi_i32(cpu_msr, cpu_msr,
                         ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM));
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, msr_to_set);
    dc->base.is_jmp = DISAS_EXIT;
#endif

    return true;
}

static bool trans_mbar(DisasContext *dc, arg_mbar *arg)
{
    int mbar_imm = arg->imm;

    /* Note that mbar is a specialized branch instruction. */
    if (invalid_delay_slot(dc, "mbar")) {
        return true;
    }

    /* Data access memory barrier.  */
    if ((mbar_imm & 2) == 0) {
        tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
    }

    /* Sleep. */
    if (mbar_imm & 16) {
        if (trap_userspace(dc, true)) {
            /* Sleep is a privileged instruction.  */
            return true;
        }

        t_sync_flags(dc);

        tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
                       -offsetof(MicroBlazeCPU, env)
                       +offsetof(CPUState, halted));

        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);

        gen_raise_exception(dc, EXCP_HLT);
    }

    /*
     * If !(mbar_imm & 1), this is an instruction access memory barrier
     * and we need to end the TB so that we recognize self-modified
     * code immediately.
     *
     * However, there are some data mbars that need the TB break
     * (and return to main loop) to recognize interrupts right away.
     * E.g. recognizing a change to an interrupt controller register.
     *
     * Therefore, choose to end the TB always.
     */
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
}

static bool do_rts(DisasContext *dc, arg_typeb_bc *arg, int to_set)
{
    if (trap_userspace(dc, to_set)) {
        return true;
    }
    if (invalid_delay_slot(dc, "rts")) {
        return true;
    }

    dc->tb_flags_to_set |= to_set;
    setup_dslot(dc, true);

    dc->jmp_cond = TCG_COND_ALWAYS;
    dc->jmp_dest = -1;
    tcg_gen_addi_i32(cpu_btarget, reg_for_read(dc, arg->ra), arg->imm);
    return true;
}

#define DO_RTS(NAME, IFLAG) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_rts(dc, arg, IFLAG); }

DO_RTS(rtbd, DRTB_FLAG)
DO_RTS(rtid, DRTI_FLAG)
DO_RTS(rted, DRTE_FLAG)
DO_RTS(rtsd, 0)

static bool trans_zero(DisasContext *dc, arg_zero *arg)
{
    /* If opcode_0_illegal, trap.  */
    if (dc->cfg->opcode_0_illegal) {
        trap_illegal(dc, true);
        return true;
    }
    /*
     * Otherwise, this is "add r0, r0, r0".
     * Continue to trans_add so that MSR[C] gets cleared.
     */
    return false;
}

static void msr_read(DisasContext *dc, TCGv_i32 d)
{
    TCGv_i32 t;

    /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
    t = tcg_temp_new_i32();
    tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
    tcg_gen_or_i32(d, cpu_msr, t);
}
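
/*
 * Multiplying the 0/1 carry boolean by (MSR_C | MSR_CC) above is a
 * branchless way of replicating it into both architected carry bits.
 */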

static bool do_msrclrset(DisasContext *dc, arg_type_msr *arg, bool set)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != MSR_C)) {
        return true;
    }

    if (arg->rd) {
        msr_read(dc, cpu_R[arg->rd]);
    }

    /*
     * Handle the carry bit separately.
     * This is the only bit that userspace can modify.
     */
    if (imm & MSR_C) {
        tcg_gen_movi_i32(cpu_msr_c, set);
    }

    /*
     * MSR_C and MSR_CC set above.
     * MSR_PVR is not writable, and is always clear.
     */
    imm &= ~(MSR_C | MSR_CC | MSR_PVR);

    if (imm != 0) {
        if (set) {
            tcg_gen_ori_i32(cpu_msr, cpu_msr, imm);
        } else {
            tcg_gen_andi_i32(cpu_msr, cpu_msr, ~imm);
        }
        dc->base.is_jmp = DISAS_EXIT_NEXT;
    }
    return true;
}

static bool trans_msrclr(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, false);
}

static bool trans_msrset(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, true);
}

static bool trans_mts(DisasContext *dc, arg_mts *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }

#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    if (arg->e && arg->rs != 0x1003) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid extended mts reg 0x%x\n", arg->rs);
        return true;
    }

    TCGv_i32 src = reg_for_read(dc, arg->ra);
    switch (arg->rs) {
    case SR_MSR:
        /* Install MSR_C.  */
        tcg_gen_extract_i32(cpu_msr_c, src, 2, 1);
        /*
         * Clear MSR_C and MSR_CC;
         * MSR_PVR is not writable, and is always clear.
         */
        tcg_gen_andi_i32(cpu_msr, src, ~(MSR_C | MSR_CC | MSR_PVR));
        break;
    case SR_FSR:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, fsr));
        break;
    case 0x800:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, shr));
        break;

    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_write(cpu_env, tmp_ext, tmp_reg, src);
        }
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mts reg 0x%x\n", arg->rs);
        return true;
    }
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
#endif
}

static bool trans_mfs(DisasContext *dc, arg_mfs *arg)
{
    TCGv_i32 dest = reg_for_write(dc, arg->rd);

    if (arg->e) {
        switch (arg->rs) {
        case SR_EAR:
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
                tcg_gen_extrh_i64_i32(dest, t64);
            }
            return true;
#ifndef CONFIG_USER_ONLY
        case 0x1003: /* TLBLO */
            /* Handled below. */
            break;
#endif
        case 0x2006 ... 0x2009:
            /* High bits of PVR6-9 not implemented. */
            tcg_gen_movi_i32(dest, 0);
            return true;
        default:
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid extended mfs reg 0x%x\n", arg->rs);
            return true;
        }
    }

    switch (arg->rs) {
    case SR_PC:
        tcg_gen_movi_i32(dest, dc->base.pc_next);
        break;
    case SR_MSR:
        msr_read(dc, dest);
        break;
    case SR_EAR:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();
            tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
            tcg_gen_extrl_i64_i32(dest, t64);
        }
        break;
    case SR_ESR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, esr));
        break;
    case SR_FSR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, fsr));
        break;
    case SR_BTR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, btr));
        break;
    case SR_EDR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, edr));
        break;
    case 0x800:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, shr));
        break;

#ifndef CONFIG_USER_ONLY
    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_read(dest, cpu_env, tmp_ext, tmp_reg);
        }
        break;
#endif

    case 0x2000 ... 0x200c:
        tcg_gen_ld_i32(dest, cpu_env,
                       offsetof(MicroBlazeCPU, cfg.pvr_regs[arg->rs - 0x2000])
                       - offsetof(MicroBlazeCPU, env));
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mfs reg 0x%x\n", arg->rs);
        break;
    }
    return true;
}

static void do_rti(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_IE);
    tcg_gen_andi_i32(tmp, tmp, MSR_VM | MSR_UM);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}

static void do_rtb(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_BIP));
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}

static void do_rte(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_EE);
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_EIP));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}
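
/*
 * Summary of the three return flavours above: each copies the saved
 * VMS/UMS bits back into VM/UM (the shift by one), while rti also
 * sets IE, rtb additionally clears BIP, and rte sets EE and clears
 * EIP.
 */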

/* Insns connected to FSL or AXI stream attached devices.  */
static bool do_get(DisasContext *dc, int rd, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_constant_i32(ctrl);
    gen_helper_get(reg_for_write(dc, rd), t_id, t_ctrl);
    return true;
}

static bool trans_get(DisasContext *dc, arg_get *arg)
{
    return do_get(dc, arg->rd, 0, arg->imm, arg->ctrl);
}

static bool trans_getd(DisasContext *dc, arg_getd *arg)
{
    return do_get(dc, arg->rd, arg->rb, 0, arg->ctrl);
}

static bool do_put(DisasContext *dc, int ra, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_constant_i32(ctrl);
    gen_helper_put(t_id, t_ctrl, reg_for_read(dc, ra));
    return true;
}

static bool trans_put(DisasContext *dc, arg_put *arg)
{
    return do_put(dc, arg->ra, 0, arg->imm, arg->ctrl);
}

static bool trans_putd(DisasContext *dc, arg_putd *arg)
{
    return do_put(dc, arg->ra, arg->rb, 0, arg->ctrl);
}

static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    int bound;

    dc->cfg = &cpu->cfg;
    dc->tb_flags = dc->base.tb->flags;
    dc->ext_imm = dc->base.tb->cs_base;
    dc->r0 = NULL;
    dc->r0_set = false;
    dc->mem_index = cpu_mmu_index(&cpu->env, false);
    dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
    dc->jmp_dest = -1;

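    /*
     * -(pc_first | TARGET_PAGE_MASK) is the number of bytes from
     * pc_first to the end of its page, so this bound (at 4 bytes per
     * insn) keeps the TB from crossing a page boundary.
     */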
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
{
}

static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK);
    dc->insn_start = tcg_last_op();
}

static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    CPUMBState *env = cs->env_ptr;
    uint32_t ir;

    /* TODO: This should raise an exception, not terminate qemu. */
    if (dc->base.pc_next & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
                  (uint32_t)dc->base.pc_next);
    }

    dc->tb_flags_to_set = 0;

    ir = cpu_ldl_code(env, dc->base.pc_next);
    if (!decode(dc, ir)) {
        trap_illegal(dc, true);
    }

    if (dc->r0) {
        dc->r0 = NULL;
        dc->r0_set = false;
    }

    /* Discard the imm global when its contents cannot be used. */
    if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
        tcg_gen_discard_i32(cpu_imm);
    }

    dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);
    dc->tb_flags |= dc->tb_flags_to_set;
    dc->base.pc_next += 4;

    if (dc->jmp_cond != TCG_COND_NEVER && !(dc->tb_flags & D_FLAG)) {
        /*
         * Finish any return-from-exception branch (rtid, rtbd, rted).
         */
        uint32_t rt_ibe = dc->tb_flags & (DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
        if (unlikely(rt_ibe != 0)) {
            dc->tb_flags &= ~(DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
            if (rt_ibe & DRTI_FLAG) {
                do_rti(dc);
            } else if (rt_ibe & DRTB_FLAG) {
                do_rtb(dc);
            } else {
                do_rte(dc);
            }
        }

        /* Complete the branch, ending the TB. */
        switch (dc->base.is_jmp) {
        case DISAS_NORETURN:
            /*
             * E.g. illegal insn in a delay slot.  We've already exited
             * and will handle D_FLAG in mb_cpu_do_interrupt.
             */
            break;
        case DISAS_NEXT:
            /*
             * Normal insn in a delay slot.
             * However, the return-from-exception type insns should
             * return to the main loop, as they have adjusted MSR.
             */
            dc->base.is_jmp = (rt_ibe ? DISAS_EXIT_JUMP : DISAS_JUMP);
            break;
        case DISAS_EXIT_NEXT:
            /*
             * E.g. mts insn in a delay slot.  Continue with btarget,
             * but still return to the main loop.
             */
            dc->base.is_jmp = DISAS_EXIT_JUMP;
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        /* We have already exited the TB. */
        return;
    }

    t_sync_flags(dc);

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(dc, 0, dc->base.pc_next);
        return;

    case DISAS_EXIT:
        break;
    case DISAS_EXIT_NEXT:
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
        break;
    case DISAS_EXIT_JUMP:
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        break;

    case DISAS_JUMP:
        if (dc->jmp_dest != -1 && !(tb_cflags(dc->base.tb) & CF_NO_GOTO_TB)) {
            /* Direct jump. */
            tcg_gen_discard_i32(cpu_btarget);

            if (dc->jmp_cond != TCG_COND_ALWAYS) {
                /* Conditional direct jump. */
                TCGLabel *taken = gen_new_label();
                TCGv_i32 tmp = tcg_temp_new_i32();

                /*
                 * Copy bvalue to a temp now, so we can discard bvalue.
                 * This can avoid writing bvalue to memory when the
                 * delay slot cannot raise an exception.
                 */
                tcg_gen_mov_i32(tmp, cpu_bvalue);
                tcg_gen_discard_i32(cpu_bvalue);

                tcg_gen_brcondi_i32(dc->jmp_cond, tmp, 0, taken);
                gen_goto_tb(dc, 1, dc->base.pc_next);
                gen_set_label(taken);
            }
            gen_goto_tb(dc, 0, dc->jmp_dest);
            return;
        }

        /* Indirect jump (or direct jump w/ goto_tb disabled) */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        tcg_gen_lookup_and_goto_ptr();
        return;

    default:
        g_assert_not_reached();
    }

    /* Finish DISAS_EXIT_* */
    if (unlikely(cs->singlestep_enabled)) {
        gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
}

static void mb_tr_disas_log(const DisasContextBase *dcb,
                            CPUState *cs, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcb->pc_first));
    target_disas(logfile, cs, dcb->pc_first, dcb->tb->size);
}

static const TranslatorOps mb_tr_ops = {
    .init_disas_context = mb_tr_init_disas_context,
    .tb_start           = mb_tr_tb_start,
    .insn_start         = mb_tr_insn_start,
    .translate_insn     = mb_tr_translate_insn,
    .tb_stop            = mb_tr_tb_stop,
    .disas_log          = mb_tr_disas_log,
};

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc;
    translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    uint32_t iflags;
    int i;

    qemu_fprintf(f, "pc=0x%08x msr=0x%05x mode=%s(saved=%s) eip=%d ie=%d\n",
                 env->pc, env->msr,
                 (env->msr & MSR_UM) ? "user" : "kernel",
                 (env->msr & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->msr & MSR_EIP),
                 (bool)(env->msr & MSR_IE));

    iflags = env->iflags;
    qemu_fprintf(f, "iflags: 0x%08x", iflags);
    if (iflags & IMM_FLAG) {
        qemu_fprintf(f, " IMM(0x%08x)", env->imm);
    }
    if (iflags & BIMM_FLAG) {
        qemu_fprintf(f, " BIMM");
    }
    if (iflags & D_FLAG) {
        qemu_fprintf(f, " D(btarget=0x%08x)", env->btarget);
    }
    if (iflags & DRTI_FLAG) {
        qemu_fprintf(f, " DRTI");
    }
    if (iflags & DRTE_FLAG) {
        qemu_fprintf(f, " DRTE");
    }
    if (iflags & DRTB_FLAG) {
        qemu_fprintf(f, " DRTB");
    }
    if (iflags & ESR_ESS_FLAG) {
        qemu_fprintf(f, " ESR_ESS(0x%04x)", iflags & ESR_ESS_MASK);
    }

    qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
                 "ear=0x" TARGET_FMT_lx " slr=0x%x shr=0x%x\n",
                 env->esr, env->fsr, env->btr, env->edr,
                 env->ear, env->slr, env->shr);

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%08x%c",
                     i, env->regs[i], i % 4 == 3 ? '\n' : ' ');
    }
    qemu_fprintf(f, "\n");
}

void mb_tcg_init(void)
{
#define R(X)  { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }

    static const struct {
        TCGv_i32 *var; int ofs; char name[8];
    } i32s[] = {
        /*
         * Note that r0 is handled specially in reg_for_read
         * and reg_for_write.  Nothing should touch cpu_R[0].
         * Leave that element NULL, which will assert quickly
         * inside the tcg generator functions.
         */
               R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
        R(8),  R(9),  R(10), R(11), R(12), R(13), R(14), R(15),
        R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
        R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),

        SP(pc),
        SP(msr),
        SP(msr_c),
        SP(imm),
        SP(iflags),
        SP(bvalue),
        SP(btarget),
        SP(res_val),
    };

#undef R
#undef SP

    for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
        *i32s[i].var =
          tcg_global_mem_new_i32(cpu_env, i32s[i].ofs, i32s[i].name);
    }

    cpu_res_addr =
        tcg_global_mem_new(cpu_env, offsetof(CPUMBState, res_addr), "res_addr");
}
1894