1 /*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias.
5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "exec/helper-proto.h"
27 #include "exec/cpu_ldst.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "qemu/qemu-print.h"
31
32 #include "trace-tcg.h"
33 #include "exec/log.h"
34
35 #define EXTRACT_FIELD(src, start, end) \
36 (((src) >> start) & ((1 << (end - start + 1)) - 1))
37
38 /* is_jmp field values */
39 #define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
40 #define DISAS_EXIT DISAS_TARGET_1 /* all cpu state modified dynamically */
41
42 /* cpu state besides pc was modified dynamically; update pc to next */
43 #define DISAS_EXIT_NEXT DISAS_TARGET_2
44 /* cpu state besides pc was modified dynamically; update pc to btarget */
45 #define DISAS_EXIT_JUMP DISAS_TARGET_3
46
/* TCG globals mirroring the architectural CPU state. */
static TCGv_i32 cpu_R[32];          /* general purpose registers */
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_msr;
static TCGv_i32 cpu_msr_c;          /* MSR carry bit, kept separately */
static TCGv_i32 cpu_imm;            /* extended immediate from IMM insn */
static TCGv_i32 cpu_bvalue;
static TCGv_i32 cpu_btarget;        /* delayed-branch target */
static TCGv_i32 cpu_iflags;
static TCGv cpu_res_addr;           /* lwx/swx reservation address */
static TCGv_i32 cpu_res_val;        /* lwx/swx reservation value */
57
58 #include "exec/gen-icount.h"
59
60 /* This is the state at translation time. */
typedef struct DisasContext {
    DisasContextBase base;
    const MicroBlazeCPUConfig *cfg;

    /* TCG op of the current insn_start. */
    TCGOp *insn_start;

    /* Lazily-allocated temp standing in for the always-zero r0. */
    TCGv_i32 r0;
    bool r0_set;

    /* Decoder. */
    /* Upper 16 immediate bits latched by a preceding IMM insn. */
    uint32_t ext_imm;
    unsigned int tb_flags;
    /* Flags to be merged into tb_flags after the current insn. */
    unsigned int tb_flags_to_set;
    int mem_index;

    /* Condition under which to jump, including NEVER and ALWAYS. */
    TCGCond jmp_cond;

    /* Immediate branch-taken destination, or -1 for indirect. */
    uint32_t jmp_dest;
} DisasContext;
83
/*
 * Resolve a type B immediate: when the previous insn was IMM, splice
 * the 16-bit field x into the low half of the latched extended
 * immediate; otherwise use x unchanged.
 */
static int typeb_imm(DisasContext *dc, int x)
{
    return (dc->tb_flags & IMM_FLAG) ? deposit32(dc->ext_imm, 0, 16, x) : x;
}
91
92 /* Include the auto-generated decoder. */
93 #include "decode-insns.c.inc"
94
/*
 * Synchronize the runtime cpu_iflags with the translation-time view.
 * Only emits a store when the flags actually diverged from the value
 * the TB was compiled with.
 */
static void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime. */
    if ((dc->tb_flags ^ dc->base.tb->flags) & IFLAGS_TB_MASK) {
        tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & IFLAGS_TB_MASK);
    }
}
102
/*
 * Emit a call to the raise-exception helper and mark the block as
 * ended: the helper longjmps out, so no code after it is reached.
 */
static void gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->base.is_jmp = DISAS_NORETURN;
}
111
/*
 * As gen_raise_exception, but first commit iflags and the current pc
 * so the exception handler sees consistent state.
 */
static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_raise_exception(dc, index);
}
118
/*
 * Raise a hardware exception with the given ESR exception code.
 * Stores the code into env->esr before taking EXCP_HW_EXCP.
 */
static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
{
    TCGv_i32 tmp = tcg_const_i32(esr_ec);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, esr));
    tcg_temp_free_i32(tmp);

    gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}
127
/*
 * Direct TB chaining is only valid when the destination lies in the
 * same guest page as the start of this TB (pc_first); user-mode
 * emulation has no page-level protection concerns, so always chain.
 */
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->base.pc_first & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
136
/*
 * Jump to an immediate destination, using the fastest mechanism
 * available: a debug exception when single-stepping, direct TB
 * chaining when the target is chainable, otherwise an indirect
 * lookup.  Always terminates the block.
 */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (dc->base.singlestep_enabled) {
        /* Deliver EXCP_DEBUG at the branch target. */
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
        tcg_gen_movi_i32(cpu_pc, dest);
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    dc->base.is_jmp = DISAS_NORETURN;
}
154
155 /*
156 * Returns true if the insn an illegal operation.
157 * If exceptions are enabled, an exception is raised.
158 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    /* Only trap when MSR_EE is set and the core is configured to
       raise illegal-opcode exceptions; otherwise just report.  */
    if (cond && (dc->tb_flags & MSR_EE)
        && dc->cfg->illegal_opcode_exception) {
        gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
    }
    return cond;
}
167
168 /*
169 * Returns true if the insn is illegal in userspace.
170 * If exceptions are enabled, an exception is raised.
171 */
/*
 * Return true if the insn is illegal in userspace (i.e. COND holds and
 * we are translating with the user MMU index).  When so and MSR_EE is
 * set, a privileged-instruction hardware exception is also raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    if (!cond || dc->mem_index != MMU_USER_IDX) {
        return false;
    }
    if (dc->tb_flags & MSR_EE) {
        gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
    }
    return true;
}
181
182 /*
183 * Return true, and log an error, if the current insn is
184 * within a delay slot.
185 */
static bool invalid_delay_slot(DisasContext *dc, const char *insn_type)
{
    /* D_FLAG is set while translating the insn in a delay slot. */
    if (dc->tb_flags & D_FLAG) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid insn in delay slot: %s at %08x\n",
                      insn_type, (uint32_t)dc->base.pc_next);
        return true;
    }
    return false;
}
196
/*
 * Return a TCGv_i32 suitable for reading register REG.  r0 always
 * reads as zero, so a lazily-allocated temp is zeroed on first use
 * per insn (r0_set) and reused thereafter.
 */
static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (!dc->r0_set) {
        if (dc->r0 == NULL) {
            dc->r0 = tcg_temp_new_i32();
        }
        tcg_gen_movi_i32(dc->r0, 0);
        dc->r0_set = true;
    }
    return dc->r0;
}
211
/*
 * Return a TCGv_i32 suitable for writing register REG.  Writes to r0
 * are discarded into the scratch temp; note r0_set is deliberately
 * not touched, so a later read still re-zeroes it.
 */
static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (dc->r0 == NULL) {
        dc->r0 = tcg_temp_new_i32();
    }
    return dc->r0;
}
222
/*
 * Emit a type A (rd = fn(ra, rb)) operation.  When the destination is
 * r0 and the operation has no side effects, it is a nop and nothing
 * is emitted.
 */
static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
                     void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dest, srca, srcb;

    if (!side_effects && arg->rd == 0) {
        return true;
    }

    /* Resolve operands in order; reg_for_* may emit ops for r0. */
    dest = reg_for_write(dc, arg->rd);
    srca = reg_for_read(dc, arg->ra);
    srcb = reg_for_read(dc, arg->rb);
    fn(dest, srca, srcb);
    return true;
}
238
/*
 * Emit a single-source type A (rd = fn(ra)) operation; skipped
 * entirely when it would only write r0 with no side effects.
 */
static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
                      void (*fn)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dest, srca;

    if (!side_effects && arg->rd == 0) {
        return true;
    }

    /* Resolve operands in order; reg_for_* may emit ops for r0. */
    dest = reg_for_write(dc, arg->rd);
    srca = reg_for_read(dc, arg->ra);
    fn(dest, srca);
    return true;
}
253
/*
 * Emit a type B operation whose generator takes the immediate as a
 * host constant (rd = fni(ra, imm)); skipped when it would only
 * write r0 with no side effects.
 */
static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 dest, srca;

    if (!side_effects && arg->rd == 0) {
        return true;
    }

    /* Resolve operands in order; reg_for_* may emit ops for r0. */
    dest = reg_for_write(dc, arg->rd);
    srca = reg_for_read(dc, arg->ra);
    fni(dest, srca, arg->imm);
    return true;
}
268
/*
 * Emit a type B operation whose generator needs the immediate in a
 * TCG value (rd = fn(ra, imm)); skipped when it would only write r0
 * with no side effects.
 */
static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dest, srca, vimm;

    if (!side_effects && arg->rd == 0) {
        return true;
    }

    /* Resolve operands in order; reg_for_* may emit ops for r0. */
    dest = reg_for_write(dc, arg->rd);
    srca = reg_for_read(dc, arg->ra);
    vimm = tcg_const_i32(arg->imm);

    fn(dest, srca, vimm);

    tcg_temp_free_i32(vimm);
    return true;
}
287
/*
 * Instantiate trans_* entry points for the three operand formats.
 * SE marks insns with side effects beyond writing rd (so they must be
 * emitted even when rd == 0); the _CFG variants additionally gate the
 * insn on a CPU configuration predicate.
 */
#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cfg->CFG && do_typea(dc, a, SE, FN); }

#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cfg->CFG && do_typea0(dc, a, SE, FN); }

#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cfg->CFG && do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }

/* Adapt 1- and 2-source helpers that also take cpu_env. */
#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, cpu_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, cpu_env, ina, inb); }
323
324 /* No input carry, but output carry. */
/* No input carry, but output carry. */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);

    /* 64-bit add of zero-extended operands; high half is the carry. */
    tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);

    tcg_temp_free_i32(zero);
}
333
334 /* Input and output carry. */
/* Input and output carry. */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    /*
     * Two chained wide adds: first fold the incoming carry into ina,
     * then add inb, propagating the intermediate carry.  The order is
     * significant because cpu_msr_c is both consumed and rewritten.
     */
    tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(zero);
}
346
347 /* Input carry, but no output carry. */
/* Input carry, but no output carry. */
static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    /* Plain 32-bit adds; cpu_msr_c is read but never updated. */
    tcg_gen_add_i32(out, ina, inb);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}
353
DO_TYPEA(add,true,gen_add)354 DO_TYPEA(add, true, gen_add)
355 DO_TYPEA(addc, true, gen_addc)
356 DO_TYPEA(addk, false, tcg_gen_add_i32)
357 DO_TYPEA(addkc, true, gen_addkc)
358
359 DO_TYPEBV(addi, true, gen_add)
360 DO_TYPEBV(addic, true, gen_addc)
361 DO_TYPEBI(addik, false, tcg_gen_addi_i32)
362 DO_TYPEBV(addikc, true, gen_addkc)
363
/* andni: AND with the bitwise complement of the immediate. */
static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    tcg_gen_andi_i32(out, ina, ~imm);
}
368
DO_TYPEA(and,false,tcg_gen_and_i32)369 DO_TYPEA(and, false, tcg_gen_and_i32)
370 DO_TYPEBI(andi, false, tcg_gen_andi_i32)
371 DO_TYPEA(andn, false, tcg_gen_andc_i32)
372 DO_TYPEBI(andni, false, gen_andni)
373
374 static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
375 {
376 TCGv_i32 tmp = tcg_temp_new_i32();
377 tcg_gen_andi_i32(tmp, inb, 31);
378 tcg_gen_sar_i32(out, ina, tmp);
379 tcg_temp_free_i32(tmp);
380 }
381
/* Barrel shift: logical right by the low 5 bits of INB. */
static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 amount = tcg_temp_new_i32();
    tcg_gen_andi_i32(amount, inb, 31);
    tcg_gen_shr_i32(out, ina, amount);
    tcg_temp_free_i32(amount);
}
389
/* Barrel shift: logical left by the low 5 bits of INB. */
static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 amount = tcg_temp_new_i32();
    tcg_gen_andi_i32(amount, inb, 31);
    tcg_gen_shl_i32(out, ina, amount);
    tcg_temp_free_i32(amount);
}
397
/* bsefi: extract the imm_w-bit field starting at bit imm_s of INA. */
static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);

    if (imm_w + imm_s > 32 || imm_w == 0) {
        /* These inputs have an undefined behavior. */
        qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_extract_i32(out, ina, imm_s, imm_w);
    }
}
412
/*
 * bsifi: insert bits [imm_s, imm_w] of INA into OUT at bit imm_s.
 * Note OUT is read-modify-write: untouched bits are preserved.
 */
static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);
    int width = imm_w - imm_s + 1;

    if (imm_w < imm_s) {
        /* These inputs have an undefined behavior. */
        qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_deposit_i32(out, out, ina, imm_s, width);
    }
}
428
DO_TYPEA_CFG(bsra,use_barrel,false,gen_bsra)429 DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
430 DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
431 DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)
432
433 DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
434 DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
435 DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)
436
437 DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
438 DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)
439
/* Count leading zeros; an all-zero input yields 32. */
static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_clzi_i32(out, ina, 32);
}
444
DO_TYPEA0_CFG(clz,use_pcmp_instr,false,gen_clz)445 DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)
446
/*
 * cmp: out = inb - ina, except bit 31 is replaced with the result of
 * the signed comparison inb < ina.
 */
static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
    tcg_temp_free_i32(lt);
}
456
/*
 * cmpu: out = inb - ina, except bit 31 is replaced with the result of
 * the unsigned comparison inb < ina.
 */
static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
    tcg_temp_free_i32(lt);
}
466
DO_TYPEA(cmp,false,gen_cmp)467 DO_TYPEA(cmp, false, gen_cmp)
468 DO_TYPEA(cmpu, false, gen_cmpu)
469
470 ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
471 ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
472 ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
473 ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
474 ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
475 ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
476 ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
477 ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
478 ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
479 ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
480 ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)
481
482 DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
483 DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
484 DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
485 DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
486 DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
487 DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
488 DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
489 DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
490 DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
491 DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
492 DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)
493
494 ENV_WRAPPER2(gen_flt, gen_helper_flt)
495 ENV_WRAPPER2(gen_fint, gen_helper_fint)
496 ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)
497
498 DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
499 DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
500 DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)
501
502 /* Does not use ENV_WRAPPER3, because arguments are swapped as well. */
/* Does not use ENV_WRAPPER3, because arguments are swapped as well. */
/* idiv: signed divide, out = inb / ina (divisor is the first operand). */
static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divs(out, cpu_env, inb, ina);
}
507
/* idivu: unsigned divide, out = inb / ina (operands swapped as above). */
static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divu(out, cpu_env, inb, ina);
}
512
DO_TYPEA_CFG(idiv,use_div,true,gen_idiv)513 DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
514 DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)
515
/*
 * imm: latch the upper 16 bits for the next type B insn, both at
 * translation time (ext_imm) and runtime (cpu_imm), and arrange for
 * IMM_FLAG to be set for exactly the following insn.
 */
static bool trans_imm(DisasContext *dc, arg_imm *arg)
{
    if (invalid_delay_slot(dc, "imm")) {
        return true;
    }
    dc->ext_imm = arg->imm << 16;
    tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
    dc->tb_flags_to_set = IMM_FLAG;
    return true;
}
526
/* out = high 32 bits of the signed 64-bit product ina * inb. */
static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lo_discard = tcg_temp_new_i32();
    tcg_gen_muls2_i32(lo_discard, out, ina, inb);
    tcg_temp_free_i32(lo_discard);
}
533
/* out = high 32 bits of the unsigned 64-bit product ina * inb. */
static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lo_discard = tcg_temp_new_i32();
    tcg_gen_mulu2_i32(lo_discard, out, ina, inb);
    tcg_temp_free_i32(lo_discard);
}
540
/* out = high 32 bits of signed ina times unsigned inb. */
static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lo_discard = tcg_temp_new_i32();
    tcg_gen_mulsu2_i32(lo_discard, out, ina, inb);
    tcg_temp_free_i32(lo_discard);
}
547
DO_TYPEA_CFG(mul,use_hw_mul,false,tcg_gen_mul_i32)548 DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
549 DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
550 DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
551 DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
552 DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)
553
554 DO_TYPEA(or, false, tcg_gen_or_i32)
555 DO_TYPEBI(ori, false, tcg_gen_ori_i32)
556
/* pcmpeq: out = (ina == inb) ? 1 : 0. */
static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
}
561
/* pcmpne: out = (ina != inb) ? 1 : 0. */
static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
}
566
DO_TYPEA_CFG(pcmpbf,use_pcmp_instr,false,gen_helper_pcmpbf)567 DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
568 DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
569 DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)
570
571 /* No input carry, but output carry. */
/* No input carry, but output carry. */
static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    /* Carry out of inb + ~ina + 1 is exactly (inb >= ina) unsigned. */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
}
577
578 /* Input and output carry. */
/* Input and output carry. */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    /*
     * out = inb + ~ina + carry-in, computed as two chained wide adds
     * so both partial carries are accumulated into cpu_msr_c.  The
     * order matters: cpu_msr_c is consumed and rewritten.
     */
    tcg_gen_not_i32(tmp, ina);
    tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);

    tcg_temp_free_i32(zero);
    tcg_temp_free_i32(tmp);
}
591
592 /* No input or output carry. */
/* No input or output carry. */
static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_sub_i32(out, inb, ina);
}
597
598 /* Input carry, no output carry. */
/* Input carry, no output carry. */
static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 nota = tcg_temp_new_i32();

    /* out = inb + ~ina + carry-in; cpu_msr_c is left untouched. */
    tcg_gen_not_i32(nota, ina);
    tcg_gen_add_i32(out, inb, nota);
    tcg_gen_add_i32(out, out, cpu_msr_c);

    tcg_temp_free_i32(nota);
}
609
DO_TYPEA(rsub,true,gen_rsub)610 DO_TYPEA(rsub, true, gen_rsub)
611 DO_TYPEA(rsubc, true, gen_rsubc)
612 DO_TYPEA(rsubk, false, gen_rsubk)
613 DO_TYPEA(rsubkc, true, gen_rsubkc)
614
615 DO_TYPEBV(rsubi, true, gen_rsub)
616 DO_TYPEBV(rsubic, true, gen_rsubc)
617 DO_TYPEBV(rsubik, false, gen_rsubk)
618 DO_TYPEBV(rsubikc, true, gen_rsubkc)
619
620 DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
621 DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)
622
/* sra: arithmetic shift right by one; bit 0 moves into the carry. */
static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_sari_i32(out, ina, 1);
}
628
/*
 * src: shift right through carry.  The old carry becomes bit 31 of
 * the result (via extract2) and bit 0 of INA becomes the new carry.
 */
static void gen_src(TCGv_i32 out, TCGv_i32 ina)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Save the incoming carry before overwriting cpu_msr_c. */
    tcg_gen_mov_i32(tmp, cpu_msr_c);
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_extract2_i32(out, ina, tmp, 1);

    tcg_temp_free_i32(tmp);
}
639
/* srl: logical shift right by one; bit 0 moves into the carry. */
static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_shri_i32(out, ina, 1);
}
645
DO_TYPEA0(sra,false,gen_sra)646 DO_TYPEA0(sra, false, gen_sra)
647 DO_TYPEA0(src, false, gen_src)
648 DO_TYPEA0(srl, false, gen_srl)
649
/* swaph: exchange the two 16-bit halves (rotate by 16). */
static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_rotri_i32(out, ina, 16);
}
654
DO_TYPEA0(swapb,false,tcg_gen_bswap32_i32)655 DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
656 DO_TYPEA0(swaph, false, gen_swaph)
657
static bool trans_wdic(DisasContext *dc, arg_wdic *a)
{
    /* Cache operations are nops: only check for supervisor mode. */
    trap_userspace(dc, true);
    return true;
}
664
DO_TYPEA(xor,false,tcg_gen_xor_i32)665 DO_TYPEA(xor, false, tcg_gen_xor_i32)
666 DO_TYPEBI(xori, false, tcg_gen_xori_i32)
667
/*
 * Compute the effective address ra + rb for a type A load/store,
 * zero-extended to the target address width.  The caller owns (and
 * must free) the returned temp.
 */
static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
    TCGv ret = tcg_temp_new();

    /* If any of the regs is r0, set t to the value of the other reg. */
    if (ra && rb) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
        tcg_gen_extu_i32_tl(ret, tmp);
        tcg_temp_free_i32(tmp);
    } else if (ra) {
        tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
    } else if (rb) {
        tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
    } else {
        tcg_gen_movi_tl(ret, 0);
    }

    /* Accesses relative to the stack pointer (r1) may be checked. */
    if ((ra == 1 || rb == 1) && dc->cfg->stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}
691
/*
 * Compute the effective address ra + imm for a type B load/store,
 * zero-extended to the target address width.  The caller owns (and
 * must free) the returned temp.
 */
static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
    TCGv ret = tcg_temp_new();

    /* If ra is r0, the address is just the (unsigned) immediate. */
    if (ra) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
        tcg_gen_extu_i32_tl(ret, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        tcg_gen_movi_tl(ret, (uint32_t)imm);
    }

    /* Accesses relative to the stack pointer (r1) may be checked. */
    if (ra == 1 && dc->cfg->stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}
711
712 #ifndef CONFIG_USER_ONLY
compute_ldst_addr_ea(DisasContext * dc,int ra,int rb)713 static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
714 {
715 int addr_size = dc->cfg->addr_size;
716 TCGv ret = tcg_temp_new();
717
718 if (addr_size == 32 || ra == 0) {
719 if (rb) {
720 tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
721 } else {
722 tcg_gen_movi_tl(ret, 0);
723 }
724 } else {
725 if (rb) {
726 tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
727 } else {
728 tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
729 tcg_gen_shli_tl(ret, ret, 32);
730 }
731 if (addr_size < 64) {
732 /* Mask off out of range bits. */
733 tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
734 }
735 }
736 return ret;
737 }
738 #endif
739
record_unaligned_ess(DisasContext * dc,int rd,MemOp size,bool store)740 static void record_unaligned_ess(DisasContext *dc, int rd,
741 MemOp size, bool store)
742 {
743 uint32_t iflags = tcg_get_insn_start_param(dc->insn_start, 1);
744
745 iflags |= ESR_ESS_FLAG;
746 iflags |= rd << 5;
747 iflags |= store * ESR_S;
748 iflags |= (size == MO_32) * ESR_W;
749
750 tcg_set_insn_start_param(dc->insn_start, 1, iflags);
751 }
752
do_load(DisasContext * dc,int rd,TCGv addr,MemOp mop,int mem_index,bool rev)753 static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
754 int mem_index, bool rev)
755 {
756 MemOp size = mop & MO_SIZE;
757
758 /*
759 * When doing reverse accesses we need to do two things.
760 *
761 * 1. Reverse the address wrt endianness.
762 * 2. Byteswap the data lanes on the way back into the CPU core.
763 */
764 if (rev) {
765 if (size > MO_8) {
766 mop ^= MO_BSWAP;
767 }
768 if (size < MO_32) {
769 tcg_gen_xori_tl(addr, addr, 3 - size);
770 }
771 }
772
773 if (size > MO_8 &&
774 (dc->tb_flags & MSR_EE) &&
775 dc->cfg->unaligned_exceptions) {
776 record_unaligned_ess(dc, rd, size, false);
777 mop |= MO_ALIGN;
778 }
779
780 tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);
781
782 tcg_temp_free(addr);
783 return true;
784 }
785
trans_lbu(DisasContext * dc,arg_typea * arg)786 static bool trans_lbu(DisasContext *dc, arg_typea *arg)
787 {
788 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
789 return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
790 }
791
trans_lbur(DisasContext * dc,arg_typea * arg)792 static bool trans_lbur(DisasContext *dc, arg_typea *arg)
793 {
794 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
795 return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
796 }
797
trans_lbuea(DisasContext * dc,arg_typea * arg)798 static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
799 {
800 if (trap_userspace(dc, true)) {
801 return true;
802 }
803 #ifdef CONFIG_USER_ONLY
804 return true;
805 #else
806 TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
807 return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
808 #endif
809 }
810
trans_lbui(DisasContext * dc,arg_typeb * arg)811 static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
812 {
813 TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
814 return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
815 }
816
trans_lhu(DisasContext * dc,arg_typea * arg)817 static bool trans_lhu(DisasContext *dc, arg_typea *arg)
818 {
819 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
820 return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
821 }
822
trans_lhur(DisasContext * dc,arg_typea * arg)823 static bool trans_lhur(DisasContext *dc, arg_typea *arg)
824 {
825 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
826 return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
827 }
828
trans_lhuea(DisasContext * dc,arg_typea * arg)829 static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
830 {
831 if (trap_userspace(dc, true)) {
832 return true;
833 }
834 #ifdef CONFIG_USER_ONLY
835 return true;
836 #else
837 TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
838 return do_load(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
839 #endif
840 }
841
trans_lhui(DisasContext * dc,arg_typeb * arg)842 static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
843 {
844 TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
845 return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
846 }
847
trans_lw(DisasContext * dc,arg_typea * arg)848 static bool trans_lw(DisasContext *dc, arg_typea *arg)
849 {
850 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
851 return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
852 }
853
trans_lwr(DisasContext * dc,arg_typea * arg)854 static bool trans_lwr(DisasContext *dc, arg_typea *arg)
855 {
856 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
857 return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
858 }
859
trans_lwea(DisasContext * dc,arg_typea * arg)860 static bool trans_lwea(DisasContext *dc, arg_typea *arg)
861 {
862 if (trap_userspace(dc, true)) {
863 return true;
864 }
865 #ifdef CONFIG_USER_ONLY
866 return true;
867 #else
868 TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
869 return do_load(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
870 #endif
871 }
872
trans_lwi(DisasContext * dc,arg_typeb * arg)873 static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
874 {
875 TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
876 return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
877 }
878
trans_lwx(DisasContext * dc,arg_typea * arg)879 static bool trans_lwx(DisasContext *dc, arg_typea *arg)
880 {
881 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
882
883 /* lwx does not throw unaligned access errors, so force alignment */
884 tcg_gen_andi_tl(addr, addr, ~3);
885
886 tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL);
887 tcg_gen_mov_tl(cpu_res_addr, addr);
888 tcg_temp_free(addr);
889
890 if (arg->rd) {
891 tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
892 }
893
894 /* No support for AXI exclusive so always clear C */
895 tcg_gen_movi_i32(cpu_msr_c, 0);
896 return true;
897 }
898
do_store(DisasContext * dc,int rd,TCGv addr,MemOp mop,int mem_index,bool rev)899 static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
900 int mem_index, bool rev)
901 {
902 MemOp size = mop & MO_SIZE;
903
904 /*
905 * When doing reverse accesses we need to do two things.
906 *
907 * 1. Reverse the address wrt endianness.
908 * 2. Byteswap the data lanes on the way back into the CPU core.
909 */
910 if (rev) {
911 if (size > MO_8) {
912 mop ^= MO_BSWAP;
913 }
914 if (size < MO_32) {
915 tcg_gen_xori_tl(addr, addr, 3 - size);
916 }
917 }
918
919 if (size > MO_8 &&
920 (dc->tb_flags & MSR_EE) &&
921 dc->cfg->unaligned_exceptions) {
922 record_unaligned_ess(dc, rd, size, true);
923 mop |= MO_ALIGN;
924 }
925
926 tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);
927
928 tcg_temp_free(addr);
929 return true;
930 }
931
trans_sb(DisasContext * dc,arg_typea * arg)932 static bool trans_sb(DisasContext *dc, arg_typea *arg)
933 {
934 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
935 return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
936 }
937
trans_sbr(DisasContext * dc,arg_typea * arg)938 static bool trans_sbr(DisasContext *dc, arg_typea *arg)
939 {
940 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
941 return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
942 }
943
trans_sbea(DisasContext * dc,arg_typea * arg)944 static bool trans_sbea(DisasContext *dc, arg_typea *arg)
945 {
946 if (trap_userspace(dc, true)) {
947 return true;
948 }
949 #ifdef CONFIG_USER_ONLY
950 return true;
951 #else
952 TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
953 return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
954 #endif
955 }
956
trans_sbi(DisasContext * dc,arg_typeb * arg)957 static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
958 {
959 TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
960 return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
961 }
962
trans_sh(DisasContext * dc,arg_typea * arg)963 static bool trans_sh(DisasContext *dc, arg_typea *arg)
964 {
965 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
966 return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
967 }
968
trans_shr(DisasContext * dc,arg_typea * arg)969 static bool trans_shr(DisasContext *dc, arg_typea *arg)
970 {
971 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
972 return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
973 }
974
trans_shea(DisasContext * dc,arg_typea * arg)975 static bool trans_shea(DisasContext *dc, arg_typea *arg)
976 {
977 if (trap_userspace(dc, true)) {
978 return true;
979 }
980 #ifdef CONFIG_USER_ONLY
981 return true;
982 #else
983 TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
984 return do_store(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
985 #endif
986 }
987
trans_shi(DisasContext * dc,arg_typeb * arg)988 static bool trans_shi(DisasContext *dc, arg_typeb *arg)
989 {
990 TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
991 return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
992 }
993
trans_sw(DisasContext * dc,arg_typea * arg)994 static bool trans_sw(DisasContext *dc, arg_typea *arg)
995 {
996 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
997 return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
998 }
999
trans_swr(DisasContext * dc,arg_typea * arg)1000 static bool trans_swr(DisasContext *dc, arg_typea *arg)
1001 {
1002 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
1003 return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
1004 }
1005
/*
 * swea: store word, extended address.  Privileged; uses MMU_NOMMU_IDX,
 * i.e. the access bypasses normal MMU translation.
 */
static bool trans_swea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    /* trap_userspace always traps for user-only; not reached at runtime. */
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}
1018
trans_swi(DisasContext * dc,arg_typeb * arg)1019 static bool trans_swi(DisasContext *dc, arg_typeb *arg)
1020 {
1021 TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
1022 return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
1023 }
1024
/*
 * swx: store word exclusive, the store half of an lwx/swx pair.
 * MSR[C] is cleared on success and set on failure.  The store only
 * succeeds if the address matches the reservation (cpu_res_addr) and
 * the memory word still holds the value loaded by lwx (cpu_res_val);
 * the comparison and store are done as one atomic cmpxchg.
 */
static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    TCGLabel *swx_done = gen_new_label();
    TCGLabel *swx_fail = gen_new_label();
    TCGv_i32 tval;

    /* swx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    /*
     * Compare the address vs the one we used during lwx.
     * On mismatch, the operation fails.  On match, addr dies at the
     * branch, but we know we can use the equal version in the global.
     * In either case, addr is no longer needed.
     */
    tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);
    tcg_temp_free(addr);

    /*
     * Compare the value loaded during lwx with current contents of
     * the reserved location.
     */
    tval = tcg_temp_new_i32();

    tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
                               reg_for_write(dc, arg->rd),
                               dc->mem_index, MO_TEUL);

    /* If the word changed since lwx, the cmpxchg did not store. */
    tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);
    tcg_temp_free_i32(tval);

    /* Success */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    tcg_gen_br(swx_done);

    /* Failure */
    gen_set_label(swx_fail);
    tcg_gen_movi_i32(cpu_msr_c, 1);

    gen_set_label(swx_done);

    /*
     * Prevent the saved address from working again without another ldx.
     * Akin to the pseudocode setting reservation = 0.
     */
    tcg_gen_movi_tl(cpu_res_addr, -1);
    return true;
}
1074
/*
 * Mark the next insn as executing in this branch's delay slot (D_FLAG).
 * For type-b branches with an imm prefix pending, also record BIMM_FLAG
 * so the extended-immediate state survives into the slot insn.
 */
static void setup_dslot(DisasContext *dc, bool type_b)
{
    dc->tb_flags_to_set |= D_FLAG;
    if (type_b && (dc->tb_flags & IMM_FLAG)) {
        dc->tb_flags_to_set |= BIMM_FLAG;
    }
}
1082
/*
 * Common helper for unconditional branches.
 * dest_rb > 0: register destination; dest_rb <= 0: immediate dest_imm
 * (r0 reads as zero, so rb == 0 folds into the constant path).
 * abs: absolute target vs PC-relative; delay: has a delay slot;
 * link != 0: write the return address into r[link].
 */
static bool do_branch(DisasContext *dc, int dest_rb, int dest_imm,
                      bool delay, bool abs, int link)
{
    uint32_t add_pc;

    if (invalid_delay_slot(dc, "branch")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    if (link) {
        tcg_gen_movi_i32(cpu_R[link], dc->base.pc_next);
    }

    /* Store the branch taken destination into btarget. */
    add_pc = abs ? 0 : dc->base.pc_next;
    if (dest_rb > 0) {
        /* Dynamic target: direct-jump optimization is not possible. */
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], add_pc);
    } else {
        dc->jmp_dest = add_pc + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }
    dc->jmp_cond = TCG_COND_ALWAYS;
    return true;
}
1111
/*
 * Expand the register (typea) and immediate (typeb) trans functions
 * for one unconditional branch pair; LINK selects writing rd.
 */
#define DO_BR(NAME, NAMEI, DELAY, ABS, LINK)                               \
    static bool trans_##NAME(DisasContext *dc, arg_typea_br *arg)          \
    { return do_branch(dc, arg->rb, 0, DELAY, ABS, LINK ? arg->rd : 0); }  \
    static bool trans_##NAMEI(DisasContext *dc, arg_typeb_br *arg)         \
    { return do_branch(dc, -1, arg->imm, DELAY, ABS, LINK ? arg->rd : 0); }

DO_BR(br, bri, false, false, false)
DO_BR(bra, brai, false, true, false)
DO_BR(brd, brid, true, false, false)
DO_BR(brad, braid, true, true, false)
DO_BR(brld, brlid, true, false, true)
DO_BR(brald, bralid, true, true, true)
1124
/*
 * Common helper for conditional branches: branch to the target when
 * cond(r[ra], 0) holds, else fall through.  dest_rb > 0 selects a
 * register target, otherwise dest_imm is a PC-relative offset.
 */
static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm,
                   TCGCond cond, int ra, bool delay)
{
    TCGv_i32 zero, next;

    if (invalid_delay_slot(dc, "bcc")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    dc->jmp_cond = cond;

    /* Cache the condition register in cpu_bvalue across any delay slot. */
    tcg_gen_mov_i32(cpu_bvalue, reg_for_read(dc, ra));

    /* Store the branch taken destination into btarget. */
    if (dest_rb > 0) {
        /* Dynamic target: direct-jump optimization is not possible. */
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], dc->base.pc_next);
    } else {
        dc->jmp_dest = dc->base.pc_next + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }

    /*
     * Compute the final destination into btarget: the taken target if
     * the condition holds, else the insn after the (optional) delay slot.
     */
    zero = tcg_const_i32(0);
    next = tcg_const_i32(dc->base.pc_next + (delay + 1) * 4);
    tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget,
                        reg_for_read(dc, ra), zero,
                        cpu_btarget, next);
    tcg_temp_free_i32(zero);
    tcg_temp_free_i32(next);

    return true;
}
1162
/*
 * Expand the four variants of one conditional branch: plain, delay
 * slot ("d"), immediate ("i"), and immediate with delay slot ("id").
 */
#define DO_BCC(NAME, COND)                                              \
    static bool trans_##NAME(DisasContext *dc, arg_typea_bc *arg)       \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, false); }            \
    static bool trans_##NAME##d(DisasContext *dc, arg_typea_bc *arg)    \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, true); }             \
    static bool trans_##NAME##i(DisasContext *dc, arg_typeb_bc *arg)    \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, false); }          \
    static bool trans_##NAME##id(DisasContext *dc, arg_typeb_bc *arg)   \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, true); }

DO_BCC(beq, TCG_COND_EQ)
DO_BCC(bge, TCG_COND_GE)
DO_BCC(bgt, TCG_COND_GT)
DO_BCC(ble, TCG_COND_LE)
DO_BCC(blt, TCG_COND_LT)
DO_BCC(bne, TCG_COND_NE)
1179
/*
 * brk: break, register form.  Privileged.  Jumps to r[rb], optionally
 * links into rd, sets MSR[BIP] and kills any lwx reservation.
 */
static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brk")) {
        return true;
    }

    tcg_gen_mov_i32(cpu_pc, reg_for_read(dc, arg->rb));
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
    /* Invalidate any lwx reservation. */
    tcg_gen_movi_tl(cpu_res_addr, -1);

    /* MSR changed: return to the main loop. */
    dc->base.is_jmp = DISAS_EXIT;
    return true;
}
1199
/*
 * brki: break immediate.  Vectors 0x8 (syscall) and 0x18 (debug) are
 * allowed from userspace; anything else is privileged.  For user-only
 * builds the two legal vectors raise the corresponding exception.
 */
static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != 0x8 && imm != 0x18)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brki")) {
        return true;
    }

    tcg_gen_movi_i32(cpu_pc, imm);
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    /* Invalidate any lwx reservation. */
    tcg_gen_movi_tl(cpu_res_addr, -1);

#ifdef CONFIG_USER_ONLY
    switch (imm) {
    case 0x8:  /* syscall trap */
        gen_raise_exception_sync(dc, EXCP_SYSCALL);
        break;
    case 0x18: /* debug trap */
        gen_raise_exception_sync(dc, EXCP_DEBUG);
        break;
    default:   /* eliminated with trap_userspace check */
        g_assert_not_reached();
    }
#else
    uint32_t msr_to_set = 0;

    if (imm != 0x18) {
        msr_to_set |= MSR_BIP;
    }
    if (imm == 0x8 || imm == 0x18) {
        /* MSR_UM and MSR_VM are in tb_flags, so we know their value. */
        msr_to_set |= (dc->tb_flags & (MSR_UM | MSR_VM)) << 1;
        tcg_gen_andi_i32(cpu_msr, cpu_msr,
                         ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM));
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, msr_to_set);
    dc->base.is_jmp = DISAS_EXIT;
#endif

    return true;
}
1246
/*
 * mbar: memory barrier.  The immediate selects the behavior:
 * bit 1 clear = data barrier, bit 4 = sleep (privileged halt).
 */
static bool trans_mbar(DisasContext *dc, arg_mbar *arg)
{
    int mbar_imm = arg->imm;

    /* Note that mbar is a specialized branch instruction. */
    if (invalid_delay_slot(dc, "mbar")) {
        return true;
    }

    /* Data access memory barrier. */
    if ((mbar_imm & 2) == 0) {
        tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
    }

    /* Sleep. */
    if (mbar_imm & 16) {
        TCGv_i32 tmp_1;

        if (trap_userspace(dc, true)) {
            /* Sleep is a privileged instruction. */
            return true;
        }

        t_sync_flags(dc);

        /* Set cs->halted; cpu_env points at env inside MicroBlazeCPU. */
        tmp_1 = tcg_const_i32(1);
        tcg_gen_st_i32(tmp_1, cpu_env,
                       -offsetof(MicroBlazeCPU, env)
                       +offsetof(CPUState, halted));
        tcg_temp_free_i32(tmp_1);

        /* Resume at the following insn when woken. */
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);

        gen_raise_exception(dc, EXCP_HLT);
    }

    /*
     * If !(mbar_imm & 1), this is an instruction access memory barrier
     * and we need to end the TB so that we recognize self-modified
     * code immediately.
     *
     * However, there are some data mbars that need the TB break
     * (and return to main loop) to recognize interrupts right away.
     * E.g. recognizing a change to an interrupt controller register.
     *
     * Therefore, choose to end the TB always.
     */
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
}
1297
/*
 * Common helper for the return instructions (rtsd/rtid/rtbd/rted).
 * to_set is the DRT?_FLAG recorded in tb_flags so the MSR fixup runs
 * after the delay slot; nonzero to_set also makes the insn privileged.
 * Target is r[ra] + imm; these always have a delay slot.
 */
static bool do_rts(DisasContext *dc, arg_typeb_bc *arg, int to_set)
{
    if (trap_userspace(dc, to_set)) {
        return true;
    }
    if (invalid_delay_slot(dc, "rts")) {
        return true;
    }

    dc->tb_flags_to_set |= to_set;
    setup_dslot(dc, true);

    dc->jmp_cond = TCG_COND_ALWAYS;
    dc->jmp_dest = -1;
    tcg_gen_addi_i32(cpu_btarget, reg_for_read(dc, arg->ra), arg->imm);
    return true;
}
1315
/* Expand one return-from-* trans function around do_rts. */
#define DO_RTS(NAME, IFLAG) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_rts(dc, arg, IFLAG); }

DO_RTS(rtbd, DRTB_FLAG)
DO_RTS(rtid, DRTI_FLAG)
DO_RTS(rted, DRTE_FLAG)
DO_RTS(rtsd, 0)
1324
/* Opcode 0: either an illegal-opcode trap or "add r0, r0, r0". */
static bool trans_zero(DisasContext *dc, arg_zero *arg)
{
    /* If opcode_0_illegal, trap. */
    if (dc->cfg->opcode_0_illegal) {
        trap_illegal(dc, true);
        return true;
    }
    /*
     * Otherwise, this is "add r0, r0, r0".
     * Continue to trans_add so that MSR[C] gets cleared.
     */
    return false;
}
1338
/*
 * Read the architectural MSR into d: merge the separately tracked
 * carry flag (cpu_msr_c, 0 or 1) into both MSR_C and its MSR_CC copy.
 */
static void msr_read(DisasContext *dc, TCGv_i32 d)
{
    TCGv_i32 t;

    /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
    t = tcg_temp_new_i32();
    tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
    tcg_gen_or_i32(d, cpu_msr, t);
    tcg_temp_free_i32(t);
}
1349
/*
 * Common helper for msrclr/msrset: rd (if nonzero) receives the MSR
 * value *before* modification, then the imm bits are cleared or set.
 * Userspace may only touch MSR_C; anything else traps.
 */
static bool do_msrclrset(DisasContext *dc, arg_type_msr *arg, bool set)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != MSR_C)) {
        return true;
    }

    if (arg->rd) {
        msr_read(dc, cpu_R[arg->rd]);
    }

    /*
     * Handle the carry bit separately.
     * This is the only bit that userspace can modify.
     */
    if (imm & MSR_C) {
        tcg_gen_movi_i32(cpu_msr_c, set);
    }

    /*
     * MSR_C and MSR_CC set above.
     * MSR_PVR is not writable, and is always clear.
     */
    imm &= ~(MSR_C | MSR_CC | MSR_PVR);

    if (imm != 0) {
        if (set) {
            tcg_gen_ori_i32(cpu_msr, cpu_msr, imm);
        } else {
            tcg_gen_andi_i32(cpu_msr, cpu_msr, ~imm);
        }
        /* MSR changed: end the TB and return to the main loop. */
        dc->base.is_jmp = DISAS_EXIT_NEXT;
    }
    return true;
}
1386
/* msrclr: clear the MSR bits selected by imm. */
static bool trans_msrclr(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, false);
}
1391
/* msrset: set the MSR bits selected by imm. */
static bool trans_msrset(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, true);
}
1396
/*
 * mts: move to special register.  Privileged.  arg->e selects the
 * extended (TLBLO-only) form; MMU registers go through the mmu_write
 * helper.  Ends the TB since system state may have changed.
 */
static bool trans_mts(DisasContext *dc, arg_mts *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }

#ifdef CONFIG_USER_ONLY
    /* trap_userspace above always traps for user-only builds. */
    g_assert_not_reached();
#else
    if (arg->e && arg->rs != 0x1003) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid extended mts reg 0x%x\n", arg->rs);
        return true;
    }

    TCGv_i32 src = reg_for_read(dc, arg->ra);
    switch (arg->rs) {
    case SR_MSR:
        /* Install MSR_C. */
        tcg_gen_extract_i32(cpu_msr_c, src, 2, 1);
        /*
         * Clear MSR_C and MSR_CC;
         * MSR_PVR is not writable, and is always clear.
         */
        tcg_gen_andi_i32(cpu_msr, src, ~(MSR_C | MSR_CC | MSR_PVR));
        break;
    case SR_FSR:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, fsr));
        break;
    case 0x800: /* SLR */
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, slr));
        break;
    case 0x802: /* SHR */
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, shr));
        break;

    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_const_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_const_i32(arg->rs & 7);

            gen_helper_mmu_write(cpu_env, tmp_ext, tmp_reg, src);
            tcg_temp_free_i32(tmp_reg);
            tcg_temp_free_i32(tmp_ext);
        }
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mts reg 0x%x\n", arg->rs);
        return true;
    }
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
#endif
}
1457
/*
 * mfs: move from special register into rd.  arg->e selects the
 * extended form (high half of EAR, TLBLO, high PVR6-9).
 */
static bool trans_mfs(DisasContext *dc, arg_mfs *arg)
{
    TCGv_i32 dest = reg_for_write(dc, arg->rd);

    if (arg->e) {
        switch (arg->rs) {
        case SR_EAR:
            {
                /* Extended form reads the high 32 bits of the 64-bit EAR. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
                tcg_gen_extrh_i64_i32(dest, t64);
                tcg_temp_free_i64(t64);
            }
            return true;
#ifndef CONFIG_USER_ONLY
        case 0x1003: /* TLBLO */
            /* Handled below. */
            break;
#endif
        case 0x2006 ... 0x2009:
            /* High bits of PVR6-9 not implemented. */
            tcg_gen_movi_i32(dest, 0);
            return true;
        default:
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid extended mfs reg 0x%x\n", arg->rs);
            return true;
        }
    }

    switch (arg->rs) {
    case SR_PC:
        tcg_gen_movi_i32(dest, dc->base.pc_next);
        break;
    case SR_MSR:
        msr_read(dc, dest);
        break;
    case SR_EAR:
        {
            /* Non-extended form reads the low 32 bits of EAR. */
            TCGv_i64 t64 = tcg_temp_new_i64();
            tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
            tcg_gen_extrl_i64_i32(dest, t64);
            tcg_temp_free_i64(t64);
        }
        break;
    case SR_ESR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, esr));
        break;
    case SR_FSR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, fsr));
        break;
    case SR_BTR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, btr));
        break;
    case SR_EDR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, edr));
        break;
    case 0x800: /* SLR */
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, slr));
        break;
    case 0x802: /* SHR */
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, shr));
        break;

#ifndef CONFIG_USER_ONLY
    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_const_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_const_i32(arg->rs & 7);

            gen_helper_mmu_read(dest, cpu_env, tmp_ext, tmp_reg);
            tcg_temp_free_i32(tmp_reg);
            tcg_temp_free_i32(tmp_ext);
        }
        break;
#endif

    case 0x2000 ... 0x200c:
        /* PVR0-12 live in the CPU config, outside CPUMBState. */
        tcg_gen_ld_i32(dest, cpu_env,
                       offsetof(MicroBlazeCPU, cfg.pvr_regs[arg->rs - 0x2000])
                       - offsetof(MicroBlazeCPU, env));
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mfs reg 0x%x\n", arg->rs);
        break;
    }
    return true;
}
1551
/*
 * Return-from-interrupt MSR fixup: restore UM/VM from the saved
 * UMS/VMS copies (one bit position above, hence the shift) and
 * re-enable interrupts.
 */
static void do_rti(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_IE);
    tcg_gen_andi_i32(tmp, tmp, MSR_VM | MSR_UM);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);

    tcg_temp_free_i32(tmp);
}
1564
/*
 * Return-from-break MSR fixup: restore UM/VM from the saved UMS/VMS
 * copies and clear break-in-progress.
 */
static void do_rtb(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_BIP));
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);

    tcg_temp_free_i32(tmp);
}
1576
/*
 * Return-from-exception MSR fixup: restore UM/VM from the saved
 * UMS/VMS copies, re-enable exceptions and clear exception-in-progress.
 */
static void do_rte(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_EE);
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_EIP));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);

    tcg_temp_free_i32(tmp);
}
1589
1590 /* Insns connected to FSL or AXI stream attached devices. */
/*
 * Common helper for get/getd: read from an FSL/AXI-stream port into rd.
 * The port id comes from rb (masked to 4 bits) or the immediate;
 * ctrl carries the instruction's control/atomic modifier bits.
 * Privileged.
 */
static bool do_get(DisasContext *dc, int rd, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_const_i32(ctrl);
    gen_helper_get(reg_for_write(dc, rd), t_id, t_ctrl);
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
    return true;
}
1612
/* get: stream read, port selected by immediate. */
static bool trans_get(DisasContext *dc, arg_get *arg)
{
    return do_get(dc, arg->rd, 0, arg->imm, arg->ctrl);
}
1617
/* getd: stream read, port selected dynamically by rb. */
static bool trans_getd(DisasContext *dc, arg_getd *arg)
{
    return do_get(dc, arg->rd, arg->rb, 0, arg->ctrl);
}
1622
/*
 * Common helper for put/putd: write r[ra] to an FSL/AXI-stream port.
 * The port id comes from rb (masked to 4 bits) or the immediate;
 * ctrl carries the instruction's control/atomic modifier bits.
 * Privileged.
 */
static bool do_put(DisasContext *dc, int ra, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_const_i32(ctrl);
    gen_helper_put(t_id, t_ctrl, reg_for_read(dc, ra));
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
    return true;
}
1644
/* put: stream write, port selected by immediate. */
static bool trans_put(DisasContext *dc, arg_put *arg)
{
    return do_put(dc, arg->ra, 0, arg->imm, arg->ctrl);
}
1649
/* putd: stream write, port selected dynamically by rb. */
static bool trans_putd(DisasContext *dc, arg_putd *arg)
{
    return do_put(dc, arg->ra, arg->rb, 0, arg->ctrl);
}
1654
/* Translator hook: set up per-TB translation state from the TB flags. */
static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    int bound;

    dc->cfg = &cpu->cfg;
    dc->tb_flags = dc->base.tb->flags;
    dc->ext_imm = dc->base.tb->cs_base;
    dc->r0 = NULL;
    dc->r0_set = false;
    dc->mem_index = cpu_mmu_index(&cpu->env, false);
    /* If we start inside a delay slot, the branch is already pending. */
    dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
    dc->jmp_dest = -1;

    /* Bound the TB to the remainder of the page (4 bytes per insn). */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
1673
/* Translator hook: nothing to emit at the start of a TB. */
static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
{
}
1677
/*
 * Translator hook: record pc and (non-MSR) iflags for this insn so
 * restore_state_to_opc can recover them on exception.
 */
static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK);
    dc->insn_start = tcg_last_op();
}
1685
/* Translator hook: raise EXCP_DEBUG when a breakpoint is hit. */
static bool mb_tr_breakpoint_check(DisasContextBase *dcb, CPUState *cs,
                                   const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    gen_raise_exception_sync(dc, EXCP_DEBUG);

    /*
     * The address covered by the breakpoint must be included in
     * [tb->pc, tb->pc + tb->size) in order to for it to be
     * properly cleared -- thus we increment the PC here so that
     * the logic setting tb->size below does the right thing.
     */
    dc->base.pc_next += 4;
    return true;
}
1702
/*
 * Translator hook: decode and translate one insn, then handle
 * delay-slot completion (return-from-* MSR fixups and ending the TB).
 */
static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    CPUMBState *env = cs->env_ptr;
    uint32_t ir;

    /* TODO: This should raise an exception, not terminate qemu. */
    if (dc->base.pc_next & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
                  (uint32_t)dc->base.pc_next);
    }

    dc->tb_flags_to_set = 0;

    ir = cpu_ldl_code(env, dc->base.pc_next);
    if (!decode(dc, ir)) {
        trap_illegal(dc, true);
    }

    /* Release the lazily-created r0 temporary, if any. */
    if (dc->r0) {
        tcg_temp_free_i32(dc->r0);
        dc->r0 = NULL;
        dc->r0_set = false;
    }

    /* Discard the imm global when its contents cannot be used. */
    if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
        tcg_gen_discard_i32(cpu_imm);
    }

    /* Single-insn flags expire now; flags this insn raised take effect. */
    dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);
    dc->tb_flags |= dc->tb_flags_to_set;
    dc->base.pc_next += 4;

    if (dc->jmp_cond != TCG_COND_NEVER && !(dc->tb_flags & D_FLAG)) {
        /*
         * Finish any return-from branch.
         */
        uint32_t rt_ibe = dc->tb_flags & (DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
        if (unlikely(rt_ibe != 0)) {
            dc->tb_flags &= ~(DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
            if (rt_ibe & DRTI_FLAG) {
                do_rti(dc);
            } else if (rt_ibe & DRTB_FLAG) {
                do_rtb(dc);
            } else {
                do_rte(dc);
            }
        }

        /* Complete the branch, ending the TB. */
        switch (dc->base.is_jmp) {
        case DISAS_NORETURN:
            /*
             * E.g. illegal insn in a delay slot.  We've already exited
             * and will handle D_FLAG in mb_cpu_do_interrupt.
             */
            break;
        case DISAS_NEXT:
            /*
             * Normal insn in a delay slot.
             * However, the return-from-exception type insns should
             * return to the main loop, as they have adjusted MSR.
             */
            dc->base.is_jmp = (rt_ibe ? DISAS_EXIT_JUMP : DISAS_JUMP);
            break;
        case DISAS_EXIT_NEXT:
            /*
             * E.g. mts insn in a delay slot.  Continue with btarget,
             * but still return to the main loop.
             */
            dc->base.is_jmp = DISAS_EXIT_JUMP;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
1781
/*
 * Translator hook: emit the TB epilogue according to is_jmp --
 * chained goto_tb for static targets, btarget-based jumps for dynamic
 * ones, or a full exit to the main loop when cpu state changed.
 */
static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        /* We have already exited the TB. */
        return;
    }

    t_sync_flags(dc);

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(dc, 0, dc->base.pc_next);
        return;

    case DISAS_EXIT:
        break;
    case DISAS_EXIT_NEXT:
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
        break;
    case DISAS_EXIT_JUMP:
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        break;

    case DISAS_JUMP:
        if (dc->jmp_dest != -1 && !cs->singlestep_enabled) {
            /* Direct jump. */
            tcg_gen_discard_i32(cpu_btarget);

            if (dc->jmp_cond != TCG_COND_ALWAYS) {
                /* Conditional direct jump. */
                TCGLabel *taken = gen_new_label();
                TCGv_i32 tmp = tcg_temp_new_i32();

                /*
                 * Copy bvalue to a temp now, so we can discard bvalue.
                 * This can avoid writing bvalue to memory when the
                 * delay slot cannot raise an exception.
                 */
                tcg_gen_mov_i32(tmp, cpu_bvalue);
                tcg_gen_discard_i32(cpu_bvalue);

                tcg_gen_brcondi_i32(dc->jmp_cond, tmp, 0, taken);
                gen_goto_tb(dc, 1, dc->base.pc_next);
                gen_set_label(taken);
            }
            gen_goto_tb(dc, 0, dc->jmp_dest);
            return;
        }

        /* Indirect jump (or direct jump w/ singlestep) */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);

        if (unlikely(cs->singlestep_enabled)) {
            gen_raise_exception(dc, EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        return;

    default:
        g_assert_not_reached();
    }

    /* Finish DISAS_EXIT_* */
    if (unlikely(cs->singlestep_enabled)) {
        gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
}
1856
/* Translator hook: log the guest disassembly of this TB. */
static void mb_tr_disas_log(const DisasContextBase *dcb, CPUState *cs)
{
    qemu_log("IN: %s\n", lookup_symbol(dcb->pc_first));
    log_target_disas(cs, dcb->pc_first, dcb->tb->size);
}
1862
/* Hooks consumed by the generic translator_loop. */
static const TranslatorOps mb_tr_ops = {
    .init_disas_context = mb_tr_init_disas_context,
    .tb_start           = mb_tr_tb_start,
    .insn_start         = mb_tr_insn_start,
    .breakpoint_check   = mb_tr_breakpoint_check,
    .translate_insn     = mb_tr_translate_insn,
    .tb_stop            = mb_tr_tb_stop,
    .disas_log          = mb_tr_disas_log,
};
1872
/* Entry point from common code: translate one TB. */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;
    translator_loop(&mb_tr_ops, &dc.base, cpu, tb, max_insns);
}
1878
/* Dump CPU state (pc, msr, iflags, special and general registers) to f. */
void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    uint32_t iflags;
    int i;

    qemu_fprintf(f, "pc=0x%08x msr=0x%05x mode=%s(saved=%s) eip=%d ie=%d\n",
                 env->pc, env->msr,
                 (env->msr & MSR_UM) ? "user" : "kernel",
                 (env->msr & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->msr & MSR_EIP),
                 (bool)(env->msr & MSR_IE));

    /* Decode the translation-time iflags into readable tags. */
    iflags = env->iflags;
    qemu_fprintf(f, "iflags: 0x%08x", iflags);
    if (iflags & IMM_FLAG) {
        qemu_fprintf(f, " IMM(0x%08x)", env->imm);
    }
    if (iflags & BIMM_FLAG) {
        qemu_fprintf(f, " BIMM");
    }
    if (iflags & D_FLAG) {
        qemu_fprintf(f, " D(btarget=0x%08x)", env->btarget);
    }
    if (iflags & DRTI_FLAG) {
        qemu_fprintf(f, " DRTI");
    }
    if (iflags & DRTE_FLAG) {
        qemu_fprintf(f, " DRTE");
    }
    if (iflags & DRTB_FLAG) {
        qemu_fprintf(f, " DRTB");
    }
    if (iflags & ESR_ESS_FLAG) {
        qemu_fprintf(f, " ESR_ESS(0x%04x)", iflags & ESR_ESS_MASK);
    }

    qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
                 "ear=0x" TARGET_FMT_lx " slr=0x%x shr=0x%x\n",
                 env->esr, env->fsr, env->btr, env->edr,
                 env->ear, env->slr, env->shr);

    /* General registers, four per line. */
    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%08x%c",
                     i, env->regs[i], i % 4 == 3 ? '\n' : ' ');
    }
    qemu_fprintf(f, "\n");
}
1928
/* One-time TCG setup: create the global TCG variables backed by env. */
void mb_tcg_init(void)
{
#define R(X)  { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }

    static const struct {
        TCGv_i32 *var; int ofs; char name[8];
    } i32s[] = {
        /*
         * Note that r0 is handled specially in reg_for_read
         * and reg_for_write.  Nothing should touch cpu_R[0].
         * Leave that element NULL, which will assert quickly
         * inside the tcg generator functions.
         */
               R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
        R(8),  R(9),  R(10), R(11), R(12), R(13), R(14), R(15),
        R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
        R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),

        SP(pc),
        SP(msr),
        SP(msr_c),
        SP(imm),
        SP(iflags),
        SP(bvalue),
        SP(btarget),
        SP(res_val),
    };

#undef R
#undef SP

    for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
        *i32s[i].var =
            tcg_global_mem_new_i32(cpu_env, i32s[i].ofs, i32s[i].name);
    }

    /* res_addr is target_ulong-sized, hence created separately. */
    cpu_res_addr =
        tcg_global_mem_new(cpu_env, offsetof(CPUMBState, res_addr), "res_addr");
}
1969
/*
 * Restore env from the per-insn data recorded by mb_tr_insn_start:
 * data[0] is pc, data[1] is the (non-MSR) iflags.
 */
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
    env->iflags = data[1];
}
1976