xref: /qemu/target/openrisc/translate.c (revision 52f2b896)
1 /*
2  * OpenRISC translation
3  *
4  * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
5  *                         Feng Gao <gf91597@gmail.com>
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "disas/disas.h"
25 #include "tcg-op.h"
26 #include "qemu-common.h"
27 #include "qemu/log.h"
28 #include "qemu/bitops.h"
29 #include "qemu/qemu-print.h"
30 #include "exec/cpu_ldst.h"
31 #include "exec/translator.h"
32 
33 #include "exec/helper-proto.h"
34 #include "exec/helper-gen.h"
35 #include "exec/gen-icount.h"
36 
37 #include "trace-tcg.h"
38 #include "exec/log.h"
39 
/* is_jmp field values */
#define DISAS_EXIT    DISAS_TARGET_0  /* force exit to main loop */
#define DISAS_JUMP    DISAS_TARGET_1  /* exit via jmp_pc/jmp_pc_imm */

/* Per-translation-block decoder state.  */
typedef struct DisasContext {
    DisasContextBase base;
    uint32_t mem_idx;           /* MMU index used for all loads/stores */
    uint32_t tb_flags;          /* SR/TB flag bits frozen at TB start */
    uint32_t delayed_branch;    /* set to 2 by branch insns; nonzero while a
                                   delay-slot branch is pending */

    /* If not -1, jmp_pc contains this value and so is a direct jump.  */
    target_ulong jmp_pc_imm;
} DisasContext;
53 
/* Return true when translating user-mode code.  User-only builds are
   always user mode; otherwise test the supervisor bit in the TB flags.  */
static inline bool is_user(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
    return true;
#else
    return !(dc->tb_flags & TB_FLAGS_SM);
#endif
}
62 
63 /* Include the auto-generated decoder.  */
64 #include "decode.inc.c"
65 
/* TCG globals bound to CPUOpenRISCState fields in openrisc_translate_init.  */
static TCGv cpu_sr;             /* supervision register */
static TCGv cpu_R[32];          /* GPRs; cpu_R[0] may be retargeted, see below */
static TCGv cpu_R0;             /* the real architectural r0 (check_r0_write) */
static TCGv cpu_pc;
static TCGv jmp_pc;            /* l.jr/l.jalr temp pc */
static TCGv cpu_ppc;            /* previous pc */
static TCGv cpu_sr_f;           /* bf/bnf, F flag taken */
static TCGv cpu_sr_cy;          /* carry (unsigned overflow) */
static TCGv cpu_sr_ov;          /* signed overflow */
static TCGv cpu_lock_addr;      /* l.lwa/l.swa linked address (-1 = none) */
static TCGv cpu_lock_value;     /* value observed by the last l.lwa */
static TCGv_i32 fpcsr;
static TCGv_i64 cpu_mac;        /* MACHI:MACLO */
static TCGv_i32 cpu_dflag;
80 
/* Allocate the TCG globals above, binding each one to its backing field
   in CPUOpenRISCState.  The GPRs map onto shadow register set 0.  */
void openrisc_translate_init(void)
{
    static const char * const regnames[] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
    };
    int i;

    cpu_sr = tcg_global_mem_new(cpu_env,
                                offsetof(CPUOpenRISCState, sr), "sr");
    cpu_dflag = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUOpenRISCState, dflag),
                                       "dflag");
    cpu_pc = tcg_global_mem_new(cpu_env,
                                offsetof(CPUOpenRISCState, pc), "pc");
    cpu_ppc = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUOpenRISCState, ppc), "ppc");
    jmp_pc = tcg_global_mem_new(cpu_env,
                                offsetof(CPUOpenRISCState, jmp_pc), "jmp_pc");
    cpu_sr_f = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUOpenRISCState, sr_f), "sr_f");
    cpu_sr_cy = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUOpenRISCState, sr_cy), "sr_cy");
    cpu_sr_ov = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUOpenRISCState, sr_ov), "sr_ov");
    cpu_lock_addr = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUOpenRISCState, lock_addr),
                                       "lock_addr");
    cpu_lock_value = tcg_global_mem_new(cpu_env,
                                        offsetof(CPUOpenRISCState, lock_value),
                                        "lock_value");
    fpcsr = tcg_global_mem_new_i32(cpu_env,
                                   offsetof(CPUOpenRISCState, fpcsr),
                                   "fpcsr");
    cpu_mac = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUOpenRISCState, mac),
                                     "mac");
    for (i = 0; i < 32; i++) {
        cpu_R[i] = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUOpenRISCState,
                                               shadow_gpr[0][i]),
                                      regnames[i]);
    }
    /* Remember the true r0 so check_r0_write can restore it.  */
    cpu_R0 = cpu_R[0];
}
128 
/* Emit a call raising exception EXCP from the current insn.  */
static void gen_exception(DisasContext *dc, unsigned int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

/* Raise the illegal-instruction exception at the current pc and
   terminate the TB.  */
static void gen_illegal_exception(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
    gen_exception(dc, EXCP_ILLEGAL);
    dc->base.is_jmp = DISAS_NORETURN;
}
142 
143 /* not used yet, open it when we need or64.  */
144 /*#ifdef TARGET_OPENRISC64
145 static void check_ob64s(DisasContext *dc)
146 {
147     if (!(dc->flags & CPUCFGR_OB64S)) {
148         gen_illegal_exception(dc);
149     }
150 }
151 
152 static void check_of64s(DisasContext *dc)
153 {
154     if (!(dc->flags & CPUCFGR_OF64S)) {
155         gen_illegal_exception(dc);
156     }
157 }
158 
159 static void check_ov64s(DisasContext *dc)
160 {
161     if (!(dc->flags & CPUCFGR_OV64S)) {
162         gen_illegal_exception(dc);
163     }
164 }
165 #endif*/
166 
/* We're about to write to REG.  On the off-chance that the user is
   writing to R0, re-instate the architectural register.  (cpu_R[0] can
   have been retargeted to a temporary when the TB assumes r0 == 0;
   see the comment in trans_l_swa.)  */
#define check_r0_write(reg)             \
    do {                                \
        if (unlikely(reg == 0)) {       \
            cpu_R[0] = cpu_R0;          \
        }                               \
    } while (0)
175 
176 static void gen_ove_cy(DisasContext *dc)
177 {
178     if (dc->tb_flags & SR_OVE) {
179         gen_helper_ove_cy(cpu_env);
180     }
181 }
182 
183 static void gen_ove_ov(DisasContext *dc)
184 {
185     if (dc->tb_flags & SR_OVE) {
186         gen_helper_ove_ov(cpu_env);
187     }
188 }
189 
190 static void gen_ove_cyov(DisasContext *dc)
191 {
192     if (dc->tb_flags & SR_OVE) {
193         gen_helper_ove_cyov(cpu_env);
194     }
195 }
196 
/* dest = srca + srcb, computing SR[CY] (carry out) and SR[OV] (signed
   overflow).  The sum goes into a temporary so DEST may alias a source.
   OV = (res ^ srcb) & ~(srca ^ srcb): operands agreed in sign, result
   sign differs.  */
static void gen_add(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_const_tl(0);
    TCGv res = tcg_temp_new();

    /* add2 yields the carry-out directly in cpu_sr_cy.  */
    tcg_gen_add2_tl(res, cpu_sr_cy, srca, t0, srcb, t0);
    tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
    tcg_gen_xor_tl(t0, res, srcb);
    tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
    tcg_temp_free(t0);

    tcg_gen_mov_tl(dest, res);
    tcg_temp_free(res);

    /* Raise the range exception now, if enabled.  */
    gen_ove_cyov(dc);
}
213 
/* dest = srca + srcb + SR[CY], updating SR[CY] and SR[OV] as in
   gen_add.  The carry-in is folded in with a first add2; the second
   add2 accumulates srcb and merges the two partial carries.  */
static void gen_addc(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_const_tl(0);
    TCGv res = tcg_temp_new();

    tcg_gen_add2_tl(res, cpu_sr_cy, srca, t0, cpu_sr_cy, t0);
    tcg_gen_add2_tl(res, cpu_sr_cy, res, cpu_sr_cy, srcb, t0);
    tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
    tcg_gen_xor_tl(t0, res, srcb);
    tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
    tcg_temp_free(t0);

    tcg_gen_mov_tl(dest, res);
    tcg_temp_free(res);

    gen_ove_cyov(dc);
}
231 
/* dest = srca - srcb, computing SR[CY] (borrow: srca < srcb unsigned)
   and SR[OV] = (res ^ srcb) & (srca ^ srcb).  cpu_sr_cy is used as a
   scratch for the OV computation before being given its final value.  */
static void gen_sub(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv res = tcg_temp_new();

    tcg_gen_sub_tl(res, srca, srcb);
    tcg_gen_xor_tl(cpu_sr_cy, srca, srcb);
    tcg_gen_xor_tl(cpu_sr_ov, res, srcb);
    tcg_gen_and_tl(cpu_sr_ov, cpu_sr_ov, cpu_sr_cy);
    tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_cy, srca, srcb);

    tcg_gen_mov_tl(dest, res);
    tcg_temp_free(res);

    gen_ove_cyov(dc);
}
247 
/* Signed multiply: dest = srca * srcb, setting SR[OV] when the product
   does not fit.  Overflow iff the high half of the widening multiply
   differs from the sign extension of the low half.  The neg turns the
   0/1 setcond result into the 0/-1 form used by the flag producers.  */
static void gen_mul(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_muls2_tl(dest, cpu_sr_ov, srca, srcb);
    tcg_gen_sari_tl(t0, dest, TARGET_LONG_BITS - 1);
    tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_ov, cpu_sr_ov, t0);
    tcg_temp_free(t0);

    tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);
    gen_ove_ov(dc);
}
260 
261 static void gen_mulu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
262 {
263     tcg_gen_muls2_tl(dest, cpu_sr_cy, srca, srcb);
264     tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_cy, cpu_sr_cy, 0);
265 
266     gen_ove_cy(dc);
267 }
268 
/* Signed divide: dest = srca / srcb, with SR[OV] flagging division by
   zero (normalized to 0/-1 by the final neg).  */
static void gen_div(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_ov, srcb, 0);
    /* The result of divide-by-zero is undefined.
       Suppress the host-side exception by dividing by 1.  */
    tcg_gen_or_tl(t0, srcb, cpu_sr_ov);
    tcg_gen_div_tl(dest, srca, t0);
    tcg_temp_free(t0);

    tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);
    gen_ove_ov(dc);
}
283 
/* Unsigned divide: dest = srca / srcb, with SR[CY] flagging division
   by zero.  */
static void gen_divu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_cy, srcb, 0);
    /* The result of divide-by-zero is undefined.
       Suppress the host-side exception by dividing by 1.  */
    tcg_gen_or_tl(t0, srcb, cpu_sr_cy);
    tcg_gen_divu_tl(dest, srca, t0);
    tcg_temp_free(t0);

    gen_ove_cy(dc);
}
297 
/* Signed widening multiply into the 64-bit MAC accumulator.  For a
   32-bit target the 64-bit product cannot overflow, so OV is simply
   cleared; otherwise overflow is detected as in gen_mul.  */
static void gen_muld(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    if (TARGET_LONG_BITS == 32) {
        tcg_gen_mul_i64(cpu_mac, t1, t2);
        tcg_gen_movi_tl(cpu_sr_ov, 0);
    } else {
        TCGv_i64 high = tcg_temp_new_i64();

        tcg_gen_muls2_i64(cpu_mac, high, t1, t2);
        tcg_gen_sari_i64(t1, cpu_mac, 63);
        tcg_gen_setcond_i64(TCG_COND_NE, t1, t1, high);
        tcg_temp_free_i64(high);
        tcg_gen_trunc_i64_tl(cpu_sr_ov, t1);
        tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);

        gen_ove_ov(dc);
    }
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}
323 
/* Unsigned widening multiply into the MAC accumulator; CY is set when
   the 128-bit product's high half is non-zero (64-bit targets only —
   a 32x32 product always fits in 64 bits).  */
static void gen_muldu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    if (TARGET_LONG_BITS == 32) {
        tcg_gen_mul_i64(cpu_mac, t1, t2);
        tcg_gen_movi_tl(cpu_sr_cy, 0);
    } else {
        TCGv_i64 high = tcg_temp_new_i64();

        tcg_gen_mulu2_i64(cpu_mac, high, t1, t2);
        tcg_gen_setcondi_i64(TCG_COND_NE, high, high, 0);
        tcg_gen_trunc_i64_tl(cpu_sr_cy, high);
        tcg_temp_free_i64(high);

        gen_ove_cy(dc);
    }
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}
347 
/* Multiply-accumulate: mac += (i64)srca * (i64)srcb, with SR[OV]
   reflecting signed overflow of the 64-bit addition (the xor/andc
   pattern mirrors gen_add).  */
static void gen_mac(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during addition stage.  */
    tcg_gen_xor_i64(t2, cpu_mac, t1);
    tcg_gen_add_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_xor_i64(t1, t1, cpu_mac);
    tcg_gen_andc_i64(t1, t1, t2);
    tcg_temp_free_i64(t2);

#if TARGET_LONG_BITS == 32
    /* The sign (overflow) bit lives in the high half of t1.  */
    tcg_gen_extrh_i64_i32(cpu_sr_ov, t1);
#else
    tcg_gen_mov_i64(cpu_sr_ov, t1);
#endif
    tcg_temp_free_i64(t1);

    gen_ove_ov(dc);
}
373 
/* Unsigned multiply-accumulate: mac += (u64)srca * (u64)srcb, with
   SR[CY] set when the 64-bit addition wraps (new mac < addend).  */
static void gen_macu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);
    tcg_temp_free_i64(t2);

    /* Note that overflow is only computed during addition stage.  */
    tcg_gen_add_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_setcond_i64(TCG_COND_LTU, t1, cpu_mac, t1);
    tcg_gen_trunc_i64_tl(cpu_sr_cy, t1);
    tcg_temp_free_i64(t1);

    gen_ove_cy(dc);
}
392 
/* Multiply-subtract: mac -= (i64)srca * (i64)srcb, with SR[OV]
   reflecting signed overflow of the 64-bit subtraction.  */
static void gen_msb(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during subtraction stage.  */
    tcg_gen_xor_i64(t2, cpu_mac, t1);
    tcg_gen_sub_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_xor_i64(t1, t1, cpu_mac);
    tcg_gen_and_i64(t1, t1, t2);
    tcg_temp_free_i64(t2);

#if TARGET_LONG_BITS == 32
    /* The sign (overflow) bit lives in the high half of t1.  */
    tcg_gen_extrh_i64_i32(cpu_sr_ov, t1);
#else
    tcg_gen_mov_i64(cpu_sr_ov, t1);
#endif
    tcg_temp_free_i64(t1);

    gen_ove_ov(dc);
}
418 
/* Unsigned multiply-subtract: mac -= (u64)srca * (u64)srcb, with
   SR[CY] set on borrow (old mac < product); the borrow is computed
   before the subtraction clobbers cpu_mac.  */
static void gen_msbu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during subtraction stage.  */
    tcg_gen_setcond_i64(TCG_COND_LTU, t2, cpu_mac, t1);
    tcg_gen_sub_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_trunc_i64_tl(cpu_sr_cy, t2);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);

    gen_ove_cy(dc);
}
437 
/*
 * Register-register ALU instructions: rD = rA <op> rB.
 * Each checks for a write to r0 first (see check_r0_write) and then
 * either delegates to a flag-computing helper above or emits the TCG
 * op directly when no flags are affected.
 */
static bool trans_l_add(DisasContext *dc, arg_dab *a)
{
    check_r0_write(a->d);
    gen_add(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_addc(DisasContext *dc, arg_dab *a)
{
    check_r0_write(a->d);
    gen_addc(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_sub(DisasContext *dc, arg_dab *a)
{
    check_r0_write(a->d);
    gen_sub(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_and(DisasContext *dc, arg_dab *a)
{
    check_r0_write(a->d);
    tcg_gen_and_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_or(DisasContext *dc, arg_dab *a)
{
    check_r0_write(a->d);
    tcg_gen_or_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_xor(DisasContext *dc, arg_dab *a)
{
    check_r0_write(a->d);
    tcg_gen_xor_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_sll(DisasContext *dc, arg_dab *a)
{
    check_r0_write(a->d);
    tcg_gen_shl_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_srl(DisasContext *dc, arg_dab *a)
{
    check_r0_write(a->d);
    tcg_gen_shr_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_sra(DisasContext *dc, arg_dab *a)
{
    check_r0_write(a->d);
    tcg_gen_sar_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_ror(DisasContext *dc, arg_dab *a)
{
    check_r0_write(a->d);
    tcg_gen_rotr_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}
507 
/* Sign/zero extension: rD = ext{8,16}{s,u}(rA).  */
static bool trans_l_exths(DisasContext *dc, arg_da *a)
{
    check_r0_write(a->d);
    tcg_gen_ext16s_tl(cpu_R[a->d], cpu_R[a->a]);
    return true;
}

static bool trans_l_extbs(DisasContext *dc, arg_da *a)
{
    check_r0_write(a->d);
    tcg_gen_ext8s_tl(cpu_R[a->d], cpu_R[a->a]);
    return true;
}

static bool trans_l_exthz(DisasContext *dc, arg_da *a)
{
    check_r0_write(a->d);
    tcg_gen_ext16u_tl(cpu_R[a->d], cpu_R[a->a]);
    return true;
}

static bool trans_l_extbz(DisasContext *dc, arg_da *a)
{
    check_r0_write(a->d);
    tcg_gen_ext8u_tl(cpu_R[a->d], cpu_R[a->a]);
    return true;
}
535 
536 static bool trans_l_cmov(DisasContext *dc, arg_dab *a)
537 {
538     TCGv zero;
539 
540     check_r0_write(a->d);
541     zero = tcg_const_tl(0);
542     tcg_gen_movcond_tl(TCG_COND_NE, cpu_R[a->d], cpu_sr_f, zero,
543                        cpu_R[a->a], cpu_R[a->b]);
544     tcg_temp_free(zero);
545     return true;
546 }
547 
/* l.ff1: find the first (least significant) set bit, 1-based; yields 0
   when rA is 0 (ctz returns -1, then +1).  */
static bool trans_l_ff1(DisasContext *dc, arg_da *a)
{
    check_r0_write(a->d);
    tcg_gen_ctzi_tl(cpu_R[a->d], cpu_R[a->a], -1);
    tcg_gen_addi_tl(cpu_R[a->d], cpu_R[a->d], 1);
    return true;
}

/* l.fl1: find the last (most significant) set bit, 1-based; yields 0
   when rA is 0 (clz returns the full width, subtracted back out).  */
static bool trans_l_fl1(DisasContext *dc, arg_da *a)
{
    check_r0_write(a->d);
    tcg_gen_clzi_tl(cpu_R[a->d], cpu_R[a->a], TARGET_LONG_BITS);
    tcg_gen_subfi_tl(cpu_R[a->d], TARGET_LONG_BITS, cpu_R[a->d]);
    return true;
}
563 
/* Multiply/divide instructions, delegating to the flag-computing
   helpers; l.muld/l.muldu target the MAC accumulator, not a GPR.  */
static bool trans_l_mul(DisasContext *dc, arg_dab *a)
{
    check_r0_write(a->d);
    gen_mul(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_mulu(DisasContext *dc, arg_dab *a)
{
    check_r0_write(a->d);
    gen_mulu(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_div(DisasContext *dc, arg_dab *a)
{
    check_r0_write(a->d);
    gen_div(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_divu(DisasContext *dc, arg_dab *a)
{
    check_r0_write(a->d);
    gen_divu(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_muld(DisasContext *dc, arg_ab *a)
{
    gen_muld(dc, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_muldu(DisasContext *dc, arg_ab *a)
{
    gen_muldu(dc, cpu_R[a->a], cpu_R[a->b]);
    return true;
}
603 
/* l.j: pc-relative jump with delay slot.  The target is recorded both
   in jmp_pc and, being a constant, in jmp_pc_imm so the TB can end in
   a direct goto_tb.  */
static bool trans_l_j(DisasContext *dc, arg_l_j *a)
{
    target_ulong tmp_pc = dc->base.pc_next + a->n * 4;

    tcg_gen_movi_tl(jmp_pc, tmp_pc);
    dc->jmp_pc_imm = tmp_pc;
    dc->delayed_branch = 2;
    return true;
}

/* l.jal: as l.j, plus the return address (after the delay slot) is
   written to the link register r9.  */
static bool trans_l_jal(DisasContext *dc, arg_l_jal *a)
{
    target_ulong tmp_pc = dc->base.pc_next + a->n * 4;
    target_ulong ret_pc = dc->base.pc_next + 8;

    tcg_gen_movi_tl(cpu_R[9], ret_pc);
    /* Optimize jal being used to load the PC for PIC.  */
    if (tmp_pc != ret_pc) {
        tcg_gen_movi_tl(jmp_pc, tmp_pc);
        dc->jmp_pc_imm = tmp_pc;
        dc->delayed_branch = 2;
    }
    return true;
}
628 
/* Conditional branch with delay slot: jmp_pc = (SR[F] <cond> 0)
   ? branch target : insn after the delay slot.  */
static void do_bf(DisasContext *dc, arg_l_bf *a, TCGCond cond)
{
    target_ulong tmp_pc = dc->base.pc_next + a->n * 4;
    TCGv t_next = tcg_const_tl(dc->base.pc_next + 8);
    TCGv t_true = tcg_const_tl(tmp_pc);
    TCGv t_zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(cond, jmp_pc, cpu_sr_f, t_zero, t_true, t_next);

    tcg_temp_free(t_next);
    tcg_temp_free(t_true);
    tcg_temp_free(t_zero);
    dc->delayed_branch = 2;
}

/* l.bf: branch when the flag is set.  */
static bool trans_l_bf(DisasContext *dc, arg_l_bf *a)
{
    do_bf(dc, a, TCG_COND_NE);
    return true;
}

/* l.bnf: branch when the flag is clear.  */
static bool trans_l_bnf(DisasContext *dc, arg_l_bf *a)
{
    do_bf(dc, a, TCG_COND_EQ);
    return true;
}
655 
/* l.jr: indirect jump to rB, with delay slot.  */
static bool trans_l_jr(DisasContext *dc, arg_l_jr *a)
{
    tcg_gen_mov_tl(jmp_pc, cpu_R[a->b]);
    dc->delayed_branch = 2;
    return true;
}

/* l.jalr: as l.jr, plus link in r9.  */
static bool trans_l_jalr(DisasContext *dc, arg_l_jalr *a)
{
    tcg_gen_mov_tl(jmp_pc, cpu_R[a->b]);
    tcg_gen_movi_tl(cpu_R[9], dc->base.pc_next + 8);
    dc->delayed_branch = 2;
    return true;
}
670 
/* l.lwa: load-linked word.  Besides the ordinary load, remember the
   address and value so a later l.swa can perform its cmpxchg.  */
static bool trans_l_lwa(DisasContext *dc, arg_load *a)
{
    TCGv ea;

    check_r0_write(a->d);
    ea = tcg_temp_new();
    tcg_gen_addi_tl(ea, cpu_R[a->a], a->i);
    tcg_gen_qemu_ld_tl(cpu_R[a->d], ea, dc->mem_idx, MO_TEUL);
    tcg_gen_mov_tl(cpu_lock_addr, ea);
    tcg_gen_mov_tl(cpu_lock_value, cpu_R[a->d]);
    tcg_temp_free(ea);
    return true;
}
684 
/* Common helper for the ordinary loads: rD = mem[rA + simm], with the
   width/signedness given by MOP.  */
static void do_load(DisasContext *dc, arg_load *a, TCGMemOp mop)
{
    TCGv ea;

    check_r0_write(a->d);
    ea = tcg_temp_new();
    tcg_gen_addi_tl(ea, cpu_R[a->a], a->i);
    tcg_gen_qemu_ld_tl(cpu_R[a->d], ea, dc->mem_idx, mop);
    tcg_temp_free(ea);
}
695 
/* Load instructions: word/half/byte, zero- or sign-extended.  */
static bool trans_l_lwz(DisasContext *dc, arg_load *a)
{
    do_load(dc, a, MO_TEUL);
    return true;
}

static bool trans_l_lws(DisasContext *dc, arg_load *a)
{
    do_load(dc, a, MO_TESL);
    return true;
}

static bool trans_l_lbz(DisasContext *dc, arg_load *a)
{
    do_load(dc, a, MO_UB);
    return true;
}

static bool trans_l_lbs(DisasContext *dc, arg_load *a)
{
    do_load(dc, a, MO_SB);
    return true;
}

static bool trans_l_lhz(DisasContext *dc, arg_load *a)
{
    do_load(dc, a, MO_TEUW);
    return true;
}

static bool trans_l_lhs(DisasContext *dc, arg_load *a)
{
    do_load(dc, a, MO_TESW);
    return true;
}
731 
/* l.swa: store-conditional word.  Succeeds (SR[F] = 1) only when the
   address matches the pending l.lwa reservation and the memory still
   holds the value observed then; implemented with an atomic cmpxchg.
   The reservation is consumed either way.  */
static bool trans_l_swa(DisasContext *dc, arg_store *a)
{
    TCGv ea, val;
    TCGLabel *lab_fail, *lab_done;

    ea = tcg_temp_new();
    tcg_gen_addi_tl(ea, cpu_R[a->a], a->i);

    /* For TB_FLAGS_R0_0, the branch below invalidates the temporary assigned
       to cpu_R[0].  Since l.swa is quite often immediately followed by a
       branch, don't bother reallocating; finish the TB using the "real" R0.
       This also takes care of RB input across the branch.  */
    cpu_R[0] = cpu_R0;

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_tl(TCG_COND_NE, ea, cpu_lock_addr, lab_fail);
    tcg_temp_free(ea);

    val = tcg_temp_new();
    /* Store rB only if memory still equals lock_value; VAL receives the
       value found, so F = (val == lock_value) is the success flag.  */
    tcg_gen_atomic_cmpxchg_tl(val, cpu_lock_addr, cpu_lock_value,
                              cpu_R[a->b], dc->mem_idx, MO_TEUL);
    tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, val, cpu_lock_value);
    tcg_temp_free(val);

    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    tcg_gen_movi_tl(cpu_sr_f, 0);

    gen_set_label(lab_done);
    /* Invalidate the reservation regardless of success.  */
    tcg_gen_movi_tl(cpu_lock_addr, -1);
    return true;
}
766 
/* Common helper for ordinary stores: mem[rA + simm] = rB.  */
static void do_store(DisasContext *dc, arg_store *a, TCGMemOp mop)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[a->a], a->i);
    tcg_gen_qemu_st_tl(cpu_R[a->b], t0, dc->mem_idx, mop);
    tcg_temp_free(t0);
}
774 
/* Store instructions: word/byte/half.  */
static bool trans_l_sw(DisasContext *dc, arg_store *a)
{
    do_store(dc, a, MO_TEUL);
    return true;
}

static bool trans_l_sb(DisasContext *dc, arg_store *a)
{
    do_store(dc, a, MO_UB);
    return true;
}

static bool trans_l_sh(DisasContext *dc, arg_store *a)
{
    do_store(dc, a, MO_TEUW);
    return true;
}

/* l.nop: no operation.  */
static bool trans_l_nop(DisasContext *dc, arg_l_nop *a)
{
    return true;
}
797 
/* Immediate arithmetic: the signed immediate is materialized in a
   temporary and fed to the same helpers as the register forms.  */
static bool trans_l_addi(DisasContext *dc, arg_rri *a)
{
    TCGv t0;

    check_r0_write(a->d);
    t0 = tcg_const_tl(a->i);
    gen_add(dc, cpu_R[a->d], cpu_R[a->a], t0);
    tcg_temp_free(t0);
    return true;
}

static bool trans_l_addic(DisasContext *dc, arg_rri *a)
{
    TCGv t0;

    check_r0_write(a->d);
    t0 = tcg_const_tl(a->i);
    gen_addc(dc, cpu_R[a->d], cpu_R[a->a], t0);
    tcg_temp_free(t0);
    return true;
}

static bool trans_l_muli(DisasContext *dc, arg_rri *a)
{
    TCGv t0;

    check_r0_write(a->d);
    t0 = tcg_const_tl(a->i);
    gen_mul(dc, cpu_R[a->d], cpu_R[a->a], t0);
    tcg_temp_free(t0);
    return true;
}

/* l.maci accumulates into MAC, so no r0 check is needed.  */
static bool trans_l_maci(DisasContext *dc, arg_l_maci *a)
{
    TCGv t0;

    t0 = tcg_const_tl(a->i);
    gen_mac(dc, cpu_R[a->a], t0);
    tcg_temp_free(t0);
    return true;
}
840 
/* Logical immediates: and/or take an unsigned immediate (k), xor a
   signed one (i).  */
static bool trans_l_andi(DisasContext *dc, arg_rrk *a)
{
    check_r0_write(a->d);
    tcg_gen_andi_tl(cpu_R[a->d], cpu_R[a->a], a->k);
    return true;
}

static bool trans_l_ori(DisasContext *dc, arg_rrk *a)
{
    check_r0_write(a->d);
    tcg_gen_ori_tl(cpu_R[a->d], cpu_R[a->a], a->k);
    return true;
}

static bool trans_l_xori(DisasContext *dc, arg_rri *a)
{
    check_r0_write(a->d);
    tcg_gen_xori_tl(cpu_R[a->d], cpu_R[a->a], a->i);
    return true;
}
861 
/* l.mfspr: rD = SPR[rA | k]; supervisor-only, illegal from user mode.
   NOTE(review): the current rD value is passed to the helper as an
   extra operand — presumably so it can be returned unchanged for
   unimplemented SPRs; confirm against helper_mfspr.  */
static bool trans_l_mfspr(DisasContext *dc, arg_l_mfspr *a)
{
    check_r0_write(a->d);

    if (is_user(dc)) {
        gen_illegal_exception(dc);
    } else {
        TCGv spr = tcg_temp_new();
        tcg_gen_ori_tl(spr, cpu_R[a->a], a->k);
        gen_helper_mfspr(cpu_R[a->d], cpu_env, cpu_R[a->d], spr);
        tcg_temp_free(spr);
    }
    return true;
}

/* l.mtspr: SPR[rA | k] = rB; supervisor-only.  Ends the TB because a
   write to SR/NPC may change execution state.  */
static bool trans_l_mtspr(DisasContext *dc, arg_l_mtspr *a)
{
    if (is_user(dc)) {
        gen_illegal_exception(dc);
    } else {
        TCGv spr;

        /* For SR, we will need to exit the TB to recognize the new
         * exception state.  For NPC, in theory this counts as a branch
         * (although the SPR only exists for use by an ICE).  Save all
         * of the cpu state first, allowing it to be overwritten.
         */
        if (dc->delayed_branch) {
            tcg_gen_mov_tl(cpu_pc, jmp_pc);
            tcg_gen_discard_tl(jmp_pc);
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->base.pc_next + 4);
        }
        dc->base.is_jmp = DISAS_EXIT;

        spr = tcg_temp_new();
        tcg_gen_ori_tl(spr, cpu_R[a->a], a->k);
        gen_helper_mtspr(cpu_env, spr, cpu_R[a->b]);
        tcg_temp_free(spr);
    }
    return true;
}
904 
/* MAC-unit instructions: accumulate/subtract rA*rB into MAC.  */
static bool trans_l_mac(DisasContext *dc, arg_ab *a)
{
    gen_mac(dc, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_msb(DisasContext *dc, arg_ab *a)
{
    gen_msb(dc, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_macu(DisasContext *dc, arg_ab *a)
{
    gen_macu(dc, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_msbu(DisasContext *dc, arg_ab *a)
{
    gen_msbu(dc, cpu_R[a->a], cpu_R[a->b]);
    return true;
}
928 
/* Shift/rotate by immediate; the count is masked to the register
   width, matching the register-operand forms.  */
static bool trans_l_slli(DisasContext *dc, arg_dal *a)
{
    check_r0_write(a->d);
    tcg_gen_shli_tl(cpu_R[a->d], cpu_R[a->a], a->l & (TARGET_LONG_BITS - 1));
    return true;
}

static bool trans_l_srli(DisasContext *dc, arg_dal *a)
{
    check_r0_write(a->d);
    tcg_gen_shri_tl(cpu_R[a->d], cpu_R[a->a], a->l & (TARGET_LONG_BITS - 1));
    return true;
}

static bool trans_l_srai(DisasContext *dc, arg_dal *a)
{
    check_r0_write(a->d);
    tcg_gen_sari_tl(cpu_R[a->d], cpu_R[a->a], a->l & (TARGET_LONG_BITS - 1));
    return true;
}

static bool trans_l_rori(DisasContext *dc, arg_dal *a)
{
    check_r0_write(a->d);
    tcg_gen_rotri_tl(cpu_R[a->d], cpu_R[a->a], a->l & (TARGET_LONG_BITS - 1));
    return true;
}

/* l.movhi: rD = k << 16.  */
static bool trans_l_movhi(DisasContext *dc, arg_l_movhi *a)
{
    check_r0_write(a->d);
    tcg_gen_movi_tl(cpu_R[a->d], a->k << 16);
    return true;
}
962 }
963 
/* l.macrc: read (the low part of) the MAC accumulator into rD and
   clear the accumulator.  */
static bool trans_l_macrc(DisasContext *dc, arg_l_macrc *a)
{
    check_r0_write(a->d);
    tcg_gen_trunc_i64_tl(cpu_R[a->d], cpu_mac);
    tcg_gen_movi_i64(cpu_mac, 0);
    return true;
}
971 
/* Set-flag comparisons, register forms: SR[F] = rA <cond> rB.  */
static bool trans_l_sfeq(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_sfne(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_sfgtu(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_GTU, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_sfgeu(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_GEU, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_sfltu(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_sfleu(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_LEU, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_sfgts(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_GT, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_sfges(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_GE, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_sflts(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_LT, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
    return true;
}

static bool trans_l_sfles(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_LE, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
    return true;
}
1031 
1032 static bool trans_l_sfeqi(DisasContext *dc, arg_ai *a)
1033 {
1034     tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_f, cpu_R[a->a], a->i);
1035     return true;
1036 }
1037 
1038 static bool trans_l_sfnei(DisasContext *dc, arg_ai *a)
1039 {
1040     tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_f, cpu_R[a->a], a->i);
1041     return true;
1042 }
1043 
1044 static bool trans_l_sfgtui(DisasContext *dc, arg_ai *a)
1045 {
1046     tcg_gen_setcondi_tl(TCG_COND_GTU, cpu_sr_f, cpu_R[a->a], a->i);
1047     return true;
1048 }
1049 
1050 static bool trans_l_sfgeui(DisasContext *dc, arg_ai *a)
1051 {
1052     tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_sr_f, cpu_R[a->a], a->i);
1053     return true;
1054 }
1055 
1056 static bool trans_l_sfltui(DisasContext *dc, arg_ai *a)
1057 {
1058     tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_sr_f, cpu_R[a->a], a->i);
1059     return true;
1060 }
1061 
1062 static bool trans_l_sfleui(DisasContext *dc, arg_ai *a)
1063 {
1064     tcg_gen_setcondi_tl(TCG_COND_LEU, cpu_sr_f, cpu_R[a->a], a->i);
1065     return true;
1066 }
1067 
/* l.sfgtsi: SR[F] = (rA > immediate), signed comparison.  */
static bool trans_l_sfgtsi(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_GT, cpu_sr_f, cpu_R[a->a], a->i);
    return true;
}
1073 
/* l.sfgesi: SR[F] = (rA >= immediate), signed comparison.  */
static bool trans_l_sfgesi(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_GE, cpu_sr_f, cpu_R[a->a], a->i);
    return true;
}
1079 
/* l.sfltsi: SR[F] = (rA < immediate), signed comparison.  */
static bool trans_l_sfltsi(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_LT, cpu_sr_f, cpu_R[a->a], a->i);
    return true;
}
1085 
/* l.sflesi: SR[F] = (rA <= immediate), signed comparison.  */
static bool trans_l_sflesi(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_LE, cpu_sr_f, cpu_R[a->a], a->i);
    return true;
}
1091 
/*
 * l.sys: raise a system-call exception.  PC is written first so the
 * exception handler sees the address of this instruction's successor
 * context; the TB ends here (DISAS_NORETURN).
 */
static bool trans_l_sys(DisasContext *dc, arg_l_sys *a)
{
    tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
    gen_exception(dc, EXCP_SYSCALL);
    dc->base.is_jmp = DISAS_NORETURN;
    return true;
}
1099 
/* l.trap: raise a trap exception; ends the TB like l.sys above.  */
static bool trans_l_trap(DisasContext *dc, arg_l_trap *a)
{
    tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
    gen_exception(dc, EXCP_TRAP);
    dc->base.is_jmp = DISAS_NORETURN;
    return true;
}
1107 
/* l.msync: memory barrier; emit a full TCG memory fence.  */
static bool trans_l_msync(DisasContext *dc, arg_l_msync *a)
{
    tcg_gen_mb(TCG_MO_ALL);
    return true;
}
1113 
/* l.psync: pipeline synchronization -- nothing to do under TCG.  */
static bool trans_l_psync(DisasContext *dc, arg_l_psync *a)
{
    return true;
}
1118 
/* l.csync: context synchronization -- nothing to do under TCG.  */
static bool trans_l_csync(DisasContext *dc, arg_l_csync *a)
{
    return true;
}
1123 
/*
 * l.rfe: return from exception.  Privileged: in user mode this raises
 * an illegal-instruction exception instead.  The helper restores CPU
 * state, so we must exit to the main loop (DISAS_EXIT) to pick it up.
 */
static bool trans_l_rfe(DisasContext *dc, arg_l_rfe *a)
{
    if (is_user(dc)) {
        gen_illegal_exception(dc);
    } else {
        gen_helper_rfe(cpu_env);
        dc->base.is_jmp = DISAS_EXIT;
    }
    return true;
}
1134 
/*
 * Emit a unary floating-point operation: rD = fn(rA), followed by an
 * FPCSR update for any exception flags the helper raised.
 * check_r0_write validates the destination register first.
 */
static void do_fp2(DisasContext *dc, arg_da *a,
                   void (*fn)(TCGv, TCGv_env, TCGv))
{
    check_r0_write(a->d);
    fn(cpu_R[a->d], cpu_env, cpu_R[a->a]);
    gen_helper_update_fpcsr(cpu_env);
}
1142 
/*
 * Emit a binary floating-point operation: rD = fn(rA, rB), followed by
 * an FPCSR update for any exception flags the helper raised.
 */
static void do_fp3(DisasContext *dc, arg_dab *a,
                   void (*fn)(TCGv, TCGv_env, TCGv, TCGv))
{
    check_r0_write(a->d);
    fn(cpu_R[a->d], cpu_env, cpu_R[a->a], cpu_R[a->b]);
    gen_helper_update_fpcsr(cpu_env);
}
1150 
1151 static void do_fpcmp(DisasContext *dc, arg_ab *a,
1152                      void (*fn)(TCGv, TCGv_env, TCGv, TCGv),
1153                      bool inv, bool swap)
1154 {
1155     if (swap) {
1156         fn(cpu_sr_f, cpu_env, cpu_R[a->b], cpu_R[a->a]);
1157     } else {
1158         fn(cpu_sr_f, cpu_env, cpu_R[a->a], cpu_R[a->b]);
1159     }
1160     if (inv) {
1161         tcg_gen_xori_tl(cpu_sr_f, cpu_sr_f, 1);
1162     }
1163     gen_helper_update_fpcsr(cpu_env);
1164 }
1165 
/* lf.add.s: single-precision FP add, rD = rA + rB.  */
static bool trans_lf_add_s(DisasContext *dc, arg_dab *a)
{
    do_fp3(dc, a, gen_helper_float_add_s);
    return true;
}
1171 
/* lf.sub.s: single-precision FP subtract, rD = rA - rB.  */
static bool trans_lf_sub_s(DisasContext *dc, arg_dab *a)
{
    do_fp3(dc, a, gen_helper_float_sub_s);
    return true;
}
1177 
/* lf.mul.s: single-precision FP multiply, rD = rA * rB.  */
static bool trans_lf_mul_s(DisasContext *dc, arg_dab *a)
{
    do_fp3(dc, a, gen_helper_float_mul_s);
    return true;
}
1183 
/* lf.div.s: single-precision FP divide, rD = rA / rB.  */
static bool trans_lf_div_s(DisasContext *dc, arg_dab *a)
{
    do_fp3(dc, a, gen_helper_float_div_s);
    return true;
}
1189 
/* lf.rem.s: single-precision FP remainder of rA / rB.  */
static bool trans_lf_rem_s(DisasContext *dc, arg_dab *a)
{
    do_fp3(dc, a, gen_helper_float_rem_s);
    return true;
}
1195 
/* lf.itof.s: convert integer rA to single-precision float in rD.  */
static bool trans_lf_itof_s(DisasContext *dc, arg_da *a)
{
    do_fp2(dc, a, gen_helper_itofs);
    return true;
}
1201 
/* lf.ftoi.s: convert single-precision float rA to integer in rD.  */
static bool trans_lf_ftoi_s(DisasContext *dc, arg_da *a)
{
    do_fp2(dc, a, gen_helper_ftois);
    return true;
}
1207 
/*
 * lf.madd.s: single-precision multiply-add.  The current value of rD is
 * passed to the helper as the accumuland along with rA and rB, and the
 * result is written back to rD; FPCSR flags are then updated.
 */
static bool trans_lf_madd_s(DisasContext *dc, arg_dab *a)
{
    check_r0_write(a->d);
    gen_helper_float_madd_s(cpu_R[a->d], cpu_env, cpu_R[a->d],
                            cpu_R[a->a], cpu_R[a->b]);
    gen_helper_update_fpcsr(cpu_env);
    return true;
}
1216 
/* lf.sfeq.s: SR[F] = (rA == rB), FP compare.  */
static bool trans_lf_sfeq_s(DisasContext *dc, arg_ab *a)
{
    do_fpcmp(dc, a, gen_helper_float_eq_s, false, false);
    return true;
}
1222 
/* lf.sfne.s: SR[F] = (rA != rB), built as inverted EQ compare.  */
static bool trans_lf_sfne_s(DisasContext *dc, arg_ab *a)
{
    do_fpcmp(dc, a, gen_helper_float_eq_s, true, false);
    return true;
}
1228 
/* lf.sfgt.s: SR[F] = (rA > rB), built as LT with operands swapped.  */
static bool trans_lf_sfgt_s(DisasContext *dc, arg_ab *a)
{
    do_fpcmp(dc, a, gen_helper_float_lt_s, false, true);
    return true;
}
1234 
/* lf.sfge.s: SR[F] = (rA >= rB), built as LE with operands swapped.  */
static bool trans_lf_sfge_s(DisasContext *dc, arg_ab *a)
{
    do_fpcmp(dc, a, gen_helper_float_le_s, false, true);
    return true;
}
1240 
/* lf.sflt.s: SR[F] = (rA < rB), FP compare.  */
static bool trans_lf_sflt_s(DisasContext *dc, arg_ab *a)
{
    do_fpcmp(dc, a, gen_helper_float_lt_s, false, false);
    return true;
}
1246 
/* lf.sfle.s: SR[F] = (rA <= rB), FP compare.  */
static bool trans_lf_sfle_s(DisasContext *dc, arg_ab *a)
{
    do_fpcmp(dc, a, gen_helper_float_le_s, false, false);
    return true;
}
1252 
/*
 * Translator hook: initialize per-TB translation state from the TB
 * flags, and cap max_insns so translation never crosses a page boundary.
 */
static void openrisc_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    CPUOpenRISCState *env = cs->env_ptr;
    int bound;

    dc->mem_idx = cpu_mmu_index(env, false);
    dc->tb_flags = dc->base.tb->flags;
    /* Non-zero if the TB starts inside a delay slot.  */
    dc->delayed_branch = (dc->tb_flags & TB_FLAGS_DFLAG) != 0;
    /* -1 means "no direct jump destination known" (see jmp_pc_imm).  */
    dc->jmp_pc_imm = -1;

    /* Number of 4-byte insns remaining to the end of this page.  */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
1267 
/*
 * Translator hook: select the TCG value used for R0 in this TB.
 * When the TB flags guarantee R0 == 0 we use a constant so the
 * optimizer can fold it; otherwise fall back to the real register.
 */
static void openrisc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
    DisasContext *dc = container_of(db, DisasContext, base);

    /* Allow the TCG optimizer to see that R0 == 0,
       when it's true, which is the common case.  */
    if (dc->tb_flags & TB_FLAGS_R0_0) {
        cpu_R[0] = tcg_const_tl(0);
    } else {
        cpu_R[0] = cpu_R0;
    }
}
1280 
/*
 * Translator hook: record (pc, flags) for this insn so exceptions can
 * restore state.  Flag bit 0 = in a delay slot; bit 1 = not the first
 * insn of the TB (consumed by restore_state_to_opc to rebuild PPC).
 */
static void openrisc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, (dc->delayed_branch ? 1 : 0)
                       | (dc->base.num_insns > 1 ? 2 : 0));
}
1288 
/*
 * Translator hook: a breakpoint was hit at pc_next.  Raise EXCP_DEBUG
 * and terminate the TB.
 */
static bool openrisc_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                         const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
    gen_exception(dc, EXCP_DEBUG);
    dc->base.is_jmp = DISAS_NORETURN;
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order for it to be
       properly cleared -- thus we increment the PC here so that
       the logic setting tb->size below does the right thing.  */
    dc->base.pc_next += 4;
    return true;
}
1304 
/*
 * Translator hook: fetch, decode and translate one 4-byte insn.
 * An undecodable insn becomes an illegal-instruction exception.
 */
static void openrisc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    OpenRISCCPU *cpu = OPENRISC_CPU(cs);
    uint32_t insn = cpu_ldl_code(&cpu->env, dc->base.pc_next);

    if (!decode(dc, insn)) {
        gen_illegal_exception(dc);
    }
    dc->base.pc_next += 4;

    /* When exiting the delay slot normally, exit via jmp_pc.
     * For DISAS_NORETURN, we have raised an exception and already exited.
     * For DISAS_EXIT, we found l.rfe in a delay slot.  There's nothing
     * in the manual saying this is illegal, but surely it should be.
     * At least or1ksim overrides pcnext and ignores the branch.
     */
    if (dc->delayed_branch
        && --dc->delayed_branch == 0
        && dc->base.is_jmp == DISAS_NEXT) {
        dc->base.is_jmp = DISAS_JUMP;
    }
}
1328 
/*
 * Translator hook: emit the code that ends the TB, according to how
 * translation stopped (is_jmp).  Also keeps the delay-slot flag
 * (cpu_dflag) and the previous-PC SPR (cpu_ppc) consistent for the
 * next TB.
 */
static void openrisc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong jmp_dest;

    /* If we have already exited the TB, nothing following has effect.  */
    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }

    /* Adjust the delayed branch state for the next TB.  */
    if ((dc->tb_flags & TB_FLAGS_DFLAG ? 1 : 0) != (dc->delayed_branch != 0)) {
        tcg_gen_movi_i32(cpu_dflag, dc->delayed_branch != 0);
    }

    /* For DISAS_TOO_MANY, jump to the next insn.  */
    jmp_dest = dc->base.pc_next;
    tcg_gen_movi_tl(cpu_ppc, jmp_dest - 4);

    switch (dc->base.is_jmp) {
    case DISAS_JUMP:
        jmp_dest = dc->jmp_pc_imm;
        if (jmp_dest == -1) {
            /* The jump destination is indirect/computed; use jmp_pc.  */
            tcg_gen_mov_tl(cpu_pc, jmp_pc);
            tcg_gen_discard_tl(jmp_pc);
            if (unlikely(dc->base.singlestep_enabled)) {
                gen_exception(dc, EXCP_DEBUG);
            } else {
                tcg_gen_lookup_and_goto_ptr();
            }
            break;
        }
        /* The jump destination is direct; use jmp_pc_imm.
           However, we will have stored into jmp_pc as well;
           we know now that it wasn't needed.  */
        tcg_gen_discard_tl(jmp_pc);
        /* fallthru */

    case DISAS_TOO_MANY:
        if (unlikely(dc->base.singlestep_enabled)) {
            tcg_gen_movi_tl(cpu_pc, jmp_dest);
            gen_exception(dc, EXCP_DEBUG);
        } else if ((dc->base.pc_first ^ jmp_dest) & TARGET_PAGE_MASK) {
            /* Crossing a page: cannot chain TBs directly.  */
            tcg_gen_movi_tl(cpu_pc, jmp_dest);
            tcg_gen_lookup_and_goto_ptr();
        } else {
            /* Same page: direct TB-to-TB chaining.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_tl(cpu_pc, jmp_dest);
            tcg_gen_exit_tb(dc->base.tb, 0);
        }
        break;

    case DISAS_EXIT:
        if (unlikely(dc->base.singlestep_enabled)) {
            gen_exception(dc, EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
1393 
/* Translator hook: log the guest disassembly of the translated TB.  */
static void openrisc_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *s = container_of(dcbase, DisasContext, base);

    qemu_log("IN: %s\n", lookup_symbol(s->base.pc_first));
    log_target_disas(cs, s->base.pc_first, s->base.tb->size);
}
1401 
/* Callback table wiring the generic translator loop to this target.  */
static const TranslatorOps openrisc_tr_ops = {
    .init_disas_context = openrisc_tr_init_disas_context,
    .tb_start           = openrisc_tr_tb_start,
    .insn_start         = openrisc_tr_insn_start,
    .breakpoint_check   = openrisc_tr_breakpoint_check,
    .translate_insn     = openrisc_tr_translate_insn,
    .tb_stop            = openrisc_tr_tb_stop,
    .disas_log          = openrisc_tr_disas_log,
};
1411 
/* Entry point: translate one guest TB via the common translator loop.  */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext ctx;

    translator_loop(&openrisc_tr_ops, &ctx.base, cs, tb, max_insns);
}
1418 
1419 void openrisc_cpu_dump_state(CPUState *cs, FILE *f, int flags)
1420 {
1421     OpenRISCCPU *cpu = OPENRISC_CPU(cs);
1422     CPUOpenRISCState *env = &cpu->env;
1423     int i;
1424 
1425     qemu_fprintf(f, "PC=%08x\n", env->pc);
1426     for (i = 0; i < 32; ++i) {
1427         qemu_fprintf(f, "R%02d=%08x%c", i, cpu_get_gpr(env, i),
1428                      (i % 4) == 3 ? '\n' : ' ');
1429     }
1430 }
1431 
1432 void restore_state_to_opc(CPUOpenRISCState *env, TranslationBlock *tb,
1433                           target_ulong *data)
1434 {
1435     env->pc = data[0];
1436     env->dflag = data[1] & 1;
1437     if (data[1] & 2) {
1438         env->ppc = env->pc - 4;
1439     }
1440 }
1441