/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

/* Choose to use explicit sizes within this file. */
#undef tcg_temp_new

typedef struct DisasCond {
    TCGCond c;
    TCGv_i64 a0, a1;
} DisasCond;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;
    TCGOp *insn_start;

    uint64_t iaoq_f;
    uint64_t iaoq_b;
    uint64_t iaoq_n;
    TCGv_i64 iaoq_n_var;

    DisasCond null_cond;
    TCGLabel *null_lab;

    TCGv_i64 zero;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;
    bool is_pa20;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)  (C)->unalign
#else
#define UNALIGN(C)  MO_ALIGN
#endif

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}
/* The space register field is encoded inverted, so that 0 means sr0
   explicitly rather than a space inferred from the base register.  */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}
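
/*
 * That is: m=0 -> 0 (no base update); m=1,a=0 -> +1 (post-modify);
 * m=1,a=1 -> -1 (pre-modify), matching the tri-state convention
 * documented at do_load_32 below.
 */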

/* Convert the sign of the displacement to a pre or post-modify.  */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for fp memory ops.  */
static int expand_shl3(DisasContext *ctx, int val)
{
    return val << 3;
}

/* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}

static int assemble_6(DisasContext *ctx, int val)
{
    /*
     * Officially, 32 * x + 32 - y.
     * Here, x is already in bit 5, and y is [4:0].
     * Since -y = ~y + 1, in 5 bits 32 - y => y ^ 31 + 1,
     * with the overflow from bit 4 summing with x.
     */
    return (val ^ 31) + 1;
}
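
/*
 * Worked example: x = 1, y = 3 encodes val = 0b100011 = 35;
 * (35 ^ 31) + 1 = 60 + 1 = 61 = 32 * 1 + 32 - 3 as required.
 */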

/* Translate CMPI doubleword conditions to standard. */
static int cmpbid_c(DisasContext *ctx, int val)
{
    return val ? val : 4; /* 0 == "*<<" */
}


/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
#define DISAS_EXIT                  DISAS_TARGET_3

/* global register indexes */
static TCGv_i64 cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_i64 cpu_iaoq_f;
static TCGv_i64 cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_i64 cpu_sar;
static TCGv_i64 cpu_psw_n;
static TCGv_i64 cpu_psw_v;
static TCGv_i64 cpu_psw_cb;
static TCGv_i64 cpu_psw_cb_msb;

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_i64 *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static void set_insn_breg(DisasContext *ctx, int breg)
{
    assert(ctx->insn_start != NULL);
    tcg_set_insn_start_param(ctx->insn_start, 2, breg);
    ctx->insn_start = NULL;
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_i64(0)
    };
}

static DisasCond cond_make_tmp(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_i64 a0)
{
    return cond_make_tmp(c, a0, tcg_constant_i64(0));
}

static DisasCond cond_make_0(TCGCond c, TCGv_i64 a0)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_mov_i64(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, a0);
    tcg_gen_mov_i64(t1, a1);
    return cond_make_tmp(c, t0, t1);
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

static TCGv_i64 load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        return ctx->zero;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_i64 dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return tcg_temp_new_i64();
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_i64 dest, TCGv_i64 t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_i64(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_i64(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_i64 t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#if HOST_BIG_ENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_movi_i32(ret, 0);
        return ret;
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    if (rt == 0) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        tcg_gen_ld32u_i64(ret, tcg_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
    }
    return ret;
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_movi_i64(ret, 0);
        return ret;
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new_i64();
            tcg_gen_mov_i64(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }

        tcg_gen_brcond_i64(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_i64(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set a PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_i64(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}

static uint64_t gva_offset_mask(DisasContext *ctx)
{
    return (ctx->tb_flags & PSW_W
            ? MAKE_64BIT_MASK(0, 62)
            : MAKE_64BIT_MASK(0, 32));
}
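
/*
 * Note: in wide (PSW_W) mode the offset is 62 bits, keeping the top two
 * bits of the 64-bit GVA clear; form_gva below ORs the space into the
 * masked offset, and space_select picks the implicit space register
 * from the top two bits of the base.
 */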

static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest,
                            uint64_t ival, TCGv_i64 vval)
{
    uint64_t mask = gva_offset_mask(ctx);

    if (ival != -1) {
        tcg_gen_movi_i64(dest, ival & mask);
        return;
    }
    tcg_debug_assert(vval != NULL);

    /*
     * We know that the IAOQ is already properly masked.
     * This optimization is primarily for "iaoq_f = iaoq_b".
     */
    if (vval == cpu_iaoq_f || vval == cpu_iaoq_b) {
        tcg_gen_mov_i64(dest, vval);
    } else {
        tcg_gen_andi_i64(dest, vval, mask);
    }
}

static inline uint64_t iaoq_dest(DisasContext *ctx, int64_t disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(tcg_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_i64(tcg_constant_i64(ctx->insn),
                   tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif
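
/*
 * Illustrative use: a privileged instruction's trans_* function begins
 * with, e.g., CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); and only proceeds
 * when translating at privilege level 0.
 */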

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        uint64_t f, uint64_t b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, NULL);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, NULL);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/* Need extensions from TCGv_i32 to TCGv_i64. */
static bool cond_need_ext(DisasContext *ctx, bool d)
{
    return !(ctx->is_pa20 && d);
}

/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the PA-RISC 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
                         TCGv_i64 res, TCGv_i64 cb_msb, TCGv_i64 sv)
{
    DisasCond cond;
    TCGv_i64 tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        if (cond_need_ext(ctx, d)) {
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, res);
            res = tmp;
        }
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V)) */
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, res, sv);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_ext32s_i64(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new_i64();
        tcg_gen_eqv_i64(tmp, res, sv);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_sextract_i64(tmp, tmp, 31, 1);
            tcg_gen_and_i64(tmp, tmp, res);
            tcg_gen_ext32u_i64(tmp, tmp);
        } else {
            tcg_gen_sari_i64(tmp, tmp, 63);
            tcg_gen_and_i64(tmp, tmp, res);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        /* Only bit 0 of cb_msb is ever set. */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new_i64();
        tcg_gen_neg_i64(tmp, cb_msb);
        tcg_gen_and_i64(tmp, tmp, res);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_ext32u_i64(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        if (cond_need_ext(ctx, d)) {
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32s_i64(tmp, sv);
            sv = tmp;
        }
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new_i64();
        tcg_gen_andi_i64(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res, TCGv_i64 in1,
                             TCGv_i64 in2, TCGv_i64 sv)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf >> 1) {
    case 1: /* = / <> */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 2: /* < / >= */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 3: /* <= / > */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 4: /* << / >>= */
        tc = TCG_COND_LTU;
        ext_uns = true;
        break;
    case 5: /* <<= / >> */
        tc = TCG_COND_LEU;
        ext_uns = true;
        break;
    default:
        return do_cond(ctx, cf, d, res, NULL, sv);
    }

    if (cf & 1) {
        tc = tcg_invert_cond(tc);
    }
    if (cond_need_ext(ctx, d)) {
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();

        if (ext_uns) {
            tcg_gen_ext32u_i64(t1, in1);
            tcg_gen_ext32u_i64(t2, in2);
        } else {
            tcg_gen_ext32s_i64(t1, in1);
            tcg_gen_ext32s_i64(t2, in2);
        }
        return cond_make_tmp(tc, t1, t2);
    }
    return cond_make(tc, in1, in2);
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 3:  /* <> */
        tc = TCG_COND_NE;
        ext_uns = true;
        break;
    case 4:  /* < */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 5:  /* >= */
        tc = TCG_COND_GE;
        ext_uns = false;
        break;
    case 6:  /* <= */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 7:  /* > */
        tc = TCG_COND_GT;
        ext_uns = false;
        break;

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(ctx, cf, d, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }

    if (cond_need_ext(ctx, d)) {
        TCGv_i64 tmp = tcg_temp_new_i64();

        if (ext_uns) {
            tcg_gen_ext32u_i64(tmp, res);
        } else {
            tcg_gen_ext32s_i64(tmp, res);
        }
        return cond_make_0_tmp(tc, tmp);
    }
    return cond_make_0(tc, res);
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d,
                             TCGv_i64 res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,=,<), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(ctx, c * 2 + f, d, res);
}
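
/*
 * E.g. orig = 5 (0b101): c = 1 ("="), f = 1 (inverted), giving
 * cf = 3, i.e. the "<>" condition in do_log_cond above.
 */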

/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, bool d, TCGv_i64 res,
                              TCGv_i64 in1, TCGv_i64 in2)
{
    DisasCond cond;
    TCGv_i64 tmp, cb = NULL;
    uint64_t d_repl = d ? 0x0000000100000001ull : 1;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();
        tcg_gen_or_i64(cb, in1, in2);
        tcg_gen_and_i64(tmp, in1, in2);
        tcg_gen_andc_i64(cb, cb, res);
        tcg_gen_or_i64(cb, cb, tmp);
    }
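
    /*
     * CB now holds the per-bit carry-out:
     * cout = (in1 & in2) | ((in1 | in2) & ~res).
     */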

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new_i64();
        tcg_gen_subi_i64(tmp, res, d_repl * 0x01010101u);
        tcg_gen_andc_i64(tmp, tmp, res);
        tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new_i64();
        tcg_gen_subi_i64(tmp, res, d_repl * 0x00010001u);
        tcg_gen_andc_i64(tmp, tmp, res);
        tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

static TCGv_i64 get_carry(DisasContext *ctx, bool d,
                          TCGv_i64 cb, TCGv_i64 cb_msb)
{
    if (cond_need_ext(ctx, d)) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_extract_i64(t, cb, 32, 1);
        return t;
    }
    return cb_msb;
}

static TCGv_i64 get_psw_carry(DisasContext *ctx, bool d)
{
    return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
}

/* Compute signed overflow for addition.  */
static TCGv_i64 do_add_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_andc_i64(sv, sv, tmp);

    return sv;
}
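
/*
 * I.e. sv = (res ^ in1) & ~(in1 ^ in2): overflow occurred iff the
 * operands have the same sign and the result's sign differs, so the
 * callers test SV < 0 (see do_cond cases 2, 3 and 6).
 */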

/* Compute signed overflow for subtraction.  */
static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_and_i64(sv, sv, tmp);

    return sv;
}
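
/*
 * Here sv = (res ^ in1) & (in1 ^ in2): for subtraction, overflow
 * occurred iff the operands differ in sign and the result's sign
 * differs from in1.
 */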

static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
{
    TCGv_i64 dest, cb, cb_msb, cb_cond, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = NULL;
    cb_msb = NULL;
    cb_cond = NULL;

    if (shift) {
        tmp = tcg_temp_new_i64();
        tcg_gen_shli_i64(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        cb_msb = tcg_temp_new_i64();
        cb = tcg_temp_new_i64();

        tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
        if (is_c) {
            tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb,
                             get_psw_carry(ctx, d), ctx->zero);
        }
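        /*
         * The carry-in to each bit i is dest ^ in1 ^ in2 at that bit;
         * together with cb_msb (the carry out of bit 63) this forms
         * the carry vector tested by the arithmetic conditions.
         */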
        tcg_gen_xor_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
        if (cond_need_cb(c)) {
            cb_cond = get_carry(ctx, d, cb, cb_msb);
        }
    } else {
        tcg_gen_add_i64(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_i64(dest, dest, get_psw_carry(ctx, d));
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(ctx, cf, d, dest, cb_cond, sv);
    if (is_tc) {
        tmp = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l,
           is_tsv, is_tc, is_c, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All ADDI conditions are 32-bit. */
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf, bool d)
{
    TCGv_i64 dest, sv, cb, cb_msb, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = tcg_temp_new_i64();
    cb_msb = tcg_temp_new_i64();

    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_i64(cb, in2);
        tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero,
                         get_psw_carry(ctx, d), ctx->zero);
        tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb, cb, ctx->zero);
        tcg_gen_xor_i64(cb, cb, in1);
        tcg_gen_xor_i64(cb, cb, dest);
    } else {
        /*
         * DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
         * operations by seeding the high word with 1 and subtracting.
         */
        TCGv_i64 one = tcg_constant_i64(1);
        tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
        tcg_gen_eqv_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
    }
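
    /*
     * Either way, CB now holds the per-bit carry-in vector of
     * IN1 + ~IN2 + C (cin_i = dest_i ^ in1_i ^ ~in2_i) and CB_MSB the
     * carry out of bit 63, matching the layout produced by do_add.
     */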

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
    } else {
        cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        tmp = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All SUBI conditions are 32-bit. */
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                      TCGv_i64 in2, unsigned cf, bool d)
{
    TCGv_i64 dest, sv;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    tcg_gen_sub_i64(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_i64(dest, 0);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, unsigned cf, bool d,
                   void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(ctx, cf, d, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                    TCGv_i64 in2, unsigned cf, bool d, bool is_tc,
                    void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new_i64();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, d, dest, in1, in2);

        if (is_tc) {
            TCGv_i64 tmp = tcg_temp_new_i64();
            tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(tcg_env, tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_i64 base)
{
    TCGv_ptr ptr;
    TCGv_i64 tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = tcg_temp_new_i64();
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new_i64();
    spc = tcg_temp_new_i64();

    /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
    tcg_gen_shri_i64(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
    tcg_gen_andi_i64(tmp, tmp, 030);
    tcg_gen_trunc_i64_ptr(ptr, tmp);

    tcg_gen_add_ptr(ptr, ptr, tcg_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));

    return spc;
}
#endif

static void form_gva(DisasContext *ctx, TCGv_i64 *pgva, TCGv_i64 *pofs,
                     unsigned rb, unsigned rx, int scale, int64_t disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_i64 base = load_gpr(ctx, rb);
    TCGv_i64 ofs;
    TCGv_i64 addr;

    set_insn_breg(ctx, rb);

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = tcg_temp_new_i64();
        tcg_gen_shli_i64(ofs, cpu_gr[rx], scale);
        tcg_gen_add_i64(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = tcg_temp_new_i64();
        tcg_gen_addi_i64(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
    *pgva = addr = tcg_temp_new_i64();
    tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base, gva_offset_mask(ctx));
#ifndef CONFIG_USER_ONLY
    if (!is_phys) {
        tcg_gen_or_i64(addr, addr, space_select(ctx, sp, base));
    }
#endif
}

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, int64_t disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, int64_t disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, int64_t disp,
                    unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = tcg_temp_new_i64();
    }
    do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, int64_t disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, int64_t disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    save_frd(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     int64_t disp, unsigned sp,
                     int modify, MemOp mop)
{
    nullify_over(ctx);
    do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);

    return nullify_end(ctx);
}

static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, tcg_env, tmp);

    save_frw_i32(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd(ra);
    dst = tcg_temp_new_i32();

    func(dst, tcg_env, src);

    save_frw_i32(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, tcg_env, tmp);

    save_frd(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, tcg_env, src);

    save_frd(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, tcg_env, a, b);

    save_frw_i32(rt, a);
    return nullify_end(ctx);
}

static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, tcg_env, a, b);

    save_frd(rt, a);
    return nullify_end(ctx);
}

/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, uint64_t dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n,
                       DisasCond *cond)
{
    uint64_t dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    tcg_gen_brcond_i64(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  */
static bool do_ibranch(DisasContext *ctx, TCGv_i64 dest,
                       unsigned link, bool is_n)
{
    TCGv_i64 a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = tcg_temp_new_i64();
        tcg_gen_mov_i64(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                copy_iaoq_entry(ctx, cpu_iaoq_f, -1, next);
                tcg_gen_addi_i64(next, next, 4);
                copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
1782            and we're allowed to skip execution of N (no single-step or
1783            tracepoint in effect).  Since the goto_ptr that we must use
1784            for the indirect branch consumes no special resources, we
1785            can (conditionally) skip B and continue execution.  */
1786         /* The use_nullify_skip test implies we have a known control path.  */
1787         tcg_debug_assert(ctx->iaoq_b != -1);
1788         tcg_debug_assert(ctx->iaoq_n != -1);
1789 
1790         /* We do have to handle the non-local temporary, DEST, before
1791            branching.  Since IAOQ_F is not really live at this point, we
1792            can simply store DEST optimistically.  Similarly with IAOQ_B.  */
1793         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, dest);
1794         next = tcg_temp_new_i64();
1795         tcg_gen_addi_i64(next, dest, 4);
1796         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
1797 
1798         nullify_over(ctx);
1799         if (link != 0) {
1800             copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1801         }
1802         tcg_gen_lookup_and_goto_ptr();
1803         return nullify_end(ctx);
1804     } else {
1805         c = ctx->null_cond.c;
1806         a0 = ctx->null_cond.a0;
1807         a1 = ctx->null_cond.a1;
1808 
1809         tmp = tcg_temp_new_i64();
1810         next = tcg_temp_new_i64();
1811 
1812         copy_iaoq_entry(ctx, tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1813         tcg_gen_movcond_i64(c, next, a0, a1, tmp, dest);
1814         ctx->iaoq_n = -1;
1815         ctx->iaoq_n_var = next;
1816 
1817         if (link != 0) {
1818             tcg_gen_movcond_i64(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1819         }
1820 
1821         if (is_n) {
1822             /* The branch nullifies the next insn, which means the state of N
1823                after the branch is the inverse of the state of N that applied
1824                to the branch.  */
1825             tcg_gen_setcond_i64(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1826             cond_free(&ctx->null_cond);
1827             ctx->null_cond = cond_make_n();
1828             ctx->psw_n_nonzero = true;
1829         } else {
1830             cond_free(&ctx->null_cond);
1831         }
1832     }
1833     return true;
1834 }
1835 
1836 /* Implement
1837  *    if (IAOQ_Front{30..31} < GR[b]{30..31})
1838  *      IAOQ_Next{30..31} ← GR[b]{30..31};
1839  *    else
1840  *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1841  * which keeps the privilege level from being increased.
1842  */
1843 static TCGv_i64 do_ibranch_priv(DisasContext *ctx, TCGv_i64 offset)
1844 {
1845     TCGv_i64 dest;
1846     switch (ctx->privilege) {
1847     case 0:
1848         /* Privilege 0 is maximum and is allowed to decrease.  */
1849         return offset;
1850     case 3:
1851         /* Privilege 3 is minimum and is never allowed to increase.  */
1852         dest = tcg_temp_new_i64();
1853         tcg_gen_ori_i64(dest, offset, 3);
1854         break;
1855     default:
1856         dest = tcg_temp_new_i64();
1857         tcg_gen_andi_i64(dest, offset, -4);
1858         tcg_gen_ori_i64(dest, dest, ctx->privilege);
1859         tcg_gen_movcond_i64(TCG_COND_GTU, dest, dest, offset, dest, offset);
1860         break;
1861     }
1862     return dest;
1863 }
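
/* E.g. at privilege 2: a target whose low bits request privilege 0 is
   clamped to 2 (the or'd value compares greater unsigned, so the
   movcond keeps it), while low bits of 3 are left alone.  The result
   is max(privilege, GR[b]{30..31}), so privilege never increases.  */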
1864 
1865 #ifdef CONFIG_USER_ONLY
1866 /* On Linux, page zero is normally marked execute only + gateway.
1867    Therefore normal read or write is supposed to fail, but specific
1868    offsets have kernel code mapped to raise permissions to implement
1869    system calls.  Handling this via an explicit check here, rather
1870    than in the "be disp(sr2,r0)" instruction that probably sent us
1871    here, is the easiest way to handle the branch delay slot on the
1872    aforementioned BE.  */
1873 static void do_page_zero(DisasContext *ctx)
1874 {
1875     TCGv_i64 tmp;
1876 
1877     /* If by some means we get here with PSW[N]=1, that implies that
1878        the B,GATE instruction would be skipped, and we'd fault on the
1879        next insn within the privileged page.  */
1880     switch (ctx->null_cond.c) {
1881     case TCG_COND_NEVER:
1882         break;
1883     case TCG_COND_ALWAYS:
1884         tcg_gen_movi_i64(cpu_psw_n, 0);
1885         goto do_sigill;
1886     default:
1887         /* Since this is always the first (and only) insn within the
1888            TB, we should know the state of PSW[N] from TB->FLAGS.  */
1889         g_assert_not_reached();
1890     }
1891 
1892     /* Check that we didn't arrive here via some means that allowed
1893        non-sequential instruction execution.  Normally the PSW[B] bit
1894        detects this by disallowing the B,GATE instruction to execute
1895        under such conditions.  */
1896     if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1897         goto do_sigill;
1898     }
1899 
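    /* The offsets below mirror the Linux gateway page entry points
       (cf. arch/parisc/kernel/syscall.S in the kernel); anything else
       on the page falls through to EXCP_ILL.  */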
1900     switch (ctx->iaoq_f & -4) {
1901     case 0x00: /* Null pointer call */
1902         gen_excp_1(EXCP_IMP);
1903         ctx->base.is_jmp = DISAS_NORETURN;
1904         break;
1905 
1906     case 0xb0: /* LWS */
1907         gen_excp_1(EXCP_SYSCALL_LWS);
1908         ctx->base.is_jmp = DISAS_NORETURN;
1909         break;
1910 
1911     case 0xe0: /* SET_THREAD_POINTER */
1912         tcg_gen_st_i64(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
1913         tmp = tcg_temp_new_i64();
1914         tcg_gen_ori_i64(tmp, cpu_gr[31], 3);
1915         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
1916         tcg_gen_addi_i64(tmp, tmp, 4);
1917         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
1918         ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1919         break;
1920 
1921     case 0x100: /* SYSCALL */
1922         gen_excp_1(EXCP_SYSCALL);
1923         ctx->base.is_jmp = DISAS_NORETURN;
1924         break;
1925 
1926     default:
1927     do_sigill:
1928         gen_excp_1(EXCP_ILL);
1929         ctx->base.is_jmp = DISAS_NORETURN;
1930         break;
1931     }
1932 }
1933 #endif
1934 
1935 static bool trans_nop(DisasContext *ctx, arg_nop *a)
1936 {
1937     cond_free(&ctx->null_cond);
1938     return true;
1939 }
1940 
1941 static bool trans_break(DisasContext *ctx, arg_break *a)
1942 {
1943     return gen_excp_iir(ctx, EXCP_BREAK);
1944 }
1945 
1946 static bool trans_sync(DisasContext *ctx, arg_sync *a)
1947 {
1948     /* No point in nullifying the memory barrier.  */
1949     tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1950 
1951     cond_free(&ctx->null_cond);
1952     return true;
1953 }
1954 
1955 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
1956 {
1957     unsigned rt = a->t;
1958     TCGv_i64 tmp = dest_gpr(ctx, rt);
1959     tcg_gen_movi_i64(tmp, ctx->iaoq_f);
1960     save_gpr(ctx, rt, tmp);
1961 
1962     cond_free(&ctx->null_cond);
1963     return true;
1964 }
1965 
1966 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
1967 {
1968     unsigned rt = a->t;
1969     unsigned rs = a->sp;
1970     TCGv_i64 t0 = tcg_temp_new_i64();
1971 
1972     load_spr(ctx, t0, rs);
1973     tcg_gen_shri_i64(t0, t0, 32);
1974 
1975     save_gpr(ctx, rt, t0);
1976 
1977     cond_free(&ctx->null_cond);
1978     return true;
1979 }
1980 
1981 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
1982 {
1983     unsigned rt = a->t;
1984     unsigned ctl = a->r;
1985     TCGv_i64 tmp;
1986 
1987     switch (ctl) {
1988     case CR_SAR:
1989         if (a->e == 0) {
1990             /* MFSAR without ,W masks low 5 bits.  */
1991             tmp = dest_gpr(ctx, rt);
1992             tcg_gen_andi_i64(tmp, cpu_sar, 31);
1993             save_gpr(ctx, rt, tmp);
1994             goto done;
1995         }
1996         save_gpr(ctx, rt, cpu_sar);
1997         goto done;
1998     case CR_IT: /* Interval Timer */
1999         /* FIXME: Respect PSW_S bit.  */
2000         nullify_over(ctx);
2001         tmp = dest_gpr(ctx, rt);
2002         if (translator_io_start(&ctx->base)) {
2003             gen_helper_read_interval_timer(tmp);
2004             ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2005         } else {
2006             gen_helper_read_interval_timer(tmp);
2007         }
2008         save_gpr(ctx, rt, tmp);
2009         return nullify_end(ctx);
2010     case 26:
2011     case 27:
2012         break;
2013     default:
2014         /* All other control registers are privileged.  */
2015         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2016         break;
2017     }
2018 
2019     tmp = tcg_temp_new_i64();
2020     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2021     save_gpr(ctx, rt, tmp);
2022 
2023  done:
2024     cond_free(&ctx->null_cond);
2025     return true;
2026 }
2027 
2028 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2029 {
2030     unsigned rr = a->r;
2031     unsigned rs = a->sp;
2032     TCGv_i64 tmp;
2033 
2034     if (rs >= 5) {
2035         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2036     }
2037     nullify_over(ctx);
2038 
2039     tmp = tcg_temp_new_i64();
2040     tcg_gen_shli_i64(tmp, load_gpr(ctx, rr), 32);
2041 
2042     if (rs >= 4) {
2043         tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, sr[rs]));
2044         ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2045     } else {
2046         tcg_gen_mov_i64(cpu_sr[rs], tmp);
2047     }
2048 
2049     return nullify_end(ctx);
2050 }
2051 
2052 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2053 {
2054     unsigned ctl = a->t;
2055     TCGv_i64 reg;
2056     TCGv_i64 tmp;
2057 
2058     if (ctl == CR_SAR) {
2059         reg = load_gpr(ctx, a->r);
2060         tmp = tcg_temp_new_i64();
2061         tcg_gen_andi_i64(tmp, reg, ctx->is_pa20 ? 63 : 31);
2062         save_or_nullify(ctx, cpu_sar, tmp);
2063 
2064         cond_free(&ctx->null_cond);
2065         return true;
2066     }
2067 
2068     /* All other control registers are privileged or read-only.  */
2069     CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2070 
2071 #ifndef CONFIG_USER_ONLY
2072     nullify_over(ctx);
2073 
2074     if (ctx->is_pa20) {
2075         reg = load_gpr(ctx, a->r);
2076     } else {
2077         reg = tcg_temp_new_i64();
2078         tcg_gen_ext32u_i64(reg, load_gpr(ctx, a->r));
2079     }
2080 
2081     switch (ctl) {
2082     case CR_IT:
2083         gen_helper_write_interval_timer(tcg_env, reg);
2084         break;
2085     case CR_EIRR:
2086         gen_helper_write_eirr(tcg_env, reg);
2087         break;
2088     case CR_EIEM:
2089         gen_helper_write_eiem(tcg_env, reg);
2090         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2091         break;
2092 
2093     case CR_IIASQ:
2094     case CR_IIAOQ:
2095         /* FIXME: Respect PSW_Q bit */
2096         /* The write advances the queue and stores to the back element.  */
2097         tmp = tcg_temp_new_i64();
2098         tcg_gen_ld_i64(tmp, tcg_env,
2099                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2100         tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2101         tcg_gen_st_i64(reg, tcg_env,
2102                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2103         break;
2104 
2105     case CR_PID1:
2106     case CR_PID2:
2107     case CR_PID3:
2108     case CR_PID4:
2109         tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2111         gen_helper_change_prot_id(tcg_env);
2113         break;
2114 
2115     default:
2116         tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2117         break;
2118     }
2119     return nullify_end(ctx);
2120 #endif
2121 }
2122 
2123 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2124 {
2125     TCGv_i64 tmp = tcg_temp_new_i64();
2126 
2127     tcg_gen_not_i64(tmp, load_gpr(ctx, a->r));
2128     tcg_gen_andi_i64(tmp, tmp, ctx->is_pa20 ? 63 : 31);
2129     save_or_nullify(ctx, cpu_sar, tmp);
2130 
2131     cond_free(&ctx->null_cond);
2132     return true;
2133 }
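
/* Note the complement: SAR receives ~GR[r] masked to the width, i.e.
   width-1 - (GR[r] mod width).  This is the form the variable shift
   and deposit instructions want when a left-shift amount is in hand,
   avoiding an explicit 31-n or 63-n computation.  */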
2134 
2135 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2136 {
2137     TCGv_i64 dest = dest_gpr(ctx, a->t);
2138 
2139 #ifdef CONFIG_USER_ONLY
2140     /* We don't implement space registers in user mode. */
2141     tcg_gen_movi_i64(dest, 0);
2142 #else
2143     tcg_gen_mov_i64(dest, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2144     tcg_gen_shri_i64(dest, dest, 32);
2145 #endif
2146     save_gpr(ctx, a->t, dest);
2147 
2148     cond_free(&ctx->null_cond);
2149     return true;
2150 }
2151 
2152 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2153 {
2154     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2155 #ifndef CONFIG_USER_ONLY
2156     TCGv_i64 tmp;
2157 
2158     nullify_over(ctx);
2159 
2160     tmp = tcg_temp_new_i64();
2161     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2162     tcg_gen_andi_i64(tmp, tmp, ~a->i);
2163     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2164     save_gpr(ctx, a->t, tmp);
2165 
2166     /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
2167     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2168     return nullify_end(ctx);
2169 #endif
2170 }
2171 
2172 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2173 {
2174     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2175 #ifndef CONFIG_USER_ONLY
2176     TCGv_i64 tmp;
2177 
2178     nullify_over(ctx);
2179 
2180     tmp = tcg_temp_new_i64();
2181     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2182     tcg_gen_ori_i64(tmp, tmp, a->i);
2183     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2184     save_gpr(ctx, a->t, tmp);
2185 
2186     /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
2187     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2188     return nullify_end(ctx);
2189 #endif
2190 }
2191 
2192 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2193 {
2194     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2195 #ifndef CONFIG_USER_ONLY
2196     TCGv_i64 tmp, reg;
2197     nullify_over(ctx);
2198 
2199     reg = load_gpr(ctx, a->r);
2200     tmp = tcg_temp_new_i64();
2201     gen_helper_swap_system_mask(tmp, tcg_env, reg);
2202 
2203     /* Exit the TB to recognize new interrupts.  */
2204     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2205     return nullify_end(ctx);
2206 #endif
2207 }
2208 
2209 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2210 {
2211     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2212 #ifndef CONFIG_USER_ONLY
2213     nullify_over(ctx);
2214 
2215     if (rfi_r) {
2216         gen_helper_rfi_r(tcg_env);
2217     } else {
2218         gen_helper_rfi(tcg_env);
2219     }
2220     /* Exit the TB to recognize new interrupts.  */
2221     tcg_gen_exit_tb(NULL, 0);
2222     ctx->base.is_jmp = DISAS_NORETURN;
2223 
2224     return nullify_end(ctx);
2225 #endif
2226 }
2227 
2228 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2229 {
2230     return do_rfi(ctx, false);
2231 }
2232 
2233 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2234 {
2235     return do_rfi(ctx, true);
2236 }
2237 
2238 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2239 {
2240     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2241 #ifndef CONFIG_USER_ONLY
2242     nullify_over(ctx);
2243     gen_helper_halt(tcg_env);
2244     ctx->base.is_jmp = DISAS_NORETURN;
2245     return nullify_end(ctx);
2246 #endif
2247 }
2248 
2249 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2250 {
2251     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2252 #ifndef CONFIG_USER_ONLY
2253     nullify_over(ctx);
2254     gen_helper_reset(tcg_env);
2255     ctx->base.is_jmp = DISAS_NORETURN;
2256     return nullify_end(ctx);
2257 #endif
2258 }
2259 
2260 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2261 {
2262     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2263 #ifndef CONFIG_USER_ONLY
2264     nullify_over(ctx);
2265     gen_helper_getshadowregs(tcg_env);
2266     return nullify_end(ctx);
2267 #endif
2268 }
2269 
2270 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2271 {
2272     if (a->m) {
2273         TCGv_i64 dest = dest_gpr(ctx, a->b);
2274         TCGv_i64 src1 = load_gpr(ctx, a->b);
2275         TCGv_i64 src2 = load_gpr(ctx, a->x);
2276 
2277         /* The only thing we need to do is the base register modification.  */
2278         tcg_gen_add_i64(dest, src1, src2);
2279         save_gpr(ctx, a->b, dest);
2280     }
2281     cond_free(&ctx->null_cond);
2282     return true;
2283 }
2284 
2285 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2286 {
2287     TCGv_i64 dest, ofs;
2288     TCGv_i32 level, want;
2289     TCGv_i64 addr;
2290 
2291     nullify_over(ctx);
2292 
2293     dest = dest_gpr(ctx, a->t);
2294     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2295 
2296     if (a->imm) {
2297         level = tcg_constant_i32(a->ri);
2298     } else {
2299         level = tcg_temp_new_i32();
2300         tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri));
2301         tcg_gen_andi_i32(level, level, 3);
2302     }
2303     want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2304 
2305     gen_helper_probe(dest, tcg_env, addr, level, want);
2306 
2307     save_gpr(ctx, a->t, dest);
2308     return nullify_end(ctx);
2309 }
2310 
2311 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2312 {
2313     if (ctx->is_pa20) {
2314         return false;
2315     }
2316     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2317 #ifndef CONFIG_USER_ONLY
2318     TCGv_i64 addr;
2319     TCGv_i64 ofs, reg;
2320 
2321     nullify_over(ctx);
2322 
2323     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2324     reg = load_gpr(ctx, a->r);
2325     if (a->addr) {
2326         gen_helper_itlba_pa11(tcg_env, addr, reg);
2327     } else {
2328         gen_helper_itlbp_pa11(tcg_env, addr, reg);
2329     }
2330 
2331     /* Exit TB for TLB change if mmu is enabled.  */
2332     if (ctx->tb_flags & PSW_C) {
2333         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2334     }
2335     return nullify_end(ctx);
2336 #endif
2337 }
2338 
2339 static bool do_pxtlb(DisasContext *ctx, arg_ldst *a, bool local)
2340 {
2341     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2342 #ifndef CONFIG_USER_ONLY
2343     TCGv_i64 addr;
2344     TCGv_i64 ofs;
2345 
2346     nullify_over(ctx);
2347 
2348     form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2349 
2350     /*
2351      * Page align now, rather than later, so that we can add in the
2352      * pa2.0 page_size field from the low 4 bits of GR[b].
2353      */
2354     tcg_gen_andi_i64(addr, addr, TARGET_PAGE_MASK);
2355     if (ctx->is_pa20) {
2356         tcg_gen_deposit_i64(addr, addr, load_gpr(ctx, a->b), 0, 4);
2357     }
2358 
2359     if (local) {
2360         gen_helper_ptlb_l(tcg_env, addr);
2361     } else {
2362         gen_helper_ptlb(tcg_env, addr);
2363     }
2364 
2365     if (a->m) {
2366         save_gpr(ctx, a->b, ofs);
2367     }
2368 
2369     /* Exit TB for TLB change if mmu is enabled.  */
2370     if (ctx->tb_flags & PSW_C) {
2371         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2372     }
2373     return nullify_end(ctx);
2374 #endif
2375 }
2376 
2377 static bool trans_pxtlb(DisasContext *ctx, arg_ldst *a)
2378 {
2379     return do_pxtlb(ctx, a, false);
2380 }
2381 
2382 static bool trans_pxtlb_l(DisasContext *ctx, arg_ldst *a)
2383 {
2384     return ctx->is_pa20 && do_pxtlb(ctx, a, true);
2385 }
2386 
2387 static bool trans_pxtlbe(DisasContext *ctx, arg_ldst *a)
2388 {
2389     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2390 #ifndef CONFIG_USER_ONLY
2391     nullify_over(ctx);
2392 
2393     trans_nop_addrx(ctx, a);
2394     gen_helper_ptlbe(tcg_env);
2395 
2396     /* Exit TB for TLB change if mmu is enabled.  */
2397     if (ctx->tb_flags & PSW_C) {
2398         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2399     }
2400     return nullify_end(ctx);
2401 #endif
2402 }
2403 
2404 /*
2405  * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2406  * See
2407  *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2408  *     page 13-9 (195/206)
2409  */
2410 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2411 {
2412     if (ctx->is_pa20) {
2413         return false;
2414     }
2415     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2416 #ifndef CONFIG_USER_ONLY
2417     TCGv_i64 addr, atl, stl;
2418     TCGv_i64 reg;
2419 
2420     nullify_over(ctx);
2421 
2422     /*
2423      * FIXME:
2424      *  if (not (pcxl or pcxl2))
2425      *    return gen_illegal(ctx);
2426      */
2427 
2428     atl = tcg_temp_new_i64();
2429     stl = tcg_temp_new_i64();
2430     addr = tcg_temp_new_i64();
2431 
2432     tcg_gen_ld32u_i64(stl, tcg_env,
2433                       a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2434                       : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2435     tcg_gen_ld32u_i64(atl, tcg_env,
2436                       a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2437                       : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2438     tcg_gen_shli_i64(stl, stl, 32);
2439     tcg_gen_or_i64(addr, atl, stl);
2440 
2441     reg = load_gpr(ctx, a->r);
2442     if (a->addr) {
2443         gen_helper_itlba_pa11(tcg_env, addr, reg);
2444     } else {
2445         gen_helper_itlbp_pa11(tcg_env, addr, reg);
2446     }
2447 
2448     /* Exit TB for TLB change if mmu is enabled.  */
2449     if (ctx->tb_flags & PSW_C) {
2450         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2451     }
2452     return nullify_end(ctx);
2453 #endif
2454 }
2455 
2456 static bool trans_ixtlbt(DisasContext *ctx, arg_ixtlbt *a)
2457 {
2458     if (!ctx->is_pa20) {
2459         return false;
2460     }
2461     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2462 #ifndef CONFIG_USER_ONLY
2463     nullify_over(ctx);
2464     {
2465         TCGv_i64 src1 = load_gpr(ctx, a->r1);
2466         TCGv_i64 src2 = load_gpr(ctx, a->r2);
2467 
2468         if (a->data) {
2469             gen_helper_idtlbt_pa20(tcg_env, src1, src2);
2470         } else {
2471             gen_helper_iitlbt_pa20(tcg_env, src1, src2);
2472         }
2473     }
2474     /* Exit TB for TLB change if mmu is enabled.  */
2475     if (ctx->tb_flags & PSW_C) {
2476         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2477     }
2478     return nullify_end(ctx);
2479 #endif
2480 }
2481 
2482 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2483 {
2484     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2485 #ifndef CONFIG_USER_ONLY
2486     TCGv_i64 vaddr;
2487     TCGv_i64 ofs, paddr;
2488 
2489     nullify_over(ctx);
2490 
2491     form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2492 
2493     paddr = tcg_temp_new_i64();
2494     gen_helper_lpa(paddr, tcg_env, vaddr);
2495 
2496     /* Note that the physical address result overrides the base modification.  */
2497     if (a->m) {
2498         save_gpr(ctx, a->b, ofs);
2499     }
2500     save_gpr(ctx, a->t, paddr);
2501 
2502     return nullify_end(ctx);
2503 #endif
2504 }
2505 
2506 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2507 {
2508     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2509 
2510     /* The Coherence Index is an implementation-defined function of the
2511        physical address.  Two addresses with the same CI have a coherent
2512        view of the cache.  Our implementation returns 0 for all addresses,
2513        since the entire address space is coherent.  */
2514     save_gpr(ctx, a->t, ctx->zero);
2515 
2516     cond_free(&ctx->null_cond);
2517     return true;
2518 }
2519 
2520 static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2521 {
2522     return do_add_reg(ctx, a, false, false, false, false);
2523 }
2524 
2525 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2526 {
2527     return do_add_reg(ctx, a, true, false, false, false);
2528 }
2529 
2530 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2531 {
2532     return do_add_reg(ctx, a, false, true, false, false);
2533 }
2534 
2535 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2536 {
2537     return do_add_reg(ctx, a, false, false, false, true);
2538 }
2539 
2540 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2541 {
2542     return do_add_reg(ctx, a, false, true, false, true);
2543 }
2544 
2545 static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a)
2546 {
2547     return do_sub_reg(ctx, a, false, false, false);
2548 }
2549 
2550 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2551 {
2552     return do_sub_reg(ctx, a, true, false, false);
2553 }
2554 
2555 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2556 {
2557     return do_sub_reg(ctx, a, false, false, true);
2558 }
2559 
2560 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2561 {
2562     return do_sub_reg(ctx, a, true, false, true);
2563 }
2564 
2565 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a)
2566 {
2567     return do_sub_reg(ctx, a, false, true, false);
2568 }
2569 
2570 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2571 {
2572     return do_sub_reg(ctx, a, true, true, false);
2573 }
2574 
2575 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a)
2576 {
2577     return do_log_reg(ctx, a, tcg_gen_andc_i64);
2578 }
2579 
2580 static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a)
2581 {
2582     return do_log_reg(ctx, a, tcg_gen_and_i64);
2583 }
2584 
2585 static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
2586 {
2587     if (a->cf == 0) {
2588         unsigned r2 = a->r2;
2589         unsigned r1 = a->r1;
2590         unsigned rt = a->t;
2591 
2592         if (rt == 0) { /* NOP */
2593             cond_free(&ctx->null_cond);
2594             return true;
2595         }
2596         if (r2 == 0) { /* COPY */
2597             if (r1 == 0) {
2598                 TCGv_i64 dest = dest_gpr(ctx, rt);
2599                 tcg_gen_movi_i64(dest, 0);
2600                 save_gpr(ctx, rt, dest);
2601             } else {
2602                 save_gpr(ctx, rt, cpu_gr[r1]);
2603             }
2604             cond_free(&ctx->null_cond);
2605             return true;
2606         }
2607 #ifndef CONFIG_USER_ONLY
2608         /* These are QEMU extensions and are nops in the real architecture:
2609          *
2610          * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2611          * or %r31,%r31,%r31 -- death loop; offline cpu
2612          *                      currently implemented as idle.
2613          */
2614         if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2615             /* No need to check for supervisor, as userland can only pause
2616                until the next timer interrupt.  */
2617             nullify_over(ctx);
2618 
2619             /* Advance the instruction queue.  */
2620             copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2621             copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2622             nullify_set(ctx, 0);
2623 
2624             /* Tell the qemu main loop to halt until this cpu has work.  */
2625             tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
2626                            offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2627             gen_excp_1(EXCP_HALTED);
2628             ctx->base.is_jmp = DISAS_NORETURN;
2629 
2630             return nullify_end(ctx);
2631         }
2632 #endif
2633     }
2634     return do_log_reg(ctx, a, tcg_gen_or_i64);
2635 }
2636 
2637 static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a)
2638 {
2639     return do_log_reg(ctx, a, tcg_gen_xor_i64);
2640 }
2641 
2642 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a)
2643 {
2644     TCGv_i64 tcg_r1, tcg_r2;
2645 
2646     if (a->cf) {
2647         nullify_over(ctx);
2648     }
2649     tcg_r1 = load_gpr(ctx, a->r1);
2650     tcg_r2 = load_gpr(ctx, a->r2);
2651     do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d);
2652     return nullify_end(ctx);
2653 }
2654 
2655 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
2656 {
2657     TCGv_i64 tcg_r1, tcg_r2;
2658 
2659     if (a->cf) {
2660         nullify_over(ctx);
2661     }
2662     tcg_r1 = load_gpr(ctx, a->r1);
2663     tcg_r2 = load_gpr(ctx, a->r2);
2664     do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, false, tcg_gen_xor_i64);
2665     return nullify_end(ctx);
2666 }
2667 
2668 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc)
2669 {
2670     TCGv_i64 tcg_r1, tcg_r2, tmp;
2671 
2672     if (a->cf) {
2673         nullify_over(ctx);
2674     }
2675     tcg_r1 = load_gpr(ctx, a->r1);
2676     tcg_r2 = load_gpr(ctx, a->r2);
2677     tmp = tcg_temp_new_i64();
2678     tcg_gen_not_i64(tmp, tcg_r2);
2679     do_unit(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, tcg_gen_add_i64);
2680     return nullify_end(ctx);
2681 }
2682 
2683 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a)
2684 {
2685     return do_uaddcm(ctx, a, false);
2686 }
2687 
2688 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2689 {
2690     return do_uaddcm(ctx, a, true);
2691 }
2692 
2693 static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i)
2694 {
2695     TCGv_i64 tmp;
2696 
2697     nullify_over(ctx);
2698 
2699     tmp = tcg_temp_new_i64();
2700     tcg_gen_shri_i64(tmp, cpu_psw_cb, 3);
2701     if (!is_i) {
2702         tcg_gen_not_i64(tmp, tmp);
2703     }
2704     tcg_gen_andi_i64(tmp, tmp, (uint64_t)0x1111111111111111ull);
2705     tcg_gen_muli_i64(tmp, tmp, 6);
2706     do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, a->d, false,
2707             is_i ? tcg_gen_add_i64 : tcg_gen_sub_i64);
2708     return nullify_end(ctx);
2709 }
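
/* The usual idiom (roughly, per the PA-RISC manual) is packed-BCD
   addition: pre-bias one operand with 0x6666...66, ADD, then DCOR the
   sum to strip the excess 6 from every digit that produced no decimal
   carry.  The ,i form instead leaves every digit biased by 6, ready
   for the next addition of a multiword sum.  */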
2710 
2711 static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a)
2712 {
2713     return do_dcor(ctx, a, false);
2714 }
2715 
2716 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a)
2717 {
2718     return do_dcor(ctx, a, true);
2719 }
2720 
2721 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2722 {
2723     TCGv_i64 dest, add1, add2, addc, in1, in2;
2724     TCGv_i64 cout;
2725 
2726     nullify_over(ctx);
2727 
2728     in1 = load_gpr(ctx, a->r1);
2729     in2 = load_gpr(ctx, a->r2);
2730 
2731     add1 = tcg_temp_new_i64();
2732     add2 = tcg_temp_new_i64();
2733     addc = tcg_temp_new_i64();
2734     dest = tcg_temp_new_i64();
2735 
2736     /* Form R1 << 1 | PSW[CB]{8}.  */
2737     tcg_gen_add_i64(add1, in1, in1);
2738     tcg_gen_add_i64(add1, add1, get_psw_carry(ctx, false));
2739 
2740     /*
2741      * Add or subtract R2, depending on PSW[V].  Proper computation of
2742      * carry requires that we subtract via + ~R2 + 1, as described in
2743      * the manual.  By extracting and masking V, we can produce the
2744      * proper inputs to the addition without movcond.
2745      */
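    /* Concretely: sextract yields addc = -1 when PSW[V] is set, making
       add2 = ~R2, and the surviving low bit of addc supplies the +1,
       so the sums below compute add1 - R2; with PSW[V] clear we get
       add2 = R2 and addc = 0, a plain addition.  */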
2746     tcg_gen_sextract_i64(addc, cpu_psw_v, 31, 1);
2747     tcg_gen_xor_i64(add2, in2, addc);
2748     tcg_gen_andi_i64(addc, addc, 1);
2749 
2750     tcg_gen_add2_i64(dest, cpu_psw_cb_msb, add1, ctx->zero, add2, ctx->zero);
2751     tcg_gen_add2_i64(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb,
2752                      addc, ctx->zero);
2753 
2754     /* Write back the result register.  */
2755     save_gpr(ctx, a->t, dest);
2756 
2757     /* Write back PSW[CB].  */
2758     tcg_gen_xor_i64(cpu_psw_cb, add1, add2);
2759     tcg_gen_xor_i64(cpu_psw_cb, cpu_psw_cb, dest);
2760 
2761     /* Write back PSW[V] for the division step.  */
2762     cout = get_psw_carry(ctx, false);
2763     tcg_gen_neg_i64(cpu_psw_v, cout);
2764     tcg_gen_xor_i64(cpu_psw_v, cpu_psw_v, in2);
2765 
2766     /* Install the new nullification.  */
2767     if (a->cf) {
2768         TCGv_i64 sv = NULL;
2769         if (cond_need_sv(a->cf >> 1)) {
2770             /* ??? The lshift is supposed to contribute to overflow.  */
2771             sv = do_add_sv(ctx, dest, add1, add2);
2772         }
2773         ctx->null_cond = do_cond(ctx, a->cf, false, dest, cout, sv);
2774     }
2775 
2776     return nullify_end(ctx);
2777 }
2778 
2779 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2780 {
2781     return do_add_imm(ctx, a, false, false);
2782 }
2783 
2784 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2785 {
2786     return do_add_imm(ctx, a, true, false);
2787 }
2788 
2789 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2790 {
2791     return do_add_imm(ctx, a, false, true);
2792 }
2793 
2794 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2795 {
2796     return do_add_imm(ctx, a, true, true);
2797 }
2798 
2799 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2800 {
2801     return do_sub_imm(ctx, a, false);
2802 }
2803 
2804 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2805 {
2806     return do_sub_imm(ctx, a, true);
2807 }
2808 
2809 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a)
2810 {
2811     TCGv_i64 tcg_im, tcg_r2;
2812 
2813     if (a->cf) {
2814         nullify_over(ctx);
2815     }
2816 
2817     tcg_im = tcg_constant_i64(a->i);
2818     tcg_r2 = load_gpr(ctx, a->r);
2819     do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d);
2820 
2821     return nullify_end(ctx);
2822 }
2823 
2824 static bool do_multimedia(DisasContext *ctx, arg_rrr *a,
2825                           void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
2826 {
2827     TCGv_i64 r1, r2, dest;
2828 
2829     if (!ctx->is_pa20) {
2830         return false;
2831     }
2832 
2833     nullify_over(ctx);
2834 
2835     r1 = load_gpr(ctx, a->r1);
2836     r2 = load_gpr(ctx, a->r2);
2837     dest = dest_gpr(ctx, a->t);
2838 
2839     fn(dest, r1, r2);
2840     save_gpr(ctx, a->t, dest);
2841 
2842     return nullify_end(ctx);
2843 }
2844 
2845 static bool do_multimedia_sh(DisasContext *ctx, arg_rri *a,
2846                              void (*fn)(TCGv_i64, TCGv_i64, int64_t))
2847 {
2848     TCGv_i64 r, dest;
2849 
2850     if (!ctx->is_pa20) {
2851         return false;
2852     }
2853 
2854     nullify_over(ctx);
2855 
2856     r = load_gpr(ctx, a->r);
2857     dest = dest_gpr(ctx, a->t);
2858 
2859     fn(dest, r, a->i);
2860     save_gpr(ctx, a->t, dest);
2861 
2862     return nullify_end(ctx);
2863 }
2864 
2865 static bool do_multimedia_shadd(DisasContext *ctx, arg_rrr_sh *a,
2866                                 void (*fn)(TCGv_i64, TCGv_i64,
2867                                            TCGv_i64, TCGv_i32))
2868 {
2869     TCGv_i64 r1, r2, dest;
2870 
2871     if (!ctx->is_pa20) {
2872         return false;
2873     }
2874 
2875     nullify_over(ctx);
2876 
2877     r1 = load_gpr(ctx, a->r1);
2878     r2 = load_gpr(ctx, a->r2);
2879     dest = dest_gpr(ctx, a->t);
2880 
2881     fn(dest, r1, r2, tcg_constant_i32(a->sh));
2882     save_gpr(ctx, a->t, dest);
2883 
2884     return nullify_end(ctx);
2885 }
2886 
2887 static bool trans_hadd(DisasContext *ctx, arg_rrr *a)
2888 {
2889     return do_multimedia(ctx, a, tcg_gen_vec_add16_i64);
2890 }
2891 
2892 static bool trans_hadd_ss(DisasContext *ctx, arg_rrr *a)
2893 {
2894     return do_multimedia(ctx, a, gen_helper_hadd_ss);
2895 }
2896 
2897 static bool trans_hadd_us(DisasContext *ctx, arg_rrr *a)
2898 {
2899     return do_multimedia(ctx, a, gen_helper_hadd_us);
2900 }
2901 
2902 static bool trans_havg(DisasContext *ctx, arg_rrr *a)
2903 {
2904     return do_multimedia(ctx, a, gen_helper_havg);
2905 }
2906 
2907 static bool trans_hshl(DisasContext *ctx, arg_rri *a)
2908 {
2909     return do_multimedia_sh(ctx, a, tcg_gen_vec_shl16i_i64);
2910 }
2911 
2912 static bool trans_hshr_s(DisasContext *ctx, arg_rri *a)
2913 {
2914     return do_multimedia_sh(ctx, a, tcg_gen_vec_sar16i_i64);
2915 }
2916 
2917 static bool trans_hshr_u(DisasContext *ctx, arg_rri *a)
2918 {
2919     return do_multimedia_sh(ctx, a, tcg_gen_vec_shr16i_i64);
2920 }
2921 
2922 static bool trans_hshladd(DisasContext *ctx, arg_rrr_sh *a)
2923 {
2924     return do_multimedia_shadd(ctx, a, gen_helper_hshladd);
2925 }
2926 
2927 static bool trans_hshradd(DisasContext *ctx, arg_rrr_sh *a)
2928 {
2929     return do_multimedia_shadd(ctx, a, gen_helper_hshradd);
2930 }
2931 
2932 static bool trans_hsub(DisasContext *ctx, arg_rrr *a)
2933 {
2934     return do_multimedia(ctx, a, tcg_gen_vec_sub16_i64);
2935 }
2936 
2937 static bool trans_hsub_ss(DisasContext *ctx, arg_rrr *a)
2938 {
2939     return do_multimedia(ctx, a, gen_helper_hsub_ss);
2940 }
2941 
2942 static bool trans_hsub_us(DisasContext *ctx, arg_rrr *a)
2943 {
2944     return do_multimedia(ctx, a, gen_helper_hsub_us);
2945 }
2946 
2947 static void gen_mixh_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2948 {
2949     uint64_t mask = 0xffff0000ffff0000ull;
2950     TCGv_i64 tmp = tcg_temp_new_i64();
2951 
2952     tcg_gen_andi_i64(tmp, r2, mask);
2953     tcg_gen_andi_i64(dst, r1, mask);
2954     tcg_gen_shri_i64(tmp, tmp, 16);
2955     tcg_gen_or_i64(dst, dst, tmp);
2956 }
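
/* E.g. viewing r1 = {a0,a1,a2,a3} and r2 = {b0,b1,b2,b3} as big-endian
   halfwords, mixh,l yields {a0,b0,a2,b2}.  */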
2957 
2958 static bool trans_mixh_l(DisasContext *ctx, arg_rrr *a)
2959 {
2960     return do_multimedia(ctx, a, gen_mixh_l);
2961 }
2962 
2963 static void gen_mixh_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2964 {
2965     uint64_t mask = 0x0000ffff0000ffffull;
2966     TCGv_i64 tmp = tcg_temp_new_i64();
2967 
2968     tcg_gen_andi_i64(tmp, r1, mask);
2969     tcg_gen_andi_i64(dst, r2, mask);
2970     tcg_gen_shli_i64(tmp, tmp, 16);
2971     tcg_gen_or_i64(dst, dst, tmp);
2972 }
2973 
2974 static bool trans_mixh_r(DisasContext *ctx, arg_rrr *a)
2975 {
2976     return do_multimedia(ctx, a, gen_mixh_r);
2977 }
2978 
2979 static void gen_mixw_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2980 {
2981     TCGv_i64 tmp = tcg_temp_new_i64();
2982 
2983     tcg_gen_shri_i64(tmp, r2, 32);
2984     tcg_gen_deposit_i64(dst, r1, tmp, 0, 32);
2985 }
2986 
2987 static bool trans_mixw_l(DisasContext *ctx, arg_rrr *a)
2988 {
2989     return do_multimedia(ctx, a, gen_mixw_l);
2990 }
2991 
2992 static void gen_mixw_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2993 {
2994     tcg_gen_deposit_i64(dst, r2, r1, 32, 32);
2995 }
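
/* E.g. viewing r1 = {a0,a1} and r2 = {b0,b1} as big-endian words,
   mixw,r yields {a1,b1}; gen_mixw_l above similarly yields {a0,b0}.  */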
2996 
2997 static bool trans_mixw_r(DisasContext *ctx, arg_rrr *a)
2998 {
2999     return do_multimedia(ctx, a, gen_mixw_r);
3000 }
3001 
3002 static bool trans_permh(DisasContext *ctx, arg_permh *a)
3003 {
3004     TCGv_i64 r, t0, t1, t2, t3;
3005 
3006     if (!ctx->is_pa20) {
3007         return false;
3008     }
3009 
3010     nullify_over(ctx);
3011 
3012     r = load_gpr(ctx, a->r1);
3013     t0 = tcg_temp_new_i64();
3014     t1 = tcg_temp_new_i64();
3015     t2 = tcg_temp_new_i64();
3016     t3 = tcg_temp_new_i64();
3017 
3018     tcg_gen_extract_i64(t0, r, (3 - a->c0) * 16, 16);
3019     tcg_gen_extract_i64(t1, r, (3 - a->c1) * 16, 16);
3020     tcg_gen_extract_i64(t2, r, (3 - a->c2) * 16, 16);
3021     tcg_gen_extract_i64(t3, r, (3 - a->c3) * 16, 16);
3022 
3023     tcg_gen_deposit_i64(t0, t1, t0, 16, 48);
3024     tcg_gen_deposit_i64(t2, t3, t2, 16, 48);
3025     tcg_gen_deposit_i64(t0, t2, t0, 32, 32);
3026 
3027     save_gpr(ctx, a->t, t0);
3028     return nullify_end(ctx);
3029 }
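
/* The c0..c3 fields are big-endian halfword indices into r1: e.g.
   c0=c1=c2=c3 broadcasts a single halfword across the result, and
   c={3,2,1,0} reverses the four halfwords.  */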
3030 
3031 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
3032 {
3033     if (ctx->is_pa20) {
3034         /*
3035          * With pa20, LDB, LDH, LDW, LDD to %r0 are prefetches.
3036          * Any base modification still occurs.
3037          */
3038         if (a->t == 0) {
3039             return trans_nop_addrx(ctx, a);
3040         }
3041     } else if (a->size > MO_32) {
3042         return gen_illegal(ctx);
3043     }
3044     return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
3045                    a->disp, a->sp, a->m, a->size | MO_TE);
3046 }
3047 
3048 static bool trans_st(DisasContext *ctx, arg_ldst *a)
3049 {
3050     assert(a->x == 0 && a->scale == 0);
3051     if (!ctx->is_pa20 && a->size > MO_32) {
3052         return gen_illegal(ctx);
3053     }
3054     return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
3055 }
3056 
3057 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
3058 {
3059     MemOp mop = MO_TE | MO_ALIGN | a->size;
3060     TCGv_i64 dest, ofs;
3061     TCGv_i64 addr;
3062 
3063     if (!ctx->is_pa20 && a->size > MO_32) {
3064         return gen_illegal(ctx);
3065     }
3066 
3067     nullify_over(ctx);
3068 
3069     if (a->m) {
3070         /* Base register modification.  Make sure that if RT == RB,
3071            we see the result of the load.  */
3072         dest = tcg_temp_new_i64();
3073     } else {
3074         dest = dest_gpr(ctx, a->t);
3075     }
3076 
3077     form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
3078              a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
3079 
3080     /*
3081      * For hppa1.1, LDCW is undefined unless aligned mod 16.
3082      * However, actual hardware succeeds when aligned mod 4.
3083      * Detect this case and log a GUEST_ERROR.
3084      *
3085      * TODO: HPPA64 relaxes the over-alignment requirement
3086      * with the ,co completer.
3087      */
3088     gen_helper_ldc_check(addr);
3089 
3090     tcg_gen_atomic_xchg_i64(dest, addr, ctx->zero, ctx->mmu_idx, mop);
3091 
3092     if (a->m) {
3093         save_gpr(ctx, a->b, ofs);
3094     }
3095     save_gpr(ctx, a->t, dest);
3096 
3097     return nullify_end(ctx);
3098 }
3099 
3100 static bool trans_stby(DisasContext *ctx, arg_stby *a)
3101 {
3102     TCGv_i64 ofs, val;
3103     TCGv_i64 addr;
3104 
3105     nullify_over(ctx);
3106 
3107     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3108              ctx->mmu_idx == MMU_PHYS_IDX);
3109     val = load_gpr(ctx, a->r);
3110     if (a->a) {
3111         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3112             gen_helper_stby_e_parallel(tcg_env, addr, val);
3113         } else {
3114             gen_helper_stby_e(tcg_env, addr, val);
3115         }
3116     } else {
3117         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3118             gen_helper_stby_b_parallel(tcg_env, addr, val);
3119         } else {
3120             gen_helper_stby_b(tcg_env, addr, val);
3121         }
3122     }
3123     if (a->m) {
3124         tcg_gen_andi_i64(ofs, ofs, ~3);
3125         save_gpr(ctx, a->b, ofs);
3126     }
3127 
3128     return nullify_end(ctx);
3129 }
3130 
3131 static bool trans_stdby(DisasContext *ctx, arg_stby *a)
3132 {
3133     TCGv_i64 ofs, val;
3134     TCGv_i64 addr;
3135 
3136     if (!ctx->is_pa20) {
3137         return false;
3138     }
3139     nullify_over(ctx);
3140 
3141     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3142              ctx->mmu_idx == MMU_PHYS_IDX);
3143     val = load_gpr(ctx, a->r);
3144     if (a->a) {
3145         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3146             gen_helper_stdby_e_parallel(tcg_env, addr, val);
3147         } else {
3148             gen_helper_stdby_e(tcg_env, addr, val);
3149         }
3150     } else {
3151         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3152             gen_helper_stdby_b_parallel(tcg_env, addr, val);
3153         } else {
3154             gen_helper_stdby_b(tcg_env, addr, val);
3155         }
3156     }
3157     if (a->m) {
3158         tcg_gen_andi_i64(ofs, ofs, ~7);
3159         save_gpr(ctx, a->b, ofs);
3160     }
3161 
3162     return nullify_end(ctx);
3163 }
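
/* STBY and STDBY are the store-bytes primitives for unaligned copies:
   roughly, the ,b (begin) forms store the bytes of a region's leading
   partial word and the ,e (end) forms those of its trailing partial
   word.  The helpers store only the selected bytes; the _parallel
   variants preserve that atomically under MTTCG.  */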
3164 
3165 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
3166 {
3167     int hold_mmu_idx = ctx->mmu_idx;
3168 
3169     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3170     ctx->mmu_idx = MMU_PHYS_IDX;
3171     trans_ld(ctx, a);
3172     ctx->mmu_idx = hold_mmu_idx;
3173     return true;
3174 }
3175 
3176 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
3177 {
3178     int hold_mmu_idx = ctx->mmu_idx;
3179 
3180     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3181     ctx->mmu_idx = MMU_PHYS_IDX;
3182     trans_st(ctx, a);
3183     ctx->mmu_idx = hold_mmu_idx;
3184     return true;
3185 }
3186 
3187 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
3188 {
3189     TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3190 
3191     tcg_gen_movi_i64(tcg_rt, a->i);
3192     save_gpr(ctx, a->t, tcg_rt);
3193     cond_free(&ctx->null_cond);
3194     return true;
3195 }
3196 
3197 static bool trans_addil(DisasContext *ctx, arg_addil *a)
3198 {
3199     TCGv_i64 tcg_rt = load_gpr(ctx, a->r);
3200     TCGv_i64 tcg_r1 = dest_gpr(ctx, 1);
3201 
3202     tcg_gen_addi_i64(tcg_r1, tcg_rt, a->i);
3203     save_gpr(ctx, 1, tcg_r1);
3204     cond_free(&ctx->null_cond);
3205     return true;
3206 }
3207 
3208 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
3209 {
3210     TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3211 
3212     /* Special-case rb == 0 for the LDI pseudo-op.
3213        The COPY pseudo-op is handled for free within tcg_gen_addi_i64.  */
3214     if (a->b == 0) {
3215         tcg_gen_movi_i64(tcg_rt, a->i);
3216     } else {
3217         tcg_gen_addi_i64(tcg_rt, cpu_gr[a->b], a->i);
3218     }
3219     save_gpr(ctx, a->t, tcg_rt);
3220     cond_free(&ctx->null_cond);
3221     return true;
3222 }
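
/* Thus "ldi i,t" (assembled as "ldo i(%r0),t") takes the movi path,
   and "ldo 0(rb),t" degenerates to a register copy, since tcg folds
   an addi of 0 into a move.  */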
3223 
3224 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3225                     unsigned c, unsigned f, bool d, unsigned n, int disp)
3226 {
3227     TCGv_i64 dest, in2, sv;
3228     DisasCond cond;
3229 
3230     in2 = load_gpr(ctx, r);
3231     dest = tcg_temp_new_i64();
3232 
3233     tcg_gen_sub_i64(dest, in1, in2);
3234 
3235     sv = NULL;
3236     if (cond_need_sv(c)) {
3237         sv = do_sub_sv(ctx, dest, in1, in2);
3238     }
3239 
3240     cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv);
3241     return do_cbranch(ctx, disp, n, &cond);
3242 }
3243 
3244 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3245 {
3246     if (!ctx->is_pa20 && a->d) {
3247         return false;
3248     }
3249     nullify_over(ctx);
3250     return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1),
3251                    a->c, a->f, a->d, a->n, a->disp);
3252 }
3253 
3254 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3255 {
3256     if (!ctx->is_pa20 && a->d) {
3257         return false;
3258     }
3259     nullify_over(ctx);
3260     return do_cmpb(ctx, a->r, tcg_constant_i64(a->i),
3261                    a->c, a->f, a->d, a->n, a->disp);
3262 }
3263 
3264 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3265                     unsigned c, unsigned f, unsigned n, int disp)
3266 {
3267     TCGv_i64 dest, in2, sv, cb_cond;
3268     DisasCond cond;
3269     bool d = false;
3270 
3271     /*
3272      * For hppa64, the ADDB conditions change with PSW.W,
3273      * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE.
3274      */
3275     if (ctx->tb_flags & PSW_W) {
3276         d = c >= 5;
3277         if (d) {
3278             c &= 3;
3279         }
3280     }
3281 
3282     in2 = load_gpr(ctx, r);
3283     dest = tcg_temp_new_i64();
3284     sv = NULL;
3285     cb_cond = NULL;
3286 
3287     if (cond_need_cb(c)) {
3288         TCGv_i64 cb = tcg_temp_new_i64();
3289         TCGv_i64 cb_msb = tcg_temp_new_i64();
3290 
3291         tcg_gen_movi_i64(cb_msb, 0);
3292         tcg_gen_add2_i64(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3293         tcg_gen_xor_i64(cb, in1, in2);
3294         tcg_gen_xor_i64(cb, cb, dest);
3295         cb_cond = get_carry(ctx, d, cb, cb_msb);
3296     } else {
3297         tcg_gen_add_i64(dest, in1, in2);
3298     }
3299     if (cond_need_sv(c)) {
3300         sv = do_add_sv(ctx, dest, in1, in2);
3301     }
3302 
3303     cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv);
3304     save_gpr(ctx, r, dest);
3305     return do_cbranch(ctx, disp, n, &cond);
3306 }
3307 
3308 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3309 {
3310     nullify_over(ctx);
3311     return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3312 }
3313 
3314 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3315 {
3316     nullify_over(ctx);
3317     return do_addb(ctx, a->r, tcg_constant_i64(a->i), a->c, a->f, a->n, a->disp);
3318 }
3319 
3320 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3321 {
3322     TCGv_i64 tmp, tcg_r;
3323     DisasCond cond;
3324 
3325     nullify_over(ctx);
3326 
3327     tmp = tcg_temp_new_i64();
3328     tcg_r = load_gpr(ctx, a->r);
3329     if (cond_need_ext(ctx, a->d)) {
3330         /* Force shift into [32,63] */
3331         tcg_gen_ori_i64(tmp, cpu_sar, 32);
3332         tcg_gen_shl_i64(tmp, tcg_r, tmp);
3333     } else {
3334         tcg_gen_shl_i64(tmp, tcg_r, cpu_sar);
3335     }
3336 
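    /* Either arm above moves the big-endian-numbered bit selected by
       SAR into the sign bit, so the signed LT/GE test below implements
       branch-on-bit-set / branch-on-bit-clear.  */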
3337     cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3338     return do_cbranch(ctx, a->disp, a->n, &cond);
3339 }
3340 
3341 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3342 {
3343     TCGv_i64 tmp, tcg_r;
3344     DisasCond cond;
3345     int p;
3346 
3347     nullify_over(ctx);
3348 
3349     tmp = tcg_temp_new_i64();
3350     tcg_r = load_gpr(ctx, a->r);
3351     p = a->p | (cond_need_ext(ctx, a->d) ? 32 : 0);
3352     tcg_gen_shli_i64(tmp, tcg_r, p);
3353 
3354     cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3355     return do_cbranch(ctx, a->disp, a->n, &cond);
3356 }
3357 
3358 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3359 {
3360     TCGv_i64 dest;
3361     DisasCond cond;
3362 
3363     nullify_over(ctx);
3364 
3365     dest = dest_gpr(ctx, a->r2);
3366     if (a->r1 == 0) {
3367         tcg_gen_movi_i64(dest, 0);
3368     } else {
3369         tcg_gen_mov_i64(dest, cpu_gr[a->r1]);
3370     }
3371 
3372     /* All MOVB conditions are 32-bit. */
3373     cond = do_sed_cond(ctx, a->c, false, dest);
3374     return do_cbranch(ctx, a->disp, a->n, &cond);
3375 }
3376 
3377 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3378 {
3379     TCGv_i64 dest;
3380     DisasCond cond;
3381 
3382     nullify_over(ctx);
3383 
3384     dest = dest_gpr(ctx, a->r);
3385     tcg_gen_movi_i64(dest, a->i);
3386 
3387     /* All MOVBI conditions are 32-bit. */
3388     cond = do_sed_cond(ctx, a->c, false, dest);
3389     return do_cbranch(ctx, a->disp, a->n, &cond);
3390 }
3391 
3392 static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a)
3393 {
3394     TCGv_i64 dest, src2;
3395 
3396     if (!ctx->is_pa20 && a->d) {
3397         return false;
3398     }
3399     if (a->c) {
3400         nullify_over(ctx);
3401     }
3402 
3403     dest = dest_gpr(ctx, a->t);
3404     src2 = load_gpr(ctx, a->r2);
3405     if (a->r1 == 0) {
3406         if (a->d) {
3407             tcg_gen_shr_i64(dest, src2, cpu_sar);
3408         } else {
3409             TCGv_i64 tmp = tcg_temp_new_i64();
3410 
3411             tcg_gen_ext32u_i64(dest, src2);
3412             tcg_gen_andi_i64(tmp, cpu_sar, 31);
3413             tcg_gen_shr_i64(dest, dest, tmp);
3414         }
3415     } else if (a->r1 == a->r2) {
3416         if (a->d) {
3417             tcg_gen_rotr_i64(dest, src2, cpu_sar);
3418         } else {
3419             TCGv_i32 t32 = tcg_temp_new_i32();
3420             TCGv_i32 s32 = tcg_temp_new_i32();
3421 
3422             tcg_gen_extrl_i64_i32(t32, src2);
3423             tcg_gen_extrl_i64_i32(s32, cpu_sar);
3424             tcg_gen_andi_i32(s32, s32, 31);
3425             tcg_gen_rotr_i32(t32, t32, s32);
3426             tcg_gen_extu_i32_i64(dest, t32);
3427         }
3428     } else {
3429         TCGv_i64 src1 = load_gpr(ctx, a->r1);
3430 
3431         if (a->d) {
3432             TCGv_i64 t = tcg_temp_new_i64();
3433             TCGv_i64 n = tcg_temp_new_i64();
3434 
3435             tcg_gen_xori_i64(n, cpu_sar, 63);
3436             tcg_gen_shl_i64(t, src2, n);
3437             tcg_gen_shli_i64(t, t, 1);
3438             tcg_gen_shr_i64(dest, src1, cpu_sar);
3439             tcg_gen_or_i64(dest, dest, t);
3440         } else {
3441             TCGv_i64 t = tcg_temp_new_i64();
3442             TCGv_i64 s = tcg_temp_new_i64();
3443 
3444             tcg_gen_concat32_i64(t, src2, src1);
3445             tcg_gen_andi_i64(s, cpu_sar, 31);
3446             tcg_gen_shr_i64(dest, t, s);
3447         }
3448     }
3449     save_gpr(ctx, a->t, dest);
3450 
3451     /* Install the new nullification.  */
3452     cond_free(&ctx->null_cond);
3453     if (a->c) {
3454         ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3455     }
3456     return nullify_end(ctx);
3457 }
3458 
3459 static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a)
3460 {
3461     unsigned width, sa;
3462     TCGv_i64 dest, t2;
3463 
3464     if (!ctx->is_pa20 && a->d) {
3465         return false;
3466     }
3467     if (a->c) {
3468         nullify_over(ctx);
3469     }
3470 
3471     width = a->d ? 64 : 32;
3472     sa = width - 1 - a->cpos;
3473 
3474     dest = dest_gpr(ctx, a->t);
3475     t2 = load_gpr(ctx, a->r2);
3476     if (a->r1 == 0) {
3477         tcg_gen_extract_i64(dest, t2, sa, width - sa);
3478     } else if (width == TARGET_LONG_BITS) {
3479         tcg_gen_extract2_i64(dest, t2, cpu_gr[a->r1], sa);
3480     } else {
3481         assert(!a->d);
3482         if (a->r1 == a->r2) {
3483             TCGv_i32 t32 = tcg_temp_new_i32();
3484             tcg_gen_extrl_i64_i32(t32, t2);
3485             tcg_gen_rotri_i32(t32, t32, sa);
3486             tcg_gen_extu_i32_i64(dest, t32);
3487         } else {
3488             tcg_gen_concat32_i64(dest, t2, cpu_gr[a->r1]);
3489             tcg_gen_extract_i64(dest, dest, sa, 32);
3490         }
3491     }
3492     save_gpr(ctx, a->t, dest);
3493 
3494     /* Install the new nullification.  */
3495     cond_free(&ctx->null_cond);
3496     if (a->c) {
3497         ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3498     }
3499     return nullify_end(ctx);
3500 }
3501 
3502 static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a)
3503 {
3504     unsigned widthm1 = a->d ? 63 : 31;
3505     TCGv_i64 dest, src, tmp;
3506 
3507     if (!ctx->is_pa20 && a->d) {
3508         return false;
3509     }
3510     if (a->c) {
3511         nullify_over(ctx);
3512     }
3513 
3514     dest = dest_gpr(ctx, a->t);
3515     src = load_gpr(ctx, a->r);
3516     tmp = tcg_temp_new_i64();
3517 
3518     /* Recall that SAR uses big-endian bit numbering.  */
3519     tcg_gen_andi_i64(tmp, cpu_sar, widthm1);
3520     tcg_gen_xori_i64(tmp, tmp, widthm1);
3521 
3522     if (a->se) {
3523         if (!a->d) {
3524             tcg_gen_ext32s_i64(dest, src);
3525             src = dest;
3526         }
3527         tcg_gen_sar_i64(dest, src, tmp);
3528         tcg_gen_sextract_i64(dest, dest, 0, a->len);
3529     } else {
3530         if (!a->d) {
3531             tcg_gen_ext32u_i64(dest, src);
3532             src = dest;
3533         }
3534         tcg_gen_shr_i64(dest, src, tmp);
3535         tcg_gen_extract_i64(dest, dest, 0, a->len);
3536     }
3537     save_gpr(ctx, a->t, dest);
3538 
3539     /* Install the new nullification.  */
3540     cond_free(&ctx->null_cond);
3541     if (a->c) {
3542         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3543     }
3544     return nullify_end(ctx);
3545 }
3546 
3547 static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a)
3548 {
3549     unsigned len, cpos, width;
3550     TCGv_i64 dest, src;
3551 
3552     if (!ctx->is_pa20 && a->d) {
3553         return false;
3554     }
3555     if (a->c) {
3556         nullify_over(ctx);
3557     }
3558 
3559     len = a->len;
3560     width = a->d ? 64 : 32;
3561     cpos = width - 1 - a->pos;
3562     if (cpos + len > width) {
3563         len = width - cpos;
3564     }
3565 
3566     dest = dest_gpr(ctx, a->t);
3567     src = load_gpr(ctx, a->r);
3568     if (a->se) {
3569         tcg_gen_sextract_i64(dest, src, cpos, len);
3570     } else {
3571         tcg_gen_extract_i64(dest, src, cpos, len);
3572     }
3573     save_gpr(ctx, a->t, dest);
3574 
3575     /* Install the new nullification.  */
3576     cond_free(&ctx->null_cond);
3577     if (a->c) {
3578         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3579     }
3580     return nullify_end(ctx);
3581 }
3582 
3583 static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a)
3584 {
3585     unsigned len, width;
3586     uint64_t mask0, mask1;
3587     TCGv_i64 dest;
3588 
3589     if (!ctx->is_pa20 && a->d) {
3590         return false;
3591     }
3592     if (a->c) {
3593         nullify_over(ctx);
3594     }
3595 
3596     len = a->len;
3597     width = a->d ? 64 : 32;
3598     if (a->cpos + len > width) {
3599         len = width - a->cpos;
3600     }
3601 
3602     dest = dest_gpr(ctx, a->t);
3603     mask0 = deposit64(0, a->cpos, len, a->i);
3604     mask1 = deposit64(-1, a->cpos, len, a->i);
3605 
3606     if (a->nz) {
3607         TCGv_i64 src = load_gpr(ctx, a->t);
3608         tcg_gen_andi_i64(dest, src, mask1);
3609         tcg_gen_ori_i64(dest, dest, mask0);
3610     } else {
3611         tcg_gen_movi_i64(dest, mask0);
3612     }
3613     save_gpr(ctx, a->t, dest);
3614 
3615     /* Install the new nullification.  */
3616     cond_free(&ctx->null_cond);
3617     if (a->c) {
3618         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3619     }
3620     return nullify_end(ctx);
3621 }
3622 
3623 static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a)
3624 {
3625     unsigned rs = a->nz ? a->t : 0;
3626     unsigned len, width;
3627     TCGv_i64 dest, val;
3628 
3629     if (!ctx->is_pa20 && a->d) {
3630         return false;
3631     }
3632     if (a->c) {
3633         nullify_over(ctx);
3634     }
3635 
3636     len = a->len;
3637     width = a->d ? 64 : 32;
3638     if (a->cpos + len > width) {
3639         len = width - a->cpos;
3640     }
3641 
3642     dest = dest_gpr(ctx, a->t);
3643     val = load_gpr(ctx, a->r);
3644     if (rs == 0) {
3645         tcg_gen_deposit_z_i64(dest, val, a->cpos, len);
3646     } else {
3647         tcg_gen_deposit_i64(dest, cpu_gr[rs], val, a->cpos, len);
3648     }
3649     save_gpr(ctx, a->t, dest);
3650 
3651     /* Install the new nullification.  */
3652     cond_free(&ctx->null_cond);
3653     if (a->c) {
3654         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3655     }
3656     return nullify_end(ctx);
3657 }
3658 
3659 static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c,
3660                        bool d, bool nz, unsigned len, TCGv_i64 val)
3661 {
3662     unsigned rs = nz ? rt : 0;
3663     unsigned widthm1 = d ? 63 : 31;
3664     TCGv_i64 mask, tmp, shift, dest;
3665     uint64_t msb = 1ULL << (len - 1);
3666 
3667     dest = dest_gpr(ctx, rt);
3668     shift = tcg_temp_new_i64();
3669     tmp = tcg_temp_new_i64();
3670 
3671     /* Convert big-endian bit numbering in SAR to left-shift.  */
3672     tcg_gen_andi_i64(shift, cpu_sar, widthm1);
3673     tcg_gen_xori_i64(shift, shift, widthm1);
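         /* As in trans_extr_sar, the XOR with the all-ones widthm1
            computes widthm1 - shift without a subtraction.  */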
3674 
3675     mask = tcg_temp_new_i64();
3676     tcg_gen_movi_i64(mask, msb + (msb - 1));
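         /* msb + (msb - 1) == (1 << len) - 1, a mask of the low len bits
            formed without shifting by len itself.  */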
3677     tcg_gen_and_i64(tmp, val, mask);
3678     if (rs) {
3679         tcg_gen_shl_i64(mask, mask, shift);
3680         tcg_gen_shl_i64(tmp, tmp, shift);
3681         tcg_gen_andc_i64(dest, cpu_gr[rs], mask);
3682         tcg_gen_or_i64(dest, dest, tmp);
3683     } else {
3684         tcg_gen_shl_i64(dest, tmp, shift);
3685     }
3686     save_gpr(ctx, rt, dest);
3687 
3688     /* Install the new nullification.  */
3689     cond_free(&ctx->null_cond);
3690     if (c) {
3691         ctx->null_cond = do_sed_cond(ctx, c, d, dest);
3692     }
3693     return nullify_end(ctx);
3694 }
3695 
3696 static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a)
3697 {
3698     if (!ctx->is_pa20 && a->d) {
3699         return false;
3700     }
3701     if (a->c) {
3702         nullify_over(ctx);
3703     }
3704     return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3705                       load_gpr(ctx, a->r));
3706 }
3707 
3708 static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a)
3709 {
3710     if (!ctx->is_pa20 && a->d) {
3711         return false;
3712     }
3713     if (a->c) {
3714         nullify_over(ctx);
3715     }
3716     return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3717                       tcg_constant_i64(a->i));
3718 }
3719 
3720 static bool trans_be(DisasContext *ctx, arg_be *a)
3721 {
3722     TCGv_i64 tmp;
3723 
3724 #ifdef CONFIG_USER_ONLY
3725     /* ??? It seems like there should be a good way of using
3726        "be disp(sr2, r0)", the canonical gateway entry mechanism
3727        to our advantage.  But that appears to be inconvenient to
3728        manage alongside branch delay slots.  Therefore we handle
3729        entry into the gateway page via absolute address.  */
3730     /* Since we don't implement spaces, just branch.  Do notice the special
3731        case of "be disp(*,r0)" using a direct branch to disp, so that we can
3732        goto_tb to the TB containing the syscall.  */
3733     if (a->b == 0) {
3734         return do_dbranch(ctx, a->disp, a->l, a->n);
3735     }
3736 #else
3737     nullify_over(ctx);
3738 #endif
3739 
3740     tmp = tcg_temp_new_i64();
3741     tcg_gen_addi_i64(tmp, load_gpr(ctx, a->b), a->disp);
3742     tmp = do_ibranch_priv(ctx, tmp);
3743 
3744 #ifdef CONFIG_USER_ONLY
3745     return do_ibranch(ctx, tmp, a->l, a->n);
3746 #else
3747     TCGv_i64 new_spc = tcg_temp_new_i64();
3748 
3749     load_spr(ctx, new_spc, a->sp);
3750     if (a->l) {
3751         copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3752         tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3753     }
3754     if (a->n && use_nullify_skip(ctx)) {
3755         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
3756         tcg_gen_addi_i64(tmp, tmp, 4);
3757         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3758         tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3759         tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3760     } else {
3761         copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3762         if (ctx->iaoq_b == -1) {
3763             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3764         }
3765         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3766         tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3767         nullify_set(ctx, a->n);
3768     }
3769     tcg_gen_lookup_and_goto_ptr();
3770     ctx->base.is_jmp = DISAS_NORETURN;
3771     return nullify_end(ctx);
3772 #endif
3773 }
3774 
3775 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3776 {
3777     return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3778 }
3779 
3780 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3781 {
3782     uint64_t dest = iaoq_dest(ctx, a->disp);
3783 
3784     nullify_over(ctx);
3785 
3786     /* Make sure the caller hasn't done something weird with the queue.
3787      * ??? This is not quite the same as the PSW[B] bit, which would be
3788      * expensive to track.  Real hardware will trap for
3789      *    b  gateway
3790      *    b  gateway+4  (in delay slot of first branch)
3791      * However, checking for a non-sequential instruction queue *will*
3792      * diagnose the security hole
3793      *    b  gateway
3794      *    b  evil
3795      * in which instructions at evil would run with increased privs.
3796      */
3797     if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3798         return gen_illegal(ctx);
3799     }
3800 
3801 #ifndef CONFIG_USER_ONLY
3802     if (ctx->tb_flags & PSW_C) {
3803         CPUHPPAState *env = cpu_env(ctx->cs);
3804         int type = hppa_artype_for_page(env, ctx->base.pc_next);
3805         /* If we could not find a TLB entry, then we need to generate an
3806            ITLB miss exception so the kernel will provide it.
3807            The resulting TLB fill operation will invalidate this TB and
3808            we will re-translate, at which point we *will* be able to find
3809            the TLB entry and determine if this is in fact a gateway page.  */
3810         if (type < 0) {
3811             gen_excp(ctx, EXCP_ITLB_MISS);
3812             return true;
3813         }
3814         /* No change for non-gateway pages or for priv decrease.  */
3815         if (type >= 4 && type - 4 < ctx->privilege) {
3816             dest = deposit32(dest, 0, 2, type - 4);
3817         }
3818     } else {
3819         dest &= -4;  /* priv = 0 */
3820     }
3821 #endif
3822 
3823     if (a->l) {
3824         TCGv_i64 tmp = dest_gpr(ctx, a->l);
3825         if (ctx->privilege < 3) {
3826             tcg_gen_andi_i64(tmp, tmp, -4);
3827         }
3828         tcg_gen_ori_i64(tmp, tmp, ctx->privilege);
3829         save_gpr(ctx, a->l, tmp);
3830     }
3831 
3832     return do_dbranch(ctx, dest, 0, a->n);
3833 }
3834 
3835 static bool trans_blr(DisasContext *ctx, arg_blr *a)
3836 {
3837     if (a->x) {
3838         TCGv_i64 tmp = tcg_temp_new_i64();
3839         tcg_gen_shli_i64(tmp, load_gpr(ctx, a->x), 3);
3840         tcg_gen_addi_i64(tmp, tmp, ctx->iaoq_f + 8);
3841         /* The computation here never changes privilege level.  */
3842         return do_ibranch(ctx, tmp, a->l, a->n);
3843     } else {
3844         /* BLR R0,RX is a good way to load PC+8 into RX.  */
3845         return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3846     }
3847 }
3848 
3849 static bool trans_bv(DisasContext *ctx, arg_bv *a)
3850 {
3851     TCGv_i64 dest;
3852 
3853     if (a->x == 0) {
3854         dest = load_gpr(ctx, a->b);
3855     } else {
3856         dest = tcg_temp_new_i64();
3857         tcg_gen_shli_i64(dest, load_gpr(ctx, a->x), 3);
3858         tcg_gen_add_i64(dest, dest, load_gpr(ctx, a->b));
3859     }
3860     dest = do_ibranch_priv(ctx, dest);
3861     return do_ibranch(ctx, dest, 0, a->n);
3862 }
3863 
3864 static bool trans_bve(DisasContext *ctx, arg_bve *a)
3865 {
3866     TCGv_i64 dest;
3867 
3868 #ifdef CONFIG_USER_ONLY
3869     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3870     return do_ibranch(ctx, dest, a->l, a->n);
3871 #else
3872     nullify_over(ctx);
3873     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3874 
3875     copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3876     if (ctx->iaoq_b == -1) {
3877         tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3878     }
3879     copy_iaoq_entry(ctx, cpu_iaoq_b, -1, dest);
3880     tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3881     if (a->l) {
3882         copy_iaoq_entry(ctx, cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3883     }
3884     nullify_set(ctx, a->n);
3885     tcg_gen_lookup_and_goto_ptr();
3886     ctx->base.is_jmp = DISAS_NORETURN;
3887     return nullify_end(ctx);
3888 #endif
3889 }
3890 
3891 static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a)
3892 {
3893     /* All branch target stack instructions are implemented as nops. */
3894     return ctx->is_pa20;
3895 }
3896 
3897 /*
3898  * Float class 0
3899  */
3900 
3901 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3902 {
3903     tcg_gen_mov_i32(dst, src);
3904 }
3905 
3906 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
3907 {
3908     uint64_t ret;
3909 
3910     if (ctx->is_pa20) {
3911         ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
3912     } else {
3913         ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
3914     }
3915 
3916     nullify_over(ctx);
3917     save_frd(0, tcg_constant_i64(ret));
3918     return nullify_end(ctx);
3919 }
3920 
3921 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3922 {
3923     return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3924 }
3925 
3926 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3927 {
3928     tcg_gen_mov_i64(dst, src);
3929 }
3930 
3931 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3932 {
3933     return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
3934 }
3935 
3936 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3937 {
3938     tcg_gen_andi_i32(dst, src, INT32_MAX);
3939 }
3940 
3941 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
3942 {
3943     return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3944 }
3945 
3946 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3947 {
3948     tcg_gen_andi_i64(dst, src, INT64_MAX);
3949 }
3950 
3951 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
3952 {
3953     return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
3954 }
3955 
3956 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
3957 {
3958     return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3959 }
3960 
3961 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
3962 {
3963     return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3964 }
3965 
3966 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
3967 {
3968     return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3969 }
3970 
3971 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
3972 {
3973     return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
3974 }
3975 
3976 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3977 {
3978     tcg_gen_xori_i32(dst, src, INT32_MIN);
3979 }
3980 
3981 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
3982 {
3983     return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3984 }
3985 
3986 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3987 {
3988     tcg_gen_xori_i64(dst, src, INT64_MIN);
3989 }
3990 
3991 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3992 {
3993     return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3994 }
3995 
3996 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3997 {
3998     tcg_gen_ori_i32(dst, src, INT32_MIN);
3999 }
4000 
4001 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
4002 {
4003     return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
4004 }
4005 
4006 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4007 {
4008     tcg_gen_ori_i64(dst, src, INT64_MIN);
4009 }
4010 
4011 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
4012 {
4013     return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
4014 }
4015 
4016 /*
4017  * Float class 1
4018  */
4019 
4020 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
4021 {
4022     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
4023 }
4024 
4025 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
4026 {
4027     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
4028 }
4029 
4030 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
4031 {
4032     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
4033 }
4034 
4035 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
4036 {
4037     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
4038 }
4039 
4040 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
4041 {
4042     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
4043 }
4044 
4045 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
4046 {
4047     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
4048 }
4049 
4050 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
4051 {
4052     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
4053 }
4054 
4055 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
4056 {
4057     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
4058 }
4059 
4060 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
4061 {
4062     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
4063 }
4064 
4065 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
4066 {
4067     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
4068 }
4069 
4070 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
4071 {
4072     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
4073 }
4074 
4075 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
4076 {
4077     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
4078 }
4079 
4080 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
4081 {
4082     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
4083 }
4084 
4085 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
4086 {
4087     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
4088 }
4089 
4090 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
4091 {
4092     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
4093 }
4094 
4095 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
4096 {
4097     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
4098 }
4099 
4100 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
4101 {
4102     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
4103 }
4104 
4105 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
4106 {
4107     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
4108 }
4109 
4110 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
4111 {
4112     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
4113 }
4114 
4115 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
4116 {
4117     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
4118 }
4119 
4120 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
4121 {
4122     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
4123 }
4124 
4125 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
4126 {
4127     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
4128 }
4129 
4130 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
4131 {
4132     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
4133 }
4134 
4135 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
4136 {
4137     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
4138 }
4139 
4140 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
4141 {
4142     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
4143 }
4144 
4145 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
4146 {
4147     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
4148 }
4149 
4150 /*
4151  * Float class 2
4152  */
4153 
4154 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
4155 {
4156     TCGv_i32 ta, tb, tc, ty;
4157 
4158     nullify_over(ctx);
4159 
4160     ta = load_frw0_i32(a->r1);
4161     tb = load_frw0_i32(a->r2);
4162     ty = tcg_constant_i32(a->y);
4163     tc = tcg_constant_i32(a->c);
4164 
4165     gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
4166 
4167     return nullify_end(ctx);
4168 }
4169 
4170 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
4171 {
4172     TCGv_i64 ta, tb;
4173     TCGv_i32 tc, ty;
4174 
4175     nullify_over(ctx);
4176 
4177     ta = load_frd0(a->r1);
4178     tb = load_frd0(a->r2);
4179     ty = tcg_constant_i32(a->y);
4180     tc = tcg_constant_i32(a->c);
4181 
4182     gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
4183 
4184     return nullify_end(ctx);
4185 }
4186 
4187 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
4188 {
4189     TCGv_i64 t;
4190 
4191     nullify_over(ctx);
4192 
4193     t = tcg_temp_new_i64();
4194     tcg_gen_ld32u_i64(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
4195 
4196     if (a->y == 1) {
4197         int mask;
4198         bool inv = false;
4199 
4200         switch (a->c) {
4201         case 0: /* simple */
4202             tcg_gen_andi_i64(t, t, 0x4000000);
4203             ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4204             goto done;
4205         case 2: /* rej */
4206             inv = true;
4207             /* fallthru */
4208         case 1: /* acc */
4209             mask = 0x43ff800;
4210             break;
4211         case 6: /* rej8 */
4212             inv = true;
4213             /* fallthru */
4214         case 5: /* acc8 */
4215             mask = 0x43f8000;
4216             break;
4217         case 9: /* acc6 */
4218             mask = 0x43e0000;
4219             break;
4220         case 13: /* acc4 */
4221             mask = 0x4380000;
4222             break;
4223         case 17: /* acc2 */
4224             mask = 0x4200000;
4225             break;
4226         default:
4227             gen_illegal(ctx);
4228             return true;
4229         }
4230         if (inv) {
4231             TCGv_i64 c = tcg_constant_i64(mask);
4232             tcg_gen_or_i64(t, t, c);
4233             ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
4234         } else {
4235             tcg_gen_andi_i64(t, t, mask);
4236             ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
4237         }
4238     } else {
4239         unsigned cbit = (a->y ^ 1) - 1;
4240 
4241         tcg_gen_extract_i64(t, t, 21 - cbit, 1);
4242         ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4243     }
4244 
4245  done:
4246     return nullify_end(ctx);
4247 }
4248 
4249 /*
4250  * Float class 3
4251  */
4252 
4253 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
4254 {
4255     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
4256 }
4257 
4258 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
4259 {
4260     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
4261 }
4262 
4263 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
4264 {
4265     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
4266 }
4267 
4268 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
4269 {
4270     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
4271 }
4272 
4273 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
4274 {
4275     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
4276 }
4277 
4278 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
4279 {
4280     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
4281 }
4282 
4283 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
4284 {
4285     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
4286 }
4287 
4288 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
4289 {
4290     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
4291 }
4292 
4293 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
4294 {
4295     TCGv_i64 x, y;
4296 
4297     nullify_over(ctx);
4298 
4299     x = load_frw0_i64(a->r1);
4300     y = load_frw0_i64(a->r2);
4301     tcg_gen_mul_i64(x, x, y);
4302     save_frd(a->t, x);
4303 
4304     return nullify_end(ctx);
4305 }
4306 
4307 /* Convert the fmpyadd single-precision register encodings to standard.  */
4308 static inline int fmpyadd_s_reg(unsigned r)
4309 {
4310     return (r & 16) * 2 + 16 + (r & 15);
4311 }
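     /* The low four bits select among fr16-fr31, while bit 4 selects the
        32-bit register half and becomes bit 5 of the standard index:
        e.g. 5 -> 21 and 21 -> 53, both naming halves of fr21.  */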
4312 
4313 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4314 {
4315     int tm = fmpyadd_s_reg(a->tm);
4316     int ra = fmpyadd_s_reg(a->ra);
4317     int ta = fmpyadd_s_reg(a->ta);
4318     int rm2 = fmpyadd_s_reg(a->rm2);
4319     int rm1 = fmpyadd_s_reg(a->rm1);
4320 
4321     nullify_over(ctx);
4322 
4323     do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4324     do_fop_weww(ctx, ta, ta, ra,
4325                 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4326 
4327     return nullify_end(ctx);
4328 }
4329 
4330 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
4331 {
4332     return do_fmpyadd_s(ctx, a, false);
4333 }
4334 
4335 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
4336 {
4337     return do_fmpyadd_s(ctx, a, true);
4338 }
4339 
4340 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4341 {
4342     nullify_over(ctx);
4343 
4344     do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4345     do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4346                 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4347 
4348     return nullify_end(ctx);
4349 }
4350 
4351 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4352 {
4353     return do_fmpyadd_d(ctx, a, false);
4354 }
4355 
4356 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4357 {
4358     return do_fmpyadd_d(ctx, a, true);
4359 }
4360 
4361 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4362 {
4363     TCGv_i32 x, y, z;
4364 
4365     nullify_over(ctx);
4366     x = load_frw0_i32(a->rm1);
4367     y = load_frw0_i32(a->rm2);
4368     z = load_frw0_i32(a->ra3);
4369 
4370     if (a->neg) {
4371         gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
4372     } else {
4373         gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
4374     }
4375 
4376     save_frw_i32(a->t, x);
4377     return nullify_end(ctx);
4378 }
4379 
4380 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4381 {
4382     TCGv_i64 x, y, z;
4383 
4384     nullify_over(ctx);
4385     x = load_frd0(a->rm1);
4386     y = load_frd0(a->rm2);
4387     z = load_frd0(a->ra3);
4388 
4389     if (a->neg) {
4390         gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
4391     } else {
4392         gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
4393     }
4394 
4395     save_frd(a->t, x);
4396     return nullify_end(ctx);
4397 }
4398 
4399 static bool trans_diag(DisasContext *ctx, arg_diag *a)
4400 {
4401     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4402 #ifndef CONFIG_USER_ONLY
4403     if (a->i == 0x100) {
4404         /* emulate PDC BTLB, called by SeaBIOS-hppa */
4405         nullify_over(ctx);
4406         gen_helper_diag_btlb(tcg_env);
4407         return nullify_end(ctx);
4408     }
4409 #endif
4410     qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
4411     return true;
4412 }
4413 
4414 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4415 {
4416     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4417     int bound;
4418 
4419     ctx->cs = cs;
4420     ctx->tb_flags = ctx->base.tb->flags;
4421     ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));
4422 
4423 #ifdef CONFIG_USER_ONLY
4424     ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
4425     ctx->mmu_idx = MMU_USER_IDX;
4426     ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
4427     ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
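         /* The privilege level rides in the low two bits of each IAOQ
            value; instructions are 4-byte aligned, so the bits are free.  */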
4428     ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
4429 #else
4430     ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4431     ctx->mmu_idx = (ctx->tb_flags & PSW_D
4432                     ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
4433                     : MMU_PHYS_IDX);
4434 
4435     /* Recover the IAOQ values from the GVA + PRIV.  */
4436     uint64_t cs_base = ctx->base.tb->cs_base;
4437     uint64_t iasq_f = cs_base & ~0xffffffffull;
4438     int32_t diff = cs_base;
4439 
4440     ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4441     ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
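         /* The low 32 bits of cs_base carry the signed offset from
            IAOQ_Front to IAOQ_Back; a zero offset flags the back address
            as unknown, represented throughout this file as -1.  */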
4442 #endif
4443     ctx->iaoq_n = -1;
4444     ctx->iaoq_n_var = NULL;
4445 
4446     ctx->zero = tcg_constant_i64(0);
4447 
4448     /* Bound the number of instructions by those left on the page.  */
4449     bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
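         /* ORing in TARGET_PAGE_MASK sets every bit above the page offset,
            so the negation yields the bytes remaining on the page: e.g. an
            offset of 0xff8 with 4 KiB pages gives 8 / 4 = 2 insns.  */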
4450     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4451 }
4452 
4453 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4454 {
4455     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4456 
4457     /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
4458     ctx->null_cond = cond_make_f();
4459     ctx->psw_n_nonzero = false;
4460     if (ctx->tb_flags & PSW_N) {
4461         ctx->null_cond.c = TCG_COND_ALWAYS;
4462         ctx->psw_n_nonzero = true;
4463     }
4464     ctx->null_lab = NULL;
4465 }
4466 
4467 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4468 {
4469     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4470 
4471     tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b, 0);
4472     ctx->insn_start = tcg_last_op();
4473 }
4474 
4475 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4476 {
4477     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4478     CPUHPPAState *env = cpu_env(cs);
4479     DisasJumpType ret;
4480 
4481     /* Execute one insn.  */
4482 #ifdef CONFIG_USER_ONLY
4483     if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4484         do_page_zero(ctx);
4485         ret = ctx->base.is_jmp;
4486         assert(ret != DISAS_NEXT);
4487     } else
4488 #endif
4489     {
4490         /* Always fetch the insn, even if nullified, so that we check
4491            the page permissions for execute.  */
4492         uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
4493 
4494         /* Set up the IA queue for the next insn.
4495            This will be overwritten by a branch.  */
4496         if (ctx->iaoq_b == -1) {
4497             ctx->iaoq_n = -1;
4498             ctx->iaoq_n_var = tcg_temp_new_i64();
4499             tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4500         } else {
4501             ctx->iaoq_n = ctx->iaoq_b + 4;
4502             ctx->iaoq_n_var = NULL;
4503         }
4504 
4505         if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4506             ctx->null_cond.c = TCG_COND_NEVER;
4507             ret = DISAS_NEXT;
4508         } else {
4509             ctx->insn = insn;
4510             if (!decode(ctx, insn)) {
4511                 gen_illegal(ctx);
4512             }
4513             ret = ctx->base.is_jmp;
4514             assert(ctx->null_lab == NULL);
4515         }
4516     }
4517 
4518     /* Advance the insn queue.  Note that this check also detects
4519        a privilege change within the instruction queue.  */
4520     if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4521         if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4522             && use_goto_tb(ctx, ctx->iaoq_b)
4523             && (ctx->null_cond.c == TCG_COND_NEVER
4524                 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4525             nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4526             gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4527             ctx->base.is_jmp = ret = DISAS_NORETURN;
4528         } else {
4529             ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4530         }
4531     }
4532     ctx->iaoq_f = ctx->iaoq_b;
4533     ctx->iaoq_b = ctx->iaoq_n;
4534     ctx->base.pc_next += 4;
4535 
4536     switch (ret) {
4537     case DISAS_NORETURN:
4538     case DISAS_IAQ_N_UPDATED:
4539         break;
4540 
4541     case DISAS_NEXT:
4542     case DISAS_IAQ_N_STALE:
4543     case DISAS_IAQ_N_STALE_EXIT:
4544         if (ctx->iaoq_f == -1) {
4545             copy_iaoq_entry(ctx, cpu_iaoq_f, -1, cpu_iaoq_b);
4546             copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4547 #ifndef CONFIG_USER_ONLY
4548             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4549 #endif
4550             nullify_save(ctx);
4551             ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4552                                 ? DISAS_EXIT
4553                                 : DISAS_IAQ_N_UPDATED);
4554         } else if (ctx->iaoq_b == -1) {
4555             copy_iaoq_entry(ctx, cpu_iaoq_b, -1, ctx->iaoq_n_var);
4556         }
4557         break;
4558 
4559     default:
4560         g_assert_not_reached();
4561     }
4562 }
4563 
4564 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4565 {
4566     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4567     DisasJumpType is_jmp = ctx->base.is_jmp;
4568 
4569     switch (is_jmp) {
4570     case DISAS_NORETURN:
4571         break;
4572     case DISAS_TOO_MANY:
4573     case DISAS_IAQ_N_STALE:
4574     case DISAS_IAQ_N_STALE_EXIT:
4575         copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4576         copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4577         nullify_save(ctx);
4578         /* FALLTHRU */
4579     case DISAS_IAQ_N_UPDATED:
4580         if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
4581             tcg_gen_lookup_and_goto_ptr();
4582             break;
4583         }
4584         /* FALLTHRU */
4585     case DISAS_EXIT:
4586         tcg_gen_exit_tb(NULL, 0);
4587         break;
4588     default:
4589         g_assert_not_reached();
4590     }
4591 }
4592 
4593 static void hppa_tr_disas_log(const DisasContextBase *dcbase,
4594                               CPUState *cs, FILE *logfile)
4595 {
4596     target_ulong pc = dcbase->pc_first;
4597 
4598 #ifdef CONFIG_USER_ONLY
4599     switch (pc) {
4600     case 0x00:
4601         fprintf(logfile, "IN:\n0x00000000:  (null)\n");
4602         return;
4603     case 0xb0:
4604         fprintf(logfile, "IN:\n0x000000b0:  light-weight-syscall\n");
4605         return;
4606     case 0xe0:
4607         fprintf(logfile, "IN:\n0x000000e0:  set-thread-pointer-syscall\n");
4608         return;
4609     case 0x100:
4610         fprintf(logfile, "IN:\n0x00000100:  syscall\n");
4611         return;
4612     }
4613 #endif
4614 
4615     fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
4616     target_disas(logfile, cs, pc, dcbase->tb->size);
4617 }
4618 
4619 static const TranslatorOps hppa_tr_ops = {
4620     .init_disas_context = hppa_tr_init_disas_context,
4621     .tb_start           = hppa_tr_tb_start,
4622     .insn_start         = hppa_tr_insn_start,
4623     .translate_insn     = hppa_tr_translate_insn,
4624     .tb_stop            = hppa_tr_tb_stop,
4625     .disas_log          = hppa_tr_disas_log,
4626 };
4627 
4628 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
4629                            target_ulong pc, void *host_pc)
4630 {
4631     DisasContext ctx;
4632     translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
4633 }
4634