xref: /qemu/target/hppa/translate.c (revision e3404e01)
1 /*
2  * HPPA emulation cpu translation for qemu.
3  *
4  * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "tcg/tcg-op-gvec.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "exec/log.h"
31 
32 #define HELPER_H "helper.h"
33 #include "exec/helper-info.c.inc"
34 #undef  HELPER_H
35 
36 /* Choose to use explicit sizes within this file. */
37 #undef tcg_temp_new
38 
39 typedef struct DisasCond {
40     TCGCond c;
41     TCGv_i64 a0, a1;
42 } DisasCond;
43 
44 typedef struct DisasContext {
45     DisasContextBase base;
46     CPUState *cs;
47     TCGOp *insn_start;
48 
49     uint64_t iaoq_f;
50     uint64_t iaoq_b;
51     uint64_t iaoq_n;
52     TCGv_i64 iaoq_n_var;
53 
54     DisasCond null_cond;
55     TCGLabel *null_lab;
56 
57     TCGv_i64 zero;
58 
59     uint32_t insn;
60     uint32_t tb_flags;
61     int mmu_idx;
62     int privilege;
63     bool psw_n_nonzero;
64     bool is_pa20;
65 
66 #ifdef CONFIG_USER_ONLY
67     MemOp unalign;
68 #endif
69 } DisasContext;
70 
71 #ifdef CONFIG_USER_ONLY
72 #define UNALIGN(C)       (C)->unalign
73 #define MMU_DISABLED(C)  false
74 #else
75 #define UNALIGN(C)       MO_ALIGN
76 #define MMU_DISABLED(C)  MMU_IDX_MMU_DISABLED((C)->mmu_idx)
77 #endif
78 
79 /* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
80 static int expand_sm_imm(DisasContext *ctx, int val)
81 {
82     /* Keep unimplemented bits disabled -- see cpu_hppa_put_psw. */
83     if (ctx->is_pa20) {
84         if (val & PSW_SM_W) {
85             val |= PSW_W;
86         }
87         val &= ~(PSW_SM_W | PSW_SM_E | PSW_G);
88     } else {
89         val &= ~(PSW_SM_W | PSW_SM_E | PSW_O);
90     }
91     return val;
92 }
93 
94 /* Inverted space register indicates 0 means sr0 not inferred from base.  */
95 static int expand_sr3x(DisasContext *ctx, int val)
96 {
97     return ~val;
98 }
99 
100 /* Convert the M:A bits within a memory insn to the tri-state value
101    we use for the final M.  */
102 static int ma_to_m(DisasContext *ctx, int val)
103 {
104     return val & 2 ? (val & 1 ? -1 : 1) : 0;
105 }
106 
107 /* Convert the sign of the displacement to a pre or post-modify.  */
108 static int pos_to_m(DisasContext *ctx, int val)
109 {
110     return val ? 1 : -1;
111 }
112 
113 static int neg_to_m(DisasContext *ctx, int val)
114 {
115     return val ? -1 : 1;
116 }
117 
118 /* Used for branch targets and fp memory ops.  */
119 static int expand_shl2(DisasContext *ctx, int val)
120 {
121     return val << 2;
122 }
123 
124 /* Used for assemble_21.  */
125 static int expand_shl11(DisasContext *ctx, int val)
126 {
127     return val << 11;
128 }
129 
130 static int assemble_6(DisasContext *ctx, int val)
131 {
132     /*
133      * Officially, 32 * x + 32 - y.
134      * Here, x is already in bit 5, and y is [4:0].
135      * Since -y = ~y + 1, in 5 bits 32 - y => y ^ 31 + 1,
136      * with the overflow from bit 4 summing with x.
137      */
138     return (val ^ 31) + 1;
139 }
140 
141 /* Expander for assemble_16a(s,cat(im10a,0),i). */
142 static int expand_11a(DisasContext *ctx, int val)
143 {
144     /*
145      * @val is bit 0 and bits [4:15].
146      * Swizzle thing around depending on PSW.W.
147      */
148     int im10a = extract32(val, 1, 10);
149     int s = extract32(val, 11, 2);
150     int i = (-(val & 1) << 13) | (im10a << 3);
151 
152     if (ctx->tb_flags & PSW_W) {
153         i ^= s << 13;
154     }
155     return i;
156 }
157 
158 /* Expander for assemble_16a(s,im11a,i). */
159 static int expand_12a(DisasContext *ctx, int val)
160 {
161     /*
162      * @val is bit 0 and bits [3:15].
163      * Swizzle thing around depending on PSW.W.
164      */
165     int im11a = extract32(val, 1, 11);
166     int s = extract32(val, 12, 2);
167     int i = (-(val & 1) << 13) | (im11a << 2);
168 
169     if (ctx->tb_flags & PSW_W) {
170         i ^= s << 13;
171     }
172     return i;
173 }
174 
175 /* Expander for assemble_16(s,im14). */
176 static int expand_16(DisasContext *ctx, int val)
177 {
178     /*
179      * @val is bits [0:15], containing both im14 and s.
180      * Swizzle thing around depending on PSW.W.
181      */
182     int s = extract32(val, 14, 2);
183     int i = (-(val & 1) << 13) | extract32(val, 1, 13);
184 
185     if (ctx->tb_flags & PSW_W) {
186         i ^= s << 13;
187     }
188     return i;
189 }
190 
191 /* The sp field is only present with !PSW_W. */
192 static int sp0_if_wide(DisasContext *ctx, int sp)
193 {
194     return ctx->tb_flags & PSW_W ? 0 : sp;
195 }
196 
197 /* Translate CMPI doubleword conditions to standard. */
198 static int cmpbid_c(DisasContext *ctx, int val)
199 {
200     return val ? val : 4; /* 0 == "*<<" */
201 }
202 
203 /*
204  * In many places pa1.x did not decode the bit that later became
205  * the pa2.0 D bit.  Suppress D unless the cpu is pa2.0.
206  */
207 static int pa20_d(DisasContext *ctx, int val)
208 {
209     return ctx->is_pa20 & val;
210 }
211 
212 /* Include the auto-generated decoder.  */
213 #include "decode-insns.c.inc"
214 
215 /* We are not using a goto_tb (for whatever reason), but have updated
216    the iaq (for whatever reason), so don't do it again on exit.  */
217 #define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0
218 
219 /* We are exiting the TB, but have neither emitted a goto_tb, nor
220    updated the iaq for the next instruction to be executed.  */
221 #define DISAS_IAQ_N_STALE    DISAS_TARGET_1
222 
223 /* Similarly, but we want to return to the main loop immediately
224    to recognize unmasked interrupts.  */
225 #define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
226 #define DISAS_EXIT                  DISAS_TARGET_3
227 
228 /* global register indexes */
229 static TCGv_i64 cpu_gr[32];
230 static TCGv_i64 cpu_sr[4];
231 static TCGv_i64 cpu_srH;
232 static TCGv_i64 cpu_iaoq_f;
233 static TCGv_i64 cpu_iaoq_b;
234 static TCGv_i64 cpu_iasq_f;
235 static TCGv_i64 cpu_iasq_b;
236 static TCGv_i64 cpu_sar;
237 static TCGv_i64 cpu_psw_n;
238 static TCGv_i64 cpu_psw_v;
239 static TCGv_i64 cpu_psw_cb;
240 static TCGv_i64 cpu_psw_cb_msb;
241 
242 void hppa_translate_init(void)
243 {
244 #define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
245 
246     typedef struct { TCGv_i64 *var; const char *name; int ofs; } GlobalVar;
247     static const GlobalVar vars[] = {
248         { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
249         DEF_VAR(psw_n),
250         DEF_VAR(psw_v),
251         DEF_VAR(psw_cb),
252         DEF_VAR(psw_cb_msb),
253         DEF_VAR(iaoq_f),
254         DEF_VAR(iaoq_b),
255     };
256 
257 #undef DEF_VAR
258 
259     /* Use the symbolic register names that match the disassembler.  */
260     static const char gr_names[32][4] = {
261         "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
262         "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
263         "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
264         "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
265     };
266     /* SR[4-7] are not global registers so that we can index them.  */
267     static const char sr_names[5][4] = {
268         "sr0", "sr1", "sr2", "sr3", "srH"
269     };
270 
271     int i;
272 
273     cpu_gr[0] = NULL;
274     for (i = 1; i < 32; i++) {
275         cpu_gr[i] = tcg_global_mem_new(tcg_env,
276                                        offsetof(CPUHPPAState, gr[i]),
277                                        gr_names[i]);
278     }
279     for (i = 0; i < 4; i++) {
280         cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
281                                            offsetof(CPUHPPAState, sr[i]),
282                                            sr_names[i]);
283     }
284     cpu_srH = tcg_global_mem_new_i64(tcg_env,
285                                      offsetof(CPUHPPAState, sr[4]),
286                                      sr_names[4]);
287 
288     for (i = 0; i < ARRAY_SIZE(vars); ++i) {
289         const GlobalVar *v = &vars[i];
290         *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
291     }
292 
293     cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
294                                         offsetof(CPUHPPAState, iasq_f),
295                                         "iasq_f");
296     cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
297                                         offsetof(CPUHPPAState, iasq_b),
298                                         "iasq_b");
299 }
300 
301 static void set_insn_breg(DisasContext *ctx, int breg)
302 {
303     assert(ctx->insn_start != NULL);
304     tcg_set_insn_start_param(ctx->insn_start, 2, breg);
305     ctx->insn_start = NULL;
306 }
307 
308 static DisasCond cond_make_f(void)
309 {
310     return (DisasCond){
311         .c = TCG_COND_NEVER,
312         .a0 = NULL,
313         .a1 = NULL,
314     };
315 }
316 
317 static DisasCond cond_make_t(void)
318 {
319     return (DisasCond){
320         .c = TCG_COND_ALWAYS,
321         .a0 = NULL,
322         .a1 = NULL,
323     };
324 }
325 
326 static DisasCond cond_make_n(void)
327 {
328     return (DisasCond){
329         .c = TCG_COND_NE,
330         .a0 = cpu_psw_n,
331         .a1 = tcg_constant_i64(0)
332     };
333 }
334 
335 static DisasCond cond_make_tmp(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
336 {
337     assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
338     return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
339 }
340 
341 static DisasCond cond_make_0_tmp(TCGCond c, TCGv_i64 a0)
342 {
343     return cond_make_tmp(c, a0, tcg_constant_i64(0));
344 }
345 
346 static DisasCond cond_make_0(TCGCond c, TCGv_i64 a0)
347 {
348     TCGv_i64 tmp = tcg_temp_new_i64();
349     tcg_gen_mov_i64(tmp, a0);
350     return cond_make_0_tmp(c, tmp);
351 }
352 
353 static DisasCond cond_make(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
354 {
355     TCGv_i64 t0 = tcg_temp_new_i64();
356     TCGv_i64 t1 = tcg_temp_new_i64();
357 
358     tcg_gen_mov_i64(t0, a0);
359     tcg_gen_mov_i64(t1, a1);
360     return cond_make_tmp(c, t0, t1);
361 }
362 
363 static void cond_free(DisasCond *cond)
364 {
365     switch (cond->c) {
366     default:
367         cond->a0 = NULL;
368         cond->a1 = NULL;
369         /* fallthru */
370     case TCG_COND_ALWAYS:
371         cond->c = TCG_COND_NEVER;
372         break;
373     case TCG_COND_NEVER:
374         break;
375     }
376 }
377 
378 static TCGv_i64 load_gpr(DisasContext *ctx, unsigned reg)
379 {
380     if (reg == 0) {
381         return ctx->zero;
382     } else {
383         return cpu_gr[reg];
384     }
385 }
386 
387 static TCGv_i64 dest_gpr(DisasContext *ctx, unsigned reg)
388 {
389     if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
390         return tcg_temp_new_i64();
391     } else {
392         return cpu_gr[reg];
393     }
394 }
395 
396 static void save_or_nullify(DisasContext *ctx, TCGv_i64 dest, TCGv_i64 t)
397 {
398     if (ctx->null_cond.c != TCG_COND_NEVER) {
399         tcg_gen_movcond_i64(ctx->null_cond.c, dest, ctx->null_cond.a0,
400                             ctx->null_cond.a1, dest, t);
401     } else {
402         tcg_gen_mov_i64(dest, t);
403     }
404 }
405 
406 static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_i64 t)
407 {
408     if (reg != 0) {
409         save_or_nullify(ctx, cpu_gr[reg], t);
410     }
411 }
412 
413 #if HOST_BIG_ENDIAN
414 # define HI_OFS  0
415 # define LO_OFS  4
416 #else
417 # define HI_OFS  4
418 # define LO_OFS  0
419 #endif
420 
421 static TCGv_i32 load_frw_i32(unsigned rt)
422 {
423     TCGv_i32 ret = tcg_temp_new_i32();
424     tcg_gen_ld_i32(ret, tcg_env,
425                    offsetof(CPUHPPAState, fr[rt & 31])
426                    + (rt & 32 ? LO_OFS : HI_OFS));
427     return ret;
428 }
429 
430 static TCGv_i32 load_frw0_i32(unsigned rt)
431 {
432     if (rt == 0) {
433         TCGv_i32 ret = tcg_temp_new_i32();
434         tcg_gen_movi_i32(ret, 0);
435         return ret;
436     } else {
437         return load_frw_i32(rt);
438     }
439 }
440 
441 static TCGv_i64 load_frw0_i64(unsigned rt)
442 {
443     TCGv_i64 ret = tcg_temp_new_i64();
444     if (rt == 0) {
445         tcg_gen_movi_i64(ret, 0);
446     } else {
447         tcg_gen_ld32u_i64(ret, tcg_env,
448                           offsetof(CPUHPPAState, fr[rt & 31])
449                           + (rt & 32 ? LO_OFS : HI_OFS));
450     }
451     return ret;
452 }
453 
454 static void save_frw_i32(unsigned rt, TCGv_i32 val)
455 {
456     tcg_gen_st_i32(val, tcg_env,
457                    offsetof(CPUHPPAState, fr[rt & 31])
458                    + (rt & 32 ? LO_OFS : HI_OFS));
459 }
460 
461 #undef HI_OFS
462 #undef LO_OFS
463 
464 static TCGv_i64 load_frd(unsigned rt)
465 {
466     TCGv_i64 ret = tcg_temp_new_i64();
467     tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
468     return ret;
469 }
470 
471 static TCGv_i64 load_frd0(unsigned rt)
472 {
473     if (rt == 0) {
474         TCGv_i64 ret = tcg_temp_new_i64();
475         tcg_gen_movi_i64(ret, 0);
476         return ret;
477     } else {
478         return load_frd(rt);
479     }
480 }
481 
482 static void save_frd(unsigned rt, TCGv_i64 val)
483 {
484     tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
485 }
486 
487 static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
488 {
489 #ifdef CONFIG_USER_ONLY
490     tcg_gen_movi_i64(dest, 0);
491 #else
492     if (reg < 4) {
493         tcg_gen_mov_i64(dest, cpu_sr[reg]);
494     } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
495         tcg_gen_mov_i64(dest, cpu_srH);
496     } else {
497         tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
498     }
499 #endif
500 }
501 
502 /* Skip over the implementation of an insn that has been nullified.
503    Use this when the insn is too complex for a conditional move.  */
504 static void nullify_over(DisasContext *ctx)
505 {
506     if (ctx->null_cond.c != TCG_COND_NEVER) {
507         /* The always condition should have been handled in the main loop.  */
508         assert(ctx->null_cond.c != TCG_COND_ALWAYS);
509 
510         ctx->null_lab = gen_new_label();
511 
512         /* If we're using PSW[N], copy it to a temp because... */
513         if (ctx->null_cond.a0 == cpu_psw_n) {
514             ctx->null_cond.a0 = tcg_temp_new_i64();
515             tcg_gen_mov_i64(ctx->null_cond.a0, cpu_psw_n);
516         }
517         /* ... we clear it before branching over the implementation,
518            so that (1) it's clear after nullifying this insn and
519            (2) if this insn nullifies the next, PSW[N] is valid.  */
520         if (ctx->psw_n_nonzero) {
521             ctx->psw_n_nonzero = false;
522             tcg_gen_movi_i64(cpu_psw_n, 0);
523         }
524 
525         tcg_gen_brcond_i64(ctx->null_cond.c, ctx->null_cond.a0,
526                            ctx->null_cond.a1, ctx->null_lab);
527         cond_free(&ctx->null_cond);
528     }
529 }
530 
531 /* Save the current nullification state to PSW[N].  */
532 static void nullify_save(DisasContext *ctx)
533 {
534     if (ctx->null_cond.c == TCG_COND_NEVER) {
535         if (ctx->psw_n_nonzero) {
536             tcg_gen_movi_i64(cpu_psw_n, 0);
537         }
538         return;
539     }
540     if (ctx->null_cond.a0 != cpu_psw_n) {
541         tcg_gen_setcond_i64(ctx->null_cond.c, cpu_psw_n,
542                             ctx->null_cond.a0, ctx->null_cond.a1);
543         ctx->psw_n_nonzero = true;
544     }
545     cond_free(&ctx->null_cond);
546 }
547 
548 /* Set a PSW[N] to X.  The intention is that this is used immediately
549    before a goto_tb/exit_tb, so that there is no fallthru path to other
550    code within the TB.  Therefore we do not update psw_n_nonzero.  */
551 static void nullify_set(DisasContext *ctx, bool x)
552 {
553     if (ctx->psw_n_nonzero || x) {
554         tcg_gen_movi_i64(cpu_psw_n, x);
555     }
556 }
557 
558 /* Mark the end of an instruction that may have been nullified.
559    This is the pair to nullify_over.  Always returns true so that
560    it may be tail-called from a translate function.  */
561 static bool nullify_end(DisasContext *ctx)
562 {
563     TCGLabel *null_lab = ctx->null_lab;
564     DisasJumpType status = ctx->base.is_jmp;
565 
566     /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
567        For UPDATED, we cannot update on the nullified path.  */
568     assert(status != DISAS_IAQ_N_UPDATED);
569 
570     if (likely(null_lab == NULL)) {
571         /* The current insn wasn't conditional or handled the condition
572            applied to it without a branch, so the (new) setting of
573            NULL_COND can be applied directly to the next insn.  */
574         return true;
575     }
576     ctx->null_lab = NULL;
577 
578     if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
579         /* The next instruction will be unconditional,
580            and NULL_COND already reflects that.  */
581         gen_set_label(null_lab);
582     } else {
583         /* The insn that we just executed is itself nullifying the next
584            instruction.  Store the condition in the PSW[N] global.
585            We asserted PSW[N] = 0 in nullify_over, so that after the
586            label we have the proper value in place.  */
587         nullify_save(ctx);
588         gen_set_label(null_lab);
589         ctx->null_cond = cond_make_n();
590     }
591     if (status == DISAS_NORETURN) {
592         ctx->base.is_jmp = DISAS_NEXT;
593     }
594     return true;
595 }
596 
597 static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest,
598                             uint64_t ival, TCGv_i64 vval)
599 {
600     uint64_t mask = gva_offset_mask(ctx->tb_flags);
601 
602     if (ival != -1) {
603         tcg_gen_movi_i64(dest, ival & mask);
604         return;
605     }
606     tcg_debug_assert(vval != NULL);
607 
608     /*
609      * We know that the IAOQ is already properly masked.
610      * This optimization is primarily for "iaoq_f = iaoq_b".
611      */
612     if (vval == cpu_iaoq_f || vval == cpu_iaoq_b) {
613         tcg_gen_mov_i64(dest, vval);
614     } else {
615         tcg_gen_andi_i64(dest, vval, mask);
616     }
617 }
618 
619 static inline uint64_t iaoq_dest(DisasContext *ctx, int64_t disp)
620 {
621     return ctx->iaoq_f + disp + 8;
622 }
623 
624 static void gen_excp_1(int exception)
625 {
626     gen_helper_excp(tcg_env, tcg_constant_i32(exception));
627 }
628 
629 static void gen_excp(DisasContext *ctx, int exception)
630 {
631     copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
632     copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
633     nullify_save(ctx);
634     gen_excp_1(exception);
635     ctx->base.is_jmp = DISAS_NORETURN;
636 }
637 
638 static bool gen_excp_iir(DisasContext *ctx, int exc)
639 {
640     nullify_over(ctx);
641     tcg_gen_st_i64(tcg_constant_i64(ctx->insn),
642                    tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
643     gen_excp(ctx, exc);
644     return nullify_end(ctx);
645 }
646 
647 static bool gen_illegal(DisasContext *ctx)
648 {
649     return gen_excp_iir(ctx, EXCP_ILL);
650 }
651 
652 #ifdef CONFIG_USER_ONLY
653 #define CHECK_MOST_PRIVILEGED(EXCP) \
654     return gen_excp_iir(ctx, EXCP)
655 #else
656 #define CHECK_MOST_PRIVILEGED(EXCP) \
657     do {                                     \
658         if (ctx->privilege != 0) {           \
659             return gen_excp_iir(ctx, EXCP);  \
660         }                                    \
661     } while (0)
662 #endif
663 
664 static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
665 {
666     return translator_use_goto_tb(&ctx->base, dest);
667 }
668 
669 /* If the next insn is to be nullified, and it's on the same page,
670    and we're not attempting to set a breakpoint on it, then we can
671    totally skip the nullified insn.  This avoids creating and
672    executing a TB that merely branches to the next TB.  */
673 static bool use_nullify_skip(DisasContext *ctx)
674 {
675     return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
676             && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
677 }
678 
679 static void gen_goto_tb(DisasContext *ctx, int which,
680                         uint64_t f, uint64_t b)
681 {
682     if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
683         tcg_gen_goto_tb(which);
684         copy_iaoq_entry(ctx, cpu_iaoq_f, f, NULL);
685         copy_iaoq_entry(ctx, cpu_iaoq_b, b, NULL);
686         tcg_gen_exit_tb(ctx->base.tb, which);
687     } else {
688         copy_iaoq_entry(ctx, cpu_iaoq_f, f, cpu_iaoq_b);
689         copy_iaoq_entry(ctx, cpu_iaoq_b, b, ctx->iaoq_n_var);
690         tcg_gen_lookup_and_goto_ptr();
691     }
692 }
693 
694 static bool cond_need_sv(int c)
695 {
696     return c == 2 || c == 3 || c == 6;
697 }
698 
699 static bool cond_need_cb(int c)
700 {
701     return c == 4 || c == 5;
702 }
703 
704 /*
705  * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
706  * the Parisc 1.1 Architecture Reference Manual for details.
707  */
708 
709 static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
710                          TCGv_i64 res, TCGv_i64 uv, TCGv_i64 sv)
711 {
712     DisasCond cond;
713     TCGv_i64 tmp;
714 
715     switch (cf >> 1) {
716     case 0: /* Never / TR    (0 / 1) */
717         cond = cond_make_f();
718         break;
719     case 1: /* = / <>        (Z / !Z) */
720         if (!d) {
721             tmp = tcg_temp_new_i64();
722             tcg_gen_ext32u_i64(tmp, res);
723             res = tmp;
724         }
725         cond = cond_make_0(TCG_COND_EQ, res);
726         break;
727     case 2: /* < / >=        (N ^ V / !(N ^ V) */
728         tmp = tcg_temp_new_i64();
729         tcg_gen_xor_i64(tmp, res, sv);
730         if (!d) {
731             tcg_gen_ext32s_i64(tmp, tmp);
732         }
733         cond = cond_make_0_tmp(TCG_COND_LT, tmp);
734         break;
735     case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
736         /*
737          * Simplify:
738          *   (N ^ V) | Z
739          *   ((res < 0) ^ (sv < 0)) | !res
740          *   ((res ^ sv) < 0) | !res
741          *   (~(res ^ sv) >= 0) | !res
742          *   !(~(res ^ sv) >> 31) | !res
743          *   !(~(res ^ sv) >> 31 & res)
744          */
745         tmp = tcg_temp_new_i64();
746         tcg_gen_eqv_i64(tmp, res, sv);
747         if (!d) {
748             tcg_gen_sextract_i64(tmp, tmp, 31, 1);
749             tcg_gen_and_i64(tmp, tmp, res);
750             tcg_gen_ext32u_i64(tmp, tmp);
751         } else {
752             tcg_gen_sari_i64(tmp, tmp, 63);
753             tcg_gen_and_i64(tmp, tmp, res);
754         }
755         cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
756         break;
757     case 4: /* NUV / UV      (!UV / UV) */
758         cond = cond_make_0(TCG_COND_EQ, uv);
759         break;
760     case 5: /* ZNV / VNZ     (!UV | Z / UV & !Z) */
761         tmp = tcg_temp_new_i64();
762         tcg_gen_movcond_i64(TCG_COND_EQ, tmp, uv, ctx->zero, ctx->zero, res);
763         if (!d) {
764             tcg_gen_ext32u_i64(tmp, tmp);
765         }
766         cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
767         break;
768     case 6: /* SV / NSV      (V / !V) */
769         if (!d) {
770             tmp = tcg_temp_new_i64();
771             tcg_gen_ext32s_i64(tmp, sv);
772             sv = tmp;
773         }
774         cond = cond_make_0(TCG_COND_LT, sv);
775         break;
776     case 7: /* OD / EV */
777         tmp = tcg_temp_new_i64();
778         tcg_gen_andi_i64(tmp, res, 1);
779         cond = cond_make_0_tmp(TCG_COND_NE, tmp);
780         break;
781     default:
782         g_assert_not_reached();
783     }
784     if (cf & 1) {
785         cond.c = tcg_invert_cond(cond.c);
786     }
787 
788     return cond;
789 }
790 
791 /* Similar, but for the special case of subtraction without borrow, we
792    can use the inputs directly.  This can allow other computation to be
793    deleted as unused.  */
794 
795 static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
796                              TCGv_i64 res, TCGv_i64 in1,
797                              TCGv_i64 in2, TCGv_i64 sv)
798 {
799     TCGCond tc;
800     bool ext_uns;
801 
802     switch (cf >> 1) {
803     case 1: /* = / <> */
804         tc = TCG_COND_EQ;
805         ext_uns = true;
806         break;
807     case 2: /* < / >= */
808         tc = TCG_COND_LT;
809         ext_uns = false;
810         break;
811     case 3: /* <= / > */
812         tc = TCG_COND_LE;
813         ext_uns = false;
814         break;
815     case 4: /* << / >>= */
816         tc = TCG_COND_LTU;
817         ext_uns = true;
818         break;
819     case 5: /* <<= / >> */
820         tc = TCG_COND_LEU;
821         ext_uns = true;
822         break;
823     default:
824         return do_cond(ctx, cf, d, res, NULL, sv);
825     }
826 
827     if (cf & 1) {
828         tc = tcg_invert_cond(tc);
829     }
830     if (!d) {
831         TCGv_i64 t1 = tcg_temp_new_i64();
832         TCGv_i64 t2 = tcg_temp_new_i64();
833 
834         if (ext_uns) {
835             tcg_gen_ext32u_i64(t1, in1);
836             tcg_gen_ext32u_i64(t2, in2);
837         } else {
838             tcg_gen_ext32s_i64(t1, in1);
839             tcg_gen_ext32s_i64(t2, in2);
840         }
841         return cond_make_tmp(tc, t1, t2);
842     }
843     return cond_make(tc, in1, in2);
844 }
845 
846 /*
847  * Similar, but for logicals, where the carry and overflow bits are not
848  * computed, and use of them is undefined.
849  *
850  * Undefined or not, hardware does not trap.  It seems reasonable to
851  * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
852  * how cases c={2,3} are treated.
853  */
854 
855 static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
856                              TCGv_i64 res)
857 {
858     TCGCond tc;
859     bool ext_uns;
860 
861     switch (cf) {
862     case 0:  /* never */
863     case 9:  /* undef, C */
864     case 11: /* undef, C & !Z */
865     case 12: /* undef, V */
866         return cond_make_f();
867 
868     case 1:  /* true */
869     case 8:  /* undef, !C */
870     case 10: /* undef, !C | Z */
871     case 13: /* undef, !V */
872         return cond_make_t();
873 
874     case 2:  /* == */
875         tc = TCG_COND_EQ;
876         ext_uns = true;
877         break;
878     case 3:  /* <> */
879         tc = TCG_COND_NE;
880         ext_uns = true;
881         break;
882     case 4:  /* < */
883         tc = TCG_COND_LT;
884         ext_uns = false;
885         break;
886     case 5:  /* >= */
887         tc = TCG_COND_GE;
888         ext_uns = false;
889         break;
890     case 6:  /* <= */
891         tc = TCG_COND_LE;
892         ext_uns = false;
893         break;
894     case 7:  /* > */
895         tc = TCG_COND_GT;
896         ext_uns = false;
897         break;
898 
899     case 14: /* OD */
900     case 15: /* EV */
901         return do_cond(ctx, cf, d, res, NULL, NULL);
902 
903     default:
904         g_assert_not_reached();
905     }
906 
907     if (!d) {
908         TCGv_i64 tmp = tcg_temp_new_i64();
909 
910         if (ext_uns) {
911             tcg_gen_ext32u_i64(tmp, res);
912         } else {
913             tcg_gen_ext32s_i64(tmp, res);
914         }
915         return cond_make_0_tmp(tc, tmp);
916     }
917     return cond_make_0(tc, res);
918 }
919 
920 /* Similar, but for shift/extract/deposit conditions.  */
921 
922 static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d,
923                              TCGv_i64 res)
924 {
925     unsigned c, f;
926 
927     /* Convert the compressed condition codes to standard.
928        0-2 are the same as logicals (nv,<,<=), while 3 is OD.
929        4-7 are the reverse of 0-3.  */
930     c = orig & 3;
931     if (c == 3) {
932         c = 7;
933     }
934     f = (orig & 4) / 4;
935 
936     return do_log_cond(ctx, c * 2 + f, d, res);
937 }
938 
939 /* Similar, but for unit zero conditions.  */
940 static DisasCond do_unit_zero_cond(unsigned cf, bool d, TCGv_i64 res)
941 {
942     TCGv_i64 tmp;
943     uint64_t d_repl = d ? 0x0000000100000001ull : 1;
944     uint64_t ones = 0, sgns = 0;
945 
946     switch (cf >> 1) {
947     case 1: /* SBW / NBW */
948         if (d) {
949             ones = d_repl;
950             sgns = d_repl << 31;
951         }
952         break;
953     case 2: /* SBZ / NBZ */
954         ones = d_repl * 0x01010101u;
955         sgns = ones << 7;
956         break;
957     case 3: /* SHZ / NHZ */
958         ones = d_repl * 0x00010001u;
959         sgns = ones << 15;
960         break;
961     }
962     if (ones == 0) {
963         /* Undefined, or 0/1 (never/always). */
964         return cf & 1 ? cond_make_t() : cond_make_f();
965     }
966 
967     /*
968      * See hasless(v,1) from
969      * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
970      */
971     tmp = tcg_temp_new_i64();
972     tcg_gen_subi_i64(tmp, res, ones);
973     tcg_gen_andc_i64(tmp, tmp, res);
974     tcg_gen_andi_i64(tmp, tmp, sgns);
975 
976     return cond_make_0_tmp(cf & 1 ? TCG_COND_EQ : TCG_COND_NE, tmp);
977 }
978 
979 static TCGv_i64 get_carry(DisasContext *ctx, bool d,
980                           TCGv_i64 cb, TCGv_i64 cb_msb)
981 {
982     if (!d) {
983         TCGv_i64 t = tcg_temp_new_i64();
984         tcg_gen_extract_i64(t, cb, 32, 1);
985         return t;
986     }
987     return cb_msb;
988 }
989 
990 static TCGv_i64 get_psw_carry(DisasContext *ctx, bool d)
991 {
992     return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
993 }
994 
995 /* Compute signed overflow for addition.  */
996 static TCGv_i64 do_add_sv(DisasContext *ctx, TCGv_i64 res,
997                           TCGv_i64 in1, TCGv_i64 in2,
998                           TCGv_i64 orig_in1, int shift, bool d)
999 {
1000     TCGv_i64 sv = tcg_temp_new_i64();
1001     TCGv_i64 tmp = tcg_temp_new_i64();
1002 
1003     tcg_gen_xor_i64(sv, res, in1);
1004     tcg_gen_xor_i64(tmp, in1, in2);
1005     tcg_gen_andc_i64(sv, sv, tmp);
1006 
1007     switch (shift) {
1008     case 0:
1009         break;
1010     case 1:
1011         /* Shift left by one and compare the sign. */
1012         tcg_gen_add_i64(tmp, orig_in1, orig_in1);
1013         tcg_gen_xor_i64(tmp, tmp, orig_in1);
1014         /* Incorporate into the overflow. */
1015         tcg_gen_or_i64(sv, sv, tmp);
1016         break;
1017     default:
1018         {
1019             int sign_bit = d ? 63 : 31;
1020 
1021             /* Compare the sign against all lower bits. */
1022             tcg_gen_sextract_i64(tmp, orig_in1, sign_bit, 1);
1023             tcg_gen_xor_i64(tmp, tmp, orig_in1);
1024             /*
1025              * If one of the bits shifting into or through the sign
1026              * differs, then we have overflow.
1027              */
1028             tcg_gen_extract_i64(tmp, tmp, sign_bit - shift, shift);
1029             tcg_gen_movcond_i64(TCG_COND_NE, sv, tmp, ctx->zero,
1030                                 tcg_constant_i64(-1), sv);
1031         }
1032     }
1033     return sv;
1034 }
1035 
1036 /* Compute unsigned overflow for addition.  */
1037 static TCGv_i64 do_add_uv(DisasContext *ctx, TCGv_i64 cb, TCGv_i64 cb_msb,
1038                           TCGv_i64 in1, int shift, bool d)
1039 {
1040     if (shift == 0) {
1041         return get_carry(ctx, d, cb, cb_msb);
1042     } else {
1043         TCGv_i64 tmp = tcg_temp_new_i64();
1044         tcg_gen_extract_i64(tmp, in1, (d ? 63 : 31) - shift, shift);
1045         tcg_gen_or_i64(tmp, tmp, get_carry(ctx, d, cb, cb_msb));
1046         return tmp;
1047     }
1048 }
1049 
1050 /* Compute signed overflow for subtraction.  */
1051 static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res,
1052                           TCGv_i64 in1, TCGv_i64 in2)
1053 {
1054     TCGv_i64 sv = tcg_temp_new_i64();
1055     TCGv_i64 tmp = tcg_temp_new_i64();
1056 
1057     tcg_gen_xor_i64(sv, res, in1);
1058     tcg_gen_xor_i64(tmp, in1, in2);
1059     tcg_gen_and_i64(sv, sv, tmp);
1060 
1061     return sv;
1062 }
1063 
1064 static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 orig_in1,
1065                    TCGv_i64 in2, unsigned shift, bool is_l,
1066                    bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
1067 {
1068     TCGv_i64 dest, cb, cb_msb, in1, uv, sv, tmp;
1069     unsigned c = cf >> 1;
1070     DisasCond cond;
1071 
1072     dest = tcg_temp_new_i64();
1073     cb = NULL;
1074     cb_msb = NULL;
1075 
1076     in1 = orig_in1;
1077     if (shift) {
1078         tmp = tcg_temp_new_i64();
1079         tcg_gen_shli_i64(tmp, in1, shift);
1080         in1 = tmp;
1081     }
1082 
1083     if (!is_l || cond_need_cb(c)) {
1084         cb_msb = tcg_temp_new_i64();
1085         cb = tcg_temp_new_i64();
1086 
1087         tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
1088         if (is_c) {
1089             tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb,
1090                              get_psw_carry(ctx, d), ctx->zero);
1091         }
1092         tcg_gen_xor_i64(cb, in1, in2);
1093         tcg_gen_xor_i64(cb, cb, dest);
1094     } else {
1095         tcg_gen_add_i64(dest, in1, in2);
1096         if (is_c) {
1097             tcg_gen_add_i64(dest, dest, get_psw_carry(ctx, d));
1098         }
1099     }
1100 
1101     /* Compute signed overflow if required.  */
1102     sv = NULL;
1103     if (is_tsv || cond_need_sv(c)) {
1104         sv = do_add_sv(ctx, dest, in1, in2, orig_in1, shift, d);
1105         if (is_tsv) {
1106             if (!d) {
1107                 tcg_gen_ext32s_i64(sv, sv);
1108             }
1109             gen_helper_tsv(tcg_env, sv);
1110         }
1111     }
1112 
1113     /* Compute unsigned overflow if required.  */
1114     uv = NULL;
1115     if (cond_need_cb(c)) {
1116         uv = do_add_uv(ctx, cb, cb_msb, orig_in1, shift, d);
1117     }
1118 
1119     /* Emit any conditional trap before any writeback.  */
1120     cond = do_cond(ctx, cf, d, dest, uv, sv);
1121     if (is_tc) {
1122         tmp = tcg_temp_new_i64();
1123         tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
1124         gen_helper_tcond(tcg_env, tmp);
1125     }
1126 
1127     /* Write back the result.  */
1128     if (!is_l) {
1129         save_or_nullify(ctx, cpu_psw_cb, cb);
1130         save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1131     }
1132     save_gpr(ctx, rt, dest);
1133 
1134     /* Install the new nullification.  */
1135     cond_free(&ctx->null_cond);
1136     ctx->null_cond = cond;
1137 }
1138 
1139 static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
1140                        bool is_l, bool is_tsv, bool is_tc, bool is_c)
1141 {
1142     TCGv_i64 tcg_r1, tcg_r2;
1143 
1144     if (a->cf) {
1145         nullify_over(ctx);
1146     }
1147     tcg_r1 = load_gpr(ctx, a->r1);
1148     tcg_r2 = load_gpr(ctx, a->r2);
1149     do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l,
1150            is_tsv, is_tc, is_c, a->cf, a->d);
1151     return nullify_end(ctx);
1152 }
1153 
1154 static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
1155                        bool is_tsv, bool is_tc)
1156 {
1157     TCGv_i64 tcg_im, tcg_r2;
1158 
1159     if (a->cf) {
1160         nullify_over(ctx);
1161     }
1162     tcg_im = tcg_constant_i64(a->i);
1163     tcg_r2 = load_gpr(ctx, a->r);
1164     /* All ADDI conditions are 32-bit. */
1165     do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false);
1166     return nullify_end(ctx);
1167 }
1168 
1169 static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1170                    TCGv_i64 in2, bool is_tsv, bool is_b,
1171                    bool is_tc, unsigned cf, bool d)
1172 {
1173     TCGv_i64 dest, sv, cb, cb_msb, tmp;
1174     unsigned c = cf >> 1;
1175     DisasCond cond;
1176 
1177     dest = tcg_temp_new_i64();
1178     cb = tcg_temp_new_i64();
1179     cb_msb = tcg_temp_new_i64();
1180 
1181     if (is_b) {
1182         /* DEST,C = IN1 + ~IN2 + C.  */
1183         tcg_gen_not_i64(cb, in2);
1184         tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero,
1185                          get_psw_carry(ctx, d), ctx->zero);
1186         tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb, cb, ctx->zero);
1187         tcg_gen_xor_i64(cb, cb, in1);
1188         tcg_gen_xor_i64(cb, cb, dest);
1189     } else {
1190         /*
1191          * DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
1192          * operations by seeding the high word with 1 and subtracting.
1193          */
1194         TCGv_i64 one = tcg_constant_i64(1);
1195         tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
1196         tcg_gen_eqv_i64(cb, in1, in2);
1197         tcg_gen_xor_i64(cb, cb, dest);
1198     }
1199 
1200     /* Compute signed overflow if required.  */
1201     sv = NULL;
1202     if (is_tsv || cond_need_sv(c)) {
1203         sv = do_sub_sv(ctx, dest, in1, in2);
1204         if (is_tsv) {
1205             if (!d) {
1206                 tcg_gen_ext32s_i64(sv, sv);
1207             }
1208             gen_helper_tsv(tcg_env, sv);
1209         }
1210     }
1211 
1212     /* Compute the condition.  We cannot use the special case for borrow.  */
1213     if (!is_b) {
1214         cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
1215     } else {
1216         cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv);
1217     }
1218 
1219     /* Emit any conditional trap before any writeback.  */
1220     if (is_tc) {
1221         tmp = tcg_temp_new_i64();
1222         tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
1223         gen_helper_tcond(tcg_env, tmp);
1224     }
1225 
1226     /* Write back the result.  */
1227     save_or_nullify(ctx, cpu_psw_cb, cb);
1228     save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1229     save_gpr(ctx, rt, dest);
1230 
1231     /* Install the new nullification.  */
1232     cond_free(&ctx->null_cond);
1233     ctx->null_cond = cond;
1234 }
1235 
1236 static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a,
1237                        bool is_tsv, bool is_b, bool is_tc)
1238 {
1239     TCGv_i64 tcg_r1, tcg_r2;
1240 
1241     if (a->cf) {
1242         nullify_over(ctx);
1243     }
1244     tcg_r1 = load_gpr(ctx, a->r1);
1245     tcg_r2 = load_gpr(ctx, a->r2);
1246     do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf, a->d);
1247     return nullify_end(ctx);
1248 }
1249 
1250 static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
1251 {
1252     TCGv_i64 tcg_im, tcg_r2;
1253 
1254     if (a->cf) {
1255         nullify_over(ctx);
1256     }
1257     tcg_im = tcg_constant_i64(a->i);
1258     tcg_r2 = load_gpr(ctx, a->r);
1259     /* All SUBI conditions are 32-bit. */
1260     do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf, false);
1261     return nullify_end(ctx);
1262 }
1263 
1264 static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1265                       TCGv_i64 in2, unsigned cf, bool d)
1266 {
1267     TCGv_i64 dest, sv;
1268     DisasCond cond;
1269 
1270     dest = tcg_temp_new_i64();
1271     tcg_gen_sub_i64(dest, in1, in2);
1272 
1273     /* Compute signed overflow if required.  */
1274     sv = NULL;
1275     if (cond_need_sv(cf >> 1)) {
1276         sv = do_sub_sv(ctx, dest, in1, in2);
1277     }
1278 
1279     /* Form the condition for the compare.  */
1280     cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
1281 
1282     /* Clear.  */
1283     tcg_gen_movi_i64(dest, 0);
1284     save_gpr(ctx, rt, dest);
1285 
1286     /* Install the new nullification.  */
1287     cond_free(&ctx->null_cond);
1288     ctx->null_cond = cond;
1289 }
1290 
1291 static void do_log(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1292                    TCGv_i64 in2, unsigned cf, bool d,
1293                    void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
1294 {
1295     TCGv_i64 dest = dest_gpr(ctx, rt);
1296 
1297     /* Perform the operation, and writeback.  */
1298     fn(dest, in1, in2);
1299     save_gpr(ctx, rt, dest);
1300 
1301     /* Install the new nullification.  */
1302     cond_free(&ctx->null_cond);
1303     if (cf) {
1304         ctx->null_cond = do_log_cond(ctx, cf, d, dest);
1305     }
1306 }
1307 
1308 static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a,
1309                        void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
1310 {
1311     TCGv_i64 tcg_r1, tcg_r2;
1312 
1313     if (a->cf) {
1314         nullify_over(ctx);
1315     }
1316     tcg_r1 = load_gpr(ctx, a->r1);
1317     tcg_r2 = load_gpr(ctx, a->r2);
1318     do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn);
1319     return nullify_end(ctx);
1320 }
1321 
1322 static void do_unit_addsub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1323                            TCGv_i64 in2, unsigned cf, bool d,
1324                            bool is_tc, bool is_add)
1325 {
1326     TCGv_i64 dest = tcg_temp_new_i64();
1327     uint64_t test_cb = 0;
1328     DisasCond cond;
1329 
1330     /* Select which carry-out bits to test. */
1331     switch (cf >> 1) {
1332     case 4: /* NDC / SDC -- 4-bit carries */
1333         test_cb = dup_const(MO_8, 0x88);
1334         break;
1335     case 5: /* NWC / SWC -- 32-bit carries */
1336         if (d) {
1337             test_cb = dup_const(MO_32, INT32_MIN);
1338         } else {
1339             cf &= 1; /* undefined -- map to never/always */
1340         }
1341         break;
1342     case 6: /* NBC / SBC -- 8-bit carries */
1343         test_cb = dup_const(MO_8, INT8_MIN);
1344         break;
1345     case 7: /* NHC / SHC -- 16-bit carries */
1346         test_cb = dup_const(MO_16, INT16_MIN);
1347         break;
1348     }
1349     if (!d) {
1350         test_cb = (uint32_t)test_cb;
1351     }
1352 
1353     if (!test_cb) {
1354         /* No need to compute carries if we don't need to test them. */
1355         if (is_add) {
1356             tcg_gen_add_i64(dest, in1, in2);
1357         } else {
1358             tcg_gen_sub_i64(dest, in1, in2);
1359         }
1360         cond = do_unit_zero_cond(cf, d, dest);
1361     } else {
1362         TCGv_i64 cb = tcg_temp_new_i64();
1363 
1364         if (d) {
1365             TCGv_i64 cb_msb = tcg_temp_new_i64();
1366             if (is_add) {
1367                 tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
1368                 tcg_gen_xor_i64(cb, in1, in2);
1369             } else {
1370                 /* See do_sub, !is_b. */
1371                 TCGv_i64 one = tcg_constant_i64(1);
1372                 tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
1373                 tcg_gen_eqv_i64(cb, in1, in2);
1374             }
1375             tcg_gen_xor_i64(cb, cb, dest);
1376             tcg_gen_extract2_i64(cb, cb, cb_msb, 1);
1377         } else {
1378             if (is_add) {
1379                 tcg_gen_add_i64(dest, in1, in2);
1380                 tcg_gen_xor_i64(cb, in1, in2);
1381             } else {
1382                 tcg_gen_sub_i64(dest, in1, in2);
1383                 tcg_gen_eqv_i64(cb, in1, in2);
1384             }
1385             tcg_gen_xor_i64(cb, cb, dest);
1386             tcg_gen_shri_i64(cb, cb, 1);
1387         }
1388 
1389         tcg_gen_andi_i64(cb, cb, test_cb);
1390         cond = cond_make_0_tmp(cf & 1 ? TCG_COND_EQ : TCG_COND_NE, cb);
1391     }
1392 
1393     if (is_tc) {
1394         TCGv_i64 tmp = tcg_temp_new_i64();
1395         tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
1396         gen_helper_tcond(tcg_env, tmp);
1397     }
1398     save_gpr(ctx, rt, dest);
1399 
1400     cond_free(&ctx->null_cond);
1401     ctx->null_cond = cond;
1402 }
1403 
1404 #ifndef CONFIG_USER_ONLY
1405 /* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
1406    from the top 2 bits of the base register.  There are a few system
1407    instructions that have a 3-bit space specifier, for which SR0 is
1408    not special.  To handle this, pass ~SP.  */
1409 static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_i64 base)
1410 {
1411     TCGv_ptr ptr;
1412     TCGv_i64 tmp;
1413     TCGv_i64 spc;
1414 
1415     if (sp != 0) {
1416         if (sp < 0) {
1417             sp = ~sp;
1418         }
1419         spc = tcg_temp_new_i64();
1420         load_spr(ctx, spc, sp);
1421         return spc;
1422     }
1423     if (ctx->tb_flags & TB_FLAG_SR_SAME) {
1424         return cpu_srH;
1425     }
1426 
1427     ptr = tcg_temp_new_ptr();
1428     tmp = tcg_temp_new_i64();
1429     spc = tcg_temp_new_i64();
1430 
1431     /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
1432     tcg_gen_shri_i64(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
1433     tcg_gen_andi_i64(tmp, tmp, 030);
1434     tcg_gen_trunc_i64_ptr(ptr, tmp);
1435 
1436     tcg_gen_add_ptr(ptr, ptr, tcg_env);
1437     tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
1438 
1439     return spc;
1440 }
1441 #endif
1442 
1443 static void form_gva(DisasContext *ctx, TCGv_i64 *pgva, TCGv_i64 *pofs,
1444                      unsigned rb, unsigned rx, int scale, int64_t disp,
1445                      unsigned sp, int modify, bool is_phys)
1446 {
1447     TCGv_i64 base = load_gpr(ctx, rb);
1448     TCGv_i64 ofs;
1449     TCGv_i64 addr;
1450 
1451     set_insn_breg(ctx, rb);
1452 
1453     /* Note that RX is mutually exclusive with DISP.  */
1454     if (rx) {
1455         ofs = tcg_temp_new_i64();
1456         tcg_gen_shli_i64(ofs, cpu_gr[rx], scale);
1457         tcg_gen_add_i64(ofs, ofs, base);
1458     } else if (disp || modify) {
1459         ofs = tcg_temp_new_i64();
1460         tcg_gen_addi_i64(ofs, base, disp);
1461     } else {
1462         ofs = base;
1463     }
1464 
1465     *pofs = ofs;
1466     *pgva = addr = tcg_temp_new_i64();
1467     tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base,
1468                      gva_offset_mask(ctx->tb_flags));
1469 #ifndef CONFIG_USER_ONLY
1470     if (!is_phys) {
1471         tcg_gen_or_i64(addr, addr, space_select(ctx, sp, base));
1472     }
1473 #endif
1474 }
1475 
1476 /* Emit a memory load.  The modify parameter should be
1477  * < 0 for pre-modify,
1478  * > 0 for post-modify,
1479  * = 0 for no base register update.
1480  */
1481 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
1482                        unsigned rx, int scale, int64_t disp,
1483                        unsigned sp, int modify, MemOp mop)
1484 {
1485     TCGv_i64 ofs;
1486     TCGv_i64 addr;
1487 
1488     /* Caller uses nullify_over/nullify_end.  */
1489     assert(ctx->null_cond.c == TCG_COND_NEVER);
1490 
1491     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1492              MMU_DISABLED(ctx));
1493     tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1494     if (modify) {
1495         save_gpr(ctx, rb, ofs);
1496     }
1497 }
1498 
1499 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1500                        unsigned rx, int scale, int64_t disp,
1501                        unsigned sp, int modify, MemOp mop)
1502 {
1503     TCGv_i64 ofs;
1504     TCGv_i64 addr;
1505 
1506     /* Caller uses nullify_over/nullify_end.  */
1507     assert(ctx->null_cond.c == TCG_COND_NEVER);
1508 
1509     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1510              MMU_DISABLED(ctx));
1511     tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1512     if (modify) {
1513         save_gpr(ctx, rb, ofs);
1514     }
1515 }
1516 
1517 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1518                         unsigned rx, int scale, int64_t disp,
1519                         unsigned sp, int modify, MemOp mop)
1520 {
1521     TCGv_i64 ofs;
1522     TCGv_i64 addr;
1523 
1524     /* Caller uses nullify_over/nullify_end.  */
1525     assert(ctx->null_cond.c == TCG_COND_NEVER);
1526 
1527     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1528              MMU_DISABLED(ctx));
1529     tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1530     if (modify) {
1531         save_gpr(ctx, rb, ofs);
1532     }
1533 }
1534 
1535 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1536                         unsigned rx, int scale, int64_t disp,
1537                         unsigned sp, int modify, MemOp mop)
1538 {
1539     TCGv_i64 ofs;
1540     TCGv_i64 addr;
1541 
1542     /* Caller uses nullify_over/nullify_end.  */
1543     assert(ctx->null_cond.c == TCG_COND_NEVER);
1544 
1545     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1546              MMU_DISABLED(ctx));
1547     tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1548     if (modify) {
1549         save_gpr(ctx, rb, ofs);
1550     }
1551 }
1552 
1553 static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1554                     unsigned rx, int scale, int64_t disp,
1555                     unsigned sp, int modify, MemOp mop)
1556 {
1557     TCGv_i64 dest;
1558 
1559     nullify_over(ctx);
1560 
1561     if (modify == 0) {
1562         /* No base register update.  */
1563         dest = dest_gpr(ctx, rt);
1564     } else {
1565         /* Make sure if RT == RB, we see the result of the load.  */
1566         dest = tcg_temp_new_i64();
1567     }
1568     do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
1569     save_gpr(ctx, rt, dest);
1570 
1571     return nullify_end(ctx);
1572 }
1573 
1574 static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1575                       unsigned rx, int scale, int64_t disp,
1576                       unsigned sp, int modify)
1577 {
1578     TCGv_i32 tmp;
1579 
1580     nullify_over(ctx);
1581 
1582     tmp = tcg_temp_new_i32();
1583     do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1584     save_frw_i32(rt, tmp);
1585 
1586     if (rt == 0) {
1587         gen_helper_loaded_fr0(tcg_env);
1588     }
1589 
1590     return nullify_end(ctx);
1591 }
1592 
1593 static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
1594 {
1595     return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1596                      a->disp, a->sp, a->m);
1597 }
1598 
1599 static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1600                       unsigned rx, int scale, int64_t disp,
1601                       unsigned sp, int modify)
1602 {
1603     TCGv_i64 tmp;
1604 
1605     nullify_over(ctx);
1606 
1607     tmp = tcg_temp_new_i64();
1608     do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1609     save_frd(rt, tmp);
1610 
1611     if (rt == 0) {
1612         gen_helper_loaded_fr0(tcg_env);
1613     }
1614 
1615     return nullify_end(ctx);
1616 }
1617 
1618 static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
1619 {
1620     return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1621                      a->disp, a->sp, a->m);
1622 }
1623 
1624 static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1625                      int64_t disp, unsigned sp,
1626                      int modify, MemOp mop)
1627 {
1628     nullify_over(ctx);
1629     do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
1630     return nullify_end(ctx);
1631 }
1632 
1633 static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1634                        unsigned rx, int scale, int64_t disp,
1635                        unsigned sp, int modify)
1636 {
1637     TCGv_i32 tmp;
1638 
1639     nullify_over(ctx);
1640 
1641     tmp = load_frw_i32(rt);
1642     do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1643 
1644     return nullify_end(ctx);
1645 }
1646 
1647 static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
1648 {
1649     return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1650                       a->disp, a->sp, a->m);
1651 }
1652 
1653 static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1654                        unsigned rx, int scale, int64_t disp,
1655                        unsigned sp, int modify)
1656 {
1657     TCGv_i64 tmp;
1658 
1659     nullify_over(ctx);
1660 
1661     tmp = load_frd(rt);
1662     do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1663 
1664     return nullify_end(ctx);
1665 }
1666 
1667 static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
1668 {
1669     return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1670                       a->disp, a->sp, a->m);
1671 }
1672 
1673 static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1674                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1675 {
1676     TCGv_i32 tmp;
1677 
1678     nullify_over(ctx);
1679     tmp = load_frw0_i32(ra);
1680 
1681     func(tmp, tcg_env, tmp);
1682 
1683     save_frw_i32(rt, tmp);
1684     return nullify_end(ctx);
1685 }
1686 
1687 static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1688                        void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1689 {
1690     TCGv_i32 dst;
1691     TCGv_i64 src;
1692 
1693     nullify_over(ctx);
1694     src = load_frd(ra);
1695     dst = tcg_temp_new_i32();
1696 
1697     func(dst, tcg_env, src);
1698 
1699     save_frw_i32(rt, dst);
1700     return nullify_end(ctx);
1701 }
1702 
1703 static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1704                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1705 {
1706     TCGv_i64 tmp;
1707 
1708     nullify_over(ctx);
1709     tmp = load_frd0(ra);
1710 
1711     func(tmp, tcg_env, tmp);
1712 
1713     save_frd(rt, tmp);
1714     return nullify_end(ctx);
1715 }
1716 
1717 static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1718                        void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1719 {
1720     TCGv_i32 src;
1721     TCGv_i64 dst;
1722 
1723     nullify_over(ctx);
1724     src = load_frw0_i32(ra);
1725     dst = tcg_temp_new_i64();
1726 
1727     func(dst, tcg_env, src);
1728 
1729     save_frd(rt, dst);
1730     return nullify_end(ctx);
1731 }
1732 
1733 static bool do_fop_weww(DisasContext *ctx, unsigned rt,
1734                         unsigned ra, unsigned rb,
1735                         void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
1736 {
1737     TCGv_i32 a, b;
1738 
1739     nullify_over(ctx);
1740     a = load_frw0_i32(ra);
1741     b = load_frw0_i32(rb);
1742 
1743     func(a, tcg_env, a, b);
1744 
1745     save_frw_i32(rt, a);
1746     return nullify_end(ctx);
1747 }
1748 
1749 static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
1750                         unsigned ra, unsigned rb,
1751                         void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
1752 {
1753     TCGv_i64 a, b;
1754 
1755     nullify_over(ctx);
1756     a = load_frd0(ra);
1757     b = load_frd0(rb);
1758 
1759     func(a, tcg_env, a, b);
1760 
1761     save_frd(rt, a);
1762     return nullify_end(ctx);
1763 }
1764 
1765 /* Emit an unconditional branch to a direct target, which may or may not
1766    have already had nullification handled.  */
1767 static bool do_dbranch(DisasContext *ctx, uint64_t dest,
1768                        unsigned link, bool is_n)
1769 {
1770     if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1771         if (link != 0) {
1772             copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1773         }
1774         ctx->iaoq_n = dest;
1775         if (is_n) {
1776             ctx->null_cond.c = TCG_COND_ALWAYS;
1777         }
1778     } else {
1779         nullify_over(ctx);
1780 
1781         if (link != 0) {
1782             copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1783         }
1784 
1785         if (is_n && use_nullify_skip(ctx)) {
1786             nullify_set(ctx, 0);
1787             gen_goto_tb(ctx, 0, dest, dest + 4);
1788         } else {
1789             nullify_set(ctx, is_n);
1790             gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
1791         }
1792 
1793         nullify_end(ctx);
1794 
1795         nullify_set(ctx, 0);
1796         gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
1797         ctx->base.is_jmp = DISAS_NORETURN;
1798     }
1799     return true;
1800 }
1801 
1802 /* Emit a conditional branch to a direct target.  If the branch itself
1803    is nullified, we should have already used nullify_over.  */
1804 static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n,
1805                        DisasCond *cond)
1806 {
1807     uint64_t dest = iaoq_dest(ctx, disp);
1808     TCGLabel *taken = NULL;
1809     TCGCond c = cond->c;
1810     bool n;
1811 
1812     assert(ctx->null_cond.c == TCG_COND_NEVER);
1813 
1814     /* Handle TRUE and NEVER as direct branches.  */
1815     if (c == TCG_COND_ALWAYS) {
1816         return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
1817     }
1818     if (c == TCG_COND_NEVER) {
1819         return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
1820     }
1821 
1822     taken = gen_new_label();
1823     tcg_gen_brcond_i64(c, cond->a0, cond->a1, taken);
1824     cond_free(cond);
1825 
1826     /* Not taken: Condition not satisfied; nullify on backward branches. */
1827     n = is_n && disp < 0;
1828     if (n && use_nullify_skip(ctx)) {
1829         nullify_set(ctx, 0);
1830         gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
1831     } else {
1832         if (!n && ctx->null_lab) {
1833             gen_set_label(ctx->null_lab);
1834             ctx->null_lab = NULL;
1835         }
1836         nullify_set(ctx, n);
1837         if (ctx->iaoq_n == -1) {
1838             /* The temporary iaoq_n_var died at the branch above.
1839                Regenerate it here instead of saving it.  */
1840             tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
1841         }
1842         gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
1843     }
1844 
1845     gen_set_label(taken);
1846 
1847     /* Taken: Condition satisfied; nullify on forward branches.  */
1848     n = is_n && disp >= 0;
1849     if (n && use_nullify_skip(ctx)) {
1850         nullify_set(ctx, 0);
1851         gen_goto_tb(ctx, 1, dest, dest + 4);
1852     } else {
1853         nullify_set(ctx, n);
1854         gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
1855     }
1856 
1857     /* Not taken: the branch itself was nullified.  */
1858     if (ctx->null_lab) {
1859         gen_set_label(ctx->null_lab);
1860         ctx->null_lab = NULL;
1861         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1862     } else {
1863         ctx->base.is_jmp = DISAS_NORETURN;
1864     }
1865     return true;
1866 }
1867 
1868 /* Emit an unconditional branch to an indirect target.  This handles
1869    nullification of the branch itself.  */
1870 static bool do_ibranch(DisasContext *ctx, TCGv_i64 dest,
1871                        unsigned link, bool is_n)
1872 {
1873     TCGv_i64 a0, a1, next, tmp;
1874     TCGCond c;
1875 
1876     assert(ctx->null_lab == NULL);
1877 
1878     if (ctx->null_cond.c == TCG_COND_NEVER) {
1879         if (link != 0) {
1880             copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1881         }
1882         next = tcg_temp_new_i64();
1883         tcg_gen_mov_i64(next, dest);
1884         if (is_n) {
1885             if (use_nullify_skip(ctx)) {
1886                 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, next);
1887                 tcg_gen_addi_i64(next, next, 4);
1888                 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
1889                 nullify_set(ctx, 0);
1890                 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1891                 return true;
1892             }
1893             ctx->null_cond.c = TCG_COND_ALWAYS;
1894         }
1895         ctx->iaoq_n = -1;
1896         ctx->iaoq_n_var = next;
1897     } else if (is_n && use_nullify_skip(ctx)) {
1898         /* The (conditional) branch, B, nullifies the next insn, N,
1899            and we're allowed to skip execution N (no single-step or
1900            tracepoint in effect).  Since the goto_ptr that we must use
1901            for the indirect branch consumes no special resources, we
1902            can (conditionally) skip B and continue execution.  */
1903         /* The use_nullify_skip test implies we have a known control path.  */
1904         tcg_debug_assert(ctx->iaoq_b != -1);
1905         tcg_debug_assert(ctx->iaoq_n != -1);
1906 
1907         /* We do have to handle the non-local temporary, DEST, before
1908            branching.  Since IOAQ_F is not really live at this point, we
1909            can simply store DEST optimistically.  Similarly with IAOQ_B.  */
1910         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, dest);
1911         next = tcg_temp_new_i64();
1912         tcg_gen_addi_i64(next, dest, 4);
1913         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
1914 
1915         nullify_over(ctx);
1916         if (link != 0) {
1917             copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1918         }
1919         tcg_gen_lookup_and_goto_ptr();
1920         return nullify_end(ctx);
1921     } else {
1922         c = ctx->null_cond.c;
1923         a0 = ctx->null_cond.a0;
1924         a1 = ctx->null_cond.a1;
1925 
1926         tmp = tcg_temp_new_i64();
1927         next = tcg_temp_new_i64();
1928 
1929         copy_iaoq_entry(ctx, tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1930         tcg_gen_movcond_i64(c, next, a0, a1, tmp, dest);
1931         ctx->iaoq_n = -1;
1932         ctx->iaoq_n_var = next;
1933 
1934         if (link != 0) {
1935             tcg_gen_movcond_i64(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1936         }
1937 
1938         if (is_n) {
1939             /* The branch nullifies the next insn, which means the state of N
1940                after the branch is the inverse of the state of N that applied
1941                to the branch.  */
1942             tcg_gen_setcond_i64(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1943             cond_free(&ctx->null_cond);
1944             ctx->null_cond = cond_make_n();
1945             ctx->psw_n_nonzero = true;
1946         } else {
1947             cond_free(&ctx->null_cond);
1948         }
1949     }
1950     return true;
1951 }
1952 
1953 /* Implement
1954  *    if (IAOQ_Front{30..31} < GR[b]{30..31})
1955  *      IAOQ_Next{30..31} ← GR[b]{30..31};
1956  *    else
1957  *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1958  * which keeps the privilege level from being increased.
1959  */
1960 static TCGv_i64 do_ibranch_priv(DisasContext *ctx, TCGv_i64 offset)
1961 {
1962     TCGv_i64 dest;
1963     switch (ctx->privilege) {
1964     case 0:
1965         /* Privilege 0 is maximum and is allowed to decrease.  */
1966         return offset;
1967     case 3:
1968         /* Privilege 3 is minimum and is never allowed to increase.  */
1969         dest = tcg_temp_new_i64();
1970         tcg_gen_ori_i64(dest, offset, 3);
1971         break;
1972     default:
1973         dest = tcg_temp_new_i64();
1974         tcg_gen_andi_i64(dest, offset, -4);
1975         tcg_gen_ori_i64(dest, dest, ctx->privilege);
1976         tcg_gen_movcond_i64(TCG_COND_GTU, dest, dest, offset, dest, offset);
1977         break;
1978     }
1979     return dest;
1980 }
1981 
1982 #ifdef CONFIG_USER_ONLY
1983 /* On Linux, page zero is normally marked execute only + gateway.
1984    Therefore normal read or write is supposed to fail, but specific
1985    offsets have kernel code mapped to raise permissions to implement
1986    system calls.  Handling this via an explicit check here, rather
1987    in than the "be disp(sr2,r0)" instruction that probably sent us
1988    here, is the easiest way to handle the branch delay slot on the
1989    aforementioned BE.  */
1990 static void do_page_zero(DisasContext *ctx)
1991 {
1992     TCGv_i64 tmp;
1993 
1994     /* If by some means we get here with PSW[N]=1, that implies that
1995        the B,GATE instruction would be skipped, and we'd fault on the
1996        next insn within the privileged page.  */
1997     switch (ctx->null_cond.c) {
1998     case TCG_COND_NEVER:
1999         break;
2000     case TCG_COND_ALWAYS:
2001         tcg_gen_movi_i64(cpu_psw_n, 0);
2002         goto do_sigill;
2003     default:
2004         /* Since this is always the first (and only) insn within the
2005            TB, we should know the state of PSW[N] from TB->FLAGS.  */
2006         g_assert_not_reached();
2007     }
2008 
2009     /* Check that we didn't arrive here via some means that allowed
2010        non-sequential instruction execution.  Normally the PSW[B] bit
2011        detects this by disallowing the B,GATE instruction to execute
2012        under such conditions.  */
2013     if (ctx->iaoq_b != ctx->iaoq_f + 4) {
2014         goto do_sigill;
2015     }
2016 
2017     switch (ctx->iaoq_f & -4) {
2018     case 0x00: /* Null pointer call */
2019         gen_excp_1(EXCP_IMP);
2020         ctx->base.is_jmp = DISAS_NORETURN;
2021         break;
2022 
2023     case 0xb0: /* LWS */
2024         gen_excp_1(EXCP_SYSCALL_LWS);
2025         ctx->base.is_jmp = DISAS_NORETURN;
2026         break;
2027 
2028     case 0xe0: /* SET_THREAD_POINTER */
2029         tcg_gen_st_i64(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
2030         tmp = tcg_temp_new_i64();
2031         tcg_gen_ori_i64(tmp, cpu_gr[31], 3);
2032         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
2033         tcg_gen_addi_i64(tmp, tmp, 4);
2034         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
2035         ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
2036         break;
2037 
2038     case 0x100: /* SYSCALL */
2039         gen_excp_1(EXCP_SYSCALL);
2040         ctx->base.is_jmp = DISAS_NORETURN;
2041         break;
2042 
2043     default:
2044     do_sigill:
2045         gen_excp_1(EXCP_ILL);
2046         ctx->base.is_jmp = DISAS_NORETURN;
2047         break;
2048     }
2049 }
2050 #endif
2051 
2052 static bool trans_nop(DisasContext *ctx, arg_nop *a)
2053 {
2054     cond_free(&ctx->null_cond);
2055     return true;
2056 }
2057 
2058 static bool trans_break(DisasContext *ctx, arg_break *a)
2059 {
2060     return gen_excp_iir(ctx, EXCP_BREAK);
2061 }
2062 
2063 static bool trans_sync(DisasContext *ctx, arg_sync *a)
2064 {
2065     /* No point in nullifying the memory barrier.  */
2066     tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2067 
2068     cond_free(&ctx->null_cond);
2069     return true;
2070 }
2071 
2072 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
2073 {
2074     unsigned rt = a->t;
2075     TCGv_i64 tmp = dest_gpr(ctx, rt);
2076     tcg_gen_movi_i64(tmp, ctx->iaoq_f & ~3ULL);
2077     save_gpr(ctx, rt, tmp);
2078 
2079     cond_free(&ctx->null_cond);
2080     return true;
2081 }
2082 
2083 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
2084 {
2085     unsigned rt = a->t;
2086     unsigned rs = a->sp;
2087     TCGv_i64 t0 = tcg_temp_new_i64();
2088 
2089     load_spr(ctx, t0, rs);
2090     tcg_gen_shri_i64(t0, t0, 32);
2091 
2092     save_gpr(ctx, rt, t0);
2093 
2094     cond_free(&ctx->null_cond);
2095     return true;
2096 }
2097 
2098 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
2099 {
2100     unsigned rt = a->t;
2101     unsigned ctl = a->r;
2102     TCGv_i64 tmp;
2103 
2104     switch (ctl) {
2105     case CR_SAR:
2106         if (a->e == 0) {
2107             /* MFSAR without ,W masks low 5 bits.  */
2108             tmp = dest_gpr(ctx, rt);
2109             tcg_gen_andi_i64(tmp, cpu_sar, 31);
2110             save_gpr(ctx, rt, tmp);
2111             goto done;
2112         }
2113         save_gpr(ctx, rt, cpu_sar);
2114         goto done;
2115     case CR_IT: /* Interval Timer */
2116         /* FIXME: Respect PSW_S bit.  */
2117         nullify_over(ctx);
2118         tmp = dest_gpr(ctx, rt);
2119         if (translator_io_start(&ctx->base)) {
2120             ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2121         }
2122         gen_helper_read_interval_timer(tmp);
2123         save_gpr(ctx, rt, tmp);
2124         return nullify_end(ctx);
2125     case 26:
2126     case 27:
2127         break;
2128     default:
2129         /* All other control registers are privileged.  */
2130         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2131         break;
2132     }
2133 
2134     tmp = tcg_temp_new_i64();
2135     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2136     save_gpr(ctx, rt, tmp);
2137 
2138  done:
2139     cond_free(&ctx->null_cond);
2140     return true;
2141 }
2142 
2143 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2144 {
2145     unsigned rr = a->r;
2146     unsigned rs = a->sp;
2147     TCGv_i64 tmp;
2148 
2149     if (rs >= 5) {
2150         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2151     }
2152     nullify_over(ctx);
2153 
2154     tmp = tcg_temp_new_i64();
2155     tcg_gen_shli_i64(tmp, load_gpr(ctx, rr), 32);
2156 
2157     if (rs >= 4) {
2158         tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, sr[rs]));
2159         ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2160     } else {
2161         tcg_gen_mov_i64(cpu_sr[rs], tmp);
2162     }
2163 
2164     return nullify_end(ctx);
2165 }
2166 
2167 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2168 {
2169     unsigned ctl = a->t;
2170     TCGv_i64 reg;
2171     TCGv_i64 tmp;
2172 
2173     if (ctl == CR_SAR) {
2174         reg = load_gpr(ctx, a->r);
2175         tmp = tcg_temp_new_i64();
2176         tcg_gen_andi_i64(tmp, reg, ctx->is_pa20 ? 63 : 31);
2177         save_or_nullify(ctx, cpu_sar, tmp);
2178 
2179         cond_free(&ctx->null_cond);
2180         return true;
2181     }
2182 
2183     /* All other control registers are privileged or read-only.  */
2184     CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2185 
2186 #ifndef CONFIG_USER_ONLY
2187     nullify_over(ctx);
2188 
2189     if (ctx->is_pa20) {
2190         reg = load_gpr(ctx, a->r);
2191     } else {
2192         reg = tcg_temp_new_i64();
2193         tcg_gen_ext32u_i64(reg, load_gpr(ctx, a->r));
2194     }
2195 
2196     switch (ctl) {
2197     case CR_IT:
2198         if (translator_io_start(&ctx->base)) {
2199             ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2200         }
2201         gen_helper_write_interval_timer(tcg_env, reg);
2202         break;
2203     case CR_EIRR:
2204         /* Helper modifies interrupt lines and is therefore IO. */
2205         translator_io_start(&ctx->base);
2206         gen_helper_write_eirr(tcg_env, reg);
2207         /* Exit to re-evaluate interrupts in the main loop. */
2208         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2209         break;
2210 
2211     case CR_IIASQ:
2212     case CR_IIAOQ:
2213         /* FIXME: Respect PSW_Q bit */
2214         /* The write advances the queue and stores to the back element.  */
2215         tmp = tcg_temp_new_i64();
2216         tcg_gen_ld_i64(tmp, tcg_env,
2217                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2218         tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2219         tcg_gen_st_i64(reg, tcg_env,
2220                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2221         break;
2222 
2223     case CR_PID1:
2224     case CR_PID2:
2225     case CR_PID3:
2226     case CR_PID4:
2227         tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2228 #ifndef CONFIG_USER_ONLY
2229         gen_helper_change_prot_id(tcg_env);
2230 #endif
2231         break;
2232 
2233     case CR_EIEM:
2234         /* Exit to re-evaluate interrupts in the main loop. */
2235         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2236         /* FALLTHRU */
2237     default:
2238         tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2239         break;
2240     }
2241     return nullify_end(ctx);
2242 #endif
2243 }
2244 
2245 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2246 {
2247     TCGv_i64 tmp = tcg_temp_new_i64();
2248 
2249     tcg_gen_not_i64(tmp, load_gpr(ctx, a->r));
2250     tcg_gen_andi_i64(tmp, tmp, ctx->is_pa20 ? 63 : 31);
2251     save_or_nullify(ctx, cpu_sar, tmp);
2252 
2253     cond_free(&ctx->null_cond);
2254     return true;
2255 }
2256 
2257 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2258 {
2259     TCGv_i64 dest = dest_gpr(ctx, a->t);
2260 
2261 #ifdef CONFIG_USER_ONLY
2262     /* We don't implement space registers in user mode. */
2263     tcg_gen_movi_i64(dest, 0);
2264 #else
2265     tcg_gen_mov_i64(dest, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2266     tcg_gen_shri_i64(dest, dest, 32);
2267 #endif
2268     save_gpr(ctx, a->t, dest);
2269 
2270     cond_free(&ctx->null_cond);
2271     return true;
2272 }
2273 
2274 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2275 {
2276 #ifdef CONFIG_USER_ONLY
2277     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2278 #else
2279     TCGv_i64 tmp;
2280 
2281     /* HP-UX 11i and HP ODE use rsm for read-access to PSW */
2282     if (a->i) {
2283         CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2284     }
2285 
2286     nullify_over(ctx);
2287 
2288     tmp = tcg_temp_new_i64();
2289     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2290     tcg_gen_andi_i64(tmp, tmp, ~a->i);
2291     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2292     save_gpr(ctx, a->t, tmp);
2293 
2294     /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
2295     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2296     return nullify_end(ctx);
2297 #endif
2298 }
2299 
2300 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2301 {
2302     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2303 #ifndef CONFIG_USER_ONLY
2304     TCGv_i64 tmp;
2305 
2306     nullify_over(ctx);
2307 
2308     tmp = tcg_temp_new_i64();
2309     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2310     tcg_gen_ori_i64(tmp, tmp, a->i);
2311     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2312     save_gpr(ctx, a->t, tmp);
2313 
2314     /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
2315     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2316     return nullify_end(ctx);
2317 #endif
2318 }
2319 
2320 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2321 {
2322     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2323 #ifndef CONFIG_USER_ONLY
2324     TCGv_i64 tmp, reg;
2325     nullify_over(ctx);
2326 
2327     reg = load_gpr(ctx, a->r);
2328     tmp = tcg_temp_new_i64();
2329     gen_helper_swap_system_mask(tmp, tcg_env, reg);
2330 
2331     /* Exit the TB to recognize new interrupts.  */
2332     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2333     return nullify_end(ctx);
2334 #endif
2335 }
2336 
2337 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2338 {
2339     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2340 #ifndef CONFIG_USER_ONLY
2341     nullify_over(ctx);
2342 
2343     if (rfi_r) {
2344         gen_helper_rfi_r(tcg_env);
2345     } else {
2346         gen_helper_rfi(tcg_env);
2347     }
2348     /* Exit the TB to recognize new interrupts.  */
2349     tcg_gen_exit_tb(NULL, 0);
2350     ctx->base.is_jmp = DISAS_NORETURN;
2351 
2352     return nullify_end(ctx);
2353 #endif
2354 }
2355 
2356 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2357 {
2358     return do_rfi(ctx, false);
2359 }
2360 
2361 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2362 {
2363     return do_rfi(ctx, true);
2364 }
2365 
2366 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2367 {
2368     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2369 #ifndef CONFIG_USER_ONLY
2370     nullify_over(ctx);
2371     gen_helper_halt(tcg_env);
2372     ctx->base.is_jmp = DISAS_NORETURN;
2373     return nullify_end(ctx);
2374 #endif
2375 }
2376 
2377 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2378 {
2379     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2380 #ifndef CONFIG_USER_ONLY
2381     nullify_over(ctx);
2382     gen_helper_reset(tcg_env);
2383     ctx->base.is_jmp = DISAS_NORETURN;
2384     return nullify_end(ctx);
2385 #endif
2386 }
2387 
2388 static bool do_getshadowregs(DisasContext *ctx)
2389 {
2390     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2391     nullify_over(ctx);
2392     tcg_gen_ld_i64(cpu_gr[1], tcg_env, offsetof(CPUHPPAState, shadow[0]));
2393     tcg_gen_ld_i64(cpu_gr[8], tcg_env, offsetof(CPUHPPAState, shadow[1]));
2394     tcg_gen_ld_i64(cpu_gr[9], tcg_env, offsetof(CPUHPPAState, shadow[2]));
2395     tcg_gen_ld_i64(cpu_gr[16], tcg_env, offsetof(CPUHPPAState, shadow[3]));
2396     tcg_gen_ld_i64(cpu_gr[17], tcg_env, offsetof(CPUHPPAState, shadow[4]));
2397     tcg_gen_ld_i64(cpu_gr[24], tcg_env, offsetof(CPUHPPAState, shadow[5]));
2398     tcg_gen_ld_i64(cpu_gr[25], tcg_env, offsetof(CPUHPPAState, shadow[6]));
2399     return nullify_end(ctx);
2400 }
2401 
2402 static bool do_putshadowregs(DisasContext *ctx)
2403 {
2404     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2405     nullify_over(ctx);
2406     tcg_gen_st_i64(cpu_gr[1], tcg_env, offsetof(CPUHPPAState, shadow[0]));
2407     tcg_gen_st_i64(cpu_gr[8], tcg_env, offsetof(CPUHPPAState, shadow[1]));
2408     tcg_gen_st_i64(cpu_gr[9], tcg_env, offsetof(CPUHPPAState, shadow[2]));
2409     tcg_gen_st_i64(cpu_gr[16], tcg_env, offsetof(CPUHPPAState, shadow[3]));
2410     tcg_gen_st_i64(cpu_gr[17], tcg_env, offsetof(CPUHPPAState, shadow[4]));
2411     tcg_gen_st_i64(cpu_gr[24], tcg_env, offsetof(CPUHPPAState, shadow[5]));
2412     tcg_gen_st_i64(cpu_gr[25], tcg_env, offsetof(CPUHPPAState, shadow[6]));
2413     return nullify_end(ctx);
2414 }
2415 
2416 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2417 {
2418     return do_getshadowregs(ctx);
2419 }
2420 
2421 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2422 {
2423     if (a->m) {
2424         TCGv_i64 dest = dest_gpr(ctx, a->b);
2425         TCGv_i64 src1 = load_gpr(ctx, a->b);
2426         TCGv_i64 src2 = load_gpr(ctx, a->x);
2427 
2428         /* The only thing we need to do is the base register modification.  */
2429         tcg_gen_add_i64(dest, src1, src2);
2430         save_gpr(ctx, a->b, dest);
2431     }
2432     cond_free(&ctx->null_cond);
2433     return true;
2434 }
2435 
2436 static bool trans_fic(DisasContext *ctx, arg_ldst *a)
2437 {
2438     /* End TB for flush instruction cache, so we pick up new insns. */
2439     ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2440     return trans_nop_addrx(ctx, a);
2441 }
2442 
2443 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2444 {
2445     TCGv_i64 dest, ofs;
2446     TCGv_i32 level, want;
2447     TCGv_i64 addr;
2448 
2449     nullify_over(ctx);
2450 
2451     dest = dest_gpr(ctx, a->t);
2452     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2453 
2454     if (a->imm) {
2455         level = tcg_constant_i32(a->ri & 3);
2456     } else {
2457         level = tcg_temp_new_i32();
2458         tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri));
2459         tcg_gen_andi_i32(level, level, 3);
2460     }
2461     want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2462 
2463     gen_helper_probe(dest, tcg_env, addr, level, want);
2464 
2465     save_gpr(ctx, a->t, dest);
2466     return nullify_end(ctx);
2467 }
2468 
2469 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2470 {
2471     if (ctx->is_pa20) {
2472         return false;
2473     }
2474     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2475 #ifndef CONFIG_USER_ONLY
2476     TCGv_i64 addr;
2477     TCGv_i64 ofs, reg;
2478 
2479     nullify_over(ctx);
2480 
2481     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2482     reg = load_gpr(ctx, a->r);
2483     if (a->addr) {
2484         gen_helper_itlba_pa11(tcg_env, addr, reg);
2485     } else {
2486         gen_helper_itlbp_pa11(tcg_env, addr, reg);
2487     }
2488 
2489     /* Exit TB for TLB change if mmu is enabled.  */
2490     if (ctx->tb_flags & PSW_C) {
2491         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2492     }
2493     return nullify_end(ctx);
2494 #endif
2495 }
2496 
2497 static bool do_pxtlb(DisasContext *ctx, arg_ldst *a, bool local)
2498 {
2499     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2500 #ifndef CONFIG_USER_ONLY
2501     TCGv_i64 addr;
2502     TCGv_i64 ofs;
2503 
2504     nullify_over(ctx);
2505 
2506     form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2507 
2508     /*
2509      * Page align now, rather than later, so that we can add in the
2510      * page_size field from pa2.0 from the low 4 bits of GR[b].
2511      */
2512     tcg_gen_andi_i64(addr, addr, TARGET_PAGE_MASK);
2513     if (ctx->is_pa20) {
2514         tcg_gen_deposit_i64(addr, addr, load_gpr(ctx, a->b), 0, 4);
2515     }
2516 
2517     if (local) {
2518         gen_helper_ptlb_l(tcg_env, addr);
2519     } else {
2520         gen_helper_ptlb(tcg_env, addr);
2521     }
2522 
2523     if (a->m) {
2524         save_gpr(ctx, a->b, ofs);
2525     }
2526 
2527     /* Exit TB for TLB change if mmu is enabled.  */
2528     if (ctx->tb_flags & PSW_C) {
2529         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2530     }
2531     return nullify_end(ctx);
2532 #endif
2533 }
2534 
2535 static bool trans_pxtlb(DisasContext *ctx, arg_ldst *a)
2536 {
2537     return do_pxtlb(ctx, a, false);
2538 }
2539 
2540 static bool trans_pxtlb_l(DisasContext *ctx, arg_ldst *a)
2541 {
2542     return ctx->is_pa20 && do_pxtlb(ctx, a, true);
2543 }
2544 
2545 static bool trans_pxtlbe(DisasContext *ctx, arg_ldst *a)
2546 {
2547     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2548 #ifndef CONFIG_USER_ONLY
2549     nullify_over(ctx);
2550 
2551     trans_nop_addrx(ctx, a);
2552     gen_helper_ptlbe(tcg_env);
2553 
2554     /* Exit TB for TLB change if mmu is enabled.  */
2555     if (ctx->tb_flags & PSW_C) {
2556         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2557     }
2558     return nullify_end(ctx);
2559 #endif
2560 }
2561 
2562 /*
2563  * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2564  * See
2565  *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2566  *     page 13-9 (195/206)
2567  */
2568 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2569 {
2570     if (ctx->is_pa20) {
2571         return false;
2572     }
2573     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2574 #ifndef CONFIG_USER_ONLY
2575     TCGv_i64 addr, atl, stl;
2576     TCGv_i64 reg;
2577 
2578     nullify_over(ctx);
2579 
2580     /*
2581      * FIXME:
2582      *  if (not (pcxl or pcxl2))
2583      *    return gen_illegal(ctx);
2584      */
2585 
2586     atl = tcg_temp_new_i64();
2587     stl = tcg_temp_new_i64();
2588     addr = tcg_temp_new_i64();
2589 
2590     tcg_gen_ld32u_i64(stl, tcg_env,
2591                       a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2592                       : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2593     tcg_gen_ld32u_i64(atl, tcg_env,
2594                       a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2595                       : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2596     tcg_gen_shli_i64(stl, stl, 32);
2597     tcg_gen_or_i64(addr, atl, stl);
2598 
2599     reg = load_gpr(ctx, a->r);
2600     if (a->addr) {
2601         gen_helper_itlba_pa11(tcg_env, addr, reg);
2602     } else {
2603         gen_helper_itlbp_pa11(tcg_env, addr, reg);
2604     }
2605 
2606     /* Exit TB for TLB change if mmu is enabled.  */
2607     if (ctx->tb_flags & PSW_C) {
2608         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2609     }
2610     return nullify_end(ctx);
2611 #endif
2612 }
2613 
2614 static bool trans_ixtlbt(DisasContext *ctx, arg_ixtlbt *a)
2615 {
2616     if (!ctx->is_pa20) {
2617         return false;
2618     }
2619     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2620 #ifndef CONFIG_USER_ONLY
2621     nullify_over(ctx);
2622     {
2623         TCGv_i64 src1 = load_gpr(ctx, a->r1);
2624         TCGv_i64 src2 = load_gpr(ctx, a->r2);
2625 
2626         if (a->data) {
2627             gen_helper_idtlbt_pa20(tcg_env, src1, src2);
2628         } else {
2629             gen_helper_iitlbt_pa20(tcg_env, src1, src2);
2630         }
2631     }
2632     /* Exit TB for TLB change if mmu is enabled.  */
2633     if (ctx->tb_flags & PSW_C) {
2634         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2635     }
2636     return nullify_end(ctx);
2637 #endif
2638 }
2639 
2640 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2641 {
2642     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2643 #ifndef CONFIG_USER_ONLY
2644     TCGv_i64 vaddr;
2645     TCGv_i64 ofs, paddr;
2646 
2647     nullify_over(ctx);
2648 
2649     form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2650 
2651     paddr = tcg_temp_new_i64();
2652     gen_helper_lpa(paddr, tcg_env, vaddr);
2653 
2654     /* Note that physical address result overrides base modification.  */
2655     if (a->m) {
2656         save_gpr(ctx, a->b, ofs);
2657     }
2658     save_gpr(ctx, a->t, paddr);
2659 
2660     return nullify_end(ctx);
2661 #endif
2662 }
2663 
2664 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2665 {
2666     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2667 
2668     /* The Coherence Index is an implementation-defined function of the
2669        physical address.  Two addresses with the same CI have a coherent
2670        view of the cache.  Our implementation is to return 0 for all,
2671        since the entire address space is coherent.  */
2672     save_gpr(ctx, a->t, ctx->zero);
2673 
2674     cond_free(&ctx->null_cond);
2675     return true;
2676 }
2677 
2678 static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2679 {
2680     return do_add_reg(ctx, a, false, false, false, false);
2681 }
2682 
2683 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2684 {
2685     return do_add_reg(ctx, a, true, false, false, false);
2686 }
2687 
2688 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2689 {
2690     return do_add_reg(ctx, a, false, true, false, false);
2691 }
2692 
2693 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2694 {
2695     return do_add_reg(ctx, a, false, false, false, true);
2696 }
2697 
2698 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2699 {
2700     return do_add_reg(ctx, a, false, true, false, true);
2701 }
2702 
2703 static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a)
2704 {
2705     return do_sub_reg(ctx, a, false, false, false);
2706 }
2707 
2708 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2709 {
2710     return do_sub_reg(ctx, a, true, false, false);
2711 }
2712 
2713 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2714 {
2715     return do_sub_reg(ctx, a, false, false, true);
2716 }
2717 
2718 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2719 {
2720     return do_sub_reg(ctx, a, true, false, true);
2721 }
2722 
2723 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a)
2724 {
2725     return do_sub_reg(ctx, a, false, true, false);
2726 }
2727 
2728 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2729 {
2730     return do_sub_reg(ctx, a, true, true, false);
2731 }
2732 
2733 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a)
2734 {
2735     return do_log_reg(ctx, a, tcg_gen_andc_i64);
2736 }
2737 
2738 static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a)
2739 {
2740     return do_log_reg(ctx, a, tcg_gen_and_i64);
2741 }
2742 
2743 static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
2744 {
2745     if (a->cf == 0) {
2746         unsigned r2 = a->r2;
2747         unsigned r1 = a->r1;
2748         unsigned rt = a->t;
2749 
2750         if (rt == 0) { /* NOP */
2751             cond_free(&ctx->null_cond);
2752             return true;
2753         }
2754         if (r2 == 0) { /* COPY */
2755             if (r1 == 0) {
2756                 TCGv_i64 dest = dest_gpr(ctx, rt);
2757                 tcg_gen_movi_i64(dest, 0);
2758                 save_gpr(ctx, rt, dest);
2759             } else {
2760                 save_gpr(ctx, rt, cpu_gr[r1]);
2761             }
2762             cond_free(&ctx->null_cond);
2763             return true;
2764         }
2765 #ifndef CONFIG_USER_ONLY
2766         /* These are QEMU extensions and are nops in the real architecture:
2767          *
2768          * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2769          * or %r31,%r31,%r31 -- death loop; offline cpu
2770          *                      currently implemented as idle.
2771          */
2772         if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2773             /* No need to check for supervisor, as userland can only pause
2774                until the next timer interrupt.  */
2775             nullify_over(ctx);
2776 
2777             /* Advance the instruction queue.  */
2778             copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2779             copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2780             nullify_set(ctx, 0);
2781 
2782             /* Tell the qemu main loop to halt until this cpu has work.  */
2783             tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
2784                            offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2785             gen_excp_1(EXCP_HALTED);
2786             ctx->base.is_jmp = DISAS_NORETURN;
2787 
2788             return nullify_end(ctx);
2789         }
2790 #endif
2791     }
2792     return do_log_reg(ctx, a, tcg_gen_or_i64);
2793 }
2794 
2795 static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a)
2796 {
2797     return do_log_reg(ctx, a, tcg_gen_xor_i64);
2798 }
2799 
2800 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a)
2801 {
2802     TCGv_i64 tcg_r1, tcg_r2;
2803 
2804     if (a->cf) {
2805         nullify_over(ctx);
2806     }
2807     tcg_r1 = load_gpr(ctx, a->r1);
2808     tcg_r2 = load_gpr(ctx, a->r2);
2809     do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d);
2810     return nullify_end(ctx);
2811 }
2812 
2813 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
2814 {
2815     TCGv_i64 tcg_r1, tcg_r2, dest;
2816 
2817     if (a->cf) {
2818         nullify_over(ctx);
2819     }
2820 
2821     tcg_r1 = load_gpr(ctx, a->r1);
2822     tcg_r2 = load_gpr(ctx, a->r2);
2823     dest = dest_gpr(ctx, a->t);
2824 
2825     tcg_gen_xor_i64(dest, tcg_r1, tcg_r2);
2826     save_gpr(ctx, a->t, dest);
2827 
2828     cond_free(&ctx->null_cond);
2829     if (a->cf) {
2830         ctx->null_cond = do_unit_zero_cond(a->cf, a->d, dest);
2831     }
2832 
2833     return nullify_end(ctx);
2834 }
2835 
2836 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc)
2837 {
2838     TCGv_i64 tcg_r1, tcg_r2, tmp;
2839 
2840     if (a->cf == 0) {
2841         tcg_r2 = load_gpr(ctx, a->r2);
2842         tmp = dest_gpr(ctx, a->t);
2843 
2844         if (a->r1 == 0) {
2845             /* UADDCM r0,src,dst is the common idiom for dst = ~src. */
2846             tcg_gen_not_i64(tmp, tcg_r2);
2847         } else {
2848             /*
2849              * Recall that r1 - r2 == r1 + ~r2 + 1.
2850              * Thus r1 + ~r2 == r1 - r2 - 1,
2851              * which does not require an extra temporary.
2852              */
2853             tcg_r1 = load_gpr(ctx, a->r1);
2854             tcg_gen_sub_i64(tmp, tcg_r1, tcg_r2);
2855             tcg_gen_subi_i64(tmp, tmp, 1);
2856         }
2857         save_gpr(ctx, a->t, tmp);
2858         cond_free(&ctx->null_cond);
2859         return true;
2860     }
2861 
2862     nullify_over(ctx);
2863     tcg_r1 = load_gpr(ctx, a->r1);
2864     tcg_r2 = load_gpr(ctx, a->r2);
2865     tmp = tcg_temp_new_i64();
2866     tcg_gen_not_i64(tmp, tcg_r2);
2867     do_unit_addsub(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, true);
2868     return nullify_end(ctx);
2869 }
2870 
2871 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a)
2872 {
2873     return do_uaddcm(ctx, a, false);
2874 }
2875 
2876 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2877 {
2878     return do_uaddcm(ctx, a, true);
2879 }
2880 
2881 static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i)
2882 {
2883     TCGv_i64 tmp;
2884 
2885     nullify_over(ctx);
2886 
2887     tmp = tcg_temp_new_i64();
2888     tcg_gen_extract2_i64(tmp, cpu_psw_cb, cpu_psw_cb_msb, 4);
2889     if (!is_i) {
2890         tcg_gen_not_i64(tmp, tmp);
2891     }
2892     tcg_gen_andi_i64(tmp, tmp, (uint64_t)0x1111111111111111ull);
2893     tcg_gen_muli_i64(tmp, tmp, 6);
2894     do_unit_addsub(ctx, a->t, load_gpr(ctx, a->r), tmp,
2895                    a->cf, a->d, false, is_i);
2896     return nullify_end(ctx);
2897 }
2898 
2899 static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a)
2900 {
2901     return do_dcor(ctx, a, false);
2902 }
2903 
2904 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a)
2905 {
2906     return do_dcor(ctx, a, true);
2907 }
2908 
2909 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2910 {
2911     TCGv_i64 dest, add1, add2, addc, in1, in2;
2912 
2913     nullify_over(ctx);
2914 
2915     in1 = load_gpr(ctx, a->r1);
2916     in2 = load_gpr(ctx, a->r2);
2917 
2918     add1 = tcg_temp_new_i64();
2919     add2 = tcg_temp_new_i64();
2920     addc = tcg_temp_new_i64();
2921     dest = tcg_temp_new_i64();
2922 
2923     /* Form R1 << 1 | PSW[CB]{8}.  */
2924     tcg_gen_add_i64(add1, in1, in1);
2925     tcg_gen_add_i64(add1, add1, get_psw_carry(ctx, false));
2926 
2927     /*
2928      * Add or subtract R2, depending on PSW[V].  Proper computation of
2929      * carry requires that we subtract via + ~R2 + 1, as described in
2930      * the manual.  By extracting and masking V, we can produce the
2931      * proper inputs to the addition without movcond.
2932      */
2933     tcg_gen_sextract_i64(addc, cpu_psw_v, 31, 1);
2934     tcg_gen_xor_i64(add2, in2, addc);
2935     tcg_gen_andi_i64(addc, addc, 1);
2936 
2937     tcg_gen_add2_i64(dest, cpu_psw_cb_msb, add1, ctx->zero, add2, ctx->zero);
2938     tcg_gen_add2_i64(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb,
2939                      addc, ctx->zero);
2940 
2941     /* Write back the result register.  */
2942     save_gpr(ctx, a->t, dest);
2943 
2944     /* Write back PSW[CB].  */
2945     tcg_gen_xor_i64(cpu_psw_cb, add1, add2);
2946     tcg_gen_xor_i64(cpu_psw_cb, cpu_psw_cb, dest);
2947 
2948     /*
2949      * Write back PSW[V] for the division step.
2950      * Shift cb{8} from where it lives in bit 32 to bit 31,
2951      * so that it overlaps r2{32} in bit 31.
2952      */
2953     tcg_gen_shri_i64(cpu_psw_v, cpu_psw_cb, 1);
2954     tcg_gen_xor_i64(cpu_psw_v, cpu_psw_v, in2);
2955 
2956     /* Install the new nullification.  */
2957     if (a->cf) {
2958         TCGv_i64 sv = NULL, uv = NULL;
2959         if (cond_need_sv(a->cf >> 1)) {
2960             sv = do_add_sv(ctx, dest, add1, add2, in1, 1, false);
2961         } else if (cond_need_cb(a->cf >> 1)) {
2962             uv = do_add_uv(ctx, cpu_psw_cb, NULL, in1, 1, false);
2963         }
2964         ctx->null_cond = do_cond(ctx, a->cf, false, dest, uv, sv);
2965     }
2966 
2967     return nullify_end(ctx);
2968 }
2969 
2970 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2971 {
2972     return do_add_imm(ctx, a, false, false);
2973 }
2974 
2975 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2976 {
2977     return do_add_imm(ctx, a, true, false);
2978 }
2979 
2980 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2981 {
2982     return do_add_imm(ctx, a, false, true);
2983 }
2984 
2985 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2986 {
2987     return do_add_imm(ctx, a, true, true);
2988 }
2989 
2990 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2991 {
2992     return do_sub_imm(ctx, a, false);
2993 }
2994 
2995 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2996 {
2997     return do_sub_imm(ctx, a, true);
2998 }
2999 
3000 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a)
3001 {
3002     TCGv_i64 tcg_im, tcg_r2;
3003 
3004     if (a->cf) {
3005         nullify_over(ctx);
3006     }
3007 
3008     tcg_im = tcg_constant_i64(a->i);
3009     tcg_r2 = load_gpr(ctx, a->r);
3010     do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d);
3011 
3012     return nullify_end(ctx);
3013 }
3014 
3015 static bool do_multimedia(DisasContext *ctx, arg_rrr *a,
3016                           void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
3017 {
3018     TCGv_i64 r1, r2, dest;
3019 
3020     if (!ctx->is_pa20) {
3021         return false;
3022     }
3023 
3024     nullify_over(ctx);
3025 
3026     r1 = load_gpr(ctx, a->r1);
3027     r2 = load_gpr(ctx, a->r2);
3028     dest = dest_gpr(ctx, a->t);
3029 
3030     fn(dest, r1, r2);
3031     save_gpr(ctx, a->t, dest);
3032 
3033     return nullify_end(ctx);
3034 }
3035 
3036 static bool do_multimedia_sh(DisasContext *ctx, arg_rri *a,
3037                              void (*fn)(TCGv_i64, TCGv_i64, int64_t))
3038 {
3039     TCGv_i64 r, dest;
3040 
3041     if (!ctx->is_pa20) {
3042         return false;
3043     }
3044 
3045     nullify_over(ctx);
3046 
3047     r = load_gpr(ctx, a->r);
3048     dest = dest_gpr(ctx, a->t);
3049 
3050     fn(dest, r, a->i);
3051     save_gpr(ctx, a->t, dest);
3052 
3053     return nullify_end(ctx);
3054 }
3055 
3056 static bool do_multimedia_shadd(DisasContext *ctx, arg_rrr_sh *a,
3057                                 void (*fn)(TCGv_i64, TCGv_i64,
3058                                            TCGv_i64, TCGv_i32))
3059 {
3060     TCGv_i64 r1, r2, dest;
3061 
3062     if (!ctx->is_pa20) {
3063         return false;
3064     }
3065 
3066     nullify_over(ctx);
3067 
3068     r1 = load_gpr(ctx, a->r1);
3069     r2 = load_gpr(ctx, a->r2);
3070     dest = dest_gpr(ctx, a->t);
3071 
3072     fn(dest, r1, r2, tcg_constant_i32(a->sh));
3073     save_gpr(ctx, a->t, dest);
3074 
3075     return nullify_end(ctx);
3076 }
3077 
3078 static bool trans_hadd(DisasContext *ctx, arg_rrr *a)
3079 {
3080     return do_multimedia(ctx, a, tcg_gen_vec_add16_i64);
3081 }
3082 
3083 static bool trans_hadd_ss(DisasContext *ctx, arg_rrr *a)
3084 {
3085     return do_multimedia(ctx, a, gen_helper_hadd_ss);
3086 }
3087 
3088 static bool trans_hadd_us(DisasContext *ctx, arg_rrr *a)
3089 {
3090     return do_multimedia(ctx, a, gen_helper_hadd_us);
3091 }
3092 
3093 static bool trans_havg(DisasContext *ctx, arg_rrr *a)
3094 {
3095     return do_multimedia(ctx, a, gen_helper_havg);
3096 }
3097 
3098 static bool trans_hshl(DisasContext *ctx, arg_rri *a)
3099 {
3100     return do_multimedia_sh(ctx, a, tcg_gen_vec_shl16i_i64);
3101 }
3102 
3103 static bool trans_hshr_s(DisasContext *ctx, arg_rri *a)
3104 {
3105     return do_multimedia_sh(ctx, a, tcg_gen_vec_sar16i_i64);
3106 }
3107 
3108 static bool trans_hshr_u(DisasContext *ctx, arg_rri *a)
3109 {
3110     return do_multimedia_sh(ctx, a, tcg_gen_vec_shr16i_i64);
3111 }
3112 
3113 static bool trans_hshladd(DisasContext *ctx, arg_rrr_sh *a)
3114 {
3115     return do_multimedia_shadd(ctx, a, gen_helper_hshladd);
3116 }
3117 
3118 static bool trans_hshradd(DisasContext *ctx, arg_rrr_sh *a)
3119 {
3120     return do_multimedia_shadd(ctx, a, gen_helper_hshradd);
3121 }
3122 
3123 static bool trans_hsub(DisasContext *ctx, arg_rrr *a)
3124 {
3125     return do_multimedia(ctx, a, tcg_gen_vec_sub16_i64);
3126 }
3127 
3128 static bool trans_hsub_ss(DisasContext *ctx, arg_rrr *a)
3129 {
3130     return do_multimedia(ctx, a, gen_helper_hsub_ss);
3131 }
3132 
3133 static bool trans_hsub_us(DisasContext *ctx, arg_rrr *a)
3134 {
3135     return do_multimedia(ctx, a, gen_helper_hsub_us);
3136 }
3137 
3138 static void gen_mixh_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
3139 {
3140     uint64_t mask = 0xffff0000ffff0000ull;
3141     TCGv_i64 tmp = tcg_temp_new_i64();
3142 
3143     tcg_gen_andi_i64(tmp, r2, mask);
3144     tcg_gen_andi_i64(dst, r1, mask);
3145     tcg_gen_shri_i64(tmp, tmp, 16);
3146     tcg_gen_or_i64(dst, dst, tmp);
3147 }
3148 
3149 static bool trans_mixh_l(DisasContext *ctx, arg_rrr *a)
3150 {
3151     return do_multimedia(ctx, a, gen_mixh_l);
3152 }
3153 
3154 static void gen_mixh_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
3155 {
3156     uint64_t mask = 0x0000ffff0000ffffull;
3157     TCGv_i64 tmp = tcg_temp_new_i64();
3158 
3159     tcg_gen_andi_i64(tmp, r1, mask);
3160     tcg_gen_andi_i64(dst, r2, mask);
3161     tcg_gen_shli_i64(tmp, tmp, 16);
3162     tcg_gen_or_i64(dst, dst, tmp);
3163 }
3164 
3165 static bool trans_mixh_r(DisasContext *ctx, arg_rrr *a)
3166 {
3167     return do_multimedia(ctx, a, gen_mixh_r);
3168 }
3169 
3170 static void gen_mixw_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
3171 {
3172     TCGv_i64 tmp = tcg_temp_new_i64();
3173 
3174     tcg_gen_shri_i64(tmp, r2, 32);
3175     tcg_gen_deposit_i64(dst, r1, tmp, 0, 32);
3176 }
3177 
3178 static bool trans_mixw_l(DisasContext *ctx, arg_rrr *a)
3179 {
3180     return do_multimedia(ctx, a, gen_mixw_l);
3181 }
3182 
3183 static void gen_mixw_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
3184 {
3185     tcg_gen_deposit_i64(dst, r2, r1, 32, 32);
3186 }
3187 
3188 static bool trans_mixw_r(DisasContext *ctx, arg_rrr *a)
3189 {
3190     return do_multimedia(ctx, a, gen_mixw_r);
3191 }
3192 
3193 static bool trans_permh(DisasContext *ctx, arg_permh *a)
3194 {
3195     TCGv_i64 r, t0, t1, t2, t3;
3196 
3197     if (!ctx->is_pa20) {
3198         return false;
3199     }
3200 
3201     nullify_over(ctx);
3202 
3203     r = load_gpr(ctx, a->r1);
3204     t0 = tcg_temp_new_i64();
3205     t1 = tcg_temp_new_i64();
3206     t2 = tcg_temp_new_i64();
3207     t3 = tcg_temp_new_i64();
3208 
3209     tcg_gen_extract_i64(t0, r, (3 - a->c0) * 16, 16);
3210     tcg_gen_extract_i64(t1, r, (3 - a->c1) * 16, 16);
3211     tcg_gen_extract_i64(t2, r, (3 - a->c2) * 16, 16);
3212     tcg_gen_extract_i64(t3, r, (3 - a->c3) * 16, 16);
3213 
3214     tcg_gen_deposit_i64(t0, t1, t0, 16, 48);
3215     tcg_gen_deposit_i64(t2, t3, t2, 16, 48);
3216     tcg_gen_deposit_i64(t0, t2, t0, 32, 32);
3217 
3218     save_gpr(ctx, a->t, t0);
3219     return nullify_end(ctx);
3220 }
3221 
3222 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
3223 {
3224     if (ctx->is_pa20) {
3225        /*
3226         * With pa20, LDB, LDH, LDW, LDD to %g0 are prefetches.
3227         * Any base modification still occurs.
3228         */
3229         if (a->t == 0) {
3230             return trans_nop_addrx(ctx, a);
3231         }
3232     } else if (a->size > MO_32) {
3233         return gen_illegal(ctx);
3234     }
3235     return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
3236                    a->disp, a->sp, a->m, a->size | MO_TE);
3237 }
3238 
3239 static bool trans_st(DisasContext *ctx, arg_ldst *a)
3240 {
3241     assert(a->x == 0 && a->scale == 0);
3242     if (!ctx->is_pa20 && a->size > MO_32) {
3243         return gen_illegal(ctx);
3244     }
3245     return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
3246 }
3247 
3248 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
3249 {
3250     MemOp mop = MO_TE | MO_ALIGN | a->size;
3251     TCGv_i64 dest, ofs;
3252     TCGv_i64 addr;
3253 
3254     if (!ctx->is_pa20 && a->size > MO_32) {
3255         return gen_illegal(ctx);
3256     }
3257 
3258     nullify_over(ctx);
3259 
3260     if (a->m) {
3261         /* Base register modification.  Make sure if RT == RB,
3262            we see the result of the load.  */
3263         dest = tcg_temp_new_i64();
3264     } else {
3265         dest = dest_gpr(ctx, a->t);
3266     }
3267 
3268     form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? 3 : 0,
3269              a->disp, a->sp, a->m, MMU_DISABLED(ctx));
3270 
3271     /*
3272      * For hppa1.1, LDCW is undefined unless aligned mod 16.
3273      * However actual hardware succeeds with aligned mod 4.
3274      * Detect this case and log a GUEST_ERROR.
3275      *
3276      * TODO: HPPA64 relaxes the over-alignment requirement
3277      * with the ,co completer.
3278      */
3279     gen_helper_ldc_check(addr);
3280 
3281     tcg_gen_atomic_xchg_i64(dest, addr, ctx->zero, ctx->mmu_idx, mop);
3282 
3283     if (a->m) {
3284         save_gpr(ctx, a->b, ofs);
3285     }
3286     save_gpr(ctx, a->t, dest);
3287 
3288     return nullify_end(ctx);
3289 }
3290 
3291 static bool trans_stby(DisasContext *ctx, arg_stby *a)
3292 {
3293     TCGv_i64 ofs, val;
3294     TCGv_i64 addr;
3295 
3296     nullify_over(ctx);
3297 
3298     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3299              MMU_DISABLED(ctx));
3300     val = load_gpr(ctx, a->r);
3301     if (a->a) {
3302         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3303             gen_helper_stby_e_parallel(tcg_env, addr, val);
3304         } else {
3305             gen_helper_stby_e(tcg_env, addr, val);
3306         }
3307     } else {
3308         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3309             gen_helper_stby_b_parallel(tcg_env, addr, val);
3310         } else {
3311             gen_helper_stby_b(tcg_env, addr, val);
3312         }
3313     }
3314     if (a->m) {
3315         tcg_gen_andi_i64(ofs, ofs, ~3);
3316         save_gpr(ctx, a->b, ofs);
3317     }
3318 
3319     return nullify_end(ctx);
3320 }
3321 
3322 static bool trans_stdby(DisasContext *ctx, arg_stby *a)
3323 {
3324     TCGv_i64 ofs, val;
3325     TCGv_i64 addr;
3326 
3327     if (!ctx->is_pa20) {
3328         return false;
3329     }
3330     nullify_over(ctx);
3331 
3332     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3333              MMU_DISABLED(ctx));
3334     val = load_gpr(ctx, a->r);
3335     if (a->a) {
3336         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3337             gen_helper_stdby_e_parallel(tcg_env, addr, val);
3338         } else {
3339             gen_helper_stdby_e(tcg_env, addr, val);
3340         }
3341     } else {
3342         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3343             gen_helper_stdby_b_parallel(tcg_env, addr, val);
3344         } else {
3345             gen_helper_stdby_b(tcg_env, addr, val);
3346         }
3347     }
3348     if (a->m) {
3349         tcg_gen_andi_i64(ofs, ofs, ~7);
3350         save_gpr(ctx, a->b, ofs);
3351     }
3352 
3353     return nullify_end(ctx);
3354 }
3355 
3356 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
3357 {
3358     int hold_mmu_idx = ctx->mmu_idx;
3359 
3360     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3361     ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
3362     trans_ld(ctx, a);
3363     ctx->mmu_idx = hold_mmu_idx;
3364     return true;
3365 }
3366 
3367 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
3368 {
3369     int hold_mmu_idx = ctx->mmu_idx;
3370 
3371     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3372     ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
3373     trans_st(ctx, a);
3374     ctx->mmu_idx = hold_mmu_idx;
3375     return true;
3376 }
3377 
3378 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
3379 {
3380     TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3381 
3382     tcg_gen_movi_i64(tcg_rt, a->i);
3383     save_gpr(ctx, a->t, tcg_rt);
3384     cond_free(&ctx->null_cond);
3385     return true;
3386 }
3387 
3388 static bool trans_addil(DisasContext *ctx, arg_addil *a)
3389 {
3390     TCGv_i64 tcg_rt = load_gpr(ctx, a->r);
3391     TCGv_i64 tcg_r1 = dest_gpr(ctx, 1);
3392 
3393     tcg_gen_addi_i64(tcg_r1, tcg_rt, a->i);
3394     save_gpr(ctx, 1, tcg_r1);
3395     cond_free(&ctx->null_cond);
3396     return true;
3397 }
3398 
3399 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
3400 {
3401     TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3402 
3403     /* Special case rb == 0, for the LDI pseudo-op.
3404        The COPY pseudo-op is handled for free within tcg_gen_addi_i64.  */
3405     if (a->b == 0) {
3406         tcg_gen_movi_i64(tcg_rt, a->i);
3407     } else {
3408         tcg_gen_addi_i64(tcg_rt, cpu_gr[a->b], a->i);
3409     }
3410     save_gpr(ctx, a->t, tcg_rt);
3411     cond_free(&ctx->null_cond);
3412     return true;
3413 }
3414 
3415 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3416                     unsigned c, unsigned f, bool d, unsigned n, int disp)
3417 {
3418     TCGv_i64 dest, in2, sv;
3419     DisasCond cond;
3420 
3421     in2 = load_gpr(ctx, r);
3422     dest = tcg_temp_new_i64();
3423 
3424     tcg_gen_sub_i64(dest, in1, in2);
3425 
3426     sv = NULL;
3427     if (cond_need_sv(c)) {
3428         sv = do_sub_sv(ctx, dest, in1, in2);
3429     }
3430 
3431     cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv);
3432     return do_cbranch(ctx, disp, n, &cond);
3433 }
3434 
3435 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3436 {
3437     if (!ctx->is_pa20 && a->d) {
3438         return false;
3439     }
3440     nullify_over(ctx);
3441     return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1),
3442                    a->c, a->f, a->d, a->n, a->disp);
3443 }
3444 
3445 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3446 {
3447     if (!ctx->is_pa20 && a->d) {
3448         return false;
3449     }
3450     nullify_over(ctx);
3451     return do_cmpb(ctx, a->r, tcg_constant_i64(a->i),
3452                    a->c, a->f, a->d, a->n, a->disp);
3453 }
3454 
3455 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3456                     unsigned c, unsigned f, unsigned n, int disp)
3457 {
3458     TCGv_i64 dest, in2, sv, cb_cond;
3459     DisasCond cond;
3460     bool d = false;
3461 
3462     /*
3463      * For hppa64, the ADDB conditions change with PSW.W,
3464      * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE.
3465      */
3466     if (ctx->tb_flags & PSW_W) {
3467         d = c >= 5;
3468         if (d) {
3469             c &= 3;
3470         }
3471     }
3472 
3473     in2 = load_gpr(ctx, r);
3474     dest = tcg_temp_new_i64();
3475     sv = NULL;
3476     cb_cond = NULL;
3477 
3478     if (cond_need_cb(c)) {
3479         TCGv_i64 cb = tcg_temp_new_i64();
3480         TCGv_i64 cb_msb = tcg_temp_new_i64();
3481 
3482         tcg_gen_movi_i64(cb_msb, 0);
3483         tcg_gen_add2_i64(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3484         tcg_gen_xor_i64(cb, in1, in2);
3485         tcg_gen_xor_i64(cb, cb, dest);
3486         cb_cond = get_carry(ctx, d, cb, cb_msb);
3487     } else {
3488         tcg_gen_add_i64(dest, in1, in2);
3489     }
3490     if (cond_need_sv(c)) {
3491         sv = do_add_sv(ctx, dest, in1, in2, in1, 0, d);
3492     }
3493 
3494     cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv);
3495     save_gpr(ctx, r, dest);
3496     return do_cbranch(ctx, disp, n, &cond);
3497 }
3498 
3499 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3500 {
3501     nullify_over(ctx);
3502     return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3503 }
3504 
3505 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3506 {
3507     nullify_over(ctx);
3508     return do_addb(ctx, a->r, tcg_constant_i64(a->i), a->c, a->f, a->n, a->disp);
3509 }
3510 
3511 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3512 {
3513     TCGv_i64 tmp, tcg_r;
3514     DisasCond cond;
3515 
3516     nullify_over(ctx);
3517 
3518     tmp = tcg_temp_new_i64();
3519     tcg_r = load_gpr(ctx, a->r);
3520     if (a->d) {
3521         tcg_gen_shl_i64(tmp, tcg_r, cpu_sar);
3522     } else {
3523         /* Force shift into [32,63] */
3524         tcg_gen_ori_i64(tmp, cpu_sar, 32);
3525         tcg_gen_shl_i64(tmp, tcg_r, tmp);
3526     }
3527 
3528     cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3529     return do_cbranch(ctx, a->disp, a->n, &cond);
3530 }
3531 
3532 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3533 {
3534     TCGv_i64 tmp, tcg_r;
3535     DisasCond cond;
3536     int p;
3537 
3538     nullify_over(ctx);
3539 
3540     tmp = tcg_temp_new_i64();
3541     tcg_r = load_gpr(ctx, a->r);
3542     p = a->p | (a->d ? 0 : 32);
3543     tcg_gen_shli_i64(tmp, tcg_r, p);
3544 
3545     cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3546     return do_cbranch(ctx, a->disp, a->n, &cond);
3547 }
3548 
3549 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3550 {
3551     TCGv_i64 dest;
3552     DisasCond cond;
3553 
3554     nullify_over(ctx);
3555 
3556     dest = dest_gpr(ctx, a->r2);
3557     if (a->r1 == 0) {
3558         tcg_gen_movi_i64(dest, 0);
3559     } else {
3560         tcg_gen_mov_i64(dest, cpu_gr[a->r1]);
3561     }
3562 
3563     /* All MOVB conditions are 32-bit. */
3564     cond = do_sed_cond(ctx, a->c, false, dest);
3565     return do_cbranch(ctx, a->disp, a->n, &cond);
3566 }
3567 
3568 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3569 {
3570     TCGv_i64 dest;
3571     DisasCond cond;
3572 
3573     nullify_over(ctx);
3574 
3575     dest = dest_gpr(ctx, a->r);
3576     tcg_gen_movi_i64(dest, a->i);
3577 
3578     /* All MOVBI conditions are 32-bit. */
3579     cond = do_sed_cond(ctx, a->c, false, dest);
3580     return do_cbranch(ctx, a->disp, a->n, &cond);
3581 }
3582 
3583 static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a)
3584 {
3585     TCGv_i64 dest, src2;
3586 
3587     if (!ctx->is_pa20 && a->d) {
3588         return false;
3589     }
3590     if (a->c) {
3591         nullify_over(ctx);
3592     }
3593 
3594     dest = dest_gpr(ctx, a->t);
3595     src2 = load_gpr(ctx, a->r2);
3596     if (a->r1 == 0) {
3597         if (a->d) {
3598             tcg_gen_shr_i64(dest, src2, cpu_sar);
3599         } else {
3600             TCGv_i64 tmp = tcg_temp_new_i64();
3601 
3602             tcg_gen_ext32u_i64(dest, src2);
3603             tcg_gen_andi_i64(tmp, cpu_sar, 31);
3604             tcg_gen_shr_i64(dest, dest, tmp);
3605         }
3606     } else if (a->r1 == a->r2) {
3607         if (a->d) {
3608             tcg_gen_rotr_i64(dest, src2, cpu_sar);
3609         } else {
3610             TCGv_i32 t32 = tcg_temp_new_i32();
3611             TCGv_i32 s32 = tcg_temp_new_i32();
3612 
3613             tcg_gen_extrl_i64_i32(t32, src2);
3614             tcg_gen_extrl_i64_i32(s32, cpu_sar);
3615             tcg_gen_andi_i32(s32, s32, 31);
3616             tcg_gen_rotr_i32(t32, t32, s32);
3617             tcg_gen_extu_i32_i64(dest, t32);
3618         }
3619     } else {
3620         TCGv_i64 src1 = load_gpr(ctx, a->r1);
3621 
3622         if (a->d) {
3623             TCGv_i64 t = tcg_temp_new_i64();
3624             TCGv_i64 n = tcg_temp_new_i64();
3625 
3626             tcg_gen_xori_i64(n, cpu_sar, 63);
3627             tcg_gen_shl_i64(t, src1, n);
3628             tcg_gen_shli_i64(t, t, 1);
3629             tcg_gen_shr_i64(dest, src2, cpu_sar);
3630             tcg_gen_or_i64(dest, dest, t);
3631         } else {
3632             TCGv_i64 t = tcg_temp_new_i64();
3633             TCGv_i64 s = tcg_temp_new_i64();
3634 
3635             tcg_gen_concat32_i64(t, src2, src1);
3636             tcg_gen_andi_i64(s, cpu_sar, 31);
3637             tcg_gen_shr_i64(dest, t, s);
3638         }
3639     }
3640     save_gpr(ctx, a->t, dest);
3641 
3642     /* Install the new nullification.  */
3643     cond_free(&ctx->null_cond);
3644     if (a->c) {
3645         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3646     }
3647     return nullify_end(ctx);
3648 }
3649 
3650 static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a)
3651 {
3652     unsigned width, sa;
3653     TCGv_i64 dest, t2;
3654 
3655     if (!ctx->is_pa20 && a->d) {
3656         return false;
3657     }
3658     if (a->c) {
3659         nullify_over(ctx);
3660     }
3661 
3662     width = a->d ? 64 : 32;
3663     sa = width - 1 - a->cpos;
3664 
3665     dest = dest_gpr(ctx, a->t);
3666     t2 = load_gpr(ctx, a->r2);
3667     if (a->r1 == 0) {
3668         tcg_gen_extract_i64(dest, t2, sa, width - sa);
3669     } else if (width == TARGET_LONG_BITS) {
3670         tcg_gen_extract2_i64(dest, t2, cpu_gr[a->r1], sa);
3671     } else {
3672         assert(!a->d);
3673         if (a->r1 == a->r2) {
3674             TCGv_i32 t32 = tcg_temp_new_i32();
3675             tcg_gen_extrl_i64_i32(t32, t2);
3676             tcg_gen_rotri_i32(t32, t32, sa);
3677             tcg_gen_extu_i32_i64(dest, t32);
3678         } else {
3679             tcg_gen_concat32_i64(dest, t2, cpu_gr[a->r1]);
3680             tcg_gen_extract_i64(dest, dest, sa, 32);
3681         }
3682     }
3683     save_gpr(ctx, a->t, dest);
3684 
3685     /* Install the new nullification.  */
3686     cond_free(&ctx->null_cond);
3687     if (a->c) {
3688         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3689     }
3690     return nullify_end(ctx);
3691 }
3692 
3693 static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a)
3694 {
3695     unsigned widthm1 = a->d ? 63 : 31;
3696     TCGv_i64 dest, src, tmp;
3697 
3698     if (!ctx->is_pa20 && a->d) {
3699         return false;
3700     }
3701     if (a->c) {
3702         nullify_over(ctx);
3703     }
3704 
3705     dest = dest_gpr(ctx, a->t);
3706     src = load_gpr(ctx, a->r);
3707     tmp = tcg_temp_new_i64();
3708 
3709     /* Recall that SAR is using big-endian bit numbering.  */
3710     tcg_gen_andi_i64(tmp, cpu_sar, widthm1);
3711     tcg_gen_xori_i64(tmp, tmp, widthm1);
3712 
3713     if (a->se) {
3714         if (!a->d) {
3715             tcg_gen_ext32s_i64(dest, src);
3716             src = dest;
3717         }
3718         tcg_gen_sar_i64(dest, src, tmp);
3719         tcg_gen_sextract_i64(dest, dest, 0, a->len);
3720     } else {
3721         if (!a->d) {
3722             tcg_gen_ext32u_i64(dest, src);
3723             src = dest;
3724         }
3725         tcg_gen_shr_i64(dest, src, tmp);
3726         tcg_gen_extract_i64(dest, dest, 0, a->len);
3727     }
3728     save_gpr(ctx, a->t, dest);
3729 
3730     /* Install the new nullification.  */
3731     cond_free(&ctx->null_cond);
3732     if (a->c) {
3733         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3734     }
3735     return nullify_end(ctx);
3736 }
3737 
3738 static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a)
3739 {
3740     unsigned len, cpos, width;
3741     TCGv_i64 dest, src;
3742 
3743     if (!ctx->is_pa20 && a->d) {
3744         return false;
3745     }
3746     if (a->c) {
3747         nullify_over(ctx);
3748     }
3749 
3750     len = a->len;
3751     width = a->d ? 64 : 32;
3752     cpos = width - 1 - a->pos;
3753     if (cpos + len > width) {
3754         len = width - cpos;
3755     }
3756 
3757     dest = dest_gpr(ctx, a->t);
3758     src = load_gpr(ctx, a->r);
3759     if (a->se) {
3760         tcg_gen_sextract_i64(dest, src, cpos, len);
3761     } else {
3762         tcg_gen_extract_i64(dest, src, cpos, len);
3763     }
3764     save_gpr(ctx, a->t, dest);
3765 
3766     /* Install the new nullification.  */
3767     cond_free(&ctx->null_cond);
3768     if (a->c) {
3769         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3770     }
3771     return nullify_end(ctx);
3772 }
3773 
3774 static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a)
3775 {
3776     unsigned len, width;
3777     uint64_t mask0, mask1;
3778     TCGv_i64 dest;
3779 
3780     if (!ctx->is_pa20 && a->d) {
3781         return false;
3782     }
3783     if (a->c) {
3784         nullify_over(ctx);
3785     }
3786 
3787     len = a->len;
3788     width = a->d ? 64 : 32;
3789     if (a->cpos + len > width) {
3790         len = width - a->cpos;
3791     }
3792 
3793     dest = dest_gpr(ctx, a->t);
3794     mask0 = deposit64(0, a->cpos, len, a->i);
3795     mask1 = deposit64(-1, a->cpos, len, a->i);
3796 
3797     if (a->nz) {
3798         TCGv_i64 src = load_gpr(ctx, a->t);
3799         tcg_gen_andi_i64(dest, src, mask1);
3800         tcg_gen_ori_i64(dest, dest, mask0);
3801     } else {
3802         tcg_gen_movi_i64(dest, mask0);
3803     }
3804     save_gpr(ctx, a->t, dest);
3805 
3806     /* Install the new nullification.  */
3807     cond_free(&ctx->null_cond);
3808     if (a->c) {
3809         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3810     }
3811     return nullify_end(ctx);
3812 }
3813 
3814 static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a)
3815 {
3816     unsigned rs = a->nz ? a->t : 0;
3817     unsigned len, width;
3818     TCGv_i64 dest, val;
3819 
3820     if (!ctx->is_pa20 && a->d) {
3821         return false;
3822     }
3823     if (a->c) {
3824         nullify_over(ctx);
3825     }
3826 
3827     len = a->len;
3828     width = a->d ? 64 : 32;
3829     if (a->cpos + len > width) {
3830         len = width - a->cpos;
3831     }
3832 
3833     dest = dest_gpr(ctx, a->t);
3834     val = load_gpr(ctx, a->r);
3835     if (rs == 0) {
3836         tcg_gen_deposit_z_i64(dest, val, a->cpos, len);
3837     } else {
3838         tcg_gen_deposit_i64(dest, cpu_gr[rs], val, a->cpos, len);
3839     }
3840     save_gpr(ctx, a->t, dest);
3841 
3842     /* Install the new nullification.  */
3843     cond_free(&ctx->null_cond);
3844     if (a->c) {
3845         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3846     }
3847     return nullify_end(ctx);
3848 }
3849 
3850 static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c,
3851                        bool d, bool nz, unsigned len, TCGv_i64 val)
3852 {
3853     unsigned rs = nz ? rt : 0;
3854     unsigned widthm1 = d ? 63 : 31;
3855     TCGv_i64 mask, tmp, shift, dest;
3856     uint64_t msb = 1ULL << (len - 1);
3857 
3858     dest = dest_gpr(ctx, rt);
3859     shift = tcg_temp_new_i64();
3860     tmp = tcg_temp_new_i64();
3861 
3862     /* Convert big-endian bit numbering in SAR to left-shift.  */
3863     tcg_gen_andi_i64(shift, cpu_sar, widthm1);
3864     tcg_gen_xori_i64(shift, shift, widthm1);
3865 
3866     mask = tcg_temp_new_i64();
3867     tcg_gen_movi_i64(mask, msb + (msb - 1));
3868     tcg_gen_and_i64(tmp, val, mask);
3869     if (rs) {
3870         tcg_gen_shl_i64(mask, mask, shift);
3871         tcg_gen_shl_i64(tmp, tmp, shift);
3872         tcg_gen_andc_i64(dest, cpu_gr[rs], mask);
3873         tcg_gen_or_i64(dest, dest, tmp);
3874     } else {
3875         tcg_gen_shl_i64(dest, tmp, shift);
3876     }
3877     save_gpr(ctx, rt, dest);
3878 
3879     /* Install the new nullification.  */
3880     cond_free(&ctx->null_cond);
3881     if (c) {
3882         ctx->null_cond = do_sed_cond(ctx, c, d, dest);
3883     }
3884     return nullify_end(ctx);
3885 }
3886 
3887 static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a)
3888 {
3889     if (!ctx->is_pa20 && a->d) {
3890         return false;
3891     }
3892     if (a->c) {
3893         nullify_over(ctx);
3894     }
3895     return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3896                       load_gpr(ctx, a->r));
3897 }
3898 
3899 static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a)
3900 {
3901     if (!ctx->is_pa20 && a->d) {
3902         return false;
3903     }
3904     if (a->c) {
3905         nullify_over(ctx);
3906     }
3907     return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3908                       tcg_constant_i64(a->i));
3909 }
3910 
3911 static bool trans_be(DisasContext *ctx, arg_be *a)
3912 {
3913     TCGv_i64 tmp;
3914 
3915 #ifdef CONFIG_USER_ONLY
3916     /* ??? It seems like there should be a good way of using
3917        "be disp(sr2, r0)", the canonical gateway entry mechanism
3918        to our advantage.  But that appears to be inconvenient to
3919        manage along side branch delay slots.  Therefore we handle
3920        entry into the gateway page via absolute address.  */
3921     /* Since we don't implement spaces, just branch.  Do notice the special
3922        case of "be disp(*,r0)" using a direct branch to disp, so that we can
3923        goto_tb to the TB containing the syscall.  */
3924     if (a->b == 0) {
3925         return do_dbranch(ctx, a->disp, a->l, a->n);
3926     }
3927 #else
3928     nullify_over(ctx);
3929 #endif
3930 
3931     tmp = tcg_temp_new_i64();
3932     tcg_gen_addi_i64(tmp, load_gpr(ctx, a->b), a->disp);
3933     tmp = do_ibranch_priv(ctx, tmp);
3934 
3935 #ifdef CONFIG_USER_ONLY
3936     return do_ibranch(ctx, tmp, a->l, a->n);
3937 #else
3938     TCGv_i64 new_spc = tcg_temp_new_i64();
3939 
3940     load_spr(ctx, new_spc, a->sp);
3941     if (a->l) {
3942         copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3943         tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_b);
3944     }
3945     if (a->n && use_nullify_skip(ctx)) {
3946         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
3947         tcg_gen_addi_i64(tmp, tmp, 4);
3948         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3949         tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3950         tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3951         nullify_set(ctx, 0);
3952     } else {
3953         copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3954         if (ctx->iaoq_b == -1) {
3955             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3956         }
3957         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3958         tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3959         nullify_set(ctx, a->n);
3960     }
3961     tcg_gen_lookup_and_goto_ptr();
3962     ctx->base.is_jmp = DISAS_NORETURN;
3963     return nullify_end(ctx);
3964 #endif
3965 }
3966 
3967 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3968 {
3969     return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3970 }
3971 
3972 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3973 {
3974     uint64_t dest = iaoq_dest(ctx, a->disp);
3975 
3976     nullify_over(ctx);
3977 
3978     /* Make sure the caller hasn't done something weird with the queue.
3979      * ??? This is not quite the same as the PSW[B] bit, which would be
3980      * expensive to track.  Real hardware will trap for
3981      *    b  gateway
3982      *    b  gateway+4  (in delay slot of first branch)
3983      * However, checking for a non-sequential instruction queue *will*
3984      * diagnose the security hole
3985      *    b  gateway
3986      *    b  evil
3987      * in which instructions at evil would run with increased privs.
3988      */
3989     if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3990         return gen_illegal(ctx);
3991     }
3992 
3993 #ifndef CONFIG_USER_ONLY
3994     if (ctx->tb_flags & PSW_C) {
3995         int type = hppa_artype_for_page(cpu_env(ctx->cs), ctx->base.pc_next);
3996         /* If we could not find a TLB entry, then we need to generate an
3997            ITLB miss exception so the kernel will provide it.
3998            The resulting TLB fill operation will invalidate this TB and
3999            we will re-translate, at which point we *will* be able to find
4000            the TLB entry and determine if this is in fact a gateway page.  */
4001         if (type < 0) {
4002             gen_excp(ctx, EXCP_ITLB_MISS);
4003             return true;
4004         }
4005         /* No change for non-gateway pages or for priv decrease.  */
4006         if (type >= 4 && type - 4 < ctx->privilege) {
4007             dest = deposit64(dest, 0, 2, type - 4);
4008         }
4009     } else {
4010         dest &= -4;  /* priv = 0 */
4011     }
4012 #endif
4013 
4014     if (a->l) {
4015         TCGv_i64 tmp = dest_gpr(ctx, a->l);
4016         if (ctx->privilege < 3) {
4017             tcg_gen_andi_i64(tmp, tmp, -4);
4018         }
4019         tcg_gen_ori_i64(tmp, tmp, ctx->privilege);
4020         save_gpr(ctx, a->l, tmp);
4021     }
4022 
4023     return do_dbranch(ctx, dest, 0, a->n);
4024 }
4025 
4026 static bool trans_blr(DisasContext *ctx, arg_blr *a)
4027 {
4028     if (a->x) {
4029         TCGv_i64 tmp = tcg_temp_new_i64();
4030         tcg_gen_shli_i64(tmp, load_gpr(ctx, a->x), 3);
4031         tcg_gen_addi_i64(tmp, tmp, ctx->iaoq_f + 8);
4032         /* The computation here never changes privilege level.  */
4033         return do_ibranch(ctx, tmp, a->l, a->n);
4034     } else {
4035         /* BLR R0,RX is a good way to load PC+8 into RX.  */
4036         return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
4037     }
4038 }
4039 
4040 static bool trans_bv(DisasContext *ctx, arg_bv *a)
4041 {
4042     TCGv_i64 dest;
4043 
4044     if (a->x == 0) {
4045         dest = load_gpr(ctx, a->b);
4046     } else {
4047         dest = tcg_temp_new_i64();
4048         tcg_gen_shli_i64(dest, load_gpr(ctx, a->x), 3);
4049         tcg_gen_add_i64(dest, dest, load_gpr(ctx, a->b));
4050     }
4051     dest = do_ibranch_priv(ctx, dest);
4052     return do_ibranch(ctx, dest, 0, a->n);
4053 }
4054 
4055 static bool trans_bve(DisasContext *ctx, arg_bve *a)
4056 {
4057     TCGv_i64 dest;
4058 
4059 #ifdef CONFIG_USER_ONLY
4060     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
4061     return do_ibranch(ctx, dest, a->l, a->n);
4062 #else
4063     nullify_over(ctx);
4064     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
4065 
4066     copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
4067     if (ctx->iaoq_b == -1) {
4068         tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4069     }
4070     copy_iaoq_entry(ctx, cpu_iaoq_b, -1, dest);
4071     tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
4072     if (a->l) {
4073         copy_iaoq_entry(ctx, cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
4074     }
4075     nullify_set(ctx, a->n);
4076     tcg_gen_lookup_and_goto_ptr();
4077     ctx->base.is_jmp = DISAS_NORETURN;
4078     return nullify_end(ctx);
4079 #endif
4080 }
4081 
4082 static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a)
4083 {
4084     /* All branch target stack instructions implement as nop. */
4085     return ctx->is_pa20;
4086 }
4087 
4088 /*
4089  * Float class 0
4090  */
4091 
4092 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4093 {
4094     tcg_gen_mov_i32(dst, src);
4095 }
4096 
4097 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
4098 {
4099     uint64_t ret;
4100 
4101     if (ctx->is_pa20) {
4102         ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
4103     } else {
4104         ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
4105     }
4106 
4107     nullify_over(ctx);
4108     save_frd(0, tcg_constant_i64(ret));
4109     return nullify_end(ctx);
4110 }
4111 
4112 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
4113 {
4114     return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
4115 }
4116 
4117 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4118 {
4119     tcg_gen_mov_i64(dst, src);
4120 }
4121 
4122 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
4123 {
4124     return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
4125 }
4126 
4127 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4128 {
4129     tcg_gen_andi_i32(dst, src, INT32_MAX);
4130 }
4131 
4132 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
4133 {
4134     return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
4135 }
4136 
4137 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4138 {
4139     tcg_gen_andi_i64(dst, src, INT64_MAX);
4140 }
4141 
4142 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
4143 {
4144     return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
4145 }
4146 
4147 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
4148 {
4149     return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
4150 }
4151 
4152 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
4153 {
4154     return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
4155 }
4156 
4157 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
4158 {
4159     return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
4160 }
4161 
4162 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
4163 {
4164     return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
4165 }
4166 
4167 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4168 {
4169     tcg_gen_xori_i32(dst, src, INT32_MIN);
4170 }
4171 
4172 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
4173 {
4174     return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
4175 }
4176 
4177 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4178 {
4179     tcg_gen_xori_i64(dst, src, INT64_MIN);
4180 }
4181 
4182 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
4183 {
4184     return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
4185 }
4186 
4187 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4188 {
4189     tcg_gen_ori_i32(dst, src, INT32_MIN);
4190 }
4191 
4192 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
4193 {
4194     return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
4195 }
4196 
4197 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4198 {
4199     tcg_gen_ori_i64(dst, src, INT64_MIN);
4200 }
4201 
4202 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
4203 {
4204     return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
4205 }
4206 
4207 /*
4208  * Float class 1
4209  */
4210 
4211 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
4212 {
4213     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
4214 }
4215 
4216 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
4217 {
4218     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
4219 }
4220 
4221 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
4222 {
4223     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
4224 }
4225 
4226 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
4227 {
4228     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
4229 }
4230 
4231 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
4232 {
4233     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
4234 }
4235 
4236 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
4237 {
4238     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
4239 }
4240 
4241 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
4242 {
4243     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
4244 }
4245 
4246 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
4247 {
4248     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
4249 }
4250 
4251 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
4252 {
4253     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
4254 }
4255 
4256 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
4257 {
4258     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
4259 }
4260 
4261 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
4262 {
4263     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
4264 }
4265 
4266 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
4267 {
4268     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
4269 }
4270 
4271 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
4272 {
4273     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
4274 }
4275 
4276 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
4277 {
4278     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
4279 }
4280 
4281 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
4282 {
4283     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
4284 }
4285 
4286 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
4287 {
4288     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
4289 }
4290 
4291 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
4292 {
4293     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
4294 }
4295 
4296 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
4297 {
4298     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
4299 }
4300 
4301 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
4302 {
4303     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
4304 }
4305 
4306 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
4307 {
4308     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
4309 }
4310 
4311 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
4312 {
4313     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
4314 }
4315 
4316 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
4317 {
4318     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
4319 }
4320 
4321 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
4322 {
4323     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
4324 }
4325 
4326 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
4327 {
4328     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
4329 }
4330 
4331 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
4332 {
4333     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
4334 }
4335 
4336 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
4337 {
4338     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
4339 }
4340 
4341 /*
4342  * Float class 2
4343  */
4344 
4345 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
4346 {
4347     TCGv_i32 ta, tb, tc, ty;
4348 
4349     nullify_over(ctx);
4350 
4351     ta = load_frw0_i32(a->r1);
4352     tb = load_frw0_i32(a->r2);
4353     ty = tcg_constant_i32(a->y);
4354     tc = tcg_constant_i32(a->c);
4355 
4356     gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
4357 
4358     return nullify_end(ctx);
4359 }
4360 
4361 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
4362 {
4363     TCGv_i64 ta, tb;
4364     TCGv_i32 tc, ty;
4365 
4366     nullify_over(ctx);
4367 
4368     ta = load_frd0(a->r1);
4369     tb = load_frd0(a->r2);
4370     ty = tcg_constant_i32(a->y);
4371     tc = tcg_constant_i32(a->c);
4372 
4373     gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
4374 
4375     return nullify_end(ctx);
4376 }
4377 
4378 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
4379 {
4380     TCGv_i64 t;
4381 
4382     nullify_over(ctx);
4383 
4384     t = tcg_temp_new_i64();
4385     tcg_gen_ld32u_i64(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
4386 
4387     if (a->y == 1) {
4388         int mask;
4389         bool inv = false;
4390 
4391         switch (a->c) {
4392         case 0: /* simple */
4393             tcg_gen_andi_i64(t, t, 0x4000000);
4394             ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4395             goto done;
4396         case 2: /* rej */
4397             inv = true;
4398             /* fallthru */
4399         case 1: /* acc */
4400             mask = 0x43ff800;
4401             break;
4402         case 6: /* rej8 */
4403             inv = true;
4404             /* fallthru */
4405         case 5: /* acc8 */
4406             mask = 0x43f8000;
4407             break;
4408         case 9: /* acc6 */
4409             mask = 0x43e0000;
4410             break;
4411         case 13: /* acc4 */
4412             mask = 0x4380000;
4413             break;
4414         case 17: /* acc2 */
4415             mask = 0x4200000;
4416             break;
4417         default:
4418             gen_illegal(ctx);
4419             return true;
4420         }
4421         if (inv) {
4422             TCGv_i64 c = tcg_constant_i64(mask);
4423             tcg_gen_or_i64(t, t, c);
4424             ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
4425         } else {
4426             tcg_gen_andi_i64(t, t, mask);
4427             ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
4428         }
4429     } else {
4430         unsigned cbit = (a->y ^ 1) - 1;
4431 
4432         tcg_gen_extract_i64(t, t, 21 - cbit, 1);
4433         ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4434     }
4435 
4436  done:
4437     return nullify_end(ctx);
4438 }
4439 
4440 /*
4441  * Float class 2
4442  */
4443 
4444 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
4445 {
4446     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
4447 }
4448 
4449 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
4450 {
4451     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
4452 }
4453 
4454 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
4455 {
4456     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
4457 }
4458 
4459 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
4460 {
4461     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
4462 }
4463 
4464 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
4465 {
4466     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
4467 }
4468 
4469 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
4470 {
4471     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
4472 }
4473 
4474 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
4475 {
4476     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
4477 }
4478 
4479 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
4480 {
4481     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
4482 }
4483 
4484 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
4485 {
4486     TCGv_i64 x, y;
4487 
4488     nullify_over(ctx);
4489 
4490     x = load_frw0_i64(a->r1);
4491     y = load_frw0_i64(a->r2);
4492     tcg_gen_mul_i64(x, x, y);
4493     save_frd(a->t, x);
4494 
4495     return nullify_end(ctx);
4496 }
4497 
4498 /* Convert the fmpyadd single-precision register encodings to standard.  */
4499 static inline int fmpyadd_s_reg(unsigned r)
4500 {
4501     return (r & 16) * 2 + 16 + (r & 15);
4502 }
4503 
4504 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4505 {
4506     int tm = fmpyadd_s_reg(a->tm);
4507     int ra = fmpyadd_s_reg(a->ra);
4508     int ta = fmpyadd_s_reg(a->ta);
4509     int rm2 = fmpyadd_s_reg(a->rm2);
4510     int rm1 = fmpyadd_s_reg(a->rm1);
4511 
4512     nullify_over(ctx);
4513 
4514     do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4515     do_fop_weww(ctx, ta, ta, ra,
4516                 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4517 
4518     return nullify_end(ctx);
4519 }
4520 
4521 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
4522 {
4523     return do_fmpyadd_s(ctx, a, false);
4524 }
4525 
4526 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
4527 {
4528     return do_fmpyadd_s(ctx, a, true);
4529 }
4530 
4531 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4532 {
4533     nullify_over(ctx);
4534 
4535     do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4536     do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4537                 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4538 
4539     return nullify_end(ctx);
4540 }
4541 
4542 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4543 {
4544     return do_fmpyadd_d(ctx, a, false);
4545 }
4546 
4547 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4548 {
4549     return do_fmpyadd_d(ctx, a, true);
4550 }
4551 
4552 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4553 {
4554     TCGv_i32 x, y, z;
4555 
4556     nullify_over(ctx);
4557     x = load_frw0_i32(a->rm1);
4558     y = load_frw0_i32(a->rm2);
4559     z = load_frw0_i32(a->ra3);
4560 
4561     if (a->neg) {
4562         gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
4563     } else {
4564         gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
4565     }
4566 
4567     save_frw_i32(a->t, x);
4568     return nullify_end(ctx);
4569 }
4570 
4571 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4572 {
4573     TCGv_i64 x, y, z;
4574 
4575     nullify_over(ctx);
4576     x = load_frd0(a->rm1);
4577     y = load_frd0(a->rm2);
4578     z = load_frd0(a->ra3);
4579 
4580     if (a->neg) {
4581         gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
4582     } else {
4583         gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
4584     }
4585 
4586     save_frd(a->t, x);
4587     return nullify_end(ctx);
4588 }
4589 
4590 /* Emulate PDC BTLB, called by SeaBIOS-hppa */
4591 static bool trans_diag_btlb(DisasContext *ctx, arg_diag_btlb *a)
4592 {
4593     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4594 #ifndef CONFIG_USER_ONLY
4595     nullify_over(ctx);
4596     gen_helper_diag_btlb(tcg_env);
4597     return nullify_end(ctx);
4598 #endif
4599 }
4600 
4601 /* Print char in %r26 to first serial console, used by SeaBIOS-hppa */
4602 static bool trans_diag_cout(DisasContext *ctx, arg_diag_cout *a)
4603 {
4604     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4605 #ifndef CONFIG_USER_ONLY
4606     nullify_over(ctx);
4607     gen_helper_diag_console_output(tcg_env);
4608     return nullify_end(ctx);
4609 #endif
4610 }
4611 
4612 static bool trans_diag_getshadowregs_pa1(DisasContext *ctx, arg_empty *a)
4613 {
4614     return !ctx->is_pa20 && do_getshadowregs(ctx);
4615 }
4616 
4617 static bool trans_diag_getshadowregs_pa2(DisasContext *ctx, arg_empty *a)
4618 {
4619     return ctx->is_pa20 && do_getshadowregs(ctx);
4620 }
4621 
4622 static bool trans_diag_putshadowregs_pa1(DisasContext *ctx, arg_empty *a)
4623 {
4624     return !ctx->is_pa20 && do_putshadowregs(ctx);
4625 }
4626 
4627 static bool trans_diag_putshadowregs_pa2(DisasContext *ctx, arg_empty *a)
4628 {
4629     return ctx->is_pa20 && do_putshadowregs(ctx);
4630 }
4631 
4632 static bool trans_diag_unimp(DisasContext *ctx, arg_diag_unimp *a)
4633 {
4634     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4635     qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
4636     return true;
4637 }
4638 
4639 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4640 {
4641     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4642     int bound;
4643 
4644     ctx->cs = cs;
4645     ctx->tb_flags = ctx->base.tb->flags;
4646     ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));
4647 
4648 #ifdef CONFIG_USER_ONLY
4649     ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
4650     ctx->mmu_idx = MMU_USER_IDX;
4651     ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
4652     ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
4653     ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
4654 #else
4655     ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4656     ctx->mmu_idx = (ctx->tb_flags & PSW_D
4657                     ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
4658                     : ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);
4659 
4660     /* Recover the IAOQ values from the GVA + PRIV.  */
4661     uint64_t cs_base = ctx->base.tb->cs_base;
4662     uint64_t iasq_f = cs_base & ~0xffffffffull;
4663     int32_t diff = cs_base;
4664 
4665     ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4666     ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4667 #endif
4668     ctx->iaoq_n = -1;
4669     ctx->iaoq_n_var = NULL;
4670 
4671     ctx->zero = tcg_constant_i64(0);
4672 
4673     /* Bound the number of instructions by those left on the page.  */
4674     bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
4675     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4676 }
4677 
4678 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4679 {
4680     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4681 
4682     /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
4683     ctx->null_cond = cond_make_f();
4684     ctx->psw_n_nonzero = false;
4685     if (ctx->tb_flags & PSW_N) {
4686         ctx->null_cond.c = TCG_COND_ALWAYS;
4687         ctx->psw_n_nonzero = true;
4688     }
4689     ctx->null_lab = NULL;
4690 }
4691 
4692 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4693 {
4694     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4695 
4696     tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b, 0);
4697     ctx->insn_start = tcg_last_op();
4698 }
4699 
4700 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4701 {
4702     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4703     CPUHPPAState *env = cpu_env(cs);
4704     DisasJumpType ret;
4705 
4706     /* Execute one insn.  */
4707 #ifdef CONFIG_USER_ONLY
4708     if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4709         do_page_zero(ctx);
4710         ret = ctx->base.is_jmp;
4711         assert(ret != DISAS_NEXT);
4712     } else
4713 #endif
4714     {
4715         /* Always fetch the insn, even if nullified, so that we check
4716            the page permissions for execute.  */
4717         uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
4718 
4719         /* Set up the IA queue for the next insn.
4720            This will be overwritten by a branch.  */
4721         if (ctx->iaoq_b == -1) {
4722             ctx->iaoq_n = -1;
4723             ctx->iaoq_n_var = tcg_temp_new_i64();
4724             tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4725         } else {
4726             ctx->iaoq_n = ctx->iaoq_b + 4;
4727             ctx->iaoq_n_var = NULL;
4728         }
4729 
4730         if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4731             ctx->null_cond.c = TCG_COND_NEVER;
4732             ret = DISAS_NEXT;
4733         } else {
4734             ctx->insn = insn;
4735             if (!decode(ctx, insn)) {
4736                 gen_illegal(ctx);
4737             }
4738             ret = ctx->base.is_jmp;
4739             assert(ctx->null_lab == NULL);
4740         }
4741     }
4742 
4743     /* Advance the insn queue.  Note that this check also detects
4744        a priority change within the instruction queue.  */
4745     if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4746         if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4747             && use_goto_tb(ctx, ctx->iaoq_b)
4748             && (ctx->null_cond.c == TCG_COND_NEVER
4749                 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4750             nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4751             gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4752             ctx->base.is_jmp = ret = DISAS_NORETURN;
4753         } else {
4754             ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4755         }
4756     }
4757     ctx->iaoq_f = ctx->iaoq_b;
4758     ctx->iaoq_b = ctx->iaoq_n;
4759     ctx->base.pc_next += 4;
4760 
4761     switch (ret) {
4762     case DISAS_NORETURN:
4763     case DISAS_IAQ_N_UPDATED:
4764         break;
4765 
4766     case DISAS_NEXT:
4767     case DISAS_IAQ_N_STALE:
4768     case DISAS_IAQ_N_STALE_EXIT:
4769         if (ctx->iaoq_f == -1) {
4770             copy_iaoq_entry(ctx, cpu_iaoq_f, -1, cpu_iaoq_b);
4771             copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4772 #ifndef CONFIG_USER_ONLY
4773             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4774 #endif
4775             nullify_save(ctx);
4776             ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4777                                 ? DISAS_EXIT
4778                                 : DISAS_IAQ_N_UPDATED);
4779         } else if (ctx->iaoq_b == -1) {
4780             copy_iaoq_entry(ctx, cpu_iaoq_b, -1, ctx->iaoq_n_var);
4781         }
4782         break;
4783 
4784     default:
4785         g_assert_not_reached();
4786     }
4787 }
4788 
4789 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4790 {
4791     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4792     DisasJumpType is_jmp = ctx->base.is_jmp;
4793 
4794     switch (is_jmp) {
4795     case DISAS_NORETURN:
4796         break;
4797     case DISAS_TOO_MANY:
4798     case DISAS_IAQ_N_STALE:
4799     case DISAS_IAQ_N_STALE_EXIT:
4800         copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4801         copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4802         nullify_save(ctx);
4803         /* FALLTHRU */
4804     case DISAS_IAQ_N_UPDATED:
4805         if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
4806             tcg_gen_lookup_and_goto_ptr();
4807             break;
4808         }
4809         /* FALLTHRU */
4810     case DISAS_EXIT:
4811         tcg_gen_exit_tb(NULL, 0);
4812         break;
4813     default:
4814         g_assert_not_reached();
4815     }
4816 }
4817 
4818 static void hppa_tr_disas_log(const DisasContextBase *dcbase,
4819                               CPUState *cs, FILE *logfile)
4820 {
4821     target_ulong pc = dcbase->pc_first;
4822 
4823 #ifdef CONFIG_USER_ONLY
4824     switch (pc) {
4825     case 0x00:
4826         fprintf(logfile, "IN:\n0x00000000:  (null)\n");
4827         return;
4828     case 0xb0:
4829         fprintf(logfile, "IN:\n0x000000b0:  light-weight-syscall\n");
4830         return;
4831     case 0xe0:
4832         fprintf(logfile, "IN:\n0x000000e0:  set-thread-pointer-syscall\n");
4833         return;
4834     case 0x100:
4835         fprintf(logfile, "IN:\n0x00000100:  syscall\n");
4836         return;
4837     }
4838 #endif
4839 
4840     fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
4841     target_disas(logfile, cs, pc, dcbase->tb->size);
4842 }
4843 
4844 static const TranslatorOps hppa_tr_ops = {
4845     .init_disas_context = hppa_tr_init_disas_context,
4846     .tb_start           = hppa_tr_tb_start,
4847     .insn_start         = hppa_tr_insn_start,
4848     .translate_insn     = hppa_tr_translate_insn,
4849     .tb_stop            = hppa_tr_tb_stop,
4850     .disas_log          = hppa_tr_disas_log,
4851 };
4852 
4853 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
4854                            vaddr pc, void *host_pc)
4855 {
4856     DisasContext ctx;
4857     translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
4858 }
4859