/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H


/* Since we have a distinction between register size and address size,
   we need to redefine all of these.  */

#undef TCGv
#undef tcg_temp_new
#undef tcg_global_mem_new

#if TARGET_LONG_BITS == 64
#define TCGv_tl              TCGv_i64
#define tcg_temp_new_tl      tcg_temp_new_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl  tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl              TCGv_i32
#define tcg_temp_new_tl      tcg_temp_new_i32
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i32
#endif

#if TARGET_REGISTER_BITS == 64
#define TCGv_reg             TCGv_i64

#define tcg_temp_new         tcg_temp_new_i64
#define tcg_global_mem_new   tcg_global_mem_new_i64

#define tcg_gen_movi_reg     tcg_gen_movi_i64
#define tcg_gen_mov_reg      tcg_gen_mov_i64
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg    tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg    tcg_gen_ld32s_i64
#define tcg_gen_ld_reg       tcg_gen_ld_i64
#define tcg_gen_st8_reg      tcg_gen_st8_i64
#define tcg_gen_st16_reg     tcg_gen_st16_i64
#define tcg_gen_st32_reg     tcg_gen_st32_i64
#define tcg_gen_st_reg       tcg_gen_st_i64
#define tcg_gen_add_reg      tcg_gen_add_i64
#define tcg_gen_addi_reg     tcg_gen_addi_i64
#define tcg_gen_sub_reg      tcg_gen_sub_i64
#define tcg_gen_neg_reg      tcg_gen_neg_i64
#define tcg_gen_subfi_reg    tcg_gen_subfi_i64
#define tcg_gen_subi_reg     tcg_gen_subi_i64
#define tcg_gen_and_reg      tcg_gen_and_i64
#define tcg_gen_andi_reg     tcg_gen_andi_i64
#define tcg_gen_or_reg       tcg_gen_or_i64
#define tcg_gen_ori_reg      tcg_gen_ori_i64
#define tcg_gen_xor_reg      tcg_gen_xor_i64
#define tcg_gen_xori_reg     tcg_gen_xori_i64
#define tcg_gen_not_reg      tcg_gen_not_i64
#define tcg_gen_shl_reg      tcg_gen_shl_i64
#define tcg_gen_shli_reg     tcg_gen_shli_i64
#define tcg_gen_shr_reg      tcg_gen_shr_i64
#define tcg_gen_shri_reg     tcg_gen_shri_i64
#define tcg_gen_sar_reg      tcg_gen_sar_i64
#define tcg_gen_sari_reg     tcg_gen_sari_i64
#define tcg_gen_brcond_reg   tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg  tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg      tcg_gen_mul_i64
#define tcg_gen_muli_reg     tcg_gen_muli_i64
#define tcg_gen_div_reg      tcg_gen_div_i64
#define tcg_gen_rem_reg      tcg_gen_rem_i64
#define tcg_gen_divu_reg     tcg_gen_divu_i64
#define tcg_gen_remu_reg     tcg_gen_remu_i64
#define tcg_gen_discard_reg  tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg  tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64  tcg_gen_mov_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg   tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg   tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg  tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i64
#define tcg_gen_eqv_reg      tcg_gen_eqv_i64
#define tcg_gen_nand_reg     tcg_gen_nand_i64
#define tcg_gen_nor_reg      tcg_gen_nor_i64
#define tcg_gen_orc_reg      tcg_gen_orc_i64
#define tcg_gen_clz_reg      tcg_gen_clz_i64
#define tcg_gen_ctz_reg      tcg_gen_ctz_i64
#define tcg_gen_clzi_reg     tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg     tcg_gen_rotl_i64
#define tcg_gen_rotli_reg    tcg_gen_rotli_i64
#define tcg_gen_rotr_reg     tcg_gen_rotr_i64
#define tcg_gen_rotri_reg    tcg_gen_rotri_i64
#define tcg_gen_deposit_reg  tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg  tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_gen_extract2_reg tcg_gen_extract2_i64
#define tcg_constant_reg     tcg_constant_i64
#define tcg_gen_movcond_reg  tcg_gen_movcond_i64
#define tcg_gen_add2_reg     tcg_gen_add2_i64
#define tcg_gen_sub2_reg     tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#define tcg_gen_trunc_reg_ptr   tcg_gen_trunc_i64_ptr
#else
#define TCGv_reg             TCGv_i32
#define tcg_temp_new         tcg_temp_new_i32
#define tcg_global_mem_new   tcg_global_mem_new_i32

#define tcg_gen_movi_reg     tcg_gen_movi_i32
#define tcg_gen_mov_reg      tcg_gen_mov_i32
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg    tcg_gen_ld_i32
#define tcg_gen_ld32s_reg    tcg_gen_ld_i32
#define tcg_gen_ld_reg       tcg_gen_ld_i32
#define tcg_gen_st8_reg      tcg_gen_st8_i32
#define tcg_gen_st16_reg     tcg_gen_st16_i32
#define tcg_gen_st32_reg     tcg_gen_st32_i32
#define tcg_gen_st_reg       tcg_gen_st_i32
#define tcg_gen_add_reg      tcg_gen_add_i32
#define tcg_gen_addi_reg     tcg_gen_addi_i32
#define tcg_gen_sub_reg      tcg_gen_sub_i32
#define tcg_gen_neg_reg      tcg_gen_neg_i32
#define tcg_gen_subfi_reg    tcg_gen_subfi_i32
#define tcg_gen_subi_reg     tcg_gen_subi_i32
#define tcg_gen_and_reg      tcg_gen_and_i32
#define tcg_gen_andi_reg     tcg_gen_andi_i32
#define tcg_gen_or_reg       tcg_gen_or_i32
#define tcg_gen_ori_reg      tcg_gen_ori_i32
#define tcg_gen_xor_reg      tcg_gen_xor_i32
#define tcg_gen_xori_reg     tcg_gen_xori_i32
#define tcg_gen_not_reg      tcg_gen_not_i32
#define tcg_gen_shl_reg      tcg_gen_shl_i32
#define tcg_gen_shli_reg     tcg_gen_shli_i32
#define tcg_gen_shr_reg      tcg_gen_shr_i32
#define tcg_gen_shri_reg     tcg_gen_shri_i32
#define tcg_gen_sar_reg      tcg_gen_sar_i32
#define tcg_gen_sari_reg     tcg_gen_sari_i32
#define tcg_gen_brcond_reg   tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg  tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg      tcg_gen_mul_i32
#define tcg_gen_muli_reg     tcg_gen_muli_i32
#define tcg_gen_div_reg      tcg_gen_div_i32
#define tcg_gen_rem_reg      tcg_gen_rem_i32
#define tcg_gen_divu_reg     tcg_gen_divu_i32
#define tcg_gen_remu_reg     tcg_gen_remu_i32
#define tcg_gen_discard_reg  tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg  tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64  tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg   tcg_gen_mov_i32
#define tcg_gen_ext32s_reg   tcg_gen_mov_i32
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i32
#define tcg_gen_eqv_reg      tcg_gen_eqv_i32
#define tcg_gen_nand_reg     tcg_gen_nand_i32
#define tcg_gen_nor_reg      tcg_gen_nor_i32
#define tcg_gen_orc_reg      tcg_gen_orc_i32
#define tcg_gen_clz_reg      tcg_gen_clz_i32
#define tcg_gen_ctz_reg      tcg_gen_ctz_i32
#define tcg_gen_clzi_reg     tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg     tcg_gen_rotl_i32
#define tcg_gen_rotli_reg    tcg_gen_rotli_i32
#define tcg_gen_rotr_reg     tcg_gen_rotr_i32
#define tcg_gen_rotri_reg    tcg_gen_rotri_i32
#define tcg_gen_deposit_reg  tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg  tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_gen_extract2_reg tcg_gen_extract2_i32
#define tcg_constant_reg     tcg_constant_i32
#define tcg_gen_movcond_reg  tcg_gen_movcond_i32
#define tcg_gen_add2_reg     tcg_gen_add2_i32
#define tcg_gen_sub2_reg     tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#define tcg_gen_trunc_reg_ptr   tcg_gen_ext_i32_ptr
#endif /* TARGET_REGISTER_BITS */

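/* A comparison, "a0 <c> a1", in a form TCG can evaluate lazily;
   TCG_COND_NEVER and TCG_COND_ALWAYS are used without operands
   (a0 == a1 == NULL).  */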
typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
} DisasCond;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    target_ureg iaoq_f;
    target_ureg iaoq_b;
    target_ureg iaoq_n;
    TCGv_reg iaoq_n_var;

    DisasCond null_cond;
    TCGLabel *null_lab;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;
    bool is_pa20;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)  (C)->unalign
#else
#define UNALIGN(C)  MO_ALIGN
#endif

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}

/* The space register field is passed on inverted, to indicate that a
   field value of 0 selects sr0 itself, rather than a space inferred
   from the base register.  */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}
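/* The result is always negative, so an explicit sr0 (~0 == -1) remains
   distinguishable from "no space specified"; space_select() below
   recovers the original field with sp = ~sp.  */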

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}
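/* That is: m=0 -> 0 (no update), m=1,a=0 -> 1 (post-modify), and
   m=1,a=1 -> -1 (pre-modify); see the comment above do_load_32 below
   for the meaning of the tri-state value.  */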

/* Convert the sign of the displacement to a pre or post-modify.  */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for fp memory ops.  */
static int expand_shl3(DisasContext *ctx, int val)
{
    return val << 3;
}

/* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}


/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
#define DISAS_EXIT                  DISAS_TARGET_3

/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
static TCGv_reg cpu_psw_cb;
static TCGv_reg cpu_psw_cb_msb;

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not TCG globals, so that we can index them at run time;
       srH aliases sr[4] for the TB_FLAG_SR_SAME fast path.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
{
    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){
        .c = c, .a0 = a0, .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    TCGv_reg tmp = tcg_temp_new();
    tcg_gen_mov_reg(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    DisasCond r = { .c = c };

    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_reg(r.a1, a1);

    return r;
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_reg t = tcg_temp_new();
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return tcg_temp_new();
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#if HOST_BIG_ENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

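/* The 32-bit floating point registers are the two halves of the 64-bit
   fr[] elements: rt 0-31 names the most significant (left) word and
   rt 32-63 the least significant (right) word.  */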
static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_movi_i32(ret, 0);
        return ret;
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    if (rt == 0) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        tcg_gen_ld32u_i64(ret, tcg_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
    }
    return ret;
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_movi_i64(ret, 0);
        return ret;
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}
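/* The usual shape of a translate function for a complex insn is thus:
 *
 *     nullify_over(ctx);
 *     ... emit the implementation ...
 *     return nullify_end(ctx);
 */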

static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
{
    if (unlikely(ival == -1)) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_movi_reg(dest, ival);
    }
}

static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(tcg_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
                   tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

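/* Chain directly to the successor TB when both queue entries are known
   constants and linkable; otherwise update the IAOQ globals and let the
   main loop look up (or translate) the successor.  */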
static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_reg(cpu_iaoq_f, f);
        tcg_gen_movi_reg(cpu_iaoq_b, b);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/* Need extensions from TCGv_i32 to TCGv_reg. */
static bool cond_need_ext(DisasContext *ctx, bool d)
{
    return TARGET_REGISTER_BITS == 64 && !d;
}

/*
 * Compute the conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the PA-RISC 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(unsigned cf, TCGv_reg res,
                         TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V)) */
        tmp = tcg_temp_new();
        tcg_gen_xor_reg(tmp, res, sv);
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new();
        tcg_gen_eqv_reg(tmp, res, sv);
        tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
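        /* -C is all-ones when the carry is set, so (-C & res) == 0
           exactly when !C | Z.  */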
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
                             TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        return do_cond(cf, res, NULL, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
{
    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        return cond_make_0(TCG_COND_EQ, res);
    case 3:  /* <> */
        return cond_make_0(TCG_COND_NE, res);
    case 4:  /* < */
        return cond_make_0(TCG_COND_LT, res);
    case 5:  /* >= */
        return cond_make_0(TCG_COND_GE, res);
    case 6:  /* <= */
        return cond_make_0(TCG_COND_LE, res);
    case 7:  /* > */
        return cond_make_0(TCG_COND_GT, res);

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(cf, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(c * 2 + f, res);
}

/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
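        /* cb = (in1 & in2) | ((in1 | in2) & ~res), which for
           res = in1 + in2 is precisely the per-bit carry-out.  */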
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
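        /* E.g. res = 0x12003456: res - 0x01010101 = 0x10ff3355; anding
           with ~res and then 0x80808080 leaves 0x00800000, nonzero, so
           the zero byte is detected.  */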
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

static TCGv_reg get_carry(DisasContext *ctx, bool d,
                          TCGv_reg cb, TCGv_reg cb_msb)
{
    if (cond_need_ext(ctx, d)) {
        TCGv_reg t = tcg_temp_new();
        tcg_gen_extract_reg(t, cb, 32, 1);
        return t;
    }
    return cb_msb;
}

static TCGv_reg get_psw_carry(DisasContext *ctx, bool d)
{
    return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
}

/* Compute signed overflow for addition.  */
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = tcg_temp_new();
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);

    return sv;
}

/* Compute signed overflow for subtraction.  */
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = tcg_temp_new();
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);

    return sv;
}

static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
    TCGv_reg dest, cb, cb_msb, cb_cond, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;
    bool d = false;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;
    cb_cond = NULL;

    if (shift) {
        tmp = tcg_temp_new();
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        TCGv_reg zero = tcg_constant_reg(0);
        cb_msb = tcg_temp_new();
        cb = tcg_temp_new();

        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb,
                             get_psw_carry(ctx, d), zero);
        }
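        /* in1 ^ in2 ^ dest recovers the carry *into* each bit of the
           addition; together with cb_msb, the carry out of the msb,
           this forms the carry vector kept in psw_cb/psw_cb_msb.  */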
        tcg_gen_xor_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
        if (cond_need_cb(c)) {
            cb_cond = get_carry(ctx, d, cb, cb_msb);
        }
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, get_psw_carry(ctx, d));
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(cf, dest, cb_cond, sv);
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_reg(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;
    bool d = false;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_constant_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, get_psw_carry(ctx, d), zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /*
         * DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
         * operations by seeding the high word with 1 and subtracting.
         */
        TCGv_reg one = tcg_constant_reg(1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, one, in2, zero);
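        /* Here cb_msb = 1 - borrow, i.e. the carry out of IN1 + ~IN2 + 1,
           and eqv(in1, in2) ^ dest == in1 ^ ~in2 ^ dest recovers the
           carry into each bit, as in do_add above.  */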
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, get_carry(ctx, d, cb, cb_msb), sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_reg(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                      TCGv_reg in2, unsigned cf)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned cf,
                   void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(cf, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
                       void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                    TCGv_reg in2, unsigned cf, bool is_tc,
                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        if (is_tc) {
            TCGv_reg tmp = tcg_temp_new();
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(tcg_env, tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = tcg_temp_new_tl();
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = tcg_temp_new_tl();

    tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
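    /* I.e. a byte offset, (top two bits of base) * 8, indexing the
       64-bit sr[4-7] slots.  */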
    tcg_gen_trunc_reg_ptr(ptr, tmp);

    tcg_gen_add_ptr(ptr, ptr, tcg_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));

    return spc;
}
#endif

static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = tcg_temp_new();
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = tcg_temp_new();
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
#ifdef CONFIG_USER_ONLY
    *pgva = (modify <= 0 ? ofs : base);
#else
    TCGv_tl addr = tcg_temp_new_tl();
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    if (ctx->tb_flags & PSW_W) {
        tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
    }
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
    *pgva = addr;
#endif
}

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

#if TARGET_REGISTER_BITS == 64
#define do_load_reg   do_load_64
#define do_store_reg  do_store_64
#else
#define do_load_reg   do_load_32
#define do_store_reg  do_store_32
#endif

static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, target_sreg disp,
                    unsigned sp, int modify, MemOp mop)
{
    TCGv_reg dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure that, if RT == RB, we see the result of the load.  */
1545         dest = tcg_temp_new();
1546     }
1547     do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
1548     save_gpr(ctx, rt, dest);
1549 
1550     return nullify_end(ctx);
1551 }
1552 
1553 static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1554                       unsigned rx, int scale, target_sreg disp,
1555                       unsigned sp, int modify)
1556 {
1557     TCGv_i32 tmp;
1558 
1559     nullify_over(ctx);
1560 
1561     tmp = tcg_temp_new_i32();
1562     do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1563     save_frw_i32(rt, tmp);
1564 
1565     if (rt == 0) {
1566         gen_helper_loaded_fr0(tcg_env);
1567     }
1568 
1569     return nullify_end(ctx);
1570 }
1571 
1572 static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
1573 {
1574     return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1575                      a->disp, a->sp, a->m);
1576 }
1577 
1578 static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1579                       unsigned rx, int scale, target_sreg disp,
1580                       unsigned sp, int modify)
1581 {
1582     TCGv_i64 tmp;
1583 
1584     nullify_over(ctx);
1585 
1586     tmp = tcg_temp_new_i64();
1587     do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1588     save_frd(rt, tmp);
1589 
1590     if (rt == 0) {
1591         gen_helper_loaded_fr0(tcg_env);
1592     }
1593 
1594     return nullify_end(ctx);
1595 }
1596 
1597 static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
1598 {
1599     return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1600                      a->disp, a->sp, a->m);
1601 }
1602 
1603 static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1604                      target_sreg disp, unsigned sp,
1605                      int modify, MemOp mop)
1606 {
1607     nullify_over(ctx);
1608     do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
1609     return nullify_end(ctx);
1610 }
1611 
1612 static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1613                        unsigned rx, int scale, target_sreg disp,
1614                        unsigned sp, int modify)
1615 {
1616     TCGv_i32 tmp;
1617 
1618     nullify_over(ctx);
1619 
1620     tmp = load_frw_i32(rt);
1621     do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1622 
1623     return nullify_end(ctx);
1624 }
1625 
1626 static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
1627 {
1628     return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1629                       a->disp, a->sp, a->m);
1630 }
1631 
1632 static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1633                        unsigned rx, int scale, target_sreg disp,
1634                        unsigned sp, int modify)
1635 {
1636     TCGv_i64 tmp;
1637 
1638     nullify_over(ctx);
1639 
1640     tmp = load_frd(rt);
1641     do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1642 
1643     return nullify_end(ctx);
1644 }
1645 
1646 static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
1647 {
1648     return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1649                       a->disp, a->sp, a->m);
1650 }
1651 
1652 static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1653                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1654 {
1655     TCGv_i32 tmp;
1656 
1657     nullify_over(ctx);
1658     tmp = load_frw0_i32(ra);
1659 
1660     func(tmp, tcg_env, tmp);
1661 
1662     save_frw_i32(rt, tmp);
1663     return nullify_end(ctx);
1664 }
1665 
1666 static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1667                        void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1668 {
1669     TCGv_i32 dst;
1670     TCGv_i64 src;
1671 
1672     nullify_over(ctx);
1673     src = load_frd(ra);
1674     dst = tcg_temp_new_i32();
1675 
1676     func(dst, tcg_env, src);
1677 
1678     save_frw_i32(rt, dst);
1679     return nullify_end(ctx);
1680 }
1681 
1682 static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1683                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1684 {
1685     TCGv_i64 tmp;
1686 
1687     nullify_over(ctx);
1688     tmp = load_frd0(ra);
1689 
1690     func(tmp, tcg_env, tmp);
1691 
1692     save_frd(rt, tmp);
1693     return nullify_end(ctx);
1694 }
1695 
1696 static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1697                        void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1698 {
1699     TCGv_i32 src;
1700     TCGv_i64 dst;
1701 
1702     nullify_over(ctx);
1703     src = load_frw0_i32(ra);
1704     dst = tcg_temp_new_i64();
1705 
1706     func(dst, tcg_env, src);
1707 
1708     save_frd(rt, dst);
1709     return nullify_end(ctx);
1710 }
1711 
1712 static bool do_fop_weww(DisasContext *ctx, unsigned rt,
1713                         unsigned ra, unsigned rb,
1714                         void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
1715 {
1716     TCGv_i32 a, b;
1717 
1718     nullify_over(ctx);
1719     a = load_frw0_i32(ra);
1720     b = load_frw0_i32(rb);
1721 
1722     func(a, tcg_env, a, b);
1723 
1724     save_frw_i32(rt, a);
1725     return nullify_end(ctx);
1726 }
1727 
1728 static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
1729                         unsigned ra, unsigned rb,
1730                         void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
1731 {
1732     TCGv_i64 a, b;
1733 
1734     nullify_over(ctx);
1735     a = load_frd0(ra);
1736     b = load_frd0(rb);
1737 
1738     func(a, tcg_env, a, b);
1739 
1740     save_frd(rt, a);
1741     return nullify_end(ctx);
1742 }
1743 
1744 /* Emit an unconditional branch to a direct target, which may or may not
1745    have already had nullification handled.  */
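     /* When no nullification is pending we can simply retarget the
        translator's notion of the next insn; otherwise we must emit
        two goto_tb exits, one for the taken branch and one for the
        nullified fall-through.  */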
1746 static bool do_dbranch(DisasContext *ctx, target_ureg dest,
1747                        unsigned link, bool is_n)
1748 {
1749     if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1750         if (link != 0) {
1751             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1752         }
1753         ctx->iaoq_n = dest;
1754         if (is_n) {
1755             ctx->null_cond.c = TCG_COND_ALWAYS;
1756         }
1757     } else {
1758         nullify_over(ctx);
1759 
1760         if (link != 0) {
1761             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1762         }
1763 
1764         if (is_n && use_nullify_skip(ctx)) {
1765             nullify_set(ctx, 0);
1766             gen_goto_tb(ctx, 0, dest, dest + 4);
1767         } else {
1768             nullify_set(ctx, is_n);
1769             gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
1770         }
1771 
1772         nullify_end(ctx);
1773 
1774         nullify_set(ctx, 0);
1775         gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
1776         ctx->base.is_jmp = DISAS_NORETURN;
1777     }
1778     return true;
1779 }
1780 
1781 /* Emit a conditional branch to a direct target.  If the branch itself
1782    is nullified, we should have already used nullify_over.  */
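     /* Recall the PA-RISC rule for the ,n completer: a taken forward
        branch or an untaken backward branch nullifies the following
        insn.  The taken and not-taken legs below compute N that way.  */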
1783 static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
1784                        DisasCond *cond)
1785 {
1786     target_ureg dest = iaoq_dest(ctx, disp);
1787     TCGLabel *taken = NULL;
1788     TCGCond c = cond->c;
1789     bool n;
1790 
1791     assert(ctx->null_cond.c == TCG_COND_NEVER);
1792 
1793     /* Handle ALWAYS and NEVER as direct branches.  */
1794     if (c == TCG_COND_ALWAYS) {
1795         return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
1796     }
1797     if (c == TCG_COND_NEVER) {
1798         return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
1799     }
1800 
1801     taken = gen_new_label();
1802     tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
1803     cond_free(cond);
1804 
1805     /* Not taken: Condition not satisfied; nullify on backward branches. */
1806     n = is_n && disp < 0;
1807     if (n && use_nullify_skip(ctx)) {
1808         nullify_set(ctx, 0);
1809         gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
1810     } else {
1811         if (!n && ctx->null_lab) {
1812             gen_set_label(ctx->null_lab);
1813             ctx->null_lab = NULL;
1814         }
1815         nullify_set(ctx, n);
1816         if (ctx->iaoq_n == -1) {
1817             /* The temporary iaoq_n_var died at the branch above.
1818                Regenerate it here instead of saving it.  */
1819             tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
1820         }
1821         gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
1822     }
1823 
1824     gen_set_label(taken);
1825 
1826     /* Taken: Condition satisfied; nullify on forward branches.  */
1827     n = is_n && disp >= 0;
1828     if (n && use_nullify_skip(ctx)) {
1829         nullify_set(ctx, 0);
1830         gen_goto_tb(ctx, 1, dest, dest + 4);
1831     } else {
1832         nullify_set(ctx, n);
1833         gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
1834     }
1835 
1836     /* Not taken: the branch itself was nullified.  */
1837     if (ctx->null_lab) {
1838         gen_set_label(ctx->null_lab);
1839         ctx->null_lab = NULL;
1840         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1841     } else {
1842         ctx->base.is_jmp = DISAS_NORETURN;
1843     }
1844     return true;
1845 }
1846 
1847 /* Emit an unconditional branch to an indirect target.  This handles
1848    nullification of the branch itself.  */
1849 static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
1850                        unsigned link, bool is_n)
1851 {
1852     TCGv_reg a0, a1, next, tmp;
1853     TCGCond c;
1854 
1855     assert(ctx->null_lab == NULL);
1856 
1857     if (ctx->null_cond.c == TCG_COND_NEVER) {
1858         if (link != 0) {
1859             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1860         }
1861         next = tcg_temp_new();
1862         tcg_gen_mov_reg(next, dest);
1863         if (is_n) {
1864             if (use_nullify_skip(ctx)) {
1865                 tcg_gen_mov_reg(cpu_iaoq_f, next);
1866                 tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
1867                 nullify_set(ctx, 0);
1868                 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1869                 return true;
1870             }
1871             ctx->null_cond.c = TCG_COND_ALWAYS;
1872         }
1873         ctx->iaoq_n = -1;
1874         ctx->iaoq_n_var = next;
1875     } else if (is_n && use_nullify_skip(ctx)) {
1876         /* The (conditional) branch, B, nullifies the next insn, N,
1877            and we're allowed to skip execution of N (no single-step or
1878            tracepoint in effect).  Since the goto_ptr that we must use
1879            for the indirect branch consumes no special resources, we
1880            can (conditionally) skip B and continue execution.  */
1881         /* The use_nullify_skip test implies we have a known control path.  */
1882         tcg_debug_assert(ctx->iaoq_b != -1);
1883         tcg_debug_assert(ctx->iaoq_n != -1);
1884 
1885         /* We do have to handle the non-local temporary, DEST, before
1886            branching.  Since IAOQ_F is not really live at this point, we
1887            can simply store DEST optimistically.  Similarly with IAOQ_B.  */
1888         tcg_gen_mov_reg(cpu_iaoq_f, dest);
1889         tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);
1890 
1891         nullify_over(ctx);
1892         if (link != 0) {
1893             tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
1894         }
1895         tcg_gen_lookup_and_goto_ptr();
1896         return nullify_end(ctx);
1897     } else {
1898         c = ctx->null_cond.c;
1899         a0 = ctx->null_cond.a0;
1900         a1 = ctx->null_cond.a1;
1901 
1902         tmp = tcg_temp_new();
1903         next = tcg_temp_new();
1904 
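             /* If the branch itself is nullified, fall through to the
                sequential successor, else to DEST.  */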
1905         copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1906         tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
1907         ctx->iaoq_n = -1;
1908         ctx->iaoq_n_var = next;
1909 
1910         if (link != 0) {
1911             tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1912         }
1913 
1914         if (is_n) {
1915             /* The branch nullifies the next insn, which means the state of N
1916                after the branch is the inverse of the state of N that applied
1917                to the branch.  */
1918             tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1919             cond_free(&ctx->null_cond);
1920             ctx->null_cond = cond_make_n();
1921             ctx->psw_n_nonzero = true;
1922         } else {
1923             cond_free(&ctx->null_cond);
1924         }
1925     }
1926     return true;
1927 }
1928 
1929 /* Implement
1930  *    if (IAOQ_Front{30..31} < GR[b]{30..31})
1931  *      IAOQ_Next{30..31} ← GR[b]{30..31};
1932  *    else
1933  *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1934  * which keeps the privilege level from being increased.
1935  */
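     /* E.g. at privilege 2, a target with low bits 1 would raise the
        privilege level and is clamped to (target & -4) | 2, while a
        target with low bits 3 merely lowers it and is used unchanged.
        Recall that 0 is the most privileged level and that the level
        lives in the low two bits of the IAOQ.  */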
1936 static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
1937 {
1938     TCGv_reg dest;
1939     switch (ctx->privilege) {
1940     case 0:
1941         /* Privilege 0 is maximum and is allowed to decrease.  */
1942         return offset;
1943     case 3:
1944         /* Privilege 3 is minimum and is never allowed to increase.  */
1945         dest = tcg_temp_new();
1946         tcg_gen_ori_reg(dest, offset, 3);
1947         break;
1948     default:
1949         dest = tcg_temp_new();
1950         tcg_gen_andi_reg(dest, offset, -4);
1951         tcg_gen_ori_reg(dest, dest, ctx->privilege);
1952         tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
1953         break;
1954     }
1955     return dest;
1956 }
1957 
1958 #ifdef CONFIG_USER_ONLY
1959 /* On Linux, page zero is normally marked execute only + gateway.
1960    Therefore normal read or write is supposed to fail, but specific
1961    offsets have kernel code mapped to raise permissions to implement
1962    system calls.  Handling this via an explicit check here, rather
1963    in than the "be disp(sr2,r0)" instruction that probably sent us
1964    here, is the easiest way to handle the branch delay slot on the
1965    aforementioned BE.  */
1966 static void do_page_zero(DisasContext *ctx)
1967 {
1968     /* If by some means we get here with PSW[N]=1, that implies that
1969        the B,GATE instruction would be skipped, and we'd fault on the
1970        next insn within the privileged page.  */
1971     switch (ctx->null_cond.c) {
1972     case TCG_COND_NEVER:
1973         break;
1974     case TCG_COND_ALWAYS:
1975         tcg_gen_movi_reg(cpu_psw_n, 0);
1976         goto do_sigill;
1977     default:
1978         /* Since this is always the first (and only) insn within the
1979            TB, we should know the state of PSW[N] from TB->FLAGS.  */
1980         g_assert_not_reached();
1981     }
1982 
1983     /* Check that we didn't arrive here via some means that allowed
1984        non-sequential instruction execution.  Normally the PSW[B] bit
1985        detects this by disallowing the B,GATE instruction to execute
1986        under such conditions.  */
1987     if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1988         goto do_sigill;
1989     }
1990 
1991     switch (ctx->iaoq_f & -4) {
1992     case 0x00: /* Null pointer call */
1993         gen_excp_1(EXCP_IMP);
1994         ctx->base.is_jmp = DISAS_NORETURN;
1995         break;
1996 
1997     case 0xb0: /* LWS */
1998         gen_excp_1(EXCP_SYSCALL_LWS);
1999         ctx->base.is_jmp = DISAS_NORETURN;
2000         break;
2001 
2002     case 0xe0: /* SET_THREAD_POINTER */
2003         tcg_gen_st_reg(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
2004         tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
2005         tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
2006         ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
2007         break;
2008 
2009     case 0x100: /* SYSCALL */
2010         gen_excp_1(EXCP_SYSCALL);
2011         ctx->base.is_jmp = DISAS_NORETURN;
2012         break;
2013 
2014     default:
2015     do_sigill:
2016         gen_excp_1(EXCP_ILL);
2017         ctx->base.is_jmp = DISAS_NORETURN;
2018         break;
2019     }
2020 }
2021 #endif
2022 
2023 static bool trans_nop(DisasContext *ctx, arg_nop *a)
2024 {
2025     cond_free(&ctx->null_cond);
2026     return true;
2027 }
2028 
2029 static bool trans_break(DisasContext *ctx, arg_break *a)
2030 {
2031     return gen_excp_iir(ctx, EXCP_BREAK);
2032 }
2033 
2034 static bool trans_sync(DisasContext *ctx, arg_sync *a)
2035 {
2036     /* No point in nullifying the memory barrier.  */
2037     tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2038 
2039     cond_free(&ctx->null_cond);
2040     return true;
2041 }
2042 
2043 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
2044 {
2045     unsigned rt = a->t;
2046     TCGv_reg tmp = dest_gpr(ctx, rt);
2047     tcg_gen_movi_reg(tmp, ctx->iaoq_f);
2048     save_gpr(ctx, rt, tmp);
2049 
2050     cond_free(&ctx->null_cond);
2051     return true;
2052 }
2053 
2054 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
2055 {
2056     unsigned rt = a->t;
2057     unsigned rs = a->sp;
2058     TCGv_i64 t0 = tcg_temp_new_i64();
2059     TCGv_reg t1 = tcg_temp_new();
2060 
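         /* Space registers are kept in the high 32 bits of their
            64-bit slots (see trans_mtsp), hence the shift below.  */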
2061     load_spr(ctx, t0, rs);
2062     tcg_gen_shri_i64(t0, t0, 32);
2063     tcg_gen_trunc_i64_reg(t1, t0);
2064 
2065     save_gpr(ctx, rt, t1);
2066 
2067     cond_free(&ctx->null_cond);
2068     return true;
2069 }
2070 
2071 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
2072 {
2073     unsigned rt = a->t;
2074     unsigned ctl = a->r;
2075     TCGv_reg tmp;
2076 
2077     switch (ctl) {
2078     case CR_SAR:
2079 #ifdef TARGET_HPPA64
2080         if (a->e == 0) {
2081             /* MFSAR without ,W masks low 5 bits.  */
2082             tmp = dest_gpr(ctx, rt);
2083             tcg_gen_andi_reg(tmp, cpu_sar, 31);
2084             save_gpr(ctx, rt, tmp);
2085             goto done;
2086         }
2087 #endif
2088         save_gpr(ctx, rt, cpu_sar);
2089         goto done;
2090     case CR_IT: /* Interval Timer */
2091         /* FIXME: Respect PSW_S bit.  */
2092         nullify_over(ctx);
2093         tmp = dest_gpr(ctx, rt);
2094         if (translator_io_start(&ctx->base)) {
2095             gen_helper_read_interval_timer(tmp);
2096             ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2097         } else {
2098             gen_helper_read_interval_timer(tmp);
2099         }
2100         save_gpr(ctx, rt, tmp);
2101         return nullify_end(ctx);
2102     case 26:
2103     case 27:
2104         break;
2105     default:
2106         /* All other control registers are privileged.  */
2107         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2108         break;
2109     }
2110 
2111     tmp = tcg_temp_new();
2112     tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2113     save_gpr(ctx, rt, tmp);
2114 
2115  done:
2116     cond_free(&ctx->null_cond);
2117     return true;
2118 }
2119 
2120 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2121 {
2122     unsigned rr = a->r;
2123     unsigned rs = a->sp;
2124     TCGv_i64 t64;
2125 
2126     if (rs >= 5) {
2127         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2128     }
2129     nullify_over(ctx);
2130 
2131     t64 = tcg_temp_new_i64();
2132     tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2133     tcg_gen_shli_i64(t64, t64, 32);
2134 
2135     if (rs >= 4) {
2136         tcg_gen_st_i64(t64, tcg_env, offsetof(CPUHPPAState, sr[rs]));
2137         ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2138     } else {
2139         tcg_gen_mov_i64(cpu_sr[rs], t64);
2140     }
2141 
2142     return nullify_end(ctx);
2143 }
2144 
2145 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2146 {
2147     unsigned ctl = a->t;
2148     TCGv_reg reg;
2149     TCGv_reg tmp;
2150 
2151     if (ctl == CR_SAR) {
2152         reg = load_gpr(ctx, a->r);
2153         tmp = tcg_temp_new();
2154         tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
2155         save_or_nullify(ctx, cpu_sar, tmp);
2156 
2157         cond_free(&ctx->null_cond);
2158         return true;
2159     }
2160 
2161     /* All other control registers are privileged or read-only.  */
2162     CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2163 
2164 #ifndef CONFIG_USER_ONLY
2165     nullify_over(ctx);
2166     reg = load_gpr(ctx, a->r);
2167 
2168     switch (ctl) {
2169     case CR_IT:
2170         gen_helper_write_interval_timer(tcg_env, reg);
2171         break;
2172     case CR_EIRR:
2173         gen_helper_write_eirr(tcg_env, reg);
2174         break;
2175     case CR_EIEM:
2176         gen_helper_write_eiem(tcg_env, reg);
2177         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2178         break;
2179 
2180     case CR_IIASQ:
2181     case CR_IIAOQ:
2182         /* FIXME: Respect PSW_Q bit */
2183         /* The write advances the queue and stores to the back element.  */
2184         tmp = tcg_temp_new();
2185         tcg_gen_ld_reg(tmp, tcg_env,
2186                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2187         tcg_gen_st_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2188         tcg_gen_st_reg(reg, tcg_env,
2189                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2190         break;
2191 
2192     case CR_PID1:
2193     case CR_PID2:
2194     case CR_PID3:
2195     case CR_PID4:
2196         tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2197 #ifndef CONFIG_USER_ONLY
2198         gen_helper_change_prot_id(tcg_env);
2199 #endif
2200         break;
2201 
2202     default:
2203         tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2204         break;
2205     }
2206     return nullify_end(ctx);
2207 #endif
2208 }
2209 
2210 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2211 {
2212     TCGv_reg tmp = tcg_temp_new();
2213 
2214     tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
2215     tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
2216     save_or_nullify(ctx, cpu_sar, tmp);
2217 
2218     cond_free(&ctx->null_cond);
2219     return true;
2220 }
2221 
2222 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2223 {
2224     TCGv_reg dest = dest_gpr(ctx, a->t);
2225 
2226 #ifdef CONFIG_USER_ONLY
2227     /* We don't implement space registers in user mode. */
2228     tcg_gen_movi_reg(dest, 0);
2229 #else
2230     TCGv_i64 t0 = tcg_temp_new_i64();
2231 
2232     tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2233     tcg_gen_shri_i64(t0, t0, 32);
2234     tcg_gen_trunc_i64_reg(dest, t0);
2235 #endif
2236     save_gpr(ctx, a->t, dest);
2237 
2238     cond_free(&ctx->null_cond);
2239     return true;
2240 }
2241 
2242 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2243 {
2244     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2245 #ifndef CONFIG_USER_ONLY
2246     TCGv_reg tmp;
2247 
2248     nullify_over(ctx);
2249 
2250     tmp = tcg_temp_new();
2251     tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2252     tcg_gen_andi_reg(tmp, tmp, ~a->i);
2253     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2254     save_gpr(ctx, a->t, tmp);
2255 
2256     /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
2257     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2258     return nullify_end(ctx);
2259 #endif
2260 }
2261 
2262 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2263 {
2264     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2265 #ifndef CONFIG_USER_ONLY
2266     TCGv_reg tmp;
2267 
2268     nullify_over(ctx);
2269 
2270     tmp = tcg_temp_new();
2271     tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2272     tcg_gen_ori_reg(tmp, tmp, a->i);
2273     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2274     save_gpr(ctx, a->t, tmp);
2275 
2276     /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
2277     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2278     return nullify_end(ctx);
2279 #endif
2280 }
2281 
2282 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2283 {
2284     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2285 #ifndef CONFIG_USER_ONLY
2286     TCGv_reg tmp, reg;
2287     nullify_over(ctx);
2288 
2289     reg = load_gpr(ctx, a->r);
2290     tmp = tcg_temp_new();
2291     gen_helper_swap_system_mask(tmp, tcg_env, reg);
2292 
2293     /* Exit the TB to recognize new interrupts.  */
2294     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2295     return nullify_end(ctx);
2296 #endif
2297 }
2298 
2299 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2300 {
2301     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2302 #ifndef CONFIG_USER_ONLY
2303     nullify_over(ctx);
2304 
2305     if (rfi_r) {
2306         gen_helper_rfi_r(tcg_env);
2307     } else {
2308         gen_helper_rfi(tcg_env);
2309     }
2310     /* Exit the TB to recognize new interrupts.  */
2311     tcg_gen_exit_tb(NULL, 0);
2312     ctx->base.is_jmp = DISAS_NORETURN;
2313 
2314     return nullify_end(ctx);
2315 #endif
2316 }
2317 
2318 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2319 {
2320     return do_rfi(ctx, false);
2321 }
2322 
2323 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2324 {
2325     return do_rfi(ctx, true);
2326 }
2327 
2328 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2329 {
2330     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2331 #ifndef CONFIG_USER_ONLY
2332     nullify_over(ctx);
2333     gen_helper_halt(tcg_env);
2334     ctx->base.is_jmp = DISAS_NORETURN;
2335     return nullify_end(ctx);
2336 #endif
2337 }
2338 
2339 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2340 {
2341     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2342 #ifndef CONFIG_USER_ONLY
2343     nullify_over(ctx);
2344     gen_helper_reset(tcg_env);
2345     ctx->base.is_jmp = DISAS_NORETURN;
2346     return nullify_end(ctx);
2347 #endif
2348 }
2349 
2350 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2351 {
2352     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2353 #ifndef CONFIG_USER_ONLY
2354     nullify_over(ctx);
2355     gen_helper_getshadowregs(tcg_env);
2356     return nullify_end(ctx);
2357 #endif
2358 }
2359 
2360 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2361 {
2362     if (a->m) {
2363         TCGv_reg dest = dest_gpr(ctx, a->b);
2364         TCGv_reg src1 = load_gpr(ctx, a->b);
2365         TCGv_reg src2 = load_gpr(ctx, a->x);
2366 
2367         /* The only thing we need to do is the base register modification.  */
2368         tcg_gen_add_reg(dest, src1, src2);
2369         save_gpr(ctx, a->b, dest);
2370     }
2371     cond_free(&ctx->null_cond);
2372     return true;
2373 }
2374 
2375 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2376 {
2377     TCGv_reg dest, ofs;
2378     TCGv_i32 level, want;
2379     TCGv_tl addr;
2380 
2381     nullify_over(ctx);
2382 
2383     dest = dest_gpr(ctx, a->t);
2384     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2385 
2386     if (a->imm) {
2387         level = tcg_constant_i32(a->ri);
2388     } else {
2389         level = tcg_temp_new_i32();
2390         tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
2391         tcg_gen_andi_i32(level, level, 3);
2392     }
2393     want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2394 
2395     gen_helper_probe(dest, tcg_env, addr, level, want);
2396 
2397     save_gpr(ctx, a->t, dest);
2398     return nullify_end(ctx);
2399 }
2400 
2401 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2402 {
2403     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2404 #ifndef CONFIG_USER_ONLY
2405     TCGv_tl addr;
2406     TCGv_reg ofs, reg;
2407 
2408     nullify_over(ctx);
2409 
2410     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2411     reg = load_gpr(ctx, a->r);
2412     if (a->addr) {
2413         gen_helper_itlba(tcg_env, addr, reg);
2414     } else {
2415         gen_helper_itlbp(tcg_env, addr, reg);
2416     }
2417 
2418     /* Exit TB for TLB change if mmu is enabled.  */
2419     if (ctx->tb_flags & PSW_C) {
2420         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2421     }
2422     return nullify_end(ctx);
2423 #endif
2424 }
2425 
2426 static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
2427 {
2428     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2429 #ifndef CONFIG_USER_ONLY
2430     TCGv_tl addr;
2431     TCGv_reg ofs;
2432 
2433     nullify_over(ctx);
2434 
2435     form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2436     if (a->m) {
2437         save_gpr(ctx, a->b, ofs);
2438     }
2439     if (a->local) {
2440         gen_helper_ptlbe(tcg_env);
2441     } else {
2442         gen_helper_ptlb(tcg_env, addr);
2443     }
2444 
2445     /* Exit TB for TLB change if mmu is enabled.  */
2446     if (ctx->tb_flags & PSW_C) {
2447         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2448     }
2449     return nullify_end(ctx);
2450 #endif
2451 }
2452 
2453 /*
2454  * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2455  * See
2456  *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2457  *     page 13-9 (195/206)
2458  */
2459 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2460 {
2461     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2462 #ifndef CONFIG_USER_ONLY
2463     TCGv_tl addr, atl, stl;
2464     TCGv_reg reg;
2465 
2466     nullify_over(ctx);
2467 
2468     /*
2469      * FIXME:
2470      *  if (not (pcxl or pcxl2))
2471      *    return gen_illegal(ctx);
2472      *
2473      * Note for future: these are 32-bit systems; no hppa64.
2474      */
2475 
2476     atl = tcg_temp_new_tl();
2477     stl = tcg_temp_new_tl();
2478     addr = tcg_temp_new_tl();
2479 
2480     tcg_gen_ld32u_i64(stl, tcg_env,
2481                       a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2482                       : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2483     tcg_gen_ld32u_i64(atl, tcg_env,
2484                       a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2485                       : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2486     tcg_gen_shli_i64(stl, stl, 32);
2487     tcg_gen_or_tl(addr, atl, stl);
2488 
2489     reg = load_gpr(ctx, a->r);
2490     if (a->addr) {
2491         gen_helper_itlba(tcg_env, addr, reg);
2492     } else {
2493         gen_helper_itlbp(tcg_env, addr, reg);
2494     }
2495 
2496     /* Exit TB for TLB change if mmu is enabled.  */
2497     if (ctx->tb_flags & PSW_C) {
2498         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2499     }
2500     return nullify_end(ctx);
2501 #endif
2502 }
2503 
2504 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2505 {
2506     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2507 #ifndef CONFIG_USER_ONLY
2508     TCGv_tl vaddr;
2509     TCGv_reg ofs, paddr;
2510 
2511     nullify_over(ctx);
2512 
2513     form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2514 
2515     paddr = tcg_temp_new();
2516     gen_helper_lpa(paddr, tcg_env, vaddr);
2517 
2518     /* Note that physical address result overrides base modification.  */
2519     if (a->m) {
2520         save_gpr(ctx, a->b, ofs);
2521     }
2522     save_gpr(ctx, a->t, paddr);
2523 
2524     return nullify_end(ctx);
2525 #endif
2526 }
2527 
2528 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2529 {
2530     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2531 
2532     /* The Coherence Index is an implementation-defined function of the
2533        physical address.  Two addresses with the same CI have a coherent
2534        view of the cache.  Our implementation is to return 0 for all,
2535        since the entire address space is coherent.  */
2536     save_gpr(ctx, a->t, tcg_constant_reg(0));
2537 
2538     cond_free(&ctx->null_cond);
2539     return true;
2540 }
2541 
2542 static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
2543 {
2544     return do_add_reg(ctx, a, false, false, false, false);
2545 }
2546 
2547 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
2548 {
2549     return do_add_reg(ctx, a, true, false, false, false);
2550 }
2551 
2552 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2553 {
2554     return do_add_reg(ctx, a, false, true, false, false);
2555 }
2556 
2557 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
2558 {
2559     return do_add_reg(ctx, a, false, false, false, true);
2560 }
2561 
2562 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2563 {
2564     return do_add_reg(ctx, a, false, true, false, true);
2565 }
2566 
2567 static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
2568 {
2569     return do_sub_reg(ctx, a, false, false, false);
2570 }
2571 
2572 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
2573 {
2574     return do_sub_reg(ctx, a, true, false, false);
2575 }
2576 
2577 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
2578 {
2579     return do_sub_reg(ctx, a, false, false, true);
2580 }
2581 
2582 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
2583 {
2584     return do_sub_reg(ctx, a, true, false, true);
2585 }
2586 
2587 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
2588 {
2589     return do_sub_reg(ctx, a, false, true, false);
2590 }
2591 
2592 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
2593 {
2594     return do_sub_reg(ctx, a, true, true, false);
2595 }
2596 
2597 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
2598 {
2599     return do_log_reg(ctx, a, tcg_gen_andc_reg);
2600 }
2601 
2602 static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
2603 {
2604     return do_log_reg(ctx, a, tcg_gen_and_reg);
2605 }
2606 
2607 static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
2608 {
2609     if (a->cf == 0) {
2610         unsigned r2 = a->r2;
2611         unsigned r1 = a->r1;
2612         unsigned rt = a->t;
2613 
2614         if (rt == 0) { /* NOP */
2615             cond_free(&ctx->null_cond);
2616             return true;
2617         }
2618         if (r2 == 0) { /* COPY */
2619             if (r1 == 0) {
2620                 TCGv_reg dest = dest_gpr(ctx, rt);
2621                 tcg_gen_movi_reg(dest, 0);
2622                 save_gpr(ctx, rt, dest);
2623             } else {
2624                 save_gpr(ctx, rt, cpu_gr[r1]);
2625             }
2626             cond_free(&ctx->null_cond);
2627             return true;
2628         }
2629 #ifndef CONFIG_USER_ONLY
2630         /* These are QEMU extensions and are nops in the real architecture:
2631          *
2632          * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2633          * or %r31,%r31,%r31 -- death loop; offline cpu
2634          *                      currently implemented as idle.
2635          */
2636         if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2637             /* No need to check for supervisor, as userland can only pause
2638                until the next timer interrupt.  */
2639             nullify_over(ctx);
2640 
2641             /* Advance the instruction queue.  */
2642             copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2643             copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2644             nullify_set(ctx, 0);
2645 
2646             /* Tell the qemu main loop to halt until this cpu has work.  */
2647             tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
2648                            offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2649             gen_excp_1(EXCP_HALTED);
2650             ctx->base.is_jmp = DISAS_NORETURN;
2651 
2652             return nullify_end(ctx);
2653         }
2654 #endif
2655     }
2656     return do_log_reg(ctx, a, tcg_gen_or_reg);
2657 }
2658 
2659 static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
2660 {
2661     return do_log_reg(ctx, a, tcg_gen_xor_reg);
2662 }
2663 
2664 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
2665 {
2666     TCGv_reg tcg_r1, tcg_r2;
2667 
2668     if (a->cf) {
2669         nullify_over(ctx);
2670     }
2671     tcg_r1 = load_gpr(ctx, a->r1);
2672     tcg_r2 = load_gpr(ctx, a->r2);
2673     do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
2674     return nullify_end(ctx);
2675 }
2676 
2677 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
2678 {
2679     TCGv_reg tcg_r1, tcg_r2;
2680 
2681     if (a->cf) {
2682         nullify_over(ctx);
2683     }
2684     tcg_r1 = load_gpr(ctx, a->r1);
2685     tcg_r2 = load_gpr(ctx, a->r2);
2686     do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
2687     return nullify_end(ctx);
2688 }
2689 
2690 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
2691 {
2692     TCGv_reg tcg_r1, tcg_r2, tmp;
2693 
2694     if (a->cf) {
2695         nullify_over(ctx);
2696     }
2697     tcg_r1 = load_gpr(ctx, a->r1);
2698     tcg_r2 = load_gpr(ctx, a->r2);
2699     tmp = tcg_temp_new();
2700     tcg_gen_not_reg(tmp, tcg_r2);
2701     do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
2702     return nullify_end(ctx);
2703 }
2704 
2705 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
2706 {
2707     return do_uaddcm(ctx, a, false);
2708 }
2709 
2710 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
2711 {
2712     return do_uaddcm(ctx, a, true);
2713 }
2714 
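     /*
      * Decimal correct: PSW[CB] supplies one carry bit per 4-bit digit.
      * DCOR subtracts 6 from every digit that did not produce a carry;
      * I,DCOR instead adds 6 to every digit that did.  The classic use
      * is BCD addition: bias one operand by 0x66666666, ADD, then DCOR.
      * E.g. BCD 19 + 01: bias 0x19 + 0x66 = 0x7f; ADD 0x01 -> 0x80 with
      * a carry out of digit 0 only; DCOR subtracts 6 from digit 1,
      * giving the correct BCD result 0x20.
      */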
2715 static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
2716 {
2717     TCGv_reg tmp;
2718 
2719     nullify_over(ctx);
2720 
2721     tmp = tcg_temp_new();
2722     tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
2723     if (!is_i) {
2724         tcg_gen_not_reg(tmp, tmp);
2725     }
2726     tcg_gen_andi_reg(tmp, tmp, 0x11111111);
2727     tcg_gen_muli_reg(tmp, tmp, 6);
2728     do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false,
2729             is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
2730     return nullify_end(ctx);
2731 }
2732 
2733 static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
2734 {
2735     return do_dcor(ctx, a, false);
2736 }
2737 
2738 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
2739 {
2740     return do_dcor(ctx, a, true);
2741 }
2742 
2743 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2744 {
2745     TCGv_reg dest, add1, add2, addc, zero, in1, in2;
2746     TCGv_reg cout;
2747 
2748     nullify_over(ctx);
2749 
2750     in1 = load_gpr(ctx, a->r1);
2751     in2 = load_gpr(ctx, a->r2);
2752 
2753     add1 = tcg_temp_new();
2754     add2 = tcg_temp_new();
2755     addc = tcg_temp_new();
2756     dest = tcg_temp_new();
2757     zero = tcg_constant_reg(0);
2758 
2759     /* Form R1 << 1 | PSW[CB]{8}.  */
2760     tcg_gen_add_reg(add1, in1, in1);
2761     tcg_gen_add_reg(add1, add1, get_psw_carry(ctx, false));
2762 
2763     /*
2764      * Add or subtract R2, depending on PSW[V].  Proper computation of
2765      * carry requires that we subtract via + ~R2 + 1, as described in
2766      * the manual.  By extracting and masking V, we can produce the
2767      * proper inputs to the addition without movcond.
2768      */
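         /* With addc = sextract(PSW[V], 31, 1): 0 leaves add2 = in2 and
            addc = 0 (a plain add), while -1 gives add2 = ~in2 and
            addc = 1, i.e. add1 + ~in2 + 1 == add1 - in2.  */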
2769     tcg_gen_sextract_reg(addc, cpu_psw_v, 31, 1);
2770     tcg_gen_xor_reg(add2, in2, addc);
2771     tcg_gen_andi_reg(addc, addc, 1);
2772 
2773     tcg_gen_add2_reg(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2774     tcg_gen_add2_reg(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2775 
2776     /* Write back the result register.  */
2777     save_gpr(ctx, a->t, dest);
2778 
2779     /* Write back PSW[CB].  */
2780     tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2781     tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
2782 
2783     /* Write back PSW[V] for the division step.  */
2784     cout = get_psw_carry(ctx, false);
2785     tcg_gen_neg_reg(cpu_psw_v, cout);
2786     tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
2787 
2788     /* Install the new nullification.  */
2789     if (a->cf) {
2790         TCGv_reg sv = NULL;
2791         if (cond_need_sv(a->cf >> 1)) {
2792             /* ??? The lshift is supposed to contribute to overflow.  */
2793             sv = do_add_sv(ctx, dest, add1, add2);
2794         }
2795         ctx->null_cond = do_cond(a->cf, dest, cout, sv);
2796     }
2797 
2798     return nullify_end(ctx);
2799 }
2800 
2801 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2802 {
2803     return do_add_imm(ctx, a, false, false);
2804 }
2805 
2806 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2807 {
2808     return do_add_imm(ctx, a, true, false);
2809 }
2810 
2811 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2812 {
2813     return do_add_imm(ctx, a, false, true);
2814 }
2815 
2816 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2817 {
2818     return do_add_imm(ctx, a, true, true);
2819 }
2820 
2821 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2822 {
2823     return do_sub_imm(ctx, a, false);
2824 }
2825 
2826 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2827 {
2828     return do_sub_imm(ctx, a, true);
2829 }
2830 
2831 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
2832 {
2833     TCGv_reg tcg_im, tcg_r2;
2834 
2835     if (a->cf) {
2836         nullify_over(ctx);
2837     }
2838 
2839     tcg_im = tcg_constant_reg(a->i);
2840     tcg_r2 = load_gpr(ctx, a->r);
2841     do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);
2842 
2843     return nullify_end(ctx);
2844 }
2845 
2846 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
2847 {
2848     if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2849         return gen_illegal(ctx);
2850     } else {
2851         return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2852                    a->disp, a->sp, a->m, a->size | MO_TE);
2853     }
2854 }
2855 
2856 static bool trans_st(DisasContext *ctx, arg_ldst *a)
2857 {
2858     assert(a->x == 0 && a->scale == 0);
2859     if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2860         return gen_illegal(ctx);
2861     } else {
2862         return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2863     }
2864 }
2865 
2866 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
2867 {
2868     MemOp mop = MO_TE | MO_ALIGN | a->size;
2869     TCGv_reg zero, dest, ofs;
2870     TCGv_tl addr;
2871 
2872     nullify_over(ctx);
2873 
2874     if (a->m) {
2875         /* Base register modification.  Make sure that if RT == RB,
2876            we see the result of the load.  */
2877         dest = tcg_temp_new();
2878     } else {
2879         dest = dest_gpr(ctx, a->t);
2880     }
2881 
2882     form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2883              a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
2884 
2885     /*
2886      * For hppa1.1, LDCW is undefined unless aligned mod 16.
2887      * However actual hardware succeeds with aligned mod 4.
2888      * Detect this case and log a GUEST_ERROR.
2889      *
2890      * TODO: HPPA64 relaxes the over-alignment requirement
2891      * with the ,co completer.
2892      */
2893     gen_helper_ldc_check(addr);
2894 
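         /* The load-and-clear semantics of LDCW map directly onto an
            atomic exchange of the memory word with zero.  */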
2895     zero = tcg_constant_reg(0);
2896     tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
2897 
2898     if (a->m) {
2899         save_gpr(ctx, a->b, ofs);
2900     }
2901     save_gpr(ctx, a->t, dest);
2902 
2903     return nullify_end(ctx);
2904 }
2905 
2906 static bool trans_stby(DisasContext *ctx, arg_stby *a)
2907 {
2908     TCGv_reg ofs, val;
2909     TCGv_tl addr;
2910 
2911     nullify_over(ctx);
2912 
2913     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
2914              ctx->mmu_idx == MMU_PHYS_IDX);
2915     val = load_gpr(ctx, a->r);
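         /* a->a selects the STBY,E (end) form over STBY,B (begin); the
            pair implements unaligned stores piecewise.  The _parallel
            helpers perform the partial store atomically when other
            vcpus may observe it.  */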
2916     if (a->a) {
2917         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2918             gen_helper_stby_e_parallel(tcg_env, addr, val);
2919         } else {
2920             gen_helper_stby_e(tcg_env, addr, val);
2921         }
2922     } else {
2923         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2924             gen_helper_stby_b_parallel(tcg_env, addr, val);
2925         } else {
2926             gen_helper_stby_b(tcg_env, addr, val);
2927         }
2928     }
2929     if (a->m) {
2930         tcg_gen_andi_reg(ofs, ofs, ~3);
2931         save_gpr(ctx, a->b, ofs);
2932     }
2933 
2934     return nullify_end(ctx);
2935 }
2936 
2937 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
2938 {
2939     int hold_mmu_idx = ctx->mmu_idx;
2940 
2941     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2942     ctx->mmu_idx = MMU_PHYS_IDX;
2943     trans_ld(ctx, a);
2944     ctx->mmu_idx = hold_mmu_idx;
2945     return true;
2946 }
2947 
2948 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
2949 {
2950     int hold_mmu_idx = ctx->mmu_idx;
2951 
2952     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2953     ctx->mmu_idx = MMU_PHYS_IDX;
2954     trans_st(ctx, a);
2955     ctx->mmu_idx = hold_mmu_idx;
2956     return true;
2957 }
2958 
2959 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
2960 {
2961     TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
2962 
2963     tcg_gen_movi_reg(tcg_rt, a->i);
2964     save_gpr(ctx, a->t, tcg_rt);
2965     cond_free(&ctx->null_cond);
2966     return true;
2967 }
2968 
2969 static bool trans_addil(DisasContext *ctx, arg_addil *a)
2970 {
2971     TCGv_reg tcg_rt = load_gpr(ctx, a->r);
2972     TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
2973 
2974     tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
2975     save_gpr(ctx, 1, tcg_r1);
2976     cond_free(&ctx->null_cond);
2977     return true;
2978 }
2979 
2980 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
2981 {
2982     TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
2983 
2984     /* Special case rb == 0, for the LDI pseudo-op.
2985        The COPY pseudo-op is handled for free within tcg_gen_addi_tl.  */
2986     if (a->b == 0) {
2987         tcg_gen_movi_reg(tcg_rt, a->i);
2988     } else {
2989         tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
2990     }
2991     save_gpr(ctx, a->t, tcg_rt);
2992     cond_free(&ctx->null_cond);
2993     return true;
2994 }
2995 
2996 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
2997                     unsigned c, unsigned f, unsigned n, int disp)
2998 {
2999     TCGv_reg dest, in2, sv;
3000     DisasCond cond;
3001 
3002     in2 = load_gpr(ctx, r);
3003     dest = tcg_temp_new();
3004 
3005     tcg_gen_sub_reg(dest, in1, in2);
3006 
3007     sv = NULL;
3008     if (cond_need_sv(c)) {
3009         sv = do_sub_sv(ctx, dest, in1, in2);
3010     }
3011 
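         /* The f bit selects the negated sense of condition c, hence
            the c * 2 + f encoding passed down.  */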
3012     cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
3013     return do_cbranch(ctx, disp, n, &cond);
3014 }
3015 
3016 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3017 {
3018     nullify_over(ctx);
3019     return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3020 }
3021 
3022 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3023 {
3024     nullify_over(ctx);
3025     return do_cmpb(ctx, a->r, tcg_constant_reg(a->i), a->c, a->f, a->n, a->disp);
3026 }
3027 
3028 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3029                     unsigned c, unsigned f, unsigned n, int disp)
3030 {
3031     TCGv_reg dest, in2, sv, cb_cond;
3032     DisasCond cond;
3033     bool d = false;
3034 
3035     in2 = load_gpr(ctx, r);
3036     dest = tcg_temp_new();
3037     sv = NULL;
3038     cb_cond = NULL;
3039 
3040     if (cond_need_cb(c)) {
3041         TCGv_reg cb = tcg_temp_new();
3042         TCGv_reg cb_msb = tcg_temp_new();
3043 
3044         tcg_gen_movi_reg(cb_msb, 0);
3045         tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3046         tcg_gen_xor_reg(cb, in1, in2);
3047         tcg_gen_xor_reg(cb, cb, dest);
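             /* in1 ^ in2 ^ dest recovers the per-bit carry-in vector
                of the addition, from which the carry condition is
                extracted.  */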
3048         cb_cond = get_carry(ctx, d, cb, cb_msb);
3049     } else {
3050         tcg_gen_add_reg(dest, in1, in2);
3051     }
3052     if (cond_need_sv(c)) {
3053         sv = do_add_sv(ctx, dest, in1, in2);
3054     }
3055 
3056     cond = do_cond(c * 2 + f, dest, cb_cond, sv);
3057     save_gpr(ctx, r, dest);
3058     return do_cbranch(ctx, disp, n, &cond);
3059 }
3060 
3061 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3062 {
3063     nullify_over(ctx);
3064     return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3065 }
3066 
3067 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3068 {
3069     nullify_over(ctx);
3070     return do_addb(ctx, a->r, tcg_constant_reg(a->i), a->c, a->f, a->n, a->disp);
3071 }
3072 
3073 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3074 {
3075     TCGv_reg tmp, tcg_r;
3076     DisasCond cond;
3077     bool d = false;
3078 
3079     nullify_over(ctx);
3080 
3081     tmp = tcg_temp_new();
3082     tcg_r = load_gpr(ctx, a->r);
3083     if (cond_need_ext(ctx, d)) {
3084         /* Force shift into [32,63] */
3085         tcg_gen_ori_reg(tmp, cpu_sar, 32);
3086         tcg_gen_shl_reg(tmp, tcg_r, tmp);
3087     } else {
3088         tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
3089     }
3090 
3091     cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3092     return do_cbranch(ctx, a->disp, a->n, &cond);
3093 }
3094 
3095 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3096 {
3097     TCGv_reg tmp, tcg_r;
3098     DisasCond cond;
3099     bool d = false;
3100     int p;
3101 
3102     nullify_over(ctx);
3103 
3104     tmp = tcg_temp_new();
3105     tcg_r = load_gpr(ctx, a->r);
3106     p = a->p | (cond_need_ext(ctx, d) ? 32 : 0);
3107     tcg_gen_shli_reg(tmp, tcg_r, p);
3108 
3109     cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3110     return do_cbranch(ctx, a->disp, a->n, &cond);
3111 }
3112 
3113 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3114 {
3115     TCGv_reg dest;
3116     DisasCond cond;
3117 
3118     nullify_over(ctx);
3119 
3120     dest = dest_gpr(ctx, a->r2);
3121     if (a->r1 == 0) {
3122         tcg_gen_movi_reg(dest, 0);
3123     } else {
3124         tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
3125     }
3126 
3127     cond = do_sed_cond(a->c, dest);
3128     return do_cbranch(ctx, a->disp, a->n, &cond);
3129 }
3130 
3131 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3132 {
3133     TCGv_reg dest;
3134     DisasCond cond;
3135 
3136     nullify_over(ctx);
3137 
3138     dest = dest_gpr(ctx, a->r);
3139     tcg_gen_movi_reg(dest, a->i);
3140 
3141     cond = do_sed_cond(a->c, dest);
3142     return do_cbranch(ctx, a->disp, a->n, &cond);
3143 }
3144 
3145 static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
3146 {
3147     TCGv_reg dest;
3148 
3149     if (a->c) {
3150         nullify_over(ctx);
3151     }
3152 
3153     dest = dest_gpr(ctx, a->t);
3154     if (a->r1 == 0) {
3155         tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
3156         tcg_gen_shr_reg(dest, dest, cpu_sar);
3157     } else if (a->r1 == a->r2) {
3158         TCGv_i32 t32 = tcg_temp_new_i32();
3159         TCGv_i32 s32 = tcg_temp_new_i32();
3160 
3161         tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
3162         tcg_gen_trunc_reg_i32(s32, cpu_sar);
3163         tcg_gen_rotr_i32(t32, t32, s32);
3164         tcg_gen_extu_i32_reg(dest, t32);
3165     } else {
3166         TCGv_i64 t = tcg_temp_new_i64();
3167         TCGv_i64 s = tcg_temp_new_i64();
3168 
3169         tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
3170         tcg_gen_extu_reg_i64(s, cpu_sar);
3171         tcg_gen_shr_i64(t, t, s);
3172         tcg_gen_trunc_i64_reg(dest, t);
3173     }
3174     save_gpr(ctx, a->t, dest);
3175 
3176     /* Install the new nullification.  */
3177     cond_free(&ctx->null_cond);
3178     if (a->c) {
3179         ctx->null_cond = do_sed_cond(a->c, dest);
3180     }
3181     return nullify_end(ctx);
3182 }
3183 
3184 static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
3185 {
3186     unsigned sa = 31 - a->cpos;
3187     TCGv_reg dest, t2;
3188 
3189     if (a->c) {
3190         nullify_over(ctx);
3191     }
3192 
3193     dest = dest_gpr(ctx, a->t);
3194     t2 = load_gpr(ctx, a->r2);
3195     if (a->r1 == 0) {
3196         tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
3197     } else if (TARGET_REGISTER_BITS == 32) {
3198         tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa);
3199     } else if (a->r1 == a->r2) {
3200         TCGv_i32 t32 = tcg_temp_new_i32();
3201         tcg_gen_trunc_reg_i32(t32, t2);
3202         tcg_gen_rotri_i32(t32, t32, sa);
3203         tcg_gen_extu_i32_reg(dest, t32);
3204     } else {
3205         TCGv_i64 t64 = tcg_temp_new_i64();
3206         tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]);
3207         tcg_gen_shri_i64(t64, t64, sa);
3208         tcg_gen_trunc_i64_reg(dest, t64);
3209     }
3210     save_gpr(ctx, a->t, dest);
3211 
3212     /* Install the new nullification.  */
3213     cond_free(&ctx->null_cond);
3214     if (a->c) {
3215         ctx->null_cond = do_sed_cond(a->c, dest);
3216     }
3217     return nullify_end(ctx);
3218 }
3219 
3220 static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
3221 {
3222     unsigned len = 32 - a->clen;
3223     TCGv_reg dest, src, tmp;
3224 
3225     if (a->c) {
3226         nullify_over(ctx);
3227     }
3228 
3229     dest = dest_gpr(ctx, a->t);
3230     src = load_gpr(ctx, a->r);
3231     tmp = tcg_temp_new();
3232 
3233     /* Recall that SAR is using big-endian bit numbering.  */
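         /* PA numbers bits from the left, so the right-shift count is
            31 - SAR; e.g. SAR = 31 names the least significant bit and
            yields a shift of zero.  */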
3234     tcg_gen_andi_reg(tmp, cpu_sar, 31);
3235     tcg_gen_xori_reg(tmp, tmp, 31);
3236 
3237     if (a->se) {
3238         tcg_gen_sar_reg(dest, src, tmp);
3239         tcg_gen_sextract_reg(dest, dest, 0, len);
3240     } else {
3241         tcg_gen_shr_reg(dest, src, tmp);
3242         tcg_gen_extract_reg(dest, dest, 0, len);
3243     }
3244     save_gpr(ctx, a->t, dest);
3245 
3246     /* Install the new nullification.  */
3247     cond_free(&ctx->null_cond);
3248     if (a->c) {
3249         ctx->null_cond = do_sed_cond(a->c, dest);
3250     }
3251     return nullify_end(ctx);
3252 }
3253 
3254 static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
3255 {
3256     unsigned len = 32 - a->clen;
3257     unsigned cpos = 31 - a->pos;
3258     TCGv_reg dest, src;
3259 
3260     if (a->c) {
3261         nullify_over(ctx);
3262     }
3263 
3264     dest = dest_gpr(ctx, a->t);
3265     src = load_gpr(ctx, a->r);
3266     if (a->se) {
3267         tcg_gen_sextract_reg(dest, src, cpos, len);
3268     } else {
3269         tcg_gen_extract_reg(dest, src, cpos, len);
3270     }
3271     save_gpr(ctx, a->t, dest);
3272 
3273     /* Install the new nullification.  */
3274     cond_free(&ctx->null_cond);
3275     if (a->c) {
3276         ctx->null_cond = do_sed_cond(a->c, dest);
3277     }
3278     return nullify_end(ctx);
3279 }
3280 
3281 static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
3282 {
3283     unsigned len = 32 - a->clen;
3284     target_sreg mask0, mask1;
3285     TCGv_reg dest;
3286 
3287     if (a->c) {
3288         nullify_over(ctx);
3289     }
3290     if (a->cpos + len > 32) {
3291         len = 32 - a->cpos;
3292     }
3293 
3294     dest = dest_gpr(ctx, a->t);
3295     mask0 = deposit64(0, a->cpos, len, a->i);
3296     mask1 = deposit64(-1, a->cpos, len, a->i);
3297 
3298     if (a->nz) {
3299         TCGv_reg src = load_gpr(ctx, a->t);
3300         if (mask1 != -1) {
3301             tcg_gen_andi_reg(dest, src, mask1);
3302             src = dest;
3303         }
3304         tcg_gen_ori_reg(dest, src, mask0);
3305     } else {
3306         tcg_gen_movi_reg(dest, mask0);
3307     }
3308     save_gpr(ctx, a->t, dest);
3309 
3310     /* Install the new nullification.  */
3311     cond_free(&ctx->null_cond);
3312     if (a->c) {
3313         ctx->null_cond = do_sed_cond(a->c, dest);
3314     }
3315     return nullify_end(ctx);
3316 }
3317 
3318 static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
3319 {
3320     unsigned rs = a->nz ? a->t : 0;
3321     unsigned len = 32 - a->clen;
3322     TCGv_reg dest, val;
3323 
3324     if (a->c) {
3325         nullify_over(ctx);
3326     }
3327     if (a->cpos + len > 32) {
3328         len = 32 - a->cpos;
3329     }
3330 
3331     dest = dest_gpr(ctx, a->t);
3332     val = load_gpr(ctx, a->r);
3333     if (rs == 0) {
3334         tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
3335     } else {
3336         tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
3337     }
3338     save_gpr(ctx, a->t, dest);
3339 
3340     /* Install the new nullification.  */
3341     cond_free(&ctx->null_cond);
3342     if (a->c) {
3343         ctx->null_cond = do_sed_cond(a->c, dest);
3344     }
3345     return nullify_end(ctx);
3346 }
3347 
3348 static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
3349                         unsigned nz, unsigned clen, TCGv_reg val)
3350 {
3351     unsigned rs = nz ? rt : 0;
3352     unsigned len = 32 - clen;
3353     TCGv_reg mask, tmp, shift, dest;
3354     unsigned msb = 1U << (len - 1);
3355 
3356     dest = dest_gpr(ctx, rt);
3357     shift = tcg_temp_new();
3358     tmp = tcg_temp_new();
3359 
3360     /* Convert big-endian bit numbering in SAR to left-shift.  */
3361     tcg_gen_andi_reg(shift, cpu_sar, 31);
3362     tcg_gen_xori_reg(shift, shift, 31);
3363 
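         /* msb + (msb - 1) == (1 << len) - 1, a mask covering the len
            low bits of the field being deposited.  */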
3364     mask = tcg_temp_new();
3365     tcg_gen_movi_reg(mask, msb + (msb - 1));
3366     tcg_gen_and_reg(tmp, val, mask);
3367     if (rs) {
3368         tcg_gen_shl_reg(mask, mask, shift);
3369         tcg_gen_shl_reg(tmp, tmp, shift);
3370         tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3371         tcg_gen_or_reg(dest, dest, tmp);
3372     } else {
3373         tcg_gen_shl_reg(dest, tmp, shift);
3374     }
3375     save_gpr(ctx, rt, dest);
3376 
3377     /* Install the new nullification.  */
3378     cond_free(&ctx->null_cond);
3379     if (c) {
3380         ctx->null_cond = do_sed_cond(c, dest);
3381     }
3382     return nullify_end(ctx);
3383 }
3384 
3385 static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
3386 {
3387     if (a->c) {
3388         nullify_over(ctx);
3389     }
3390     return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
3391 }
3392 
3393 static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
3394 {
3395     if (a->c) {
3396         nullify_over(ctx);
3397     }
3398     return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, tcg_constant_reg(a->i));
3399 }
3400 
3401 static bool trans_be(DisasContext *ctx, arg_be *a)
3402 {
3403     TCGv_reg tmp;
3404 
3405 #ifdef CONFIG_USER_ONLY
3406     /* ??? It seems like there should be a good way of using
3407        "be disp(sr2, r0)", the canonical gateway entry mechanism
3408        to our advantage.  But that appears to be inconvenient to
3409    manage alongside branch delay slots.  Therefore we handle
3410        entry into the gateway page via absolute address.  */
3411     /* Since we don't implement spaces, just branch.  Do notice the special
3412        case of "be disp(*,r0)" using a direct branch to disp, so that we can
3413        goto_tb to the TB containing the syscall.  */
3414     if (a->b == 0) {
3415         return do_dbranch(ctx, a->disp, a->l, a->n);
3416     }
3417 #else
3418     nullify_over(ctx);
3419 #endif
3420 
3421     tmp = tcg_temp_new();
3422     tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
3423     tmp = do_ibranch_priv(ctx, tmp);
3424 
3425 #ifdef CONFIG_USER_ONLY
3426     return do_ibranch(ctx, tmp, a->l, a->n);
3427 #else
3428     TCGv_i64 new_spc = tcg_temp_new_i64();
3429 
3430     load_spr(ctx, new_spc, a->sp);
3431     if (a->l) {
3432         copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3433         tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3434     }
3435     if (a->n && use_nullify_skip(ctx)) {
3436         tcg_gen_mov_reg(cpu_iaoq_f, tmp);
3437         tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
3438         tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3439         tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3440     } else {
3441         copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3442         if (ctx->iaoq_b == -1) {
3443             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3444         }
3445         tcg_gen_mov_reg(cpu_iaoq_b, tmp);
3446         tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3447         nullify_set(ctx, a->n);
3448     }
3449     tcg_gen_lookup_and_goto_ptr();
3450     ctx->base.is_jmp = DISAS_NORETURN;
3451     return nullify_end(ctx);
3452 #endif
3453 }
3454 
3455 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3456 {
3457     return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3458 }
3459 
3460 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3461 {
3462     target_ureg dest = iaoq_dest(ctx, a->disp);
3463 
3464     nullify_over(ctx);
3465 
3466     /* Make sure the caller hasn't done something weird with the queue.
3467      * ??? This is not quite the same as the PSW[B] bit, which would be
3468      * expensive to track.  Real hardware will trap for
3469      *    b  gateway
3470      *    b  gateway+4  (in delay slot of first branch)
3471      * However, checking for a non-sequential instruction queue *will*
3472      * diagnose the security hole
3473      *    b  gateway
3474      *    b  evil
3475      * in which instructions at evil would run with increased privs.
3476      */
3477     if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3478         return gen_illegal(ctx);
3479     }
3480 
3481 #ifndef CONFIG_USER_ONLY
3482     if (ctx->tb_flags & PSW_C) {
3483         CPUHPPAState *env = cpu_env(ctx->cs);
3484         int type = hppa_artype_for_page(env, ctx->base.pc_next);
3485         /* If we could not find a TLB entry, then we need to generate an
3486            ITLB miss exception so the kernel will provide it.
3487            The resulting TLB fill operation will invalidate this TB and
3488            we will re-translate, at which point we *will* be able to find
3489            the TLB entry and determine if this is in fact a gateway page.  */
3490         if (type < 0) {
3491             gen_excp(ctx, EXCP_ITLB_MISS);
3492             return true;
3493         }
3494         /* No change for non-gateway pages or for priv decrease.  */
3495         if (type >= 4 && type - 4 < ctx->privilege) {
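            /* Gateway page: promote to the page's privilege level by
               depositing it into the low two bits of the target address,
               which encode the privilege.  */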
3496             dest = deposit32(dest, 0, 2, type - 4);
3497         }
3498     } else {
3499         dest &= -4;  /* priv = 0 */
3500     }
3501 #endif
3502 
3503     if (a->l) {
3504         TCGv_reg tmp = dest_gpr(ctx, a->l);
3505         if (ctx->privilege < 3) {
3506             tcg_gen_andi_reg(tmp, tmp, -4);
3507         }
3508         tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
3509         save_gpr(ctx, a->l, tmp);
3510     }
3511 
3512     return do_dbranch(ctx, dest, 0, a->n);
3513 }
3514 
3515 static bool trans_blr(DisasContext *ctx, arg_blr *a)
3516 {
3517     if (a->x) {
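        /* The target is iaoq_f + 8 + (GR[x] << 3): each index step
           covers one branch plus its delay slot.  */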
3518         TCGv_reg tmp = tcg_temp_new();
3519         tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
3520         tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
3521         /* The computation here never changes privilege level.  */
3522         return do_ibranch(ctx, tmp, a->l, a->n);
3523     } else {
3524         /* BLR R0,RX is a good way to load PC+8 into RX.  */
3525         return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3526     }
3527 }
3528 
3529 static bool trans_bv(DisasContext *ctx, arg_bv *a)
3530 {
3531     TCGv_reg dest;
3532 
3533     if (a->x == 0) {
3534         dest = load_gpr(ctx, a->b);
3535     } else {
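        /* A nonzero index scales by 8: target = GR[b] + (GR[x] << 3).  */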
3536         dest = tcg_temp_new();
3537         tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
3538         tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
3539     }
3540     dest = do_ibranch_priv(ctx, dest);
3541     return do_ibranch(ctx, dest, 0, a->n);
3542 }
3543 
3544 static bool trans_bve(DisasContext *ctx, arg_bve *a)
3545 {
3546     TCGv_reg dest;
3547 
3548 #ifdef CONFIG_USER_ONLY
3549     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3550     return do_ibranch(ctx, dest, a->l, a->n);
3551 #else
3552     nullify_over(ctx);
3553     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3554 
3555     copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3556     if (ctx->iaoq_b == -1) {
3557         tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3558     }
3559     copy_iaoq_entry(cpu_iaoq_b, -1, dest);
3560     tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3561     if (a->l) {
3562         copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3563     }
3564     nullify_set(ctx, a->n);
3565     tcg_gen_lookup_and_goto_ptr();
3566     ctx->base.is_jmp = DISAS_NORETURN;
3567     return nullify_end(ctx);
3568 #endif
3569 }
3570 
3571 /*
3572  * Float class 0
3573  */
3574 
3575 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3576 {
3577     tcg_gen_mov_i32(dst, src);
3578 }
3579 
3580 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
3581 {
3582     uint64_t ret;
3583 
3584     if (TARGET_REGISTER_BITS == 64) {
3585         ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
3586     } else {
3587         ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
3588     }
3589 
3590     nullify_over(ctx);
3591     save_frd(0, tcg_constant_i64(ret));
3592     return nullify_end(ctx);
3593 }
3594 
3595 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3596 {
3597     return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3598 }
3599 
3600 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3601 {
3602     tcg_gen_mov_i64(dst, src);
3603 }
3604 
3605 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3606 {
3607     return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
3608 }
3609 
3610 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3611 {
3612     tcg_gen_andi_i32(dst, src, INT32_MAX);
3613 }
3614 
3615 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
3616 {
3617     return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3618 }
3619 
3620 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3621 {
3622     tcg_gen_andi_i64(dst, src, INT64_MAX);
3623 }
3624 
3625 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
3626 {
3627     return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
3628 }
3629 
3630 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
3631 {
3632     return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3633 }
3634 
3635 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
3636 {
3637     return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3638 }
3639 
3640 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
3641 {
3642     return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3643 }
3644 
3645 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
3646 {
3647     return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
3648 }
3649 
3650 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3651 {
3652     tcg_gen_xori_i32(dst, src, INT32_MIN);
3653 }
3654 
3655 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
3656 {
3657     return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3658 }
3659 
3660 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3661 {
3662     tcg_gen_xori_i64(dst, src, INT64_MIN);
3663 }
3664 
3665 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3666 {
3667     return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3668 }
3669 
3670 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3671 {
3672     tcg_gen_ori_i32(dst, src, INT32_MIN);
3673 }
3674 
3675 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3676 {
3677     return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3678 }
3679 
3680 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3681 {
3682     tcg_gen_ori_i64(dst, src, INT64_MIN);
3683 }
3684 
3685 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3686 {
3687     return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
3688 }
3689 
3690 /*
3691  * Float class 1
3692  */
3693 
3694 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
3695 {
3696     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
3697 }
3698 
3699 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
3700 {
3701     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
3702 }
3703 
3704 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
3705 {
3706     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
3707 }
3708 
3709 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
3710 {
3711     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
3712 }
3713 
3714 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
3715 {
3716     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
3717 }
3718 
3719 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
3720 {
3721     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
3722 }
3723 
3724 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
3725 {
3726     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
3727 }
3728 
3729 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
3730 {
3731     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
3732 }
3733 
3734 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
3735 {
3736     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
3737 }
3738 
3739 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
3740 {
3741     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
3742 }
3743 
3744 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
3745 {
3746     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
3747 }
3748 
3749 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
3750 {
3751     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
3752 }
3753 
3754 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
3755 {
3756     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
3757 }
3758 
3759 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
3760 {
3761     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
3762 }
3763 
3764 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
3765 {
3766     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
3767 }
3768 
3769 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
3770 {
3771     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
3772 }
3773 
3774 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
3775 {
3776     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
3777 }
3778 
3779 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
3780 {
3781     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
3782 }
3783 
3784 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
3785 {
3786     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
3787 }
3788 
3789 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
3790 {
3791     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
3792 }
3793 
3794 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
3795 {
3796     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
3797 }
3798 
3799 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
3800 {
3801     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
3802 }
3803 
3804 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
3805 {
3806     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
3807 }
3808 
3809 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
3810 {
3811     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
3812 }
3813 
3814 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
3815 {
3816     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
3817 }
3818 
3819 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
3820 {
3821     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
3822 }
3823 
3824 /*
3825  * Float class 2
3826  */
3827 
3828 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
3829 {
3830     TCGv_i32 ta, tb, tc, ty;
3831 
3832     nullify_over(ctx);
3833 
3834     ta = load_frw0_i32(a->r1);
3835     tb = load_frw0_i32(a->r2);
3836     ty = tcg_constant_i32(a->y);
3837     tc = tcg_constant_i32(a->c);
3838 
3839     gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
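    /* The compare result is recorded in the FP status register, mirrored
       in fr0_shadow, for a later FTEST to inspect; no target register
       is written.  */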
3840 
3841     return nullify_end(ctx);
3842 }
3843 
3844 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
3845 {
3846     TCGv_i64 ta, tb;
3847     TCGv_i32 tc, ty;
3848 
3849     nullify_over(ctx);
3850 
3851     ta = load_frd0(a->r1);
3852     tb = load_frd0(a->r2);
3853     ty = tcg_constant_i32(a->y);
3854     tc = tcg_constant_i32(a->c);
3855 
3856     gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
3857 
3858     return nullify_end(ctx);
3859 }
3860 
3861 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
3862 {
3863     TCGv_reg t;
3864 
3865     nullify_over(ctx);
3866 
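    /* fr0_shadow mirrors the FP status word; the tests below inspect
       its compare bit (0x4000000) and the compare-queue bits.  */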
3867     t = tcg_temp_new();
3868     tcg_gen_ld32u_reg(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
3869 
3870     if (a->y == 1) {
3871         int mask;
3872         bool inv = false;
3873 
3874         switch (a->c) {
3875         case 0: /* simple */
3876             tcg_gen_andi_reg(t, t, 0x4000000);
3877             ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3878             goto done;
3879         case 2: /* rej */
3880             inv = true;
3881             /* fallthru */
3882         case 1: /* acc */
3883             mask = 0x43ff800;
3884             break;
3885         case 6: /* rej8 */
3886             inv = true;
3887             /* fallthru */
3888         case 5: /* acc8 */
3889             mask = 0x43f8000;
3890             break;
3891         case 9: /* acc6 */
3892             mask = 0x43e0000;
3893             break;
3894         case 13: /* acc4 */
3895             mask = 0x4380000;
3896             break;
3897         case 17: /* acc2 */
3898             mask = 0x4200000;
3899             break;
3900         default:
3901             gen_illegal(ctx);
3902             return true;
3903         }
3904         if (inv) {
3905             TCGv_reg c = tcg_constant_reg(mask);
3906             tcg_gen_or_reg(t, t, c);
3907             ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3908         } else {
3909             tcg_gen_andi_reg(t, t, mask);
3910             ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
3911         }
3912     } else {
3913         unsigned cbit = (a->y ^ 1) - 1;
3914 
3915         tcg_gen_extract_reg(t, t, 21 - cbit, 1);
3916         ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3917     }
3918 
3919  done:
3920     return nullify_end(ctx);
3921 }
3922 
3923 /*
3924  * Float class 3
3925  */
3926 
3927 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
3928 {
3929     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
3930 }
3931 
3932 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
3933 {
3934     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
3935 }
3936 
3937 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
3938 {
3939     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
3940 }
3941 
3942 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
3943 {
3944     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
3945 }
3946 
3947 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
3948 {
3949     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
3950 }
3951 
3952 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
3953 {
3954     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
3955 }
3956 
3957 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
3958 {
3959     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
3960 }
3961 
3962 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
3963 {
3964     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
3965 }
3966 
3967 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
3968 {
3969     TCGv_i64 x, y;
3970 
3971     nullify_over(ctx);
3972 
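    /* XMPYU is an unsigned 32x32 -> 64-bit multiply: load_frw0_i64
       zero-extends each operand, so a plain 64-bit multiply produces
       the full product.  */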
3973     x = load_frw0_i64(a->r1);
3974     y = load_frw0_i64(a->r2);
3975     tcg_gen_mul_i64(x, x, y);
3976     save_frd(a->t, x);
3977 
3978     return nullify_end(ctx);
3979 }
3980 
3981 /* Convert the fmpyadd single-precision register encodings to standard.  */
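/* The 5-bit field maps r = 0..15 to indices 16..31 and r = 16..31 to
   indices 48..63, i.e. the two word halves of fr16..fr31.  */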
3982 static inline int fmpyadd_s_reg(unsigned r)
3983 {
3984     return (r & 16) * 2 + 16 + (r & 15);
3985 }
3986 
3987 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
3988 {
3989     int tm = fmpyadd_s_reg(a->tm);
3990     int ra = fmpyadd_s_reg(a->ra);
3991     int ta = fmpyadd_s_reg(a->ta);
3992     int rm2 = fmpyadd_s_reg(a->rm2);
3993     int rm1 = fmpyadd_s_reg(a->rm1);
3994 
3995     nullify_over(ctx);
3996 
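    /* FMPYADD bundles two independent FP operations into one insn:
       tm = rm1 * rm2, then ta = ta + ra (or ta - ra for fmpysub).  */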
3997     do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
3998     do_fop_weww(ctx, ta, ta, ra,
3999                 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4000 
4001     return nullify_end(ctx);
4002 }
4003 
4004 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
4005 {
4006     return do_fmpyadd_s(ctx, a, false);
4007 }
4008 
4009 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
4010 {
4011     return do_fmpyadd_s(ctx, a, true);
4012 }
4013 
4014 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4015 {
4016     nullify_over(ctx);
4017 
4018     do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4019     do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4020                 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4021 
4022     return nullify_end(ctx);
4023 }
4024 
4025 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4026 {
4027     return do_fmpyadd_d(ctx, a, false);
4028 }
4029 
4030 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4031 {
4032     return do_fmpyadd_d(ctx, a, true);
4033 }
4034 
4035 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4036 {
4037     TCGv_i32 x, y, z;
4038 
4039     nullify_over(ctx);
4040     x = load_frw0_i32(a->rm1);
4041     y = load_frw0_i32(a->rm2);
4042     z = load_frw0_i32(a->ra3);
4043 
4044     if (a->neg) {
4045         gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
4046     } else {
4047         gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
4048     }
4049 
4050     save_frw_i32(a->t, x);
4051     return nullify_end(ctx);
4052 }
4053 
4054 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4055 {
4056     TCGv_i64 x, y, z;
4057 
4058     nullify_over(ctx);
4059     x = load_frd0(a->rm1);
4060     y = load_frd0(a->rm2);
4061     z = load_frd0(a->ra3);
4062 
4063     if (a->neg) {
4064         gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
4065     } else {
4066         gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
4067     }
4068 
4069     save_frd(a->t, x);
4070     return nullify_end(ctx);
4071 }
4072 
4073 static bool trans_diag(DisasContext *ctx, arg_diag *a)
4074 {
4075     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4076 #ifndef CONFIG_USER_ONLY
4077     if (a->i == 0x100) {
4078         /* emulate PDC BTLB, called by SeaBIOS-hppa */
4079         nullify_over(ctx);
4080         gen_helper_diag_btlb(tcg_env);
4081         return nullify_end(ctx);
4082     }
4083 #endif
4084     qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
4085     return true;
4086 }
4087 
4088 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4089 {
4090     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4091     int bound;
4092 
4093     ctx->cs = cs;
4094     ctx->tb_flags = ctx->base.tb->flags;
4095     ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));
4096 
4097 #ifdef CONFIG_USER_ONLY
4098     ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
4099     ctx->mmu_idx = MMU_USER_IDX;
4100     ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
4101     ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
4102     ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
4103 #else
4104     ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
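    /* With PSW_D clear, data accesses bypass translation, so use the
       physical MMU index.  */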
4105     ctx->mmu_idx = (ctx->tb_flags & PSW_D
4106                     ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
4107                     : MMU_PHYS_IDX);
4108 
4109     /* Recover the IAOQ values from the GVA + PRIV.  */
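    /* cs_base carries the front space id in its high 32 bits and the
       signed iaoq_f -> iaoq_b displacement in its low 32 bits; a zero
       displacement means iaoq_b is unknown.  */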
4110     uint64_t cs_base = ctx->base.tb->cs_base;
4111     uint64_t iasq_f = cs_base & ~0xffffffffull;
4112     int32_t diff = cs_base;
4113 
4114     ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4115     ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4116 #endif
4117     ctx->iaoq_n = -1;
4118     ctx->iaoq_n_var = NULL;
4119 
4120     /* Bound the number of instructions by those left on the page.  */
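    /* pc_first | TARGET_PAGE_MASK sets every bit above the page offset,
       so its negation is the byte count remaining on the page, and
       dividing by 4 converts that into an insn count.  */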
4121     bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
4122     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4123 }
4124 
4125 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4126 {
4127     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4128 
4129     /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
4130     ctx->null_cond = cond_make_f();
4131     ctx->psw_n_nonzero = false;
4132     if (ctx->tb_flags & PSW_N) {
4133         ctx->null_cond.c = TCG_COND_ALWAYS;
4134         ctx->psw_n_nonzero = true;
4135     }
4136     ctx->null_lab = NULL;
4137 }
4138 
4139 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4140 {
4141     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4142 
4143     tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4144 }
4145 
4146 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4147 {
4148     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4149     CPUHPPAState *env = cpu_env(cs);
4150     DisasJumpType ret;
4151 
4152     /* Execute one insn.  */
4153 #ifdef CONFIG_USER_ONLY
4154     if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4155         do_page_zero(ctx);
4156         ret = ctx->base.is_jmp;
4157         assert(ret != DISAS_NEXT);
4158     } else
4159 #endif
4160     {
4161         /* Always fetch the insn, even if nullified, so that we check
4162            the page permissions for execute.  */
4163         uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
4164 
4165         /* Set up the IA queue for the next insn.
4166            This will be overwritten by a branch.  */
4167         if (ctx->iaoq_b == -1) {
4168             ctx->iaoq_n = -1;
4169             ctx->iaoq_n_var = tcg_temp_new();
4170             tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4171         } else {
4172             ctx->iaoq_n = ctx->iaoq_b + 4;
4173             ctx->iaoq_n_var = NULL;
4174         }
4175 
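        /* The insn is known at translate time to be nullified: consume
           it without decoding and clear the condition.  */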
4176         if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4177             ctx->null_cond.c = TCG_COND_NEVER;
4178             ret = DISAS_NEXT;
4179         } else {
4180             ctx->insn = insn;
4181             if (!decode(ctx, insn)) {
4182                 gen_illegal(ctx);
4183             }
4184             ret = ctx->base.is_jmp;
4185             assert(ctx->null_lab == NULL);
4186         }
4187     }
4188 
4189     /* Advance the insn queue.  Note that this check also detects
4190        a privilege change within the instruction queue.  */
4191     if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4192         if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4193             && use_goto_tb(ctx, ctx->iaoq_b)
4194             && (ctx->null_cond.c == TCG_COND_NEVER
4195                 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4196             nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4197             gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4198             ctx->base.is_jmp = ret = DISAS_NORETURN;
4199         } else {
4200             ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4201         }
4202     }
4203     ctx->iaoq_f = ctx->iaoq_b;
4204     ctx->iaoq_b = ctx->iaoq_n;
4205     ctx->base.pc_next += 4;
4206 
4207     switch (ret) {
4208     case DISAS_NORETURN:
4209     case DISAS_IAQ_N_UPDATED:
4210         break;
4211 
4212     case DISAS_NEXT:
4213     case DISAS_IAQ_N_STALE:
4214     case DISAS_IAQ_N_STALE_EXIT:
4215         if (ctx->iaoq_f == -1) {
4216             tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
4217             copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4218 #ifndef CONFIG_USER_ONLY
4219             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4220 #endif
4221             nullify_save(ctx);
4222             ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4223                                 ? DISAS_EXIT
4224                                 : DISAS_IAQ_N_UPDATED);
4225         } else if (ctx->iaoq_b == -1) {
4226             tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
4227         }
4228         break;
4229 
4230     default:
4231         g_assert_not_reached();
4232     }
4233 }
4234 
4235 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4236 {
4237     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4238     DisasJumpType is_jmp = ctx->base.is_jmp;
4239 
4240     switch (is_jmp) {
4241     case DISAS_NORETURN:
4242         break;
4243     case DISAS_TOO_MANY:
4244     case DISAS_IAQ_N_STALE:
4245     case DISAS_IAQ_N_STALE_EXIT:
4246         copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4247         copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4248         nullify_save(ctx);
4249         /* FALLTHRU */
4250     case DISAS_IAQ_N_UPDATED:
4251         if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
4252             tcg_gen_lookup_and_goto_ptr();
4253             break;
4254         }
4255         /* FALLTHRU */
4256     case DISAS_EXIT:
4257         tcg_gen_exit_tb(NULL, 0);
4258         break;
4259     default:
4260         g_assert_not_reached();
4261     }
4262 }
4263 
4264 static void hppa_tr_disas_log(const DisasContextBase *dcbase,
4265                               CPUState *cs, FILE *logfile)
4266 {
4267     target_ulong pc = dcbase->pc_first;
4268 
4269 #ifdef CONFIG_USER_ONLY
4270     switch (pc) {
4271     case 0x00:
4272         fprintf(logfile, "IN:\n0x00000000:  (null)\n");
4273         return;
4274     case 0xb0:
4275         fprintf(logfile, "IN:\n0x000000b0:  light-weight-syscall\n");
4276         return;
4277     case 0xe0:
4278         fprintf(logfile, "IN:\n0x000000e0:  set-thread-pointer-syscall\n");
4279         return;
4280     case 0x100:
4281         fprintf(logfile, "IN:\n0x00000100:  syscall\n");
4282         return;
4283     }
4284 #endif
4285 
4286     fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
4287     target_disas(logfile, cs, pc, dcbase->tb->size);
4288 }
4289 
4290 static const TranslatorOps hppa_tr_ops = {
4291     .init_disas_context = hppa_tr_init_disas_context,
4292     .tb_start           = hppa_tr_tb_start,
4293     .insn_start         = hppa_tr_insn_start,
4294     .translate_insn     = hppa_tr_translate_insn,
4295     .tb_stop            = hppa_tr_tb_stop,
4296     .disas_log          = hppa_tr_disas_log,
4297 };
4298 
4299 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
4300                            target_ulong pc, void *host_pc)
4301 {
4302     DisasContext ctx;
4303     translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
4304 }
4305