/* xref: /qemu/target/hppa/translate.c (revision f9d07071) */
/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

/* Since we have a distinction between register size and address size,
   we need to redefine all of these.  */

#undef TCGv
#undef tcg_temp_new
#undef tcg_global_mem_new
#undef tcg_temp_local_new
#undef tcg_temp_free

#if TARGET_LONG_BITS == 64
#define TCGv_tl              TCGv_i64
#define tcg_temp_new_tl      tcg_temp_new_i64
#define tcg_temp_free_tl     tcg_temp_free_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl  tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl              TCGv_i32
#define tcg_temp_new_tl      tcg_temp_new_i32
#define tcg_temp_free_tl     tcg_temp_free_i32
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i32
#endif
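
/*
 * A note on the scheme above: TCGv_reg values hold architectural register
 * contents (TARGET_REGISTER_BITS wide), while TCGv_tl values hold global
 * virtual addresses (TARGET_LONG_BITS wide).  When the two widths differ,
 * tcg_gen_extu_reg_tl is the zero-extension used to widen a register-sized
 * offset into an address; see form_gva below, which also ORs the space
 * register into the high bits of the resulting TCGv_tl.
 */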

#if TARGET_REGISTER_BITS == 64
#define TCGv_reg             TCGv_i64

#define tcg_temp_new         tcg_temp_new_i64
#define tcg_global_mem_new   tcg_global_mem_new_i64
#define tcg_temp_local_new   tcg_temp_local_new_i64
#define tcg_temp_free        tcg_temp_free_i64

#define tcg_gen_movi_reg     tcg_gen_movi_i64
#define tcg_gen_mov_reg      tcg_gen_mov_i64
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg    tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg    tcg_gen_ld32s_i64
#define tcg_gen_ld_reg       tcg_gen_ld_i64
#define tcg_gen_st8_reg      tcg_gen_st8_i64
#define tcg_gen_st16_reg     tcg_gen_st16_i64
#define tcg_gen_st32_reg     tcg_gen_st32_i64
#define tcg_gen_st_reg       tcg_gen_st_i64
#define tcg_gen_add_reg      tcg_gen_add_i64
#define tcg_gen_addi_reg     tcg_gen_addi_i64
#define tcg_gen_sub_reg      tcg_gen_sub_i64
#define tcg_gen_neg_reg      tcg_gen_neg_i64
#define tcg_gen_subfi_reg    tcg_gen_subfi_i64
#define tcg_gen_subi_reg     tcg_gen_subi_i64
#define tcg_gen_and_reg      tcg_gen_and_i64
#define tcg_gen_andi_reg     tcg_gen_andi_i64
#define tcg_gen_or_reg       tcg_gen_or_i64
#define tcg_gen_ori_reg      tcg_gen_ori_i64
#define tcg_gen_xor_reg      tcg_gen_xor_i64
#define tcg_gen_xori_reg     tcg_gen_xori_i64
#define tcg_gen_not_reg      tcg_gen_not_i64
#define tcg_gen_shl_reg      tcg_gen_shl_i64
#define tcg_gen_shli_reg     tcg_gen_shli_i64
#define tcg_gen_shr_reg      tcg_gen_shr_i64
#define tcg_gen_shri_reg     tcg_gen_shri_i64
#define tcg_gen_sar_reg      tcg_gen_sar_i64
#define tcg_gen_sari_reg     tcg_gen_sari_i64
#define tcg_gen_brcond_reg   tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg  tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg      tcg_gen_mul_i64
#define tcg_gen_muli_reg     tcg_gen_muli_i64
#define tcg_gen_div_reg      tcg_gen_div_i64
#define tcg_gen_rem_reg      tcg_gen_rem_i64
#define tcg_gen_divu_reg     tcg_gen_divu_i64
#define tcg_gen_remu_reg     tcg_gen_remu_i64
#define tcg_gen_discard_reg  tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg  tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64  tcg_gen_mov_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg   tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg   tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg  tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i64
#define tcg_gen_eqv_reg      tcg_gen_eqv_i64
#define tcg_gen_nand_reg     tcg_gen_nand_i64
#define tcg_gen_nor_reg      tcg_gen_nor_i64
#define tcg_gen_orc_reg      tcg_gen_orc_i64
#define tcg_gen_clz_reg      tcg_gen_clz_i64
#define tcg_gen_ctz_reg      tcg_gen_ctz_i64
#define tcg_gen_clzi_reg     tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg     tcg_gen_rotl_i64
#define tcg_gen_rotli_reg    tcg_gen_rotli_i64
#define tcg_gen_rotr_reg     tcg_gen_rotr_i64
#define tcg_gen_rotri_reg    tcg_gen_rotri_i64
#define tcg_gen_deposit_reg  tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg  tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_gen_extract2_reg tcg_gen_extract2_i64
#define tcg_const_reg        tcg_const_i64
#define tcg_const_local_reg  tcg_const_local_i64
#define tcg_constant_reg     tcg_constant_i64
#define tcg_gen_movcond_reg  tcg_gen_movcond_i64
#define tcg_gen_add2_reg     tcg_gen_add2_i64
#define tcg_gen_sub2_reg     tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#define tcg_gen_trunc_reg_ptr   tcg_gen_trunc_i64_ptr
#else
#define TCGv_reg             TCGv_i32
#define tcg_temp_new         tcg_temp_new_i32
#define tcg_global_mem_new   tcg_global_mem_new_i32
#define tcg_temp_local_new   tcg_temp_local_new_i32
#define tcg_temp_free        tcg_temp_free_i32

#define tcg_gen_movi_reg     tcg_gen_movi_i32
#define tcg_gen_mov_reg      tcg_gen_mov_i32
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg    tcg_gen_ld_i32
#define tcg_gen_ld32s_reg    tcg_gen_ld_i32
#define tcg_gen_ld_reg       tcg_gen_ld_i32
#define tcg_gen_st8_reg      tcg_gen_st8_i32
#define tcg_gen_st16_reg     tcg_gen_st16_i32
#define tcg_gen_st32_reg     tcg_gen_st32_i32
#define tcg_gen_st_reg       tcg_gen_st_i32
#define tcg_gen_add_reg      tcg_gen_add_i32
#define tcg_gen_addi_reg     tcg_gen_addi_i32
#define tcg_gen_sub_reg      tcg_gen_sub_i32
#define tcg_gen_neg_reg      tcg_gen_neg_i32
#define tcg_gen_subfi_reg    tcg_gen_subfi_i32
#define tcg_gen_subi_reg     tcg_gen_subi_i32
#define tcg_gen_and_reg      tcg_gen_and_i32
#define tcg_gen_andi_reg     tcg_gen_andi_i32
#define tcg_gen_or_reg       tcg_gen_or_i32
#define tcg_gen_ori_reg      tcg_gen_ori_i32
#define tcg_gen_xor_reg      tcg_gen_xor_i32
#define tcg_gen_xori_reg     tcg_gen_xori_i32
#define tcg_gen_not_reg      tcg_gen_not_i32
#define tcg_gen_shl_reg      tcg_gen_shl_i32
#define tcg_gen_shli_reg     tcg_gen_shli_i32
#define tcg_gen_shr_reg      tcg_gen_shr_i32
#define tcg_gen_shri_reg     tcg_gen_shri_i32
#define tcg_gen_sar_reg      tcg_gen_sar_i32
#define tcg_gen_sari_reg     tcg_gen_sari_i32
#define tcg_gen_brcond_reg   tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg  tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg      tcg_gen_mul_i32
#define tcg_gen_muli_reg     tcg_gen_muli_i32
#define tcg_gen_div_reg      tcg_gen_div_i32
#define tcg_gen_rem_reg      tcg_gen_rem_i32
#define tcg_gen_divu_reg     tcg_gen_divu_i32
#define tcg_gen_remu_reg     tcg_gen_remu_i32
#define tcg_gen_discard_reg  tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg  tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64  tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg   tcg_gen_mov_i32
#define tcg_gen_ext32s_reg   tcg_gen_mov_i32
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i32
#define tcg_gen_eqv_reg      tcg_gen_eqv_i32
#define tcg_gen_nand_reg     tcg_gen_nand_i32
#define tcg_gen_nor_reg      tcg_gen_nor_i32
#define tcg_gen_orc_reg      tcg_gen_orc_i32
#define tcg_gen_clz_reg      tcg_gen_clz_i32
#define tcg_gen_ctz_reg      tcg_gen_ctz_i32
#define tcg_gen_clzi_reg     tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg     tcg_gen_rotl_i32
#define tcg_gen_rotli_reg    tcg_gen_rotli_i32
#define tcg_gen_rotr_reg     tcg_gen_rotr_i32
#define tcg_gen_rotri_reg    tcg_gen_rotri_i32
#define tcg_gen_deposit_reg  tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg  tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_gen_extract2_reg tcg_gen_extract2_i32
#define tcg_const_reg        tcg_const_i32
#define tcg_const_local_reg  tcg_const_local_i32
#define tcg_constant_reg     tcg_constant_i32
#define tcg_gen_movcond_reg  tcg_gen_movcond_i32
#define tcg_gen_add2_reg     tcg_gen_add2_i32
#define tcg_gen_sub2_reg     tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#define tcg_gen_trunc_reg_ptr   tcg_gen_ext_i32_ptr
#endif /* TARGET_REGISTER_BITS */

typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
} DisasCond;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    target_ureg iaoq_f;
    target_ureg iaoq_b;
    target_ureg iaoq_n;
    TCGv_reg iaoq_n_var;

    int ntempr, ntempl;
    TCGv_reg tempr[8];
    TCGv_tl  templ[4];

    DisasCond null_cond;
    TCGLabel *null_lab;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;
} DisasContext;

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}

/* The space register field is stored inverted, so that a decoded value of
   0 selects sr0 explicitly, rather than meaning "infer the space register
   from the base register".  */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}
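
/*
 * Spelled out, the mapping implemented above is:
 *   M:A = 0x  ->  0  (no base register update)
 *   M:A = 10  ->  1  (post-modify)
 *   M:A = 11  -> -1  (pre-modify)
 * which matches the modify parameter convention documented at
 * do_load_32 below.
 */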

/* Convert the sign of the displacement to a pre- or post-modify.  */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for fp memory ops.  */
static int expand_shl3(DisasContext *ctx, int val)
{
    return val << 3;
}

/* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}


/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
#define DISAS_EXIT                  DISAS_TARGET_3

/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
static TCGv_reg cpu_psw_cb;
static TCGv_reg cpu_psw_cb_msb;

#include "exec/gen-icount.h"

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
{
    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){
        .c = c, .a0 = a0, .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    TCGv_reg tmp = tcg_temp_new();
    tcg_gen_mov_reg(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    DisasCond r = { .c = c };

    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_reg(r.a1, a1);

    return r;
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        if (cond->a0 != cpu_psw_n) {
            tcg_temp_free(cond->a0);
        }
        tcg_temp_free(cond->a1);
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

static TCGv_reg get_temp(DisasContext *ctx)
{
    unsigned i = ctx->ntempr++;
    g_assert(i < ARRAY_SIZE(ctx->tempr));
    return ctx->tempr[i] = tcg_temp_new();
}

#ifndef CONFIG_USER_ONLY
static TCGv_tl get_temp_tl(DisasContext *ctx)
{
    unsigned i = ctx->ntempl++;
    g_assert(i < ARRAY_SIZE(ctx->templ));
    return ctx->templ[i] = tcg_temp_new_tl();
}
#endif

static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
{
    TCGv_reg t = get_temp(ctx);
    tcg_gen_movi_reg(t, v);
    return t;
}

static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_reg t = get_temp(ctx);
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return get_temp(ctx);
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}
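
/*
 * Note the operand order in the movcond above: dest = cond ? dest : t.
 * That is, when the nullification condition holds (the insn is being
 * nullified), dest keeps its old value; only otherwise is t written.
 */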

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#ifdef HOST_WORDS_BIGENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i32(0);
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_ld32u_i64(ret, cpu_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
        return ret;
    }
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}

static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
{
    if (unlikely(ival == -1)) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_movi_reg(dest, ival);
    }
}
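
/*
 * An ival of -1 is the sentinel used throughout this file for "this queue
 * value is not known at translation time"; in that case the variable copy
 * vval is used instead (see e.g. the indirect path of gen_goto_tb).
 */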

static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(cpu_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
                   cpu_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_reg(cpu_iaoq_f, f);
        tcg_gen_movi_reg(cpu_iaoq_b, b);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}
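
/*
 * For reference: conditions c = {2, 3, 6} are <, <=, and SV, which need
 * the signed-overflow bit, while c = {4, 5} are NUV/UV and ZNV/VNZ, which
 * need the carry bit; compare the switch in do_cond below.
 */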

/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the PA-RISC 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(unsigned cf, TCGv_reg res,
                         TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V)) */
        tmp = tcg_temp_new();
        tcg_gen_xor_reg(tmp, res, sv);
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new();
        tcg_gen_eqv_reg(tmp, res, sv);
        tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
                             TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        return do_cond(cf, res, NULL, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
{
    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        return cond_make_0(TCG_COND_EQ, res);
    case 3:  /* <> */
        return cond_make_0(TCG_COND_NE, res);
    case 4:  /* < */
        return cond_make_0(TCG_COND_LT, res);
    case 5:  /* >= */
        return cond_make_0(TCG_COND_GE, res);
    case 6:  /* <= */
        return cond_make_0(TCG_COND_LE, res);
    case 7:  /* > */
        return cond_make_0(TCG_COND_GT, res);

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(cf, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (never, =, <), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(c * 2 + f, res);
}
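
/*
 * The resulting mapping of the compressed condition field is thus:
 *   0 -> never    1 -> =     2 -> <     3 -> OD
 *   4 -> always   5 -> <>    6 -> >=    7 -> EV
 */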

/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
        tcg_temp_free(tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
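        /*
         * Worked example: for res = 0x11002233,
         *   res - 0x01010101   = 0x0fff2132
         *   ...   & ~res       = 0x0eff0100
         *   ...   & 0x80808080 = 0x00800000 != 0,
         * i.e. exactly the all-zero byte is flagged.
         */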
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 8) {
        tcg_temp_free(cb);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Compute signed overflow for addition.  */
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}
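
/*
 * The formula above, SV = (res ^ in1) & ~(in1 ^ in2), sets the sign bit
 * of SV exactly when in1 and in2 have the same sign but res has the
 * opposite sign: the usual definition of signed overflow for addition.
 */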

/* Compute signed overflow for subtraction.  */
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}

static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
    TCGv_reg dest, cb, cb_msb, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;

    if (shift) {
        tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        TCGv_reg zero = tcg_constant_reg(0);
        cb_msb = get_temp(ctx);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
        }
        if (!is_l) {
            cb = get_temp(ctx);
            tcg_gen_xor_reg(cb, in1, in2);
            tcg_gen_xor_reg(cb, cb, dest);
        }
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(cf, dest, cb_msb, sv);
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_constant_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
           operations by seeding the high word with 1 and subtracting.  */
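        /* That is, {1:IN1} - {0:IN2} leaves IN1 - IN2 in the low word and
           1 - borrow, i.e. the carry, in CB_MSB, matching what the add2
           formulation in the is_b case above produces.  */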
        tcg_gen_movi_reg(cb_msb, 1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, cb_msb, sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);
    tcg_temp_free(cb);
    tcg_temp_free(cb_msb);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                      TCGv_reg in2, unsigned cf)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned cf,
                   void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(cf, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
                       void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                    TCGv_reg in2, unsigned cf, bool is_tc,
                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        if (is_tc) {
            TCGv_reg tmp = tcg_temp_new();
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(cpu_env, tmp);
            tcg_temp_free(tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = get_temp_tl(ctx);
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = get_temp_tl(ctx);

    tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
    tcg_gen_trunc_reg_ptr(ptr, tmp);
    tcg_temp_free(tmp);

    tcg_gen_add_ptr(ptr, ptr, cpu_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
    tcg_temp_free_ptr(ptr);

    return spc;
}
#endif

static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = get_temp(ctx);
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = get_temp(ctx);
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
#ifdef CONFIG_USER_ONLY
    *pgva = (modify <= 0 ? ofs : base);
#else
    TCGv_tl addr = get_temp_tl(ctx);
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    if (ctx->tb_flags & PSW_W) {
        tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
    }
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
    *pgva = addr;
#endif
}
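
/*
 * In the system case above, the global virtual address is formed by ORing
 * the (i64) space register value into the high bits of the zero-extended
 * offset.  This relies on the space registers being stored pre-shifted
 * into position, which is an assumption about how sr[] is maintained
 * elsewhere in the hppa code rather than something enforced here.
 */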

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop);
1479     if (modify) {
1480         save_gpr(ctx, rb, ofs);
1481     }
1482 }
1483 
1484 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1485                        unsigned rx, int scale, target_sreg disp,
1486                        unsigned sp, int modify, MemOp mop)
1487 {
1488     TCGv_reg ofs;
1489     TCGv_tl addr;
1490 
1491     /* Caller uses nullify_over/nullify_end.  */
1492     assert(ctx->null_cond.c == TCG_COND_NEVER);
1493 
1494     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1495              ctx->mmu_idx == MMU_PHYS_IDX);
1496     tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop);
1497     if (modify) {
1498         save_gpr(ctx, rb, ofs);
1499     }
1500 }
1501 
1502 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1503                         unsigned rx, int scale, target_sreg disp,
1504                         unsigned sp, int modify, MemOp mop)
1505 {
1506     TCGv_reg ofs;
1507     TCGv_tl addr;
1508 
1509     /* Caller uses nullify_over/nullify_end.  */
1510     assert(ctx->null_cond.c == TCG_COND_NEVER);
1511 
1512     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1513              ctx->mmu_idx == MMU_PHYS_IDX);
1514     tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop);
1515     if (modify) {
1516         save_gpr(ctx, rb, ofs);
1517     }
1518 }
1519 
1520 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1521                         unsigned rx, int scale, target_sreg disp,
1522                         unsigned sp, int modify, MemOp mop)
1523 {
1524     TCGv_reg ofs;
1525     TCGv_tl addr;
1526 
1527     /* Caller uses nullify_over/nullify_end.  */
1528     assert(ctx->null_cond.c == TCG_COND_NEVER);
1529 
1530     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1531              ctx->mmu_idx == MMU_PHYS_IDX);
1532     tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop);
1533     if (modify) {
1534         save_gpr(ctx, rb, ofs);
1535     }
1536 }
1537 
1538 #if TARGET_REGISTER_BITS == 64
1539 #define do_load_reg   do_load_64
1540 #define do_store_reg  do_store_64
1541 #else
1542 #define do_load_reg   do_load_32
1543 #define do_store_reg  do_store_32
1544 #endif
1545 
1546 static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1547                     unsigned rx, int scale, target_sreg disp,
1548                     unsigned sp, int modify, MemOp mop)
1549 {
1550     TCGv_reg dest;
1551 
1552     nullify_over(ctx);
1553 
1554     if (modify == 0) {
1555         /* No base register update.  */
1556         dest = dest_gpr(ctx, rt);
1557     } else {
1558         /* Make sure if RT == RB, we see the result of the load.  */
1559         dest = get_temp(ctx);
1560     }
1561     do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
1562     save_gpr(ctx, rt, dest);
1563 
1564     return nullify_end(ctx);
1565 }
1566 
1567 static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1568                       unsigned rx, int scale, target_sreg disp,
1569                       unsigned sp, int modify)
1570 {
1571     TCGv_i32 tmp;
1572 
1573     nullify_over(ctx);
1574 
1575     tmp = tcg_temp_new_i32();
1576     do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1577     save_frw_i32(rt, tmp);
1578     tcg_temp_free_i32(tmp);
1579 
1580     if (rt == 0) {
1581         gen_helper_loaded_fr0(cpu_env);
1582     }
1583 
1584     return nullify_end(ctx);
1585 }
1586 
1587 static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
1588 {
1589     return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1590                      a->disp, a->sp, a->m);
1591 }
1592 
1593 static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1594                       unsigned rx, int scale, target_sreg disp,
1595                       unsigned sp, int modify)
1596 {
1597     TCGv_i64 tmp;
1598 
1599     nullify_over(ctx);
1600 
1601     tmp = tcg_temp_new_i64();
1602     do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
1603     save_frd(rt, tmp);
1604     tcg_temp_free_i64(tmp);
1605 
1606     if (rt == 0) {
1607         gen_helper_loaded_fr0(cpu_env);
1608     }
1609 
1610     return nullify_end(ctx);
1611 }
1612 
1613 static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
1614 {
1615     return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1616                      a->disp, a->sp, a->m);
1617 }
1618 
1619 static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1620                      target_sreg disp, unsigned sp,
1621                      int modify, MemOp mop)
1622 {
1623     nullify_over(ctx);
1624     do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
1625     return nullify_end(ctx);
1626 }
1627 
1628 static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1629                        unsigned rx, int scale, target_sreg disp,
1630                        unsigned sp, int modify)
1631 {
1632     TCGv_i32 tmp;
1633 
1634     nullify_over(ctx);
1635 
1636     tmp = load_frw_i32(rt);
1637     do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1638     tcg_temp_free_i32(tmp);
1639 
1640     return nullify_end(ctx);
1641 }
1642 
1643 static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
1644 {
1645     return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1646                       a->disp, a->sp, a->m);
1647 }
1648 
1649 static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1650                        unsigned rx, int scale, target_sreg disp,
1651                        unsigned sp, int modify)
1652 {
1653     TCGv_i64 tmp;
1654 
1655     nullify_over(ctx);
1656 
1657     tmp = load_frd(rt);
1658     do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
1659     tcg_temp_free_i64(tmp);
1660 
1661     return nullify_end(ctx);
1662 }
1663 
1664 static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
1665 {
1666     return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1667                       a->disp, a->sp, a->m);
1668 }
1669 
1670 static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1671                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1672 {
1673     TCGv_i32 tmp;
1674 
1675     nullify_over(ctx);
1676     tmp = load_frw0_i32(ra);
1677 
1678     func(tmp, cpu_env, tmp);
1679 
1680     save_frw_i32(rt, tmp);
1681     tcg_temp_free_i32(tmp);
1682     return nullify_end(ctx);
1683 }
1684 
1685 static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1686                        void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1687 {
1688     TCGv_i32 dst;
1689     TCGv_i64 src;
1690 
1691     nullify_over(ctx);
1692     src = load_frd(ra);
1693     dst = tcg_temp_new_i32();
1694 
1695     func(dst, cpu_env, src);
1696 
1697     tcg_temp_free_i64(src);
1698     save_frw_i32(rt, dst);
1699     tcg_temp_free_i32(dst);
1700     return nullify_end(ctx);
1701 }
1702 
1703 static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1704                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1705 {
1706     TCGv_i64 tmp;
1707 
1708     nullify_over(ctx);
1709     tmp = load_frd0(ra);
1710 
1711     func(tmp, cpu_env, tmp);
1712 
1713     save_frd(rt, tmp);
1714     tcg_temp_free_i64(tmp);
1715     return nullify_end(ctx);
1716 }
1717 
1718 static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1719                        void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1720 {
1721     TCGv_i32 src;
1722     TCGv_i64 dst;
1723 
1724     nullify_over(ctx);
1725     src = load_frw0_i32(ra);
1726     dst = tcg_temp_new_i64();
1727 
1728     func(dst, cpu_env, src);
1729 
1730     tcg_temp_free_i32(src);
1731     save_frd(rt, dst);
1732     tcg_temp_free_i64(dst);
1733     return nullify_end(ctx);
1734 }
1735 
1736 static bool do_fop_weww(DisasContext *ctx, unsigned rt,
1737                         unsigned ra, unsigned rb,
1738                         void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
1739 {
1740     TCGv_i32 a, b;
1741 
1742     nullify_over(ctx);
1743     a = load_frw0_i32(ra);
1744     b = load_frw0_i32(rb);
1745 
1746     func(a, cpu_env, a, b);
1747 
1748     tcg_temp_free_i32(b);
1749     save_frw_i32(rt, a);
1750     tcg_temp_free_i32(a);
1751     return nullify_end(ctx);
1752 }
1753 
1754 static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
1755                         unsigned ra, unsigned rb,
1756                         void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
1757 {
1758     TCGv_i64 a, b;
1759 
1760     nullify_over(ctx);
1761     a = load_frd0(ra);
1762     b = load_frd0(rb);
1763 
1764     func(a, cpu_env, a, b);
1765 
1766     tcg_temp_free_i64(b);
1767     save_frd(rt, a);
1768     tcg_temp_free_i64(a);
1769     return nullify_end(ctx);
1770 }
1771 
1772 /* Emit an unconditional branch to a direct target, which may or may not
1773    have already had nullification handled.  */
1774 static bool do_dbranch(DisasContext *ctx, target_ureg dest,
1775                        unsigned link, bool is_n)
1776 {
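    /* With no nullification pending we can simply retarget the
       instruction queue.  Otherwise emit both exits: the taken branch
       under nullify_over, then the nullified fall-through path.  */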
1777     if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1778         if (link != 0) {
1779             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1780         }
1781         ctx->iaoq_n = dest;
1782         if (is_n) {
1783             ctx->null_cond.c = TCG_COND_ALWAYS;
1784         }
1785     } else {
1786         nullify_over(ctx);
1787 
1788         if (link != 0) {
1789             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1790         }
1791 
1792         if (is_n && use_nullify_skip(ctx)) {
1793             nullify_set(ctx, 0);
1794             gen_goto_tb(ctx, 0, dest, dest + 4);
1795         } else {
1796             nullify_set(ctx, is_n);
1797             gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
1798         }
1799 
1800         nullify_end(ctx);
1801 
1802         nullify_set(ctx, 0);
1803         gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
1804         ctx->base.is_jmp = DISAS_NORETURN;
1805     }
1806     return true;
1807 }
1808 
1809 /* Emit a conditional branch to a direct target.  If the branch itself
1810    is nullified, we should have already used nullify_over.  */
1811 static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
1812                        DisasCond *cond)
1813 {
1814     target_ureg dest = iaoq_dest(ctx, disp);
1815     TCGLabel *taken = NULL;
1816     TCGCond c = cond->c;
1817     bool n;
1818 
1819     assert(ctx->null_cond.c == TCG_COND_NEVER);
1820 
1821     /* Handle TRUE and NEVER as direct branches.  */
1822     if (c == TCG_COND_ALWAYS) {
1823         return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
1824     }
1825     if (c == TCG_COND_NEVER) {
1826         return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
1827     }
1828 
1829     taken = gen_new_label();
1830     tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
1831     cond_free(cond);
1832 
1833     /* Not taken: Condition not satisfied; nullify on backward branches. */
1834     n = is_n && disp < 0;
1835     if (n && use_nullify_skip(ctx)) {
1836         nullify_set(ctx, 0);
1837         gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
1838     } else {
1839         if (!n && ctx->null_lab) {
1840             gen_set_label(ctx->null_lab);
1841             ctx->null_lab = NULL;
1842         }
1843         nullify_set(ctx, n);
1844         if (ctx->iaoq_n == -1) {
1845             /* The temporary iaoq_n_var died at the branch above.
1846                Regenerate it here instead of saving it.  */
1847             tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
1848         }
1849         gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
1850     }
1851 
1852     gen_set_label(taken);
1853 
1854     /* Taken: Condition satisfied; nullify on forward branches.  */
1855     n = is_n && disp >= 0;
1856     if (n && use_nullify_skip(ctx)) {
1857         nullify_set(ctx, 0);
1858         gen_goto_tb(ctx, 1, dest, dest + 4);
1859     } else {
1860         nullify_set(ctx, n);
1861         gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
1862     }
1863 
1864     /* Not taken: the branch itself was nullified.  */
1865     if (ctx->null_lab) {
1866         gen_set_label(ctx->null_lab);
1867         ctx->null_lab = NULL;
1868         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1869     } else {
1870         ctx->base.is_jmp = DISAS_NORETURN;
1871     }
1872     return true;
1873 }
1874 
1875 /* Emit an unconditional branch to an indirect target.  This handles
1876    nullification of the branch itself.  */
1877 static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
1878                        unsigned link, bool is_n)
1879 {
1880     TCGv_reg a0, a1, next, tmp;
1881     TCGCond c;
1882 
1883     assert(ctx->null_lab == NULL);
1884 
1885     if (ctx->null_cond.c == TCG_COND_NEVER) {
1886         if (link != 0) {
1887             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1888         }
1889         next = get_temp(ctx);
1890         tcg_gen_mov_reg(next, dest);
1891         if (is_n) {
1892             if (use_nullify_skip(ctx)) {
1893                 tcg_gen_mov_reg(cpu_iaoq_f, next);
1894                 tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
1895                 nullify_set(ctx, 0);
1896                 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1897                 return true;
1898             }
1899             ctx->null_cond.c = TCG_COND_ALWAYS;
1900         }
1901         ctx->iaoq_n = -1;
1902         ctx->iaoq_n_var = next;
1903     } else if (is_n && use_nullify_skip(ctx)) {
1904         /* The (conditional) branch, B, nullifies the next insn, N,
1905            and we're allowed to skip execution of N (no single-step or
1906            tracepoint in effect).  Since the goto_ptr that we must use
1907            for the indirect branch consumes no special resources, we
1908            can (conditionally) skip B and continue execution.  */
1909         /* The use_nullify_skip test implies we have a known control path.  */
1910         tcg_debug_assert(ctx->iaoq_b != -1);
1911         tcg_debug_assert(ctx->iaoq_n != -1);
1912 
1913         /* We do have to handle the non-local temporary, DEST, before
1914            branching.  Since IAOQ_F is not really live at this point, we
1915            can simply store DEST optimistically.  Similarly with IAOQ_B.  */
1916         tcg_gen_mov_reg(cpu_iaoq_f, dest);
1917         tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);
1918 
1919         nullify_over(ctx);
1920         if (link != 0) {
1921             tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
1922         }
1923         tcg_gen_lookup_and_goto_ptr();
1924         return nullify_end(ctx);
1925     } else {
1926         c = ctx->null_cond.c;
1927         a0 = ctx->null_cond.a0;
1928         a1 = ctx->null_cond.a1;
1929 
1930         tmp = tcg_temp_new();
1931         next = get_temp(ctx);
1932 
1933         copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1934         tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
1935         ctx->iaoq_n = -1;
1936         ctx->iaoq_n_var = next;
1937 
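            /* Update the link register only when the branch is not
               nullified, mirroring the movcond above.  */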
1938         if (link != 0) {
1939             tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1940         }
1941 
1942         if (is_n) {
1943             /* The branch nullifies the next insn, which means the state of N
1944                after the branch is the inverse of the state of N that applied
1945                to the branch.  */
1946             tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1947             cond_free(&ctx->null_cond);
1948             ctx->null_cond = cond_make_n();
1949             ctx->psw_n_nonzero = true;
1950         } else {
1951             cond_free(&ctx->null_cond);
1952         }
1953     }
1954     return true;
1955 }
1956 
1957 /* Implement
1958  *    if (IAOQ_Front{30..31} < GR[b]{30..31})
1959  *      IAOQ_Next{30..31} ← GR[b]{30..31};
1960  *    else
1961  *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1962  * which keeps the privilege level from being increased.
1963  */
1964 static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
1965 {
1966     TCGv_reg dest;
1967     switch (ctx->privilege) {
1968     case 0:
1969         /* Privilege 0 is maximum and is allowed to decrease.  */
1970         return offset;
1971     case 3:
1972         /* Privilege 3 is minimum and is never allowed to increase.  */
1973         dest = get_temp(ctx);
1974         tcg_gen_ori_reg(dest, offset, 3);
1975         break;
1976     default:
1977         dest = get_temp(ctx);
1978         tcg_gen_andi_reg(dest, offset, -4);
1979         tcg_gen_ori_reg(dest, dest, ctx->privilege);
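        /* Keep the numerically larger (lesser-privileged) of the two
           values, so the branch may lower but never raise privilege.  */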
1980         tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
1981         break;
1982     }
1983     return dest;
1984 }
1985 
1986 #ifdef CONFIG_USER_ONLY
1987 /* On Linux, page zero is normally marked execute only + gateway.
1988    Therefore normal read or write is supposed to fail, but specific
1989    offsets have kernel code mapped to raise permissions to implement
1990    system calls.  Handling this via an explicit check here, rather
1991    in than the "be disp(sr2,r0)" instruction that probably sent us
1992    than in the "be disp(sr2,r0)" instruction that probably sent us
1993    aforementioned BE.  */
1994 static void do_page_zero(DisasContext *ctx)
1995 {
1996     /* If by some means we get here with PSW[N]=1, that implies that
1997        the B,GATE instruction would be skipped, and we'd fault on the
1998        next insn within the privileged page.  */
1999     switch (ctx->null_cond.c) {
2000     case TCG_COND_NEVER:
2001         break;
2002     case TCG_COND_ALWAYS:
2003         tcg_gen_movi_reg(cpu_psw_n, 0);
2004         goto do_sigill;
2005     default:
2006         /* Since this is always the first (and only) insn within the
2007            TB, we should know the state of PSW[N] from TB->FLAGS.  */
2008         g_assert_not_reached();
2009     }
2010 
2011     /* Check that we didn't arrive here via some means that allowed
2012        non-sequential instruction execution.  Normally the PSW[B] bit
2013        detects this by preventing the B,GATE instruction from executing
2014        under such conditions.  */
2015     if (ctx->iaoq_b != ctx->iaoq_f + 4) {
2016         goto do_sigill;
2017     }
2018 
2019     switch (ctx->iaoq_f & -4) {
2020     case 0x00: /* Null pointer call */
2021         gen_excp_1(EXCP_IMP);
2022         ctx->base.is_jmp = DISAS_NORETURN;
2023         break;
2024 
2025     case 0xb0: /* LWS */
2026         gen_excp_1(EXCP_SYSCALL_LWS);
2027         ctx->base.is_jmp = DISAS_NORETURN;
2028         break;
2029 
2030     case 0xe0: /* SET_THREAD_POINTER */
2031         tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
2032         tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
2033         tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
2034         ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
2035         break;
2036 
2037     case 0x100: /* SYSCALL */
2038         gen_excp_1(EXCP_SYSCALL);
2039         ctx->base.is_jmp = DISAS_NORETURN;
2040         break;
2041 
2042     default:
2043     do_sigill:
2044         gen_excp_1(EXCP_ILL);
2045         ctx->base.is_jmp = DISAS_NORETURN;
2046         break;
2047     }
2048 }
2049 #endif
2050 
2051 static bool trans_nop(DisasContext *ctx, arg_nop *a)
2052 {
2053     cond_free(&ctx->null_cond);
2054     return true;
2055 }
2056 
2057 static bool trans_break(DisasContext *ctx, arg_break *a)
2058 {
2059     return gen_excp_iir(ctx, EXCP_BREAK);
2060 }
2061 
2062 static bool trans_sync(DisasContext *ctx, arg_sync *a)
2063 {
2064     /* No point in nullifying the memory barrier.  */
2065     tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2066 
2067     cond_free(&ctx->null_cond);
2068     return true;
2069 }
2070 
2071 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
2072 {
2073     unsigned rt = a->t;
2074     TCGv_reg tmp = dest_gpr(ctx, rt);
2075     tcg_gen_movi_reg(tmp, ctx->iaoq_f);
2076     save_gpr(ctx, rt, tmp);
2077 
2078     cond_free(&ctx->null_cond);
2079     return true;
2080 }
2081 
2082 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
2083 {
2084     unsigned rt = a->t;
2085     unsigned rs = a->sp;
2086     TCGv_i64 t0 = tcg_temp_new_i64();
2087     TCGv_reg t1 = tcg_temp_new();
2088 
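    /* The space id lives in the high 32 bits of the i64 space
       register; shift it down before truncating into the GPR.  */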
2089     load_spr(ctx, t0, rs);
2090     tcg_gen_shri_i64(t0, t0, 32);
2091     tcg_gen_trunc_i64_reg(t1, t0);
2092 
2093     save_gpr(ctx, rt, t1);
2094     tcg_temp_free(t1);
2095     tcg_temp_free_i64(t0);
2096 
2097     cond_free(&ctx->null_cond);
2098     return true;
2099 }
2100 
2101 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
2102 {
2103     unsigned rt = a->t;
2104     unsigned ctl = a->r;
2105     TCGv_reg tmp;
2106 
2107     switch (ctl) {
2108     case CR_SAR:
2109 #ifdef TARGET_HPPA64
2110         if (a->e == 0) {
2111             /* MFSAR without ,W masks low 5 bits.  */
2112             tmp = dest_gpr(ctx, rt);
2113             tcg_gen_andi_reg(tmp, cpu_sar, 31);
2114             save_gpr(ctx, rt, tmp);
2115             goto done;
2116         }
2117 #endif
2118         save_gpr(ctx, rt, cpu_sar);
2119         goto done;
2120     case CR_IT: /* Interval Timer */
2121         /* FIXME: Respect PSW_S bit.  */
2122         nullify_over(ctx);
2123         tmp = dest_gpr(ctx, rt);
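        /* Under icount, the timer read counts as I/O: flag it with
           gen_io_start and end the TB so the tick count stays exact.  */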
2124         if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
2125             gen_io_start();
2126             gen_helper_read_interval_timer(tmp);
2127             ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2128         } else {
2129             gen_helper_read_interval_timer(tmp);
2130         }
2131         save_gpr(ctx, rt, tmp);
2132         return nullify_end(ctx);
2133     case 26:
2134     case 27:
2135         break;
2136     default:
2137         /* All other control registers are privileged.  */
2138         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2139         break;
2140     }
2141 
2142     tmp = get_temp(ctx);
2143     tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2144     save_gpr(ctx, rt, tmp);
2145 
2146  done:
2147     cond_free(&ctx->null_cond);
2148     return true;
2149 }
2150 
2151 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2152 {
2153     unsigned rr = a->r;
2154     unsigned rs = a->sp;
2155     TCGv_i64 t64;
2156 
2157     if (rs >= 5) {
2158         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2159     }
2160     nullify_over(ctx);
2161 
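    /* Widen the GPR value and shift it into the high half, mirroring
       MFSP.  Only SR0-SR3 are TCG globals; the higher space registers
       are stored directly into env.  */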
2162     t64 = tcg_temp_new_i64();
2163     tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2164     tcg_gen_shli_i64(t64, t64, 32);
2165 
2166     if (rs >= 4) {
2167         tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
2168         ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2169     } else {
2170         tcg_gen_mov_i64(cpu_sr[rs], t64);
2171     }
2172     tcg_temp_free_i64(t64);
2173 
2174     return nullify_end(ctx);
2175 }
2176 
2177 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2178 {
2179     unsigned ctl = a->t;
2180     TCGv_reg reg;
2181     TCGv_reg tmp;
2182 
2183     if (ctl == CR_SAR) {
2184         reg = load_gpr(ctx, a->r);
2185         tmp = tcg_temp_new();
2186         tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
2187         save_or_nullify(ctx, cpu_sar, tmp);
2188         tcg_temp_free(tmp);
2189 
2190         cond_free(&ctx->null_cond);
2191         return true;
2192     }
2193 
2194     /* All other control registers are privileged or read-only.  */
2195     CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2196 
2197 #ifndef CONFIG_USER_ONLY
2198     nullify_over(ctx);
2199     reg = load_gpr(ctx, a->r);
2200 
2201     switch (ctl) {
2202     case CR_IT:
2203         gen_helper_write_interval_timer(cpu_env, reg);
2204         break;
2205     case CR_EIRR:
2206         gen_helper_write_eirr(cpu_env, reg);
2207         break;
2208     case CR_EIEM:
2209         gen_helper_write_eiem(cpu_env, reg);
2210         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2211         break;
2212 
2213     case CR_IIASQ:
2214     case CR_IIAOQ:
2215         /* FIXME: Respect PSW_Q bit */
2216         /* The write advances the queue and stores to the back element.  */
2217         tmp = get_temp(ctx);
2218         tcg_gen_ld_reg(tmp, cpu_env,
2219                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2220         tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2221         tcg_gen_st_reg(reg, cpu_env,
2222                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2223         break;
2224 
2225     case CR_PID1:
2226     case CR_PID2:
2227     case CR_PID3:
2228     case CR_PID4:
2229         tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2230 #ifndef CONFIG_USER_ONLY
2231         gen_helper_change_prot_id(cpu_env);
2232 #endif
2233         break;
2234 
2235     default:
2236         tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2237         break;
2238     }
2239     return nullify_end(ctx);
2240 #endif
2241 }
2242 
2243 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2244 {
2245     TCGv_reg tmp = tcg_temp_new();
2246 
2247     tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
2248     tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
2249     save_or_nullify(ctx, cpu_sar, tmp);
2250     tcg_temp_free(tmp);
2251 
2252     cond_free(&ctx->null_cond);
2253     return true;
2254 }
2255 
2256 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2257 {
2258     TCGv_reg dest = dest_gpr(ctx, a->t);
2259 
2260 #ifdef CONFIG_USER_ONLY
2261     /* We don't implement space registers in user mode. */
2262     tcg_gen_movi_reg(dest, 0);
2263 #else
2264     TCGv_i64 t0 = tcg_temp_new_i64();
2265 
2266     tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2267     tcg_gen_shri_i64(t0, t0, 32);
2268     tcg_gen_trunc_i64_reg(dest, t0);
2269 
2270     tcg_temp_free_i64(t0);
2271 #endif
2272     save_gpr(ctx, a->t, dest);
2273 
2274     cond_free(&ctx->null_cond);
2275     return true;
2276 }
2277 
2278 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2279 {
2280     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2281 #ifndef CONFIG_USER_ONLY
2282     TCGv_reg tmp;
2283 
2284     nullify_over(ctx);
2285 
2286     tmp = get_temp(ctx);
2287     tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
2288     tcg_gen_andi_reg(tmp, tmp, ~a->i);
2289     gen_helper_swap_system_mask(tmp, cpu_env, tmp);
2290     save_gpr(ctx, a->t, tmp);
2291 
2292     /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
2293     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2294     return nullify_end(ctx);
2295 #endif
2296 }
2297 
2298 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2299 {
2300     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2301 #ifndef CONFIG_USER_ONLY
2302     TCGv_reg tmp;
2303 
2304     nullify_over(ctx);
2305 
2306     tmp = get_temp(ctx);
2307     tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
2308     tcg_gen_ori_reg(tmp, tmp, a->i);
2309     gen_helper_swap_system_mask(tmp, cpu_env, tmp);
2310     save_gpr(ctx, a->t, tmp);
2311 
2312     /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
2313     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2314     return nullify_end(ctx);
2315 #endif
2316 }
2317 
2318 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2319 {
2320     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2321 #ifndef CONFIG_USER_ONLY
2322     TCGv_reg tmp, reg;
2323     nullify_over(ctx);
2324 
2325     reg = load_gpr(ctx, a->r);
2326     tmp = get_temp(ctx);
2327     gen_helper_swap_system_mask(tmp, cpu_env, reg);
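    /* The old mask value returned in TMP is discarded;
       MTSM has no target register.  */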
2328 
2329     /* Exit the TB to recognize new interrupts.  */
2330     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2331     return nullify_end(ctx);
2332 #endif
2333 }
2334 
2335 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2336 {
2337     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2338 #ifndef CONFIG_USER_ONLY
2339     nullify_over(ctx);
2340 
2341     if (rfi_r) {
2342         gen_helper_rfi_r(cpu_env);
2343     } else {
2344         gen_helper_rfi(cpu_env);
2345     }
2346     /* Exit the TB to recognize new interrupts.  */
2347     tcg_gen_exit_tb(NULL, 0);
2348     ctx->base.is_jmp = DISAS_NORETURN;
2349 
2350     return nullify_end(ctx);
2351 #endif
2352 }
2353 
2354 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2355 {
2356     return do_rfi(ctx, false);
2357 }
2358 
2359 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2360 {
2361     return do_rfi(ctx, true);
2362 }
2363 
2364 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2365 {
2366     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2367 #ifndef CONFIG_USER_ONLY
2368     nullify_over(ctx);
2369     gen_helper_halt(cpu_env);
2370     ctx->base.is_jmp = DISAS_NORETURN;
2371     return nullify_end(ctx);
2372 #endif
2373 }
2374 
2375 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2376 {
2377     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2378 #ifndef CONFIG_USER_ONLY
2379     nullify_over(ctx);
2380     gen_helper_reset(cpu_env);
2381     ctx->base.is_jmp = DISAS_NORETURN;
2382     return nullify_end(ctx);
2383 #endif
2384 }
2385 
2386 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2387 {
2388     if (a->m) {
2389         TCGv_reg dest = dest_gpr(ctx, a->b);
2390         TCGv_reg src1 = load_gpr(ctx, a->b);
2391         TCGv_reg src2 = load_gpr(ctx, a->x);
2392 
2393         /* The only thing we need to do is the base register modification.  */
2394         tcg_gen_add_reg(dest, src1, src2);
2395         save_gpr(ctx, a->b, dest);
2396     }
2397     cond_free(&ctx->null_cond);
2398     return true;
2399 }
2400 
2401 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2402 {
2403     TCGv_reg dest, ofs;
2404     TCGv_i32 level, want;
2405     TCGv_tl addr;
2406 
2407     nullify_over(ctx);
2408 
2409     dest = dest_gpr(ctx, a->t);
2410     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2411 
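    /* The privilege level to test comes either from the immediate
       or from the low two bits of GR[ri].  */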
2412     if (a->imm) {
2413         level = tcg_constant_i32(a->ri);
2414     } else {
2415         level = tcg_temp_new_i32();
2416         tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
2417         tcg_gen_andi_i32(level, level, 3);
2418     }
2419     want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2420 
2421     gen_helper_probe(dest, cpu_env, addr, level, want);
2422 
2423     tcg_temp_free_i32(level);
2424 
2425     save_gpr(ctx, a->t, dest);
2426     return nullify_end(ctx);
2427 }
2428 
2429 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2430 {
2431     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2432 #ifndef CONFIG_USER_ONLY
2433     TCGv_tl addr;
2434     TCGv_reg ofs, reg;
2435 
2436     nullify_over(ctx);
2437 
2438     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2439     reg = load_gpr(ctx, a->r);
2440     if (a->addr) {
2441         gen_helper_itlba(cpu_env, addr, reg);
2442     } else {
2443         gen_helper_itlbp(cpu_env, addr, reg);
2444     }
2445 
2446     /* Exit TB for TLB change if mmu is enabled.  */
2447     if (ctx->tb_flags & PSW_C) {
2448         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2449     }
2450     return nullify_end(ctx);
2451 #endif
2452 }
2453 
2454 static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
2455 {
2456     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2457 #ifndef CONFIG_USER_ONLY
2458     TCGv_tl addr;
2459     TCGv_reg ofs;
2460 
2461     nullify_over(ctx);
2462 
2463     form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2464     if (a->m) {
2465         save_gpr(ctx, a->b, ofs);
2466     }
2467     if (a->local) {
2468         gen_helper_ptlbe(cpu_env);
2469     } else {
2470         gen_helper_ptlb(cpu_env, addr);
2471     }
2472 
2473     /* Exit TB for TLB change if mmu is enabled.  */
2474     if (ctx->tb_flags & PSW_C) {
2475         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2476     }
2477     return nullify_end(ctx);
2478 #endif
2479 }
2480 
2481 /*
2482  * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2483  * See
2484  *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2485  *     page 13-9 (195/206)
2486  */
2487 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2488 {
2489     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2490 #ifndef CONFIG_USER_ONLY
2491     TCGv_tl addr, atl, stl;
2492     TCGv_reg reg;
2493 
2494     nullify_over(ctx);
2495 
2496     /*
2497      * FIXME:
2498      *  if (not (pcxl or pcxl2))
2499      *    return gen_illegal(ctx);
2500      *
2501      * Note for future: these are 32-bit systems; no hppa64.
2502      */
2503 
2504     atl = tcg_temp_new_tl();
2505     stl = tcg_temp_new_tl();
2506     addr = tcg_temp_new_tl();
2507 
2508     tcg_gen_ld32u_i64(stl, cpu_env,
2509                       a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2510                       : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2511     tcg_gen_ld32u_i64(atl, cpu_env,
2512                       a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2513                       : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2514     tcg_gen_shli_i64(stl, stl, 32);
2515     tcg_gen_or_tl(addr, atl, stl);
2516     tcg_temp_free_tl(atl);
2517     tcg_temp_free_tl(stl);
2518 
2519     reg = load_gpr(ctx, a->r);
2520     if (a->addr) {
2521         gen_helper_itlba(cpu_env, addr, reg);
2522     } else {
2523         gen_helper_itlbp(cpu_env, addr, reg);
2524     }
2525     tcg_temp_free_tl(addr);
2526 
2527     /* Exit TB for TLB change if mmu is enabled.  */
2528     if (ctx->tb_flags & PSW_C) {
2529         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2530     }
2531     return nullify_end(ctx);
2532 #endif
2533 }
2534 
2535 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2536 {
2537     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2538 #ifndef CONFIG_USER_ONLY
2539     TCGv_tl vaddr;
2540     TCGv_reg ofs, paddr;
2541 
2542     nullify_over(ctx);
2543 
2544     form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2545 
2546     paddr = tcg_temp_new();
2547     gen_helper_lpa(paddr, cpu_env, vaddr);
2548 
2549     /* Note that physical address result overrides base modification.  */
2550     if (a->m) {
2551         save_gpr(ctx, a->b, ofs);
2552     }
2553     save_gpr(ctx, a->t, paddr);
2554     tcg_temp_free(paddr);
2555 
2556     return nullify_end(ctx);
2557 #endif
2558 }
2559 
2560 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2561 {
2562     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2563 
2564     /* The Coherence Index is an implementation-defined function of the
2565        physical address.  Two addresses with the same CI have a coherent
2566        view of the cache.  Our implementation is to return 0 for all,
2567        since the entire address space is coherent.  */
2568     save_gpr(ctx, a->t, tcg_constant_reg(0));
2569 
2570     cond_free(&ctx->null_cond);
2571     return true;
2572 }
2573 
2574 static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
2575 {
2576     return do_add_reg(ctx, a, false, false, false, false);
2577 }
2578 
2579 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
2580 {
2581     return do_add_reg(ctx, a, true, false, false, false);
2582 }
2583 
2584 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2585 {
2586     return do_add_reg(ctx, a, false, true, false, false);
2587 }
2588 
2589 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
2590 {
2591     return do_add_reg(ctx, a, false, false, false, true);
2592 }
2593 
2594 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2595 {
2596     return do_add_reg(ctx, a, false, true, false, true);
2597 }
2598 
2599 static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
2600 {
2601     return do_sub_reg(ctx, a, false, false, false);
2602 }
2603 
2604 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
2605 {
2606     return do_sub_reg(ctx, a, true, false, false);
2607 }
2608 
2609 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
2610 {
2611     return do_sub_reg(ctx, a, false, false, true);
2612 }
2613 
2614 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
2615 {
2616     return do_sub_reg(ctx, a, true, false, true);
2617 }
2618 
2619 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
2620 {
2621     return do_sub_reg(ctx, a, false, true, false);
2622 }
2623 
2624 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
2625 {
2626     return do_sub_reg(ctx, a, true, true, false);
2627 }
2628 
2629 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
2630 {
2631     return do_log_reg(ctx, a, tcg_gen_andc_reg);
2632 }
2633 
2634 static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
2635 {
2636     return do_log_reg(ctx, a, tcg_gen_and_reg);
2637 }
2638 
2639 static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
2640 {
2641     if (a->cf == 0) {
2642         unsigned r2 = a->r2;
2643         unsigned r1 = a->r1;
2644         unsigned rt = a->t;
2645 
2646         if (rt == 0) { /* NOP */
2647             cond_free(&ctx->null_cond);
2648             return true;
2649         }
2650         if (r2 == 0) { /* COPY */
2651             if (r1 == 0) {
2652                 TCGv_reg dest = dest_gpr(ctx, rt);
2653                 tcg_gen_movi_reg(dest, 0);
2654                 save_gpr(ctx, rt, dest);
2655             } else {
2656                 save_gpr(ctx, rt, cpu_gr[r1]);
2657             }
2658             cond_free(&ctx->null_cond);
2659             return true;
2660         }
2661 #ifndef CONFIG_USER_ONLY
2662         /* These are QEMU extensions and are nops in the real architecture:
2663          *
2664          * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2665          * or %r31,%r31,%r31 -- death loop; offline cpu
2666          *                      currently implemented as idle.
2667          */
2668         if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2669             /* No need to check for supervisor, as userland can only pause
2670                until the next timer interrupt.  */
2671             nullify_over(ctx);
2672 
2673             /* Advance the instruction queue.  */
2674             copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2675             copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2676             nullify_set(ctx, 0);
2677 
2678             /* Tell the qemu main loop to halt until this cpu has work.  */
2679             tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
2680                            offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2681             gen_excp_1(EXCP_HALTED);
2682             ctx->base.is_jmp = DISAS_NORETURN;
2683 
2684             return nullify_end(ctx);
2685         }
2686 #endif
2687     }
2688     return do_log_reg(ctx, a, tcg_gen_or_reg);
2689 }
2690 
2691 static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
2692 {
2693     return do_log_reg(ctx, a, tcg_gen_xor_reg);
2694 }
2695 
2696 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
2697 {
2698     TCGv_reg tcg_r1, tcg_r2;
2699 
2700     if (a->cf) {
2701         nullify_over(ctx);
2702     }
2703     tcg_r1 = load_gpr(ctx, a->r1);
2704     tcg_r2 = load_gpr(ctx, a->r2);
2705     do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
2706     return nullify_end(ctx);
2707 }
2708 
2709 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
2710 {
2711     TCGv_reg tcg_r1, tcg_r2;
2712 
2713     if (a->cf) {
2714         nullify_over(ctx);
2715     }
2716     tcg_r1 = load_gpr(ctx, a->r1);
2717     tcg_r2 = load_gpr(ctx, a->r2);
2718     do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
2719     return nullify_end(ctx);
2720 }
2721 
2722 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
2723 {
2724     TCGv_reg tcg_r1, tcg_r2, tmp;
2725 
2726     if (a->cf) {
2727         nullify_over(ctx);
2728     }
2729     tcg_r1 = load_gpr(ctx, a->r1);
2730     tcg_r2 = load_gpr(ctx, a->r2);
2731     tmp = get_temp(ctx);
2732     tcg_gen_not_reg(tmp, tcg_r2);
2733     do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
2734     return nullify_end(ctx);
2735 }
2736 
2737 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
2738 {
2739     return do_uaddcm(ctx, a, false);
2740 }
2741 
2742 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
2743 {
2744     return do_uaddcm(ctx, a, true);
2745 }
2746 
2747 static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
2748 {
2749     TCGv_reg tmp;
2750 
2751     nullify_over(ctx);
2752 
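    /* Build the decimal correction: take one carry bit per BCD nibble
       out of PSW[CB] (inverted for the plain, non-',I' form) and scale
       it to 6, to be added to or subtracted from each digit.  */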
2753     tmp = get_temp(ctx);
2754     tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
2755     if (!is_i) {
2756         tcg_gen_not_reg(tmp, tmp);
2757     }
2758     tcg_gen_andi_reg(tmp, tmp, 0x11111111);
2759     tcg_gen_muli_reg(tmp, tmp, 6);
2760     do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false,
2761             is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
2762     return nullify_end(ctx);
2763 }
2764 
2765 static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
2766 {
2767     return do_dcor(ctx, a, false);
2768 }
2769 
2770 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
2771 {
2772     return do_dcor(ctx, a, true);
2773 }
2774 
2775 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2776 {
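    /* DS: one step of the shift-and-add/subtract divide primitive.
       Each step shifts the partial remainder left one bit and then
       adds or subtracts R2, steered by PSW[V].  */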
2777     TCGv_reg dest, add1, add2, addc, zero, in1, in2;
2778 
2779     nullify_over(ctx);
2780 
2781     in1 = load_gpr(ctx, a->r1);
2782     in2 = load_gpr(ctx, a->r2);
2783 
2784     add1 = tcg_temp_new();
2785     add2 = tcg_temp_new();
2786     addc = tcg_temp_new();
2787     dest = tcg_temp_new();
2788     zero = tcg_constant_reg(0);
2789 
2790     /* Form R1 << 1 | PSW[CB]{8}.  */
2791     tcg_gen_add_reg(add1, in1, in1);
2792     tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);
2793 
2794     /* Add or subtract R2, depending on PSW[V].  Proper computation of
2795        carry{8} requires that we subtract via + ~R2 + 1, as described in
2796        the manual.  By extracting and masking V, we can produce the
2797        proper inputs to the addition without movcond.  */
2798     tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
2799     tcg_gen_xor_reg(add2, in2, addc);
2800     tcg_gen_andi_reg(addc, addc, 1);
2801     /* ??? This is only correct for 32-bit.  */
2802     tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2803     tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2804 
2805     tcg_temp_free(addc);
2806 
2807     /* Write back the result register.  */
2808     save_gpr(ctx, a->t, dest);
2809 
2810     /* Write back PSW[CB].  */
2811     tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2812     tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
2813 
2814     /* Write back PSW[V] for the division step.  */
2815     tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
2816     tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
2817 
2818     /* Install the new nullification.  */
2819     if (a->cf) {
2820         TCGv_reg sv = NULL;
2821         if (cond_need_sv(a->cf >> 1)) {
2822             /* ??? The lshift is supposed to contribute to overflow.  */
2823             sv = do_add_sv(ctx, dest, add1, add2);
2824         }
2825         ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv);
2826     }
2827 
2828     tcg_temp_free(add1);
2829     tcg_temp_free(add2);
2830     tcg_temp_free(dest);
2831 
2832     return nullify_end(ctx);
2833 }
2834 
2835 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2836 {
2837     return do_add_imm(ctx, a, false, false);
2838 }
2839 
2840 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2841 {
2842     return do_add_imm(ctx, a, true, false);
2843 }
2844 
2845 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2846 {
2847     return do_add_imm(ctx, a, false, true);
2848 }
2849 
2850 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2851 {
2852     return do_add_imm(ctx, a, true, true);
2853 }
2854 
2855 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2856 {
2857     return do_sub_imm(ctx, a, false);
2858 }
2859 
2860 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2861 {
2862     return do_sub_imm(ctx, a, true);
2863 }
2864 
2865 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
2866 {
2867     TCGv_reg tcg_im, tcg_r2;
2868 
2869     if (a->cf) {
2870         nullify_over(ctx);
2871     }
2872 
2873     tcg_im = load_const(ctx, a->i);
2874     tcg_r2 = load_gpr(ctx, a->r);
2875     do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);
2876 
2877     return nullify_end(ctx);
2878 }
2879 
2880 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
2881 {
2882     return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2883                    a->disp, a->sp, a->m, a->size | MO_TE);
2884 }
2885 
2886 static bool trans_st(DisasContext *ctx, arg_ldst *a)
2887 {
2888     assert(a->x == 0 && a->scale == 0);
2889     return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2890 }
2891 
2892 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
2893 {
2894     MemOp mop = MO_TE | MO_ALIGN | a->size;
2895     TCGv_reg zero, dest, ofs;
2896     TCGv_tl addr;
2897 
2898     nullify_over(ctx);
2899 
2900     if (a->m) {
2901         /* Base register modification.  Make sure that if RT == RB,
2902            we see the result of the load.  */
2903         dest = get_temp(ctx);
2904     } else {
2905         dest = dest_gpr(ctx, a->t);
2906     }
2907 
2908     form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2909              a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
2910 
2911     /*
2912      * For hppa1.1, LDCW is undefined unless aligned mod 16.
2913      * However actual hardware succeeds with aligned mod 4.
2914      * Detect this case and log a GUEST_ERROR.
2915      *
2916      * TODO: HPPA64 relaxes the over-alignment requirement
2917      * with the ,co completer.
2918      */
2919     gen_helper_ldc_check(addr);
2920 
2921     zero = tcg_constant_reg(0);
2922     tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
2923 
2924     if (a->m) {
2925         save_gpr(ctx, a->b, ofs);
2926     }
2927     save_gpr(ctx, a->t, dest);
2928 
2929     return nullify_end(ctx);
2930 }
2931 
2932 static bool trans_stby(DisasContext *ctx, arg_stby *a)
2933 {
2934     TCGv_reg ofs, val;
2935     TCGv_tl addr;
2936 
2937     nullify_over(ctx);
2938 
2939     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
2940              ctx->mmu_idx == MMU_PHYS_IDX);
2941     val = load_gpr(ctx, a->r);
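    /* a->a selects the STBY,E (ending) case, otherwise STBY,B
       (beginning); the _parallel helpers supply the atomic variants
       required when the TB may race with another cpu.  */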
2942     if (a->a) {
2943         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2944             gen_helper_stby_e_parallel(cpu_env, addr, val);
2945         } else {
2946             gen_helper_stby_e(cpu_env, addr, val);
2947         }
2948     } else {
2949         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2950             gen_helper_stby_b_parallel(cpu_env, addr, val);
2951         } else {
2952             gen_helper_stby_b(cpu_env, addr, val);
2953         }
2954     }
2955     if (a->m) {
2956         tcg_gen_andi_reg(ofs, ofs, ~3);
2957         save_gpr(ctx, a->b, ofs);
2958     }
2959 
2960     return nullify_end(ctx);
2961 }
2962 
2963 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
2964 {
2965     int hold_mmu_idx = ctx->mmu_idx;
2966 
2967     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2968     ctx->mmu_idx = MMU_PHYS_IDX;
2969     trans_ld(ctx, a);
2970     ctx->mmu_idx = hold_mmu_idx;
2971     return true;
2972 }
2973 
2974 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
2975 {
2976     int hold_mmu_idx = ctx->mmu_idx;
2977 
2978     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2979     ctx->mmu_idx = MMU_PHYS_IDX;
2980     trans_st(ctx, a);
2981     ctx->mmu_idx = hold_mmu_idx;
2982     return true;
2983 }
2984 
2985 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
2986 {
2987     TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
2988 
2989     tcg_gen_movi_reg(tcg_rt, a->i);
2990     save_gpr(ctx, a->t, tcg_rt);
2991     cond_free(&ctx->null_cond);
2992     return true;
2993 }
2994 
2995 static bool trans_addil(DisasContext *ctx, arg_addil *a)
2996 {
2997     TCGv_reg tcg_rt = load_gpr(ctx, a->r);
2998     TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
2999 
3000     tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
3001     save_gpr(ctx, 1, tcg_r1);
3002     cond_free(&ctx->null_cond);
3003     return true;
3004 }
3005 
3006 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
3007 {
3008     TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
3009 
3010     /* Special case rb == 0, for the LDI pseudo-op.
3011        The COPY pseudo-op is handled for free within tcg_gen_addi_tl.  */
3012     if (a->b == 0) {
3013         tcg_gen_movi_reg(tcg_rt, a->i);
3014     } else {
3015         tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
3016     }
3017     save_gpr(ctx, a->t, tcg_rt);
3018     cond_free(&ctx->null_cond);
3019     return true;
3020 }
3021 
3022 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3023                     unsigned c, unsigned f, unsigned n, int disp)
3024 {
3025     TCGv_reg dest, in2, sv;
3026     DisasCond cond;
3027 
3028     in2 = load_gpr(ctx, r);
3029     dest = get_temp(ctx);
3030 
3031     tcg_gen_sub_reg(dest, in1, in2);
3032 
3033     sv = NULL;
3034     if (cond_need_sv(c)) {
3035         sv = do_sub_sv(ctx, dest, in1, in2);
3036     }
3037 
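    /* The F bit negates the sense of the test; fold it into the low
       bit of the condition number expected by do_sub_cond.  */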
3038     cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
3039     return do_cbranch(ctx, disp, n, &cond);
3040 }
3041 
3042 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3043 {
3044     nullify_over(ctx);
3045     return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3046 }
3047 
3048 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3049 {
3050     nullify_over(ctx);
3051     return do_cmpb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3052 }
3053 
3054 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3055                     unsigned c, unsigned f, unsigned n, int disp)
3056 {
3057     TCGv_reg dest, in2, sv, cb_msb;
3058     DisasCond cond;
3059 
3060     in2 = load_gpr(ctx, r);
3061     dest = tcg_temp_new();
3062     sv = NULL;
3063     cb_msb = NULL;
3064 
3065     if (cond_need_cb(c)) {
3066         cb_msb = get_temp(ctx);
3067         tcg_gen_movi_reg(cb_msb, 0);
3068         tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3069     } else {
3070         tcg_gen_add_reg(dest, in1, in2);
3071     }
3072     if (cond_need_sv(c)) {
3073         sv = do_add_sv(ctx, dest, in1, in2);
3074     }
3075 
3076     cond = do_cond(c * 2 + f, dest, cb_msb, sv);
3077     save_gpr(ctx, r, dest);
3078     tcg_temp_free(dest);
3079     return do_cbranch(ctx, disp, n, &cond);
3080 }
3081 
3082 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3083 {
3084     nullify_over(ctx);
3085     return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3086 }
3087 
3088 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3089 {
3090     nullify_over(ctx);
3091     return do_addb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3092 }
3093 
3094 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3095 {
3096     TCGv_reg tmp, tcg_r;
3097     DisasCond cond;
3098 
3099     nullify_over(ctx);
3100 
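    /* SAR counts bits from the MSB, so shifting left by SAR moves the
       selected bit into the sign position, where LT/GE can test it.  */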
3101     tmp = tcg_temp_new();
3102     tcg_r = load_gpr(ctx, a->r);
3103     tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
3104 
3105     cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3106     tcg_temp_free(tmp);
3107     return do_cbranch(ctx, a->disp, a->n, &cond);
3108 }
3109 
3110 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3111 {
3112     TCGv_reg tmp, tcg_r;
3113     DisasCond cond;
3114 
3115     nullify_over(ctx);
3116 
3117     tmp = tcg_temp_new();
3118     tcg_r = load_gpr(ctx, a->r);
3119     tcg_gen_shli_reg(tmp, tcg_r, a->p);
3120 
3121     cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3122     tcg_temp_free(tmp);
3123     return do_cbranch(ctx, a->disp, a->n, &cond);
3124 }
3125 
3126 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3127 {
3128     TCGv_reg dest;
3129     DisasCond cond;
3130 
3131     nullify_over(ctx);
3132 
3133     dest = dest_gpr(ctx, a->r2);
3134     if (a->r1 == 0) {
3135         tcg_gen_movi_reg(dest, 0);
3136     } else {
3137         tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
3138     }
3139 
3140     cond = do_sed_cond(a->c, dest);
3141     return do_cbranch(ctx, a->disp, a->n, &cond);
3142 }
3143 
3144 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3145 {
3146     TCGv_reg dest;
3147     DisasCond cond;
3148 
3149     nullify_over(ctx);
3150 
3151     dest = dest_gpr(ctx, a->r);
3152     tcg_gen_movi_reg(dest, a->i);
3153 
3154     cond = do_sed_cond(a->c, dest);
3155     return do_cbranch(ctx, a->disp, a->n, &cond);
3156 }
3157 
3158 static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
3159 {
3160     TCGv_reg dest;
3161 
3162     if (a->c) {
3163         nullify_over(ctx);
3164     }
3165 
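    /* Three cases: R1 == 0 is a plain 32-bit shift right; R1 == R2 is
       a rotate; otherwise shift the 64-bit concatenation R1:R2 and
       keep the low word.  */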
3166     dest = dest_gpr(ctx, a->t);
3167     if (a->r1 == 0) {
3168         tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
3169         tcg_gen_shr_reg(dest, dest, cpu_sar);
3170     } else if (a->r1 == a->r2) {
3171         TCGv_i32 t32 = tcg_temp_new_i32();
3172         tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
3173         tcg_gen_rotr_i32(t32, t32, cpu_sar);
3174         tcg_gen_extu_i32_reg(dest, t32);
3175         tcg_temp_free_i32(t32);
3176     } else {
3177         TCGv_i64 t = tcg_temp_new_i64();
3178         TCGv_i64 s = tcg_temp_new_i64();
3179 
3180         tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
3181         tcg_gen_extu_reg_i64(s, cpu_sar);
3182         tcg_gen_shr_i64(t, t, s);
3183         tcg_gen_trunc_i64_reg(dest, t);
3184 
3185         tcg_temp_free_i64(t);
3186         tcg_temp_free_i64(s);
3187     }
3188     save_gpr(ctx, a->t, dest);
3189 
3190     /* Install the new nullification.  */
3191     cond_free(&ctx->null_cond);
3192     if (a->c) {
3193         ctx->null_cond = do_sed_cond(a->c, dest);
3194     }
3195     return nullify_end(ctx);
3196 }
3197 
3198 static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
3199 {
3200     unsigned sa = 31 - a->cpos;
3201     TCGv_reg dest, t2;
3202 
3203     if (a->c) {
3204         nullify_over(ctx);
3205     }
3206 
3207     dest = dest_gpr(ctx, a->t);
3208     t2 = load_gpr(ctx, a->r2);
3209     if (a->r1 == 0) {
3210         tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
3211     } else if (TARGET_REGISTER_BITS == 32) {
3212         tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa);
3213     } else if (a->r1 == a->r2) {
3214         TCGv_i32 t32 = tcg_temp_new_i32();
3215         tcg_gen_trunc_reg_i32(t32, t2);
3216         tcg_gen_rotri_i32(t32, t32, sa);
3217         tcg_gen_extu_i32_reg(dest, t32);
3218         tcg_temp_free_i32(t32);
3219     } else {
3220         TCGv_i64 t64 = tcg_temp_new_i64();
3221         tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]);
3222         tcg_gen_shri_i64(t64, t64, sa);
3223         tcg_gen_trunc_i64_reg(dest, t64);
3224         tcg_temp_free_i64(t64);
3225     }
3226     save_gpr(ctx, a->t, dest);
3227 
3228     /* Install the new nullification.  */
3229     cond_free(&ctx->null_cond);
3230     if (a->c) {
3231         ctx->null_cond = do_sed_cond(a->c, dest);
3232     }
3233     return nullify_end(ctx);
3234 }
3235 
3236 static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
3237 {
3238     unsigned len = 32 - a->clen;
3239     TCGv_reg dest, src, tmp;
3240 
3241     if (a->c) {
3242         nullify_over(ctx);
3243     }
3244 
3245     dest = dest_gpr(ctx, a->t);
3246     src = load_gpr(ctx, a->r);
3247     tmp = tcg_temp_new();
3248 
3249     /* Recall that SAR uses big-endian bit numbering.  */
3250     tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
3251     if (a->se) {
3252         tcg_gen_sar_reg(dest, src, tmp);
3253         tcg_gen_sextract_reg(dest, dest, 0, len);
3254     } else {
3255         tcg_gen_shr_reg(dest, src, tmp);
3256         tcg_gen_extract_reg(dest, dest, 0, len);
3257     }
3258     tcg_temp_free(tmp);
3259     save_gpr(ctx, a->t, dest);
3260 
3261     /* Install the new nullification.  */
3262     cond_free(&ctx->null_cond);
3263     if (a->c) {
3264         ctx->null_cond = do_sed_cond(a->c, dest);
3265     }
3266     return nullify_end(ctx);
3267 }
3268 
3269 static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
3270 {
3271     unsigned len = 32 - a->clen;
3272     unsigned cpos = 31 - a->pos;
3273     TCGv_reg dest, src;
3274 
3275     if (a->c) {
3276         nullify_over(ctx);
3277     }
3278 
3279     dest = dest_gpr(ctx, a->t);
3280     src = load_gpr(ctx, a->r);
3281     if (a->se) {
3282         tcg_gen_sextract_reg(dest, src, cpos, len);
3283     } else {
3284         tcg_gen_extract_reg(dest, src, cpos, len);
3285     }
3286     save_gpr(ctx, a->t, dest);
3287 
3288     /* Install the new nullification.  */
3289     cond_free(&ctx->null_cond);
3290     if (a->c) {
3291         ctx->null_cond = do_sed_cond(a->c, dest);
3292     }
3293     return nullify_end(ctx);
3294 }
3295 
3296 static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
3297 {
3298     unsigned len = 32 - a->clen;
3299     target_sreg mask0, mask1;
3300     TCGv_reg dest;
3301 
3302     if (a->c) {
3303         nullify_over(ctx);
3304     }
3305     if (a->cpos + len > 32) {
3306         len = 32 - a->cpos;
3307     }
3308 
3309     dest = dest_gpr(ctx, a->t);
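    /* Precompute the deposit as two masks: the immediate dropped into
       all-zeros (mask0) and into all-ones (mask1).  AND with mask1
       then OR with mask0 performs the insertion.  */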
3310     mask0 = deposit64(0, a->cpos, len, a->i);
3311     mask1 = deposit64(-1, a->cpos, len, a->i);
3312 
3313     if (a->nz) {
3314         TCGv_reg src = load_gpr(ctx, a->t);
3315         if (mask1 != -1) {
3316             tcg_gen_andi_reg(dest, src, mask1);
3317             src = dest;
3318         }
3319         tcg_gen_ori_reg(dest, src, mask0);
3320     } else {
3321         tcg_gen_movi_reg(dest, mask0);
3322     }
3323     save_gpr(ctx, a->t, dest);
3324 
3325     /* Install the new nullification.  */
3326     cond_free(&ctx->null_cond);
3327     if (a->c) {
3328         ctx->null_cond = do_sed_cond(a->c, dest);
3329     }
3330     return nullify_end(ctx);
3331 }
3332 
3333 static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
3334 {
3335     unsigned rs = a->nz ? a->t : 0;
3336     unsigned len = 32 - a->clen;
3337     TCGv_reg dest, val;
3338 
3339     if (a->c) {
3340         nullify_over(ctx);
3341     }
3342     if (a->cpos + len > 32) {
3343         len = 32 - a->cpos;
3344     }
3345 
3346     dest = dest_gpr(ctx, a->t);
3347     val = load_gpr(ctx, a->r);
3348     if (rs == 0) {
3349         tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
3350     } else {
3351         tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
3352     }
3353     save_gpr(ctx, a->t, dest);
3354 
3355     /* Install the new nullification.  */
3356     cond_free(&ctx->null_cond);
3357     if (a->c) {
3358         ctx->null_cond = do_sed_cond(a->c, dest);
3359     }
3360     return nullify_end(ctx);
3361 }
3362 
3363 static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
3364                         unsigned nz, unsigned clen, TCGv_reg val)
3365 {
3366     unsigned rs = nz ? rt : 0;
3367     unsigned len = 32 - clen;
3368     TCGv_reg mask, tmp, shift, dest;
3369     unsigned msb = 1U << (len - 1);
3370 
3371     dest = dest_gpr(ctx, rt);
3372     shift = tcg_temp_new();
3373     tmp = tcg_temp_new();
3374 
3375     /* Convert big-endian bit numbering in SAR to left-shift.  */
3376     tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
3377 
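    /* MASK covers the len-bit field to deposit; shift field and mask
       into position and, for a nonzero RS, clear the field in RS
       before merging.  */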
3378     mask = tcg_const_reg(msb + (msb - 1));
3379     tcg_gen_and_reg(tmp, val, mask);
3380     if (rs) {
3381         tcg_gen_shl_reg(mask, mask, shift);
3382         tcg_gen_shl_reg(tmp, tmp, shift);
3383         tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3384         tcg_gen_or_reg(dest, dest, tmp);
3385     } else {
3386         tcg_gen_shl_reg(dest, tmp, shift);
3387     }
3388     tcg_temp_free(shift);
3389     tcg_temp_free(mask);
3390     tcg_temp_free(tmp);
3391     save_gpr(ctx, rt, dest);
3392 
3393     /* Install the new nullification.  */
3394     cond_free(&ctx->null_cond);
3395     if (c) {
3396         ctx->null_cond = do_sed_cond(c, dest);
3397     }
3398     return nullify_end(ctx);
3399 }
3400 
3401 static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
3402 {
3403     if (a->c) {
3404         nullify_over(ctx);
3405     }
3406     return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
3407 }
3408 
3409 static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
3410 {
3411     if (a->c) {
3412         nullify_over(ctx);
3413     }
3414     return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_const(ctx, a->i));
3415 }
3416 
3417 static bool trans_be(DisasContext *ctx, arg_be *a)
3418 {
3419     TCGv_reg tmp;
3420 
3421 #ifdef CONFIG_USER_ONLY
3422     /* ??? It seems like there should be a good way of using
3423        "be disp(sr2, r0)", the canonical gateway entry mechanism
3424        to our advantage.  But that appears to be inconvenient to
3425        manage along side branch delay slots.  Therefore we handle
3426        entry into the gateway page via absolute address.  */
3427     /* Since we don't implement spaces, just branch.  Do notice the special
3428        case of "be disp(*,r0)" using a direct branch to disp, so that we can
3429        goto_tb to the TB containing the syscall.  */
3430     if (a->b == 0) {
3431         return do_dbranch(ctx, a->disp, a->l, a->n);
3432     }
3433 #else
3434     nullify_over(ctx);
3435 #endif
3436 
3437     tmp = get_temp(ctx);
3438     tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
3439     tmp = do_ibranch_priv(ctx, tmp);
3440 
3441 #ifdef CONFIG_USER_ONLY
3442     return do_ibranch(ctx, tmp, a->l, a->n);
3443 #else
3444     TCGv_i64 new_spc = tcg_temp_new_i64();
3445 
3446     load_spr(ctx, new_spc, a->sp);
3447     if (a->l) {
3448         copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3449         tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3450     }
3451     if (a->n && use_nullify_skip(ctx)) {
3452         tcg_gen_mov_reg(cpu_iaoq_f, tmp);
3453         tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
3454         tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3455         tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3456     } else {
3457         copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3458         if (ctx->iaoq_b == -1) {
3459             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3460         }
3461         tcg_gen_mov_reg(cpu_iaoq_b, tmp);
3462         tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3463         nullify_set(ctx, a->n);
3464     }
3465     tcg_temp_free_i64(new_spc);
3466     tcg_gen_lookup_and_goto_ptr();
3467     ctx->base.is_jmp = DISAS_NORETURN;
3468     return nullify_end(ctx);
3469 #endif
3470 }
3471 
3472 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3473 {
3474     return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3475 }
3476 
3477 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3478 {
3479     target_ureg dest = iaoq_dest(ctx, a->disp);
3480 
3481     nullify_over(ctx);
3482 
3483     /* Make sure the caller hasn't done something weird with the queue.
3484      * ??? This is not quite the same as the PSW[B] bit, which would be
3485      * expensive to track.  Real hardware will trap for
3486      *    b  gateway
3487      *    b  gateway+4  (in delay slot of first branch)
3488      * However, checking for a non-sequential instruction queue *will*
3489      * diagnose the security hole
3490      *    b  gateway
3491      *    b  evil
3492      * in which instructions at evil would run with increased privilege.
3493      */
3494     if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3495         return gen_illegal(ctx);
3496     }
3497 
3498 #ifndef CONFIG_USER_ONLY
3499     if (ctx->tb_flags & PSW_C) {
3500         CPUHPPAState *env = ctx->cs->env_ptr;
3501         int type = hppa_artype_for_page(env, ctx->base.pc_next);
3502         /* If we could not find a TLB entry, then we need to generate an
3503            ITLB miss exception so the kernel will provide it.
3504            The resulting TLB fill operation will invalidate this TB and
3505            we will re-translate, at which point we *will* be able to find
3506            the TLB entry and determine if this is in fact a gateway page.  */
3507         if (type < 0) {
3508             gen_excp(ctx, EXCP_ITLB_MISS);
3509             return true;
3510         }
3511         /* No change for non-gateway pages or for priv decrease.  */
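        /* Access rights types 4-7 denote gateway pages; the promoted
           privilege level, type - 4, lands in the low two bits of the
           offset, where HPPA keeps the privilege level.  */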
3512         if (type >= 4 && type - 4 < ctx->privilege) {
3513             dest = deposit32(dest, 0, 2, type - 4);
3514         }
3515     } else {
3516         dest &= -4;  /* priv = 0 */
3517     }
3518 #endif
3519 
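    /* B,GATE deposits the caller's privilege level into the low two
       bits of the link register, leaving its other bits unchanged.  */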
3520     if (a->l) {
3521         TCGv_reg tmp = dest_gpr(ctx, a->l);
3522         if (ctx->privilege < 3) {
3523             tcg_gen_andi_reg(tmp, tmp, -4);
3524         }
3525         tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
3526         save_gpr(ctx, a->l, tmp);
3527     }
3528 
3529     return do_dbranch(ctx, dest, 0, a->n);
3530 }
3531 
3532 static bool trans_blr(DisasContext *ctx, arg_blr *a)
3533 {
3534     if (a->x) {
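        /* The target is the address of the second following insn plus
           eight times the index register; hence the shift by 3 below.  */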
3535         TCGv_reg tmp = get_temp(ctx);
3536         tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
3537         tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
3538         /* The computation here never changes privilege level.  */
3539         return do_ibranch(ctx, tmp, a->l, a->n);
3540     } else {
3541         /* BLR R0,RX is a good way to load PC+8 into RX.  */
3542         return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3543     }
3544 }
3545 
3546 static bool trans_bv(DisasContext *ctx, arg_bv *a)
3547 {
3548     TCGv_reg dest;
3549 
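    /* The BV target is GR b, optionally plus eight times GR x.  */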
3550     if (a->x == 0) {
3551         dest = load_gpr(ctx, a->b);
3552     } else {
3553         dest = get_temp(ctx);
3554         tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
3555         tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
3556     }
3557     dest = do_ibranch_priv(ctx, dest);
3558     return do_ibranch(ctx, dest, 0, a->n);
3559 }
3560 
3561 static bool trans_bve(DisasContext *ctx, arg_bve *a)
3562 {
3563     TCGv_reg dest;
3564 
3565 #ifdef CONFIG_USER_ONLY
3566     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3567     return do_ibranch(ctx, dest, a->l, a->n);
3568 #else
3569     nullify_over(ctx);
3570     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3571 
3572     copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3573     if (ctx->iaoq_b == -1) {
3574         tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3575     }
3576     copy_iaoq_entry(cpu_iaoq_b, -1, dest);
3577     tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3578     if (a->l) {
3579         copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3580     }
3581     nullify_set(ctx, a->n);
3582     tcg_gen_lookup_and_goto_ptr();
3583     ctx->base.is_jmp = DISAS_NORETURN;
3584     return nullify_end(ctx);
3585 #endif
3586 }
3587 
3588 /*
3589  * Float class 0
3590  */
3591 
3592 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3593 {
3594     tcg_gen_mov_i32(dst, src);
3595 }
3596 
3597 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3598 {
3599     return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3600 }
3601 
3602 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3603 {
3604     tcg_gen_mov_i64(dst, src);
3605 }
3606 
3607 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3608 {
3609     return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
3610 }
3611 
3612 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3613 {
3614     tcg_gen_andi_i32(dst, src, INT32_MAX);
3615 }
3616 
3617 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
3618 {
3619     return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3620 }
3621 
3622 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3623 {
3624     tcg_gen_andi_i64(dst, src, INT64_MAX);
3625 }
3626 
3627 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
3628 {
3629     return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
3630 }
3631 
3632 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
3633 {
3634     return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3635 }
3636 
3637 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
3638 {
3639     return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3640 }
3641 
3642 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
3643 {
3644     return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3645 }
3646 
3647 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
3648 {
3649     return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
3650 }
3651 
3652 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3653 {
3654     tcg_gen_xori_i32(dst, src, INT32_MIN);
3655 }
3656 
3657 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
3658 {
3659     return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3660 }
3661 
3662 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3663 {
3664     tcg_gen_xori_i64(dst, src, INT64_MIN);
3665 }
3666 
3667 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3668 {
3669     return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3670 }
3671 
3672 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3673 {
3674     tcg_gen_ori_i32(dst, src, INT32_MIN);
3675 }
3676 
3677 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3678 {
3679     return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3680 }
3681 
3682 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3683 {
3684     tcg_gen_ori_i64(dst, src, INT64_MIN);
3685 }
3686 
3687 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3688 {
3689     return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
3690 }
3691 
3692 /*
3693  * Float class 1
3694  */
3695 
3696 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
3697 {
3698     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
3699 }
3700 
3701 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
3702 {
3703     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
3704 }
3705 
3706 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
3707 {
3708     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
3709 }
3710 
3711 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
3712 {
3713     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
3714 }
3715 
3716 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
3717 {
3718     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
3719 }
3720 
3721 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
3722 {
3723     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
3724 }
3725 
3726 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
3727 {
3728     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
3729 }
3730 
3731 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
3732 {
3733     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
3734 }
3735 
3736 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
3737 {
3738     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
3739 }
3740 
3741 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
3742 {
3743     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
3744 }
3745 
3746 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
3747 {
3748     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
3749 }
3750 
3751 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
3752 {
3753     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
3754 }
3755 
3756 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
3757 {
3758     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
3759 }
3760 
3761 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
3762 {
3763     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
3764 }
3765 
3766 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
3767 {
3768     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
3769 }
3770 
3771 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
3772 {
3773     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
3774 }
3775 
3776 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
3777 {
3778     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
3779 }
3780 
3781 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
3782 {
3783     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
3784 }
3785 
3786 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
3787 {
3788     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
3789 }
3790 
3791 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
3792 {
3793     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
3794 }
3795 
3796 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
3797 {
3798     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
3799 }
3800 
3801 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
3802 {
3803     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
3804 }
3805 
3806 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
3807 {
3808     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
3809 }
3810 
3811 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
3812 {
3813     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
3814 }
3815 
3816 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
3817 {
3818     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
3819 }
3820 
3821 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
3822 {
3823     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
3824 }
3825 
3826 /*
3827  * Float class 2
3828  */
3829 
3830 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
3831 {
3832     TCGv_i32 ta, tb, tc, ty;
3833 
3834     nullify_over(ctx);
3835 
3836     ta = load_frw0_i32(a->r1);
3837     tb = load_frw0_i32(a->r2);
3838     ty = tcg_constant_i32(a->y);
3839     tc = tcg_constant_i32(a->c);
3840 
3841     gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);
3842 
3843     tcg_temp_free_i32(ta);
3844     tcg_temp_free_i32(tb);
3845 
3846     return nullify_end(ctx);
3847 }
3848 
3849 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
3850 {
3851     TCGv_i64 ta, tb;
3852     TCGv_i32 tc, ty;
3853 
3854     nullify_over(ctx);
3855 
3856     ta = load_frd0(a->r1);
3857     tb = load_frd0(a->r2);
3858     ty = tcg_constant_i32(a->y);
3859     tc = tcg_constant_i32(a->c);
3860 
3861     gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);
3862 
3863     tcg_temp_free_i64(ta);
3864     tcg_temp_free_i64(tb);
3865 
3866     return nullify_end(ctx);
3867 }
3868 
3869 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
3870 {
3871     TCGv_reg t;
3872 
3873     nullify_over(ctx);
3874 
3875     t = get_temp(ctx);
3876     tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
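    /* fr0_shadow tracks the FP status register: 0x4000000 is the FPSR
       C (compare) bit, and the wider masks take in the compare queue
       as well.  */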
3877 
3878     if (a->y == 1) {
3879         int mask;
3880         bool inv = false;
3881 
3882         switch (a->c) {
3883         case 0: /* simple */
3884             tcg_gen_andi_reg(t, t, 0x4000000);
3885             ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3886             goto done;
3887         case 2: /* rej */
3888             inv = true;
3889             /* fallthru */
3890         case 1: /* acc */
3891             mask = 0x43ff800;
3892             break;
3893         case 6: /* rej8 */
3894             inv = true;
3895             /* fallthru */
3896         case 5: /* acc8 */
3897             mask = 0x43f8000;
3898             break;
3899         case 9: /* acc6 */
3900             mask = 0x43e0000;
3901             break;
3902         case 13: /* acc4 */
3903             mask = 0x4380000;
3904             break;
3905         case 17: /* acc2 */
3906             mask = 0x4200000;
3907             break;
3908         default:
3909             gen_illegal(ctx);
3910             return true;
3911         }
3912         if (inv) {
3913             TCGv_reg c = load_const(ctx, mask);
3914             tcg_gen_or_reg(t, t, c);
3915             ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3916         } else {
3917             tcg_gen_andi_reg(t, t, mask);
3918             ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
3919         }
3920     } else {
3921         unsigned cbit = (a->y ^ 1) - 1;
3922 
3923         tcg_gen_extract_reg(t, t, 21 - cbit, 1);
3924         ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3925         tcg_temp_free(t);
3926     }
3927 
3928  done:
3929     return nullify_end(ctx);
3930 }
3931 
3932 /*
3933  * Float class 3
3934  */
3935 
3936 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
3937 {
3938     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
3939 }
3940 
3941 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
3942 {
3943     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
3944 }
3945 
3946 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
3947 {
3948     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
3949 }
3950 
3951 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
3952 {
3953     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
3954 }
3955 
3956 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
3957 {
3958     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
3959 }
3960 
3961 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
3962 {
3963     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
3964 }
3965 
3966 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
3967 {
3968     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
3969 }
3970 
3971 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
3972 {
3973     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
3974 }
3975 
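/* XMPYU: fixed-point unsigned 32x32->64-bit multiply performed in the
   FP registers.  */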
3976 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
3977 {
3978     TCGv_i64 x, y;
3979 
3980     nullify_over(ctx);
3981 
3982     x = load_frw0_i64(a->r1);
3983     y = load_frw0_i64(a->r2);
3984     tcg_gen_mul_i64(x, x, y);
3985     save_frd(a->t, x);
3986     tcg_temp_free_i64(x);
3987     tcg_temp_free_i64(y);
3988 
3989     return nullify_end(ctx);
3990 }
3991 
3992 /* Convert the fmpyadd single-precision register encodings to the standard numbering.  */
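/* The 5-bit encoding addresses fr16-fr31: values 0..15 map to 16..31 and
   16..31 to 48..63, with bit 4 presumably selecting the other word half
   in the flattened numbering used by the load_frw* helpers.  */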
3993 static inline int fmpyadd_s_reg(unsigned r)
3994 {
3995     return (r & 16) * 2 + 16 + (r & 15);
3996 }
3997 
3998 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
3999 {
4000     int tm = fmpyadd_s_reg(a->tm);
4001     int ra = fmpyadd_s_reg(a->ra);
4002     int ta = fmpyadd_s_reg(a->ta);
4003     int rm2 = fmpyadd_s_reg(a->rm2);
4004     int rm1 = fmpyadd_s_reg(a->rm1);
4005 
4006     nullify_over(ctx);
4007 
4008     do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4009     do_fop_weww(ctx, ta, ta, ra,
4010                 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4011 
4012     return nullify_end(ctx);
4013 }
4014 
4015 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
4016 {
4017     return do_fmpyadd_s(ctx, a, false);
4018 }
4019 
4020 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
4021 {
4022     return do_fmpyadd_s(ctx, a, true);
4023 }
4024 
4025 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4026 {
4027     nullify_over(ctx);
4028 
4029     do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4030     do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4031                 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4032 
4033     return nullify_end(ctx);
4034 }
4035 
4036 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4037 {
4038     return do_fmpyadd_d(ctx, a, false);
4039 }
4040 
4041 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4042 {
4043     return do_fmpyadd_d(ctx, a, true);
4044 }
4045 
4046 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4047 {
4048     TCGv_i32 x, y, z;
4049 
4050     nullify_over(ctx);
4051     x = load_frw0_i32(a->rm1);
4052     y = load_frw0_i32(a->rm2);
4053     z = load_frw0_i32(a->ra3);
4054 
4055     if (a->neg) {
4056         gen_helper_fmpynfadd_s(x, cpu_env, x, y, z);
4057     } else {
4058         gen_helper_fmpyfadd_s(x, cpu_env, x, y, z);
4059     }
4060 
4061     tcg_temp_free_i32(y);
4062     tcg_temp_free_i32(z);
4063     save_frw_i32(a->t, x);
4064     tcg_temp_free_i32(x);
4065     return nullify_end(ctx);
4066 }
4067 
4068 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4069 {
4070     TCGv_i64 x, y, z;
4071 
4072     nullify_over(ctx);
4073     x = load_frd0(a->rm1);
4074     y = load_frd0(a->rm2);
4075     z = load_frd0(a->ra3);
4076 
4077     if (a->neg) {
4078         gen_helper_fmpynfadd_d(x, cpu_env, x, y, z);
4079     } else {
4080         gen_helper_fmpyfadd_d(x, cpu_env, x, y, z);
4081     }
4082 
4083     tcg_temp_free_i64(y);
4084     tcg_temp_free_i64(z);
4085     save_frd(a->t, x);
4086     tcg_temp_free_i64(x);
4087     return nullify_end(ctx);
4088 }
4089 
4090 static bool trans_diag(DisasContext *ctx, arg_diag *a)
4091 {
4092     qemu_log_mask(LOG_UNIMP, "DIAG opcode ignored\n");
4093     cond_free(&ctx->null_cond);
4094     return true;
4095 }
4096 
4097 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4098 {
4099     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4100     int bound;
4101 
4102     ctx->cs = cs;
4103     ctx->tb_flags = ctx->base.tb->flags;
4104 
4105 #ifdef CONFIG_USER_ONLY
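    /* The low two bits of an IAOQ element hold the privilege level;
       user-only code always runs at the least-privileged level, which
       coincides with MMU_USER_IDX.  */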
4106     ctx->privilege = MMU_USER_IDX;
4107     ctx->mmu_idx = MMU_USER_IDX;
4108     ctx->iaoq_f = ctx->base.pc_first | MMU_USER_IDX;
4109     ctx->iaoq_b = ctx->base.tb->cs_base | MMU_USER_IDX;
4110 #else
4111     ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4112     ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX);
4113 
4114     /* Recover the IAOQ values from the GVA + PRIV.  */
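    /* cs_base packs IASQ_F into the high bits and the signed
       IAOQ_B - IAOQ_F displacement into the low 32 bits; a zero
       displacement marks the back of the queue as unknown.  */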
4115     uint64_t cs_base = ctx->base.tb->cs_base;
4116     uint64_t iasq_f = cs_base & ~0xffffffffull;
4117     int32_t diff = cs_base;
4118 
4119     ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4120     ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4121 #endif
4122     ctx->iaoq_n = -1;
4123     ctx->iaoq_n_var = NULL;
4124 
4125     /* Bound the number of instructions by those left on the page.  */
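    /* TARGET_PAGE_MASK is negative, so the OR below yields minus the
       number of bytes left on the page; e.g. with 4 KiB pages and
       pc_first ending in 0xf04, bound = 0xfc / 4 = 63.  */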
4126     bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
4127     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4128 
4129     ctx->ntempr = 0;
4130     ctx->ntempl = 0;
4131     memset(ctx->tempr, 0, sizeof(ctx->tempr));
4132     memset(ctx->templ, 0, sizeof(ctx->templ));
4133 }
4134 
4135 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4136 {
4137     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4138 
4139     /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
4140     ctx->null_cond = cond_make_f();
4141     ctx->psw_n_nonzero = false;
4142     if (ctx->tb_flags & PSW_N) {
4143         ctx->null_cond.c = TCG_COND_ALWAYS;
4144         ctx->psw_n_nonzero = true;
4145     }
4146     ctx->null_lab = NULL;
4147 }
4148 
4149 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4150 {
4151     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4152 
4153     tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4154 }
4155 
4156 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4157 {
4158     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4159     CPUHPPAState *env = cs->env_ptr;
4160     DisasJumpType ret;
4161     int i, n;
4162 
4163     /* Execute one insn.  */
4164 #ifdef CONFIG_USER_ONLY
4165     if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4166         do_page_zero(ctx);
4167         ret = ctx->base.is_jmp;
4168         assert(ret != DISAS_NEXT);
4169     } else
4170 #endif
4171     {
4172         /* Always fetch the insn, even if nullified, so that we check
4173            the page permissions for execute.  */
4174         uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
4175 
4176         /* Set up the IA queue for the next insn.
4177            This will be overwritten by a branch.  */
4178         if (ctx->iaoq_b == -1) {
4179             ctx->iaoq_n = -1;
4180             ctx->iaoq_n_var = get_temp(ctx);
4181             tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4182         } else {
4183             ctx->iaoq_n = ctx->iaoq_b + 4;
4184             ctx->iaoq_n_var = NULL;
4185         }
4186 
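        /* This insn is statically nullified: consume the nullification
           and skip decode entirely.  */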
4187         if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4188             ctx->null_cond.c = TCG_COND_NEVER;
4189             ret = DISAS_NEXT;
4190         } else {
4191             ctx->insn = insn;
4192             if (!decode(ctx, insn)) {
4193                 gen_illegal(ctx);
4194             }
4195             ret = ctx->base.is_jmp;
4196             assert(ctx->null_lab == NULL);
4197         }
4198     }
4199 
4200     /* Free any temporaries allocated.  */
4201     for (i = 0, n = ctx->ntempr; i < n; ++i) {
4202         tcg_temp_free(ctx->tempr[i]);
4203         ctx->tempr[i] = NULL;
4204     }
4205     for (i = 0, n = ctx->ntempl; i < n; ++i) {
4206         tcg_temp_free_tl(ctx->templ[i]);
4207         ctx->templ[i] = NULL;
4208     }
4209     ctx->ntempr = 0;
4210     ctx->ntempl = 0;
4211 
4212     /* Advance the insn queue.  Note that this check also detects
4213        a privilege change within the instruction queue.  */
4214     if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4215         if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4216             && use_goto_tb(ctx, ctx->iaoq_b)
4217             && (ctx->null_cond.c == TCG_COND_NEVER
4218                 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4219             nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4220             gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4221             ctx->base.is_jmp = ret = DISAS_NORETURN;
4222         } else {
4223             ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4224         }
4225     }
4226     ctx->iaoq_f = ctx->iaoq_b;
4227     ctx->iaoq_b = ctx->iaoq_n;
4228     ctx->base.pc_next += 4;
4229 
4230     switch (ret) {
4231     case DISAS_NORETURN:
4232     case DISAS_IAQ_N_UPDATED:
4233         break;
4234 
4235     case DISAS_NEXT:
4236     case DISAS_IAQ_N_STALE:
4237     case DISAS_IAQ_N_STALE_EXIT:
4238         if (ctx->iaoq_f == -1) {
4239             tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
4240             copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4241 #ifndef CONFIG_USER_ONLY
4242             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4243 #endif
4244             nullify_save(ctx);
4245             ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4246                                 ? DISAS_EXIT
4247                                 : DISAS_IAQ_N_UPDATED);
4248         } else if (ctx->iaoq_b == -1) {
4249             tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
4250         }
4251         break;
4252 
4253     default:
4254         g_assert_not_reached();
4255     }
4256 }
4257 
4258 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4259 {
4260     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4261     DisasJumpType is_jmp = ctx->base.is_jmp;
4262 
4263     switch (is_jmp) {
4264     case DISAS_NORETURN:
4265         break;
4266     case DISAS_TOO_MANY:
4267     case DISAS_IAQ_N_STALE:
4268     case DISAS_IAQ_N_STALE_EXIT:
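        /* The cpu globals hold a stale IA queue: write back the
           translator's view and the nullification status before
           chaining.  */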
4269         copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4270         copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4271         nullify_save(ctx);
4272         /* FALLTHRU */
4273     case DISAS_IAQ_N_UPDATED:
4274         if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
4275             tcg_gen_lookup_and_goto_ptr();
4276             break;
4277         }
4278         /* FALLTHRU */
4279     case DISAS_EXIT:
4280         tcg_gen_exit_tb(NULL, 0);
4281         break;
4282     default:
4283         g_assert_not_reached();
4284     }
4285 }
4286 
4287 static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
4288 {
4289     target_ulong pc = dcbase->pc_first;
4290 
4291 #ifdef CONFIG_USER_ONLY
4292     switch (pc) {
4293     case 0x00:
4294         qemu_log("IN:\n0x00000000:  (null)\n");
4295         return;
4296     case 0xb0:
4297         qemu_log("IN:\n0x000000b0:  light-weight-syscall\n");
4298         return;
4299     case 0xe0:
4300         qemu_log("IN:\n0x000000e0:  set-thread-pointer-syscall\n");
4301         return;
4302     case 0x100:
4303         qemu_log("IN:\n0x00000100:  syscall\n");
4304         return;
4305     }
4306 #endif
4307 
4308     qemu_log("IN: %s\n", lookup_symbol(pc));
4309     log_target_disas(cs, pc, dcbase->tb->size);
4310 }
4311 
4312 static const TranslatorOps hppa_tr_ops = {
4313     .init_disas_context = hppa_tr_init_disas_context,
4314     .tb_start           = hppa_tr_tb_start,
4315     .insn_start         = hppa_tr_insn_start,
4316     .translate_insn     = hppa_tr_translate_insn,
4317     .tb_stop            = hppa_tr_tb_stop,
4318     .disas_log          = hppa_tr_disas_log,
4319 };
4320 
4321 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
4322 {
4323     DisasContext ctx;
4324     translator_loop(&hppa_tr_ops, &ctx.base, cs, tb, max_insns);
4325 }
4326 
4327 void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
4328                           target_ulong *data)
4329 {
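    /* data[] holds the operands of insn_start: IAOQ_F and IAOQ_B,
       where (target_ureg)-1 marks IAOQ_B as unknown at translation
       time.  */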
4330     env->iaoq_f = data[0];
4331     if (data[1] != (target_ureg)-1) {
4332         env->iaoq_b = data[1];
4333     }
4334     /* Since we were executing the instruction at IAOQ_F, and took some
4335        sort of action that provoked the cpu_restore_state, we can infer
4336        that the instruction was not nullified.  */
4337     env->psw_n = 0;
4338 }
4339