xref: /qemu/target/s390x/tcg/translate.c (revision 60f782b6)
1 /*
2  *  S/390 translation
3  *
4  *  Copyright (c) 2009 Ulrich Hecht
5  *  Copyright (c) 2010 Alexander Graf
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24 
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 #  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 #  define LOG_DISAS(...) do { } while (0)
29 #endif
30 
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "s390x-internal.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
36 #include "tcg/tcg-op.h"
37 #include "tcg/tcg-op-gvec.h"
38 #include "qemu/log.h"
39 #include "qemu/host-utils.h"
40 #include "exec/cpu_ldst.h"
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
44 
45 #include "exec/translator.h"
46 #include "exec/log.h"
47 #include "qemu/atomic128.h"
48 
49 
50 /* Information that (almost) every instruction needs to manipulate.  */
51 typedef struct DisasContext DisasContext;
52 typedef struct DisasInsn DisasInsn;
53 typedef struct DisasFields DisasFields;
54 
55 /*
56  * Define a structure to hold the decoded fields.  We'll store each inside
57  * an array indexed by an enum.  In order to conserve memory, we'll arrange
58  * for fields that do not exist at the same time to overlap, thus the "C"
59  * for compact.  For checking purposes there is also an "O" for the original
60  * index, which is used with the availability bitmaps.
61  */
62 
63 enum DisasFieldIndexO {
64     FLD_O_r1,
65     FLD_O_r2,
66     FLD_O_r3,
67     FLD_O_m1,
68     FLD_O_m3,
69     FLD_O_m4,
70     FLD_O_m5,
71     FLD_O_m6,
72     FLD_O_b1,
73     FLD_O_b2,
74     FLD_O_b4,
75     FLD_O_d1,
76     FLD_O_d2,
77     FLD_O_d4,
78     FLD_O_x2,
79     FLD_O_l1,
80     FLD_O_l2,
81     FLD_O_i1,
82     FLD_O_i2,
83     FLD_O_i3,
84     FLD_O_i4,
85     FLD_O_i5,
86     FLD_O_v1,
87     FLD_O_v2,
88     FLD_O_v3,
89     FLD_O_v4,
90 };
91 
92 enum DisasFieldIndexC {
93     FLD_C_r1 = 0,
94     FLD_C_m1 = 0,
95     FLD_C_b1 = 0,
96     FLD_C_i1 = 0,
97     FLD_C_v1 = 0,
98 
99     FLD_C_r2 = 1,
100     FLD_C_b2 = 1,
101     FLD_C_i2 = 1,
102 
103     FLD_C_r3 = 2,
104     FLD_C_m3 = 2,
105     FLD_C_i3 = 2,
106     FLD_C_v3 = 2,
107 
108     FLD_C_m4 = 3,
109     FLD_C_b4 = 3,
110     FLD_C_i4 = 3,
111     FLD_C_l1 = 3,
112     FLD_C_v4 = 3,
113 
114     FLD_C_i5 = 4,
115     FLD_C_d1 = 4,
116     FLD_C_m5 = 4,
117 
118     FLD_C_d2 = 5,
119     FLD_C_m6 = 5,
120 
121     FLD_C_d4 = 6,
122     FLD_C_x2 = 6,
123     FLD_C_l2 = 6,
124     FLD_C_v2 = 6,
125 
126     NUM_C_FIELD = 7
127 };
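
/*
 * Illustrative example (not part of the original source): for an
 * RX-style operand set {r1, x2, b2, d2} the compact slots used are
 * FLD_C_r1 = 0, FLD_C_b2 = 1, FLD_C_d2 = 5 and FLD_C_x2 = 6, so all
 * four values fit into c[] without clashing; m1 may share slot 0 with
 * r1 only because, per the scheme above, fields sharing a slot never
 * occur in the same format.
 */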
128 
129 struct DisasFields {
130     uint64_t raw_insn;
131     unsigned op:8;
132     unsigned op2:8;
133     unsigned presentC:16;
134     unsigned int presentO;
135     int c[NUM_C_FIELD];
136 };
137 
138 struct DisasContext {
139     DisasContextBase base;
140     const DisasInsn *insn;
141     TCGOp *insn_start;
142     DisasFields fields;
143     uint64_t ex_value;
144     /*
145      * During translate_one(), pc_tmp is used to determine the instruction
146      * to be executed after base.pc_next - e.g. next sequential instruction
147      * or a branch target.
148      */
149     uint64_t pc_tmp;
150     uint32_t ilen;
151     enum cc_op cc_op;
152     bool exit_to_mainloop;
153 };
154 
155 /* Information carried about a condition to be evaluated.  */
156 typedef struct {
157     TCGCond cond:8;
158     bool is_64;
159     union {
160         struct { TCGv_i64 a, b; } s64;
161         struct { TCGv_i32 a, b; } s32;
162     } u;
163 } DisasCompare;
164 
165 #ifdef DEBUG_INLINE_BRANCHES
166 static uint64_t inline_branch_hit[CC_OP_MAX];
167 static uint64_t inline_branch_miss[CC_OP_MAX];
168 #endif
169 
170 static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
171 {
172     if (s->base.tb->flags & FLAG_MASK_32) {
173         if (s->base.tb->flags & FLAG_MASK_64) {
174             tcg_gen_movi_i64(out, pc);
175             return;
176         }
177         pc |= 0x80000000;
178     }
179     assert(!(s->base.tb->flags & FLAG_MASK_64));
180     tcg_gen_deposit_i64(out, out, tcg_constant_i64(pc), 0, 32);
181 }
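
/*
 * Worked example (illustrative): in 31-bit mode (FLAG_MASK_32 set,
 * FLAG_MASK_64 clear) the code above deposits (pc | 0x80000000) into
 * bits 0-31 of OUT, i.e. the addressing-mode bit is folded into the
 * low word of the link address while the high word of OUT is left
 * untouched.
 */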
182 
183 static TCGv_i64 psw_addr;
184 static TCGv_i64 psw_mask;
185 static TCGv_i64 gbea;
186 
187 static TCGv_i32 cc_op;
188 static TCGv_i64 cc_src;
189 static TCGv_i64 cc_dst;
190 static TCGv_i64 cc_vr;
191 
192 static char cpu_reg_names[16][4];
193 static TCGv_i64 regs[16];
194 
195 void s390x_translate_init(void)
196 {
197     int i;
198 
199     psw_addr = tcg_global_mem_new_i64(cpu_env,
200                                       offsetof(CPUS390XState, psw.addr),
201                                       "psw_addr");
202     psw_mask = tcg_global_mem_new_i64(cpu_env,
203                                       offsetof(CPUS390XState, psw.mask),
204                                       "psw_mask");
205     gbea = tcg_global_mem_new_i64(cpu_env,
206                                   offsetof(CPUS390XState, gbea),
207                                   "gbea");
208 
209     cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
210                                    "cc_op");
211     cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
212                                     "cc_src");
213     cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
214                                     "cc_dst");
215     cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
216                                    "cc_vr");
217 
218     for (i = 0; i < 16; i++) {
219         snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
220         regs[i] = tcg_global_mem_new(cpu_env,
221                                      offsetof(CPUS390XState, regs[i]),
222                                      cpu_reg_names[i]);
223     }
224 }
225 
226 static inline int vec_full_reg_offset(uint8_t reg)
227 {
228     g_assert(reg < 32);
229     return offsetof(CPUS390XState, vregs[reg][0]);
230 }
231 
232 static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
233 {
234     /* Convert element size (es) - e.g. MO_8 - to bytes */
235     const uint8_t bytes = 1 << es;
236     int offs = enr * bytes;
237 
238     /*
239      * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
240      * of the 16 byte vector, on both, little and big endian systems.
241      *
242      * Big Endian (target/possible host)
243      * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
244      * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
245      * W:  [             0][             1] - [             2][             3]
246      * DW: [                             0] - [                             1]
247      *
248      * Little Endian (possible host)
249      * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
250      * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
251      * W:  [             1][             0] - [             3][             2]
252      * DW: [                             0] - [                             1]
253      *
254      * For 16 byte elements, the two 8 byte halves will not form a host
255      * int128 if the host is little endian, since they're in the wrong order.
256      * Some operations (e.g. xor) do not care. For operations like addition,
257      * the two 8 byte elements have to be loaded separately. Let's force all
258      * 16 byte operations to handle it in a special way.
259      */
260     g_assert(es <= MO_64);
261 #if !HOST_BIG_ENDIAN
262     offs ^= (8 - bytes);
263 #endif
264     return offs + vec_full_reg_offset(reg);
265 }
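
/*
 * Worked example (illustrative, assuming the layout above): on a
 * little-endian host, vec_reg_offset(v, 1, MO_32) computes
 * offs = 1 * 4 = 4 and then offs ^= (8 - 4), yielding 0, so element 1
 * lives in host bytes 0-3, matching the "W: [1][0]" row.  On a
 * big-endian host the same call returns offset 4.
 */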
266 
267 static inline int freg64_offset(uint8_t reg)
268 {
269     g_assert(reg < 16);
270     return vec_reg_offset(reg, 0, MO_64);
271 }
272 
273 static inline int freg32_offset(uint8_t reg)
274 {
275     g_assert(reg < 16);
276     return vec_reg_offset(reg, 0, MO_32);
277 }
278 
279 static TCGv_i64 load_reg(int reg)
280 {
281     TCGv_i64 r = tcg_temp_new_i64();
282     tcg_gen_mov_i64(r, regs[reg]);
283     return r;
284 }
285 
286 static TCGv_i64 load_freg(int reg)
287 {
288     TCGv_i64 r = tcg_temp_new_i64();
289 
290     tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
291     return r;
292 }
293 
294 static TCGv_i64 load_freg32_i64(int reg)
295 {
296     TCGv_i64 r = tcg_temp_new_i64();
297 
298     tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
299     return r;
300 }
301 
302 static TCGv_i128 load_freg_128(int reg)
303 {
304     TCGv_i64 h = load_freg(reg);
305     TCGv_i64 l = load_freg(reg + 2);
306     TCGv_i128 r = tcg_temp_new_i128();
307 
308     tcg_gen_concat_i64_i128(r, l, h);
309     return r;
310 }
311 
312 static void store_reg(int reg, TCGv_i64 v)
313 {
314     tcg_gen_mov_i64(regs[reg], v);
315 }
316 
317 static void store_freg(int reg, TCGv_i64 v)
318 {
319     tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
320 }
321 
322 static void store_reg32_i64(int reg, TCGv_i64 v)
323 {
324     /* 32 bit register writes keep the upper half */
325     tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
326 }
327 
328 static void store_reg32h_i64(int reg, TCGv_i64 v)
329 {
330     tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
331 }
332 
333 static void store_freg32_i64(int reg, TCGv_i64 v)
334 {
335     tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
336 }
337 
338 static void update_psw_addr(DisasContext *s)
339 {
340     /* psw.addr */
341     tcg_gen_movi_i64(psw_addr, s->base.pc_next);
342 }
343 
344 static void per_branch(DisasContext *s, bool to_next)
345 {
346 #ifndef CONFIG_USER_ONLY
347     tcg_gen_movi_i64(gbea, s->base.pc_next);
348 
349     if (s->base.tb->flags & FLAG_MASK_PER) {
350         TCGv_i64 next_pc = to_next ? tcg_constant_i64(s->pc_tmp) : psw_addr;
351         gen_helper_per_branch(cpu_env, gbea, next_pc);
352     }
353 #endif
354 }
355 
356 static void per_branch_cond(DisasContext *s, TCGCond cond,
357                             TCGv_i64 arg1, TCGv_i64 arg2)
358 {
359 #ifndef CONFIG_USER_ONLY
360     if (s->base.tb->flags & FLAG_MASK_PER) {
361         TCGLabel *lab = gen_new_label();
362         tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);
363 
364         tcg_gen_movi_i64(gbea, s->base.pc_next);
365         gen_helper_per_branch(cpu_env, gbea, psw_addr);
366 
367         gen_set_label(lab);
368     } else {
369         TCGv_i64 pc = tcg_constant_i64(s->base.pc_next);
370         tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
371     }
372 #endif
373 }
374 
375 static void per_breaking_event(DisasContext *s)
376 {
377     tcg_gen_movi_i64(gbea, s->base.pc_next);
378 }
379 
380 static void update_cc_op(DisasContext *s)
381 {
382     if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
383         tcg_gen_movi_i32(cc_op, s->cc_op);
384     }
385 }
386 
387 static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
388                                 uint64_t pc)
389 {
390     return (uint64_t)translator_lduw(env, &s->base, pc);
391 }
392 
393 static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
394                                 uint64_t pc)
395 {
396     return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
397 }
398 
399 static int get_mem_index(DisasContext *s)
400 {
401 #ifdef CONFIG_USER_ONLY
402     return MMU_USER_IDX;
403 #else
404     if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
405         return MMU_REAL_IDX;
406     }
407 
408     switch (s->base.tb->flags & FLAG_MASK_ASC) {
409     case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
410         return MMU_PRIMARY_IDX;
411     case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
412         return MMU_SECONDARY_IDX;
413     case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
414         return MMU_HOME_IDX;
415     default:
416         g_assert_not_reached();
418     }
419 #endif
420 }
421 
422 static void gen_exception(int excp)
423 {
424     gen_helper_exception(cpu_env, tcg_constant_i32(excp));
425 }
426 
427 static void gen_program_exception(DisasContext *s, int code)
428 {
429     /* Remember what pgm exception this was.  */
430     tcg_gen_st_i32(tcg_constant_i32(code), cpu_env,
431                    offsetof(CPUS390XState, int_pgm_code));
432 
433     tcg_gen_st_i32(tcg_constant_i32(s->ilen), cpu_env,
434                    offsetof(CPUS390XState, int_pgm_ilen));
435 
436     /* update the psw */
437     update_psw_addr(s);
438 
439     /* Save off cc.  */
440     update_cc_op(s);
441 
442     /* Trigger exception.  */
443     gen_exception(EXCP_PGM);
444 }
445 
446 static inline void gen_illegal_opcode(DisasContext *s)
447 {
448     gen_program_exception(s, PGM_OPERATION);
449 }
450 
451 static inline void gen_data_exception(uint8_t dxc)
452 {
453     gen_helper_data_exception(cpu_env, tcg_constant_i32(dxc));
454 }
455 
456 static inline void gen_trap(DisasContext *s)
457 {
458     /* Set DXC to 0xff */
459     gen_data_exception(0xff);
460 }
461 
462 static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
463                                   int64_t imm)
464 {
465     tcg_gen_addi_i64(dst, src, imm);
466     if (!(s->base.tb->flags & FLAG_MASK_64)) {
467         if (s->base.tb->flags & FLAG_MASK_32) {
468             tcg_gen_andi_i64(dst, dst, 0x7fffffff);
469         } else {
470             tcg_gen_andi_i64(dst, dst, 0x00ffffff);
471         }
472     }
473 }
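
/*
 * Example (illustrative): with FLAG_MASK_64 clear and FLAG_MASK_32 set
 * (31-bit mode), the mask above makes address arithmetic wrap below
 * bit 31, e.g. gen_addi_and_wrap_i64(s, dst, src, 1) with src holding
 * 0x7fffffff produces 0, not 0x80000000.
 */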
474 
475 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
476 {
477     TCGv_i64 tmp = tcg_temp_new_i64();
478 
479     /*
480      * Note that d2 is limited to 20 bits, signed.  If we crop negative
481      * displacements early we create larger immediate addends.
482      */
483     if (b2 && x2) {
484         tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
485         gen_addi_and_wrap_i64(s, tmp, tmp, d2);
486     } else if (b2) {
487         gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
488     } else if (x2) {
489         gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
490     } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
491         if (s->base.tb->flags & FLAG_MASK_32) {
492             tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
493         } else {
494             tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
495         }
496     } else {
497         tcg_gen_movi_i64(tmp, d2);
498     }
499 
500     return tmp;
501 }
502 
503 static inline bool live_cc_data(DisasContext *s)
504 {
505     return (s->cc_op != CC_OP_DYNAMIC
506             && s->cc_op != CC_OP_STATIC
507             && s->cc_op > 3);
508 }
509 
510 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
511 {
512     if (live_cc_data(s)) {
513         tcg_gen_discard_i64(cc_src);
514         tcg_gen_discard_i64(cc_dst);
515         tcg_gen_discard_i64(cc_vr);
516     }
517     s->cc_op = CC_OP_CONST0 + val;
518 }
519 
520 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
521 {
522     if (live_cc_data(s)) {
523         tcg_gen_discard_i64(cc_src);
524         tcg_gen_discard_i64(cc_vr);
525     }
526     tcg_gen_mov_i64(cc_dst, dst);
527     s->cc_op = op;
528 }
529 
530 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
531                                   TCGv_i64 dst)
532 {
533     if (live_cc_data(s)) {
534         tcg_gen_discard_i64(cc_vr);
535     }
536     tcg_gen_mov_i64(cc_src, src);
537     tcg_gen_mov_i64(cc_dst, dst);
538     s->cc_op = op;
539 }
540 
541 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
542                                   TCGv_i64 dst, TCGv_i64 vr)
543 {
544     tcg_gen_mov_i64(cc_src, src);
545     tcg_gen_mov_i64(cc_dst, dst);
546     tcg_gen_mov_i64(cc_vr, vr);
547     s->cc_op = op;
548 }
549 
550 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
551 {
552     gen_op_update1_cc_i64(s, CC_OP_NZ, val);
553 }
554 
555 /* CC value is in env->cc_op */
556 static void set_cc_static(DisasContext *s)
557 {
558     if (live_cc_data(s)) {
559         tcg_gen_discard_i64(cc_src);
560         tcg_gen_discard_i64(cc_dst);
561         tcg_gen_discard_i64(cc_vr);
562     }
563     s->cc_op = CC_OP_STATIC;
564 }
565 
566 /* calculates cc into cc_op */
567 static void gen_op_calc_cc(DisasContext *s)
568 {
569     TCGv_i32 local_cc_op = NULL;
570     TCGv_i64 dummy = NULL;
571 
572     switch (s->cc_op) {
573     default:
574         dummy = tcg_constant_i64(0);
575         /* FALLTHRU */
576     case CC_OP_ADD_64:
577     case CC_OP_SUB_64:
578     case CC_OP_ADD_32:
579     case CC_OP_SUB_32:
580         local_cc_op = tcg_constant_i32(s->cc_op);
581         break;
582     case CC_OP_CONST0:
583     case CC_OP_CONST1:
584     case CC_OP_CONST2:
585     case CC_OP_CONST3:
586     case CC_OP_STATIC:
587     case CC_OP_DYNAMIC:
588         break;
589     }
590 
591     switch (s->cc_op) {
592     case CC_OP_CONST0:
593     case CC_OP_CONST1:
594     case CC_OP_CONST2:
595     case CC_OP_CONST3:
596         /* s->cc_op is the cc value */
597         tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
598         break;
599     case CC_OP_STATIC:
600         /* env->cc_op already is the cc value */
601         break;
602     case CC_OP_NZ:
603         tcg_gen_setcondi_i64(TCG_COND_NE, cc_dst, cc_dst, 0);
604         tcg_gen_extrl_i64_i32(cc_op, cc_dst);
605         break;
606     case CC_OP_ABS_64:
607     case CC_OP_NABS_64:
608     case CC_OP_ABS_32:
609     case CC_OP_NABS_32:
610     case CC_OP_LTGT0_32:
611     case CC_OP_LTGT0_64:
612     case CC_OP_COMP_32:
613     case CC_OP_COMP_64:
614     case CC_OP_NZ_F32:
615     case CC_OP_NZ_F64:
616     case CC_OP_FLOGR:
617     case CC_OP_LCBB:
618     case CC_OP_MULS_32:
619         /* 1 argument */
620         gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
621         break;
622     case CC_OP_ADDU:
623     case CC_OP_ICM:
624     case CC_OP_LTGT_32:
625     case CC_OP_LTGT_64:
626     case CC_OP_LTUGTU_32:
627     case CC_OP_LTUGTU_64:
628     case CC_OP_TM_32:
629     case CC_OP_TM_64:
630     case CC_OP_SLA:
631     case CC_OP_SUBU:
632     case CC_OP_NZ_F128:
633     case CC_OP_VC:
634     case CC_OP_MULS_64:
635         /* 2 arguments */
636         gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
637         break;
638     case CC_OP_ADD_64:
639     case CC_OP_SUB_64:
640     case CC_OP_ADD_32:
641     case CC_OP_SUB_32:
642         /* 3 arguments */
643         gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
644         break;
645     case CC_OP_DYNAMIC:
646         /* unknown operation - assume 3 arguments and cc_op in env */
647         gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
648         break;
649     default:
650         g_assert_not_reached();
651     }
652 
653     /* We now have cc in cc_op as constant */
654     set_cc_static(s);
655 }
656 
657 static bool use_goto_tb(DisasContext *s, uint64_t dest)
658 {
659     if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
660         return false;
661     }
662     return translator_use_goto_tb(&s->base, dest);
663 }
664 
665 static void account_noninline_branch(DisasContext *s, int cc_op)
666 {
667 #ifdef DEBUG_INLINE_BRANCHES
668     inline_branch_miss[cc_op]++;
669 #endif
670 }
671 
672 static void account_inline_branch(DisasContext *s, int cc_op)
673 {
674 #ifdef DEBUG_INLINE_BRANCHES
675     inline_branch_hit[cc_op]++;
676 #endif
677 }
678 
679 /* Table of mask values to comparison codes, given a comparison as input.
680    For such inputs, CC=3 should not be possible.  */
681 static const TCGCond ltgt_cond[16] = {
682     TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
683     TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
684     TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
685     TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
686     TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
687     TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
688     TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
689     TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
690 };
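
/*
 * Example (illustrative): mask 8|4 permits CC=0 (EQ) and CC=1 (LT), so
 * entries 12 and 13 of this table yield TCG_COND_LE; the "x" column
 * (CC=3) is ignored since it cannot occur for comparison results.
 */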
691 
692 /* Table of mask values to comparison codes, given a logic op as input.
693    For such inputs, only CC=0 and CC=1 should be possible.  */
694 static const TCGCond nz_cond[16] = {
695     TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
696     TCG_COND_NEVER, TCG_COND_NEVER,
697     TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
698     TCG_COND_NE, TCG_COND_NE,
699     TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
700     TCG_COND_EQ, TCG_COND_EQ,
701     TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
702     TCG_COND_ALWAYS, TCG_COND_ALWAYS,
703 };
704 
705 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
706    details required to generate a TCG comparison.  */
707 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
708 {
709     TCGCond cond;
710     enum cc_op old_cc_op = s->cc_op;
711 
712     if (mask == 15 || mask == 0) {
713         c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
714         c->u.s32.a = cc_op;
715         c->u.s32.b = cc_op;
716         c->is_64 = false;
717         return;
718     }
719 
720     /* Find the TCG condition for the mask + cc op.  */
721     switch (old_cc_op) {
722     case CC_OP_LTGT0_32:
723     case CC_OP_LTGT0_64:
724     case CC_OP_LTGT_32:
725     case CC_OP_LTGT_64:
726         cond = ltgt_cond[mask];
727         if (cond == TCG_COND_NEVER) {
728             goto do_dynamic;
729         }
730         account_inline_branch(s, old_cc_op);
731         break;
732 
733     case CC_OP_LTUGTU_32:
734     case CC_OP_LTUGTU_64:
735         cond = tcg_unsigned_cond(ltgt_cond[mask]);
736         if (cond == TCG_COND_NEVER) {
737             goto do_dynamic;
738         }
739         account_inline_branch(s, old_cc_op);
740         break;
741 
742     case CC_OP_NZ:
743         cond = nz_cond[mask];
744         if (cond == TCG_COND_NEVER) {
745             goto do_dynamic;
746         }
747         account_inline_branch(s, old_cc_op);
748         break;
749 
750     case CC_OP_TM_32:
751     case CC_OP_TM_64:
752         switch (mask) {
753         case 8:
754             cond = TCG_COND_EQ;
755             break;
756         case 4 | 2 | 1:
757             cond = TCG_COND_NE;
758             break;
759         default:
760             goto do_dynamic;
761         }
762         account_inline_branch(s, old_cc_op);
763         break;
764 
765     case CC_OP_ICM:
766         switch (mask) {
767         case 8:
768             cond = TCG_COND_EQ;
769             break;
770         case 4 | 2 | 1:
771         case 4 | 2:
772             cond = TCG_COND_NE;
773             break;
774         default:
775             goto do_dynamic;
776         }
777         account_inline_branch(s, old_cc_op);
778         break;
779 
780     case CC_OP_FLOGR:
781         switch (mask & 0xa) {
782         case 8: /* src == 0 -> no one bit found */
783             cond = TCG_COND_EQ;
784             break;
785         case 2: /* src != 0 -> one bit found */
786             cond = TCG_COND_NE;
787             break;
788         default:
789             goto do_dynamic;
790         }
791         account_inline_branch(s, old_cc_op);
792         break;
793 
794     case CC_OP_ADDU:
795     case CC_OP_SUBU:
796         switch (mask) {
797         case 8 | 2: /* result == 0 */
798             cond = TCG_COND_EQ;
799             break;
800         case 4 | 1: /* result != 0 */
801             cond = TCG_COND_NE;
802             break;
803         case 8 | 4: /* !carry (borrow) */
804             cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
805             break;
806         case 2 | 1: /* carry (!borrow) */
807             cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
808             break;
809         default:
810             goto do_dynamic;
811         }
812         account_inline_branch(s, old_cc_op);
813         break;
814 
815     default:
816     do_dynamic:
817         /* Calculate cc value.  */
818         gen_op_calc_cc(s);
819         /* FALLTHRU */
820 
821     case CC_OP_STATIC:
822         /* Jump based on CC.  We'll load up the real cond below;
823            the assignment here merely avoids a compiler warning.  */
824         account_noninline_branch(s, old_cc_op);
825         old_cc_op = CC_OP_STATIC;
826         cond = TCG_COND_NEVER;
827         break;
828     }
829 
830     /* Load up the arguments of the comparison.  */
831     c->is_64 = true;
832     switch (old_cc_op) {
833     case CC_OP_LTGT0_32:
834         c->is_64 = false;
835         c->u.s32.a = tcg_temp_new_i32();
836         tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
837         c->u.s32.b = tcg_constant_i32(0);
838         break;
839     case CC_OP_LTGT_32:
840     case CC_OP_LTUGTU_32:
841         c->is_64 = false;
842         c->u.s32.a = tcg_temp_new_i32();
843         tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
844         c->u.s32.b = tcg_temp_new_i32();
845         tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
846         break;
847 
848     case CC_OP_LTGT0_64:
849     case CC_OP_NZ:
850     case CC_OP_FLOGR:
851         c->u.s64.a = cc_dst;
852         c->u.s64.b = tcg_constant_i64(0);
853         break;
854     case CC_OP_LTGT_64:
855     case CC_OP_LTUGTU_64:
856         c->u.s64.a = cc_src;
857         c->u.s64.b = cc_dst;
858         break;
859 
860     case CC_OP_TM_32:
861     case CC_OP_TM_64:
862     case CC_OP_ICM:
863         c->u.s64.a = tcg_temp_new_i64();
864         c->u.s64.b = tcg_constant_i64(0);
865         tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
866         break;
867 
868     case CC_OP_ADDU:
869     case CC_OP_SUBU:
870         c->is_64 = true;
871         c->u.s64.b = tcg_constant_i64(0);
872         switch (mask) {
873         case 8 | 2:
874         case 4 | 1: /* result */
875             c->u.s64.a = cc_dst;
876             break;
877         case 8 | 4:
878         case 2 | 1: /* carry */
879             c->u.s64.a = cc_src;
880             break;
881         default:
882             g_assert_not_reached();
883         }
884         break;
885 
886     case CC_OP_STATIC:
887         c->is_64 = false;
888         c->u.s32.a = cc_op;
889         switch (mask) {
890         case 0x8 | 0x4 | 0x2: /* cc != 3 */
891             cond = TCG_COND_NE;
892             c->u.s32.b = tcg_constant_i32(3);
893             break;
894         case 0x8 | 0x4 | 0x1: /* cc != 2 */
895             cond = TCG_COND_NE;
896             c->u.s32.b = tcg_constant_i32(2);
897             break;
898         case 0x8 | 0x2 | 0x1: /* cc != 1 */
899             cond = TCG_COND_NE;
900             c->u.s32.b = tcg_constant_i32(1);
901             break;
902         case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
903             cond = TCG_COND_EQ;
904             c->u.s32.a = tcg_temp_new_i32();
905             c->u.s32.b = tcg_constant_i32(0);
906             tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
907             break;
908         case 0x8 | 0x4: /* cc < 2 */
909             cond = TCG_COND_LTU;
910             c->u.s32.b = tcg_constant_i32(2);
911             break;
912         case 0x8: /* cc == 0 */
913             cond = TCG_COND_EQ;
914             c->u.s32.b = tcg_constant_i32(0);
915             break;
916         case 0x4 | 0x2 | 0x1: /* cc != 0 */
917             cond = TCG_COND_NE;
918             c->u.s32.b = tcg_constant_i32(0);
919             break;
920         case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
921             cond = TCG_COND_NE;
922             c->u.s32.a = tcg_temp_new_i32();
923             c->u.s32.b = tcg_constant_i32(0);
924             tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
925             break;
926         case 0x4: /* cc == 1 */
927             cond = TCG_COND_EQ;
928             c->u.s32.b = tcg_constant_i32(1);
929             break;
930         case 0x2 | 0x1: /* cc > 1 */
931             cond = TCG_COND_GTU;
932             c->u.s32.b = tcg_constant_i32(1);
933             break;
934         case 0x2: /* cc == 2 */
935             cond = TCG_COND_EQ;
936             c->u.s32.b = tcg_constant_i32(2);
937             break;
938         case 0x1: /* cc == 3 */
939             cond = TCG_COND_EQ;
940             c->u.s32.b = tcg_constant_i32(3);
941             break;
942         default:
943             /* CC is masked by something else: (8 >> cc) & mask.  */
944             cond = TCG_COND_NE;
945             c->u.s32.a = tcg_temp_new_i32();
946             c->u.s32.b = tcg_constant_i32(0);
947             tcg_gen_shr_i32(c->u.s32.a, tcg_constant_i32(8), cc_op);
948             tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
949             break;
950         }
951         break;
952 
953     default:
954         abort();
955     }
956     c->cond = cond;
957 }
958 
959 /* ====================================================================== */
960 /* Define the insn format enumeration.  */
961 #define F0(N)                         FMT_##N,
962 #define F1(N, X1)                     F0(N)
963 #define F2(N, X1, X2)                 F0(N)
964 #define F3(N, X1, X2, X3)             F0(N)
965 #define F4(N, X1, X2, X3, X4)         F0(N)
966 #define F5(N, X1, X2, X3, X4, X5)     F0(N)
967 #define F6(N, X1, X2, X3, X4, X5, X6) F0(N)
968 
969 typedef enum {
970 #include "insn-format.h.inc"
971 } DisasFormat;
972 
973 #undef F0
974 #undef F1
975 #undef F2
976 #undef F3
977 #undef F4
978 #undef F5
979 #undef F6
980 
981 /* This is the way fields are to be accessed out of DisasFields.  */
982 #define have_field(S, F)  have_field1((S), FLD_O_##F)
983 #define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)
984 
985 static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
986 {
987     return (s->fields.presentO >> c) & 1;
988 }
989 
990 static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
991                       enum DisasFieldIndexC c)
992 {
993     assert(have_field1(s, o));
994     return s->fields.c[c];
995 }
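
/*
 * Usage sketch (illustrative): get_field(s, r1) expands to
 * get_field1(s, FLD_O_r1, FLD_C_r1), checking presence via the
 * original-index bitmap and reading the value from compact slot 0.
 */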
996 
997 /* Describe the layout of each field in each format.  */
998 typedef struct DisasField {
999     unsigned int beg:8;
1000     unsigned int size:8;
1001     unsigned int type:2;
1002     unsigned int indexC:6;
1003     enum DisasFieldIndexO indexO:8;
1004 } DisasField;
1005 
1006 typedef struct DisasFormatInfo {
1007     DisasField op[NUM_C_FIELD];
1008 } DisasFormatInfo;
1009 
1010 #define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
1011 #define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
1012 #define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
1013 #define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
1014                       { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1015 #define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
1016                       { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
1017                       { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1018 #define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
1019                       { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1020 #define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
1021                       { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
1022                       { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1023 #define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
1024 #define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }
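
/*
 * Illustrative expansion (not part of the original source): BXD(2)
 * describes the operand fields of a base+index+displacement address,
 * with b2 at instruction bits 16-19, x2 at bits 12-15 and d2 at bits
 * 20-31, each entry tagged with its compact and original indexes.
 */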
1025 
1026 #define F0(N)                     { { } },
1027 #define F1(N, X1)                 { { X1 } },
1028 #define F2(N, X1, X2)             { { X1, X2 } },
1029 #define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
1030 #define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
1031 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1032 #define F6(N, X1, X2, X3, X4, X5, X6)       { { X1, X2, X3, X4, X5, X6 } },
1033 
1034 static const DisasFormatInfo format_info[] = {
1035 #include "insn-format.h.inc"
1036 };
1037 
1038 #undef F0
1039 #undef F1
1040 #undef F2
1041 #undef F3
1042 #undef F4
1043 #undef F5
1044 #undef F6
1045 #undef R
1046 #undef M
1047 #undef V
1048 #undef BD
1049 #undef BXD
1050 #undef BDL
1051 #undef BXDL
1052 #undef I
1053 #undef L
1054 
1055 /* Generally, we'll extract operands into this structure, operate upon
1056    them, and store them back.  See the "in1", "in2", "prep", "wout" sets
1057    of routines below for more details.  */
1058 typedef struct {
1059     TCGv_i64 out, out2, in1, in2;
1060     TCGv_i64 addr1;
1061     TCGv_i128 out_128, in1_128, in2_128;
1062 } DisasOps;
1063 
1064 /* Instructions can place constraints on their operands, raising specification
1065    exceptions if they are violated.  To make this easy to automate, each "in1",
1066    "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1067    of the following, or 0.  To make this easy to document, we'll put the
1068    SPEC_<name> defines next to <name>.  */
1069 
1070 #define SPEC_r1_even    1
1071 #define SPEC_r2_even    2
1072 #define SPEC_r3_even    4
1073 #define SPEC_r1_f128    8
1074 #define SPEC_r2_f128    16
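
/*
 * Illustrative note: SPEC_r1_even, for instance, marks instructions
 * whose r1 operand addresses a register pair and must therefore name
 * an even register; a violation raises a specification exception, as
 * described above.
 */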
1075 
1076 /* Return values from translate_one, indicating the state of the TB.  */
1077 
1078 /* We are not using a goto_tb (for whatever reason), but have updated
1079    the PC, so there's no need to do it again on exiting
1080    the TB.  */
1081 #define DISAS_PC_UPDATED        DISAS_TARGET_0
1082 
1083 /* We have updated the PC and CC values.  */
1084 #define DISAS_PC_CC_UPDATED     DISAS_TARGET_2
1085 
1086 
1087 /* Instruction flags */
1088 #define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
1089 #define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
1090 #define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
1091 #define IF_BFP      0x0008      /* binary floating point instruction */
1092 #define IF_DFP      0x0010      /* decimal floating point instruction */
1093 #define IF_PRIV     0x0020      /* privileged instruction */
1094 #define IF_VEC      0x0040      /* vector instruction */
1095 #define IF_IO       0x0080      /* input/output instruction */
1096 
1097 struct DisasInsn {
1098     unsigned opc:16;
1099     unsigned flags:16;
1100     DisasFormat fmt:8;
1101     unsigned fac:8;
1102     unsigned spec:8;
1103 
1104     const char *name;
1105 
1106     /* Pre-process arguments before HELP_OP.  */
1107     void (*help_in1)(DisasContext *, DisasOps *);
1108     void (*help_in2)(DisasContext *, DisasOps *);
1109     void (*help_prep)(DisasContext *, DisasOps *);
1110 
1111     /*
1112      * Post-process output after HELP_OP.
1113      * Note that these are not called if HELP_OP returns DISAS_NORETURN.
1114      */
1115     void (*help_wout)(DisasContext *, DisasOps *);
1116     void (*help_cout)(DisasContext *, DisasOps *);
1117 
1118     /* Implement the operation itself.  */
1119     DisasJumpType (*help_op)(DisasContext *, DisasOps *);
1120 
1121     uint64_t data;
1122 };
1123 
1124 /* ====================================================================== */
1125 /* Miscellaneous helpers, used by several operations.  */
1126 
1127 static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
1128 {
1129     if (dest == s->pc_tmp) {
1130         per_branch(s, true);
1131         return DISAS_NEXT;
1132     }
1133     if (use_goto_tb(s, dest)) {
1134         update_cc_op(s);
1135         per_breaking_event(s);
1136         tcg_gen_goto_tb(0);
1137         tcg_gen_movi_i64(psw_addr, dest);
1138         tcg_gen_exit_tb(s->base.tb, 0);
1139         return DISAS_NORETURN;
1140     } else {
1141         tcg_gen_movi_i64(psw_addr, dest);
1142         per_branch(s, false);
1143         return DISAS_PC_UPDATED;
1144     }
1145 }
1146 
1147 static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
1148                                  bool is_imm, int imm, TCGv_i64 cdest)
1149 {
1150     DisasJumpType ret;
1151     uint64_t dest = s->base.pc_next + (int64_t)imm * 2;
1152     TCGLabel *lab;
1153 
1154     /* Take care of the special cases first.  */
1155     if (c->cond == TCG_COND_NEVER) {
1156         ret = DISAS_NEXT;
1157         goto egress;
1158     }
1159     if (is_imm) {
1160         if (dest == s->pc_tmp) {
1161             /* Branch to next.  */
1162             per_branch(s, true);
1163             ret = DISAS_NEXT;
1164             goto egress;
1165         }
1166         if (c->cond == TCG_COND_ALWAYS) {
1167             ret = help_goto_direct(s, dest);
1168             goto egress;
1169         }
1170     } else {
1171         if (!cdest) {
1172             /* E.g. bcr %r0 -> no branch.  */
1173             ret = DISAS_NEXT;
1174             goto egress;
1175         }
1176         if (c->cond == TCG_COND_ALWAYS) {
1177             tcg_gen_mov_i64(psw_addr, cdest);
1178             per_branch(s, false);
1179             ret = DISAS_PC_UPDATED;
1180             goto egress;
1181         }
1182     }
1183 
1184     if (use_goto_tb(s, s->pc_tmp)) {
1185         if (is_imm && use_goto_tb(s, dest)) {
1186             /* Both exits can use goto_tb.  */
1187             update_cc_op(s);
1188 
1189             lab = gen_new_label();
1190             if (c->is_64) {
1191                 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1192             } else {
1193                 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1194             }
1195 
1196             /* Branch not taken.  */
1197             tcg_gen_goto_tb(0);
1198             tcg_gen_movi_i64(psw_addr, s->pc_tmp);
1199             tcg_gen_exit_tb(s->base.tb, 0);
1200 
1201             /* Branch taken.  */
1202             gen_set_label(lab);
1203             per_breaking_event(s);
1204             tcg_gen_goto_tb(1);
1205             tcg_gen_movi_i64(psw_addr, dest);
1206             tcg_gen_exit_tb(s->base.tb, 1);
1207 
1208             ret = DISAS_NORETURN;
1209         } else {
1210             /* Fallthru can use goto_tb, but taken branch cannot.  */
1211             /* Store taken branch destination before the brcond.  This
1212                avoids having to allocate a new local temp to hold it.
1213                We'll overwrite this in the not taken case anyway.  */
1214             if (!is_imm) {
1215                 tcg_gen_mov_i64(psw_addr, cdest);
1216             }
1217 
1218             lab = gen_new_label();
1219             if (c->is_64) {
1220                 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1221             } else {
1222                 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1223             }
1224 
1225             /* Branch not taken.  */
1226             update_cc_op(s);
1227             tcg_gen_goto_tb(0);
1228             tcg_gen_movi_i64(psw_addr, s->pc_tmp);
1229             tcg_gen_exit_tb(s->base.tb, 0);
1230 
1231             gen_set_label(lab);
1232             if (is_imm) {
1233                 tcg_gen_movi_i64(psw_addr, dest);
1234             }
1235             per_breaking_event(s);
1236             ret = DISAS_PC_UPDATED;
1237         }
1238     } else {
1239         /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
1240            Most commonly we're single-stepping or some other condition that
1241            disables all use of goto_tb.  Just update the PC and exit.  */
1242 
1243         TCGv_i64 next = tcg_constant_i64(s->pc_tmp);
1244         if (is_imm) {
1245             cdest = tcg_constant_i64(dest);
1246         }
1247 
1248         if (c->is_64) {
1249             tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1250                                 cdest, next);
1251             per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
1252         } else {
1253             TCGv_i32 t0 = tcg_temp_new_i32();
1254             TCGv_i64 t1 = tcg_temp_new_i64();
1255             TCGv_i64 z = tcg_constant_i64(0);
1256             tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1257             tcg_gen_extu_i32_i64(t1, t0);
1258             tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1259             per_branch_cond(s, TCG_COND_NE, t1, z);
1260         }
1261 
1262         ret = DISAS_PC_UPDATED;
1263     }
1264 
1265  egress:
1266     return ret;
1267 }
1268 
1269 /* ====================================================================== */
1270 /* The operations.  These perform the bulk of the work for any insn,
1271    usually after the operands have been loaded and output initialized.  */
1272 
1273 static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
1274 {
1275     tcg_gen_abs_i64(o->out, o->in2);
1276     return DISAS_NEXT;
1277 }
1278 
1279 static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
1280 {
1281     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1282     return DISAS_NEXT;
1283 }
1284 
1285 static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
1286 {
1287     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1288     return DISAS_NEXT;
1289 }
1290 
1291 static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
1292 {
1293     tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1294     tcg_gen_mov_i64(o->out2, o->in2);
1295     return DISAS_NEXT;
1296 }
1297 
1298 static DisasJumpType op_add(DisasContext *s, DisasOps *o)
1299 {
1300     tcg_gen_add_i64(o->out, o->in1, o->in2);
1301     return DISAS_NEXT;
1302 }
1303 
1304 static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
1305 {
1306     tcg_gen_movi_i64(cc_src, 0);
1307     tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
1308     return DISAS_NEXT;
1309 }
1310 
1311 /* Compute carry into cc_src. */
1312 static void compute_carry(DisasContext *s)
1313 {
1314     switch (s->cc_op) {
1315     case CC_OP_ADDU:
1316         /* The carry value is already in cc_src (0 or 1). */
1317         break;
1318     case CC_OP_SUBU:
1319         tcg_gen_addi_i64(cc_src, cc_src, 1);
1320         break;
1321     default:
1322         gen_op_calc_cc(s);
1323         /* fall through */
1324     case CC_OP_STATIC:
1325         /* The carry flag is the msb of CC; compute into cc_src. */
1326         tcg_gen_extu_i32_i64(cc_src, cc_op);
1327         tcg_gen_shri_i64(cc_src, cc_src, 1);
1328         break;
1329     }
1330 }
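
/*
 * Illustrative note: for CC_OP_ADDU the possible CC values are 0/1
 * (no carry) and 2/3 (carry), so the shift right by one above reduces
 * a materialized CC to the 0/1 carry flag that op_addc32/op_addc64
 * consume.
 */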
1331 
1332 static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
1333 {
1334     compute_carry(s);
1335     tcg_gen_add_i64(o->out, o->in1, o->in2);
1336     tcg_gen_add_i64(o->out, o->out, cc_src);
1337     return DISAS_NEXT;
1338 }
1339 
1340 static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
1341 {
1342     compute_carry(s);
1343 
1344     TCGv_i64 zero = tcg_constant_i64(0);
1345     tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
1346     tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
1347 
1348     return DISAS_NEXT;
1349 }
1350 
1351 static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
1352 {
1353     bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);
1354 
1355     o->in1 = tcg_temp_new_i64();
1356     if (non_atomic) {
1357         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
1358     } else {
1359         /* Perform the atomic addition in memory. */
1360         tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
1361                                      s->insn->data);
1362     }
1363 
1364     /* Recompute also for atomic case: needed for setting CC. */
1365     tcg_gen_add_i64(o->out, o->in1, o->in2);
1366 
1367     if (non_atomic) {
1368         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
1369     }
1370     return DISAS_NEXT;
1371 }
1372 
1373 static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
1374 {
1375     bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);
1376 
1377     o->in1 = tcg_temp_new_i64();
1378     if (non_atomic) {
1379         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
1380     } else {
1381         /* Perform the atomic addition in memory. */
1382         tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
1383                                      s->insn->data);
1384     }
1385 
1386     /* Recompute also for atomic case: needed for setting CC. */
1387     tcg_gen_movi_i64(cc_src, 0);
1388     tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
1389 
1390     if (non_atomic) {
1391         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
1392     }
1393     return DISAS_NEXT;
1394 }
1395 
1396 static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
1397 {
1398     gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1399     return DISAS_NEXT;
1400 }
1401 
1402 static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
1403 {
1404     gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1405     return DISAS_NEXT;
1406 }
1407 
1408 static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
1409 {
1410     gen_helper_axb(o->out_128, cpu_env, o->in1_128, o->in2_128);
1411     return DISAS_NEXT;
1412 }
1413 
1414 static DisasJumpType op_and(DisasContext *s, DisasOps *o)
1415 {
1416     tcg_gen_and_i64(o->out, o->in1, o->in2);
1417     return DISAS_NEXT;
1418 }
1419 
1420 static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
1421 {
1422     int shift = s->insn->data & 0xff;
1423     int size = s->insn->data >> 8;
1424     uint64_t mask = ((1ull << size) - 1) << shift;
1425     TCGv_i64 t = tcg_temp_new_i64();
1426 
1427     tcg_gen_shli_i64(t, o->in2, shift);
1428     tcg_gen_ori_i64(t, t, ~mask);
1429     tcg_gen_and_i64(o->out, o->in1, t);
1430 
1431     /* Produce the CC from only the bits manipulated.  */
1432     tcg_gen_andi_i64(cc_dst, o->out, mask);
1433     set_cc_nz_u64(s, cc_dst);
1434     return DISAS_NEXT;
1435 }
1436 
1437 static DisasJumpType op_andc(DisasContext *s, DisasOps *o)
1438 {
1439     tcg_gen_andc_i64(o->out, o->in1, o->in2);
1440     return DISAS_NEXT;
1441 }
1442 
1443 static DisasJumpType op_orc(DisasContext *s, DisasOps *o)
1444 {
1445     tcg_gen_orc_i64(o->out, o->in1, o->in2);
1446     return DISAS_NEXT;
1447 }
1448 
1449 static DisasJumpType op_nand(DisasContext *s, DisasOps *o)
1450 {
1451     tcg_gen_nand_i64(o->out, o->in1, o->in2);
1452     return DISAS_NEXT;
1453 }
1454 
1455 static DisasJumpType op_nor(DisasContext *s, DisasOps *o)
1456 {
1457     tcg_gen_nor_i64(o->out, o->in1, o->in2);
1458     return DISAS_NEXT;
1459 }
1460 
1461 static DisasJumpType op_nxor(DisasContext *s, DisasOps *o)
1462 {
1463     tcg_gen_eqv_i64(o->out, o->in1, o->in2);
1464     return DISAS_NEXT;
1465 }
1466 
1467 static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
1468 {
1469     o->in1 = tcg_temp_new_i64();
1470 
1471     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
1472         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
1473     } else {
1474         /* Perform the atomic operation in memory. */
1475         tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
1476                                      s->insn->data);
1477     }
1478 
1479     /* Recompute also for atomic case: needed for setting CC. */
1480     tcg_gen_and_i64(o->out, o->in1, o->in2);
1481 
1482     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
1483         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
1484     }
1485     return DISAS_NEXT;
1486 }
1487 
1488 static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
1489 {
1490     pc_to_link_info(o->out, s, s->pc_tmp);
1491     if (o->in2) {
1492         tcg_gen_mov_i64(psw_addr, o->in2);
1493         per_branch(s, false);
1494         return DISAS_PC_UPDATED;
1495     } else {
1496         return DISAS_NEXT;
1497     }
1498 }
1499 
1500 static void save_link_info(DisasContext *s, DisasOps *o)
1501 {
1502     TCGv_i64 t;
1503 
1504     if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
1505         pc_to_link_info(o->out, s, s->pc_tmp);
1506         return;
1507     }
1508     gen_op_calc_cc(s);
1509     tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
1510     tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
1511     t = tcg_temp_new_i64();
1512     tcg_gen_shri_i64(t, psw_mask, 16);
1513     tcg_gen_andi_i64(t, t, 0x0f000000);
1514     tcg_gen_or_i64(o->out, o->out, t);
1515     tcg_gen_extu_i32_i64(t, cc_op);
1516     tcg_gen_shli_i64(t, t, 28);
1517     tcg_gen_or_i64(o->out, o->out, t);
1518 }
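
/*
 * Sketch of the 24-bit link layout built above (illustrative): bits
 * 30-31 of the low word receive the ILC (s->ilen / 2), bits 28-29 the
 * condition code, bits 24-27 the program mask extracted from psw_mask,
 * and bits 0-23 the address of the next instruction.
 */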
1519 
1520 static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
1521 {
1522     save_link_info(s, o);
1523     if (o->in2) {
1524         tcg_gen_mov_i64(psw_addr, o->in2);
1525         per_branch(s, false);
1526         return DISAS_PC_UPDATED;
1527     } else {
1528         return DISAS_NEXT;
1529     }
1530 }
1531 
1532 /*
1533  * Disassemble the target of a branch. The results are returned in a form
1534  * suitable for passing into help_branch():
1535  *
1536  * - bool IS_IMM reflects whether the target is fixed or computed. Non-EXECUTEd
1537  *   branches, whose DisasContext *S contains the relative immediate field RI,
1538  *   are considered fixed. All the other branches are considered computed.
1539  * - int IMM is the value of RI.
1540  * - TCGv_i64 CDEST is the address of the computed target.
1541  */
1542 #define disas_jdest(s, ri, is_imm, imm, cdest) do {                            \
1543     if (have_field(s, ri)) {                                                   \
1544         if (unlikely(s->ex_value)) {                                           \
1545             cdest = tcg_temp_new_i64();                                        \
1546             tcg_gen_ld_i64(cdest, cpu_env, offsetof(CPUS390XState, ex_target));\
1547             tcg_gen_addi_i64(cdest, cdest, (int64_t)get_field(s, ri) * 2);     \
1548             is_imm = false;                                                    \
1549         } else {                                                               \
1550             is_imm = true;                                                     \
1551         }                                                                      \
1552     } else {                                                                   \
1553         is_imm = false;                                                        \
1554     }                                                                          \
1555     imm = is_imm ? get_field(s, ri) : 0;                                       \
1556 } while (false)
1557 
1558 static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
1559 {
1560     DisasCompare c;
1561     bool is_imm;
1562     int imm;
1563 
1564     pc_to_link_info(o->out, s, s->pc_tmp);
1565 
1566     disas_jdest(s, i2, is_imm, imm, o->in2);
1567     disas_jcc(s, &c, 0xf);
1568     return help_branch(s, &c, is_imm, imm, o->in2);
1569 }
1570 
1571 static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
1572 {
1573     int m1 = get_field(s, m1);
1574     DisasCompare c;
1575     bool is_imm;
1576     int imm;
1577 
1578     /* BCR with R2 = 0 causes no branching */
1579     if (have_field(s, r2) && get_field(s, r2) == 0) {
1580         if (m1 == 14) {
1581             /* Perform serialization */
1582             /* FIXME: check for fast-BCR-serialization facility */
1583             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1584         }
1585         if (m1 == 15) {
1586             /* Perform serialization */
1587             /* FIXME: perform checkpoint-synchronisation */
1588             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1589         }
1590         return DISAS_NEXT;
1591     }
1592 
1593     disas_jdest(s, i2, is_imm, imm, o->in2);
1594     disas_jcc(s, &c, m1);
1595     return help_branch(s, &c, is_imm, imm, o->in2);
1596 }
1597 
1598 static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
1599 {
1600     int r1 = get_field(s, r1);
1601     DisasCompare c;
1602     bool is_imm;
1603     TCGv_i64 t;
1604     int imm;
1605 
1606     c.cond = TCG_COND_NE;
1607     c.is_64 = false;
1608 
1609     t = tcg_temp_new_i64();
1610     tcg_gen_subi_i64(t, regs[r1], 1);
1611     store_reg32_i64(r1, t);
1612     c.u.s32.a = tcg_temp_new_i32();
1613     c.u.s32.b = tcg_constant_i32(0);
1614     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1615 
1616     disas_jdest(s, i2, is_imm, imm, o->in2);
1617     return help_branch(s, &c, is_imm, imm, o->in2);
1618 }
1619 
1620 static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
1621 {
1622     int r1 = get_field(s, r1);
1623     int imm = get_field(s, i2);
1624     DisasCompare c;
1625     TCGv_i64 t;
1626 
1627     c.cond = TCG_COND_NE;
1628     c.is_64 = false;
1629 
1630     t = tcg_temp_new_i64();
1631     tcg_gen_shri_i64(t, regs[r1], 32);
1632     tcg_gen_subi_i64(t, t, 1);
1633     store_reg32h_i64(r1, t);
1634     c.u.s32.a = tcg_temp_new_i32();
1635     c.u.s32.b = tcg_constant_i32(0);
1636     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1637 
1638     return help_branch(s, &c, 1, imm, o->in2);
1639 }
1640 
1641 static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
1642 {
1643     int r1 = get_field(s, r1);
1644     DisasCompare c;
1645     bool is_imm;
1646     int imm;
1647 
1648     c.cond = TCG_COND_NE;
1649     c.is_64 = true;
1650 
1651     tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1652     c.u.s64.a = regs[r1];
1653     c.u.s64.b = tcg_constant_i64(0);
1654 
1655     disas_jdest(s, i2, is_imm, imm, o->in2);
1656     return help_branch(s, &c, is_imm, imm, o->in2);
1657 }
1658 
1659 static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
1660 {
1661     int r1 = get_field(s, r1);
1662     int r3 = get_field(s, r3);
1663     DisasCompare c;
1664     bool is_imm;
1665     TCGv_i64 t;
1666     int imm;
1667 
1668     c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1669     c.is_64 = false;
1670 
1671     t = tcg_temp_new_i64();
1672     tcg_gen_add_i64(t, regs[r1], regs[r3]);
1673     c.u.s32.a = tcg_temp_new_i32();
1674     c.u.s32.b = tcg_temp_new_i32();
1675     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1676     tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
1677     store_reg32_i64(r1, t);
1678 
1679     disas_jdest(s, i2, is_imm, imm, o->in2);
1680     return help_branch(s, &c, is_imm, imm, o->in2);
1681 }
1682 
1683 static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
1684 {
1685     int r1 = get_field(s, r1);
1686     int r3 = get_field(s, r3);
1687     DisasCompare c;
1688     bool is_imm;
1689     int imm;
1690 
1691     c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1692     c.is_64 = true;
1693 
1694     if (r1 == (r3 | 1)) {
1695         c.u.s64.b = load_reg(r3 | 1);
1696     } else {
1697         c.u.s64.b = regs[r3 | 1];
1698     }
1699 
1700     tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1701     c.u.s64.a = regs[r1];
1702 
1703     disas_jdest(s, i2, is_imm, imm, o->in2);
1704     return help_branch(s, &c, is_imm, imm, o->in2);
1705 }
1706 
1707 static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
1708 {
1709     int imm, m3 = get_field(s, m3);
1710     bool is_imm;
1711     DisasCompare c;
1712 
1713     c.cond = ltgt_cond[m3];
1714     if (s->insn->data) {
1715         c.cond = tcg_unsigned_cond(c.cond);
1716     }
1717     c.is_64 = true;
1718     c.u.s64.a = o->in1;
1719     c.u.s64.b = o->in2;
1720 
1721     o->out = NULL;
1722     disas_jdest(s, i4, is_imm, imm, o->out);
1723     if (!is_imm && !o->out) {
1724         imm = 0;
1725         o->out = get_address(s, 0, get_field(s, b4),
1726                              get_field(s, d4));
1727     }
1728 
1729     return help_branch(s, &c, is_imm, imm, o->out);
1730 }
1731 
1732 static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
1733 {
1734     gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1735     set_cc_static(s);
1736     return DISAS_NEXT;
1737 }
1738 
1739 static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
1740 {
1741     gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1742     set_cc_static(s);
1743     return DISAS_NEXT;
1744 }
1745 
1746 static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
1747 {
1748     gen_helper_cxb(cc_op, cpu_env, o->in1_128, o->in2_128);
1749     set_cc_static(s);
1750     return DISAS_NEXT;
1751 }
1752 
1753 static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
1754                                    bool m4_with_fpe)
1755 {
1756     const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
1757     uint8_t m3 = get_field(s, m3);
1758     uint8_t m4 = get_field(s, m4);
1759 
1760     /* m3 field was introduced with FPE */
1761     if (!fpe && m3_with_fpe) {
1762         m3 = 0;
1763     }
1764     /* m4 field was introduced with FPE */
1765     if (!fpe && m4_with_fpe) {
1766         m4 = 0;
1767     }
1768 
1769     /* Check for valid rounding modes. Mode 3 was introduced with FPE. */
1770     if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
1771         gen_program_exception(s, PGM_SPECIFICATION);
1772         return NULL;
1773     }
1774 
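         /* Pack the two modifiers into a single constant: (m4 << 4) | m3.  */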
1775     return tcg_constant_i32(deposit32(m3, 4, 4, m4));
1776 }
1777 
1778 static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
1779 {
1780     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1781 
1782     if (!m34) {
1783         return DISAS_NORETURN;
1784     }
1785     gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
1786     set_cc_static(s);
1787     return DISAS_NEXT;
1788 }
1789 
1790 static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
1791 {
1792     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1793 
1794     if (!m34) {
1795         return DISAS_NORETURN;
1796     }
1797     gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
1798     set_cc_static(s);
1799     return DISAS_NEXT;
1800 }
1801 
1802 static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
1803 {
1804     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1805 
1806     if (!m34) {
1807         return DISAS_NORETURN;
1808     }
1809     gen_helper_cfxb(o->out, cpu_env, o->in2_128, m34);
1810     set_cc_static(s);
1811     return DISAS_NEXT;
1812 }
1813 
1814 static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
1815 {
1816     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1817 
1818     if (!m34) {
1819         return DISAS_NORETURN;
1820     }
1821     gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
1822     set_cc_static(s);
1823     return DISAS_NEXT;
1824 }
1825 
1826 static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
1827 {
1828     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1829 
1830     if (!m34) {
1831         return DISAS_NORETURN;
1832     }
1833     gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
1834     set_cc_static(s);
1835     return DISAS_NEXT;
1836 }
1837 
1838 static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
1839 {
1840     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1841 
1842     if (!m34) {
1843         return DISAS_NORETURN;
1844     }
1845     gen_helper_cgxb(o->out, cpu_env, o->in2_128, m34);
1846     set_cc_static(s);
1847     return DISAS_NEXT;
1848 }
1849 
1850 static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
1851 {
1852     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1853 
1854     if (!m34) {
1855         return DISAS_NORETURN;
1856     }
1857     gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
1858     set_cc_static(s);
1859     return DISAS_NEXT;
1860 }
1861 
1862 static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
1863 {
1864     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1865 
1866     if (!m34) {
1867         return DISAS_NORETURN;
1868     }
1869     gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
1870     set_cc_static(s);
1871     return DISAS_NEXT;
1872 }
1873 
1874 static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
1875 {
1876     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1877 
1878     if (!m34) {
1879         return DISAS_NORETURN;
1880     }
1881     gen_helper_clfxb(o->out, cpu_env, o->in2_128, m34);
1882     set_cc_static(s);
1883     return DISAS_NEXT;
1884 }
1885 
1886 static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
1887 {
1888     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1889 
1890     if (!m34) {
1891         return DISAS_NORETURN;
1892     }
1893     gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
1894     set_cc_static(s);
1895     return DISAS_NEXT;
1896 }
1897 
1898 static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
1899 {
1900     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1901 
1902     if (!m34) {
1903         return DISAS_NORETURN;
1904     }
1905     gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
1906     set_cc_static(s);
1907     return DISAS_NEXT;
1908 }
1909 
1910 static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
1911 {
1912     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1913 
1914     if (!m34) {
1915         return DISAS_NORETURN;
1916     }
1917     gen_helper_clgxb(o->out, cpu_env, o->in2_128, m34);
1918     set_cc_static(s);
1919     return DISAS_NEXT;
1920 }
1921 
1922 static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
1923 {
1924     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1925 
1926     if (!m34) {
1927         return DISAS_NORETURN;
1928     }
1929     gen_helper_cegb(o->out, cpu_env, o->in2, m34);
1930     return DISAS_NEXT;
1931 }
1932 
1933 static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
1934 {
1935     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1936 
1937     if (!m34) {
1938         return DISAS_NORETURN;
1939     }
1940     gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
1941     return DISAS_NEXT;
1942 }
1943 
1944 static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
1945 {
1946     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1947 
1948     if (!m34) {
1949         return DISAS_NORETURN;
1950     }
1951     gen_helper_cxgb(o->out_128, cpu_env, o->in2, m34);
1952     return DISAS_NEXT;
1953 }
1954 
1955 static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
1956 {
1957     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1958 
1959     if (!m34) {
1960         return DISAS_NORETURN;
1961     }
1962     gen_helper_celgb(o->out, cpu_env, o->in2, m34);
1963     return DISAS_NEXT;
1964 }
1965 
1966 static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
1967 {
1968     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1969 
1970     if (!m34) {
1971         return DISAS_NORETURN;
1972     }
1973     gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
1974     return DISAS_NEXT;
1975 }
1976 
1977 static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
1978 {
1979     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1980 
1981     if (!m34) {
1982         return DISAS_NORETURN;
1983     }
1984     gen_helper_cxlgb(o->out_128, cpu_env, o->in2, m34);
1985     return DISAS_NEXT;
1986 }
1987 
1988 static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
1989 {
1990     int r2 = get_field(s, r2);
1991     TCGv_i128 pair = tcg_temp_new_i128();
1992     TCGv_i64 len = tcg_temp_new_i64();
1993 
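         /* The helper returns the checksum and the number of bytes processed
            as a pair; advance the address by that count and shrink the
            remaining length accordingly.  */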
1994     gen_helper_cksm(pair, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1995     set_cc_static(s);
1996     tcg_gen_extr_i128_i64(o->out, len, pair);
1997 
1998     tcg_gen_add_i64(regs[r2], regs[r2], len);
1999     tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
2000 
2001     return DISAS_NEXT;
2002 }
2003 
2004 static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
2005 {
2006     int l = get_field(s, l1);
2007     TCGv_i32 vl;
2008     MemOp mop;
2009 
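         /* The l field holds the operand length minus one.  For 1, 2, 4 and 8
            byte operands, compare inline; ctz32(l + 1) yields the MemOp size
            (MO_8 ... MO_64).  */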
2010     switch (l + 1) {
2011     case 1:
2012     case 2:
2013     case 4:
2014     case 8:
2015         mop = ctz32(l + 1) | MO_TE;
2016         tcg_gen_qemu_ld_tl(cc_src, o->addr1, get_mem_index(s), mop);
2017         tcg_gen_qemu_ld_tl(cc_dst, o->in2, get_mem_index(s), mop);
2018         gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
2019         return DISAS_NEXT;
2020     default:
2021         vl = tcg_constant_i32(l);
2022         gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
2023         set_cc_static(s);
2024         return DISAS_NEXT;
2025     }
2026 }
2027 
2028 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2029 {
2030     int r1 = get_field(s, r1);
2031     int r2 = get_field(s, r2);
2032     TCGv_i32 t1, t2;
2033 
2034     /* r1 and r2 must be even.  */
2035     if (r1 & 1 || r2 & 1) {
2036         gen_program_exception(s, PGM_SPECIFICATION);
2037         return DISAS_NORETURN;
2038     }
2039 
2040     t1 = tcg_constant_i32(r1);
2041     t2 = tcg_constant_i32(r2);
2042     gen_helper_clcl(cc_op, cpu_env, t1, t2);
2043     set_cc_static(s);
2044     return DISAS_NEXT;
2045 }
2046 
2047 static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
2048 {
2049     int r1 = get_field(s, r1);
2050     int r3 = get_field(s, r3);
2051     TCGv_i32 t1, t3;
2052 
2053     /* r1 and r3 must be even.  */
2054     if (r1 & 1 || r3 & 1) {
2055         gen_program_exception(s, PGM_SPECIFICATION);
2056         return DISAS_NORETURN;
2057     }
2058 
2059     t1 = tcg_constant_i32(r1);
2060     t3 = tcg_constant_i32(r3);
2061     gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
2062     set_cc_static(s);
2063     return DISAS_NEXT;
2064 }
2065 
2066 static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
2067 {
2068     int r1 = get_field(s, r1);
2069     int r3 = get_field(s, r3);
2070     TCGv_i32 t1, t3;
2071 
2072     /* r1 and r3 must be even.  */
2073     if (r1 & 1 || r3 & 1) {
2074         gen_program_exception(s, PGM_SPECIFICATION);
2075         return DISAS_NORETURN;
2076     }
2077 
2078     t1 = tcg_constant_i32(r1);
2079     t3 = tcg_constant_i32(r3);
2080     gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
2081     set_cc_static(s);
2082     return DISAS_NEXT;
2083 }
2084 
2085 static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
2086 {
2087     TCGv_i32 m3 = tcg_constant_i32(get_field(s, m3));
2088     TCGv_i32 t1 = tcg_temp_new_i32();
2089 
2090     tcg_gen_extrl_i64_i32(t1, o->in1);
2091     gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
2092     set_cc_static(s);
2093     return DISAS_NEXT;
2094 }
2095 
2096 static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
2097 {
2098     TCGv_i128 pair = tcg_temp_new_i128();
2099 
2100     gen_helper_clst(pair, cpu_env, regs[0], o->in1, o->in2);
2101     tcg_gen_extr_i128_i64(o->in2, o->in1, pair);
2102 
2103     set_cc_static(s);
2104     return DISAS_NEXT;
2105 }
2106 
2107 static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2108 {
2109     TCGv_i64 t = tcg_temp_new_i64();
2110     tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2111     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2112     tcg_gen_or_i64(o->out, o->out, t);
2113     return DISAS_NEXT;
2114 }
2115 
2116 static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
2117 {
2118     int d2 = get_field(s, d2);
2119     int b2 = get_field(s, b2);
2120     TCGv_i64 addr, cc;
2121 
2122     /* Note that in1 = R3 (new value) and
2123        in2 = (zero-extended) R1 (expected value).  */
2124 
2125     addr = get_address(s, 0, b2, d2);
2126     tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
2127                                get_mem_index(s), s->insn->data | MO_ALIGN);
2128 
2129     /* Are the memory and expected values (un)equal?  Note that this setcond
2130        produces the output CC value, thus the NE sense of the test.  */
2131     cc = tcg_temp_new_i64();
2132     tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
2133     tcg_gen_extrl_i64_i32(cc_op, cc);
2134     set_cc_static(s);
2135 
2136     return DISAS_NEXT;
2137 }
2138 
2139 static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
2140 {
2141     int r1 = get_field(s, r1);
2142 
2143     o->out_128 = tcg_temp_new_i128();
2144     tcg_gen_concat_i64_i128(o->out_128, regs[r1 + 1], regs[r1]);
2145 
2146     /* Note out (R1:R1+1) = expected value and in2 (R3:R3+1) = new value.  */
2147     tcg_gen_atomic_cmpxchg_i128(o->out_128, o->addr1, o->out_128, o->in2_128,
2148                                 get_mem_index(s), MO_BE | MO_128 | MO_ALIGN);
2149 
2150     /*
2151      * Extract the result into cc_dst:cc_src, compare it against the expected
2152      * value in the as-yet-unmodified input registers, then update CC_OP.
2153      */
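         /* The OR of the two XOR results is zero iff both halves matched.  */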
2154     tcg_gen_extr_i128_i64(cc_src, cc_dst, o->out_128);
2155     tcg_gen_xor_i64(cc_dst, cc_dst, regs[r1]);
2156     tcg_gen_xor_i64(cc_src, cc_src, regs[r1 + 1]);
2157     tcg_gen_or_i64(cc_dst, cc_dst, cc_src);
2158     set_cc_nz_u64(s, cc_dst);
2159 
2160     return DISAS_NEXT;
2161 }
2162 
2163 static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
2164 {
2165     int r3 = get_field(s, r3);
2166     TCGv_i32 t_r3 = tcg_constant_i32(r3);
2167 
2168     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2169         gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2170     } else {
2171         gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2172     }
2173 
2174     set_cc_static(s);
2175     return DISAS_NEXT;
2176 }
2177 
2178 #ifndef CONFIG_USER_ONLY
2179 static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
2180 {
2181     MemOp mop = s->insn->data;
2182     TCGv_i64 addr, old, cc;
2183     TCGLabel *lab = gen_new_label();
2184 
2185     /* Note that in1 = R1 (zero-extended expected value),
2186        out = R1 (original reg), out2 = R1+1 (new value).  */
2187 
2188     addr = tcg_temp_new_i64();
2189     old = tcg_temp_new_i64();
2190     tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2191     tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2192                                get_mem_index(s), mop | MO_ALIGN);
2193 
2194     /* Are the memory and expected values (un)equal?  */
2195     cc = tcg_temp_new_i64();
2196     tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2197     tcg_gen_extrl_i64_i32(cc_op, cc);
2198 
2199     /* Write back the output now, so that it happens before the
2200        following branch and we don't need local temps.  */
2201     if ((mop & MO_SIZE) == MO_32) {
2202         tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2203     } else {
2204         tcg_gen_mov_i64(o->out, old);
2205     }
2206 
2207     /* If the comparison was equal, and the LSB of R2 was set,
2208        then we need to flush the TLB (for all cpus).  */
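         /* cc is 1 iff the comparison was equal; the AND keeps only the low
            bit of R2, so the branch skips the purge unless both conditions
            hold.  */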
2209     tcg_gen_xori_i64(cc, cc, 1);
2210     tcg_gen_and_i64(cc, cc, o->in2);
2211     tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2212 
2213     gen_helper_purge(cpu_env);
2214     gen_set_label(lab);
2215 
2216     return DISAS_NEXT;
2217 }
2218 #endif
2219 
2220 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2221 {
2222     TCGv_i64 t1 = tcg_temp_new_i64();
2223     TCGv_i32 t2 = tcg_temp_new_i32();
2224     tcg_gen_extrl_i64_i32(t2, o->in1);
2225     gen_helper_cvd(t1, t2);
2226     tcg_gen_qemu_st_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
2227     return DISAS_NEXT;
2228 }
2229 
2230 static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2231 {
2232     int m3 = get_field(s, m3);
2233     TCGLabel *lab = gen_new_label();
2234     TCGCond c;
2235 
2236     c = tcg_invert_cond(ltgt_cond[m3]);
2237     if (s->insn->data) {
2238         c = tcg_unsigned_cond(c);
2239     }
2240     tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2241 
2242     /* Trap.  */
2243     gen_trap(s);
2244 
2245     gen_set_label(lab);
2246     return DISAS_NEXT;
2247 }
2248 
2249 static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
2250 {
2251     int m3 = get_field(s, m3);
2252     int r1 = get_field(s, r1);
2253     int r2 = get_field(s, r2);
2254     TCGv_i32 tr1, tr2, chk;
2255 
2256     /* R1 and R2 must both be even.  */
2257     if ((r1 | r2) & 1) {
2258         gen_program_exception(s, PGM_SPECIFICATION);
2259         return DISAS_NORETURN;
2260     }
2261     if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2262         m3 = 0;
2263     }
2264 
2265     tr1 = tcg_constant_i32(r1);
2266     tr2 = tcg_constant_i32(r2);
2267     chk = tcg_constant_i32(m3);
2268 
2269     switch (s->insn->data) {
2270     case 12:
2271         gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
2272         break;
2273     case 14:
2274         gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
2275         break;
2276     case 21:
2277         gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
2278         break;
2279     case 24:
2280         gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
2281         break;
2282     case 41:
2283         gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
2284         break;
2285     case 42:
2286         gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
2287         break;
2288     default:
2289         g_assert_not_reached();
2290     }
2291 
2292     set_cc_static(s);
2293     return DISAS_NEXT;
2294 }
2295 
2296 #ifndef CONFIG_USER_ONLY
2297 static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2298 {
2299     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2300     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2301     TCGv_i32 func_code = tcg_constant_i32(get_field(s, i2));
2302 
2303     gen_helper_diag(cpu_env, r1, r3, func_code);
2304     return DISAS_NEXT;
2305 }
2306 #endif
2307 
2308 static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
2309 {
2310     gen_helper_divs32(o->out, cpu_env, o->in1, o->in2);
2311     tcg_gen_extr32_i64(o->out2, o->out, o->out);
2312     return DISAS_NEXT;
2313 }
2314 
2315 static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
2316 {
2317     gen_helper_divu32(o->out, cpu_env, o->in1, o->in2);
2318     tcg_gen_extr32_i64(o->out2, o->out, o->out);
2319     return DISAS_NEXT;
2320 }
2321 
2322 static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
2323 {
2324     TCGv_i128 t = tcg_temp_new_i128();
2325 
2326     gen_helper_divs64(t, cpu_env, o->in1, o->in2);
2327     tcg_gen_extr_i128_i64(o->out2, o->out, t);
2328     return DISAS_NEXT;
2329 }
2330 
2331 static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
2332 {
2333     TCGv_i128 t = tcg_temp_new_i128();
2334 
2335     gen_helper_divu64(t, cpu_env, o->out, o->out2, o->in2);
2336     tcg_gen_extr_i128_i64(o->out2, o->out, t);
2337     return DISAS_NEXT;
2338 }
2339 
2340 static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
2341 {
2342     gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2343     return DISAS_NEXT;
2344 }
2345 
2346 static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
2347 {
2348     gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2349     return DISAS_NEXT;
2350 }
2351 
2352 static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
2353 {
2354     gen_helper_dxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
2355     return DISAS_NEXT;
2356 }
2357 
2358 static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
2359 {
2360     int r2 = get_field(s, r2);
2361     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2362     return DISAS_NEXT;
2363 }
2364 
2365 static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
2366 {
2367     /* No cache information provided.  */
2368     tcg_gen_movi_i64(o->out, -1);
2369     return DISAS_NEXT;
2370 }
2371 
2372 static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
2373 {
2374     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2375     return DISAS_NEXT;
2376 }
2377 
2378 static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
2379 {
2380     int r1 = get_field(s, r1);
2381     int r2 = get_field(s, r2);
2382     TCGv_i64 t = tcg_temp_new_i64();
2383 
2384     /* Note the "subsequently" in the PoO, which implies a defined result
2385        if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
2386     tcg_gen_shri_i64(t, psw_mask, 32);
2387     store_reg32_i64(r1, t);
2388     if (r2 != 0) {
2389         store_reg32_i64(r2, psw_mask);
2390     }
2391     return DISAS_NEXT;
2392 }
2393 
2394 static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2395 {
2396     int r1 = get_field(s, r1);
2397     TCGv_i32 ilen;
2398     TCGv_i64 v1;
2399 
2400     /* Nested EXECUTE is not allowed.  */
2401     if (unlikely(s->ex_value)) {
2402         gen_program_exception(s, PGM_EXECUTE);
2403         return DISAS_NORETURN;
2404     }
2405 
2406     update_psw_addr(s);
2407     update_cc_op(s);
2408 
2409     if (r1 == 0) {
2410         v1 = tcg_constant_i64(0);
2411     } else {
2412         v1 = regs[r1];
2413     }
2414 
2415     ilen = tcg_constant_i32(s->ilen);
2416     gen_helper_ex(cpu_env, ilen, v1, o->in2);
2417 
2418     return DISAS_PC_CC_UPDATED;
2419 }
2420 
2421 static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2422 {
2423     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2424 
2425     if (!m34) {
2426         return DISAS_NORETURN;
2427     }
2428     gen_helper_fieb(o->out, cpu_env, o->in2, m34);
2429     return DISAS_NEXT;
2430 }
2431 
2432 static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2433 {
2434     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2435 
2436     if (!m34) {
2437         return DISAS_NORETURN;
2438     }
2439     gen_helper_fidb(o->out, cpu_env, o->in2, m34);
2440     return DISAS_NEXT;
2441 }
2442 
2443 static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2444 {
2445     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2446 
2447     if (!m34) {
2448         return DISAS_NORETURN;
2449     }
2450     gen_helper_fixb(o->out_128, cpu_env, o->in2_128, m34);
2451     return DISAS_NEXT;
2452 }
2453 
2454 static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
2455 {
2456     /* We'll use the original input for cc computation, since we get to
2457        compare that against 0, which ought to be better than comparing
2458        the real output against 64.  It also lets cc_dst be a convenient
2459        temporary during our computation.  */
2460     gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2461 
2462     /* R1 = IN ? CLZ(IN) : 64.  */
2463     tcg_gen_clzi_i64(o->out, o->in2, 64);
2464 
2465     /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
2466        value by 64, which is undefined.  But since the shift is 64 iff the
2467        input is zero, we still get the correct result after and'ing.  */
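         /* E.g. IN = 5: R1 = clz(5) = 61, found bit = 1 << 2, and so
            R1+1 = 5 & ~4 = 1.  For IN = 0, cc_dst is 0 and the AND yields 0
            regardless of what the shift produced.  */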
2468     tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2469     tcg_gen_shr_i64(o->out2, o->out2, o->out);
2470     tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2471     return DISAS_NEXT;
2472 }
2473 
2474 static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2475 {
2476     int m3 = get_field(s, m3);
2477     int pos, len, base = s->insn->data;
2478     TCGv_i64 tmp = tcg_temp_new_i64();
2479     uint64_t ccm;
2480 
2481     switch (m3) {
2482     case 0xf:
2483         /* Effectively a 32-bit load.  */
2484         tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
2485         len = 32;
2486         goto one_insert;
2487 
2488     case 0xc:
2489     case 0x6:
2490     case 0x3:
2491         /* Effectively a 16-bit load.  */
2492         tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
2493         len = 16;
2494         goto one_insert;
2495 
2496     case 0x8:
2497     case 0x4:
2498     case 0x2:
2499     case 0x1:
2500         /* Effectively an 8-bit load.  */
2501         tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
2502         len = 8;
2503         goto one_insert;
2504 
2505     one_insert:
2506         pos = base + ctz32(m3) * 8;
2507         tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2508         ccm = ((1ull << len) - 1) << pos;
2509         break;
2510 
2511     default:
2512         /* This is going to be a sequence of loads and inserts.  */
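             /* E.g. m3 = 0xa with base 0: two bytes are deposited at bit
                positions 24 and 8, and ccm becomes 0xff00ff00.  */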
2513         pos = base + 32 - 8;
2514         ccm = 0;
2515         while (m3) {
2516             if (m3 & 0x8) {
2517                 tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
2518                 tcg_gen_addi_i64(o->in2, o->in2, 1);
2519                 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2520                 ccm |= 0xffull << pos;
2521             }
2522             m3 = (m3 << 1) & 0xf;
2523             pos -= 8;
2524         }
2525         break;
2526     }
2527 
2528     tcg_gen_movi_i64(tmp, ccm);
2529     gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2530     return DISAS_NEXT;
2531 }
2532 
2533 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2534 {
2535     int shift = s->insn->data & 0xff;
2536     int size = s->insn->data >> 8;
2537     tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2538     return DISAS_NEXT;
2539 }
2540 
2541 static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
2542 {
2543     TCGv_i64 t1, t2;
2544 
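         /* Build the byte 0:0:cc:program-mask and insert it at bits 32-39
            of r1 (bits 24-31 of the TCG value).  */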
2545     gen_op_calc_cc(s);
2546     t1 = tcg_temp_new_i64();
2547     tcg_gen_extract_i64(t1, psw_mask, 40, 4);
2548     t2 = tcg_temp_new_i64();
2549     tcg_gen_extu_i32_i64(t2, cc_op);
2550     tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
2551     tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
2552     return DISAS_NEXT;
2553 }
2554 
2555 #ifndef CONFIG_USER_ONLY
2556 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2557 {
2558     TCGv_i32 m4;
2559 
2560     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2561         m4 = tcg_constant_i32(get_field(s, m4));
2562     } else {
2563         m4 = tcg_constant_i32(0);
2564     }
2565     gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2566     return DISAS_NEXT;
2567 }
2568 
2569 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2570 {
2571     TCGv_i32 m4;
2572 
2573     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2574         m4 = tcg_constant_i32(get_field(s, m4));
2575     } else {
2576         m4 = tcg_constant_i32(0);
2577     }
2578     gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2579     return DISAS_NEXT;
2580 }
2581 
2582 static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
2583 {
2584     gen_helper_iske(o->out, cpu_env, o->in2);
2585     return DISAS_NEXT;
2586 }
2587 #endif
2588 
2589 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2590 {
2591     int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2592     int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2593     int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2594     TCGv_i32 t_r1, t_r2, t_r3, type;
2595 
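         /* Each case below checks the operand constraints for its function
            type, then falls through to the weaker checks that also apply.  */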
2596     switch (s->insn->data) {
2597     case S390_FEAT_TYPE_KMA:
2598         if (r3 == r1 || r3 == r2) {
2599             gen_program_exception(s, PGM_SPECIFICATION);
2600             return DISAS_NORETURN;
2601         }
2602         /* FALL THROUGH */
2603     case S390_FEAT_TYPE_KMCTR:
2604         if (r3 & 1 || !r3) {
2605             gen_program_exception(s, PGM_SPECIFICATION);
2606             return DISAS_NORETURN;
2607         }
2608         /* FALL THROUGH */
2609     case S390_FEAT_TYPE_PPNO:
2610     case S390_FEAT_TYPE_KMF:
2611     case S390_FEAT_TYPE_KMC:
2612     case S390_FEAT_TYPE_KMO:
2613     case S390_FEAT_TYPE_KM:
2614         if (r1 & 1 || !r1) {
2615             gen_program_exception(s, PGM_SPECIFICATION);
2616             return DISAS_NORETURN;
2617         }
2618         /* FALL THROUGH */
2619     case S390_FEAT_TYPE_KMAC:
2620     case S390_FEAT_TYPE_KIMD:
2621     case S390_FEAT_TYPE_KLMD:
2622         if (r2 & 1 || !r2) {
2623             gen_program_exception(s, PGM_SPECIFICATION);
2624             return DISAS_NORETURN;
2625         }
2626         /* FALL THROUGH */
2627     case S390_FEAT_TYPE_PCKMO:
2628     case S390_FEAT_TYPE_PCC:
2629         break;
2630     default:
2631         g_assert_not_reached();
2632     }
2633 
2634     t_r1 = tcg_constant_i32(r1);
2635     t_r2 = tcg_constant_i32(r2);
2636     t_r3 = tcg_constant_i32(r3);
2637     type = tcg_constant_i32(s->insn->data);
2638     gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2639     set_cc_static(s);
2640     return DISAS_NEXT;
2641 }
2642 
2643 static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2644 {
2645     gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2646     set_cc_static(s);
2647     return DISAS_NEXT;
2648 }
2649 
2650 static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2651 {
2652     gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2653     set_cc_static(s);
2654     return DISAS_NEXT;
2655 }
2656 
2657 static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2658 {
2659     gen_helper_kxb(cc_op, cpu_env, o->in1_128, o->in2_128);
2660     set_cc_static(s);
2661     return DISAS_NEXT;
2662 }
2663 
2664 static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2665 {
2666     /* The real output is indeed the original value in memory;
2667        the atomic operation returns it in o->in2.  */
2668     tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2669                                  s->insn->data | MO_ALIGN);
2670     /* However, we need to recompute the addition for setting CC.  */
2671     tcg_gen_add_i64(o->out, o->in1, o->in2);
2672     return DISAS_NEXT;
2673 }
2674 
2675 static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2676 {
2677     /* The real output is indeed the original value in memory;
2678        the atomic operation returns it in o->in2.  */
2679     tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2680                                  s->insn->data | MO_ALIGN);
2681     /* However, we need to recompute the operation for setting CC.  */
2682     tcg_gen_and_i64(o->out, o->in1, o->in2);
2683     return DISAS_NEXT;
2684 }
2685 
2686 static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2687 {
2688     /* The real output is indeed the original value in memory;
2689        the atomic operation returns it in o->in2.  */
2690     tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2691                                 s->insn->data | MO_ALIGN);
2692     /* However, we need to recompute the operation for setting CC.  */
2693     tcg_gen_or_i64(o->out, o->in1, o->in2);
2694     return DISAS_NEXT;
2695 }
2696 
2697 static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2698 {
2699     /* The real output is indeed the original value in memory;
2700        the atomic operation returns it in o->in2.  */
2701     tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2702                                  s->insn->data | MO_ALIGN);
2703     /* However, we need to recompute the operation for setting CC.  */
2704     tcg_gen_xor_i64(o->out, o->in1, o->in2);
2705     return DISAS_NEXT;
2706 }
2707 
2708 static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2709 {
2710     gen_helper_ldeb(o->out, cpu_env, o->in2);
2711     return DISAS_NEXT;
2712 }
2713 
2714 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2715 {
2716     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2717 
2718     if (!m34) {
2719         return DISAS_NORETURN;
2720     }
2721     gen_helper_ledb(o->out, cpu_env, o->in2, m34);
2722     return DISAS_NEXT;
2723 }
2724 
2725 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2726 {
2727     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2728 
2729     if (!m34) {
2730         return DISAS_NORETURN;
2731     }
2732     gen_helper_ldxb(o->out, cpu_env, o->in2_128, m34);
2733     return DISAS_NEXT;
2734 }
2735 
2736 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2737 {
2738     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2739 
2740     if (!m34) {
2741         return DISAS_NORETURN;
2742     }
2743     gen_helper_lexb(o->out, cpu_env, o->in2_128, m34);
2744     return DISAS_NEXT;
2745 }
2746 
2747 static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2748 {
2749     gen_helper_lxdb(o->out_128, cpu_env, o->in2);
2750     return DISAS_NEXT;
2751 }
2752 
2753 static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2754 {
2755     gen_helper_lxeb(o->out_128, cpu_env, o->in2);
2756     return DISAS_NEXT;
2757 }
2758 
2759 static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
2760 {
2761     tcg_gen_shli_i64(o->out, o->in2, 32);
2762     return DISAS_NEXT;
2763 }
2764 
2765 static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2766 {
2767     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2768     return DISAS_NEXT;
2769 }
2770 
2771 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2772 {
2773     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_SB);
2774     return DISAS_NEXT;
2775 }
2776 
2777 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2778 {
2779     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_UB);
2780     return DISAS_NEXT;
2781 }
2782 
2783 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2784 {
2785     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TESW);
2786     return DISAS_NEXT;
2787 }
2788 
2789 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2790 {
2791     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUW);
2792     return DISAS_NEXT;
2793 }
2794 
2795 static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2796 {
2797     tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
2798                        MO_TESL | s->insn->data);
2799     return DISAS_NEXT;
2800 }
2801 
2802 static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2803 {
2804     tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
2805                        MO_TEUL | s->insn->data);
2806     return DISAS_NEXT;
2807 }
2808 
2809 static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2810 {
2811     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s),
2812                         MO_TEUQ | s->insn->data);
2813     return DISAS_NEXT;
2814 }
2815 
2816 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2817 {
2818     TCGLabel *lab = gen_new_label();
2819     store_reg32_i64(get_field(s, r1), o->in2);
2820     /* The value is stored even in case of trap. */
2821     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2822     gen_trap(s);
2823     gen_set_label(lab);
2824     return DISAS_NEXT;
2825 }
2826 
2827 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2828 {
2829     TCGLabel *lab = gen_new_label();
2830     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUQ);
2831     /* The value is stored even in case of trap. */
2832     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2833     gen_trap(s);
2834     gen_set_label(lab);
2835     return DISAS_NEXT;
2836 }
2837 
2838 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2839 {
2840     TCGLabel *lab = gen_new_label();
2841     store_reg32h_i64(get_field(s, r1), o->in2);
2842     /* The value is stored even in case of trap. */
2843     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2844     gen_trap(s);
2845     gen_set_label(lab);
2846     return DISAS_NEXT;
2847 }
2848 
2849 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2850 {
2851     TCGLabel *lab = gen_new_label();
2852 
2853     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
2854     /* The value is stored even in case of trap. */
2855     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2856     gen_trap(s);
2857     gen_set_label(lab);
2858     return DISAS_NEXT;
2859 }
2860 
2861 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2862 {
2863     TCGLabel *lab = gen_new_label();
2864     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2865     /* The value is stored even in case of trap. */
2866     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2867     gen_trap(s);
2868     gen_set_label(lab);
2869     return DISAS_NEXT;
2870 }
2871 
2872 static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
2873 {
2874     DisasCompare c;
2875 
2876     if (have_field(s, m3)) {
2877         /* LOAD * ON CONDITION */
2878         disas_jcc(s, &c, get_field(s, m3));
2879     } else {
2880         /* SELECT */
2881         disas_jcc(s, &c, get_field(s, m4));
2882     }
2883 
2884     if (c.is_64) {
2885         tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2886                             o->in2, o->in1);
2887     } else {
2888         TCGv_i32 t32 = tcg_temp_new_i32();
2889         TCGv_i64 t, z;
2890 
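             /* Materialize the 32-bit comparison as 0/1 and widen it; the
                64-bit movcond below needs a condition of matching width.  */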
2891         tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2892 
2893         t = tcg_temp_new_i64();
2894         tcg_gen_extu_i32_i64(t, t32);
2895 
2896         z = tcg_constant_i64(0);
2897         tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2898     }
2899 
2900     return DISAS_NEXT;
2901 }
2902 
2903 #ifndef CONFIG_USER_ONLY
2904 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
2905 {
2906     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2907     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2908 
2909     gen_helper_lctl(cpu_env, r1, o->in2, r3);
2910     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
2911     s->exit_to_mainloop = true;
2912     return DISAS_TOO_MANY;
2913 }
2914 
2915 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
2916 {
2917     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2918     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2919 
2920     gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2921     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
2922     s->exit_to_mainloop = true;
2923     return DISAS_TOO_MANY;
2924 }
2925 
2926 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
2927 {
2928     gen_helper_lra(o->out, cpu_env, o->in2);
2929     set_cc_static(s);
2930     return DISAS_NEXT;
2931 }
2932 
2933 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
2934 {
2935     tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
2936     return DISAS_NEXT;
2937 }
2938 
2939 static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
2940 {
2941     TCGv_i64 mask, addr;
2942 
2943     per_breaking_event(s);
2944 
2945     /*
2946      * Convert the short PSW into the normal PSW, similar to what
2947      * s390_cpu_load_normal() does.
2948      */
2949     mask = tcg_temp_new_i64();
2950     addr = tcg_temp_new_i64();
2951     tcg_gen_qemu_ld_i64(mask, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN_8);
2952     tcg_gen_andi_i64(addr, mask, PSW_MASK_SHORT_ADDR);
2953     tcg_gen_andi_i64(mask, mask, PSW_MASK_SHORT_CTRL);
2954     tcg_gen_xori_i64(mask, mask, PSW_MASK_SHORTPSW);
2955     gen_helper_load_psw(cpu_env, mask, addr);
2956     return DISAS_NORETURN;
2957 }
2958 
2959 static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
2960 {
2961     TCGv_i64 t1, t2;
2962 
2963     per_breaking_event(s);
2964 
2965     t1 = tcg_temp_new_i64();
2966     t2 = tcg_temp_new_i64();
2967     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
2968                         MO_TEUQ | MO_ALIGN_8);
2969     tcg_gen_addi_i64(o->in2, o->in2, 8);
2970     tcg_gen_qemu_ld_i64(t2, o->in2, get_mem_index(s), MO_TEUQ);
2971     gen_helper_load_psw(cpu_env, t1, t2);
2972     return DISAS_NORETURN;
2973 }
2974 #endif
2975 
2976 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
2977 {
2978     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2979     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2980 
2981     gen_helper_lam(cpu_env, r1, o->in2, r3);
2982     return DISAS_NEXT;
2983 }
2984 
2985 static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
2986 {
2987     int r1 = get_field(s, r1);
2988     int r3 = get_field(s, r3);
2989     TCGv_i64 t1, t2;
2990 
2991     /* Only one register to read. */
2992     t1 = tcg_temp_new_i64();
2993     if (unlikely(r1 == r3)) {
2994         tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
2995         store_reg32_i64(r1, t1);
2996         return DISAS_NEXT;
2997     }
2998 
2999     /* First load the values of the first and last registers to trigger
3000        possible page faults. */
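         /* That way any fault arrives before a register has been modified,
            so the instruction can restart cleanly.  */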
3001     t2 = tcg_temp_new_i64();
3002     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3003     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3004     tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
3005     store_reg32_i64(r1, t1);
3006     store_reg32_i64(r3, t2);
3007 
3008     /* Only two registers to read. */
3009     if (((r1 + 1) & 15) == r3) {
3010         return DISAS_NEXT;
3011     }
3012 
3013     /* Then load the remaining registers. A page fault can't occur. */
3014     r3 = (r3 - 1) & 15;
3015     tcg_gen_movi_i64(t2, 4);
3016     while (r1 != r3) {
3017         r1 = (r1 + 1) & 15;
3018         tcg_gen_add_i64(o->in2, o->in2, t2);
3019         tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3020         store_reg32_i64(r1, t1);
3021     }
3022     return DISAS_NEXT;
3023 }
3024 
3025 static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
3026 {
3027     int r1 = get_field(s, r1);
3028     int r3 = get_field(s, r3);
3029     TCGv_i64 t1, t2;
3030 
3031     /* Only one register to read. */
3032     t1 = tcg_temp_new_i64();
3033     if (unlikely(r1 == r3)) {
3034         tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3035         store_reg32h_i64(r1, t1);
3036         return DISAS_NEXT;
3037     }
3038 
3039     /* First load the values of the first and last registers to trigger
3040        possible page faults. */
3041     t2 = tcg_temp_new_i64();
3042     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3043     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3044     tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
3045     store_reg32h_i64(r1, t1);
3046     store_reg32h_i64(r3, t2);
3047 
3048     /* Only two registers to read. */
3049     if (((r1 + 1) & 15) == r3) {
3050         return DISAS_NEXT;
3051     }
3052 
3053     /* Then load the remaining registers. A page fault can't occur. */
3054     r3 = (r3 - 1) & 15;
3055     tcg_gen_movi_i64(t2, 4);
3056     while (r1 != r3) {
3057         r1 = (r1 + 1) & 15;
3058         tcg_gen_add_i64(o->in2, o->in2, t2);
3059         tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3060         store_reg32h_i64(r1, t1);
3061     }
3062     return DISAS_NEXT;
3063 }
3064 
3065 static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3066 {
3067     int r1 = get_field(s, r1);
3068     int r3 = get_field(s, r3);
3069     TCGv_i64 t1, t2;
3070 
3071     /* Only one register to read. */
3072     if (unlikely(r1 == r3)) {
3073         tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
3074         return DISAS_NEXT;
3075     }
3076 
3077     /* First load the values of the first and last registers to trigger
3078        possible page faults. */
3079     t1 = tcg_temp_new_i64();
3080     t2 = tcg_temp_new_i64();
3081     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
3082     tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3083     tcg_gen_qemu_ld_i64(regs[r3], t2, get_mem_index(s), MO_TEUQ);
3084     tcg_gen_mov_i64(regs[r1], t1);
3085 
3086     /* Only two registers to read. */
3087     if (((r1 + 1) & 15) == r3) {
3088         return DISAS_NEXT;
3089     }
3090 
3091     /* Then load the remaining registers. A page fault can't occur. */
3092     r3 = (r3 - 1) & 15;
3093     tcg_gen_movi_i64(t1, 8);
3094     while (r1 != r3) {
3095         r1 = (r1 + 1) & 15;
3096         tcg_gen_add_i64(o->in2, o->in2, t1);
3097         tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
3098     }
3099     return DISAS_NEXT;
3100 }
3101 
3102 static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3103 {
3104     TCGv_i64 a1, a2;
3105     MemOp mop = s->insn->data;
3106 
3107     /* In a parallel context, stop the world and single step.  */
3108     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3109         update_psw_addr(s);
3110         update_cc_op(s);
3111         gen_exception(EXCP_ATOMIC);
3112         return DISAS_NORETURN;
3113     }
3114 
3115     /* In a serial context, perform the two loads ... */
3116     a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
3117     a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3118     tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3119     tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3120 
3121     /* ... and indicate that we performed them while interlocked.  */
3122     gen_op_movi_cc(s, 0);
3123     return DISAS_NEXT;
3124 }
3125 
3126 static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3127 {
3128     o->out_128 = tcg_temp_new_i128();
3129     tcg_gen_qemu_ld_i128(o->out_128, o->in2, get_mem_index(s),
3130                          MO_TE | MO_128 | MO_ALIGN);
3131     return DISAS_NEXT;
3132 }
3133 
3134 #ifndef CONFIG_USER_ONLY
3135 static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3136 {
3137     tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
3138     return DISAS_NEXT;
3139 }
3140 #endif
3141 
3142 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3143 {
3144     tcg_gen_andi_i64(o->out, o->in2, -256);
3145     return DISAS_NEXT;
3146 }
3147 
3148 static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3149 {
3150     const int64_t block_size = (1ull << (get_field(s, m3) + 6));
3151 
3152     if (get_field(s, m3) > 6) {
3153         gen_program_exception(s, PGM_SPECIFICATION);
3154         return DISAS_NORETURN;
3155     }
3156 
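         /* addr | -block_size equals addr | ~(block_size - 1); negating that
            gives block_size - (addr % block_size), the distance to the next
            block boundary, which is then capped at 16.  */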
3157     tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3158     tcg_gen_neg_i64(o->addr1, o->addr1);
3159     tcg_gen_movi_i64(o->out, 16);
3160     tcg_gen_umin_i64(o->out, o->out, o->addr1);
3161     gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
3162     return DISAS_NEXT;
3163 }
3164 
3165 static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
3166 {
3167     const uint16_t monitor_class = get_field(s, i2);
3168 
3169     if (monitor_class & 0xff00) {
3170         gen_program_exception(s, PGM_SPECIFICATION);
3171         return DISAS_NORETURN;
3172     }
3173 
3174 #if !defined(CONFIG_USER_ONLY)
3175     gen_helper_monitor_call(cpu_env, o->addr1,
3176                             tcg_constant_i32(monitor_class));
3177 #endif
3178     /* Defaults to a NOP. */
3179     return DISAS_NEXT;
3180 }
3181 
3182 static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3183 {
3184     o->out = o->in2;
3185     o->in2 = NULL;
3186     return DISAS_NEXT;
3187 }
3188 
3189 static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3190 {
3191     int b2 = get_field(s, b2);
3192     TCGv ar1 = tcg_temp_new_i64();
3193 
3194     o->out = o->in2;
3195     o->in2 = NULL;
3196 
3197     switch (s->base.tb->flags & FLAG_MASK_ASC) {
3198     case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3199         tcg_gen_movi_i64(ar1, 0);
3200         break;
3201     case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3202         tcg_gen_movi_i64(ar1, 1);
3203         break;
3204     case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3205         if (b2) {
3206             tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3207         } else {
3208             tcg_gen_movi_i64(ar1, 0);
3209         }
3210         break;
3211     case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3212         tcg_gen_movi_i64(ar1, 2);
3213         break;
3214     }
3215 
3216     tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3217     return DISAS_NEXT;
3218 }
3219 
3220 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3221 {
3222     o->out = o->in1;
3223     o->out2 = o->in2;
3224     o->in1 = NULL;
3225     o->in2 = NULL;
3226     return DISAS_NEXT;
3227 }
3228 
3229 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3230 {
3231     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3232 
3233     gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3234     return DISAS_NEXT;
3235 }
3236 
3237 static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
3238 {
3239     gen_helper_mvcrl(cpu_env, regs[0], o->addr1, o->in2);
3240     return DISAS_NEXT;
3241 }
3242 
3243 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3244 {
3245     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3246 
3247     gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3248     return DISAS_NEXT;
3249 }
3250 
3251 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3252 {
3253     int r1 = get_field(s, r1);
3254     int r2 = get_field(s, r2);
3255     TCGv_i32 t1, t2;
3256 
3257     /* r1 and r2 must be even.  */
3258     if (r1 & 1 || r2 & 1) {
3259         gen_program_exception(s, PGM_SPECIFICATION);
3260         return DISAS_NORETURN;
3261     }
3262 
3263     t1 = tcg_constant_i32(r1);
3264     t2 = tcg_constant_i32(r2);
3265     gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3266     set_cc_static(s);
3267     return DISAS_NEXT;
3268 }
3269 
3270 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3271 {
3272     int r1 = get_field(s, r1);
3273     int r3 = get_field(s, r3);
3274     TCGv_i32 t1, t3;
3275 
3276     /* r1 and r3 must be even.  */
3277     if (r1 & 1 || r3 & 1) {
3278         gen_program_exception(s, PGM_SPECIFICATION);
3279         return DISAS_NORETURN;
3280     }
3281 
3282     t1 = tcg_constant_i32(r1);
3283     t3 = tcg_constant_i32(r3);
3284     gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3285     set_cc_static(s);
3286     return DISAS_NEXT;
3287 }
3288 
3289 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3290 {
3291     int r1 = get_field(s, r1);
3292     int r3 = get_field(s, r3);
3293     TCGv_i32 t1, t3;
3294 
3295     /* r1 and r3 must be even.  */
3296     if (r1 & 1 || r3 & 1) {
3297         gen_program_exception(s, PGM_SPECIFICATION);
3298         return DISAS_NORETURN;
3299     }
3300 
3301     t1 = tcg_constant_i32(r1);
3302     t3 = tcg_constant_i32(r3);
3303     gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3304     set_cc_static(s);
3305     return DISAS_NEXT;
3306 }
3307 
3308 static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3309 {
3310     int r3 = get_field(s, r3);
3311     gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3312     set_cc_static(s);
3313     return DISAS_NEXT;
3314 }
3315 
3316 #ifndef CONFIG_USER_ONLY
3317 static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3318 {
3319     int r1 = get_field(s, l1);
3320     int r3 = get_field(s, r3);
3321     gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]);
3322     set_cc_static(s);
3323     return DISAS_NEXT;
3324 }
3325 
3326 static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3327 {
3328     int r1 = get_field(s, l1);
3329     int r3 = get_field(s, r3);
3330     gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]);
3331     set_cc_static(s);
3332     return DISAS_NEXT;
3333 }
3334 #endif
3335 
3336 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3337 {
3338     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3339 
3340     gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3341     return DISAS_NEXT;
3342 }
3343 
3344 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3345 {
3346     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3347 
3348     gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3349     return DISAS_NEXT;
3350 }
3351 
3352 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3353 {
3354     TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
3355     TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
3356 
3357     gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);
3358     set_cc_static(s);
3359     return DISAS_NEXT;
3360 }
3361 
3362 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3363 {
3364     TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
3365     TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
3366 
3367     gen_helper_mvst(cc_op, cpu_env, t1, t2);
3368     set_cc_static(s);
3369     return DISAS_NEXT;
3370 }
3371 
3372 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3373 {
3374     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3375 
3376     gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3377     return DISAS_NEXT;
3378 }
3379 
3380 static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3381 {
3382     tcg_gen_mul_i64(o->out, o->in1, o->in2);
3383     return DISAS_NEXT;
3384 }
3385 
3386 static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3387 {
3388     tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3389     return DISAS_NEXT;
3390 }
3391 
3392 static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
3393 {
3394     tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
3395     return DISAS_NEXT;
3396 }
3397 
3398 static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3399 {
3400     gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3401     return DISAS_NEXT;
3402 }
3403 
3404 static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3405 {
3406     gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3407     return DISAS_NEXT;
3408 }
3409 
3410 static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3411 {
3412     gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3413     return DISAS_NEXT;
3414 }
3415 
3416 static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3417 {
3418     gen_helper_mxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
3419     return DISAS_NEXT;
3420 }
3421 
3422 static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3423 {
3424     gen_helper_mxdb(o->out_128, cpu_env, o->in1_128, o->in2);
3425     return DISAS_NEXT;
3426 }
3427 
3428 static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3429 {
3430     TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3431     gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3432     return DISAS_NEXT;
3433 }
3434 
3435 static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3436 {
3437     TCGv_i64 r3 = load_freg(get_field(s, r3));
3438     gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
3439     return DISAS_NEXT;
3440 }
3441 
3442 static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3443 {
3444     TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3445     gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3446     return DISAS_NEXT;
3447 }
3448 
3449 static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3450 {
3451     TCGv_i64 r3 = load_freg(get_field(s, r3));
3452     gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
3453     return DISAS_NEXT;
3454 }
3455 
3456 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3457 {
3458     TCGv_i64 z = tcg_constant_i64(0);
3459     TCGv_i64 n = tcg_temp_new_i64();
3460 
3461     tcg_gen_neg_i64(n, o->in2);
3462     tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3463     return DISAS_NEXT;
3464 }
3465 
3466 static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3467 {
3468     tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3469     return DISAS_NEXT;
3470 }
3471 
3472 static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3473 {
3474     tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3475     return DISAS_NEXT;
3476 }
3477 
3478 static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3479 {
3480     tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3481     tcg_gen_mov_i64(o->out2, o->in2);
3482     return DISAS_NEXT;
3483 }
3484 
3485 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3486 {
3487     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3488 
3489     gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3490     set_cc_static(s);
3491     return DISAS_NEXT;
3492 }
3493 
3494 static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3495 {
3496     tcg_gen_neg_i64(o->out, o->in2);
3497     return DISAS_NEXT;
3498 }
3499 
3500 static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3501 {
3502     tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3503     return DISAS_NEXT;
3504 }
3505 
3506 static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3507 {
3508     tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3509     return DISAS_NEXT;
3510 }
3511 
3512 static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3513 {
3514     tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3515     tcg_gen_mov_i64(o->out2, o->in2);
3516     return DISAS_NEXT;
3517 }
3518 
3519 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3520 {
3521     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3522 
3523     gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3524     set_cc_static(s);
3525     return DISAS_NEXT;
3526 }
3527 
3528 static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3529 {
3530     tcg_gen_or_i64(o->out, o->in1, o->in2);
3531     return DISAS_NEXT;
3532 }
3533 
3534 static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3535 {
3536     int shift = s->insn->data & 0xff;
3537     int size = s->insn->data >> 8;
3538     uint64_t mask = ((1ull << size) - 1) << shift;
3539     TCGv_i64 t = tcg_temp_new_i64();
3540 
3541     tcg_gen_shli_i64(t, o->in2, shift);
3542     tcg_gen_or_i64(o->out, o->in1, t);
3543 
3544     /* Produce the CC from only the bits manipulated.  */
3545     tcg_gen_andi_i64(cc_dst, o->out, mask);
3546     set_cc_nz_u64(s, cc_dst);
3547     return DISAS_NEXT;
3548 }
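
/*
 * The insn->data word above packs (size << 8) | shift for the
 * immediate-OR variants.  For example, OIHH inserts into bits 0-15 of
 * the register, so size = 16 and shift = 48, giving
 *   mask = ((1ull << 16) - 1) << 48 = 0xffff000000000000ull
 * and the CC is derived from the result restricted to that halfword.
 */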
3549 
3550 static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
3551 {
3552     o->in1 = tcg_temp_new_i64();
3553 
3554     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3555         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3556     } else {
3557         /* Perform the atomic operation in memory. */
3558         tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3559                                     s->insn->data);
3560     }
3561 
3562     /* Recompute also for atomic case: needed for setting CC. */
3563     tcg_gen_or_i64(o->out, o->in1, o->in2);
3564 
3565     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3566         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3567     }
3568     return DISAS_NEXT;
3569 }
3570 
3571 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3572 {
3573     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3574 
3575     gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3576     return DISAS_NEXT;
3577 }
3578 
3579 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3580 {
3581     int l2 = get_field(s, l2) + 1;
3582     TCGv_i32 l;
3583 
3584     /* The length must not exceed 32 bytes.  */
3585     if (l2 > 32) {
3586         gen_program_exception(s, PGM_SPECIFICATION);
3587         return DISAS_NORETURN;
3588     }
3589     l = tcg_constant_i32(l2);
3590     gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3591     return DISAS_NEXT;
3592 }
3593 
3594 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3595 {
3596     int l2 = get_field(s, l2) + 1;
3597     TCGv_i32 l;
3598 
3599     /* The length must be even and must not exceed 64 bytes.  */
3600     if ((l2 & 1) || (l2 > 64)) {
3601         gen_program_exception(s, PGM_SPECIFICATION);
3602         return DISAS_NORETURN;
3603     }
3604     l = tcg_constant_i32(l2);
3605     gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3606     return DISAS_NEXT;
3607 }
3608 
3609 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3610 {
3611     const uint8_t m3 = get_field(s, m3);
3612 
3613     if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) {
3614         tcg_gen_ctpop_i64(o->out, o->in2);
3615     } else {
3616         gen_helper_popcnt(o->out, o->in2);
3617     }
3618     return DISAS_NEXT;
3619 }
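
/*
 * Without the extension, POPCNT counts per byte: each result byte
 * holds the population count of the corresponding source byte, so
 * 0x0303 yields 0x0202, whereas the M3-bit-8 form yields the single
 * total count 4.
 */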
3620 
3621 #ifndef CONFIG_USER_ONLY
3622 static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
3623 {
3624     gen_helper_ptlb(cpu_env);
3625     return DISAS_NEXT;
3626 }
3627 #endif
3628 
3629 static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
3630 {
3631     int i3 = get_field(s, i3);
3632     int i4 = get_field(s, i4);
3633     int i5 = get_field(s, i5);
3634     int do_zero = i4 & 0x80;
3635     uint64_t mask, imask, pmask;
3636     int pos, len, rot;
3637 
3638     /* Adjust the arguments for the specific insn.  */
3639     switch (s->fields.op2) {
3640     case 0x55: /* risbg */
3641     case 0x59: /* risbgn */
3642         i3 &= 63;
3643         i4 &= 63;
3644         pmask = ~0;
3645         break;
3646     case 0x5d: /* risbhg */
3647         i3 &= 31;
3648         i4 &= 31;
3649         pmask = 0xffffffff00000000ull;
3650         break;
3651     case 0x51: /* risblg */
3652         i3 = (i3 & 31) + 32;
3653         i4 = (i4 & 31) + 32;
3654         pmask = 0x00000000ffffffffull;
3655         break;
3656     default:
3657         g_assert_not_reached();
3658     }
3659 
3660     /* MASK is the set of bits to be inserted from R2. */
3661     if (i3 <= i4) {
3662         /* [0...i3---i4...63] */
3663         mask = (-1ull >> i3) & (-1ull << (63 - i4));
3664     } else {
3665         /* [0---i4...i3---63] */
3666         mask = (-1ull >> i3) | (-1ull << (63 - i4));
3667     }
3668     /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
3669     mask &= pmask;
3670 
3671     /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
3672        insns, we need to keep the other half of the register.  */
3673     imask = ~mask | ~pmask;
3674     if (do_zero) {
3675         imask = ~pmask;
3676     }
3677 
3678     len = i4 - i3 + 1;
3679     pos = 63 - i4;
3680     rot = i5 & 63;
3681 
3682     /* In some cases we can implement this with extract.  */
3683     if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3684         tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3685         return DISAS_NEXT;
3686     }
3687 
3688     /* In some cases we can implement this with deposit.  */
3689     if (len > 0 && (imask == 0 || ~mask == imask)) {
3690         /* Note that we rotate the bits to be inserted to the lsb, not to
3691            the position as described in the PoO.  */
3692         rot = (rot - pos) & 63;
3693     } else {
3694         pos = -1;
3695     }
3696 
3697     /* Rotate the input as necessary.  */
3698     tcg_gen_rotli_i64(o->in2, o->in2, rot);
3699 
3700     /* Insert the selected bits into the output.  */
3701     if (pos >= 0) {
3702         if (imask == 0) {
3703             tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3704         } else {
3705             tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3706         }
3707     } else if (imask == 0) {
3708         tcg_gen_andi_i64(o->out, o->in2, mask);
3709     } else {
3710         tcg_gen_andi_i64(o->in2, o->in2, mask);
3711         tcg_gen_andi_i64(o->out, o->out, imask);
3712         tcg_gen_or_i64(o->out, o->out, o->in2);
3713     }
3714     return DISAS_NEXT;
3715 }
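
/*
 * Worked example for the selection logic above: RISBG with I3=48,
 * I4=55, I5=0 gives
 *   mask  = (-1ull >> 48) & (-1ull << 8) = 0x000000000000ff00ull
 *   imask = ~mask, len = 8, pos = 8, rot = 0
 * so the deposit path applies: I2 is rotated left by (0 - 8) & 63 = 56
 * to bring the selected byte down to the lsb, then deposited into
 * bits 15:8 of R1, with the remaining bits kept via IMASK.
 */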
3716 
3717 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3718 {
3719     int i3 = get_field(s, i3);
3720     int i4 = get_field(s, i4);
3721     int i5 = get_field(s, i5);
3722     TCGv_i64 orig_out;
3723     uint64_t mask;
3724 
3725     /* If this is a test-only form, arrange to discard the result.  */
3726     if (i3 & 0x80) {
3727         tcg_debug_assert(o->out != NULL);
3728         orig_out = o->out;
3729         o->out = tcg_temp_new_i64();
3730         tcg_gen_mov_i64(o->out, orig_out);
3731     }
3732 
3733     i3 &= 63;
3734     i4 &= 63;
3735     i5 &= 63;
3736 
3737     /* MASK is the set of bits to be operated on from R2.
3738        Take care for I3/I4 wraparound.  */
3739     mask = ~0ull >> i3;
3740     if (i3 <= i4) {
3741         mask ^= ~0ull >> i4 >> 1;
3742     } else {
3743         mask |= ~(~0ull >> i4 >> 1);
3744     }
3745 
3746     /* Rotate the input as necessary.  */
3747     tcg_gen_rotli_i64(o->in2, o->in2, i5);
3748 
3749     /* Operate.  */
3750     switch (s->fields.op2) {
3751     case 0x54: /* AND */
3752         tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3753         tcg_gen_and_i64(o->out, o->out, o->in2);
3754         break;
3755     case 0x56: /* OR */
3756         tcg_gen_andi_i64(o->in2, o->in2, mask);
3757         tcg_gen_or_i64(o->out, o->out, o->in2);
3758         break;
3759     case 0x57: /* XOR */
3760         tcg_gen_andi_i64(o->in2, o->in2, mask);
3761         tcg_gen_xor_i64(o->out, o->out, o->in2);
3762         break;
3763     default:
3764         abort();
3765     }
3766 
3767     /* Set the CC.  */
3768     tcg_gen_andi_i64(cc_dst, o->out, mask);
3769     set_cc_nz_u64(s, cc_dst);
3770     return DISAS_NEXT;
3771 }
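
/*
 * Wraparound example for the mask above: I3=62, I4=1 selects bit
 * positions 62, 63, 0 and 1 (PoO numbering), i.e.
 *   mask = (~0ull >> 62) | ~(~0ull >> 1 >> 1) = 0xc000000000000003ull
 * The rotated I2 bits under this mask are combined into R1, and the
 * same mask restricts the bits that feed the CC computation.
 */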
3772 
3773 static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
3774 {
3775     tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
3776     return DISAS_NEXT;
3777 }
3778 
3779 static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
3780 {
3781     tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
3782     return DISAS_NEXT;
3783 }
3784 
3785 static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
3786 {
3787     tcg_gen_bswap64_i64(o->out, o->in2);
3788     return DISAS_NEXT;
3789 }
3790 
3791 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3792 {
3793     TCGv_i32 t1 = tcg_temp_new_i32();
3794     TCGv_i32 t2 = tcg_temp_new_i32();
3795     TCGv_i32 to = tcg_temp_new_i32();
3796     tcg_gen_extrl_i64_i32(t1, o->in1);
3797     tcg_gen_extrl_i64_i32(t2, o->in2);
3798     tcg_gen_rotl_i32(to, t1, t2);
3799     tcg_gen_extu_i32_i64(o->out, to);
3800     return DISAS_NEXT;
3801 }
3802 
3803 static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
3804 {
3805     tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3806     return DISAS_NEXT;
3807 }
3808 
3809 #ifndef CONFIG_USER_ONLY
3810 static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
3811 {
3812     gen_helper_rrbe(cc_op, cpu_env, o->in2);
3813     set_cc_static(s);
3814     return DISAS_NEXT;
3815 }
3816 
3817 static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
3818 {
3819     gen_helper_sacf(cpu_env, o->in2);
3820     /* Addressing mode has changed, so end the block.  */
3821     return DISAS_TOO_MANY;
3822 }
3823 #endif
3824 
3825 static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
3826 {
3827     int sam = s->insn->data;
3828     TCGv_i64 tsam;
3829     uint64_t mask;
3830 
3831     switch (sam) {
3832     case 0:
3833         mask = 0xffffff;
3834         break;
3835     case 1:
3836         mask = 0x7fffffff;
3837         break;
3838     default:
3839         mask = -1;
3840         break;
3841     }
3842 
3843     /* Bizarre but true, we check the address of the current insn for the
3844        specification exception, not the next to be executed.  Thus the PoO
3845        documents that Bad Things Happen two bytes before the end.  */
3846     if (s->base.pc_next & ~mask) {
3847         gen_program_exception(s, PGM_SPECIFICATION);
3848         return DISAS_NORETURN;
3849     }
3850     s->pc_tmp &= mask;
3851 
3852     tsam = tcg_constant_i64(sam);
3853     tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3854 
3855     /* Always exit the TB, since we (may have) changed execution mode.  */
3856     return DISAS_TOO_MANY;
3857 }
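
/*
 * For example, SAM24 (sam == 0) uses mask = 0xffffff: executing the
 * instruction from an address above 16M raises the specification
 * exception, and depositing 0 into PSW bits 31-32 (the EA/BA pair,
 * bits 32:31 of the TCG value) selects the 24-bit addressing mode.
 */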
3858 
3859 static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
3860 {
3861     int r1 = get_field(s, r1);
3862     tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3863     return DISAS_NEXT;
3864 }
3865 
3866 static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
3867 {
3868     gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3869     return DISAS_NEXT;
3870 }
3871 
3872 static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
3873 {
3874     gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3875     return DISAS_NEXT;
3876 }
3877 
3878 static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
3879 {
3880     gen_helper_sxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
3881     return DISAS_NEXT;
3882 }
3883 
3884 static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
3885 {
3886     gen_helper_sqeb(o->out, cpu_env, o->in2);
3887     return DISAS_NEXT;
3888 }
3889 
3890 static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
3891 {
3892     gen_helper_sqdb(o->out, cpu_env, o->in2);
3893     return DISAS_NEXT;
3894 }
3895 
3896 static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
3897 {
3898     gen_helper_sqxb(o->out_128, cpu_env, o->in2_128);
3899     return DISAS_NEXT;
3900 }
3901 
3902 #ifndef CONFIG_USER_ONLY
3903 static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
3904 {
3905     gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
3906     set_cc_static(s);
3907     return DISAS_NEXT;
3908 }
3909 
3910 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
3911 {
3912     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
3913     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
3914 
3915     gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
3916     set_cc_static(s);
3917     return DISAS_NEXT;
3918 }
3919 #endif
3920 
3921 static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
3922 {
3923     DisasCompare c;
3924     TCGv_i64 a, h;
3925     TCGLabel *lab;
3926     int r1;
3927 
3928     disas_jcc(s, &c, get_field(s, m3));
3929 
3930     /* We want to store when the condition is fulfilled, so branch
3931        out when it's not.  */
3932     c.cond = tcg_invert_cond(c.cond);
3933 
3934     lab = gen_new_label();
3935     if (c.is_64) {
3936         tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3937     } else {
3938         tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3939     }
3940 
3941     r1 = get_field(s, r1);
3942     a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3943     switch (s->insn->data) {
3944     case 1: /* STOCG */
3945         tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUQ);
3946         break;
3947     case 0: /* STOC */
3948         tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUL);
3949         break;
3950     case 2: /* STOCFH */
3951         h = tcg_temp_new_i64();
3952         tcg_gen_shri_i64(h, regs[r1], 32);
3953         tcg_gen_qemu_st_i64(h, a, get_mem_index(s), MO_TEUL);
3954         break;
3955     default:
3956         g_assert_not_reached();
3957     }
3958 
3959     gen_set_label(lab);
3960     return DISAS_NEXT;
3961 }
3962 
3963 static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
3964 {
3965     TCGv_i64 t;
3966     uint64_t sign = 1ull << s->insn->data;
3967     if (s->insn->data == 31) {
3968         t = tcg_temp_new_i64();
3969         tcg_gen_shli_i64(t, o->in1, 32);
3970     } else {
3971         t = o->in1;
3972     }
3973     gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
3974     tcg_gen_shl_i64(o->out, o->in1, o->in2);
3975     /* The arithmetic left shift is curious in that it does not affect
3976        the sign bit.  Copy that over from the source unchanged.  */
3977     tcg_gen_andi_i64(o->out, o->out, ~sign);
3978     tcg_gen_andi_i64(o->in1, o->in1, sign);
3979     tcg_gen_or_i64(o->out, o->out, o->in1);
3980     return DISAS_NEXT;
3981 }
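
/*
 * E.g. the 32-bit form (insn->data == 31): in1 = 0x80000001, shift 1
 * gives out = (in1 << 1) & ~(1ull << 31) = 0x100000002, and OR-ing
 * back the saved sign bit yields 0x180000002, whose low word
 * 0x80000002 is the architected SLA result; the shifted-up copy of
 * in1 lets CC_OP_SLA detect overflow of the significant bits.
 */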
3982 
3983 static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
3984 {
3985     tcg_gen_shl_i64(o->out, o->in1, o->in2);
3986     return DISAS_NEXT;
3987 }
3988 
3989 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
3990 {
3991     tcg_gen_sar_i64(o->out, o->in1, o->in2);
3992     return DISAS_NEXT;
3993 }
3994 
3995 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
3996 {
3997     tcg_gen_shr_i64(o->out, o->in1, o->in2);
3998     return DISAS_NEXT;
3999 }
4000 
4001 static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
4002 {
4003     gen_helper_sfpc(cpu_env, o->in2);
4004     return DISAS_NEXT;
4005 }
4006 
4007 static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
4008 {
4009     gen_helper_sfas(cpu_env, o->in2);
4010     return DISAS_NEXT;
4011 }
4012 
4013 static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
4014 {
4015     /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
4016     tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
4017     gen_helper_srnm(cpu_env, o->addr1);
4018     return DISAS_NEXT;
4019 }
4020 
4021 static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
4022 {
4023     /* Bits 0-55 are ignored. */
4024     tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
4025     gen_helper_srnm(cpu_env, o->addr1);
4026     return DISAS_NEXT;
4027 }
4028 
4029 static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
4030 {
4031     TCGv_i64 tmp = tcg_temp_new_i64();
4032 
4033     /* Bits other than 61-63 are ignored. */
4034     tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);
4035 
4036     /* No need to call a helper: we don't implement DFP.  */
4037     tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4038     tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
4039     tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4040     return DISAS_NEXT;
4041 }
4042 
4043 static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
4044 {
4045     tcg_gen_extrl_i64_i32(cc_op, o->in1);
4046     tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
4047     set_cc_static(s);
4048 
4049     tcg_gen_shri_i64(o->in1, o->in1, 24);
4050     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
4051     return DISAS_NEXT;
4052 }
4053 
4054 static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
4055 {
4056     int b1 = get_field(s, b1);
4057     int d1 = get_field(s, d1);
4058     int b2 = get_field(s, b2);
4059     int d2 = get_field(s, d2);
4060     int r3 = get_field(s, r3);
4061     TCGv_i64 tmp = tcg_temp_new_i64();
4062 
4063     /* fetch all operands first */
4064     o->in1 = tcg_temp_new_i64();
4065     tcg_gen_addi_i64(o->in1, regs[b1], d1);
4066     o->in2 = tcg_temp_new_i64();
4067     tcg_gen_addi_i64(o->in2, regs[b2], d2);
4068     o->addr1 = tcg_temp_new_i64();
4069     gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);
4070 
4071     /* load the third operand into r3 before modifying anything */
4072     tcg_gen_qemu_ld_i64(regs[r3], o->addr1, get_mem_index(s), MO_TEUQ);
4073 
4074     /* subtract CPU timer from first operand and store in GR0 */
4075     gen_helper_stpt(tmp, cpu_env);
4076     tcg_gen_sub_i64(regs[0], o->in1, tmp);
4077 
4078     /* store second operand in GR1 */
4079     tcg_gen_mov_i64(regs[1], o->in2);
4080     return DISAS_NEXT;
4081 }
4082 
4083 #ifndef CONFIG_USER_ONLY
4084 static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
4085 {
4086     tcg_gen_shri_i64(o->in2, o->in2, 4);
4087     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
4088     return DISAS_NEXT;
4089 }
4090 
4091 static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
4092 {
4093     gen_helper_sske(cpu_env, o->in1, o->in2);
4094     return DISAS_NEXT;
4095 }
4096 
4097 static void gen_check_psw_mask(DisasContext *s)
4098 {
4099     TCGv_i64 reserved = tcg_temp_new_i64();
4100     TCGLabel *ok = gen_new_label();
4101 
4102     tcg_gen_andi_i64(reserved, psw_mask, PSW_MASK_RESERVED);
4103     tcg_gen_brcondi_i64(TCG_COND_EQ, reserved, 0, ok);
4104     gen_program_exception(s, PGM_SPECIFICATION);
4105     gen_set_label(ok);
4106 }
4107 
4108 static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
4109 {
4110     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
4111 
4112     gen_check_psw_mask(s);
4113 
4114     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4115     s->exit_to_mainloop = true;
4116     return DISAS_TOO_MANY;
4117 }
4118 
4119 static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
4120 {
4121     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
4122     return DISAS_NEXT;
4123 }
4124 #endif
4125 
4126 static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
4127 {
4128     gen_helper_stck(o->out, cpu_env);
4129     /* ??? We don't implement clock states.  */
4130     gen_op_movi_cc(s, 0);
4131     return DISAS_NEXT;
4132 }
4133 
4134 static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
4135 {
4136     TCGv_i64 c1 = tcg_temp_new_i64();
4137     TCGv_i64 c2 = tcg_temp_new_i64();
4138     TCGv_i64 todpr = tcg_temp_new_i64();
4139     gen_helper_stck(c1, cpu_env);
4140     /* 16-bit value stored in a uint32_t (only valid bits set) */
4141     tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
4142     /* Shift the 64-bit value into its place as a zero-extended
4143        104-bit value.  Note that "bit positions 64-103 are always
4144        non-zero so that they compare differently to STCK"; we set
4145        the least significant bit to 1.  */
4146     tcg_gen_shli_i64(c2, c1, 56);
4147     tcg_gen_shri_i64(c1, c1, 8);
4148     tcg_gen_ori_i64(c2, c2, 0x10000);
4149     tcg_gen_or_i64(c2, c2, todpr);
4150     tcg_gen_qemu_st_i64(c1, o->in2, get_mem_index(s), MO_TEUQ);
4151     tcg_gen_addi_i64(o->in2, o->in2, 8);
4152     tcg_gen_qemu_st_i64(c2, o->in2, get_mem_index(s), MO_TEUQ);
4153     /* ??? We don't implement clock states.  */
4154     gen_op_movi_cc(s, 0);
4155     return DISAS_NEXT;
4156 }
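
/*
 * The resulting 16-byte field, as produced by the two big-endian
 * doubleword stores above:
 *   bytes 0-7 :  c1 = clock >> 8   (epoch index byte of zero on top)
 *   bytes 8-15:  c2 = clock << 56 | 0x10000 | TOD programmable field
 * so the 64-bit clock occupies bit positions 8-71 and the programmable
 * field the last two bytes.
 */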
4157 
4158 #ifndef CONFIG_USER_ONLY
4159 static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
4160 {
4161     gen_helper_sck(cc_op, cpu_env, o->in2);
4162     set_cc_static(s);
4163     return DISAS_NEXT;
4164 }
4165 
4166 static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
4167 {
4168     gen_helper_sckc(cpu_env, o->in2);
4169     return DISAS_NEXT;
4170 }
4171 
4172 static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
4173 {
4174     gen_helper_sckpf(cpu_env, regs[0]);
4175     return DISAS_NEXT;
4176 }
4177 
4178 static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
4179 {
4180     gen_helper_stckc(o->out, cpu_env);
4181     return DISAS_NEXT;
4182 }
4183 
4184 static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
4185 {
4186     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4187     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4188 
4189     gen_helper_stctg(cpu_env, r1, o->in2, r3);
4190     return DISAS_NEXT;
4191 }
4192 
4193 static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
4194 {
4195     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4196     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4197 
4198     gen_helper_stctl(cpu_env, r1, o->in2, r3);
4199     return DISAS_NEXT;
4200 }
4201 
4202 static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
4203 {
4204     tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
4205     return DISAS_NEXT;
4206 }
4207 
4208 static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
4209 {
4210     gen_helper_spt(cpu_env, o->in2);
4211     return DISAS_NEXT;
4212 }
4213 
4214 static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
4215 {
4216     gen_helper_stfl(cpu_env);
4217     return DISAS_NEXT;
4218 }
4219 
4220 static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
4221 {
4222     gen_helper_stpt(o->out, cpu_env);
4223     return DISAS_NEXT;
4224 }
4225 
4226 static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
4227 {
4228     gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
4229     set_cc_static(s);
4230     return DISAS_NEXT;
4231 }
4232 
4233 static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
4234 {
4235     gen_helper_spx(cpu_env, o->in2);
4236     return DISAS_NEXT;
4237 }
4238 
4239 static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
4240 {
4241     gen_helper_xsch(cpu_env, regs[1]);
4242     set_cc_static(s);
4243     return DISAS_NEXT;
4244 }
4245 
4246 static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
4247 {
4248     gen_helper_csch(cpu_env, regs[1]);
4249     set_cc_static(s);
4250     return DISAS_NEXT;
4251 }
4252 
4253 static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
4254 {
4255     gen_helper_hsch(cpu_env, regs[1]);
4256     set_cc_static(s);
4257     return DISAS_NEXT;
4258 }
4259 
4260 static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
4261 {
4262     gen_helper_msch(cpu_env, regs[1], o->in2);
4263     set_cc_static(s);
4264     return DISAS_NEXT;
4265 }
4266 
4267 static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
4268 {
4269     gen_helper_rchp(cpu_env, regs[1]);
4270     set_cc_static(s);
4271     return DISAS_NEXT;
4272 }
4273 
4274 static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
4275 {
4276     gen_helper_rsch(cpu_env, regs[1]);
4277     set_cc_static(s);
4278     return DISAS_NEXT;
4279 }
4280 
4281 static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
4282 {
4283     gen_helper_sal(cpu_env, regs[1]);
4284     return DISAS_NEXT;
4285 }
4286 
4287 static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
4288 {
4289     gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
4290     return DISAS_NEXT;
4291 }
4292 
4293 static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
4294 {
4295     /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4296     gen_op_movi_cc(s, 3);
4297     return DISAS_NEXT;
4298 }
4299 
4300 static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
4301 {
4302     /* The instruction is suppressed if not provided. */
4303     return DISAS_NEXT;
4304 }
4305 
4306 static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
4307 {
4308     gen_helper_ssch(cpu_env, regs[1], o->in2);
4309     set_cc_static(s);
4310     return DISAS_NEXT;
4311 }
4312 
4313 static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
4314 {
4315     gen_helper_stsch(cpu_env, regs[1], o->in2);
4316     set_cc_static(s);
4317     return DISAS_NEXT;
4318 }
4319 
4320 static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
4321 {
4322     gen_helper_stcrw(cpu_env, o->in2);
4323     set_cc_static(s);
4324     return DISAS_NEXT;
4325 }
4326 
4327 static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
4328 {
4329     gen_helper_tpi(cc_op, cpu_env, o->addr1);
4330     set_cc_static(s);
4331     return DISAS_NEXT;
4332 }
4333 
4334 static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
4335 {
4336     gen_helper_tsch(cpu_env, regs[1], o->in2);
4337     set_cc_static(s);
4338     return DISAS_NEXT;
4339 }
4340 
4341 static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
4342 {
4343     gen_helper_chsc(cpu_env, o->in2);
4344     set_cc_static(s);
4345     return DISAS_NEXT;
4346 }
4347 
4348 static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
4349 {
4350     tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4351     tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4352     return DISAS_NEXT;
4353 }
4354 
4355 static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
4356 {
4357     uint64_t i2 = get_field(s, i2);
4358     TCGv_i64 t;
4359 
4360     /* It is important to do what the instruction name says: STORE THEN.
4361        If we let the output hook perform the store, a fault and restart
4362        would leave the wrong SYSTEM MASK in place.  */
4363     t = tcg_temp_new_i64();
4364     tcg_gen_shri_i64(t, psw_mask, 56);
4365     tcg_gen_qemu_st_i64(t, o->addr1, get_mem_index(s), MO_UB);
4366 
4367     if (s->fields.op == 0xac) {
4368         tcg_gen_andi_i64(psw_mask, psw_mask,
4369                          (i2 << 56) | 0x00ffffffffffffffull);
4370     } else {
4371         tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4372     }
4373 
4374     gen_check_psw_mask(s);
4375 
4376     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4377     s->exit_to_mainloop = true;
4378     return DISAS_TOO_MANY;
4379 }
4380 
4381 static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
4382 {
4383     tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);
4384 
4385     if (s->base.tb->flags & FLAG_MASK_PER) {
4386         update_psw_addr(s);
4387         gen_helper_per_store_real(cpu_env);
4388     }
4389     return DISAS_NEXT;
4390 }
4391 #endif
4392 
4393 static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
4394 {
4395     gen_helper_stfle(cc_op, cpu_env, o->in2);
4396     set_cc_static(s);
4397     return DISAS_NEXT;
4398 }
4399 
4400 static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
4401 {
4402     tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_UB);
4403     return DISAS_NEXT;
4404 }
4405 
4406 static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
4407 {
4408     tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_TEUW);
4409     return DISAS_NEXT;
4410 }
4411 
4412 static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
4413 {
4414     tcg_gen_qemu_st_tl(o->in1, o->in2, get_mem_index(s),
4415                        MO_TEUL | s->insn->data);
4416     return DISAS_NEXT;
4417 }
4418 
4419 static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
4420 {
4421     tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s),
4422                         MO_TEUQ | s->insn->data);
4423     return DISAS_NEXT;
4424 }
4425 
4426 static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
4427 {
4428     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4429     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4430 
4431     gen_helper_stam(cpu_env, r1, o->in2, r3);
4432     return DISAS_NEXT;
4433 }
4434 
4435 static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
4436 {
4437     int m3 = get_field(s, m3);
4438     int pos, base = s->insn->data;
4439     TCGv_i64 tmp = tcg_temp_new_i64();
4440 
4441     pos = base + ctz32(m3) * 8;
4442     switch (m3) {
4443     case 0xf:
4444         /* Effectively a 32-bit store.  */
4445         tcg_gen_shri_i64(tmp, o->in1, pos);
4446         tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
4447         break;
4448 
4449     case 0xc:
4450     case 0x6:
4451     case 0x3:
4452         /* Effectively a 16-bit store.  */
4453         tcg_gen_shri_i64(tmp, o->in1, pos);
4454         tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
4455         break;
4456 
4457     case 0x8:
4458     case 0x4:
4459     case 0x2:
4460     case 0x1:
4461         /* Effectively an 8-bit store.  */
4462         tcg_gen_shri_i64(tmp, o->in1, pos);
4463         tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
4464         break;
4465 
4466     default:
4467         /* This is going to be a sequence of shifts and stores.  */
4468         pos = base + 32 - 8;
4469         while (m3) {
4470             if (m3 & 0x8) {
4471                 tcg_gen_shri_i64(tmp, o->in1, pos);
4472                 tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
4473                 tcg_gen_addi_i64(o->in2, o->in2, 1);
4474             }
4475             m3 = (m3 << 1) & 0xf;
4476             pos -= 8;
4477         }
4478         break;
4479     }
4480     return DISAS_NEXT;
4481 }
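
/*
 * Example for the general case: M3 = 0b1001 walks the mask msb-first,
 * storing the selected word's leftmost byte (from pos = base + 24) and
 * then its rightmost byte (pos = base) to two consecutive byte
 * addresses, matching STORE CHARACTERS UNDER MASK's left-to-right
 * byte selection.
 */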
4482 
4483 static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4484 {
4485     int r1 = get_field(s, r1);
4486     int r3 = get_field(s, r3);
4487     int size = s->insn->data;
4488     TCGv_i64 tsize = tcg_constant_i64(size);
4489 
4490     while (1) {
4491         tcg_gen_qemu_st_i64(regs[r1], o->in2, get_mem_index(s),
4492                             size == 8 ? MO_TEUQ : MO_TEUL);
4493         if (r1 == r3) {
4494             break;
4495         }
4496         tcg_gen_add_i64(o->in2, o->in2, tsize);
4497         r1 = (r1 + 1) & 15;
4498     }
4499 
4500     return DISAS_NEXT;
4501 }
4502 
4503 static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4504 {
4505     int r1 = get_field(s, r1);
4506     int r3 = get_field(s, r3);
4507     TCGv_i64 t = tcg_temp_new_i64();
4508     TCGv_i64 t4 = tcg_constant_i64(4);
4509     TCGv_i64 t32 = tcg_constant_i64(32);
4510 
4511     while (1) {
4512         tcg_gen_shr_i64(t, regs[r1], t32);
4513         tcg_gen_qemu_st_i64(t, o->in2, get_mem_index(s), MO_TEUL);
4514         if (r1 == r3) {
4515             break;
4516         }
4517         tcg_gen_add_i64(o->in2, o->in2, t4);
4518         r1 = (r1 + 1) & 15;
4519     }
4520     return DISAS_NEXT;
4521 }
4522 
4523 static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
4524 {
4525     TCGv_i128 t16 = tcg_temp_new_i128();
4526 
4527     tcg_gen_concat_i64_i128(t16, o->out2, o->out);
4528     tcg_gen_qemu_st_i128(t16, o->in2, get_mem_index(s),
4529                          MO_TE | MO_128 | MO_ALIGN);
4530     return DISAS_NEXT;
4531 }
4532 
4533 static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
4534 {
4535     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4536     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4537 
4538     gen_helper_srst(cpu_env, r1, r2);
4539     set_cc_static(s);
4540     return DISAS_NEXT;
4541 }
4542 
4543 static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
4544 {
4545     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4546     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4547 
4548     gen_helper_srstu(cpu_env, r1, r2);
4549     set_cc_static(s);
4550     return DISAS_NEXT;
4551 }
4552 
4553 static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
4554 {
4555     tcg_gen_sub_i64(o->out, o->in1, o->in2);
4556     return DISAS_NEXT;
4557 }
4558 
4559 static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
4560 {
4561     tcg_gen_movi_i64(cc_src, 0);
4562     tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
4563     return DISAS_NEXT;
4564 }
4565 
4566 /* Compute borrow (0, -1) into cc_src. */
4567 static void compute_borrow(DisasContext *s)
4568 {
4569     switch (s->cc_op) {
4570     case CC_OP_SUBU:
4571         /* The borrow value is already in cc_src (0,-1). */
4572         break;
4573     default:
4574         gen_op_calc_cc(s);
4575         /* fall through */
4576     case CC_OP_STATIC:
4577         /* The carry flag is the msb of CC; compute into cc_src. */
4578         tcg_gen_extu_i32_i64(cc_src, cc_op);
4579         tcg_gen_shri_i64(cc_src, cc_src, 1);
4580         /* fall through */
4581     case CC_OP_ADDU:
4582         /* Convert carry (1,0) to borrow (0,-1). */
4583         tcg_gen_subi_i64(cc_src, cc_src, 1);
4584         break;
4585     }
4586 }
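
/*
 * E.g. from a static CC of 1 (result not zero, borrow occurred) the
 * conversion is: carry = 1 >> 1 = 0, then 0 - 1 = -1, the all-ones
 * borrow that op_subb32/op_subb64 add back into the difference.
 */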
4587 
4588 static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
4589 {
4590     compute_borrow(s);
4591 
4592     /* Borrow is {0, -1}, so add to subtract. */
4593     tcg_gen_add_i64(o->out, o->in1, cc_src);
4594     tcg_gen_sub_i64(o->out, o->out, o->in2);
4595     return DISAS_NEXT;
4596 }
4597 
4598 static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
4599 {
4600     compute_borrow(s);
4601 
4602     /*
4603      * Borrow is {0, -1}, so add to subtract; replicate the
4604      * borrow input to produce 128-bit -1 for the addition.
4605      */
4606     TCGv_i64 zero = tcg_constant_i64(0);
4607     tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
4608     tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
4609 
4610     return DISAS_NEXT;
4611 }
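
/*
 * Concretely: with borrow = -1 the pair (cc_src:cc_src) is the 128-bit
 * value -1, so add2 computes in1 - 1 with a correct high word, and
 * sub2 then subtracts in2, leaving the unsigned borrow-out in cc_src
 * for CC_OP_SUBU.
 */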
4612 
4613 static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
4614 {
4615     TCGv_i32 t;
4616 
4617     update_psw_addr(s);
4618     update_cc_op(s);
4619 
4620     t = tcg_constant_i32(get_field(s, i1) & 0xff);
4621     tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4622 
4623     t = tcg_constant_i32(s->ilen);
4624     tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4625 
4626     gen_exception(EXCP_SVC);
4627     return DISAS_NORETURN;
4628 }
4629 
4630 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4631 {
4632     int cc = 0;
4633 
4634     cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4635     cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4636     gen_op_movi_cc(s, cc);
4637     return DISAS_NEXT;
4638 }
4639 
4640 static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
4641 {
4642     gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4643     set_cc_static(s);
4644     return DISAS_NEXT;
4645 }
4646 
4647 static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
4648 {
4649     gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4650     set_cc_static(s);
4651     return DISAS_NEXT;
4652 }
4653 
4654 static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
4655 {
4656     gen_helper_tcxb(cc_op, cpu_env, o->in1_128, o->in2);
4657     set_cc_static(s);
4658     return DISAS_NEXT;
4659 }
4660 
4661 #ifndef CONFIG_USER_ONLY
4662 
4663 static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
4664 {
4665     gen_helper_testblock(cc_op, cpu_env, o->in2);
4666     set_cc_static(s);
4667     return DISAS_NEXT;
4668 }
4669 
4670 static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
4671 {
4672     gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
4673     set_cc_static(s);
4674     return DISAS_NEXT;
4675 }
4676 
4677 #endif
4678 
4679 static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
4680 {
4681     TCGv_i32 l1 = tcg_constant_i32(get_field(s, l1) + 1);
4682 
4683     gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4684     set_cc_static(s);
4685     return DISAS_NEXT;
4686 }
4687 
4688 static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
4689 {
4690     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4691 
4692     gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4693     set_cc_static(s);
4694     return DISAS_NEXT;
4695 }
4696 
4697 static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
4698 {
4699     TCGv_i128 pair = tcg_temp_new_i128();
4700 
4701     gen_helper_tre(pair, cpu_env, o->out, o->out2, o->in2);
4702     tcg_gen_extr_i128_i64(o->out2, o->out, pair);
4703     set_cc_static(s);
4704     return DISAS_NEXT;
4705 }
4706 
4707 static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
4708 {
4709     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4710 
4711     gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4712     set_cc_static(s);
4713     return DISAS_NEXT;
4714 }
4715 
4716 static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
4717 {
4718     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4719 
4720     gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4721     set_cc_static(s);
4722     return DISAS_NEXT;
4723 }
4724 
4725 static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
4726 {
4727     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4728     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4729     TCGv_i32 sizes = tcg_constant_i32(s->insn->opc & 3);
4730     TCGv_i32 tst = tcg_temp_new_i32();
4731     int m3 = get_field(s, m3);
4732 
4733     if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4734         m3 = 0;
4735     }
4736     if (m3 & 1) {
4737         tcg_gen_movi_i32(tst, -1);
4738     } else {
4739         tcg_gen_extrl_i64_i32(tst, regs[0]);
4740         if (s->insn->opc & 3) {
4741             tcg_gen_ext8u_i32(tst, tst);
4742         } else {
4743             tcg_gen_ext16u_i32(tst, tst);
4744         }
4745     }
4746     gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4747 
4748     set_cc_static(s);
4749     return DISAS_NEXT;
4750 }
4751 
4752 static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
4753 {
4754     TCGv_i32 t1 = tcg_constant_i32(0xff);
4755 
4756     tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4757     tcg_gen_extract_i32(cc_op, t1, 7, 1);
4758     set_cc_static(s);
4759     return DISAS_NEXT;
4760 }
4761 
4762 static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
4763 {
4764     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4765 
4766     gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4767     return DISAS_NEXT;
4768 }
4769 
4770 static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4771 {
4772     int l1 = get_field(s, l1) + 1;
4773     TCGv_i32 l;
4774 
4775     /* The length must not exceed 32 bytes.  */
4776     if (l1 > 32) {
4777         gen_program_exception(s, PGM_SPECIFICATION);
4778         return DISAS_NORETURN;
4779     }
4780     l = tcg_constant_i32(l1);
4781     gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4782     set_cc_static(s);
4783     return DISAS_NEXT;
4784 }
4785 
4786 static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
4787 {
4788     int l1 = get_field(s, l1) + 1;
4789     TCGv_i32 l;
4790 
4791     /* The length must be even and must not exceed 64 bytes.  */
4792     if ((l1 & 1) || (l1 > 64)) {
4793         gen_program_exception(s, PGM_SPECIFICATION);
4794         return DISAS_NORETURN;
4795     }
4796     l = tcg_constant_i32(l1);
4797     gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4798     set_cc_static(s);
4799     return DISAS_NEXT;
4800 }
4801 
4802 
4803 static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
4804 {
4805     int d1 = get_field(s, d1);
4806     int d2 = get_field(s, d2);
4807     int b1 = get_field(s, b1);
4808     int b2 = get_field(s, b2);
4809     int l = get_field(s, l1);
4810     TCGv_i32 t32;
4811 
4812     o->addr1 = get_address(s, 0, b1, d1);
4813 
4814     /* If the addresses are identical, this is a store/memset of zero.  */
4815     if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4816         o->in2 = tcg_constant_i64(0);
4817 
4818         l++;
4819         while (l >= 8) {
4820             tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UQ);
4821             l -= 8;
4822             if (l > 0) {
4823                 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4824             }
4825         }
4826         if (l >= 4) {
4827             tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UL);
4828             l -= 4;
4829             if (l > 0) {
4830                 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4831             }
4832         }
4833         if (l >= 2) {
4834             tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UW);
4835             l -= 2;
4836             if (l > 0) {
4837                 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4838             }
4839         }
4840         if (l) {
4841             tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UB);
4842         }
4843         gen_op_movi_cc(s, 0);
4844         return DISAS_NEXT;
4845     }
4846 
4847     /* But in general we'll defer to a helper.  */
4848     o->in2 = get_address(s, 0, b2, d2);
4849     t32 = tcg_constant_i32(l);
4850     gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4851     set_cc_static(s);
4852     return DISAS_NEXT;
4853 }
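
/*
 * E.g. XC with L1 = 11 (length 12) on identical operands clears 12
 * bytes with one 8-byte and one 4-byte store; anything longer than 32
 * bytes, or with distinct operands, goes through the helper, which
 * also implements the architected byte-by-byte overlap semantics.
 */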
4854 
4855 static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
4856 {
4857     tcg_gen_xor_i64(o->out, o->in1, o->in2);
4858     return DISAS_NEXT;
4859 }
4860 
4861 static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
4862 {
4863     int shift = s->insn->data & 0xff;
4864     int size = s->insn->data >> 8;
4865     uint64_t mask = ((1ull << size) - 1) << shift;
4866     TCGv_i64 t = tcg_temp_new_i64();
4867 
4868     tcg_gen_shli_i64(t, o->in2, shift);
4869     tcg_gen_xor_i64(o->out, o->in1, t);
4870 
4871     /* Produce the CC from only the bits manipulated.  */
4872     tcg_gen_andi_i64(cc_dst, o->out, mask);
4873     set_cc_nz_u64(s, cc_dst);
4874     return DISAS_NEXT;
4875 }
4876 
4877 static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
4878 {
4879     o->in1 = tcg_temp_new_i64();
4880 
4881     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4882         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
4883     } else {
4884         /* Perform the atomic operation in memory. */
4885         tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
4886                                      s->insn->data);
4887     }
4888 
4889     /* Recompute also for atomic case: needed for setting CC. */
4890     tcg_gen_xor_i64(o->out, o->in1, o->in2);
4891 
4892     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4893         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
4894     }
4895     return DISAS_NEXT;
4896 }
4897 
4898 static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
4899 {
4900     o->out = tcg_constant_i64(0);
4901     return DISAS_NEXT;
4902 }
4903 
4904 static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
4905 {
4906     o->out = tcg_constant_i64(0);
4907     o->out2 = o->out;
4908     return DISAS_NEXT;
4909 }
4910 
4911 #ifndef CONFIG_USER_ONLY
4912 static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
4913 {
4914     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4915 
4916     gen_helper_clp(cpu_env, r2);
4917     set_cc_static(s);
4918     return DISAS_NEXT;
4919 }
4920 
4921 static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
4922 {
4923     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4924     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4925 
4926     gen_helper_pcilg(cpu_env, r1, r2);
4927     set_cc_static(s);
4928     return DISAS_NEXT;
4929 }
4930 
4931 static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
4932 {
4933     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4934     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4935 
4936     gen_helper_pcistg(cpu_env, r1, r2);
4937     set_cc_static(s);
4938     return DISAS_NEXT;
4939 }
4940 
4941 static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
4942 {
4943     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4944     TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
4945 
4946     gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
4947     set_cc_static(s);
4948     return DISAS_NEXT;
4949 }
4950 
4951 static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
4952 {
4953     gen_helper_sic(cpu_env, o->in1, o->in2);
4954     return DISAS_NEXT;
4955 }
4956 
4957 static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
4958 {
4959     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4960     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4961 
4962     gen_helper_rpcit(cpu_env, r1, r2);
4963     set_cc_static(s);
4964     return DISAS_NEXT;
4965 }
4966 
4967 static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
4968 {
4969     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4970     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4971     TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
4972 
4973     gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
4974     set_cc_static(s);
4975     return DISAS_NEXT;
4976 }
4977 
4978 static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
4979 {
4980     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4981     TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
4982 
4983     gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
4984     set_cc_static(s);
4985     return DISAS_NEXT;
4986 }
4987 #endif
4988 
4989 #include "translate_vx.c.inc"
4990 
4991 /* ====================================================================== */
4992 /* The "Cc OUTput" generators.  Given the generated output (and in some cases
4993    the original inputs), update the various cc data structures in order to
4994    be able to compute the new condition code.  */
4995 
4996 static void cout_abs32(DisasContext *s, DisasOps *o)
4997 {
4998     gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
4999 }
5000 
5001 static void cout_abs64(DisasContext *s, DisasOps *o)
5002 {
5003     gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
5004 }
5005 
5006 static void cout_adds32(DisasContext *s, DisasOps *o)
5007 {
5008     gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
5009 }
5010 
5011 static void cout_adds64(DisasContext *s, DisasOps *o)
5012 {
5013     gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
5014 }
5015 
5016 static void cout_addu32(DisasContext *s, DisasOps *o)
5017 {
5018     tcg_gen_shri_i64(cc_src, o->out, 32);
5019     tcg_gen_ext32u_i64(cc_dst, o->out);
5020     gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
5021 }
5022 
5023 static void cout_addu64(DisasContext *s, DisasOps *o)
5024 {
5025     gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
5026 }
5027 
5028 static void cout_cmps32(DisasContext *s, DisasOps *o)
5029 {
5030     gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
5031 }
5032 
5033 static void cout_cmps64(DisasContext *s, DisasOps *o)
5034 {
5035     gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
5036 }
5037 
5038 static void cout_cmpu32(DisasContext *s, DisasOps *o)
5039 {
5040     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
5041 }
5042 
5043 static void cout_cmpu64(DisasContext *s, DisasOps *o)
5044 {
5045     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
5046 }
5047 
5048 static void cout_f32(DisasContext *s, DisasOps *o)
5049 {
5050     gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
5051 }
5052 
5053 static void cout_f64(DisasContext *s, DisasOps *o)
5054 {
5055     gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
5056 }
5057 
5058 static void cout_f128(DisasContext *s, DisasOps *o)
5059 {
5060     gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
5061 }
5062 
5063 static void cout_nabs32(DisasContext *s, DisasOps *o)
5064 {
5065     gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
5066 }
5067 
5068 static void cout_nabs64(DisasContext *s, DisasOps *o)
5069 {
5070     gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
5071 }
5072 
5073 static void cout_neg32(DisasContext *s, DisasOps *o)
5074 {
5075     gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
5076 }
5077 
5078 static void cout_neg64(DisasContext *s, DisasOps *o)
5079 {
5080     gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
5081 }
5082 
5083 static void cout_nz32(DisasContext *s, DisasOps *o)
5084 {
5085     tcg_gen_ext32u_i64(cc_dst, o->out);
5086     gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
5087 }
5088 
5089 static void cout_nz64(DisasContext *s, DisasOps *o)
5090 {
5091     gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5092 }
5093 
5094 static void cout_s32(DisasContext *s, DisasOps *o)
5095 {
5096     gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5097 }
5098 
5099 static void cout_s64(DisasContext *s, DisasOps *o)
5100 {
5101     gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5102 }
5103 
5104 static void cout_subs32(DisasContext *s, DisasOps *o)
5105 {
5106     gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5107 }
5108 
5109 static void cout_subs64(DisasContext *s, DisasOps *o)
5110 {
5111     gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5112 }
5113 
5114 static void cout_subu32(DisasContext *s, DisasOps *o)
5115 {
5116     tcg_gen_sari_i64(cc_src, o->out, 32);
5117     tcg_gen_ext32u_i64(cc_dst, o->out);
5118     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
5119 }
5120 
5121 static void cout_subu64(DisasContext *s, DisasOps *o)
5122 {
5123     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
5124 }
5125 
5126 static void cout_tm32(DisasContext *s, DisasOps *o)
5127 {
5128     gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5129 }
5130 
5131 static void cout_tm64(DisasContext *s, DisasOps *o)
5132 {
5133     gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5134 }
5135 
5136 static void cout_muls32(DisasContext *s, DisasOps *o)
5137 {
5138     gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
5139 }
5140 
5141 static void cout_muls64(DisasContext *s, DisasOps *o)
5142 {
5143     /* out contains "high" part, out2 contains "low" part of 128-bit result */
5144     gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
5145 }
5146 
5147 /* ====================================================================== */
5148 /* The "PREParation" generators.  These initialize the DisasOps.OUT fields
5149    with the TCG register to which we will write.  Used in combination with
5150    the "wout" generators, in some cases we need a new temporary, and in
5151    some cases we can write to a TCG global.  */
5152 
5153 static void prep_new(DisasContext *s, DisasOps *o)
5154 {
5155     o->out = tcg_temp_new_i64();
5156 }
5157 #define SPEC_prep_new 0
5158 
5159 static void prep_new_P(DisasContext *s, DisasOps *o)
5160 {
5161     o->out = tcg_temp_new_i64();
5162     o->out2 = tcg_temp_new_i64();
5163 }
5164 #define SPEC_prep_new_P 0
5165 
5166 static void prep_new_x(DisasContext *s, DisasOps *o)
5167 {
5168     o->out_128 = tcg_temp_new_i128();
5169 }
5170 #define SPEC_prep_new_x 0
5171 
5172 static void prep_r1(DisasContext *s, DisasOps *o)
5173 {
5174     o->out = regs[get_field(s, r1)];
5175 }
5176 #define SPEC_prep_r1 0
5177 
5178 static void prep_r1_P(DisasContext *s, DisasOps *o)
5179 {
5180     int r1 = get_field(s, r1);
5181     o->out = regs[r1];
5182     o->out2 = regs[r1 + 1];
5183 }
5184 #define SPEC_prep_r1_P SPEC_r1_even
5185 
5186 static void prep_x1(DisasContext *s, DisasOps *o)
5187 {
5188     o->out_128 = load_freg_128(get_field(s, r1));
5189 }
5190 #define SPEC_prep_x1 SPEC_r1_f128
5191 
5192 /* ====================================================================== */
5193 /* The "Write OUTput" generators.  These generally perform some non-trivial
5194    copy of data to TCG globals, or to main memory.  The trivial cases are
5195    generally handled by having a "prep" generator install the TCG global
5196    as the destination of the operation.  */
5197 
5198 static void wout_r1(DisasContext *s, DisasOps *o)
5199 {
5200     store_reg(get_field(s, r1), o->out);
5201 }
5202 #define SPEC_wout_r1 0
5203 
5204 static void wout_out2_r1(DisasContext *s, DisasOps *o)
5205 {
5206     store_reg(get_field(s, r1), o->out2);
5207 }
5208 #define SPEC_wout_out2_r1 0
5209 
5210 static void wout_r1_8(DisasContext *s, DisasOps *o)
5211 {
5212     int r1 = get_field(s, r1);
5213     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
5214 }
5215 #define SPEC_wout_r1_8 0
5216 
5217 static void wout_r1_16(DisasContext *s, DisasOps *o)
5218 {
5219     int r1 = get_field(s, r1);
5220     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
5221 }
5222 #define SPEC_wout_r1_16 0
5223 
5224 static void wout_r1_32(DisasContext *s, DisasOps *o)
5225 {
5226     store_reg32_i64(get_field(s, r1), o->out);
5227 }
5228 #define SPEC_wout_r1_32 0
5229 
5230 static void wout_r1_32h(DisasContext *s, DisasOps *o)
5231 {
5232     store_reg32h_i64(get_field(s, r1), o->out);
5233 }
5234 #define SPEC_wout_r1_32h 0
5235 
5236 static void wout_r1_P32(DisasContext *s, DisasOps *o)
5237 {
5238     int r1 = get_field(s, r1);
5239     store_reg32_i64(r1, o->out);
5240     store_reg32_i64(r1 + 1, o->out2);
5241 }
5242 #define SPEC_wout_r1_P32 SPEC_r1_even
5243 
5244 static void wout_r1_D32(DisasContext *s, DisasOps *o)
5245 {
5246     int r1 = get_field(s, r1);
5247     TCGv_i64 t = tcg_temp_new_i64();
5248     store_reg32_i64(r1 + 1, o->out);
5249     tcg_gen_shri_i64(t, o->out, 32);
5250     store_reg32_i64(r1, t);
5251 }
5252 #define SPEC_wout_r1_D32 SPEC_r1_even
5253 
5254 static void wout_r1_D64(DisasContext *s, DisasOps *o)
5255 {
5256     int r1 = get_field(s, r1);
5257     tcg_gen_extr_i128_i64(regs[r1 + 1], regs[r1], o->out_128);
5258 }
5259 #define SPEC_wout_r1_D64 SPEC_r1_even
5260 
5261 static void wout_r3_P32(DisasContext *s, DisasOps *o)
5262 {
5263     int r3 = get_field(s, r3);
5264     store_reg32_i64(r3, o->out);
5265     store_reg32_i64(r3 + 1, o->out2);
5266 }
5267 #define SPEC_wout_r3_P32 SPEC_r3_even
5268 
5269 static void wout_r3_P64(DisasContext *s, DisasOps *o)
5270 {
5271     int r3 = get_field(s, r3);
5272     store_reg(r3, o->out);
5273     store_reg(r3 + 1, o->out2);
5274 }
5275 #define SPEC_wout_r3_P64 SPEC_r3_even
5276 
5277 static void wout_e1(DisasContext *s, DisasOps *o)
5278 {
5279     store_freg32_i64(get_field(s, r1), o->out);
5280 }
5281 #define SPEC_wout_e1 0
5282 
5283 static void wout_f1(DisasContext *s, DisasOps *o)
5284 {
5285     store_freg(get_field(s, r1), o->out);
5286 }
5287 #define SPEC_wout_f1 0
5288 
5289 static void wout_x1(DisasContext *s, DisasOps *o)
5290 {
5291     int f1 = get_field(s, r1);
5292 
5293     /* Split out_128 into out+out2 for cout_f128. */
5294     tcg_debug_assert(o->out == NULL);
5295     o->out = tcg_temp_new_i64();
5296     o->out2 = tcg_temp_new_i64();
5297 
5298     tcg_gen_extr_i128_i64(o->out2, o->out, o->out_128);
5299     store_freg(f1, o->out);
5300     store_freg(f1 + 2, o->out2);
5301 }
5302 #define SPEC_wout_x1 SPEC_r1_f128
5303 
5304 static void wout_x1_P(DisasContext *s, DisasOps *o)
5305 {
5306     int f1 = get_field(s, r1);
5307     store_freg(f1, o->out);
5308     store_freg(f1 + 2, o->out2);
5309 }
5310 #define SPEC_wout_x1_P SPEC_r1_f128
5311 
5312 static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
5313 {
5314     if (get_field(s, r1) != get_field(s, r2)) {
5315         store_reg32_i64(get_field(s, r1), o->out);
5316     }
5317 }
5318 #define SPEC_wout_cond_r1r2_32 0
5319 
5320 static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
5321 {
5322     if (get_field(s, r1) != get_field(s, r2)) {
5323         store_freg32_i64(get_field(s, r1), o->out);
5324     }
5325 }
5326 #define SPEC_wout_cond_e1e2 0
5327 
5328 static void wout_m1_8(DisasContext *s, DisasOps *o)
5329 {
5330     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_UB);
5331 }
5332 #define SPEC_wout_m1_8 0
5333 
5334 static void wout_m1_16(DisasContext *s, DisasOps *o)
5335 {
5336     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUW);
5337 }
5338 #define SPEC_wout_m1_16 0
5339 
5340 #ifndef CONFIG_USER_ONLY
5341 static void wout_m1_16a(DisasContext *s, DisasOps *o)
5342 {
5343     tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
5344 }
5345 #define SPEC_wout_m1_16a 0
5346 #endif
5347 
5348 static void wout_m1_32(DisasContext *s, DisasOps *o)
5349 {
5350     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUL);
5351 }
5352 #define SPEC_wout_m1_32 0
5353 
5354 #ifndef CONFIG_USER_ONLY
5355 static void wout_m1_32a(DisasContext *s, DisasOps *o)
5356 {
5357     tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
5358 }
5359 #define SPEC_wout_m1_32a 0
5360 #endif
5361 
5362 static void wout_m1_64(DisasContext *s, DisasOps *o)
5363 {
5364     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ);
5365 }
5366 #define SPEC_wout_m1_64 0
5367 
5368 #ifndef CONFIG_USER_ONLY
5369 static void wout_m1_64a(DisasContext *s, DisasOps *o)
5370 {
5371     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
5372 }
5373 #define SPEC_wout_m1_64a 0
5374 #endif
5375 
5376 static void wout_m2_32(DisasContext *s, DisasOps *o)
5377 {
5378     tcg_gen_qemu_st_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
5379 }
5380 #define SPEC_wout_m2_32 0
5381 
5382 static void wout_in2_r1(DisasContext *s, DisasOps *o)
5383 {
5384     store_reg(get_field(s, r1), o->in2);
5385 }
5386 #define SPEC_wout_in2_r1 0
5387 
5388 static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
5389 {
5390     store_reg32_i64(get_field(s, r1), o->in2);
5391 }
5392 #define SPEC_wout_in2_r1_32 0
5393 
5394 /* ====================================================================== */
5395 /* The "INput 1" generators.  These load the first operand to an insn.  */
5396 
5397 static void in1_r1(DisasContext *s, DisasOps *o)
5398 {
5399     o->in1 = load_reg(get_field(s, r1));
5400 }
5401 #define SPEC_in1_r1 0
5402 
5403 static void in1_r1_o(DisasContext *s, DisasOps *o)
5404 {
5405     o->in1 = regs[get_field(s, r1)];
5406 }
5407 #define SPEC_in1_r1_o 0
5408 
5409 static void in1_r1_32s(DisasContext *s, DisasOps *o)
5410 {
5411     o->in1 = tcg_temp_new_i64();
5412     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
5413 }
5414 #define SPEC_in1_r1_32s 0
5415 
5416 static void in1_r1_32u(DisasContext *s, DisasOps *o)
5417 {
5418     o->in1 = tcg_temp_new_i64();
5419     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
5420 }
5421 #define SPEC_in1_r1_32u 0
5422 
5423 static void in1_r1_sr32(DisasContext *s, DisasOps *o)
5424 {
5425     o->in1 = tcg_temp_new_i64();
5426     tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
5427 }
5428 #define SPEC_in1_r1_sr32 0
5429 
5430 static void in1_r1p1(DisasContext *s, DisasOps *o)
5431 {
5432     o->in1 = load_reg(get_field(s, r1) + 1);
5433 }
5434 #define SPEC_in1_r1p1 SPEC_r1_even
5435 
5436 static void in1_r1p1_o(DisasContext *s, DisasOps *o)
5437 {
5438     o->in1 = regs[get_field(s, r1) + 1];
5439 }
5440 #define SPEC_in1_r1p1_o SPEC_r1_even
5441 
5442 static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
5443 {
5444     o->in1 = tcg_temp_new_i64();
5445     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
5446 }
5447 #define SPEC_in1_r1p1_32s SPEC_r1_even
5448 
5449 static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
5450 {
5451     o->in1 = tcg_temp_new_i64();
5452     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
5453 }
5454 #define SPEC_in1_r1p1_32u SPEC_r1_even
5455 
5456 static void in1_r1_D32(DisasContext *s, DisasOps *o)
5457 {
5458     int r1 = get_field(s, r1);
5459     o->in1 = tcg_temp_new_i64();
5460     tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5461 }
5462 #define SPEC_in1_r1_D32 SPEC_r1_even
5463 
5464 static void in1_r2(DisasContext *s, DisasOps *o)
5465 {
5466     o->in1 = load_reg(get_field(s, r2));
5467 }
5468 #define SPEC_in1_r2 0
5469 
5470 static void in1_r2_sr32(DisasContext *s, DisasOps *o)
5471 {
5472     o->in1 = tcg_temp_new_i64();
5473     tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
5474 }
5475 #define SPEC_in1_r2_sr32 0
5476 
5477 static void in1_r2_32u(DisasContext *s, DisasOps *o)
5478 {
5479     o->in1 = tcg_temp_new_i64();
5480     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
5481 }
5482 #define SPEC_in1_r2_32u 0
5483 
5484 static void in1_r3(DisasContext *s, DisasOps *o)
5485 {
5486     o->in1 = load_reg(get_field(s, r3));
5487 }
5488 #define SPEC_in1_r3 0
5489 
5490 static void in1_r3_o(DisasContext *s, DisasOps *o)
5491 {
5492     o->in1 = regs[get_field(s, r3)];
5493 }
5494 #define SPEC_in1_r3_o 0
5495 
5496 static void in1_r3_32s(DisasContext *s, DisasOps *o)
5497 {
5498     o->in1 = tcg_temp_new_i64();
5499     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
5500 }
5501 #define SPEC_in1_r3_32s 0
5502 
5503 static void in1_r3_32u(DisasContext *s, DisasOps *o)
5504 {
5505     o->in1 = tcg_temp_new_i64();
5506     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
5507 }
5508 #define SPEC_in1_r3_32u 0
5509 
5510 static void in1_r3_D32(DisasContext *s, DisasOps *o)
5511 {
5512     int r3 = get_field(s, r3);
5513     o->in1 = tcg_temp_new_i64();
5514     tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5515 }
5516 #define SPEC_in1_r3_D32 SPEC_r3_even
5517 
5518 static void in1_r3_sr32(DisasContext *s, DisasOps *o)
5519 {
5520     o->in1 = tcg_temp_new_i64();
5521     tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32);
5522 }
5523 #define SPEC_in1_r3_sr32 0
5524 
5525 static void in1_e1(DisasContext *s, DisasOps *o)
5526 {
5527     o->in1 = load_freg32_i64(get_field(s, r1));
5528 }
5529 #define SPEC_in1_e1 0
5530 
5531 static void in1_f1(DisasContext *s, DisasOps *o)
5532 {
5533     o->in1 = load_freg(get_field(s, r1));
5534 }
5535 #define SPEC_in1_f1 0
5536 
5537 static void in1_x1(DisasContext *s, DisasOps *o)
5538 {
5539     o->in1_128 = load_freg_128(get_field(s, r1));
5540 }
5541 #define SPEC_in1_x1 SPEC_r1_f128
5542 
5543 /* Load the high double word of an extended (128-bit) format FP number */
5544 static void in1_x2h(DisasContext *s, DisasOps *o)
5545 {
5546     o->in1 = load_freg(get_field(s, r2));
5547 }
5548 #define SPEC_in1_x2h SPEC_r2_f128
5549 
5550 static void in1_f3(DisasContext *s, DisasOps *o)
5551 {
5552     o->in1 = load_freg(get_field(s, r3));
5553 }
5554 #define SPEC_in1_f3 0
5555 
5556 static void in1_la1(DisasContext *s, DisasOps *o)
5557 {
5558     o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
5559 }
5560 #define SPEC_in1_la1 0
5561 
5562 static void in1_la2(DisasContext *s, DisasOps *o)
5563 {
5564     int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5565     o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5566 }
5567 #define SPEC_in1_la2 0
5568 
5569 static void in1_m1_8u(DisasContext *s, DisasOps *o)
5570 {
5571     in1_la1(s, o);
5572     o->in1 = tcg_temp_new_i64();
5573     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_UB);
5574 }
5575 #define SPEC_in1_m1_8u 0
5576 
5577 static void in1_m1_16s(DisasContext *s, DisasOps *o)
5578 {
5579     in1_la1(s, o);
5580     o->in1 = tcg_temp_new_i64();
5581     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESW);
5582 }
5583 #define SPEC_in1_m1_16s 0
5584 
5585 static void in1_m1_16u(DisasContext *s, DisasOps *o)
5586 {
5587     in1_la1(s, o);
5588     o->in1 = tcg_temp_new_i64();
5589     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUW);
5590 }
5591 #define SPEC_in1_m1_16u 0
5592 
5593 static void in1_m1_32s(DisasContext *s, DisasOps *o)
5594 {
5595     in1_la1(s, o);
5596     o->in1 = tcg_temp_new_i64();
5597     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESL);
5598 }
5599 #define SPEC_in1_m1_32s 0
5600 
5601 static void in1_m1_32u(DisasContext *s, DisasOps *o)
5602 {
5603     in1_la1(s, o);
5604     o->in1 = tcg_temp_new_i64();
5605     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUL);
5606 }
5607 #define SPEC_in1_m1_32u 0
5608 
5609 static void in1_m1_64(DisasContext *s, DisasOps *o)
5610 {
5611     in1_la1(s, o);
5612     o->in1 = tcg_temp_new_i64();
5613     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUQ);
5614 }
5615 #define SPEC_in1_m1_64 0
5616 
5617 /* ====================================================================== */
5618 /* The "INput 2" generators.  These load the second operand to an insn.  */
5619 
5620 static void in2_r1_o(DisasContext *s, DisasOps *o)
5621 {
5622     o->in2 = regs[get_field(s, r1)];
5623 }
5624 #define SPEC_in2_r1_o 0
5625 
5626 static void in2_r1_16u(DisasContext *s, DisasOps *o)
5627 {
5628     o->in2 = tcg_temp_new_i64();
5629     tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
5630 }
5631 #define SPEC_in2_r1_16u 0
5632 
5633 static void in2_r1_32u(DisasContext *s, DisasOps *o)
5634 {
5635     o->in2 = tcg_temp_new_i64();
5636     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
5637 }
5638 #define SPEC_in2_r1_32u 0
5639 
5640 static void in2_r1_D32(DisasContext *s, DisasOps *o)
5641 {
5642     int r1 = get_field(s, r1);
5643     o->in2 = tcg_temp_new_i64();
5644     tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5645 }
5646 #define SPEC_in2_r1_D32 SPEC_r1_even
5647 
5648 static void in2_r2(DisasContext *s, DisasOps *o)
5649 {
5650     o->in2 = load_reg(get_field(s, r2));
5651 }
5652 #define SPEC_in2_r2 0
5653 
5654 static void in2_r2_o(DisasContext *s, DisasOps *o)
5655 {
5656     o->in2 = regs[get_field(s, r2)];
5657 }
5658 #define SPEC_in2_r2_o 0
5659 
5660 static void in2_r2_nz(DisasContext *s, DisasOps *o)
5661 {
5662     int r2 = get_field(s, r2);
5663     if (r2 != 0) {
5664         o->in2 = load_reg(r2);
5665     }
5666 }
5667 #define SPEC_in2_r2_nz 0
5668 
5669 static void in2_r2_8s(DisasContext *s, DisasOps *o)
5670 {
5671     o->in2 = tcg_temp_new_i64();
5672     tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
5673 }
5674 #define SPEC_in2_r2_8s 0
5675 
5676 static void in2_r2_8u(DisasContext *s, DisasOps *o)
5677 {
5678     o->in2 = tcg_temp_new_i64();
5679     tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
5680 }
5681 #define SPEC_in2_r2_8u 0
5682 
5683 static void in2_r2_16s(DisasContext *s, DisasOps *o)
5684 {
5685     o->in2 = tcg_temp_new_i64();
5686     tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
5687 }
5688 #define SPEC_in2_r2_16s 0
5689 
5690 static void in2_r2_16u(DisasContext *s, DisasOps *o)
5691 {
5692     o->in2 = tcg_temp_new_i64();
5693     tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
5694 }
5695 #define SPEC_in2_r2_16u 0
5696 
5697 static void in2_r3(DisasContext *s, DisasOps *o)
5698 {
5699     o->in2 = load_reg(get_field(s, r3));
5700 }
5701 #define SPEC_in2_r3 0
5702 
5703 static void in2_r3_D64(DisasContext *s, DisasOps *o)
5704 {
5705     int r3 = get_field(s, r3);
5706     o->in2_128 = tcg_temp_new_i128();
5707     tcg_gen_concat_i64_i128(o->in2_128, regs[r3 + 1], regs[r3]);
5708 }
5709 #define SPEC_in2_r3_D64 SPEC_r3_even
5710 
5711 static void in2_r3_sr32(DisasContext *s, DisasOps *o)
5712 {
5713     o->in2 = tcg_temp_new_i64();
5714     tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
5715 }
5716 #define SPEC_in2_r3_sr32 0
5717 
5718 static void in2_r3_32u(DisasContext *s, DisasOps *o)
5719 {
5720     o->in2 = tcg_temp_new_i64();
5721     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
5722 }
5723 #define SPEC_in2_r3_32u 0
5724 
5725 static void in2_r2_32s(DisasContext *s, DisasOps *o)
5726 {
5727     o->in2 = tcg_temp_new_i64();
5728     tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
5729 }
5730 #define SPEC_in2_r2_32s 0
5731 
5732 static void in2_r2_32u(DisasContext *s, DisasOps *o)
5733 {
5734     o->in2 = tcg_temp_new_i64();
5735     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
5736 }
5737 #define SPEC_in2_r2_32u 0
5738 
5739 static void in2_r2_sr32(DisasContext *s, DisasOps *o)
5740 {
5741     o->in2 = tcg_temp_new_i64();
5742     tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
5743 }
5744 #define SPEC_in2_r2_sr32 0
5745 
5746 static void in2_e2(DisasContext *s, DisasOps *o)
5747 {
5748     o->in2 = load_freg32_i64(get_field(s, r2));
5749 }
5750 #define SPEC_in2_e2 0
5751 
5752 static void in2_f2(DisasContext *s, DisasOps *o)
5753 {
5754     o->in2 = load_freg(get_field(s, r2));
5755 }
5756 #define SPEC_in2_f2 0
5757 
5758 static void in2_x2(DisasContext *s, DisasOps *o)
5759 {
5760     o->in2_128 = load_freg_128(get_field(s, r2));
5761 }
5762 #define SPEC_in2_x2 SPEC_r2_f128
5763 
5764 /* Load the low double word of an extended (128-bit) format FP number */
5765 static void in2_x2l(DisasContext *s, DisasOps *o)
5766 {
5767     o->in2 = load_freg(get_field(s, r2) + 2);
5768 }
5769 #define SPEC_in2_x2l SPEC_r2_f128
5770 
5771 static void in2_ra2(DisasContext *s, DisasOps *o)
5772 {
5773     int r2 = get_field(s, r2);
5774 
5775     /* Note: *don't* treat !r2 as 0; use the reg value. */
5776     o->in2 = tcg_temp_new_i64();
5777     gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
5778 }
5779 #define SPEC_in2_ra2 0
5780 
5781 static void in2_a2(DisasContext *s, DisasOps *o)
5782 {
5783     int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5784     o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5785 }
5786 #define SPEC_in2_a2 0
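/*
 * The a2/la1/la2 generators compute an effective address from the
 * optional index (x2), base (b1/b2) and displacement (d1/d2) fields.
 * get_address() is defined earlier in this file and is presumed to
 * apply the 24/31/64-bit wrapping for the current addressing mode.
 */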
5787 
5788 static TCGv gen_ri2(DisasContext *s)
5789 {
5790     TCGv ri2 = NULL;
5791     bool is_imm;
5792     int imm;
5793 
5794     disas_jdest(s, i2, is_imm, imm, ri2);
5795     if (is_imm) {
5796         ri2 = tcg_constant_i64(s->base.pc_next + imm * 2);
5797     }
5798 
5799     return ri2;
5800 }
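/*
 * As the computation above shows, a relative-immediate i2 names a
 * branch target of pc_next + 2 * imm: the immediate is in halfword
 * units, relative to this instruction's address.
 */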
5801 
5802 static void in2_ri2(DisasContext *s, DisasOps *o)
5803 {
5804     o->in2 = gen_ri2(s);
5805 }
5806 #define SPEC_in2_ri2 0
5807 
5808 static void in2_sh(DisasContext *s, DisasOps *o)
5809 {
5810     int b2 = get_field(s, b2);
5811     int d2 = get_field(s, d2);
5812 
5813     if (b2 == 0) {
5814         o->in2 = tcg_constant_i64(d2 & 0x3f);
5815     } else {
5816         o->in2 = get_address(s, 0, b2, d2);
5817         tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
5818     }
5819 }
5820 #define SPEC_in2_sh 0
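/*
 * Shift counts are thus taken modulo 64: only the low six bits of
 * base + displacement (or of d2 alone when b2 is 0) survive the
 * & 0x3f masking above.
 */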
5821 
5822 static void in2_m2_8u(DisasContext *s, DisasOps *o)
5823 {
5824     in2_a2(s, o);
5825     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_UB);
5826 }
5827 #define SPEC_in2_m2_8u 0
5828 
5829 static void in2_m2_16s(DisasContext *s, DisasOps *o)
5830 {
5831     in2_a2(s, o);
5832     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESW);
5833 }
5834 #define SPEC_in2_m2_16s 0
5835 
5836 static void in2_m2_16u(DisasContext *s, DisasOps *o)
5837 {
5838     in2_a2(s, o);
5839     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUW);
5840 }
5841 #define SPEC_in2_m2_16u 0
5842 
5843 static void in2_m2_32s(DisasContext *s, DisasOps *o)
5844 {
5845     in2_a2(s, o);
5846     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESL);
5847 }
5848 #define SPEC_in2_m2_32s 0
5849 
5850 static void in2_m2_32u(DisasContext *s, DisasOps *o)
5851 {
5852     in2_a2(s, o);
5853     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUL);
5854 }
5855 #define SPEC_in2_m2_32u 0
5856 
5857 #ifndef CONFIG_USER_ONLY
5858 static void in2_m2_32ua(DisasContext *s, DisasOps *o)
5859 {
5860     in2_a2(s, o);
5861     tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
5862 }
5863 #define SPEC_in2_m2_32ua 0
5864 #endif
5865 
5866 static void in2_m2_64(DisasContext *s, DisasOps *o)
5867 {
5868     in2_a2(s, o);
5869     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
5870 }
5871 #define SPEC_in2_m2_64 0
5872 
5873 static void in2_m2_64w(DisasContext *s, DisasOps *o)
5874 {
5875     in2_a2(s, o);
5876     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
5877     gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
5878 }
5879 #define SPEC_in2_m2_64w 0
5880 
5881 #ifndef CONFIG_USER_ONLY
5882 static void in2_m2_64a(DisasContext *s, DisasOps *o)
5883 {
5884     in2_a2(s, o);
5885     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
5886 }
5887 #define SPEC_in2_m2_64a 0
5888 #endif
5889 
5890 static void in2_mri2_16s(DisasContext *s, DisasOps *o)
5891 {
5892     o->in2 = tcg_temp_new_i64();
5893     tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TESW);
5894 }
5895 #define SPEC_in2_mri2_16s 0
5896 
5897 static void in2_mri2_16u(DisasContext *s, DisasOps *o)
5898 {
5899     o->in2 = tcg_temp_new_i64();
5900     tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TEUW);
5901 }
5902 #define SPEC_in2_mri2_16u 0
5903 
5904 static void in2_mri2_32s(DisasContext *s, DisasOps *o)
5905 {
5906     o->in2 = tcg_temp_new_i64();
5907     tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
5908                        MO_TESL | MO_ALIGN);
5909 }
5910 #define SPEC_in2_mri2_32s 0
5911 
5912 static void in2_mri2_32u(DisasContext *s, DisasOps *o)
5913 {
5914     o->in2 = tcg_temp_new_i64();
5915     tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
5916                        MO_TEUL | MO_ALIGN);
5917 }
5918 #define SPEC_in2_mri2_32u 0
5919 
5920 static void in2_mri2_64(DisasContext *s, DisasOps *o)
5921 {
5922     o->in2 = tcg_temp_new_i64();
5923     tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s),
5924                         MO_TEUQ | MO_ALIGN);
5925 }
5926 #define SPEC_in2_mri2_64 0
5927 
5928 static void in2_i2(DisasContext *s, DisasOps *o)
5929 {
5930     o->in2 = tcg_constant_i64(get_field(s, i2));
5931 }
5932 #define SPEC_in2_i2 0
5933 
5934 static void in2_i2_8u(DisasContext *s, DisasOps *o)
5935 {
5936     o->in2 = tcg_constant_i64((uint8_t)get_field(s, i2));
5937 }
5938 #define SPEC_in2_i2_8u 0
5939 
5940 static void in2_i2_16u(DisasContext *s, DisasOps *o)
5941 {
5942     o->in2 = tcg_constant_i64((uint16_t)get_field(s, i2));
5943 }
5944 #define SPEC_in2_i2_16u 0
5945 
5946 static void in2_i2_32u(DisasContext *s, DisasOps *o)
5947 {
5948     o->in2 = tcg_constant_i64((uint32_t)get_field(s, i2));
5949 }
5950 #define SPEC_in2_i2_32u 0
5951 
5952 static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
5953 {
5954     uint64_t i2 = (uint16_t)get_field(s, i2);
5955     o->in2 = tcg_constant_i64(i2 << s->insn->data);
5956 }
5957 #define SPEC_in2_i2_16u_shl 0
5958 
5959 static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
5960 {
5961     uint64_t i2 = (uint32_t)get_field(s, i2);
5962     o->in2 = tcg_constant_i64(i2 << s->insn->data);
5963 }
5964 #define SPEC_in2_i2_32u_shl 0
5965 
5966 #ifndef CONFIG_USER_ONLY
5967 static void in2_insn(DisasContext *s, DisasOps *o)
5968 {
5969     o->in2 = tcg_constant_i64(s->fields.raw_insn);
5970 }
5971 #define SPEC_in2_insn 0
5972 #endif
5973 
5974 /* ====================================================================== */
5975 
5976 /* Find opc within the table of insns.  This is formulated as a switch
5977    statement so that (1) we get compile-time notice of cut-and-paste
5978    errors for duplicated opcodes, and (2) the compiler generates the
5979    binary search tree rather than us having to post-process the table.  */
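/*
 * Sketch of the three expansions of E() over insn-data.h.inc; the
 * entry below is illustrative only, not copied from that file:
 *
 *     C(0x1800, LR, RR_a, Z, 0, r2_o, 0, r1, mov2, 0)
 *
 * first becomes the enumerator insn_LR, then a DisasInsn initializer
 * in insn_info[], and finally "case 0x1800: return &insn_info[insn_LR];"
 * inside lookup_opc().
 */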
5980 
5981 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5982     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
5983 
5984 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5985     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
5986 
5987 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
5988     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
5989 
5990 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
5991 
5992 enum DisasInsnEnum {
5993 #include "insn-data.h.inc"
5994 };
5995 
5996 #undef E
5997 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
5998     .opc = OPC,                                                             \
5999     .flags = FL,                                                            \
6000     .fmt = FMT_##FT,                                                        \
6001     .fac = FAC_##FC,                                                        \
6002     .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
6003     .name = #NM,                                                            \
6004     .help_in1 = in1_##I1,                                                   \
6005     .help_in2 = in2_##I2,                                                   \
6006     .help_prep = prep_##P,                                                  \
6007     .help_wout = wout_##W,                                                  \
6008     .help_cout = cout_##CC,                                                 \
6009     .help_op = op_##OP,                                                     \
6010     .data = D                                                               \
6011  },
6012 
6013 /* Allow 0 to be used for NULL in the table below.  */
6014 #define in1_0  NULL
6015 #define in2_0  NULL
6016 #define prep_0  NULL
6017 #define wout_0  NULL
6018 #define cout_0  NULL
6019 #define op_0  NULL
6020 
6021 #define SPEC_in1_0 0
6022 #define SPEC_in2_0 0
6023 #define SPEC_prep_0 0
6024 #define SPEC_wout_0 0
6025 
6026 /* Give smaller names to the various facilities.  */
6027 #define FAC_Z           S390_FEAT_ZARCH
6028 #define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6029 #define FAC_DFP         S390_FEAT_DFP
6030 #define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* DFP-rounding */
6031 #define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
6032 #define FAC_EE          S390_FEAT_EXECUTE_EXT
6033 #define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
6034 #define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
6035 #define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPS-sign-handling */
6036 #define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPR-GR-transfer */
6037 #define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6038 #define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
6039 #define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
6040 #define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* IEEE-exception-simulation */
6041 #define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6042 #define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
6043 #define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
6044 #define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
6045 #define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
6046 #define FAC_PC          S390_FEAT_STFLE_45 /* population count */
6047 #define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
6048 #define FAC_SFLE        S390_FEAT_STFLE
6049 #define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6050 #define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6051 #define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6052 #define FAC_DAT_ENH     S390_FEAT_DAT_ENH
6053 #define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
6054 #define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
6055 #define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
6056 #define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6057 #define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
6058 #define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
6059 #define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6060 #define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6061 #define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6062 #define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
6063 #define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
6064 #define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
6065 #define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
6066 #define FAC_V           S390_FEAT_VECTOR /* vector facility */
6067 #define FAC_VE          S390_FEAT_VECTOR_ENH  /* vector enhancements facility 1 */
6068 #define FAC_VE2         S390_FEAT_VECTOR_ENH2 /* vector enhancements facility 2 */
6069 #define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
6070 #define FAC_MIE3        S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */
6071 
6072 static const DisasInsn insn_info[] = {
6073 #include "insn-data.h.inc"
6074 };
6075 
6076 #undef E
6077 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6078     case OPC: return &insn_info[insn_ ## NM];
6079 
6080 static const DisasInsn *lookup_opc(uint16_t opc)
6081 {
6082     switch (opc) {
6083 #include "insn-data.h.inc"
6084     default:
6085         return NULL;
6086     }
6087 }
6088 
6089 #undef F
6090 #undef E
6091 #undef D
6092 #undef C
6093 
6094 /* Extract a field from the insn.  The INSN should be left-aligned in
6095    the uint64_t so that we can more easily utilize the big-bit-endian
6096    definitions we extract from the Principles of Operation.  */
6097 
6098 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
6099 {
6100     uint32_t r, m;
6101 
6102     if (f->size == 0) {
6103         return;
6104     }
6105 
6106     /* Zero extract the field from the insn.  */
6107     r = (insn << f->beg) >> (64 - f->size);
6108 
6109     /* Sign-extend, or un-swap the field as necessary.  */
6110     switch (f->type) {
6111     case 0: /* unsigned */
6112         break;
6113     case 1: /* signed */
6114         assert(f->size <= 32);
6115         m = 1u << (f->size - 1);
6116         r = (r ^ m) - m;
6117         break;
6118     case 2: /* dl+dh split, signed 20 bit. */
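        /*
         * Worked example: the raw 20-bit field arrives as DL (12 bits)
         * followed by DH (8 bits).  (int8_t)r sign-extends DH, << 12
         * makes room for DL, and (r >> 8) supplies DL, yielding the
         * signed displacement DH:DL.
         */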
6119         r = ((int8_t)r << 12) | (r >> 8);
6120         break;
6121     case 3: /* MSB stored in RXB */
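        /*
         * Vector insns keep the fifth (MSB) bit of each 4-bit register
         * field in the RXB byte, big-endian bits 36-39.  extract64()
         * counts bits little-endian, hence the 63 - n conversions below.
         */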
6122         g_assert(f->size == 4);
6123         switch (f->beg) {
6124         case 8:
6125             r |= extract64(insn, 63 - 36, 1) << 4;
6126             break;
6127         case 12:
6128             r |= extract64(insn, 63 - 37, 1) << 4;
6129             break;
6130         case 16:
6131             r |= extract64(insn, 63 - 38, 1) << 4;
6132             break;
6133         case 32:
6134             r |= extract64(insn, 63 - 39, 1) << 4;
6135             break;
6136         default:
6137             g_assert_not_reached();
6138         }
6139         break;
6140     default:
6141         abort();
6142     }
6143 
6144     /*
6145      * Validate that the "compressed" encoding we selected above is valid.
6146      * I.e. we haven't made two different original fields overlap.
6147      */
6148     assert(((o->presentC >> f->indexC) & 1) == 0);
6149     o->presentC |= 1 << f->indexC;
6150     o->presentO |= 1 << f->indexO;
6151 
6152     o->c[f->indexC] = r;
6153 }
6154 
6155 /* Look up the insn at the current PC, extracting the operands into O and
6156    returning the info struct for the insn.  Returns NULL for invalid insn.  */
6157 
6158 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
6159 {
6160     uint64_t insn, pc = s->base.pc_next;
6161     int op, op2, ilen;
6162     const DisasInsn *info;
6163 
6164     if (unlikely(s->ex_value)) {
6165         /* Drop the EX data now, so that it's clear on exception paths.  */
6166         tcg_gen_st_i64(tcg_constant_i64(0), cpu_env,
6167                        offsetof(CPUS390XState, ex_value));
6168 
6169         /* Extract the values saved by EXECUTE.  */
6170         insn = s->ex_value & 0xffffffffffff0000ull;
6171         ilen = s->ex_value & 0xf;
6172 
6173         /* Register the insn bytes with the translator so plugins work. */
6174         for (int i = 0; i < ilen; i++) {
6175             uint8_t byte = extract64(insn, 56 - (i * 8), 8);
6176             translator_fake_ldb(byte, pc + i);
6177         }
6178         op = insn >> 56;
6179     } else {
6180         insn = ld_code2(env, s, pc);
6181         op = (insn >> 8) & 0xff;
6182         ilen = get_ilen(op);
6183         switch (ilen) {
6184         case 2:
6185             insn = insn << 48;
6186             break;
6187         case 4:
6188             insn = ld_code4(env, s, pc) << 32;
6189             break;
6190         case 6:
6191             insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
6192             break;
6193         default:
6194             g_assert_not_reached();
6195         }
6196     }
6197     s->pc_tmp = s->base.pc_next + ilen;
6198     s->ilen = ilen;
6199 
6200     /* We can't actually determine the insn format until we've looked up
6201        the full insn opcode, which we can't do without first locating the
6202        secondary opcode.  Assume by default that OP2 is at bit 40; for
6203        those smaller insns that don't actually have a secondary opcode,
6204        this will correctly result in OP2 = 0. */
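    /*
     * Illustration of the extractions below on a left-aligned insn:
     * (insn << 8) >> 56 yields big-endian bits 8-15 (the second byte),
     * (insn << 12) >> 60 the 4-bit field in bits 12-15, and
     * (insn << 40) >> 56 the byte at bits 40-47.
     */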
6205     switch (op) {
6206     case 0x01: /* E */
6207     case 0x80: /* S */
6208     case 0x82: /* S */
6209     case 0x93: /* S */
6210     case 0xb2: /* S, RRF, RRE, IE */
6211     case 0xb3: /* RRE, RRD, RRF */
6212     case 0xb9: /* RRE, RRF */
6213     case 0xe5: /* SSE, SIL */
6214         op2 = (insn << 8) >> 56;
6215         break;
6216     case 0xa5: /* RI */
6217     case 0xa7: /* RI */
6218     case 0xc0: /* RIL */
6219     case 0xc2: /* RIL */
6220     case 0xc4: /* RIL */
6221     case 0xc6: /* RIL */
6222     case 0xc8: /* SSF */
6223     case 0xcc: /* RIL */
6224         op2 = (insn << 12) >> 60;
6225         break;
6226     case 0xc5: /* MII */
6227     case 0xc7: /* SMI */
6228     case 0xd0 ... 0xdf: /* SS */
6229     case 0xe1: /* SS */
6230     case 0xe2: /* SS */
6231     case 0xe8: /* SS */
6232     case 0xe9: /* SS */
6233     case 0xea: /* SS */
6234     case 0xee ... 0xf3: /* SS */
6235     case 0xf8 ... 0xfd: /* SS */
6236         op2 = 0;
6237         break;
6238     default:
6239         op2 = (insn << 40) >> 56;
6240         break;
6241     }
6242 
6243     memset(&s->fields, 0, sizeof(s->fields));
6244     s->fields.raw_insn = insn;
6245     s->fields.op = op;
6246     s->fields.op2 = op2;
6247 
6248     /* Look up the instruction.  */
6249     info = lookup_opc(op << 8 | op2);
6250     s->insn = info;
6251 
6252     /* If we found it, extract the operands.  */
6253     if (info != NULL) {
6254         DisasFormat fmt = info->fmt;
6255         int i;
6256 
6257         for (i = 0; i < NUM_C_FIELD; ++i) {
6258             extract_field(&s->fields, &format_info[fmt].op[i], insn);
6259         }
6260     }
6261     return info;
6262 }
6263 
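/*
 * The four original floating-point registers are 0, 2, 4 and 6; any
 * other register number requires the additional-floating-point (AFP)
 * facility, which is what the FLAG_MASK_AFP check in translate_one()
 * guards.
 */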
6264 static bool is_afp_reg(int reg)
6265 {
6266     return reg % 2 || reg > 6;
6267 }
6268 
6269 static bool is_fp_pair(int reg)
6270 {
6271     /* Valid pairs are 0,1,4,5,8,9,12,13; all of these have bit 1 clear. */
6272     return !(reg & 0x2);
6273 }
6274 
6275 static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
6276 {
6277     const DisasInsn *insn;
6278     DisasJumpType ret = DISAS_NEXT;
6279     DisasOps o = {};
6280     bool icount = false;
6281 
6282     /* Search for the insn in the table.  */
6283     insn = extract_insn(env, s);
6284 
6285     /* Update insn_start now that we know the ILEN.  */
6286     tcg_set_insn_start_param(s->insn_start, 2, s->ilen);
6287 
6288     /* Not found means unimplemented/illegal opcode.  */
6289     if (insn == NULL) {
6290         qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
6291                       s->fields.op, s->fields.op2);
6292         gen_illegal_opcode(s);
6293         ret = DISAS_NORETURN;
6294         goto out;
6295     }
6296 
6297 #ifndef CONFIG_USER_ONLY
6298     if (s->base.tb->flags & FLAG_MASK_PER) {
6299         TCGv_i64 addr = tcg_constant_i64(s->base.pc_next);
6300         gen_helper_per_ifetch(cpu_env, addr);
6301     }
6302 #endif
6303 
6304     /* process flags */
6305     if (insn->flags) {
6306         /* privileged instruction */
6307         if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
6308             gen_program_exception(s, PGM_PRIVILEGED);
6309             ret = DISAS_NORETURN;
6310             goto out;
6311         }
6312 
6313         /* if AFP is not enabled, AFP instructions and registers are forbidden */
6314         if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
6315             uint8_t dxc = 0;
6316 
6317             if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
6318                 dxc = 1;
6319             }
6320             if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
6321                 dxc = 1;
6322             }
6323             if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
6324                 dxc = 1;
6325             }
6326             if (insn->flags & IF_BFP) {
6327                 dxc = 2;
6328             }
6329             if (insn->flags & IF_DFP) {
6330                 dxc = 3;
6331             }
6332             if (insn->flags & IF_VEC) {
6333                 dxc = 0xfe;
6334             }
6335             if (dxc) {
6336                 gen_data_exception(dxc);
6337                 ret = DISAS_NORETURN;
6338                 goto out;
6339             }
6340         }
6341 
6342         /* if vector instructions not enabled, executing them is forbidden */
6343         if (insn->flags & IF_VEC) {
6344             if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
6345                 gen_data_exception(0xfe);
6346                 ret = DISAS_NORETURN;
6347                 goto out;
6348             }
6349         }
6350 
6351         /* input/output insns are the special case for icount mode */
6352         if (unlikely(insn->flags & IF_IO)) {
6353             icount = tb_cflags(s->base.tb) & CF_USE_ICOUNT;
6354             if (icount) {
6355                 gen_io_start();
6356             }
6357         }
6358     }
6359 
6360     /* Check for insn specification exceptions.  */
6361     if (insn->spec) {
6362         if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
6363             (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
6364             (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
6365             (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
6366             (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
6367             gen_program_exception(s, PGM_SPECIFICATION);
6368             ret = DISAS_NORETURN;
6369             goto out;
6370         }
6371     }
6372 
6373     /* Implement the instruction.  */
6374     if (insn->help_in1) {
6375         insn->help_in1(s, &o);
6376     }
6377     if (insn->help_in2) {
6378         insn->help_in2(s, &o);
6379     }
6380     if (insn->help_prep) {
6381         insn->help_prep(s, &o);
6382     }
6383     if (insn->help_op) {
6384         ret = insn->help_op(s, &o);
6385     }
6386     if (ret != DISAS_NORETURN) {
6387         if (insn->help_wout) {
6388             insn->help_wout(s, &o);
6389         }
6390         if (insn->help_cout) {
6391             insn->help_cout(s, &o);
6392         }
6393     }
6394 
6395     /* an I/O insn must be the last instruction in the TB when icount is enabled */
6396     if (unlikely(icount && ret == DISAS_NEXT)) {
6397         ret = DISAS_TOO_MANY;
6398     }
6399 
6400 #ifndef CONFIG_USER_ONLY
6401     if (s->base.tb->flags & FLAG_MASK_PER) {
6402         /* An exception might be triggered; save the PSW if not already done.  */
6403         if (ret == DISAS_NEXT || ret == DISAS_TOO_MANY) {
6404             tcg_gen_movi_i64(psw_addr, s->pc_tmp);
6405         }
6406 
6407         /* Call the helper to check for a possible PER exception.  */
6408         gen_helper_per_check_exception(cpu_env);
6409     }
6410 #endif
6411 
6412 out:
6413     /* Advance to the next instruction.  */
6414     s->base.pc_next = s->pc_tmp;
6415     return ret;
6416 }
6417 
6418 static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6419 {
6420     DisasContext *dc = container_of(dcbase, DisasContext, base);
6421 
6422     /* 31-bit mode */
6423     if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6424         dc->base.pc_first &= 0x7fffffff;
6425         dc->base.pc_next = dc->base.pc_first;
6426     }
6427 
6428     dc->cc_op = CC_OP_DYNAMIC;
6429     dc->ex_value = dc->base.tb->cs_base;
6430     dc->exit_to_mainloop = (dc->base.tb->flags & FLAG_MASK_PER) || dc->ex_value;
6431 }
6432 
6433 static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
6434 {
6435 }
6436 
6437 static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
6438 {
6439     DisasContext *dc = container_of(dcbase, DisasContext, base);
6440 
6441     /* Delay setting ilen until we've read the insn. */
6442     tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
6443     dc->insn_start = tcg_last_op();
6444 }
6445 
6446 static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
6447                                 uint64_t pc)
6448 {
6449     uint64_t insn = cpu_lduw_code(env, pc);
6450 
6451     return pc + get_ilen((insn >> 8) & 0xff);
6452 }
6453 
6454 static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6455 {
6456     CPUS390XState *env = cs->env_ptr;
6457     DisasContext *dc = container_of(dcbase, DisasContext, base);
6458 
6459     dc->base.is_jmp = translate_one(env, dc);
6460     if (dc->base.is_jmp == DISAS_NEXT) {
6461         if (dc->ex_value ||
6462             !is_same_page(dcbase, dc->base.pc_next) ||
6463             !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) {
6464             dc->base.is_jmp = DISAS_TOO_MANY;
6465         }
6466     }
6467 }
6468 
6469 static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
6470 {
6471     DisasContext *dc = container_of(dcbase, DisasContext, base);
6472 
6473     switch (dc->base.is_jmp) {
6474     case DISAS_NORETURN:
6475         break;
6476     case DISAS_TOO_MANY:
6477         update_psw_addr(dc);
6478         /* FALLTHRU */
6479     case DISAS_PC_UPDATED:
6480         /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
6481            cc op type is in env */
6482         update_cc_op(dc);
6483         /* FALLTHRU */
6484     case DISAS_PC_CC_UPDATED:
6485         /* Exit the TB, either by raising a debug exception or by return.  */
6486         if (dc->exit_to_mainloop) {
6487             tcg_gen_exit_tb(NULL, 0);
6488         } else {
6489             tcg_gen_lookup_and_goto_ptr();
6490         }
6491         break;
6492     default:
6493         g_assert_not_reached();
6494     }
6495 }
6496 
6497 static void s390x_tr_disas_log(const DisasContextBase *dcbase,
6498                                CPUState *cs, FILE *logfile)
6499 {
6500     DisasContext *dc = container_of(dcbase, DisasContext, base);
6501 
6502     if (unlikely(dc->ex_value)) {
6503         /* ??? Unfortunately target_disas can't use host memory.  */
6504         fprintf(logfile, "IN: EXECUTE %016" PRIx64 "\n", dc->ex_value);
6505     } else {
6506         fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
6507         target_disas(logfile, cs, dc->base.pc_first, dc->base.tb->size);
6508     }
6509 }
6510 
6511 static const TranslatorOps s390x_tr_ops = {
6512     .init_disas_context = s390x_tr_init_disas_context,
6513     .tb_start           = s390x_tr_tb_start,
6514     .insn_start         = s390x_tr_insn_start,
6515     .translate_insn     = s390x_tr_translate_insn,
6516     .tb_stop            = s390x_tr_tb_stop,
6517     .disas_log          = s390x_tr_disas_log,
6518 };
6519 
6520 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
6521                            target_ulong pc, void *host_pc)
6522 {
6523     DisasContext dc;
6524 
6525     translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
6526 }
6527 
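/*
 * data[] mirrors the arguments of tcg_gen_insn_start() in
 * s390x_tr_insn_start(): data[0] is the PSW address, data[1] the
 * cc_op, and data[2] the ilen stored via tcg_set_insn_start_param()
 * in translate_one().
 */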
6528 void s390x_restore_state_to_opc(CPUState *cs,
6529                                 const TranslationBlock *tb,
6530                                 const uint64_t *data)
6531 {
6532     S390CPU *cpu = S390_CPU(cs);
6533     CPUS390XState *env = &cpu->env;
6534     int cc_op = data[1];
6535 
6536     env->psw.addr = data[0];
6537 
6538     /* Update cc_op if it is not already up-to-date.  */
6539     if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6540         env->cc_op = cc_op;
6541     }
6542 
6543     /* Record ILEN.  */
6544     env->int_pgm_ilen = data[2];
6545 }
6546