xref: /qemu/target/s390x/tcg/translate.c (revision 02326733)
1 /*
2  *  S/390 translation
3  *
4  *  Copyright (c) 2009 Ulrich Hecht
5  *  Copyright (c) 2010 Alexander Graf
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24 
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 #  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 #  define LOG_DISAS(...) do { } while (0)
29 #endif
30 
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "s390x-internal.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
36 #include "tcg/tcg-op.h"
37 #include "tcg/tcg-op-gvec.h"
38 #include "qemu/log.h"
39 #include "qemu/host-utils.h"
40 #include "exec/cpu_ldst.h"
41 #include "exec/helper-proto.h"
42 #include "exec/helper-gen.h"
43 
44 #include "exec/translator.h"
45 #include "exec/log.h"
46 #include "qemu/atomic128.h"
47 
48 #define HELPER_H "helper.h"
49 #include "exec/helper-info.c.inc"
50 #undef  HELPER_H
51 
52 
53 /* Information that (most) every instruction needs to manipulate.  */
54 typedef struct DisasContext DisasContext;
55 typedef struct DisasInsn DisasInsn;
56 typedef struct DisasFields DisasFields;
57 
58 /*
59  * Define a structure to hold the decoded fields.  We'll store each inside
60  * an array indexed by an enum.  In order to conserve memory, we'll arrange
61  * for fields that do not exist at the same time to overlap, thus the "C"
62  * for compact.  For checking purposes there is an "O" for original index
63  * as well that will be applied to availability bitmaps.
64  */
65 
/*
 * "Original" field indices: one enumerator per distinct instruction field
 * name.  Used as bit positions in DisasFields.presentO availability bitmap
 * (see have_field1), so the enum must stay within the width of presentO.
 */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};

/*
 * "Compact" field indices: fields that can never appear in the same
 * instruction format share a slot in DisasFields.c[], so only NUM_C_FIELD
 * storage slots are needed instead of one per FLD_O_* name.
 */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};
131 
struct DisasFields {
    uint64_t raw_insn;        /* the undecoded instruction bytes */
    unsigned op:8;            /* primary opcode */
    unsigned op2:8;           /* secondary/extended opcode */
    unsigned presentC:16;     /* bitmap over compact slots (FLD_C_*) */
    unsigned int presentO;    /* bitmap over original indices (FLD_O_*) */
    int c[NUM_C_FIELD];       /* decoded field values, compact-indexed */
};

/* Per-translation-block state threaded through every translate routine. */
struct DisasContext {
    DisasContextBase base;    /* common translator state; must be first */
    const DisasInsn *insn;    /* the instruction currently being translated */
    DisasFields fields;       /* its decoded operand fields */
    uint64_t ex_value;        /* pending EXECUTE target value, if any */
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;            /* length in bytes of the current insn */
    enum cc_op cc_op;         /* how the CC is currently represented */
    bool exit_to_mainloop;    /* force exit to main loop after this TB */
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;               /* selects u.s64 vs u.s32 below */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
166 
167 #ifdef DEBUG_INLINE_BRANCHES
168 static uint64_t inline_branch_hit[CC_OP_MAX];
169 static uint64_t inline_branch_miss[CC_OP_MAX];
170 #endif
171 
/*
 * Store the link-register value for PC into OUT, respecting the current
 * addressing mode.  In 64-bit mode (FLAG_MASK_32 and FLAG_MASK_64 both
 * set) the full address is written.  Otherwise only the low 32 bits of
 * OUT are replaced, preserving the upper half; in 31-bit mode
 * (FLAG_MASK_32 only) bit 0x80000000 is set in the stored address.
 */
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    /* FLAG_MASK_64 without FLAG_MASK_32 would be an invalid PSW mode. */
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tcg_gen_deposit_i64(out, out, tcg_constant_i64(pc), 0, 32);
}
184 
185 static TCGv_i64 psw_addr;
186 static TCGv_i64 psw_mask;
187 static TCGv_i64 gbea;
188 
189 static TCGv_i32 cc_op;
190 static TCGv_i64 cc_src;
191 static TCGv_i64 cc_dst;
192 static TCGv_i64 cc_vr;
193 
194 static char cpu_reg_names[16][4];
195 static TCGv_i64 regs[16];
196 
/*
 * One-time initialization: create the TCG global variables that mirror
 * fields of CPUS390XState (PSW address/mask, breaking-event address,
 * condition-code pieces, and the 16 general registers).
 */
void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(tcg_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(tcg_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(tcg_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(tcg_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* The names must outlive the TCG globals, hence the static buffer. */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(tcg_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}
227 
/* Byte offset of the full 16-byte vector register REG within CPUS390XState. */
static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}

/*
 * Byte offset of element ENR (of size ES) within vector register REG,
 * adjusted so the same element number addresses the same architectural
 * byte lane on both big- and little-endian hosts.
 */
static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
     * of the 16 byte vector, on both, little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#if !HOST_BIG_ENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}

/* FP register REG as a 64-bit value: element 0 of the overlaid vector reg. */
static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

/* FP register REG as a 32-bit value (short format, high word of the freg). */
static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}
280 
/* Return a fresh temp holding a copy of general register REG. */
static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

/* Return a fresh temp holding the 64-bit contents of FP register REG. */
static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, tcg_env, freg64_offset(reg));
    return r;
}

/* Return a fresh temp with the 32-bit FP register REG, zero-extended to 64. */
static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, tcg_env, freg32_offset(reg));
    return r;
}

/*
 * Return a fresh 128-bit temp for the FP register pair (REG, REG + 2):
 * REG supplies the high doubleword and REG + 2 the low one.
 */
static TCGv_i128 load_freg_128(int reg)
{
    TCGv_i64 h = load_freg(reg);
    TCGv_i64 l = load_freg(reg + 2);
    TCGv_i128 r = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(r, l, h);
    return r;
}

/* Write V to general register REG (full 64 bits). */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

/* Write V to the 64-bit FP register REG. */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, tcg_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

/* Write the low 32 bits of V to the high half of register REG. */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

/* Write the low 32 bits of V to the short-format FP register REG. */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, tcg_env, freg32_offset(reg));
}
339 
/* Synchronize env psw.addr with the translator's current instruction PC. */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

/*
 * Record a taken branch for PER (Program Event Recording): set the
 * breaking-event address (gbea) to the current PC and, when PER tracing
 * is enabled for this TB, call the helper with the branch destination
 * (either the known next sequential PC or the runtime psw_addr).
 * No-op for user-only builds.
 */
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_constant_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(tcg_env, gbea, next_pc);
    }
#endif
}

/*
 * As per_branch, but for a conditional branch taken iff COND(arg1, arg2).
 * With PER enabled the helper runs only on the taken path; otherwise
 * gbea is updated with a branchless movcond.
 */
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        /* Skip the PER helper when the branch is NOT taken. */
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(tcg_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_constant_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
    }
#endif
}

/* Unconditionally record the current PC as the breaking-event address. */
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

/*
 * Flush the compile-time-known cc_op into env->cc_op.  DYNAMIC/STATIC
 * mean env->cc_op is already authoritative, so nothing is written.
 */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

/* Fetch 2 bytes of instruction text at PC, zero-extended. */
static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)translator_lduw(env, &s->base, pc);
}

/* Fetch 4 bytes of instruction text at PC, zero-extended. */
static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
}
400 
401 static int get_mem_index(DisasContext *s)
402 {
403 #ifdef CONFIG_USER_ONLY
404     return MMU_USER_IDX;
405 #else
406     if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
407         return MMU_REAL_IDX;
408     }
409 
410     switch (s->base.tb->flags & FLAG_MASK_ASC) {
411     case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
412         return MMU_PRIMARY_IDX;
413     case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
414         return MMU_SECONDARY_IDX;
415     case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
416         return MMU_HOME_IDX;
417     default:
418         g_assert_not_reached();
419         break;
420     }
421 #endif
422 }
423 
/* Raise CPU exception EXCP via the generic exception helper. */
static void gen_exception(int excp)
{
    gen_helper_exception(tcg_env, tcg_constant_i32(excp));
}

/*
 * Raise program exception CODE for the current instruction: record the
 * code and instruction length in env, synchronize psw.addr and the cc,
 * then trigger EXCP_PGM.
 */
static void gen_program_exception(DisasContext *s, int code)
{
    /* Remember what pgm exception this was.  */
    tcg_gen_st_i32(tcg_constant_i32(code), tcg_env,
                   offsetof(CPUS390XState, int_pgm_code));

    tcg_gen_st_i32(tcg_constant_i32(s->ilen), tcg_env,
                   offsetof(CPUS390XState, int_pgm_ilen));

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

/* Raise an operation (illegal opcode) program exception. */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

/* Raise a data exception with data-exception code DXC. */
static inline void gen_data_exception(uint8_t dxc)
{
    gen_helper_data_exception(tcg_env, tcg_constant_i32(dxc));
}

/* Raise the trap form of the data exception (DXC 0xff). */
static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}
463 
/*
 * DST = SRC + IMM, wrapped to the current addressing mode: full 64 bits
 * in 64-bit mode, masked to 31 bits (FLAG_MASK_32) or 24 bits otherwise.
 */
static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}

/*
 * Compute the effective address base(b2) + index(x2) + displacement(d2)
 * into a fresh temp, honoring the addressing-mode wrap.  b2/x2 == 0 means
 * "no register" per the architecture, not r0.
 */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        /* Displacement only: mask the constant at translation time. */
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}
504 
/*
 * True if cc_src/cc_dst/cc_vr currently hold data that a later cc
 * computation would consume.  The "> 3" test assumes the CC_OP_CONST0..3
 * values occupy the low enum slots -- NOTE(review): relies on enum cc_op
 * ordering defined elsewhere.
 */
static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

/* Set the cc to the compile-time constant VAL (0..3). */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

/* Record a one-operand cc computation: OP will consume cc_dst only. */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Record a two-operand cc computation: OP consumes cc_src and cc_dst. */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Record a three-operand cc computation: OP consumes src, dst and vr. */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

/* Set cc from "VAL != 0". */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
567 
/*
 * Materialize the condition code: compute the pending cc (described by
 * s->cc_op plus cc_src/cc_dst/cc_vr) into the cc_op global, then mark
 * the cc as STATIC.  The first switch only prepares helper arguments
 * (note the deliberate fallthrough into the common cases); the second
 * dispatches by the number of inputs the helper needs.
 */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_constant_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_constant_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper call needed; no arguments to prepare. */
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
        /* Inline: cc = (cc_dst != 0). */
        tcg_gen_setcondi_i64(TCG_COND_NE, cc_dst, cc_dst, 0);
        tcg_gen_extrl_i64_i32(cc_op, cc_dst);
        break;
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, tcg_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        g_assert_not_reached();
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
658 
659 static bool use_goto_tb(DisasContext *s, uint64_t dest)
660 {
661     if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
662         return false;
663     }
664     return translator_use_goto_tb(&s->base, dest);
665 }
666 
/* Statistics hook: count a branch that had to go through gen_op_calc_cc. */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

/* Statistics hook: count a branch folded into an inline TCG comparison. */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
706 
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  Where possible the
   branch condition is folded directly onto the cc inputs (cc_src/cc_dst)
   without materializing the cc; otherwise gen_op_calc_cc is used and the
   test is performed on the cc value itself.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Trivial cases: branch always / branch never. */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        /* TEST UNDER MASK: fold onto TCG's test-style conditions. */
        switch (mask) {
        case 8:
            cond = TCG_COND_TSTEQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_TSTNE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_TSTEQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_TSTNE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_constant_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_constant_i64(0);
        break;

    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_constant_i64(0);
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        /* The cc has been computed; compare the cc value itself. */
        c->is_64 = false;
        c->u.s32.a = cc_op;

        /* Fold half of the cases using bit 3 to invert. */
        switch (mask & 8 ? mask ^ 0xf : mask) {
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2 | 0x1: /* cc == 2 || cc == 3 => cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_TSTNE;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x4 | 0x2: /* cc == 1 || cc == 2 => (cc - 1) <= 1 */
            cond = TCG_COND_LEU;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(1);
            tcg_gen_addi_i32(c->u.s32.a, cc_op, -1);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        default:
            /* case 0: never, handled above. */
            g_assert_not_reached();
        }
        if (mask & 8) {
            cond = tcg_invert_cond(cond);
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
934 
/* ====================================================================== */
/* Define the insn format enumeration.  */
/*
 * Each Fn macro ignores the field descriptions and expands to a single
 * FMT_<name> enumerator, so including "insn-format.h.inc" here produces
 * one DisasFormat value per instruction format.
 */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.h.inc"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
956 
/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

/* Test the presentO bitmap: does field C exist in the decoded insn? */
static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

/* Fetch the value of field O from its compact storage slot C.  */
static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}
972 
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;           /* first bit of the field in the insn */
    unsigned int size:8;          /* field width in bits */
    unsigned int type:2;          /* extraction kind (0 unsigned; 1 used for
                                     signed immediates, 2 for 20-bit long
                                     displacements, 3 for vector regs --
                                     NOTE(review): interpretation lives in
                                     the extractor, not visible here) */
    unsigned int indexC:6;        /* compact storage slot (FLD_C_*) */
    enum DisasFieldIndexO indexO:8;   /* original field name (FLD_O_*) */
} DisasField;

/* Per-format list of field layouts, at most NUM_C_FIELD entries. */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

/* Field-layout building blocks: register, mask, vector, base+disp, etc. */
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

/* This time the Fn macros expand to initializers, building one
   DisasFormatInfo per format from the same include file. */
#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6)       { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.h.inc"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
1030 
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    TCGv_i64 out, out2, in1, in2;   /* 64-bit operand values */
    TCGv_i64 addr1;                 /* memory address of the first operand */
    TCGv_i128 out_128, in1_128, in2_128;  /* 128-bit operand variants */
} DisasOps;
1039 
1040 /* Instructions can place constraints on their operands, raising specification
1041    exceptions if they are violated.  To make this easy to automate, each "in1",
1042    "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1043    of the following, or 0.  To make this easy to document, we'll put the
1044    SPEC_<name> defines next to <name>.  */
1045 
#define SPEC_r1_even    1       /* r1 must be an even register number */
#define SPEC_r2_even    2       /* r2 must be an even register number */
#define SPEC_r3_even    4       /* r3 must be an even register number */
#define SPEC_r1_f128    8       /* r1 must name a 128-bit fp register pair */
#define SPEC_r2_f128    16      /* r2 must name a 128-bit fp register pair */
1051 
1052 /* Return values from translate_one, indicating the state of the TB.  */
1053 
1054 /* We are not using a goto_tb (for whatever reason), but have updated
1055    the PC (for whatever reason), so there's no need to do it again on
1056    exiting the TB.  */
1057 #define DISAS_PC_UPDATED        DISAS_TARGET_0
1058 
1059 /* We have updated the PC and CC values.  */
1060 #define DISAS_PC_CC_UPDATED     DISAS_TARGET_2
1061 
1062 
1063 /* Instruction flags */
1064 #define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
1065 #define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
1066 #define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
1067 #define IF_BFP      0x0008      /* binary floating point instruction */
1068 #define IF_DFP      0x0010      /* decimal floating point instruction */
1069 #define IF_PRIV     0x0020      /* privileged instruction */
1070 #define IF_VEC      0x0040      /* vector instruction */
1071 #define IF_IO       0x0080      /* input/output instruction */
1072 
/* Decode-table entry for one instruction: how to crack its operands,
   the constraints it places on them, and the callbacks that emit TCG.  */
struct DisasInsn {
    unsigned opc:16;            /* opcode */
    unsigned flags:16;          /* IF_* instruction flags */
    DisasFormat fmt:8;          /* operand format (index into format_info) */
    unsigned fac:8;             /* facility required by the insn */
    unsigned spec:8;            /* SPEC_* operand constraints */

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    /* Extra per-insn constant, available to helpers as s->insn->data.  */
    uint64_t data;
};
1099 
1100 /* ====================================================================== */
1101 /* Miscellaneous helpers, used by several operations.  */
1102 
/* Emit an unconditional branch to the known address DEST, using the
   cheapest available exit: fall through to the next insn, a chained
   goto_tb exit, or a plain indirect exit via psw_addr.  */
static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        /* Branch to the next sequential insn: no control transfer needed.  */
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        /* Direct TB chaining is allowed for this destination.  */
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        /* Indirect exit: just set the PSW address and leave the TB.  */
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}
1122 
/*
 * Emit a conditional branch described by the comparison C.  IS_IMM
 * selects between a PC-relative target (IMM halfwords from the insn
 * address) and the computed address CDEST.  The fast paths chain both
 * exits with goto_tb where permitted; otherwise the new PSW address is
 * selected with a movcond and the TB is left indirectly.
 */
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + (int64_t)imm * 2;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_NORETURN;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_constant_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_constant_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            /* Widen the 32-bit comparison result so it can drive the
               64-bit movcond selecting the new PSW address.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_constant_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
        }

        ret = DISAS_PC_UPDATED;
    }

 egress:
    return ret;
}
1244 
1245 /* ====================================================================== */
1246 /* The operations.  These perform the bulk of the work for any insn,
1247    usually after the operands have been loaded and output initialized.  */
1248 
/* Absolute value: out = |in2|.  */
static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

/* FP absolute value, 32-bit: clear bit 31, the sign bit of the float.  */
static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

/* FP absolute value, 64-bit: clear the sign bit.  */
static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

/* FP absolute value, 128-bit: clear the sign bit in the high half (in1),
   pass the low half (in2) through unchanged.  */
static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

/* Signed addition: out = in1 + in2.  */
static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* Unsigned addition: out = in1 + in2, with the carry-out left in
   cc_src for the CC computation.  */
static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
1286 
/* Compute carry into cc_src. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        /* cc_src holds the borrow (0,-1); adding 1 yields the carry.  */
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        /* Materialize the CC value, then extract the carry below.  */
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}
1307 
/* 32-bit add with carry: out = in1 + in2 + carry.  The carry-out is
   not tracked here; the CC helper recomputes it from the operands.  */
static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}

/* 64-bit add with carry: out = in1 + in2 + carry, accumulating the
   carry-out of both additions into cc_src via double-word adds.  */
static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}
1326 
/* ADD to storage: without S390_FEAT_STFLE_45 this is a plain
   load/add/store sequence; with it, the add is performed atomically
   in memory.  */
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1348 
/* ADD LOGICAL to storage (64-bit): as op_asi, but the recomputation
   uses add2 so the carry-out lands in cc_src for the CC.  */
static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1371 
/* BFP add, 32-bit: out = in1 + in2 via helper (handles exceptions).  */
static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* BFP add, 64-bit.  */
static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* BFP add, 128-bit.  */
static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}

/* Bitwise AND: out = in1 & in2.  */
static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1395 
/* AND a shifted immediate field into a register (NIHH/NIHL/... family):
   insn->data packs the field shift in bits 0-7 and its width in bits 8+.
   Bits outside the field are preserved by ORing 1s into the mask.  */
static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    /* t = (in2 << shift) | ~mask, so the AND only affects the field.  */
    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_ori_i64(t, t, ~mask);
    tcg_gen_and_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
1412 
/* AND with complement: out = in1 & ~in2.  */
static DisasJumpType op_andc(DisasContext *s, DisasOps *o)
{
    tcg_gen_andc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* OR with complement: out = in1 | ~in2.  */
static DisasJumpType op_orc(DisasContext *s, DisasOps *o)
{
    tcg_gen_orc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* NAND: out = ~(in1 & in2).  */
static DisasJumpType op_nand(DisasContext *s, DisasOps *o)
{
    tcg_gen_nand_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* NOR: out = ~(in1 | in2).  */
static DisasJumpType op_nor(DisasContext *s, DisasOps *o)
{
    tcg_gen_nor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* NOT XOR (equivalence): out = ~(in1 ^ in2).  */
static DisasJumpType op_nxor(DisasContext *s, DisasOps *o)
{
    tcg_gen_eqv_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1442 
1443 static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
1444 {
1445     o->in1 = tcg_temp_new_i64();
1446 
1447     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
1448         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
1449     } else {
1450         /* Perform the atomic operation in memory. */
1451         tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
1452                                      s->insn->data);
1453     }
1454 
1455     /* Recompute also for atomic case: needed for setting CC. */
1456     tcg_gen_and_i64(o->out, o->in1, o->in2);
1457 
1458     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
1459         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
1460     }
1461     return DISAS_NEXT;
1462 }
1463 
/* BRANCH AND SAVE: store the link information in out, then branch to
   the address in in2.  A null in2 (e.g. BASR with R2=0) saves the link
   but does not branch.  */
static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}
1475 
/* Save the BAL/BALR link information in o->out.  In 31/64-bit mode
   this is the plain link address; in 24-bit mode the low word is
   ILC (bits 30-31) | CC (bits 28-29) | program mask (bits 24-27) |
   return address (bits 0-23).  */
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    /* The CC must be current so it can be stored into the link word.  */
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    /* ILC in bits 30-31, return address in the low bits.  */
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    /* Program mask: PSW bits 40-43 moved down into link bits 24-27.  */
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    /* CC into link bits 28-29.  */
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
}
1495 
/* BRANCH AND LINK: save the (mode-dependent) link information, then
   branch to the address in in2 if present.  */
static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}
1507 
1508 /*
1509  * Disassemble the target of a branch. The results are returned in a form
1510  * suitable for passing into help_branch():
1511  *
1512  * - bool IS_IMM reflects whether the target is fixed or computed. Non-EXECUTEd
1513  *   branches, whose DisasContext *S contains the relative immediate field RI,
1514  *   are considered fixed. All the other branches are considered computed.
1515  * - int IMM is the value of RI.
1516  * - TCGv_i64 CDEST is the address of the computed target.
1517  */
1518 #define disas_jdest(s, ri, is_imm, imm, cdest) do {                            \
1519     if (have_field(s, ri)) {                                                   \
1520         if (unlikely(s->ex_value)) {                                           \
1521             cdest = tcg_temp_new_i64();                                        \
1522             tcg_gen_ld_i64(cdest, tcg_env, offsetof(CPUS390XState, ex_target));\
1523             tcg_gen_addi_i64(cdest, cdest, (int64_t)get_field(s, ri) * 2);     \
1524             is_imm = false;                                                    \
1525         } else {                                                               \
1526             is_imm = true;                                                     \
1527         }                                                                      \
1528     } else {                                                                   \
1529         is_imm = false;                                                        \
1530     }                                                                          \
1531     imm = is_imm ? get_field(s, ri) : 0;                                       \
1532 } while (false)
1533 
/* Branch relative and save: save the link, then unconditionally branch
   to the i2 target (fixed or EXECUTE-adjusted, per disas_jdest).  */
static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    bool is_imm;
    int imm;

    pc_to_link_info(o->out, s, s->pc_tmp);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    disas_jcc(s, &c, 0xf);   /* mask 0xf: branch always */
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1546 
/* BRANCH ON CONDITION: branch per mask m1.  BCR with R2=0 never
   branches; BCR 14,0 and BCR 15,0 act as serialization points.  */
static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s, m1);
    DisasCompare c;
    bool is_imm;
    int imm;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s, r2) && get_field(s, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jdest(s, i2, is_imm, imm, o->in2);
    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1573 
/* BRANCH ON COUNT (32-bit): decrement the low word of r1 and branch
   if the result is nonzero.  */
static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    DisasCompare c;
    bool is_imm;
    TCGv_i64 t;
    int imm;

    c.cond = TCG_COND_NE;
    c.is_64 = false;

    /* Decrement and write back only the low 32 bits of r1.  */
    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1595 
/* BRANCH ON COUNT HIGH: decrement the high word of r1 and branch if
   the result is nonzero.  The target is always a relative immediate.  */
static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int imm = get_field(s, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;

    /* Extract, decrement, and write back the high 32 bits of r1.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);

    return help_branch(s, &c, 1, imm, o->in2);
}
1616 
/* BRANCH ON COUNT (64-bit): decrement r1 and branch if nonzero.  */
static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    DisasCompare c;
    bool is_imm;
    int imm;

    c.cond = TCG_COND_NE;
    c.is_64 = true;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_constant_i64(0);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1634 
/* BRANCH ON INDEX (32-bit): r1 += r3, then branch by comparing the new
   index with the comparand in the odd register of the r3 pair.
   insn->data selects LE (BXLE) vs GT (BXH).  */
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    DisasCompare c;
    bool is_imm;
    TCGv_i64 t;
    int imm;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    /* The comparand lives in the odd register of the r3 pair.  */
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1658 
/* BRANCH ON INDEX (64-bit): r1 += r3, then compare against the odd
   register of the r3 pair.  insn->data selects LE (BXLEG) vs GT (BXHG).  */
static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    DisasCompare c;
    bool is_imm;
    int imm;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    /* If r1 aliases the comparand register, copy its value before the
       addition below clobbers it.  */
    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
    } else {
        c.u.s64.b = regs[r3 | 1];
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1682 
/* COMPARE AND BRANCH family: compare in1 with in2 per mask m3
   (insn->data selects the unsigned COMPARE LOGICAL variants) and
   branch to either the relative i4 target or the b4/d4 address.  */
static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    o->out = NULL;
    disas_jdest(s, i4, is_imm, imm, o->out);
    if (!is_imm && !o->out) {
        /* No relative target: the branch address comes from b4/d4.  */
        imm = 0;
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1707 
/* BFP compare, 32-bit: helper sets the CC directly.  */
static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* BFP compare, 64-bit.  */
static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* BFP compare, 128-bit.  */
static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, tcg_env, o->in1_128, o->in2_128);
    set_cc_static(s);
    return DISAS_NEXT;
}
1728 
/* Extract and validate the M3 (rounding mode) and M4 fields of a fp
   instruction, honoring whether each field exists pre-FPE.  Returns
   m3 | m4 << 4 as a constant, or NULL after raising a specification
   exception for an invalid rounding mode.  */
static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
                                   bool m4_with_fpe)
{
    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
    uint8_t m3 = get_field(s, m3);
    uint8_t m4 = get_field(s, m4);

    /* m3 field was introduced with FPE */
    if (!fpe && m3_with_fpe) {
        m3 = 0;
    }
    /* m4 field was introduced with FPE */
    if (!fpe && m4_with_fpe) {
        m4 = 0;
    }

    /* Check for valid rounding modes. Mode 3 was introduced later. */
    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return NULL;
    }

    return tcg_constant_i32(deposit32(m3, 4, 4, m4));
}
1753 
/*
 * CONVERT TO FIXED: BFP source to signed 32-bit (CF*B) or 64-bit
 * (CG*B) binary integer.  The rounding mode comes from M3/M4 via
 * fpinst_extract_m34; the helpers set the CC.
 */
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1825 
/*
 * CONVERT TO LOGICAL: BFP source to unsigned 32-bit (CLF*B) or 64-bit
 * (CLG*B) binary integer, rounding per M3/M4.  The helpers set the CC.
 */
static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1897 
/*
 * CONVERT FROM FIXED/LOGICAL: signed (C*GB) or unsigned (C*LGB)
 * 64-bit integer to BFP, rounding per M3/M4.  No CC is set.
 */
static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out_128, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out_128, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}
1962 }
1963 
/* CHECKSUM: the helper returns the checksum and the number of bytes
   processed as an i128 pair; advance the r2/r2+1 address/length pair
   by that count.  Sets the CC.  */
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    TCGv_i128 pair = tcg_temp_new_i128();
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(pair, tcg_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    tcg_gen_extr_i128_i64(o->out, len, pair);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);

    return DISAS_NEXT;
}
1979 
/* COMPARE LOGICAL character: for lengths of 1, 2, 4 or 8 bytes the
   comparison is inlined as two loads; other lengths go through the
   helper.  Sets the CC.  */
static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s, l1);
    TCGv_i64 src;
    TCGv_i32 vl;
    MemOp mop;

    switch (l + 1) {
    case 1:
    case 2:
    case 4:
    case 8:
        mop = ctz32(l + 1) | MO_TE;
        /* Do not update cc_src yet: loading cc_dst may cause an exception. */
        src = tcg_temp_new_i64();
        tcg_gen_qemu_ld_tl(src, o->addr1, get_mem_index(s), mop);
        tcg_gen_qemu_ld_tl(cc_dst, o->in2, get_mem_index(s), mop);
        gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, src, cc_dst);
        return DISAS_NEXT;
    default:
        vl = tcg_constant_i32(l);
        gen_helper_clc(cc_op, tcg_env, vl, o->addr1, o->in2);
        set_cc_static(s);
        return DISAS_NEXT;
    }
}
2006 
2007 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2008 {
2009     int r1 = get_field(s, r1);
2010     int r2 = get_field(s, r2);
2011     TCGv_i32 t1, t2;
2012 
2013     /* r1 and r2 must be even.  */
2014     if (r1 & 1 || r2 & 1) {
2015         gen_program_exception(s, PGM_SPECIFICATION);
2016         return DISAS_NORETURN;
2017     }
2018 
2019     t1 = tcg_constant_i32(r1);
2020     t2 = tcg_constant_i32(r2);
2021     gen_helper_clcl(cc_op, tcg_env, t1, t2);
2022     set_cc_static(s);
2023     return DISAS_NEXT;
2024 }
2025 
2026 static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
2027 {
2028     int r1 = get_field(s, r1);
2029     int r3 = get_field(s, r3);
2030     TCGv_i32 t1, t3;
2031 
2032     /* r1 and r3 must be even.  */
2033     if (r1 & 1 || r3 & 1) {
2034         gen_program_exception(s, PGM_SPECIFICATION);
2035         return DISAS_NORETURN;
2036     }
2037 
2038     t1 = tcg_constant_i32(r1);
2039     t3 = tcg_constant_i32(r3);
2040     gen_helper_clcle(cc_op, tcg_env, t1, o->in2, t3);
2041     set_cc_static(s);
2042     return DISAS_NEXT;
2043 }
2044 
2045 static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
2046 {
2047     int r1 = get_field(s, r1);
2048     int r3 = get_field(s, r3);
2049     TCGv_i32 t1, t3;
2050 
2051     /* r1 and r3 must be even.  */
2052     if (r1 & 1 || r3 & 1) {
2053         gen_program_exception(s, PGM_SPECIFICATION);
2054         return DISAS_NORETURN;
2055     }
2056 
2057     t1 = tcg_constant_i32(r1);
2058     t3 = tcg_constant_i32(r3);
2059     gen_helper_clclu(cc_op, tcg_env, t1, o->in2, t3);
2060     set_cc_static(s);
2061     return DISAS_NEXT;
2062 }
2063 
2064 static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
2065 {
2066     TCGv_i32 m3 = tcg_constant_i32(get_field(s, m3));
2067     TCGv_i32 t1 = tcg_temp_new_i32();
2068 
2069     tcg_gen_extrl_i64_i32(t1, o->in1);
2070     gen_helper_clm(cc_op, tcg_env, t1, m3, o->in2);
2071     set_cc_static(s);
2072     return DISAS_NEXT;
2073 }
2074 
2075 static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
2076 {
2077     TCGv_i128 pair = tcg_temp_new_i128();
2078 
2079     gen_helper_clst(pair, tcg_env, regs[0], o->in1, o->in2);
2080     tcg_gen_extr_i128_i64(o->in2, o->in1, pair);
2081 
2082     set_cc_static(s);
2083     return DISAS_NEXT;
2084 }
2085 
2086 static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2087 {
2088     TCGv_i64 t = tcg_temp_new_i64();
2089     tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2090     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2091     tcg_gen_or_i64(o->out, o->out, t);
2092     return DISAS_NEXT;
2093 }
2094 
/*
 * COMPARE AND SWAP: atomic cmpxchg at B2+D2; o->out receives the old
 * memory value, and CC is 0 on success (values equal) or 1 on mismatch.
 */
static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    /* insn->data supplies the operand size; natural alignment is required. */
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    set_cc_static(s);

    return DISAS_NEXT;
}
2117 
/*
 * COMPARE DOUBLE AND SWAP (CDSG): 128-bit atomic cmpxchg using the
 * register pairs R1:R1+1 (expected) and R3:R3+1 (new value).
 */
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);

    o->out_128 = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(o->out_128, regs[r1 + 1], regs[r1]);

    /* Note out (R1:R1+1) = expected value and in2 (R3:R3+1) = new value.  */
    tcg_gen_atomic_cmpxchg_i128(o->out_128, o->addr1, o->out_128, o->in2_128,
                                get_mem_index(s), MO_BE | MO_128 | MO_ALIGN);

    /*
     * Extract result into cc_dst:cc_src, compare vs the expected value
     * in the as yet unmodified input registers, then update CC_OP.
     */
    tcg_gen_extr_i128_i64(cc_src, cc_dst, o->out_128);
    tcg_gen_xor_i64(cc_dst, cc_dst, regs[r1]);
    tcg_gen_xor_i64(cc_src, cc_src, regs[r1 + 1]);
    tcg_gen_or_i64(cc_dst, cc_dst, cc_src);
    set_cc_nz_u64(s, cc_dst);

    return DISAS_NEXT;
}
2141 
2142 static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
2143 {
2144     int r3 = get_field(s, r3);
2145     TCGv_i32 t_r3 = tcg_constant_i32(r3);
2146 
2147     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2148         gen_helper_csst_parallel(cc_op, tcg_env, t_r3, o->addr1, o->in2);
2149     } else {
2150         gen_helper_csst(cc_op, tcg_env, t_r3, o->addr1, o->in2);
2151     }
2152 
2153     set_cc_static(s);
2154     return DISAS_NEXT;
2155 }
2156 
2157 #ifndef CONFIG_USER_ONLY
/*
 * COMPARE AND SWAP AND PURGE: cmpxchg on the word at R2 (low address
 * bits masked off); on success with R2 bit 63 set, purge the TLB on
 * all CPUs.  insn->data supplies the operand size as a MemOp.
 */
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    MemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);

    gen_helper_purge(tcg_env);
    gen_set_label(lab);

    return DISAS_NEXT;
}
2197 #endif
2198 
2199 static DisasJumpType op_cvb(DisasContext *s, DisasOps *o)
2200 {
2201     TCGv_i64 t = tcg_temp_new_i64();
2202     tcg_gen_qemu_ld_i64(t, o->addr1, get_mem_index(s), MO_TEUQ);
2203     gen_helper_cvb(tcg_env, tcg_constant_i32(get_field(s, r1)), t);
2204     return DISAS_NEXT;
2205 }
2206 
2207 static DisasJumpType op_cvbg(DisasContext *s, DisasOps *o)
2208 {
2209     TCGv_i128 t = tcg_temp_new_i128();
2210     tcg_gen_qemu_ld_i128(t, o->addr1, get_mem_index(s), MO_TE | MO_128);
2211     gen_helper_cvbg(o->out, tcg_env, t);
2212     return DISAS_NEXT;
2213 }
2214 
2215 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2216 {
2217     TCGv_i64 t1 = tcg_temp_new_i64();
2218     TCGv_i32 t2 = tcg_temp_new_i32();
2219     tcg_gen_extrl_i64_i32(t2, o->in1);
2220     gen_helper_cvd(t1, t2);
2221     tcg_gen_qemu_st_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
2222     return DISAS_NEXT;
2223 }
2224 
2225 static DisasJumpType op_cvdg(DisasContext *s, DisasOps *o)
2226 {
2227     TCGv_i128 t = tcg_temp_new_i128();
2228     gen_helper_cvdg(t, o->in1);
2229     tcg_gen_qemu_st_i128(t, o->in2, get_mem_index(s), MO_TE | MO_128);
2230     return DISAS_NEXT;
2231 }
2232 
2233 static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2234 {
2235     int m3 = get_field(s, m3);
2236     TCGLabel *lab = gen_new_label();
2237     TCGCond c;
2238 
2239     c = tcg_invert_cond(ltgt_cond[m3]);
2240     if (s->insn->data) {
2241         c = tcg_unsigned_cond(c);
2242     }
2243     tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2244 
2245     /* Trap.  */
2246     gen_trap(s);
2247 
2248     gen_set_label(lab);
2249     return DISAS_NEXT;
2250 }
2251 
/*
 * CONVERT UNICODE (CU12/CU14/CU21/CU24/CU41/CU42): insn->data encodes
 * the source/destination format pair and selects the helper.  The m3
 * well-formedness check flag is honoured only with the ETF3 enhancement
 * facility; otherwise it is forced to zero.
 */
static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_constant_i32(r1);
    tr2 = tcg_constant_i32(r2);
    chk = tcg_constant_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, tcg_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    set_cc_static(s);
    return DISAS_NEXT;
}
2298 
2299 #ifndef CONFIG_USER_ONLY
2300 static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2301 {
2302     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2303     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2304     TCGv_i32 func_code = tcg_constant_i32(get_field(s, i2));
2305 
2306     gen_helper_diag(tcg_env, r1, r3, func_code);
2307     return DISAS_NEXT;
2308 }
2309 #endif
2310 
static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    /* The helper packs both 32-bit results into o->out;
       unpack them into the two output halves. */
    gen_helper_divs32(o->out, tcg_env, o->in1, o->in2);
    tcg_gen_extr32_i64(o->out2, o->out, o->out);
    return DISAS_NEXT;
}
2317 
static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    /* The helper packs both 32-bit results into o->out;
       unpack them into the two output halves. */
    gen_helper_divu32(o->out, tcg_env, o->in1, o->in2);
    tcg_gen_extr32_i64(o->out2, o->out, o->out);
    return DISAS_NEXT;
}
2324 
2325 static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
2326 {
2327     TCGv_i128 t = tcg_temp_new_i128();
2328 
2329     gen_helper_divs64(t, tcg_env, o->in1, o->in2);
2330     tcg_gen_extr_i128_i64(o->out2, o->out, t);
2331     return DISAS_NEXT;
2332 }
2333 
2334 static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
2335 {
2336     TCGv_i128 t = tcg_temp_new_i128();
2337 
2338     gen_helper_divu64(t, tcg_env, o->out, o->out2, o->in2);
2339     tcg_gen_extr_i128_i64(o->out2, o->out, t);
2340     return DISAS_NEXT;
2341 }
2342 
/* Short-BFP divide: delegate entirely to the softfloat helper. */
static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
2348 
/* Long-BFP divide: delegate entirely to the softfloat helper. */
static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
2354 
/* Extended (128-bit) BFP divide: delegate entirely to the softfloat helper. */
static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
2360 
/* EXTRACT ACCESS: copy access register r2, zero-extended, into the output. */
static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, aregs[r2]));
    return DISAS_NEXT;
}
2367 
static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided: return all-ones. */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}
2374 
/* EXTRACT FPC: read the floating-point-control register into the output. */
static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
2380 
/*
 * EXTRACT PSW: r1 gets the high word of the PSW mask with the current
 * CC deposited at bits 12-13; r2 (if non-zero) gets the low word.
 */
static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t_cc = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    gen_op_calc_cc(s);
    tcg_gen_extu_i32_i64(t_cc, cc_op);
    tcg_gen_shri_i64(t, psw_mask, 32);
    tcg_gen_deposit_i64(t, t, t_cc, 12, 2);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }
    return DISAS_NEXT;
}
2400 
2401 static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2402 {
2403     int r1 = get_field(s, r1);
2404     TCGv_i32 ilen;
2405     TCGv_i64 v1;
2406 
2407     /* Nested EXECUTE is not allowed.  */
2408     if (unlikely(s->ex_value)) {
2409         gen_program_exception(s, PGM_EXECUTE);
2410         return DISAS_NORETURN;
2411     }
2412 
2413     update_psw_addr(s);
2414     update_cc_op(s);
2415 
2416     if (r1 == 0) {
2417         v1 = tcg_constant_i64(0);
2418     } else {
2419         v1 = regs[r1];
2420     }
2421 
2422     ilen = tcg_constant_i32(s->ilen);
2423     gen_helper_ex(tcg_env, ilen, v1, o->in2);
2424 
2425     return DISAS_PC_CC_UPDATED;
2426 }
2427 
2428 static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2429 {
2430     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2431 
2432     if (!m34) {
2433         return DISAS_NORETURN;
2434     }
2435     gen_helper_fieb(o->out, tcg_env, o->in2, m34);
2436     return DISAS_NEXT;
2437 }
2438 
2439 static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2440 {
2441     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2442 
2443     if (!m34) {
2444         return DISAS_NORETURN;
2445     }
2446     gen_helper_fidb(o->out, tcg_env, o->in2, m34);
2447     return DISAS_NEXT;
2448 }
2449 
2450 static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2451 {
2452     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2453 
2454     if (!m34) {
2455         return DISAS_NORETURN;
2456     }
2457     gen_helper_fixb(o->out_128, tcg_env, o->in2_128, m34);
2458     return DISAS_NEXT;
2459 }
2460 
/*
 * FIND LEFTMOST ONE: R1 gets the leading-zero count (64 for input 0),
 * R1+1 gets the input with the found bit cleared.
 */
static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return DISAS_NEXT;
}
2480 
/*
 * INSERT CHARACTERS UNDER MASK: insert consecutive bytes from memory
 * into the byte positions of r1 selected by the m3 mask.  Contiguous
 * masks become a single sized load; sparse masks loop byte by byte.
 * base (s->insn->data) is the bit offset of the targeted 32-bit window
 * within the 64-bit register — presumably 0 (ICM) or 32 (ICMH); confirm
 * against the insn table.  CC is derived from the inserted bits only,
 * via the ccm mask.
 */
static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, len, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t ccm;

    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit load.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
        len = 32;
        goto one_insert;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit load.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
        len = 16;
        goto one_insert;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit load.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        len = 8;
        goto one_insert;

    one_insert:
        pos = base + ctz32(m3) * 8;
        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
        ccm = ((1ull << len) - 1) << pos;
        break;

    case 0:
        /* Recognize access exceptions for the first byte.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;

    default:
        /* This is going to be a sequence of loads and inserts.  */
        pos = base + 32 - 8;
        ccm = 0;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
                tcg_gen_addi_i64(o->in2, o->in2, 1);
                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
                ccm |= 0xffull << pos;
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }

    tcg_gen_movi_i64(tmp, ccm);
    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
    return DISAS_NEXT;
}
2545 
2546 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2547 {
2548     int shift = s->insn->data & 0xff;
2549     int size = s->insn->data >> 8;
2550     tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2551     return DISAS_NEXT;
2552 }
2553 
2554 static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
2555 {
2556     TCGv_i64 t1, t2;
2557 
2558     gen_op_calc_cc(s);
2559     t1 = tcg_temp_new_i64();
2560     tcg_gen_extract_i64(t1, psw_mask, 40, 4);
2561     t2 = tcg_temp_new_i64();
2562     tcg_gen_extu_i32_i64(t2, cc_op);
2563     tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
2564     tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
2565     return DISAS_NEXT;
2566 }
2567 
2568 #ifndef CONFIG_USER_ONLY
2569 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2570 {
2571     TCGv_i32 m4;
2572 
2573     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2574         m4 = tcg_constant_i32(get_field(s, m4));
2575     } else {
2576         m4 = tcg_constant_i32(0);
2577     }
2578     gen_helper_idte(tcg_env, o->in1, o->in2, m4);
2579     return DISAS_NEXT;
2580 }
2581 
2582 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2583 {
2584     TCGv_i32 m4;
2585 
2586     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2587         m4 = tcg_constant_i32(get_field(s, m4));
2588     } else {
2589         m4 = tcg_constant_i32(0);
2590     }
2591     gen_helper_ipte(tcg_env, o->in1, o->in2, m4);
2592     return DISAS_NEXT;
2593 }
2594 
/* INSERT STORAGE KEY EXTENDED: delegate entirely to the helper. */
static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
    gen_helper_iske(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}
2600 #endif
2601 
2602 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2603 {
2604     int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2605     int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2606     int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2607     TCGv_i32 t_r1, t_r2, t_r3, type;
2608 
2609     switch (s->insn->data) {
2610     case S390_FEAT_TYPE_KMA:
2611         if (r3 == r1 || r3 == r2) {
2612             gen_program_exception(s, PGM_SPECIFICATION);
2613             return DISAS_NORETURN;
2614         }
2615         /* FALL THROUGH */
2616     case S390_FEAT_TYPE_KMCTR:
2617         if (r3 & 1 || !r3) {
2618             gen_program_exception(s, PGM_SPECIFICATION);
2619             return DISAS_NORETURN;
2620         }
2621         /* FALL THROUGH */
2622     case S390_FEAT_TYPE_PPNO:
2623     case S390_FEAT_TYPE_KMF:
2624     case S390_FEAT_TYPE_KMC:
2625     case S390_FEAT_TYPE_KMO:
2626     case S390_FEAT_TYPE_KM:
2627         if (r1 & 1 || !r1) {
2628             gen_program_exception(s, PGM_SPECIFICATION);
2629             return DISAS_NORETURN;
2630         }
2631         /* FALL THROUGH */
2632     case S390_FEAT_TYPE_KMAC:
2633     case S390_FEAT_TYPE_KIMD:
2634     case S390_FEAT_TYPE_KLMD:
2635         if (r2 & 1 || !r2) {
2636             gen_program_exception(s, PGM_SPECIFICATION);
2637             return DISAS_NORETURN;
2638         }
2639         /* FALL THROUGH */
2640     case S390_FEAT_TYPE_PCKMO:
2641     case S390_FEAT_TYPE_PCC:
2642         break;
2643     default:
2644         g_assert_not_reached();
2645     };
2646 
2647     t_r1 = tcg_constant_i32(r1);
2648     t_r2 = tcg_constant_i32(r2);
2649     t_r3 = tcg_constant_i32(r3);
2650     type = tcg_constant_i32(s->insn->data);
2651     gen_helper_msa(cc_op, tcg_env, t_r1, t_r2, t_r3, type);
2652     set_cc_static(s);
2653     return DISAS_NEXT;
2654 }
2655 
/* Short-BFP compare-and-signal: the helper produces the condition code. */
static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2662 
/* Long-BFP compare-and-signal: the helper produces the condition code. */
static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2669 
/* Extended-BFP compare-and-signal: the helper produces the condition code. */
static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, tcg_env, o->in1_128, o->in2_128);
    set_cc_static(s);
    return DISAS_NEXT;
}
2676 
/*
 * Common code for LOAD AND ADD (LAA/LAAG/LAAL/LAALG): atomically add
 * o->in1 to the memory operand; the ORIGINAL memory value is the real
 * instruction output.  The sum is recomputed locally for CC purposes.
 * addu64 selects the logical (carry-tracking) CC computation.
 */
static DisasJumpType help_laa(DisasContext *s, DisasOps *o, bool addu64)
{
    /* The real output is the original value in memory. */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC.  */
    if (addu64) {
        /* Track the carry-out in cc_src for the logical variant. */
        tcg_gen_movi_i64(cc_src, 0);
        tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    } else {
        tcg_gen_add_i64(o->out, o->in1, o->in2);
    }
    return DISAS_NEXT;
}
2692 
/* LOAD AND ADD with the plain (signed) CC computation. */
static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
{
    return help_laa(s, o, false);
}
2697 
/* LOAD AND ADD with the logical (carry-tracking) CC computation. */
static DisasJumpType op_laa_addu64(DisasContext *s, DisasOps *o)
{
    return help_laa(s, o, true);
}
2702 
static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory. */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the AND for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2713 
static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory. */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the OR for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2724 
static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory. */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the XOR for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2735 
/* Widen short BFP to long BFP: delegate entirely to the helper. */
static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}
2741 
2742 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2743 {
2744     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2745 
2746     if (!m34) {
2747         return DISAS_NORETURN;
2748     }
2749     gen_helper_ledb(o->out, tcg_env, o->in2, m34);
2750     return DISAS_NEXT;
2751 }
2752 
2753 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2754 {
2755     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2756 
2757     if (!m34) {
2758         return DISAS_NORETURN;
2759     }
2760     gen_helper_ldxb(o->out, tcg_env, o->in2_128, m34);
2761     return DISAS_NEXT;
2762 }
2763 
2764 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2765 {
2766     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2767 
2768     if (!m34) {
2769         return DISAS_NORETURN;
2770     }
2771     gen_helper_lexb(o->out, tcg_env, o->in2_128, m34);
2772     return DISAS_NEXT;
2773 }
2774 
/* Widen long BFP to extended (128-bit) BFP: delegate to the helper. */
static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out_128, tcg_env, o->in2);
    return DISAS_NEXT;
}
2780 
/* Widen short BFP to extended (128-bit) BFP: delegate to the helper. */
static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out_128, tcg_env, o->in2);
    return DISAS_NEXT;
}
2786 
/* Place the 32-bit operand into the high half of the 64-bit output. */
static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
{
    tcg_gen_shli_i64(o->out, o->in2, 32);
    return DISAS_NEXT;
}
2792 
/* Keep only the low 31 bits (a 31-bit address), clearing bits 31-63. */
static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return DISAS_NEXT;
}
2798 
/* Load a sign-extended byte from the in2 address. */
static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_SB);
    return DISAS_NEXT;
}
2804 
/* Load a zero-extended byte from the in2 address. */
static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_UB);
    return DISAS_NEXT;
}
2810 
/* Load a sign-extended 16-bit value from the in2 address. */
static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TESW);
    return DISAS_NEXT;
}
2816 
/* Load a zero-extended 16-bit value from the in2 address. */
static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUW);
    return DISAS_NEXT;
}
2822 
/* Load a sign-extended 32-bit value; insn->data may add MemOp flags. */
static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
                       MO_TESL | s->insn->data);
    return DISAS_NEXT;
}
2829 
/* Load a zero-extended 32-bit value; insn->data may add MemOp flags. */
static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
                       MO_TEUL | s->insn->data);
    return DISAS_NEXT;
}
2836 
/* Load a 64-bit value; insn->data may add MemOp flags (e.g. alignment). */
static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s),
                        MO_TEUQ | s->insn->data);
    return DISAS_NEXT;
}
2843 
2844 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2845 {
2846     TCGLabel *lab = gen_new_label();
2847     store_reg32_i64(get_field(s, r1), o->in2);
2848     /* The value is stored even in case of trap. */
2849     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2850     gen_trap(s);
2851     gen_set_label(lab);
2852     return DISAS_NEXT;
2853 }
2854 
2855 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2856 {
2857     TCGLabel *lab = gen_new_label();
2858     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUQ);
2859     /* The value is stored even in case of trap. */
2860     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2861     gen_trap(s);
2862     gen_set_label(lab);
2863     return DISAS_NEXT;
2864 }
2865 
2866 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2867 {
2868     TCGLabel *lab = gen_new_label();
2869     store_reg32h_i64(get_field(s, r1), o->in2);
2870     /* The value is stored even in case of trap. */
2871     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2872     gen_trap(s);
2873     gen_set_label(lab);
2874     return DISAS_NEXT;
2875 }
2876 
2877 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2878 {
2879     TCGLabel *lab = gen_new_label();
2880 
2881     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
2882     /* The value is stored even in case of trap. */
2883     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2884     gen_trap(s);
2885     gen_set_label(lab);
2886     return DISAS_NEXT;
2887 }
2888 
2889 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2890 {
2891     TCGLabel *lab = gen_new_label();
2892     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2893     /* The value is stored even in case of trap. */
2894     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2895     gen_trap(s);
2896     gen_set_label(lab);
2897     return DISAS_NEXT;
2898 }
2899 
/*
 * LOAD * ON CONDITION / SELECT: out = condition ? in2 : in1, where the
 * condition is decoded from m3 (LOC*) or m4 (SEL*).
 */
static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    if (have_field(s, m3)) {
        /* LOAD * ON CONDITION */
        disas_jcc(s, &c, get_field(s, m3));
    } else {
        /* SELECT */
        disas_jcc(s, &c, get_field(s, m4));
    }

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
    } else {
        /* Widen the 32-bit comparison result so movcond_i64 can use it. */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);

        z = tcg_constant_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
    }

    return DISAS_NEXT;
}
2930 
2931 #ifndef CONFIG_USER_ONLY
2932 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
2933 {
2934     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2935     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2936 
2937     gen_helper_lctl(tcg_env, r1, o->in2, r3);
2938     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
2939     s->exit_to_mainloop = true;
2940     return DISAS_TOO_MANY;
2941 }
2942 
2943 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
2944 {
2945     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2946     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2947 
2948     gen_helper_lctlg(tcg_env, r1, o->in2, r3);
2949     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
2950     s->exit_to_mainloop = true;
2951     return DISAS_TOO_MANY;
2952 }
2953 
/* LOAD REAL ADDRESS: translate via helper, which also produces the CC. */
static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
    gen_helper_lra(o->out, tcg_env, o->out, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2960 
/* LOAD PROGRAM PARAMETER: store the operand into env->pp. */
static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
{
    tcg_gen_st_i64(o->in2, tcg_env, offsetof(CPUS390XState, pp));
    return DISAS_NEXT;
}
2966 
/* LOAD PSW: load an 8-byte short-format PSW and make it current. */
static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 mask, addr;

    per_breaking_event(s);

    /*
     * Convert the short PSW into the normal PSW, similar to what
     * s390_cpu_load_normal() does.
     */
    mask = tcg_temp_new_i64();
    addr = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(mask, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN_8);
    tcg_gen_andi_i64(addr, mask, PSW_MASK_SHORT_ADDR);
    tcg_gen_andi_i64(mask, mask, PSW_MASK_SHORT_CTRL);
    tcg_gen_xori_i64(mask, mask, PSW_MASK_SHORTPSW);
    gen_helper_load_psw(tcg_env, mask, addr);
    return DISAS_NORETURN;
}
2986 
/* LPSWE: load a 16-byte extended PSW (mask then address) and install it. */
static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    /* First doubleword (PSW mask) must be doubleword-aligned. */
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUQ | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld_i64(t2, o->in2, get_mem_index(s), MO_TEUQ);
    gen_helper_load_psw(tcg_env, t1, t2);
    return DISAS_NORETURN;
}
3002 #endif
3003 
/* LAM: load access registers r1..r3 from memory via the lam helper. */
static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lam(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}
3012 
/*
 * LM/LMY: load the low 32 bits of registers r1..r3 (wrapping mod 16)
 * from consecutive words at the address in o->in2.
 */
static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32_i64(r1, t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32_i64(r1, t1);
    }
    return DISAS_NEXT;
}
3052 
/*
 * LMH: like op_lm32, but the loaded words go into the HIGH halves of
 * registers r1..r3 (store_reg32h_i64), wrapping mod 16.
 */
static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32h_i64(r1, t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32h_i64(r1, t1);
    }
    return DISAS_NEXT;
}
3092 
/*
 * LMG: load full 64-bit registers r1..r3 (wrapping mod 16) from
 * consecutive doublewords at the address in o->in2.
 */
static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(regs[r3], t2, get_mem_index(s), MO_TEUQ);
    /* Commit r1 only after both probe loads succeeded. */
    tcg_gen_mov_i64(regs[r1], t1);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
    }
    return DISAS_NEXT;
}
3129 
/*
 * LPD/LPDG: load two operands "disjointly".  Outside of a parallel
 * context the two plain loads are already interlocked; in a parallel
 * context we punt to the exclusive-execution slow path.
 */
static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    MemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step.  */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return DISAS_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
    a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);

    /* ... and indicate that we performed them while interlocked.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
3153 
/* LPQ: single aligned 128-bit load into the out_128 temp. */
static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
{
    o->out_128 = tcg_temp_new_i128();
    tcg_gen_qemu_ld_i128(o->out_128, o->in2, get_mem_index(s),
                         MO_TE | MO_128 | MO_ALIGN);
    return DISAS_NEXT;
}
3161 
3162 #ifndef CONFIG_USER_ONLY
/* LURA/LURAG: load using the real-address MMU index; width in insn->data. */
static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
    return DISAS_NEXT;
}
3168 #endif
3169 
3170 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3171 {
3172     tcg_gen_andi_i64(o->out, o->in2, -256);
3173     return DISAS_NEXT;
3174 }
3175 
/*
 * LCBB: load the count of bytes to the next block boundary, capped at 16.
 * m3 selects the block size (64 << m3); m3 > 6 is a specification error.
 */
static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
{
    const int64_t block_size = (1ull << (get_field(s, m3) + 6));

    if (get_field(s, m3) > 6) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* (block_size - addr % block_size) computed as -(addr | -block_size). */
    tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
    tcg_gen_neg_i64(o->addr1, o->addr1);
    tcg_gen_movi_i64(o->out, 16);
    tcg_gen_umin_i64(o->out, o->out, o->addr1);
    gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
    return DISAS_NEXT;
}
3192 
/*
 * MC: monitor call.  Class > 15 is a specification error; otherwise the
 * helper decides whether to raise a monitor event (system mode only).
 */
static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
{
    const uint8_t monitor_class = get_field(s, i2);

    if (monitor_class & 0xf0) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

#if !defined(CONFIG_USER_ONLY)
    gen_helper_monitor_call(tcg_env, o->addr1,
                            tcg_constant_i32(monitor_class));
#endif
    /* Defaults to a NOP. */
    return DISAS_NEXT;
}
3209 
/* Generic move: steal the in2 temp as the output, avoiding a copy. */
static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->in2 = NULL;
    return DISAS_NEXT;
}
3216 
/*
 * Move for LPTEA-style insns: forward in2 as the result and additionally
 * set access register r1 according to the current addressing mode
 * (primary=0, AR mode copies AR b2, secondary=1, home=2).
 */
static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    TCGv ar1 = tcg_temp_new_i64();
    int r1 = get_field(s, r1);

    o->out = o->in2;
    o->in2 = NULL;

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        if (b2) {
            tcg_gen_ld32u_i64(ar1, tcg_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, tcg_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}
3248 
/* Pair move: hand both input temps over as the (out, out2) pair. */
static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->in1 = NULL;
    o->in2 = NULL;
    return DISAS_NEXT;
}
3257 
/* MVC: memory-to-memory move of l1+1 bytes via the mvc helper. */
static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvc(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3265 
/* MVCRL: move right-to-left; length comes from r0 at runtime. */
static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
{
    gen_helper_mvcrl(tcg_env, regs[0], o->addr1, o->in2);
    return DISAS_NEXT;
}
3271 
/* MVCIN: move inverse (byte-reversed) of l1+1 bytes via the helper. */
static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvcin(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3279 
3280 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3281 {
3282     int r1 = get_field(s, r1);
3283     int r2 = get_field(s, r2);
3284     TCGv_i32 t1, t2;
3285 
3286     /* r1 and r2 must be even.  */
3287     if (r1 & 1 || r2 & 1) {
3288         gen_program_exception(s, PGM_SPECIFICATION);
3289         return DISAS_NORETURN;
3290     }
3291 
3292     t1 = tcg_constant_i32(r1);
3293     t2 = tcg_constant_i32(r2);
3294     gen_helper_mvcl(cc_op, tcg_env, t1, t2);
3295     set_cc_static(s);
3296     return DISAS_NEXT;
3297 }
3298 
3299 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3300 {
3301     int r1 = get_field(s, r1);
3302     int r3 = get_field(s, r3);
3303     TCGv_i32 t1, t3;
3304 
3305     /* r1 and r3 must be even.  */
3306     if (r1 & 1 || r3 & 1) {
3307         gen_program_exception(s, PGM_SPECIFICATION);
3308         return DISAS_NORETURN;
3309     }
3310 
3311     t1 = tcg_constant_i32(r1);
3312     t3 = tcg_constant_i32(r3);
3313     gen_helper_mvcle(cc_op, tcg_env, t1, o->in2, t3);
3314     set_cc_static(s);
3315     return DISAS_NEXT;
3316 }
3317 
3318 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3319 {
3320     int r1 = get_field(s, r1);
3321     int r3 = get_field(s, r3);
3322     TCGv_i32 t1, t3;
3323 
3324     /* r1 and r3 must be even.  */
3325     if (r1 & 1 || r3 & 1) {
3326         gen_program_exception(s, PGM_SPECIFICATION);
3327         return DISAS_NORETURN;
3328     }
3329 
3330     t1 = tcg_constant_i32(r1);
3331     t3 = tcg_constant_i32(r3);
3332     gen_helper_mvclu(cc_op, tcg_env, t1, o->in2, t3);
3333     set_cc_static(s);
3334     return DISAS_NEXT;
3335 }
3336 
/* MVCOS: move with optional specifications; key/length in register r3. */
static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    gen_helper_mvcos(cc_op, tcg_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3344 
3345 #ifndef CONFIG_USER_ONLY
/* MVCP: move to primary.  Note R1 is encoded in the l1 field position. */
static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    int r3 = get_field(s, r3);
    gen_helper_mvcp(cc_op, tcg_env, regs[r1], o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3354 
/* MVCS: move to secondary.  Note R1 is encoded in the l1 field position. */
static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    int r3 = get_field(s, r3);
    gen_helper_mvcs(cc_op, tcg_env, regs[r1], o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3363 #endif
3364 
/* MVN: move numerics (low nibbles) of l1+1 bytes via the mvn helper. */
static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvn(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3372 
/* MVO: move with offset, implemented by the mvo helper. */
static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvo(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3380 
/* MVPG: move page; r0 carries the runtime flags, CC from the helper. */
static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_mvpg(cc_op, tcg_env, regs[0], t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3390 
/* MVST: move string via the mvst helper; CC comes from the helper. */
static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_mvst(cc_op, tcg_env, t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3400 
/* MVZ: move zones (high nibbles) of l1+1 bytes via the mvz helper. */
static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvz(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3408 
/* Generic 64x64 -> low-64 multiply. */
static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3414 
/* Unsigned 64x64 -> 128 multiply: high half in out, low half in out2. */
static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3420 
/* Signed 64x64 -> 128 multiply: high half in out, low half in out2. */
static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
{
    tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3426 
/* MEEB: short-BFP multiply, done out of line by the meeb helper. */
static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3432 
/* MDEB: short-to-long BFP multiply, via the mdeb helper. */
static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3438 
/* MDB: long-BFP multiply, via the mdb helper. */
static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3444 
/* MXB: extended (128-bit) BFP multiply, via the mxb helper. */
static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
3450 
/* MXDB: long-to-extended BFP multiply, via the mxdb helper. */
static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out_128, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3456 
/* MAEB: short-BFP multiply-and-add; addend from float register r3. */
static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_maeb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3463 
/* MADB: long-BFP multiply-and-add; addend from float register r3. */
static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_madb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3470 
/* MSEB: short-BFP multiply-and-subtract; subtrahend from float reg r3. */
static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_mseb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3477 
/* MSDB: long-BFP multiply-and-subtract; subtrahend from float reg r3. */
static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_msdb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3484 
3485 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3486 {
3487     TCGv_i64 z = tcg_constant_i64(0);
3488     TCGv_i64 n = tcg_temp_new_i64();
3489 
3490     tcg_gen_neg_i64(n, o->in2);
3491     tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3492     return DISAS_NEXT;
3493 }
3494 
/* Negative-absolute for 32-bit float images: force the sign bit on. */
static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
3500 
/* Negative-absolute for 64-bit float images: force the sign bit on. */
static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
3506 
/* Negative-absolute for 128-bit float images: set sign in the high half. */
static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3513 
/* NC: memory-to-memory AND of l1+1 bytes; CC from the helper. */
static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_nc(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3522 
/* Two's-complement negation of the second operand. */
static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3528 
/* Negate a 32-bit float image by flipping its sign bit. */
static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
3534 
/* Negate a 64-bit float image by flipping its sign bit. */
static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
3540 
/* Negate a 128-bit float image: flip the sign bit in the high half. */
static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3547 
/* OC: memory-to-memory OR of l1+1 bytes; CC from the helper. */
static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_oc(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3556 
/* Generic 64-bit OR. */
static DisasJumpType op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3562 
/*
 * OR immediate into a sub-field of the register.  insn->data packs the
 * field's bit position (low byte) and width (next byte).
 */
static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
3578 
/*
 * OR to storage (OI and friends).  With interlocked-access-2 the OR is
 * done atomically in memory; otherwise a plain load/modify/store.  The
 * OR is recomputed either way so CC can be derived from o->out.
 */
static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
3599 
/* PACK: implemented by the pack helper; length from the l1 field. */
static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_pack(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3607 
/* PKA: pack ASCII; second-operand length l2+1 is limited to 32 bytes. */
static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l2 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l2);
    gen_helper_pka(tcg_env, o->addr1, o->in2, l);
    return DISAS_NEXT;
}
3622 
/* PKU: pack Unicode; length l2+1 must be even and at most 64 bytes. */
static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l2 & 1) || (l2 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l2);
    gen_helper_pku(tcg_env, o->addr1, o->in2, l);
    return DISAS_NEXT;
}
3637 
/*
 * POPCNT: with m3 bit 8 and MISC_INSTRUCTION_EXT3, a full 64-bit popcount;
 * otherwise the helper computes the legacy per-byte counts.
 */
static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
{
    const uint8_t m3 = get_field(s, m3);

    if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) {
        tcg_gen_ctpop_i64(o->out, o->in2);
    } else {
        gen_helper_popcnt(o->out, o->in2);
    }
    return DISAS_NEXT;
}
3649 
3650 #ifndef CONFIG_USER_ONLY
/* PTLB: purge TLB, done entirely in the helper. */
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
    gen_helper_ptlb(tcg_env);
    return DISAS_NEXT;
}
3656 #endif
3657 
/*
 * RISBG/RISBGN/RISBHG/RISBLG: rotate-then-insert-selected-bits.
 * i3/i4 delimit the inserted bit range, i5 is the rotate amount, and
 * i4 bit 0x80 requests zeroing of the non-inserted bits.  Fast paths
 * use extract/deposit when the masks line up.
 */
static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  */
    switch (s->fields.op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 = (i3 & 31) + 32;
        i4 = (i4 & 31) + 32;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2. */
    if (i3 <= i4) {
        /* [0...i3---i4...63] */
        mask = (-1ull >> i3) & (-1ull << (63 - i4));
    } else {
        /* [0---i4...i3---63] */
        mask = (-1ull >> i3) | (-1ull << (63 - i4));
    }
    /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register.  */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;

    /* In some cases we can implement this with extract.  */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return DISAS_NEXT;
    }

    /* In some cases we can implement this with deposit.  */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO.  */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output.  */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return DISAS_NEXT;
}
3745 
3746 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3747 {
3748     int i3 = get_field(s, i3);
3749     int i4 = get_field(s, i4);
3750     int i5 = get_field(s, i5);
3751     TCGv_i64 orig_out;
3752     uint64_t mask;
3753 
3754     /* If this is a test-only form, arrange to discard the result.  */
3755     if (i3 & 0x80) {
3756         tcg_debug_assert(o->out != NULL);
3757         orig_out = o->out;
3758         o->out = tcg_temp_new_i64();
3759         tcg_gen_mov_i64(o->out, orig_out);
3760     }
3761 
3762     i3 &= 63;
3763     i4 &= 63;
3764     i5 &= 63;
3765 
3766     /* MASK is the set of bits to be operated on from R2.
3767        Take care for I3/I4 wraparound.  */
3768     mask = ~0ull >> i3;
3769     if (i3 <= i4) {
3770         mask ^= ~0ull >> i4 >> 1;
3771     } else {
3772         mask |= ~(~0ull >> i4 >> 1);
3773     }
3774 
3775     /* Rotate the input as necessary.  */
3776     tcg_gen_rotli_i64(o->in2, o->in2, i5);
3777 
3778     /* Operate.  */
3779     switch (s->fields.op2) {
3780     case 0x54: /* AND */
3781         tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3782         tcg_gen_and_i64(o->out, o->out, o->in2);
3783         break;
3784     case 0x56: /* OR */
3785         tcg_gen_andi_i64(o->in2, o->in2, mask);
3786         tcg_gen_or_i64(o->out, o->out, o->in2);
3787         break;
3788     case 0x57: /* XOR */
3789         tcg_gen_andi_i64(o->in2, o->in2, mask);
3790         tcg_gen_xor_i64(o->out, o->out, o->in2);
3791         break;
3792     default:
3793         abort();
3794     }
3795 
3796     /* Set the CC.  */
3797     tcg_gen_andi_i64(cc_dst, o->out, mask);
3798     set_cc_nz_u64(s, cc_dst);
3799     return DISAS_NEXT;
3800 }
3801 
/* Byte-swap the low 16 bits; upper bits of the result are zeroed. */
static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}
3807 
/* Byte-swap the low 32 bits; upper bits of the result are zeroed. */
static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}
3813 
/* Byte-swap the full 64-bit operand. */
static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3819 
3820 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3821 {
3822     TCGv_i32 t1 = tcg_temp_new_i32();
3823     TCGv_i32 t2 = tcg_temp_new_i32();
3824     TCGv_i32 to = tcg_temp_new_i32();
3825     tcg_gen_extrl_i64_i32(t1, o->in1);
3826     tcg_gen_extrl_i64_i32(t2, o->in2);
3827     tcg_gen_rotl_i32(to, t1, t2);
3828     tcg_gen_extu_i32_i64(o->out, to);
3829     return DISAS_NEXT;
3830 }
3831 
/* RLLG: 64-bit rotate left. */
static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3837 
3838 #ifndef CONFIG_USER_ONLY
/* RRBE: reset reference bit extended, via the rrbe helper; CC from it. */
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
    gen_helper_rrbe(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3845 
/* SACF: set address-space control via the sacf helper. */
static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
    gen_helper_sacf(tcg_env, o->in2);
    /* Addressing mode has changed, so end the block.  */
    return DISAS_TOO_MANY;
}
3852 #endif
3853 
/*
 * SAM24/SAM31/SAM64: set addressing mode.  insn->data selects the mode
 * (0/1/3) and determines the address mask checked against the current PC.
 */
static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    s->pc_tmp &= mask;

    /* Write the new mode into PSW mask bits 31-32. */
    tsam = tcg_constant_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);

    /* Always exit the TB, since we (may have) changed execution mode.  */
    return DISAS_TOO_MANY;
}
3887 
/* SAR: store the operand into access register r1. */
static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_st32_i64(o->in2, tcg_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}
3894 
/* SEB: short-BFP subtract, via the seb helper. */
static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3900 
/* SDB: long-BFP subtract, via the sdb helper. */
static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3906 
/* SXB: extended (128-bit) BFP subtract, via the sxb helper. */
static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
3912 
/* SQEB: short-BFP square root, via the sqeb helper. */
static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}
3918 
/* SQDB: long-BFP square root, via the sqdb helper. */
static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}
3924 
/* SQXB: extended (128-bit) BFP square root, via the sqxb helper. */
static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out_128, tcg_env, o->in2_128);
    return DISAS_NEXT;
}
3930 
3931 #ifndef CONFIG_USER_ONLY
/* SERVC: service call (SCLP), via the servc helper; CC from it. */
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
    gen_helper_servc(cc_op, tcg_env, o->in2, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}
3938 
/* SIGP: signal processor, via the sigp helper; CC from it. */
static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_sigp(cc_op, tcg_env, o->in2, r1, r3);
    set_cc_static(s);
    return DISAS_NEXT;
}
3948 #endif
3949 
/*
 * STOC/STOCG/STOCFH: store on condition.  Branch around the store when
 * the (inverted) condition from m3 does not hold; insn->data selects
 * the store width/half.
 */
static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }

    r1 = get_field(s, r1);
    a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUQ);
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUL);
        break;
    case 2: /* STOCFH */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st_i64(h, a, get_mem_index(s), MO_TEUL);
        break;
    default:
        g_assert_not_reached();
    }

    gen_set_label(lab);
    return DISAS_NEXT;
}
3991 
/*
 * SLA/SLAG: arithmetic shift left.  insn->data is the sign-bit index
 * (31 or 63); for the 32-bit form the value is pre-shifted so the CC
 * computation sees the sign in bit 63.
 */
static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;
    uint64_t sign = 1ull << s->insn->data;
    if (s->insn->data == 31) {
        t = tcg_temp_new_i64();
        tcg_gen_shli_i64(t, o->in1, 32);
    } else {
        t = o->in1;
    }
    gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}
4011 
4012 static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
4013 {
4014     tcg_gen_shl_i64(o->out, o->in1, o->in2);
4015     return DISAS_NEXT;
4016 }
4017 
4018 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
4019 {
4020     tcg_gen_sar_i64(o->out, o->in1, o->in2);
4021     return DISAS_NEXT;
4022 }
4023 
4024 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
4025 {
4026     tcg_gen_shr_i64(o->out, o->in1, o->in2);
4027     return DISAS_NEXT;
4028 }
4029 
/* SET FPC: load a new floating-point-control value via helper. */
static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(tcg_env, o->in2);
    return DISAS_NEXT;
}

/* SET FPC AND SIGNAL. */
static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(tcg_env, o->in2);
    return DISAS_NEXT;
}

/* SET BFP ROUNDING MODE (SRNM, 2-bit mode). */
static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
{
    /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
    gen_helper_srnm(tcg_env, o->addr1);
    return DISAS_NEXT;
}

/* SET BFP ROUNDING MODE (SRNMB, 3-bit mode). */
static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
{
    /* Bits 0-55 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
    gen_helper_srnm(tcg_env, o->addr1);
    return DISAS_NEXT;
}

/* SET DFP ROUNDING MODE: stored inline into fpc bits 4-6. */
static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bits other than 61-63 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);

    /* No need to call a helper, we don't implement dfp */
    tcg_gen_ld32u_i64(tmp, tcg_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
    tcg_gen_st32_i64(tmp, tcg_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}

/* SET PROGRAM MASK: CC from bits 28-29 of in1, program mask from
   bits 24-27 deposited into the PSW. */
static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
{
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return DISAS_NEXT;
}
4082 
/*
 * EXTRACT CPU TIME (ECTG): loads the third operand into r3, stores the
 * first operand minus the CPU timer into GR0, and the second operand
 * address into GR1.
 */
static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s, b1);
    int d1 = get_field(s, d1);
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);
    int r3 = get_field(s, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld_i64(regs[r3], o->addr1, get_mem_index(s), MO_TEUQ);

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, tcg_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);
    return DISAS_NEXT;
}
4111 
4112 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: bits of in2 become the PSW access key. */
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return DISAS_NEXT;
}

/* SET STORAGE KEY EXTENDED. */
static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
    gen_helper_sske(tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* Raise a specification exception if reserved PSW-mask bits are set. */
static void gen_check_psw_mask(DisasContext *s)
{
    TCGv_i64 reserved = tcg_temp_new_i64();
    TCGLabel *ok = gen_new_label();

    tcg_gen_andi_i64(reserved, psw_mask, PSW_MASK_RESERVED);
    tcg_gen_brcondi_i64(TCG_COND_EQ, reserved, 0, ok);
    gen_program_exception(s, PGM_SPECIFICATION);
    gen_set_label(ok);
}

/* SET SYSTEM MASK: replace the high byte (bits 0-7) of the PSW mask. */
static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);

    gen_check_psw_mask(s);

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}

/* STORE CPU ADDRESS: the core id serves as the CPU address. */
static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, core_id));
    return DISAS_NEXT;
}
4153 #endif
4154 
/* STORE CLOCK. */
static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, tcg_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}

/* STORE CLOCK EXTENDED: 16-byte TOD value including the TOD
   programmable register. */
static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, tcg_env);
    /* 16-bit value stored in a uint32_t (only valid bits set) */
    tcg_gen_ld32u_i64(todpr, tcg_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    tcg_gen_qemu_st_i64(c1, o->in2, get_mem_index(s), MO_TEUQ);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st_i64(c2, o->in2, get_mem_index(s), MO_TEUQ);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4186 
4187 #ifndef CONFIG_USER_ONLY
/* SET CLOCK; CC from the helper. */
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
    gen_helper_sck(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET CLOCK COMPARATOR. */
static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
{
    gen_helper_sckc(tcg_env, o->in2);
    return DISAS_NEXT;
}

/* SET CLOCK PROGRAMMABLE FIELD: value taken from GR0. */
static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
{
    gen_helper_sckpf(tcg_env, regs[0]);
    return DISAS_NEXT;
}

/* STORE CLOCK COMPARATOR. */
static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
{
    gen_helper_stckc(o->out, tcg_env);
    return DISAS_NEXT;
}

/* STORE CONTROL (64-bit registers r1..r3). */
static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stctg(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}

/* STORE CONTROL (32-bit registers r1..r3). */
static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stctl(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}

/* STORE CPU ID. */
static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, tcg_env, offsetof(CPUS390XState, cpuid));
    return DISAS_NEXT;
}

/* SET CPU TIMER. */
static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
    gen_helper_spt(tcg_env, o->in2);
    return DISAS_NEXT;
}

/* STORE FACILITY LIST (into low core). */
static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
    gen_helper_stfl(tcg_env);
    return DISAS_NEXT;
}

/* STORE CPU TIMER. */
static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
    gen_helper_stpt(o->out, tcg_env);
    return DISAS_NEXT;
}

/* STORE SYSTEM INFORMATION: function code/selectors in GR0/GR1. */
static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
{
    gen_helper_stsi(cc_op, tcg_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET PREFIX. */
static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
    gen_helper_spx(tcg_env, o->in2);
    return DISAS_NEXT;
}
4267 
/*
 * Channel-subsystem I/O instructions.  These all take the subchannel
 * id in GR1 (per the instruction definitions) and defer to helpers;
 * most set the condition code from the helper result.
 */

/* CANCEL SUBCHANNEL. */
static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
{
    gen_helper_xsch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CLEAR SUBCHANNEL. */
static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
{
    gen_helper_csch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* HALT SUBCHANNEL. */
static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
{
    gen_helper_hsch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MODIFY SUBCHANNEL. */
static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
{
    gen_helper_msch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* RESET CHANNEL PATH. */
static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
{
    gen_helper_rchp(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* RESUME SUBCHANNEL. */
static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
{
    gen_helper_rsch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET ADDRESS LIMIT. */
static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
{
    gen_helper_sal(tcg_env, regs[1]);
    return DISAS_NEXT;
}

/* SET CHANNEL MONITOR. */
static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
{
    gen_helper_schm(tcg_env, regs[1], regs[2], o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
{
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return DISAS_NEXT;
}

static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    /* The instruction is suppressed if not provided. */
    return DISAS_NEXT;
}

/* START SUBCHANNEL. */
static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
    gen_helper_ssch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE SUBCHANNEL. */
static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
    gen_helper_stsch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE CHANNEL REPORT WORD. */
static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
    gen_helper_stcrw(tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST PENDING INTERRUPTION. */
static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
    gen_helper_tpi(cc_op, tcg_env, o->addr1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST SUBCHANNEL. */
static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
    gen_helper_tsch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CHANNEL SUBSYSTEM CALL. */
static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
    gen_helper_chsc(tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4376 
/* STORE PREFIX: masked to the architected prefix-address bits. */
static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, tcg_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}

/* STORE THEN AND/OR SYSTEM MASK (STNSM opcode 0xac, STOSM otherwise). */
static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s, i2);
    TCGv_i64 t;

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st_i64(t, o->addr1, get_mem_index(s), MO_UB);

    if (s->fields.op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    gen_check_psw_mask(s);

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}

/* STORE USING REAL ADDRESS; size comes from s->insn->data. */
static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* PER storage-alteration event for the real-address store. */
        update_psw_addr(s);
        gen_helper_per_store_real(tcg_env);
    }
    return DISAS_NEXT;
}
4420 #endif
4421 
/* STORE FACILITY LIST EXTENDED; CC from the helper. */
static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Store low 8 bits of in1 at address in2. */
static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_UB);
    return DISAS_NEXT;
}

/* Store low 16 bits of in1 at address in2 (big-endian). */
static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_TEUW);
    return DISAS_NEXT;
}

/* 32-bit store; extra MemOp flags (e.g. alignment) in s->insn->data. */
static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, get_mem_index(s),
                       MO_TEUL | s->insn->data);
    return DISAS_NEXT;
}

/* 64-bit store; extra MemOp flags in s->insn->data. */
static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s),
                        MO_TEUQ | s->insn->data);
    return DISAS_NEXT;
}
4453 }
4454 
/* STORE ACCESS MULTIPLE: access registers r1..r3 via helper. */
static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stam(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}

/*
 * STORE CHARACTERS UNDER MASK: store the bytes of in1 selected by m3.
 * s->insn->data is the bit position of the first candidate byte (it
 * distinguishes the low/high-word variants).  Contiguous masks become
 * single stores; anything else falls back to a byte-by-byte loop.
 */
static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    return DISAS_NEXT;
}

/* STORE MULTIPLE: registers r1..r3 (wrapping at 15), element size
   (4 or 8) in s->insn->data. */
static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_constant_i64(size);

    while (1) {
        tcg_gen_qemu_st_i64(regs[r1], o->in2, get_mem_index(s),
                            size == 8 ? MO_TEUQ : MO_TEUL);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    return DISAS_NEXT;
}

/* STORE MULTIPLE HIGH: store the high 32 bits of r1..r3. */
static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_constant_i64(4);
    TCGv_i64 t32 = tcg_constant_i64(32);

    while (1) {
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st_i64(t, o->in2, get_mem_index(s), MO_TEUL);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }
    return DISAS_NEXT;
}

/* STORE PAIR TO QUADWORD: single aligned 128-bit store. */
static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t16 = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(t16, o->out2, o->out);
    tcg_gen_qemu_st_i128(t16, o->in2, get_mem_index(s),
                         MO_TE | MO_128 | MO_ALIGN);
    return DISAS_NEXT;
}
4561 
/* SEARCH STRING; CC from the helper. */
static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_srst(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SEARCH STRING UNICODE. */
static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_srstu(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Plain subtract; CC handling done by the cout hooks. */
static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* 64-bit logical subtract: the high half of the 128-bit difference
   (0 or -1) is left in cc_src as the borrow indicator. */
static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}

/* Compute borrow (0, -1) into cc_src. */
static void compute_borrow(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_SUBU:
        /* The borrow value is already in cc_src (0,-1). */
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        /* fall through */
    case CC_OP_ADDU:
        /* Convert carry (1,0) to borrow (0,-1). */
        tcg_gen_subi_i64(cc_src, cc_src, 1);
        break;
    }
}

/* SUBTRACT LOGICAL WITH BORROW (32-bit). */
static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /* Borrow is {0, -1}, so add to subtract. */
    tcg_gen_add_i64(o->out, o->in1, cc_src);
    tcg_gen_sub_i64(o->out, o->out, o->in2);
    return DISAS_NEXT;
}

/* SUBTRACT LOGICAL WITH BORROW (64-bit); new borrow left in cc_src. */
static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /*
     * Borrow is {0, -1}, so add to subtract; replicate the
     * borrow input to produce 128-bit -1 for the addition.
     */
    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
    tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}
4641 
/* SUPERVISOR CALL: record the SVC code and instruction length, then
   raise the SVC exception.  Ends the TB. */
static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_constant_i32(get_field(s, i1) & 0xff);
    tcg_gen_st_i32(t, tcg_env, offsetof(CPUS390XState, int_svc_code));

    t = tcg_constant_i32(s->ilen);
    tcg_gen_st_i32(t, tcg_env, offsetof(CPUS390XState, int_svc_ilen));

    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
}

/* TEST ADDRESSING MODE: CC reflects the 64/31-bit mode flags. */
static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
{
    int cc = 0;

    cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
    cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
    gen_op_movi_cc(s, cc);
    return DISAS_NEXT;
}

/* TEST DATA CLASS (short BFP). */
static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST DATA CLASS (long BFP). */
static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST DATA CLASS (extended BFP). */
static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, tcg_env, o->in1_128, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4689 
4690 #ifndef CONFIG_USER_ONLY
4691 
/* TEST BLOCK; CC from the helper. */
static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
    gen_helper_testblock(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST PROTECTION. */
static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, tcg_env, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4705 
4706 #endif
4707 
/* TEST DECIMAL: l1 is a length code, so pass length = l1 + 1. */
static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_constant_i32(get_field(s, l1) + 1);

    gen_helper_tp(cc_op, tcg_env, o->addr1, l1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE. */
static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_tr(tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE EXTENDED: helper returns updated address/length pair. */
static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
{
    TCGv_i128 pair = tcg_temp_new_i128();

    gen_helper_tre(pair, tcg_env, o->out, o->out2, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, pair);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE AND TEST. */
static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_trt(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE AND TEST REVERSE. */
static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_trtr(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/*
 * TRANSLATE ONE/TWO TO ONE/TWO (TROO/TROT/TRTO/TRTT): the low opcode
 * bits select source/destination element sizes.  Without the ETF2-ENH
 * facility the m3 test-character-compare control is ignored; tst = -1
 * disables the test-character comparison in the helper.
 */
static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
    TCGv_i32 sizes = tcg_constant_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s, m3);

    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        tcg_gen_movi_i32(tst, -1);
    } else {
        /* Test character comes from GR0, sized per the opcode. */
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, tcg_env, r1, r2, tst, sizes);

    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST AND SET: atomically exchange 0xff into the byte; CC is the
   former leftmost bit of that byte. */
static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 ff = tcg_constant_i32(0xff);
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_atomic_xchg_i32(t1, o->in2, ff, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4791 
/* UNPACK. */
static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_unpk(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}

/* UNPACK ASCII. */
static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l1);
    gen_helper_unpka(cc_op, tcg_env, o->addr1, l, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* UNPACK UNICODE. */
static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l1);
    gen_helper_unpku(cc_op, tcg_env, o->addr1, l, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}


/* EXCLUSIVE OR (character): identical operands zero the destination,
   which is inlined as stores; otherwise defer to the helper. */
static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s, d1);
    int d2 = get_field(s, d2);
    int b1 = get_field(s, b1);
    int b2 = get_field(s, b2);
    int l = get_field(s, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_constant_i64(0);

        l++;
        /* Emit the widest stores the remaining length permits.  */
        while (l >= 8) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UQ);
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UL);
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UW);
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UB);
        }
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_constant_i32(l);
    gen_helper_xc(cc_op, tcg_env, t32, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Plain 64-bit XOR; CC handled by the cout hooks. */
static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* XOR IMMEDIATE on a sub-field: s->insn->data packs (size << 8) | shift. */
static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

/* XOR (immediate to storage): atomic when interlocked-access-2 is
   available, otherwise a load/xor/store sequence. */
static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

/* Produce a zero output operand. */
static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_constant_i64(0);
    return DISAS_NEXT;
}

/* Produce a zero output operand pair. */
static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_constant_i64(0);
    o->out2 = o->out;
    return DISAS_NEXT;
}
4940 
4941 #ifndef CONFIG_USER_ONLY
/* zPCI instructions: all defer to helpers; most set the CC. */

/* CALL LOGICAL PROCESSOR. */
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_clp(tcg_env, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCI LOAD. */
static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_pcilg(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCI STORE. */
static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_pcistg(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE PCI FUNCTION CONTROLS. */
static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_stpcifc(tcg_env, r1, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET INTERRUPTION CONTROLS. */
static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
{
    gen_helper_sic(tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* REFRESH PCI TRANSLATIONS. */
static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_rpcit(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCI STORE BLOCK. */
static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_pcistb(tcg_env, r1, r3, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MODIFY PCI FUNCTION CONTROLS. */
static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_mpcifc(tcg_env, r1, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}
5017 #endif
5018 
5019 #include "translate_vx.c.inc"
5020 
5021 /* ====================================================================== */
5022 /* The "Cc OUTput" generators.  Given the generated output (and in some cases
5023    the original inputs), update the various cc data structures in order to
5024    be able to compute the new condition code.  */
5025 
/* Defer cc: record the result under CC_OP_ABS_32. */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}
5030 
/* Defer cc: record the result under CC_OP_ABS_64. */
static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}
5035 
/* Defer cc: record both addends and the sum under CC_OP_ADD_32. */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}
5040 
/* Defer cc: record both addends and the sum under CC_OP_ADD_64. */
static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}
5045 
/*
 * Defer cc for 32-bit unsigned add: cc_src = high half of the 64-bit
 * result (the carry out), cc_dst = the low 32-bit result.
 */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
}
5052 
/* Defer cc for 64-bit unsigned add: cc_src (carry) set by the op; result in out. */
static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
}
5057 
/* Defer cc: signed 32-bit compare of the two inputs. */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}
5062 
/* Defer cc: signed 64-bit compare of the two inputs. */
static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}
5067 
/* Defer cc: unsigned 32-bit compare of the two inputs. */
static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}
5072 
/* Defer cc: unsigned 64-bit compare of the two inputs. */
static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}
5077 
/* Defer cc: classify a 32-bit FP result under CC_OP_NZ_F32. */
static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}
5082 
/* Defer cc: classify a 64-bit FP result under CC_OP_NZ_F64. */
static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}
5087 
/* Defer cc: classify a 128-bit FP result (high in out, low in out2). */
static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}
5092 
/* Defer cc: record the result under CC_OP_NABS_32. */
static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}
5097 
/* Defer cc: record the result under CC_OP_NABS_64. */
static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}
5102 
/* Defer cc: record the negated 32-bit result under CC_OP_COMP_32. */
static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}
5107 
/* Defer cc: record the negated 64-bit result under CC_OP_COMP_64. */
static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}
5112 
/* Defer cc: nonzero test on the low 32 bits of the result only. */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}
5118 
/* Defer cc: nonzero test on the full 64-bit result. */
static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}
5123 
/* Defer cc: signed compare of the 32-bit result against zero. */
static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}
5128 
/* Defer cc: signed compare of the 64-bit result against zero. */
static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}
5133 
/* Defer cc: record minuend, subtrahend and difference under CC_OP_SUB_32. */
static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}
5138 
/* Defer cc: record minuend, subtrahend and difference under CC_OP_SUB_64. */
static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}
5143 
/*
 * Defer cc for 32-bit unsigned subtract: cc_src = arithmetic high half
 * of the 64-bit result (borrow indicator), cc_dst = low 32-bit result.
 */
static void cout_subu32(DisasContext *s, DisasOps *o)
{
    tcg_gen_sari_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
}
5150 
/* Defer cc for 64-bit unsigned subtract: cc_src (borrow) set by the op. */
static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
}
5155 
/* Defer cc: test-under-mask, 32-bit, on value (in1) and mask (in2). */
static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}
5160 
/* Defer cc: test-under-mask, 64-bit, on value (in1) and mask (in2). */
static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
5165 
/* Defer cc: record the 32-bit signed multiply result. */
static void cout_muls32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
}
5170 
/* Defer cc: record the 64x64 signed multiply result. */
static void cout_muls64(DisasContext *s, DisasOps *o)
{
    /* out contains "high" part, out2 contains "low" part of 128 bit result */
    gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
}
5176 
5177 /* ====================================================================== */
5178 /* The "PREParation" generators.  These initialize the DisasOps.OUT fields
5179    with the TCG register to which we will write.  Used in combination with
5180    the "wout" generators, in some cases we need a new temporary, and in
5181    some cases we can write to a TCG global.  */
5182 
/* Allocate a fresh temporary for the single output. */
static void prep_new(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0
5188 
/* Allocate fresh temporaries for a pair of outputs. */
static void prep_new_P(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0
5195 
/* Allocate a fresh 128-bit temporary for the output. */
static void prep_new_x(DisasContext *s, DisasOps *o)
{
    o->out_128 = tcg_temp_new_i128();
}
#define SPEC_prep_new_x 0
5201 
/* Write directly into the r1 global register. */
static void prep_r1(DisasContext *s, DisasOps *o)
{
    o->out = regs[get_field(s, r1)];
}
#define SPEC_prep_r1 0
5207 
/* Write directly into the even/odd register pair r1, r1+1. */
static void prep_r1_P(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
}
#define SPEC_prep_r1_P SPEC_r1_even
5215 
5216 /* ====================================================================== */
5217 /* The "Write OUTput" generators.  These generally perform some non-trivial
5218    copy of data to TCG globals, or to main memory.  The trivial cases are
5219    generally handled by having a "prep" generator install the TCG global
5220    as the destination of the operation.  */
5221 
/* Store the full 64-bit result into r1. */
static void wout_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out);
}
#define SPEC_wout_r1 0
5227 
/* Store the secondary result (out2) into r1. */
static void wout_out2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out2);
}
#define SPEC_wout_out2_r1 0
5233 
/* Insert the low 8 bits of the result into r1, preserving the rest. */
static void wout_r1_8(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0
5240 
/* Insert the low 16 bits of the result into r1, preserving the rest. */
static void wout_r1_16(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0
5247 
/* Store the low 32 bits of the result into the low half of r1. */
static void wout_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32 0
5253 
/* Store the low 32 bits of the result into the high half of r1. */
static void wout_r1_32h(DisasContext *s, DisasOps *o)
{
    store_reg32h_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32h 0
5259 
/* Store out/out2 into the low halves of the even/odd pair r1, r1+1. */
static void wout_r1_P32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even
5267 
/* Split a 64-bit result across the pair: low 32 bits to r1+1, high to r1. */
static void wout_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i64 t = tcg_temp_new_i64();
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(t, o->out, 32);
    store_reg32_i64(r1, t);
}
#define SPEC_wout_r1_D32 SPEC_r1_even
5277 
/* Split a 128-bit result: low doubleword to r1+1, high doubleword to r1. */
static void wout_r1_D64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_extr_i128_i64(regs[r1 + 1], regs[r1], o->out_128);
}
#define SPEC_wout_r1_D64 SPEC_r1_even
5284 
/* Store out/out2 into the low halves of the even/odd pair r3, r3+1. */
static void wout_r3_P32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even
5292 
/* Store out/out2 into the even/odd 64-bit pair r3, r3+1. */
static void wout_r3_P64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even
5300 
/* Store a short (32-bit) FP result into fpr r1. */
static void wout_e1(DisasContext *s, DisasOps *o)
{
    store_freg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_e1 0
5306 
/* Store a long (64-bit) FP result into fpr r1. */
static void wout_f1(DisasContext *s, DisasOps *o)
{
    store_freg(get_field(s, r1), o->out);
}
#define SPEC_wout_f1 0
5312 
/* Store a 128-bit FP result into the fpr pair r1, r1+2. */
static void wout_x1(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);

    /* Split out_128 into out+out2 for cout_f128. */
    tcg_debug_assert(o->out == NULL);
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(o->out2, o->out, o->out_128);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128
5327 
/* Store a 128-bit FP result already split as out/out2 into fprs r1, r1+2. */
static void wout_x1_P(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1_P SPEC_r1_f128
5335 
5336 static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
5337 {
5338     if (get_field(s, r1) != get_field(s, r2)) {
5339         store_reg32_i64(get_field(s, r1), o->out);
5340     }
5341 }
5342 #define SPEC_wout_cond_r1r2_32 0
5343 
5344 static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
5345 {
5346     if (get_field(s, r1) != get_field(s, r2)) {
5347         store_freg32_i64(get_field(s, r1), o->out);
5348     }
5349 }
5350 #define SPEC_wout_cond_e1e2 0
5351 
/* Store the low byte of the result at the first-operand address. */
static void wout_m1_8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_UB);
}
#define SPEC_wout_m1_8 0
5357 
/* Store the low halfword of the result at the first-operand address. */
static void wout_m1_16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUW);
}
#define SPEC_wout_m1_16 0
5363 
5364 #ifndef CONFIG_USER_ONLY
/* As wout_m1_16, but with an alignment-checked store. */
static void wout_m1_16a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
5370 #endif
5371 
/* Store the low word of the result at the first-operand address. */
static void wout_m1_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUL);
}
#define SPEC_wout_m1_32 0
5377 
5378 #ifndef CONFIG_USER_ONLY
/* As wout_m1_32, but with an alignment-checked store. */
static void wout_m1_32a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
5384 #endif
5385 
/* Store the full doubleword result at the first-operand address. */
static void wout_m1_64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ);
}
#define SPEC_wout_m1_64 0
5391 
5392 #ifndef CONFIG_USER_ONLY
/* As wout_m1_64, but with an alignment-checked store. */
static void wout_m1_64a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
5398 #endif
5399 
/* Store the low word of the result at the address held in in2. */
static void wout_m2_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
}
#define SPEC_wout_m2_32 0
5405 
/* Copy the (possibly updated) second input into r1. */
static void wout_in2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1 0
5411 
/* Copy the low 32 bits of the second input into the low half of r1. */
static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
5417 
5418 /* ====================================================================== */
5419 /* The "INput 1" generators.  These load the first operand to an insn.  */
5420 
/* Load r1 into a fresh temporary. */
static void in1_r1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1));
}
#define SPEC_in1_r1 0
5426 
/* Alias r1's global directly (no copy); caller must not clobber it early. */
static void in1_r1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1)];
}
#define SPEC_in1_r1_o 0
5432 
5433 static void in1_r1_32s(DisasContext *s, DisasOps *o)
5434 {
5435     o->in1 = tcg_temp_new_i64();
5436     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
5437 }
5438 #define SPEC_in1_r1_32s 0
5439 
/* Load the low 32 bits of r1, zero-extended to 64 bits. */
static void in1_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32u 0
5446 
/* Load the high 32 bits of r1 (shifted right into the low half). */
static void in1_r1_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
}
#define SPEC_in1_r1_sr32 0
5453 
/* Load the odd register of the pair (r1+1) into a fresh temporary. */
static void in1_r1p1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even
5459 
/* Alias the odd register of the pair (r1+1) directly (no copy). */
static void in1_r1p1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1) + 1];
}
#define SPEC_in1_r1p1_o SPEC_r1_even
5465 
/* Load the low 32 bits of r1+1, sign-extended. */
static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even
5472 
/* Load the low 32 bits of r1+1, zero-extended. */
static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even
5479 
/* Assemble a 64-bit value from the pair: r1 high word, r1+1 low word. */
static void in1_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even
5487 
/* Load r2 into a fresh temporary. */
static void in1_r2(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r2));
}
#define SPEC_in1_r2 0
5493 
/* Load the high 32 bits of r2 (shifted right into the low half). */
static void in1_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
}
#define SPEC_in1_r2_sr32 0
5500 
/* Load the low 32 bits of r2, zero-extended. */
static void in1_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
}
#define SPEC_in1_r2_32u 0
5507 
/* Load r3 into a fresh temporary. */
static void in1_r3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r3));
}
#define SPEC_in1_r3 0
5513 
/* Alias r3's global directly (no copy). */
static void in1_r3_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r3)];
}
#define SPEC_in1_r3_o 0
5519 
/* Load the low 32 bits of r3, sign-extended. */
static void in1_r3_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32s 0
5526 
/* Load the low 32 bits of r3, zero-extended. */
static void in1_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32u 0
5533 
/* Assemble a 64-bit value from the pair: r3 high word, r3+1 low word. */
static void in1_r3_D32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even
5541 
/* Load the high 32 bits of r3 (shifted right into the low half). */
static void in1_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32);
}
#define SPEC_in1_r3_sr32 0
5548 
/* Load the short (32-bit) FP value from fpr r1. */
static void in1_e1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(s, r1));
}
#define SPEC_in1_e1 0
5554 
/* Load the long (64-bit) FP value from fpr r1. */
static void in1_f1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r1));
}
#define SPEC_in1_f1 0
5560 
/* Load the extended (128-bit) FP value from the fpr pair starting at r1. */
static void in1_x1(DisasContext *s, DisasOps *o)
{
    o->in1_128 = load_freg_128(get_field(s, r1));
}
#define SPEC_in1_x1 SPEC_r1_f128
5566 
/* Load the high double word of an extended (128-bit) format FP number,
   i.e. the first fpr of the r2 pair. */
static void in1_x2h(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r2));
}
#define SPEC_in1_x2h SPEC_r2_f128
5573 
/* Load the long (64-bit) FP value from fpr r3. */
static void in1_f3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r3));
}
#define SPEC_in1_f3 0
5579 
/* Compute the first-operand effective address from b1/d1 (no index reg). */
static void in1_la1(DisasContext *s, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
}
#define SPEC_in1_la1 0
5585 
/* Compute the second-operand effective address (x2 optional) into addr1. */
static void in1_la2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in1_la2 0
5592 
/* Load an unsigned byte from the first-operand address. */
static void in1_m1_8u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_UB);
}
#define SPEC_in1_m1_8u 0
5600 
/* Load a signed halfword from the first-operand address. */
static void in1_m1_16s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESW);
}
#define SPEC_in1_m1_16s 0
5608 
/* Load an unsigned halfword from the first-operand address. */
static void in1_m1_16u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUW);
}
#define SPEC_in1_m1_16u 0
5616 
/* Load a signed word from the first-operand address. */
static void in1_m1_32s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESL);
}
#define SPEC_in1_m1_32s 0
5624 
/* Load an unsigned word from the first-operand address. */
static void in1_m1_32u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUL);
}
#define SPEC_in1_m1_32u 0
5632 
/* Load a doubleword from the first-operand address. */
static void in1_m1_64(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUQ);
}
#define SPEC_in1_m1_64 0
5640 
5641 /* ====================================================================== */
5642 /* The "INput 2" generators.  These load the second operand to an insn.  */
5643 
/* Alias r1's global directly as the second input (no copy). */
static void in2_r1_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r1)];
}
#define SPEC_in2_r1_o 0
5649 
/* Load the low 16 bits of r1, zero-extended. */
static void in2_r1_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_16u 0
5656 
/* Load the low 32 bits of r1, zero-extended. */
static void in2_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_32u 0
5663 
/* Assemble a 64-bit value from the pair: r1 high word, r1+1 low word. */
static void in2_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even
5671 
/* Load r2 into a fresh temporary. */
static void in2_r2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r2));
}
#define SPEC_in2_r2 0
5677 
/* Alias r2's global directly (no copy). */
static void in2_r2_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r2)];
}
#define SPEC_in2_r2_o 0
5683 
/* Load r2 only when the field is nonzero; in2 stays NULL for r2 == 0. */
static void in2_r2_nz(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0
5692 
/* Load the low 8 bits of r2, sign-extended. */
static void in2_r2_8s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8s 0
5699 
/* Load the low 8 bits of r2, zero-extended. */
static void in2_r2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8u 0
5706 
/* Load the low 16 bits of r2, sign-extended. */
static void in2_r2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16s 0
5713 
/* Load the low 16 bits of r2, zero-extended. */
static void in2_r2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16u 0
5720 
/* Load r3 into a fresh temporary. */
static void in2_r3(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r3));
}
#define SPEC_in2_r3 0
5726 
/* Assemble a 128-bit value from the pair: r3 high dword, r3+1 low dword. */
static void in2_r3_D64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in2_128 = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(o->in2_128, regs[r3 + 1], regs[r3]);
}
#define SPEC_in2_r3_D64 SPEC_r3_even
5734 
/* Load the high 32 bits of r3 (shifted right into the low half). */
static void in2_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
}
#define SPEC_in2_r3_sr32 0
5741 
/* Load the low 32 bits of r3, zero-extended. */
static void in2_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
}
#define SPEC_in2_r3_32u 0
5748 
/* Load the low 32 bits of r2, sign-extended. */
static void in2_r2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32s 0
5755 
/* Load the low 32 bits of r2, zero-extended. */
static void in2_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32u 0
5762 
/* Load the high 32 bits of r2 (shifted right into the low half). */
static void in2_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
}
#define SPEC_in2_r2_sr32 0
5769 
/* Load the short (32-bit) FP value from fpr r2. */
static void in2_e2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(s, r2));
}
#define SPEC_in2_e2 0
5775 
/* Load the long (64-bit) FP value from fpr r2. */
static void in2_f2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2));
}
#define SPEC_in2_f2 0
5781 
/* Load the extended (128-bit) FP value from the fpr pair starting at r2. */
static void in2_x2(DisasContext *s, DisasOps *o)
{
    o->in2_128 = load_freg_128(get_field(s, r2));
}
#define SPEC_in2_x2 SPEC_r2_f128
5787 
/* Load the low double word of an extended (128-bit) format FP number,
   i.e. the second fpr of the r2 pair (r2 + 2). */
static void in2_x2l(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2) + 2);
}
#define SPEC_in2_x2l SPEC_r2_f128
5794 
/* Load the address in r2, wrapped for the current addressing mode. */
static void in2_ra2(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);

    /* Note: *don't* treat !r2 as 0, use the reg value. */
    o->in2 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
}
#define SPEC_in2_ra2 0
5804 
5805 static void in2_ra2_E(DisasContext *s, DisasOps *o)
5806 {
5807     return in2_ra2(s, o);
5808 }
5809 #define SPEC_in2_ra2_E SPEC_r2_even
5810 
/* Compute the second-operand effective address (x2 optional) into in2. */
static void in2_a2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in2_a2 0
5817 
/*
 * Resolve a relative-immediate (RI) branch/load target for field i2.
 * For the immediate form the target is pc_next + 2 * imm (halfword
 * offsets); otherwise disas_jdest has already produced the TCGv target.
 */
static TCGv gen_ri2(DisasContext *s)
{
    TCGv ri2 = NULL;
    bool is_imm;
    int imm;

    disas_jdest(s, i2, is_imm, imm, ri2);
    if (is_imm) {
        ri2 = tcg_constant_i64(s->base.pc_next + (int64_t)imm * 2);
    }

    return ri2;
}
5831 
/* Use the relative-immediate target address as the second input. */
static void in2_ri2(DisasContext *s, DisasOps *o)
{
    o->in2 = gen_ri2(s);
}
#define SPEC_in2_ri2 0
5837 
/*
 * Compute a shift amount: d2 & 63 when there is no base register,
 * otherwise the computed b2+d2 address masked to 6 bits.
 */
static void in2_sh(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);

    if (b2 == 0) {
        o->in2 = tcg_constant_i64(d2 & 0x3f);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
    }
}
#define SPEC_in2_sh 0
5851 
/* Load an unsigned byte from the second-operand address (reuses in2). */
static void in2_m2_8u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_UB);
}
#define SPEC_in2_m2_8u 0
5858 
/* Load a signed halfword from the second-operand address. */
static void in2_m2_16s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESW);
}
#define SPEC_in2_m2_16s 0
5865 
/* Load an unsigned halfword from the second-operand address. */
static void in2_m2_16u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUW);
}
#define SPEC_in2_m2_16u 0
5872 
/* Load a signed word from the second-operand address. */
static void in2_m2_32s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESL);
}
#define SPEC_in2_m2_32s 0
5879 
/* Load an unsigned word from the second-operand address. */
static void in2_m2_32u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUL);
}
#define SPEC_in2_m2_32u 0
5886 
5887 #ifndef CONFIG_USER_ONLY
/* As in2_m2_32u, but with an alignment-checked load. */
static void in2_m2_32ua(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
5894 #endif
5895 
/* Load a doubleword from the second-operand address. */
static void in2_m2_64(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
}
#define SPEC_in2_m2_64 0
5902 
/* Load a doubleword and wrap the loaded value per the addressing mode. */
static void in2_m2_64w(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
    gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
}
#define SPEC_in2_m2_64w 0
5910 
5911 #ifndef CONFIG_USER_ONLY
/* As in2_m2_64, but with an alignment-checked load. */
static void in2_m2_64a(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
5918 #endif
5919 
/* Load a signed halfword from the relative-immediate address. */
static void in2_mri2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TESW);
}
#define SPEC_in2_mri2_16s 0
5926 
/* Load an unsigned halfword from the relative-immediate address. */
static void in2_mri2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TEUW);
}
#define SPEC_in2_mri2_16u 0
5933 
/* Load a signed word, alignment-checked, from the relative-immediate address. */
static void in2_mri2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
                       MO_TESL | MO_ALIGN);
}
#define SPEC_in2_mri2_32s 0
5941 
/* Load an unsigned word, alignment-checked, from the relative-immediate address. */
static void in2_mri2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
                       MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_mri2_32u 0
5949 
/* Load a doubleword, alignment-checked, from the relative-immediate address. */
static void in2_mri2_64(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s),
                        MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_mri2_64 0
5957 
/* Use the immediate field i2 as-is (sign per the field definition). */
static void in2_i2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64(get_field(s, i2));
}
#define SPEC_in2_i2 0
5963 
/* Use the low 8 bits of the immediate, zero-extended. */
static void in2_i2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint8_t)get_field(s, i2));
}
#define SPEC_in2_i2_8u 0
5969 
/* Use the low 16 bits of the immediate, zero-extended. */
static void in2_i2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint16_t)get_field(s, i2));
}
#define SPEC_in2_i2_16u 0
5975 
/* Use the low 32 bits of the immediate, zero-extended. */
static void in2_i2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint32_t)get_field(s, i2));
}
#define SPEC_in2_i2_32u 0
5981 
5982 static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
5983 {
5984     uint64_t i2 = (uint16_t)get_field(s, i2);
5985     o->in2 = tcg_constant_i64(i2 << s->insn->data);
5986 }
5987 #define SPEC_in2_i2_16u_shl 0
5988 
5989 static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
5990 {
5991     uint64_t i2 = (uint32_t)get_field(s, i2);
5992     o->in2 = tcg_constant_i64(i2 << s->insn->data);
5993 }
5994 #define SPEC_in2_i2_32u_shl 0
5995 
5996 #ifndef CONFIG_USER_ONLY
/* Use the raw instruction image itself as the second input. */
static void in2_insn(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64(s->fields.raw_insn);
}
#define SPEC_in2_insn 0
6002 #endif
6003 
6004 /* ====================================================================== */
6005 
6006 /* Find opc within the table of insns.  This is formulated as a switch
6007    statement so that (1) we get compile-time notice of cut-paste errors
6008    for duplicated opcodes, and (2) the compiler generates the binary
6009    search tree, rather than us having to post-process the table.  */
6010 
6011 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
6012     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
6013 
6014 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
6015     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
6016 
6017 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
6018     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
6019 
6020 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
6021 
6022 enum DisasInsnEnum {
6023 #include "insn-data.h.inc"
6024 };
6025 
6026 #undef E
6027 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
6028     .opc = OPC,                                                             \
6029     .flags = FL,                                                            \
6030     .fmt = FMT_##FT,                                                        \
6031     .fac = FAC_##FC,                                                        \
6032     .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
6033     .name = #NM,                                                            \
6034     .help_in1 = in1_##I1,                                                   \
6035     .help_in2 = in2_##I2,                                                   \
6036     .help_prep = prep_##P,                                                  \
6037     .help_wout = wout_##W,                                                  \
6038     .help_cout = cout_##CC,                                                 \
6039     .help_op = op_##OP,                                                     \
6040     .data = D                                                               \
6041  },
6042 
6043 /* Allow 0 to be used for NULL in the table below.  */
6044 #define in1_0  NULL
6045 #define in2_0  NULL
6046 #define prep_0  NULL
6047 #define wout_0  NULL
6048 #define cout_0  NULL
6049 #define op_0  NULL
6050 
6051 #define SPEC_in1_0 0
6052 #define SPEC_in2_0 0
6053 #define SPEC_prep_0 0
6054 #define SPEC_wout_0 0
6055 
/*
 * Give smaller names to the various facilities.  Each FAC_* alias maps
 * the short facility tag used in insn-data.h.inc onto the S390_FEAT_*
 * bit that must be installed for the instruction to be available.
 */
#define FAC_Z           S390_FEAT_ZARCH
#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP         S390_FEAT_DFP
#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* DFP-rounding */
#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE          S390_FEAT_EXECUTE_EXT
#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE        S390_FEAT_STFLE
#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
#define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
#define FAC_V           S390_FEAT_VECTOR /* vector facility */
#define FAC_VE          S390_FEAT_VECTOR_ENH  /* vector enhancements facility 1 */
#define FAC_VE2         S390_FEAT_VECTOR_ENH2 /* vector enhancements facility 2 */
#define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
#define FAC_MIE3        S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */
6101 
/*
 * The instruction descriptor table, generated by expanding
 * insn-data.h.inc with the initializer form of E() defined above.
 * It is indexed by the insn_<name> enumerators.
 */
static const DisasInsn insn_info[] = {
#include "insn-data.h.inc"
};

/* Redefine E() so that the same include now emits switch cases. */
#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
    case OPC: return &insn_info[insn_ ## NM];

/*
 * Look up the descriptor for a combined 16-bit (major << 8 | minor)
 * opcode.  Returns NULL for an unknown/unimplemented opcode.
 */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.h.inc"
    default:
        return NULL;
    }
}

/* Done generating tables; drop the helper macros. */
#undef F
#undef E
#undef D
#undef C
6123 
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.

   F describes the field (bit position, size, type and destination
   index); the decoded value is stored into O->c[f->indexC].  */

static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* A zero-sized field means "not present in this format".  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        /* Sign-extend R from f->size bits via the xor/subtract trick.  */
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* The raw bits are DL(12) followed by DH(8); the architected
           value is DH:DL with DH providing the sign.  */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    case 3: /* MSB stored in RXB */
        /* A 4-bit register field is widened to 5 bits by one RXB bit
           (insn bits 36-39); which RXB bit applies depends on where the
           4-bit field sits in the insn.  */
        g_assert(f->size == 4);
        switch (f->beg) {
        case 8:
            r |= extract64(insn, 63 - 36, 1) << 4;
            break;
        case 12:
            r |= extract64(insn, 63 - 37, 1) << 4;
            break;
        case 16:
            r |= extract64(insn, 63 - 38, 1) << 4;
            break;
        case 32:
            r |= extract64(insn, 63 - 39, 1) << 4;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        abort();
    }

    /*
     * Validate that the "compressed" encoding we selected above is valid.
     * I.e. we haven't made two different original fields overlap.
     */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
6184 
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.
   Also records the insn length in s->ilen and the address of the next
   sequential insn in s->pc_tmp.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        tcg_gen_st_i64(tcg_constant_i64(0), tcg_env,
                       offsetof(CPUS390XState, ex_value));

        /* Extract the values saved by EXECUTE: the modified insn image
           lives in the upper 48 bits, its length in the low nibble.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;

        /* Register insn bytes with translator so plugins work. */
        for (int i = 0; i < ilen; i++) {
            uint8_t byte = extract64(insn, 56 - (i * 8), 8);
            translator_fake_ldb(byte, pc + i);
        }
        op = insn >> 56;
    } else {
        /* Fetch the first halfword; the major opcode determines ILEN. */
        insn = ld_code2(env, s, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        /* Left-align the complete insn image in the 64-bit word, as
           expected by extract_field().  */
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, s, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Second opcode byte immediately follows the first.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode is the 4-bit field at bits 12-15.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* No secondary opcode at all.  */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(&s->fields, 0, sizeof(s->fields));
    s->fields.raw_insn = insn;
    s->fields.op = op;
    s->fields.op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);
    s->insn = info;

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(&s->fields, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
6293 
/*
 * Return true if REG is an additional-floating-point register, i.e.
 * anything other than the base registers 0, 2, 4 and 6.
 */
static bool is_afp_reg(int reg)
{
    return (reg & 1) != 0 || reg > 6;
}
6298 
/*
 * Return true if REG may name a 128-bit floating-point register pair.
 * Valid pair bases are 0,1,4,5,8,9,12,13 -- precisely the register
 * numbers with bit 1 clear.
 */
static bool is_fp_pair(int reg)
{
    return (reg & 0x2) == 0;
}
6304 
/*
 * Translate the single instruction at s->base.pc_next.  Decode the insn,
 * check privilege/facility/specification constraints, then run the
 * in1/in2/prep/op/wout/cout helper pipeline from the DisasInsn
 * descriptor.  Returns the resulting DisasJumpType; DISAS_NORETURN
 * means an exception has been raised and translation must stop.
 */
static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasOps o = {};
    bool icount = false;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s);

    /* Update insn_start now that we know the ILEN.  */
    tcg_set_insn_start_param(s->base.insn_start, 2, s->ilen);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      s->fields.op, s->fields.op2);
        gen_illegal_opcode(s);
        ret = DISAS_NORETURN;
        goto out;
    }

#ifndef CONFIG_USER_ONLY
    /* With PER tracing enabled, report the instruction fetch.  */
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_constant_i64(s->base.pc_next);
        gen_helper_per_ifetch(tcg_env, addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            ret = DISAS_NORETURN;
            goto out;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            /* DXC codes: 1 = AFP register, 2 = BFP insn, 3 = DFP insn,
               0xfe = vector insn; 0 = no data exception.  */
            uint8_t dxc = 0;

            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (insn->flags & IF_VEC) {
                dxc = 0xfe;
            }
            if (dxc) {
                gen_data_exception(dxc);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* if vector instructions not enabled, executing them is forbidden */
        if (insn->flags & IF_VEC) {
            if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
                gen_data_exception(0xfe);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* input/output is the special case for icount mode */
        if (unlikely(insn->flags & IF_IO)) {
            icount = translator_io_start(&s->base);
        }
    }

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            ret = DISAS_NORETURN;
            goto out;
        }
    }

    /* Implement the instruction: run each stage of the descriptor's
       helper pipeline that is present.  */
    if (insn->help_in1) {
        insn->help_in1(s, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    /* Skip write-back and CC computation if the op raised an exception.  */
    if (ret != DISAS_NORETURN) {
        if (insn->help_wout) {
            insn->help_wout(s, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* io should be the last instruction in tb when icount is enabled */
    if (unlikely(icount && ret == DISAS_NEXT)) {
        ret = DISAS_TOO_MANY;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == DISAS_NEXT || ret == DISAS_TOO_MANY) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(tcg_env);
    }
#endif

out:
    /* Advance to the next instruction.  */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
6444 
6445 static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6446 {
6447     DisasContext *dc = container_of(dcbase, DisasContext, base);
6448 
6449     /* 31-bit mode */
6450     if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6451         dc->base.pc_first &= 0x7fffffff;
6452         dc->base.pc_next = dc->base.pc_first;
6453     }
6454 
6455     dc->cc_op = CC_OP_DYNAMIC;
6456     dc->ex_value = dc->base.tb->cs_base;
6457     dc->exit_to_mainloop = (dc->base.tb->flags & FLAG_MASK_PER) || dc->ex_value;
6458 }
6459 
/* No per-TB work is needed before the first insn; intentionally empty. */
static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
6463 
6464 static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
6465 {
6466     DisasContext *dc = container_of(dcbase, DisasContext, base);
6467 
6468     /* Delay the set of ilen until we've read the insn. */
6469     tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
6470 }
6471 
6472 static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
6473                                 uint64_t pc)
6474 {
6475     uint64_t insn = cpu_lduw_code(env, pc);
6476 
6477     return pc + get_ilen((insn >> 8) & 0xff);
6478 }
6479 
6480 static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6481 {
6482     CPUS390XState *env = cpu_env(cs);
6483     DisasContext *dc = container_of(dcbase, DisasContext, base);
6484 
6485     dc->base.is_jmp = translate_one(env, dc);
6486     if (dc->base.is_jmp == DISAS_NEXT) {
6487         if (dc->ex_value ||
6488             !is_same_page(dcbase, dc->base.pc_next) ||
6489             !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) {
6490             dc->base.is_jmp = DISAS_TOO_MANY;
6491         }
6492     }
6493 }
6494 
/*
 * Emit the TB epilogue according to how translation ended.  The cases
 * deliberately cascade: TOO_MANY also syncs the PSW address, and
 * PC_UPDATED also syncs cc_op, before the common TB exit.
 */
static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        /* The exit has already been emitted (exception/trap).  */
        break;
    case DISAS_TOO_MANY:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (dc->exit_to_mainloop) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
6522 
6523 static void s390x_tr_disas_log(const DisasContextBase *dcbase,
6524                                CPUState *cs, FILE *logfile)
6525 {
6526     DisasContext *dc = container_of(dcbase, DisasContext, base);
6527 
6528     if (unlikely(dc->ex_value)) {
6529         /* ??? Unfortunately target_disas can't use host memory.  */
6530         fprintf(logfile, "IN: EXECUTE %016" PRIx64, dc->ex_value);
6531     } else {
6532         fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
6533         target_disas(logfile, cs, dc->base.pc_first, dc->base.tb->size);
6534     }
6535 }
6536 
/* Hook table consumed by the generic translator_loop() driver. */
static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};
6545 
6546 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
6547                            vaddr pc, void *host_pc)
6548 {
6549     DisasContext dc;
6550 
6551     translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
6552 }
6553 
6554 void s390x_restore_state_to_opc(CPUState *cs,
6555                                 const TranslationBlock *tb,
6556                                 const uint64_t *data)
6557 {
6558     CPUS390XState *env = cpu_env(cs);
6559     int cc_op = data[1];
6560 
6561     env->psw.addr = data[0];
6562 
6563     /* Update the CC opcode if it is not already up-to-date.  */
6564     if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6565         env->cc_op = cc_op;
6566     }
6567 
6568     /* Record ILEN.  */
6569     env->int_pgm_ilen = data[2];
6570 }
6571