xref: /qemu/target/s390x/tcg/translate.c (revision f8ed3648)
1 /*
2  *  S/390 translation
3  *
4  *  Copyright (c) 2009 Ulrich Hecht
5  *  Copyright (c) 2010 Alexander Graf
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24 
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 #  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 #  define LOG_DISAS(...) do { } while (0)
29 #endif
30 
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "s390x-internal.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
36 #include "tcg/tcg-op.h"
37 #include "tcg/tcg-op-gvec.h"
38 #include "qemu/log.h"
39 #include "qemu/host-utils.h"
40 #include "exec/cpu_ldst.h"
41 #include "exec/helper-proto.h"
42 #include "exec/helper-gen.h"
43 
44 #include "exec/translator.h"
45 #include "exec/log.h"
46 #include "qemu/atomic128.h"
47 
48 #define HELPER_H "helper.h"
49 #include "exec/helper-info.c.inc"
50 #undef  HELPER_H
51 
52 
53 /* Information that (most) every instruction needs to manipulate.  */
54 typedef struct DisasContext DisasContext;
55 typedef struct DisasInsn DisasInsn;
56 typedef struct DisasFields DisasFields;
57 
58 /*
59  * Define a structure to hold the decoded fields.  We'll store each inside
60  * an array indexed by an enum.  In order to conserve memory, we'll arrange
61  * for fields that do not exist at the same time to overlap, thus the "C"
62  * for compact.  For checking purposes there is an "O" for original index
63  * as well that will be applied to availability bitmaps.
64  */
65 
/*
 * "Original" field indices: one enumerator per distinct instruction field
 * name.  Used as bit positions in the DisasFields.presentO availability
 * bitmap (see have_field1).
 */
enum DisasFieldIndexO {
    FLD_O_r1,           /* register operands */
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,           /* mask operands */
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,           /* base registers */
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,           /* displacements */
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,           /* index register */
    FLD_O_l1,           /* lengths */
    FLD_O_l2,
    FLD_O_i1,           /* immediates */
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,           /* vector registers */
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};
94 
/*
 * "Compact" field indices: fields that never occur in the same instruction
 * format share a slot in DisasFields.c[], so only NUM_C_FIELD integers are
 * needed instead of one per FLD_O_* enumerator.
 */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    /* Number of compact slots actually stored per instruction. */
    NUM_C_FIELD = 7
};
131 
/* Decoded operand fields of the instruction being translated. */
struct DisasFields {
    uint64_t raw_insn;      /* raw instruction bits */
    unsigned op:8;          /* primary opcode byte */
    unsigned op2:8;         /* secondary opcode byte, where present */
    unsigned presentC:16;   /* bitmap of valid compact (FLD_C_*) slots */
    unsigned int presentO;  /* bitmap of present original (FLD_O_*) fields */
    int c[NUM_C_FIELD];     /* decoded field values, indexed by FLD_C_* */
};
140 
/* Per-translation-block state threaded through every translate step. */
struct DisasContext {
    DisasContextBase base;  /* common translator state; must come first */
    const DisasInsn *insn;  /* descriptor of the insn being translated */
    TCGOp *insn_start;      /* the insn_start op for the current insn */
    DisasFields fields;     /* decoded operand fields (see DisasFields) */
    uint64_t ex_value;      /* EXECUTE target state - TODO confirm exact encoding */
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;          /* length in bytes of the current insn */
    enum cc_op cc_op;       /* current condition-code computation state */
    bool exit_to_mainloop;  /* NOTE(review): presumably forces TB exit to the
                               main loop when set - confirm against users */
};
157 
/*
 * Information carried about a condition to be evaluated.  "is_64" selects
 * which arm of the union holds the comparison operands; "cond" is the TCG
 * comparison to apply to them.
 */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    union {
        struct { TCGv_i64 a, b; } s64;  /* used when is_64 */
        struct { TCGv_i32 a, b; } s32;  /* used when !is_64 */
    } u;
} DisasCompare;
167 
168 #ifdef DEBUG_INLINE_BRANCHES
169 static uint64_t inline_branch_hit[CC_OP_MAX];
170 static uint64_t inline_branch_miss[CC_OP_MAX];
171 #endif
172 
173 static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
174 {
175     if (s->base.tb->flags & FLAG_MASK_32) {
176         if (s->base.tb->flags & FLAG_MASK_64) {
177             tcg_gen_movi_i64(out, pc);
178             return;
179         }
180         pc |= 0x80000000;
181     }
182     assert(!(s->base.tb->flags & FLAG_MASK_64));
183     tcg_gen_deposit_i64(out, out, tcg_constant_i64(pc), 0, 32);
184 }
185 
/* TCG globals mirroring the bits of CPUS390XState that translation uses. */
static TCGv_i64 psw_addr;   /* psw.addr */
static TCGv_i64 psw_mask;   /* psw.mask */
static TCGv_i64 gbea;       /* PER breaking-event address */

static TCGv_i32 cc_op;      /* cc computation selector / static cc value */
static TCGv_i64 cc_src;     /* cc computation inputs */
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];   /* "r0".."r15", backing store for regs[] */
static TCGv_i64 regs[16];           /* the 16 general registers */
197 
198 void s390x_translate_init(void)
199 {
200     int i;
201 
202     psw_addr = tcg_global_mem_new_i64(cpu_env,
203                                       offsetof(CPUS390XState, psw.addr),
204                                       "psw_addr");
205     psw_mask = tcg_global_mem_new_i64(cpu_env,
206                                       offsetof(CPUS390XState, psw.mask),
207                                       "psw_mask");
208     gbea = tcg_global_mem_new_i64(cpu_env,
209                                   offsetof(CPUS390XState, gbea),
210                                   "gbea");
211 
212     cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
213                                    "cc_op");
214     cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
215                                     "cc_src");
216     cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
217                                     "cc_dst");
218     cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
219                                    "cc_vr");
220 
221     for (i = 0; i < 16; i++) {
222         snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
223         regs[i] = tcg_global_mem_new(cpu_env,
224                                      offsetof(CPUS390XState, regs[i]),
225                                      cpu_reg_names[i]);
226     }
227 }
228 
/* Byte offset of vector register REG (0..31) within CPUS390XState. */
static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}
234 
235 static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
236 {
237     /* Convert element size (es) - e.g. MO_8 - to bytes */
238     const uint8_t bytes = 1 << es;
239     int offs = enr * bytes;
240 
241     /*
242      * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
243      * of the 16 byte vector, on both, little and big endian systems.
244      *
245      * Big Endian (target/possible host)
246      * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
247      * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
248      * W:  [             0][             1] - [             2][             3]
249      * DW: [                             0] - [                             1]
250      *
251      * Little Endian (possible host)
252      * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
253      * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
254      * W:  [             1][             0] - [             3][             2]
255      * DW: [                             0] - [                             1]
256      *
257      * For 16 byte elements, the two 8 byte halves will not form a host
258      * int128 if the host is little endian, since they're in the wrong order.
259      * Some operations (e.g. xor) do not care. For operations like addition,
260      * the two 8 byte elements have to be loaded separately. Let's force all
261      * 16 byte operations to handle it in a special way.
262      */
263     g_assert(es <= MO_64);
264 #if !HOST_BIG_ENDIAN
265     offs ^= (8 - bytes);
266 #endif
267     return offs + vec_full_reg_offset(reg);
268 }
269 
/* Offset of the 64-bit float register REG (0..15): element 0 of the vreg. */
static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}
275 
/* Offset of the 32-bit float register REG (0..15): element 0 of the vreg. */
static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}
281 
/* Return a fresh temporary holding a copy of general register REG. */
static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}
288 
/* Return a fresh temporary holding 64-bit float register REG. */
static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}
296 
/*
 * Return a fresh temporary holding 32-bit float register REG,
 * zero-extended to 64 bits.
 */
static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}
304 
305 static TCGv_i128 load_freg_128(int reg)
306 {
307     TCGv_i64 h = load_freg(reg);
308     TCGv_i64 l = load_freg(reg + 2);
309     TCGv_i128 r = tcg_temp_new_i128();
310 
311     tcg_gen_concat_i64_i128(r, l, h);
312     return r;
313 }
314 
/* Store V into general register REG (all 64 bits). */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}
319 
/* Store V into 64-bit float register REG. */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}
324 
/* Store the low 32 bits of V into general register REG. */
static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}
330 
/* Store the low 32 bits of V into the HIGH half of general register REG,
   keeping the low half intact. */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}
335 
/* Store the low 32 bits of V into 32-bit float register REG. */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}
340 
/* Synchronize the architectural psw.addr with the translator's pc_next. */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}
346 
/*
 * Record an unconditional branch for PER (program-event recording).
 * System mode only (no-op for user-only builds): gbea is always updated
 * to the branch instruction's address; if PER is active, also call the
 * helper with the branch target - the next sequential insn when TO_NEXT,
 * otherwise whatever is currently in psw_addr.
 */
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_constant_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
    }
#endif
}
358 
/*
 * Record a conditional branch for PER, taken when COND holds for
 * ARG1/ARG2.  System mode only (no-op for user-only builds).
 * With PER active, branch around the helper call when the condition is
 * false; otherwise just conditionally update gbea via movcond.
 */
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        /* Skip the helper when the branch is not taken. */
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        /* gbea = cond ? pc_next : gbea */
        TCGv_i64 pc = tcg_constant_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
    }
#endif
}
377 
/* Record the current instruction as a PER breaking event (update gbea). */
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}
382 
/*
 * Flush the translator's notion of the cc computation into the cc_op
 * global.  DYNAMIC/STATIC need no store: for those, env->cc_op is
 * already authoritative.
 */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}
389 
/* Fetch 2 bytes of instruction text at PC (big-endian target order). */
static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)translator_lduw(env, &s->base, pc);
}
395 
/* Fetch 4 bytes of instruction text at PC, zero-extended to 64 bits. */
static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
}
401 
402 static int get_mem_index(DisasContext *s)
403 {
404 #ifdef CONFIG_USER_ONLY
405     return MMU_USER_IDX;
406 #else
407     if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
408         return MMU_REAL_IDX;
409     }
410 
411     switch (s->base.tb->flags & FLAG_MASK_ASC) {
412     case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
413         return MMU_PRIMARY_IDX;
414     case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
415         return MMU_SECONDARY_IDX;
416     case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
417         return MMU_HOME_IDX;
418     default:
419         g_assert_not_reached();
420         break;
421     }
422 #endif
423 }
424 
/* Raise exception EXCP via the exception helper. */
static void gen_exception(int excp)
{
    gen_helper_exception(cpu_env, tcg_constant_i32(excp));
}
429 
/*
 * Raise a program exception with interruption code CODE: store the code
 * and instruction length into env, synchronize psw.addr and cc, then
 * trigger EXCP_PGM.
 */
static void gen_program_exception(DisasContext *s, int code)
{
    /* Remember what pgm exception this was.  */
    tcg_gen_st_i32(tcg_constant_i32(code), cpu_env,
                   offsetof(CPUS390XState, int_pgm_code));

    tcg_gen_st_i32(tcg_constant_i32(s->ilen), cpu_env,
                   offsetof(CPUS390XState, int_pgm_ilen));

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}
448 
/* Raise an operation (illegal opcode) program exception. */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}
453 
/* Raise a data exception with data-exception code DXC. */
static inline void gen_data_exception(uint8_t dxc)
{
    gen_helper_data_exception(cpu_env, tcg_constant_i32(dxc));
}
458 
/* Raise a trap: a data exception with DXC 0xff. */
static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}
464 
465 static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
466                                   int64_t imm)
467 {
468     tcg_gen_addi_i64(dst, src, imm);
469     if (!(s->base.tb->flags & FLAG_MASK_64)) {
470         if (s->base.tb->flags & FLAG_MASK_32) {
471             tcg_gen_andi_i64(dst, dst, 0x7fffffff);
472         } else {
473             tcg_gen_andi_i64(dst, dst, 0x00ffffff);
474         }
475     }
476 }
477 
478 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
479 {
480     TCGv_i64 tmp = tcg_temp_new_i64();
481 
482     /*
483      * Note that d2 is limited to 20 bits, signed.  If we crop negative
484      * displacements early we create larger immediate addends.
485      */
486     if (b2 && x2) {
487         tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
488         gen_addi_and_wrap_i64(s, tmp, tmp, d2);
489     } else if (b2) {
490         gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
491     } else if (x2) {
492         gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
493     } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
494         if (s->base.tb->flags & FLAG_MASK_32) {
495             tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
496         } else {
497             tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
498         }
499     } else {
500         tcg_gen_movi_i64(tmp, d2);
501     }
502 
503     return tmp;
504 }
505 
/*
 * True if cc_src/cc_dst/cc_vr currently carry data the cc computation
 * depends on.  DYNAMIC and STATIC do not use them, and cc_op values <= 3
 * do not either (those are the CC_OP_CONST0..CONST3 encodings - see
 * gen_op_movi_cc).
 */
static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}
512 
/* Set the cc to the constant VAL (0..3), discarding any live cc data. */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}
522 
/* Switch to cc computation OP with a single input (cc_dst = DST). */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
532 
/* Switch to cc computation OP with two inputs (cc_src = SRC, cc_dst = DST). */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
543 
/* Switch to cc computation OP with three inputs (src, dst, and result vr). */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
552 
/* Set the cc from VAL's zero/non-zero status. */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}
557 
/* CC value is in env->cc_op; discard any live cc inputs and record that. */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
568 
/*
 * Materialize the condition code into the cc_op global (0..3), based on
 * the deferred computation recorded in s->cc_op, then mark the cc as
 * STATIC.  Constant cc values are stored directly; everything else goes
 * through the calc_cc helper with 1, 2, or 3 arguments.
 */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    /* First pass: decide which TCG constants the helper call will need. */
    switch (s->cc_op) {
    default:
        dummy = tcg_constant_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_constant_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper call needed for these. */
        break;
    }

    /* Second pass: emit the store or helper call. */
    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
        /* cc = (cc_dst != 0), computed inline without a helper. */
        tcg_gen_setcondi_i64(TCG_COND_NE, cc_dst, cc_dst, 0);
        tcg_gen_extrl_i64_i32(cc_op, cc_dst);
        break;
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        g_assert_not_reached();
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
659 
660 static bool use_goto_tb(DisasContext *s, uint64_t dest)
661 {
662     if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
663         return false;
664     }
665     return translator_use_goto_tb(&s->base, dest);
666 }
667 
/* Statistics hook: count a branch that could NOT be inlined (debug only). */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}
674 
/* Statistics hook: count a branch that WAS inlined (debug only). */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
681 
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  Indexed by the 4-bit branch
   mask; entries come in pairs because the low mask bit (CC=3, "x" below)
   is ignored.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
694 
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible, so the two low mask
   bits ("x x" below) are ignored - entries repeat in groups of four.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
707 
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  Where the recorded cc
   computation and the mask admit a direct comparison, the cc is never
   materialized; otherwise fall back to computing the cc value and
   testing (8 >> cc) & mask.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Mask 15 = branch always, mask 0 = branch never: no operands needed. */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        /* Same table as signed compare, with the conditions unsigned. */
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_constant_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_constant_i64(0);
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_constant_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_constant_i64(0);
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        /* The cc value is already in cc_op; compare against the mask. */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, tcg_constant_i32(8), cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
961 
962 /* ====================================================================== */
963 /* Define the insn format enumeration.  */
964 #define F0(N)                         FMT_##N,
965 #define F1(N, X1)                     F0(N)
966 #define F2(N, X1, X2)                 F0(N)
967 #define F3(N, X1, X2, X3)             F0(N)
968 #define F4(N, X1, X2, X3, X4)         F0(N)
969 #define F5(N, X1, X2, X3, X4, X5)     F0(N)
970 #define F6(N, X1, X2, X3, X4, X5, X6) F0(N)
971 
972 typedef enum {
973 #include "insn-format.h.inc"
974 } DisasFormat;
975 
976 #undef F0
977 #undef F1
978 #undef F2
979 #undef F3
980 #undef F4
981 #undef F5
982 #undef F6
983 
984 /* This is the way fields are to be accessed out of DisasFields.  */
985 #define have_field(S, F)  have_field1((S), FLD_O_##F)
986 #define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)
987 
/* True if original field index C was decoded for the current insn. */
static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}
992 
/* Fetch the decoded value of a field: O is the original index (used for
   the presence check), C the compact slot holding the value. */
static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}
999 
/* Describe the layout of each field in each format.  Populated by the
   R/M/V/BD/... macros below; "type" is a small code distinguishing field
   kinds (0 register-like, 1 immediate, 2 long displacement, 3 vector,
   per those macros' initializers). */
typedef struct DisasField {
    unsigned int beg:8;             /* bit position within the insn */
    unsigned int size:8;            /* width in bits */
    unsigned int type:2;
    unsigned int indexC:6;          /* compact storage slot (FLD_C_*) */
    enum DisasFieldIndexO indexO:8; /* original field index (FLD_O_*) */
} DisasField;

/* Field layout for one instruction format: up to NUM_C_FIELD fields. */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
1012 
/*
 * Field-layout helper macros.  Each expands to one or more DisasField
 * initializers { beg, size, type, indexC, indexO }:
 *   R/M/V  - 4-bit register / mask / vector-register fields
 *   BD     - base register + 12-bit displacement
 *   BXD    - base + index registers + 12-bit displacement
 *   BDL    - base register + 20-bit ("long") displacement (type 2)
 *   BXDL   - base + index registers + 20-bit displacement (type 2)
 *   I      - immediate field of width S (type 1)
 *   L      - length field of width S
 */
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

/* Second expansion of insn-format.h.inc: each Fn(NAME, ...) now produces
   the field list for FMT_<NAME>, in the same order as the enum above.  */
#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6)       { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.h.inc"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
1057 
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    TCGv_i64 out, out2, in1, in2;   /* 64-bit operand/result temporaries */
    TCGv_i64 addr1;                 /* memory address of the first operand */
    TCGv_i128 out_128, in1_128, in2_128;  /* 128-bit variants */
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

/* Bit flags; a helper may require several constraints at once.  */
#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16
1078 
/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2


/* Instruction flags, kept in DisasInsn.flags; used to gate facility
   checks and exceptions before the operation is emitted.  */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */
1099 
/* One decode-table entry: how to decode, check, and emit one insn.  */
struct DisasInsn {
    unsigned opc:16;        /* opcode this entry matches */
    unsigned flags:16;      /* IF_* instruction flags */
    DisasFormat fmt:8;      /* instruction format (FMT_*) */
    unsigned fac:8;         /* required facility */
    unsigned spec:8;        /* SPEC_* operand constraints */

    const char *name;       /* mnemonic, for logging/debug */

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    /* Per-insn constant, interpreted by the helpers above.  */
    uint64_t data;
};
1126 
1127 /* ====================================================================== */
1128 /* Miscellaneous helpers, used by several operations.  */
1129 
/*
 * Emit an unconditional branch to the compile-time-constant address DEST,
 * using a chained goto_tb when possible.  Also emits the appropriate PER
 * (program-event-recording) branch hooks.
 */
static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        /* Branch to the insn following this one: fall through.  */
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        /* Cannot chain; just update the PSW address and exit the TB.  */
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}
1149 
/*
 * Emit a conditional branch for condition C.  If IS_IMM, the target is
 * the relative immediate IMM (in halfwords from the insn address);
 * otherwise it is the computed address CDEST.  Chooses between chained
 * goto_tb exits and a movcond-based PC update depending on what
 * use_goto_tb allows.
 */
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + (int64_t)imm * 2;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_NORETURN;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_constant_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_constant_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            /* Widen the 32-bit comparison result to select the new PC.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_constant_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
        }

        ret = DISAS_PC_UPDATED;
    }

 egress:
    return ret;
}
1271 
1272 /* ====================================================================== */
1273 /* The operations.  These perform the bulk of the work for any insn,
1274    usually after the operands have been loaded and output initialized.  */
1275 
/* Integer absolute value.  */
static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

/* FP absolute value: clear bit 31, the sign bit of a 32-bit float.  */
static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

/* FP absolute value: clear bit 63, the sign bit of a 64-bit float.  */
static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

/* 128-bit FP absolute value: the sign bit lives in the high half (in1);
   the low half (in2) is copied through unchanged.  */
static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
1300 
/* Signed 64-bit addition; CC is computed separately via cout helpers.  */
static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* Unsigned 64-bit addition; the carry out is left in cc_src (0 or 1)
   for the logical-add CC computation.  */
static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
1313 
/* Compute carry into cc_src. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        /* cc_src holds the borrow; adding 1 converts it to the carry
           (presumably borrow is 0/-1 here — per the SUBU convention). */
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        /* Materialize the CC value first, then extract the carry.  */
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}
1334 
/* 32-bit add with carry: out = in1 + in2 + carry.  Only the low 32 bits
   are meaningful; the caller's wout/cout handle truncation and CC.  */
static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}

/* 64-bit add with carry, tracking the carry out of the full 64-bit sum
   in cc_src via two double-word additions.  */
static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    TCGv_i64 zero = tcg_constant_i64(0);
    /* First fold the incoming carry into in1, then add in2; cc_src
       accumulates the carry out of both steps.  */
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}
1353 
/* Add signed immediate to storage.  s->insn->data carries the MemOp for
   the memory access.  With the interlocked-access facility (STFLE 45)
   the update is done atomically in memory; otherwise via load/add/store. */
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1375 
/* Unsigned (logical) add-immediate to storage; like op_asi but the sum
   is recomputed with add2 so the carry lands in cc_src for the CC.  */
static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1398 
/* BFP add, 32-bit: delegate to the aeb helper (sets FP exceptions).  */
static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* BFP add, 64-bit.  */
static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* BFP add, 128-bit (extended), using the i128 operand temporaries.  */
static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out_128, cpu_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
1416 
/* Bitwise AND; CC handled by the generic cout helper.  */
static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* AND immediate into a sub-field of a register (e.g. NIHH/NILL family).
   s->insn->data packs the field's bit position (low byte) and width.  */
static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    /* Move the immediate into position and pad the rest with ones so
       bits outside the field are preserved by the AND.  */
    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_ori_i64(t, t, ~mask);
    tcg_gen_and_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
1439 
/* AND with complement: out = in1 & ~in2.  */
static DisasJumpType op_andc(DisasContext *s, DisasOps *o)
{
    tcg_gen_andc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* OR with complement: out = in1 | ~in2.  */
static DisasJumpType op_orc(DisasContext *s, DisasOps *o)
{
    tcg_gen_orc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* NOT AND: out = ~(in1 & in2).  */
static DisasJumpType op_nand(DisasContext *s, DisasOps *o)
{
    tcg_gen_nand_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* NOT OR: out = ~(in1 | in2).  */
static DisasJumpType op_nor(DisasContext *s, DisasOps *o)
{
    tcg_gen_nor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* NOT XOR (equivalence): out = ~(in1 ^ in2).  */
static DisasJumpType op_nxor(DisasContext *s, DisasOps *o)
{
    tcg_gen_eqv_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1469 
/* AND to storage (NI family).  With the interlocked-access facility the
   AND is done atomically in memory; otherwise load/and/store.  The MemOp
   comes from s->insn->data.  */
static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1490 
/* BRANCH AND SAVE: store the link info for the next insn in out (r1),
   then branch to in2 if present (in2 == NULL means no branch, e.g. when
   the register operand is 0).  */
static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}
1502 
/*
 * Build the BAL-style link information in o->out.  In 31/64-bit modes
 * this is the plain link info; in 24-bit mode the high word of the link
 * register also encodes the instruction length code, part of the PSW
 * mask, and the condition code.
 */
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    /* Keep the high word of r1; insert ILC (bits 62-63 of the halfword
       count) and the next-insn address.  */
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    /* Insert PSW mask bits 20-23 into bits 24-27 of the link word.  */
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    /* Insert the condition code at bits 28-29.  */
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
}
1522 
/* BRANCH AND LINK: save mode-dependent link info (see save_link_info),
   then branch to in2 if present.  */
static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}
1534 
1535 /*
1536  * Disassemble the target of a branch. The results are returned in a form
1537  * suitable for passing into help_branch():
1538  *
1539  * - bool IS_IMM reflects whether the target is fixed or computed. Non-EXECUTEd
1540  *   branches, whose DisasContext *S contains the relative immediate field RI,
1541  *   are considered fixed. All the other branches are considered computed.
1542  * - int IMM is the value of RI.
1543  * - TCGv_i64 CDEST is the address of the computed target.
1544  */
1545 #define disas_jdest(s, ri, is_imm, imm, cdest) do {                            \
1546     if (have_field(s, ri)) {                                                   \
1547         if (unlikely(s->ex_value)) {                                           \
1548             cdest = tcg_temp_new_i64();                                        \
1549             tcg_gen_ld_i64(cdest, cpu_env, offsetof(CPUS390XState, ex_target));\
1550             tcg_gen_addi_i64(cdest, cdest, (int64_t)get_field(s, ri) * 2);     \
1551             is_imm = false;                                                    \
1552         } else {                                                               \
1553             is_imm = true;                                                     \
1554         }                                                                      \
1555     } else {                                                                   \
1556         is_imm = false;                                                        \
1557     }                                                                          \
1558     imm = is_imm ? get_field(s, ri) : 0;                                       \
1559 } while (false)
1560 
/* BRANCH AND SAVE (immediate form): save the link info, then branch
   unconditionally (mask 0xf) to the i2 target.  */
static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    bool is_imm;
    int imm;

    pc_to_link_info(o->out, s, s->pc_tmp);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    disas_jcc(s, &c, 0xf);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1573 
/* BRANCH ON CONDITION (BC/BCR/BRC...): branch when the CC matches the
   m1 mask.  BCR with r2 == 0 does not branch, but masks 14/15 act as
   serialization points.  */
static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s, m1);
    DisasCompare c;
    bool is_imm;
    int imm;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s, r2) && get_field(s, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jdest(s, i2, is_imm, imm, o->in2);
    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1600 
/* BRANCH ON COUNT, 32-bit: decrement the low word of r1 and branch
   while the result is non-zero.  */
static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    DisasCompare c;
    bool is_imm;
    TCGv_i64 t;
    int imm;

    c.cond = TCG_COND_NE;
    c.is_64 = false;

    /* Decrement in 64 bits; only the low 32 are stored back/compared.  */
    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1622 
1623 static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
1624 {
1625     int r1 = get_field(s, r1);
1626     int imm = get_field(s, i2);
1627     DisasCompare c;
1628     TCGv_i64 t;
1629 
1630     c.cond = TCG_COND_NE;
1631     c.is_64 = false;
1632 
1633     t = tcg_temp_new_i64();
1634     tcg_gen_shri_i64(t, regs[r1], 32);
1635     tcg_gen_subi_i64(t, t, 1);
1636     store_reg32h_i64(r1, t);
1637     c.u.s32.a = tcg_temp_new_i32();
1638     c.u.s32.b = tcg_constant_i32(0);
1639     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1640 
1641     return help_branch(s, &c, 1, imm, o->in2);
1642 }
1643 
/* BRANCH ON COUNT, 64-bit: decrement r1 in place and branch while the
   result is non-zero.  */
static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    DisasCompare c;
    bool is_imm;
    int imm;

    c.cond = TCG_COND_NE;
    c.is_64 = true;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_constant_i64(0);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1661 
/* BRANCH ON INDEX, 32-bit (BXH/BXLE per insn->data): r1 += r3, then
   compare the 32-bit sum against the odd register of the r3 pair.  */
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    DisasCompare c;
    bool is_imm;
    TCGv_i64 t;
    int imm;

    /* data != 0 selects the branch-on-low-or-equal variant.  */
    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    /* The comparand is the odd register of the r3 pair.  */
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1685 
/* BRANCH ON INDEX, 64-bit (BXHG/BXLEG per insn->data): r1 += r3, then
   compare against the odd register of the r3 pair.  */
static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    DisasCompare c;
    bool is_imm;
    int imm;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    /* If r1 aliases the comparand register, snapshot its value before
       the addition below clobbers it.  */
    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
    } else {
        c.u.s64.b = regs[r3 | 1];
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1709 
/* COMPARE AND BRANCH family: compare in1 with in2 using the condition
   selected by m3 (insn->data != 0 selects the unsigned variant), then
   branch to the i4 immediate or, failing that, the b4/d4 address.  */
static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    o->out = NULL;
    disas_jdest(s, i4, is_imm, imm, o->out);
    if (!is_imm && !o->out) {
        /* No immediate target: compute the b4/d4 effective address.  */
        imm = 0;
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1734 
/* BFP compare, 32-bit: helper sets the CC directly.  */
static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* BFP compare, 64-bit.  */
static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* BFP compare, 128-bit (extended).  */
static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->in1_128, o->in2_128);
    set_cc_static(s);
    return DISAS_NEXT;
}
1755 
1756 static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
1757                                    bool m4_with_fpe)
1758 {
1759     const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
1760     uint8_t m3 = get_field(s, m3);
1761     uint8_t m4 = get_field(s, m4);
1762 
1763     /* m3 field was introduced with FPE */
1764     if (!fpe && m3_with_fpe) {
1765         m3 = 0;
1766     }
1767     /* m4 field was introduced with FPE */
1768     if (!fpe && m4_with_fpe) {
1769         m4 = 0;
1770     }
1771 
1772     /* Check for valid rounding modes. Mode 3 was introduced later. */
1773     if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
1774         gen_program_exception(s, PGM_SPECIFICATION);
1775         return NULL;
1776     }
1777 
1778     return tcg_constant_i32(deposit32(m3, 4, 4, m4));
1779 }
1780 
/* BFP->fixed conversions (32-bit result).  The m3 rounding mode is
   always architected here; the m4 field requires FPE (hence the
   (false, true) arguments to fpinst_extract_m34).  The helpers set
   the CC, made live via set_cc_static.  */

/* Convert 32-bit BFP to 32-bit fixed.  */
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Convert 64-bit BFP to 32-bit fixed.  */
static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Convert 128-bit BFP to 32-bit fixed.  */
static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, cpu_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1816 
/* BFP->fixed conversions (64-bit result); same m3/m4 handling and CC
   treatment as the 32-bit-result group above.  */

/* Convert 32-bit BFP to 64-bit fixed.  */
static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Convert 64-bit BFP to 64-bit fixed.  */
static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Convert 128-bit BFP to 64-bit fixed.  */
static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, cpu_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1852 
/* BFP->logical (unsigned fixed) conversions, 32-bit result.  Both m3
   and m4 require FPE here, hence the (false, false) arguments.  */

/* Convert 32-bit BFP to 32-bit unsigned fixed.  */
static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Convert 64-bit BFP to 32-bit unsigned fixed.  */
static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Convert 128-bit BFP to 32-bit unsigned fixed.  */
static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, cpu_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1888 
/* BFP->logical (unsigned fixed) conversions, 64-bit result.  */

/* Convert 32-bit BFP to 64-bit unsigned fixed.  */
static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Convert 64-bit BFP to 64-bit unsigned fixed.  */
static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Convert 128-bit BFP to 64-bit unsigned fixed.  */
static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, cpu_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1924 
/* Fixed->BFP conversions (signed 64-bit source).  Both m3 and m4 are
   FPE-only here (hence (true, true)); no CC is set by these.  */

/* Convert 64-bit fixed to 32-bit BFP.  */
static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, cpu_env, o->in2, m34);
    return DISAS_NEXT;
}

/* Convert 64-bit fixed to 64-bit BFP.  */
static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
    return DISAS_NEXT;
}

/* Convert 64-bit fixed to 128-bit BFP.  */
static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out_128, cpu_env, o->in2, m34);
    return DISAS_NEXT;
}
1957 
/* Logical (unsigned) fixed->BFP conversions; m3 and m4 require FPE.
   No CC is set by these.  */

/* Convert 64-bit unsigned fixed to 32-bit BFP.  */
static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, cpu_env, o->in2, m34);
    return DISAS_NEXT;
}

/* Convert 64-bit unsigned fixed to 64-bit BFP.  */
static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
    return DISAS_NEXT;
}

/* Convert 64-bit unsigned fixed to 128-bit BFP.  */
static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out_128, cpu_env, o->in2, m34);
    return DISAS_NEXT;
}
1990 
/* CKSM: the helper computes the checksum over the operand-2 area and
   returns (processed length : checksum) as a 128-bit pair; the R2/R2+1
   address/length pair is then advanced by the processed length.  */
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    TCGv_i128 pair = tcg_temp_new_i128();
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(pair, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    /* Low half of the pair -> out (checksum), high half -> len.  */
    tcg_gen_extr_i128_i64(o->out, len, pair);

    /* Advance the address and shrink the remaining length by len.  */
    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);

    return DISAS_NEXT;
}
2006 
/* COMPARE LOGICAL (CLC): compare two storage operands of length l+1.
   Power-of-two lengths up to 8 bytes are inlined as two loads plus an
   unsigned comparison; all other lengths go through the helper.  */
static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s, l1);
    TCGv_i32 vl;
    MemOp mop;

    switch (l + 1) {
    case 1:
    case 2:
    case 4:
    case 8:
        /* ctz32(l + 1) maps the byte count 1/2/4/8 to the MO_* size.  */
        mop = ctz32(l + 1) | MO_TE;
        tcg_gen_qemu_ld_tl(cc_src, o->addr1, get_mem_index(s), mop);
        tcg_gen_qemu_ld_tl(cc_dst, o->in2, get_mem_index(s), mop);
        gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
        return DISAS_NEXT;
    default:
        /* The helper takes the encoded length field (length - 1).  */
        vl = tcg_constant_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        set_cc_static(s);
        return DISAS_NEXT;
    }
}
2030 
2031 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2032 {
2033     int r1 = get_field(s, r1);
2034     int r2 = get_field(s, r2);
2035     TCGv_i32 t1, t2;
2036 
2037     /* r1 and r2 must be even.  */
2038     if (r1 & 1 || r2 & 1) {
2039         gen_program_exception(s, PGM_SPECIFICATION);
2040         return DISAS_NORETURN;
2041     }
2042 
2043     t1 = tcg_constant_i32(r1);
2044     t2 = tcg_constant_i32(r2);
2045     gen_helper_clcl(cc_op, cpu_env, t1, t2);
2046     set_cc_static(s);
2047     return DISAS_NEXT;
2048 }
2049 
2050 static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
2051 {
2052     int r1 = get_field(s, r1);
2053     int r3 = get_field(s, r3);
2054     TCGv_i32 t1, t3;
2055 
2056     /* r1 and r3 must be even.  */
2057     if (r1 & 1 || r3 & 1) {
2058         gen_program_exception(s, PGM_SPECIFICATION);
2059         return DISAS_NORETURN;
2060     }
2061 
2062     t1 = tcg_constant_i32(r1);
2063     t3 = tcg_constant_i32(r3);
2064     gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
2065     set_cc_static(s);
2066     return DISAS_NEXT;
2067 }
2068 
2069 static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
2070 {
2071     int r1 = get_field(s, r1);
2072     int r3 = get_field(s, r3);
2073     TCGv_i32 t1, t3;
2074 
2075     /* r1 and r3 must be even.  */
2076     if (r1 & 1 || r3 & 1) {
2077         gen_program_exception(s, PGM_SPECIFICATION);
2078         return DISAS_NORETURN;
2079     }
2080 
2081     t1 = tcg_constant_i32(r1);
2082     t3 = tcg_constant_i32(r3);
2083     gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
2084     set_cc_static(s);
2085     return DISAS_NEXT;
2086 }
2087 
/* COMPARE LOGICAL CHARACTERS UNDER MASK: the helper compares the bytes
   of the 32-bit value from in1 selected by mask m3 against successive
   bytes at the operand-2 address and sets the CC.  */
static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_constant_i32(get_field(s, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2098 
/* COMPARE LOGICAL STRING: the helper returns both updated operand
   addresses packed into a 128-bit value; unpack them back into in1/in2
   (NOTE(review): presumably the generic output machinery writes these
   back to the registers — confirm against the insn table).  */
static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    TCGv_i128 pair = tcg_temp_new_i128();

    gen_helper_clst(pair, cpu_env, regs[0], o->in1, o->in2);
    tcg_gen_extr_i128_i64(o->in2, o->in1, pair);

    set_cc_static(s);
    return DISAS_NEXT;
}
2109 
/* COPY SIGN: out = (in1 & sign bit) | (in2 & ~sign bit).  The sign bit
   is copied to a temporary first, which keeps the sequence correct even
   if out aliases in1.  */
static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    return DISAS_NEXT;
}
2118 
/* COMPARE AND SWAP: aligned atomic cmpxchg at address b2+d2; the CC is
   0 when memory matched the expected value, 1 otherwise.  */
static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    set_cc_static(s);

    return DISAS_NEXT;
}
2141 
/* COMPARE DOUBLE AND SWAP GROUP: 128-bit atomic compare-and-swap of the
   even/odd pair R1:R1+1 against storage, with R3:R3+1 as the new value.  */
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);

    o->out_128 = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(o->out_128, regs[r1 + 1], regs[r1]);

    /* Note out (R1:R1+1) = expected value and in2 (R3:R3+1) = new value.  */
    tcg_gen_atomic_cmpxchg_i128(o->out_128, o->addr1, o->out_128, o->in2_128,
                                get_mem_index(s), MO_BE | MO_128 | MO_ALIGN);

    /*
     * Extract result into cc_dst:cc_src, compare vs the expected value
     * in the as yet unmodified input registers, then update CC_OP.
     */
    tcg_gen_extr_i128_i64(cc_src, cc_dst, o->out_128);
    tcg_gen_xor_i64(cc_dst, cc_dst, regs[r1]);
    tcg_gen_xor_i64(cc_src, cc_src, regs[r1 + 1]);
    tcg_gen_or_i64(cc_dst, cc_dst, cc_src);
    set_cc_nz_u64(s, cc_dst);

    return DISAS_NEXT;
}
2165 
/* COMPARE AND SWAP AND STORE: fully delegated to a helper.  The
   _parallel variant is chosen when this TB may run concurrently with
   other vCPUs; the helper sets the CC.  */
static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    TCGv_i32 t_r3 = tcg_constant_i32(r3);

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    } else {
        gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    }

    set_cc_static(s);
    return DISAS_NEXT;
}
2180 
2181 #ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE (privileged): cmpxchg at the masked
   operand-2 address; when the swap succeeded and the LSB of R2 was set,
   the TLBs of all CPUs are flushed via the purge helper.  */
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    MemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    /* Clear the low address bits used as flags, per the access size.  */
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);

    gen_helper_purge(cpu_env);
    gen_set_label(lab);

    return DISAS_NEXT;
}
2221 #endif
2222 
/* CONVERT TO DECIMAL: convert the low 32 bits of in1 via the helper and
   store the 8-byte packed-decimal result at the operand-2 address.  */
static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_gen_qemu_st_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
    return DISAS_NEXT;
}
2232 
/* COMPARE AND TRAP: trap when the relation selected by m3 holds.  The
   branch is taken on the inverted condition, so the fall-through path is
   the trapping one.  A nonzero insn->data selects the unsigned (logical)
   comparison variant.  */
static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return DISAS_NEXT;
}
2251 
2252 static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
2253 {
2254     int m3 = get_field(s, m3);
2255     int r1 = get_field(s, r1);
2256     int r2 = get_field(s, r2);
2257     TCGv_i32 tr1, tr2, chk;
2258 
2259     /* R1 and R2 must both be even.  */
2260     if ((r1 | r2) & 1) {
2261         gen_program_exception(s, PGM_SPECIFICATION);
2262         return DISAS_NORETURN;
2263     }
2264     if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2265         m3 = 0;
2266     }
2267 
2268     tr1 = tcg_constant_i32(r1);
2269     tr2 = tcg_constant_i32(r2);
2270     chk = tcg_constant_i32(m3);
2271 
2272     switch (s->insn->data) {
2273     case 12:
2274         gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
2275         break;
2276     case 14:
2277         gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
2278         break;
2279     case 21:
2280         gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
2281         break;
2282     case 24:
2283         gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
2284         break;
2285     case 41:
2286         gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
2287         break;
2288     case 42:
2289         gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
2290         break;
2291     default:
2292         g_assert_not_reached();
2293     }
2294 
2295     set_cc_static(s);
2296     return DISAS_NEXT;
2297 }
2298 
2299 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged): pass r1, r3 and the i2 function code to the
   helper, which implements the supported hypercall subset.  */
static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
    TCGv_i32 func_code = tcg_constant_i32(get_field(s, i2));

    gen_helper_diag(cpu_env, r1, r3, func_code);
    return DISAS_NEXT;
}
2309 #endif
2310 
/* DIVIDE (signed 32-bit): the helper packs its two 32-bit results into
   one 64-bit value, split here into out2 (low half) and out (high half).  */
static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out, cpu_env, o->in1, o->in2);
    tcg_gen_extr32_i64(o->out2, o->out, o->out);
    return DISAS_NEXT;
}

/* DIVIDE LOGICAL (unsigned 32-bit): same packed-result layout.  */
static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out, cpu_env, o->in1, o->in2);
    tcg_gen_extr32_i64(o->out2, o->out, o->out);
    return DISAS_NEXT;
}

/* DIVIDE (signed 64-bit): the helper returns a 128-bit pair which is
   unpacked into out2 (low half) and out (high half).  */
static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();

    gen_helper_divs64(t, cpu_env, o->in1, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, t);
    return DISAS_NEXT;
}

/* DIVIDE LOGICAL (64-bit): the 128-bit dividend arrives in out:out2;
   the helper returns a 128-bit pair unpacked as above.  */
static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();

    gen_helper_divu64(t, cpu_env, o->out, o->out2, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, t);
    return DISAS_NEXT;
}
2342 
/* DIVIDE (short BFP): softfloat division via helper.  */
static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* DIVIDE (long BFP).  */
static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* DIVIDE (extended BFP, 128-bit operands).  */
static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
2360 
/* EXTRACT ACCESS: read access register r2, zero-extended to 64 bits.  */
static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return DISAS_NEXT;
}

/* EXTRACT CPU ATTRIBUTE: no cache topology is modelled, return -1.  */
static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}

/* EXTRACT FPC: read the floating-point-control register.  */
static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
2380 
/* EXTRACT PSW: the upper word of psw_mask goes to R1 and, when r2 is
   nonzero, the lower word goes to R2 (via the 32-bit register store).  */
static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }
    return DISAS_NEXT;
}
2396 
/* EXECUTE: run the instruction at the operand-2 address, modified by R1.
   PSW address and CC state are committed before the helper, since the
   executed instruction observes and may change them.  */
static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return DISAS_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    /* r1 == 0 contributes zero rather than the contents of GR0.  */
    if (r1 == 0) {
        v1 = tcg_constant_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_constant_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);

    return DISAS_PC_CC_UPDATED;
}
2423 
2424 static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2425 {
2426     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2427 
2428     if (!m34) {
2429         return DISAS_NORETURN;
2430     }
2431     gen_helper_fieb(o->out, cpu_env, o->in2, m34);
2432     return DISAS_NEXT;
2433 }
2434 
2435 static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2436 {
2437     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2438 
2439     if (!m34) {
2440         return DISAS_NORETURN;
2441     }
2442     gen_helper_fidb(o->out, cpu_env, o->in2, m34);
2443     return DISAS_NEXT;
2444 }
2445 
2446 static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2447 {
2448     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2449 
2450     if (!m34) {
2451         return DISAS_NORETURN;
2452     }
2453     gen_helper_fixb(o->out_128, cpu_env, o->in2_128, m34);
2454     return DISAS_NEXT;
2455 }
2456 
/* FIND LEFTMOST ONE: R1 = count of leading zeros (64 if input is zero),
   R1+1 = input with the found bit cleared; CC from the original input.  */
static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return DISAS_NEXT;
}
2476 
/* INSERT CHARACTERS UNDER MASK: insert the bytes selected by m3 from
   storage into R1.  Contiguous masks become a single sized load and
   deposit; sparse masks become a sequence of byte loads.  The CC mask
   (ccm) marks which bits of the result participate in CC computation.  */
static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, len, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t ccm;

    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit load.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
        len = 32;
        goto one_insert;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit load.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
        len = 16;
        goto one_insert;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit load.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        len = 8;
        goto one_insert;

    one_insert:
        pos = base + ctz32(m3) * 8;
        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
        ccm = ((1ull << len) - 1) << pos;
        break;

    default:
        /* This is going to be a sequence of loads and inserts.  */
        pos = base + 32 - 8;
        ccm = 0;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
                tcg_gen_addi_i64(o->in2, o->in2, 1);
                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
                ccm |= 0xffull << pos;
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }

    tcg_gen_movi_i64(tmp, ccm);
    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
    return DISAS_NEXT;
}
2535 
/* Insert in2 into in1 at the bit position/width encoded in insn->data
   (shift in the low byte, field size in the bits above).  */
static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return DISAS_NEXT;
}
2543 
/* INSERT PROGRAM MASK: materialize the CC, then build
   (cc << 4) | program-mask and deposit that byte into bits 24-31 of
   R1, leaving the remaining bits untouched.  */
static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    gen_op_calc_cc(s);
    t1 = tcg_temp_new_i64();
    /* t1 = program mask, PSW mask bits 40-43.  */
    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
    t2 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t2, cc_op);
    /* t1 |= cc << 4.  */
    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
    return DISAS_NEXT;
}
2557 
2558 #ifndef CONFIG_USER_ONLY
/* INVALIDATE DAT TABLE ENTRY: m4 is only meaningful with the
   local-TLB-clearing facility; otherwise force it to zero.  */
static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_constant_i32(get_field(s, m4));
    } else {
        m4 = tcg_constant_i32(0);
    }
    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
    return DISAS_NEXT;
}

/* INVALIDATE PAGE TABLE ENTRY: same m4 handling as op_idte.  */
static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_constant_i32(get_field(s, m4));
    } else {
        m4 = tcg_constant_i32(0);
    }
    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
    return DISAS_NEXT;
}

/* INSERT STORAGE KEY EXTENDED: fetch the storage key for the address
   in in2 via the helper.  */
static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
    gen_helper_iske(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
2590 #endif
2591 
2592 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2593 {
2594     int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2595     int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2596     int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2597     TCGv_i32 t_r1, t_r2, t_r3, type;
2598 
2599     switch (s->insn->data) {
2600     case S390_FEAT_TYPE_KMA:
2601         if (r3 == r1 || r3 == r2) {
2602             gen_program_exception(s, PGM_SPECIFICATION);
2603             return DISAS_NORETURN;
2604         }
2605         /* FALL THROUGH */
2606     case S390_FEAT_TYPE_KMCTR:
2607         if (r3 & 1 || !r3) {
2608             gen_program_exception(s, PGM_SPECIFICATION);
2609             return DISAS_NORETURN;
2610         }
2611         /* FALL THROUGH */
2612     case S390_FEAT_TYPE_PPNO:
2613     case S390_FEAT_TYPE_KMF:
2614     case S390_FEAT_TYPE_KMC:
2615     case S390_FEAT_TYPE_KMO:
2616     case S390_FEAT_TYPE_KM:
2617         if (r1 & 1 || !r1) {
2618             gen_program_exception(s, PGM_SPECIFICATION);
2619             return DISAS_NORETURN;
2620         }
2621         /* FALL THROUGH */
2622     case S390_FEAT_TYPE_KMAC:
2623     case S390_FEAT_TYPE_KIMD:
2624     case S390_FEAT_TYPE_KLMD:
2625         if (r2 & 1 || !r2) {
2626             gen_program_exception(s, PGM_SPECIFICATION);
2627             return DISAS_NORETURN;
2628         }
2629         /* FALL THROUGH */
2630     case S390_FEAT_TYPE_PCKMO:
2631     case S390_FEAT_TYPE_PCC:
2632         break;
2633     default:
2634         g_assert_not_reached();
2635     };
2636 
2637     t_r1 = tcg_constant_i32(r1);
2638     t_r2 = tcg_constant_i32(r2);
2639     t_r3 = tcg_constant_i32(r3);
2640     type = tcg_constant_i32(s->insn->data);
2641     gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2642     set_cc_static(s);
2643     return DISAS_NEXT;
2644 }
2645 
/* BFP compare-and-signal (short): the helper performs the comparison
   and sets the CC.  */
static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* BFP compare-and-signal (long).  */
static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* BFP compare-and-signal (extended, 128-bit operands).  */
static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, cpu_env, o->in1_128, o->in2_128);
    set_cc_static(s);
    return DISAS_NEXT;
}
2666 
/* LOAD AND ADD: atomically add in1 to the storage operand.  */
static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       the atomic fetch-add leaves that value in in2.  */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC.  */
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* LOAD AND AND: atomically AND in1 into the storage operand.  */
static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       the atomic fetch-and leaves that value in in2.  */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* LOAD AND OR: atomically OR in1 into the storage operand.  */
static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       the atomic fetch-or leaves that value in in2.  */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* LOAD AND EXCLUSIVE OR: atomically XOR in1 into the storage operand.  */
static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       the atomic fetch-xor leaves that value in in2.  */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2710 
/* LOAD LENGTHENED short BFP -> long BFP, via helper.  */
static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
2716 
2717 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2718 {
2719     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2720 
2721     if (!m34) {
2722         return DISAS_NORETURN;
2723     }
2724     gen_helper_ledb(o->out, cpu_env, o->in2, m34);
2725     return DISAS_NEXT;
2726 }
2727 
2728 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2729 {
2730     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2731 
2732     if (!m34) {
2733         return DISAS_NORETURN;
2734     }
2735     gen_helper_ldxb(o->out, cpu_env, o->in2_128, m34);
2736     return DISAS_NEXT;
2737 }
2738 
2739 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2740 {
2741     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2742 
2743     if (!m34) {
2744         return DISAS_NORETURN;
2745     }
2746     gen_helper_lexb(o->out, cpu_env, o->in2_128, m34);
2747     return DISAS_NEXT;
2748 }
2749 
/* LOAD LENGTHENED long BFP -> extended BFP, via helper.  */
static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out_128, cpu_env, o->in2);
    return DISAS_NEXT;
}

/* LOAD LENGTHENED short BFP -> extended BFP, via helper.  */
static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out_128, cpu_env, o->in2);
    return DISAS_NEXT;
}

/* The 32-bit operand becomes the high half of the 64-bit FP result.  */
static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
{
    tcg_gen_shli_i64(o->out, o->in2, 32);
    return DISAS_NEXT;
}

/* LOAD LOGICAL THIRTY ONE BITS: keep only the low 31 bits.  */
static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return DISAS_NEXT;
}
2773 
/* Load a sign-extended byte from the operand-2 address.  */
static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_SB);
    return DISAS_NEXT;
}

/* Load a zero-extended byte.  */
static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_UB);
    return DISAS_NEXT;
}

/* Load a sign-extended 16-bit value (target endian).  */
static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TESW);
    return DISAS_NEXT;
}

/* Load a zero-extended 16-bit value.  */
static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUW);
    return DISAS_NEXT;
}

/* Load a sign-extended 32-bit value; insn->data may carry extra MemOp
   flags (e.g. alignment) for specific instructions.  */
static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
                       MO_TESL | s->insn->data);
    return DISAS_NEXT;
}

/* Load a zero-extended 32-bit value; insn->data may add MemOp flags.  */
static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
                       MO_TEUL | s->insn->data);
    return DISAS_NEXT;
}

/* Load a 64-bit value; insn->data may add MemOp flags.  */
static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s),
                        MO_TEUQ | s->insn->data);
    return DISAS_NEXT;
}
2818 
/* LOAD AND TRAP (32-bit): store the value into R1, then trap if it is
   zero.  */
static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

/* LOAD AND TRAP (64-bit): load from storage, then trap if zero.  */
static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUQ);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

/* LOAD HIGH AND TRAP: store into the high word of R1, trap if zero.  */
static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

/* LOAD LOGICAL AND TRAP (32->64): zero-extending load, trap if zero.  */
static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();

    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

/* LOAD LOGICAL THIRTY ONE BITS AND TRAP: mask to 31 bits, trap if zero.  */
static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2874 
/* LOAD ON CONDITION / SELECT: out = condition ? in2 : in1.  The mask
   field comes from m3 (LOC*) or m4 (SELECT).  For 32-bit comparisons the
   condition is first widened to 64 bits so a single movcond suffices.  */
static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    if (have_field(s, m3)) {
        /* LOAD * ON CONDITION */
        disas_jcc(s, &c, get_field(s, m3));
    } else {
        /* SELECT */
        disas_jcc(s, &c, get_field(s, m4));
    }

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
    } else {
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);

        z = tcg_constant_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
    }

    return DISAS_NEXT;
}
2905 
2906 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): loading control registers may unmask pending
   interrupts, hence the exit to the main loop.  */
static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}

/* LOAD CONTROL (64-bit): same main-loop exit as op_lctl.  */
static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}

/* LOAD REAL ADDRESS: translation is done in the helper; CC from helper.  */
static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* LOAD PROGRAM PARAMETER: store in2 into env->pp.  */
static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
{
    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
    return DISAS_NEXT;
}

/* LOAD PSW: load an 8-byte short-format PSW and expand it.  */
static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 mask, addr;

    per_breaking_event(s);

    /*
     * Convert the short PSW into the normal PSW, similar to what
     * s390_cpu_load_normal() does.
     */
    mask = tcg_temp_new_i64();
    addr = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(mask, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN_8);
    tcg_gen_andi_i64(addr, mask, PSW_MASK_SHORT_ADDR);
    tcg_gen_andi_i64(mask, mask, PSW_MASK_SHORT_CTRL);
    tcg_gen_xori_i64(mask, mask, PSW_MASK_SHORTPSW);
    gen_helper_load_psw(cpu_env, mask, addr);
    return DISAS_NORETURN;
}

/* LOAD PSW EXTENDED: load a 16-byte PSW (mask then address).  */
static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUQ | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld_i64(t2, o->in2, get_mem_index(s), MO_TEUQ);
    gen_helper_load_psw(cpu_env, t1, t2);
    return DISAS_NORETURN;
}
2977 #endif
2978 
2979 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
2980 {
2981     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2982     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2983 
2984     gen_helper_lam(cpu_env, r1, o->in2, r3);
2985     return DISAS_NEXT;
2986 }
2987 
/* LM/LMY: load multiple 32-bit registers r1..r3 (with wraparound) from in2. */
static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32_i64(r1, t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
    /* (r3 - r1) & 15 handles register-number wraparound past 15. */
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32_i64(r1, t1);
    }
    return DISAS_NEXT;
}
3027 
/* LMH: load multiple into the HIGH halves of registers r1..r3 from in2. */
static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32h_i64(r1, t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
    /* (r3 - r1) & 15 handles register-number wraparound past 15. */
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32h_i64(r1, t1);
    }
    return DISAS_NEXT;
}
3067 
/* LMG: load multiple 64-bit registers r1..r3 (with wraparound) from in2. */
static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
    /* (r3 - r1) & 15 handles register-number wraparound past 15. */
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(regs[r3], t2, get_mem_index(s), MO_TEUQ);
    /* Commit r1 only after the last-register load could have faulted. */
    tcg_gen_mov_i64(regs[r1], t1);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
    }
    return DISAS_NEXT;
}
3104 
/* LPD/LPDG: load pair disjoint; two loads that must appear interlocked. */
static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    MemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step.  */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return DISAS_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
    a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);

    /* ... and indicate that we performed them while interlocked.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
3128 
/* LPQ: load a 16-byte quadword atomically into the 128-bit output. */
static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
{
    o->out_128 = tcg_temp_new_i128();
    tcg_gen_qemu_ld_i128(o->out_128, o->in2, get_mem_index(s),
                         MO_TE | MO_128 | MO_ALIGN);
    return DISAS_NEXT;
}
3136 
3137 #ifndef CONFIG_USER_ONLY
3138 static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3139 {
3140     tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
3141     return DISAS_NEXT;
3142 }
3143 #endif
3144 
/* Load-and-zero-rightmost-byte: copy in2 with the low 8 bits cleared. */
static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, -256);
    return DISAS_NEXT;
}
3150 
/* LCBB: load count to block boundary; min(16, bytes to next boundary). */
static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
{
    /* Block size is 64 << m3 bytes. */
    const int64_t block_size = (1ull << (get_field(s, m3) + 6));

    if (get_field(s, m3) > 6) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* -(addr | -block_size) == block_size - (addr % block_size). */
    tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
    tcg_gen_neg_i64(o->addr1, o->addr1);
    tcg_gen_movi_i64(o->out, 16);
    tcg_gen_umin_i64(o->out, o->out, o->addr1);
    gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
    return DISAS_NEXT;
}
3167 
/* MC: monitor call; i2 holds the monitor class (must fit in 8 bits). */
static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
{
    const uint16_t monitor_class = get_field(s, i2);

    if (monitor_class & 0xff00) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

#if !defined(CONFIG_USER_ONLY)
    gen_helper_monitor_call(cpu_env, o->addr1,
                            tcg_constant_i32(monitor_class));
#endif
    /* Defaults to a NOP. */
    return DISAS_NEXT;
}
3184 
/* Generic move: hand in2 over as the output (steal the TCG temp). */
static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    /* Clear in2 so the generic epilogue does not reuse/free it. */
    o->in2 = NULL;
    return DISAS_NEXT;
}
3191 
/* MVCDK-style move that also sets access register 1 per the current ASC. */
static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    TCGv ar1 = tcg_temp_new_i64();

    /* Steal in2 as the output, as in op_mov2. */
    o->out = o->in2;
    o->in2 = NULL;

    /* Derive the AR1 value from the PSW address-space control. */
    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        /* In AR mode with b2 == 0, the effective AR value is 0. */
        if (b2) {
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    return DISAS_NEXT;
}
3222 
/* Move a register pair: steal in1/in2 as the out/out2 temps. */
static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    /* Clear the inputs so the generic epilogue does not reuse them. */
    o->in1 = NULL;
    o->in2 = NULL;
    return DISAS_NEXT;
}
3231 
3232 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3233 {
3234     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3235 
3236     gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3237     return DISAS_NEXT;
3238 }
3239 
/* MVCRL: move right to left; the length is taken from register 0. */
static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
{
    gen_helper_mvcrl(cpu_env, regs[0], o->addr1, o->in2);
    return DISAS_NEXT;
}
3245 
3246 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3247 {
3248     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3249 
3250     gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3251     return DISAS_NEXT;
3252 }
3253 
3254 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3255 {
3256     int r1 = get_field(s, r1);
3257     int r2 = get_field(s, r2);
3258     TCGv_i32 t1, t2;
3259 
3260     /* r1 and r2 must be even.  */
3261     if (r1 & 1 || r2 & 1) {
3262         gen_program_exception(s, PGM_SPECIFICATION);
3263         return DISAS_NORETURN;
3264     }
3265 
3266     t1 = tcg_constant_i32(r1);
3267     t2 = tcg_constant_i32(r2);
3268     gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3269     set_cc_static(s);
3270     return DISAS_NEXT;
3271 }
3272 
3273 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3274 {
3275     int r1 = get_field(s, r1);
3276     int r3 = get_field(s, r3);
3277     TCGv_i32 t1, t3;
3278 
3279     /* r1 and r3 must be even.  */
3280     if (r1 & 1 || r3 & 1) {
3281         gen_program_exception(s, PGM_SPECIFICATION);
3282         return DISAS_NORETURN;
3283     }
3284 
3285     t1 = tcg_constant_i32(r1);
3286     t3 = tcg_constant_i32(r3);
3287     gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3288     set_cc_static(s);
3289     return DISAS_NEXT;
3290 }
3291 
3292 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3293 {
3294     int r1 = get_field(s, r1);
3295     int r3 = get_field(s, r3);
3296     TCGv_i32 t1, t3;
3297 
3298     /* r1 and r3 must be even.  */
3299     if (r1 & 1 || r3 & 1) {
3300         gen_program_exception(s, PGM_SPECIFICATION);
3301         return DISAS_NORETURN;
3302     }
3303 
3304     t1 = tcg_constant_i32(r1);
3305     t3 = tcg_constant_i32(r3);
3306     gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3307     set_cc_static(s);
3308     return DISAS_NEXT;
3309 }
3310 
/* MVCOS: move with optional specifications; r3 supplies the length. */
static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3318 
3319 #ifndef CONFIG_USER_ONLY
3320 static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3321 {
3322     int r1 = get_field(s, l1);
3323     int r3 = get_field(s, r3);
3324     gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]);
3325     set_cc_static(s);
3326     return DISAS_NEXT;
3327 }
3328 
/* MVCS: move to secondary space; l1 holds the true-length register number. */
static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    int r3 = get_field(s, r3);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3337 #endif
3338 
3339 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3340 {
3341     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3342 
3343     gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3344     return DISAS_NEXT;
3345 }
3346 
3347 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3348 {
3349     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3350 
3351     gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3352     return DISAS_NEXT;
3353 }
3354 
3355 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3356 {
3357     TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
3358     TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
3359 
3360     gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);
3361     set_cc_static(s);
3362     return DISAS_NEXT;
3363 }
3364 
3365 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3366 {
3367     TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
3368     TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
3369 
3370     gen_helper_mvst(cc_op, cpu_env, t1, t2);
3371     set_cc_static(s);
3372     return DISAS_NEXT;
3373 }
3374 
3375 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3376 {
3377     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3378 
3379     gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3380     return DISAS_NEXT;
3381 }
3382 
/* Integer multiply, low 64 bits of the product. */
static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3388 
/* Unsigned 64x64->128 multiply: high half in out, low half in out2. */
static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3394 
/* Signed 64x64->128 multiply: high half in out, low half in out2. */
static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
{
    tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3400 
/* MEEB: BFP short (32-bit) multiply via helper. */
static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3406 
/* MDEB: multiply short operands producing a long (64-bit) BFP result. */
static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3412 
/* MDB: BFP long (64-bit) multiply via helper. */
static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3418 
/* MXB: BFP extended (128-bit) multiply via helper. */
static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
3424 
/* MXDB: multiply long operands producing an extended (128-bit) result. */
static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out_128, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3430 
/* MAEB: BFP short multiply-and-add; r3 is the addend register. */
static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3437 
/* MADB: BFP long multiply-and-add; r3 is the addend register. */
static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3444 
/* MSEB: BFP short multiply-and-subtract; r3 is the subtrahend register. */
static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3451 
/* MSDB: BFP long multiply-and-subtract; r3 is the subtrahend register. */
static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3458 
3459 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3460 {
3461     TCGv_i64 z = tcg_constant_i64(0);
3462     TCGv_i64 n = tcg_temp_new_i64();
3463 
3464     tcg_gen_neg_i64(n, o->in2);
3465     tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3466     return DISAS_NEXT;
3467 }
3468 
/* LNEBR-style: force the 32-bit float sign bit on (negative absolute). */
static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
3474 
/* Force the 64-bit float sign bit on (negative absolute). */
static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
3480 
/* 128-bit negative absolute: set the sign bit in the high half only. */
static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3487 
3488 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3489 {
3490     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3491 
3492     gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3493     set_cc_static(s);
3494     return DISAS_NEXT;
3495 }
3496 
/* Two's-complement negation. */
static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3502 
/* Flip the 32-bit float sign bit (load complement). */
static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
3508 
/* Flip the 64-bit float sign bit (load complement). */
static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
3514 
/* 128-bit load complement: flip the sign bit in the high half only. */
static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3521 
3522 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3523 {
3524     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3525 
3526     gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3527     set_cc_static(s);
3528     return DISAS_NEXT;
3529 }
3530 
/* Bitwise OR of the two inputs. */
static DisasJumpType op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3536 
/* OR-immediate into a sub-field; insn->data packs (size << 8) | shift. */
static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    /* Mask covering the 'size'-bit field at bit position 'shift'. */
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
3552 
/* OI-style OR to storage, atomically when interlocked-access is available. */
static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        /* Non-atomic path: load, OR below, store below. */
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
3573 
3574 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3575 {
3576     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3577 
3578     gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3579     return DISAS_NEXT;
3580 }
3581 
3582 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3583 {
3584     int l2 = get_field(s, l2) + 1;
3585     TCGv_i32 l;
3586 
3587     /* The length must not exceed 32 bytes.  */
3588     if (l2 > 32) {
3589         gen_program_exception(s, PGM_SPECIFICATION);
3590         return DISAS_NORETURN;
3591     }
3592     l = tcg_constant_i32(l2);
3593     gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3594     return DISAS_NEXT;
3595 }
3596 
3597 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3598 {
3599     int l2 = get_field(s, l2) + 1;
3600     TCGv_i32 l;
3601 
3602     /* The length must be even and should not exceed 64 bytes.  */
3603     if ((l2 & 1) || (l2 > 64)) {
3604         gen_program_exception(s, PGM_SPECIFICATION);
3605         return DISAS_NORETURN;
3606     }
3607     l = tcg_constant_i32(l2);
3608     gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3609     return DISAS_NEXT;
3610 }
3611 
/* POPCNT: per-byte population count, or full 64-bit count with m3 bit 8
   when the MIE3 facility is available. */
static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
{
    const uint8_t m3 = get_field(s, m3);

    if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) {
        /* Count over the whole doubleword. */
        tcg_gen_ctpop_i64(o->out, o->in2);
    } else {
        /* Helper counts within each byte separately. */
        gen_helper_popcnt(o->out, o->in2);
    }
    return DISAS_NEXT;
}
3623 
3624 #ifndef CONFIG_USER_ONLY
/* PTLB: purge the TLB via helper. */
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
    gen_helper_ptlb(cpu_env);
    return DISAS_NEXT;
}
3630 #endif
3631 
/* RISBG/RISBGN/RISBHG/RISBLG: rotate then insert selected bits.
   Bits i3..i4 of in2 rotated left by i5 are inserted into out; i4 bit 0x80
   requests zeroing the remaining bits.  Fast paths use extract/deposit. */
static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  */
    switch (s->fields.op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 = (i3 & 31) + 32;
        i4 = (i4 & 31) + 32;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2. */
    if (i3 <= i4) {
        /* [0...i3---i4...63] */
        mask = (-1ull >> i3) & (-1ull << (63 - i4));
    } else {
        /* [0---i4...i3---63] */
        mask = (-1ull >> i3) | (-1ull << (63 - i4));
    }
    /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register.  */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;

    /* In some cases we can implement this with extract.  */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return DISAS_NEXT;
    }

    /* In some cases we can implement this with deposit.  */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO.  */
        rot = (rot - pos) & 63;
    } else {
        /* pos < 0 flags "no deposit fast path" for the code below. */
        pos = -1;
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output.  */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        /* General case: merge the masked halves of in2 and out. */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return DISAS_NEXT;
}
3719 
3720 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3721 {
3722     int i3 = get_field(s, i3);
3723     int i4 = get_field(s, i4);
3724     int i5 = get_field(s, i5);
3725     TCGv_i64 orig_out;
3726     uint64_t mask;
3727 
3728     /* If this is a test-only form, arrange to discard the result.  */
3729     if (i3 & 0x80) {
3730         tcg_debug_assert(o->out != NULL);
3731         orig_out = o->out;
3732         o->out = tcg_temp_new_i64();
3733         tcg_gen_mov_i64(o->out, orig_out);
3734     }
3735 
3736     i3 &= 63;
3737     i4 &= 63;
3738     i5 &= 63;
3739 
3740     /* MASK is the set of bits to be operated on from R2.
3741        Take care for I3/I4 wraparound.  */
3742     mask = ~0ull >> i3;
3743     if (i3 <= i4) {
3744         mask ^= ~0ull >> i4 >> 1;
3745     } else {
3746         mask |= ~(~0ull >> i4 >> 1);
3747     }
3748 
3749     /* Rotate the input as necessary.  */
3750     tcg_gen_rotli_i64(o->in2, o->in2, i5);
3751 
3752     /* Operate.  */
3753     switch (s->fields.op2) {
3754     case 0x54: /* AND */
3755         tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3756         tcg_gen_and_i64(o->out, o->out, o->in2);
3757         break;
3758     case 0x56: /* OR */
3759         tcg_gen_andi_i64(o->in2, o->in2, mask);
3760         tcg_gen_or_i64(o->out, o->out, o->in2);
3761         break;
3762     case 0x57: /* XOR */
3763         tcg_gen_andi_i64(o->in2, o->in2, mask);
3764         tcg_gen_xor_i64(o->out, o->out, o->in2);
3765         break;
3766     default:
3767         abort();
3768     }
3769 
3770     /* Set the CC.  */
3771     tcg_gen_andi_i64(cc_dst, o->out, mask);
3772     set_cc_nz_u64(s, cc_dst);
3773     return DISAS_NEXT;
3774 }
3775 
/* Byte-swap the low 16 bits; input and output zero-extended. */
static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}
3781 
/* Byte-swap the low 32 bits; input and output zero-extended. */
static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}
3787 
/* Byte-swap the full 64-bit value. */
static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3793 
3794 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3795 {
3796     TCGv_i32 t1 = tcg_temp_new_i32();
3797     TCGv_i32 t2 = tcg_temp_new_i32();
3798     TCGv_i32 to = tcg_temp_new_i32();
3799     tcg_gen_extrl_i64_i32(t1, o->in1);
3800     tcg_gen_extrl_i64_i32(t2, o->in2);
3801     tcg_gen_rotl_i32(to, t1, t2);
3802     tcg_gen_extu_i32_i64(o->out, to);
3803     return DISAS_NEXT;
3804 }
3805 
/* RLLG: 64-bit rotate left. */
static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3811 
3812 #ifndef CONFIG_USER_ONLY
/* RRBE: reset reference bit extended; helper sets the condition code. */
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3819 
/* SACF: set address-space control fast. */
static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block.  */
    return DISAS_TOO_MANY;
}
3826 #endif
3827 
/* SAM24/SAM31/SAM64: set addressing mode; insn->data selects the mode. */
static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    /* Address mask a valid PC must satisfy in the target mode. */
    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    /* Truncate the next-PC to the new addressing mode. */
    s->pc_tmp &= mask;

    tsam = tcg_constant_i64(sam);
    /* Update the two addressing-mode bits in the PSW mask. */
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);

    /* Always exit the TB, since we (may have) changed execution mode.  */
    return DISAS_TOO_MANY;
}
3861 
/* SAR: set access register r1 from the operand. */
static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}
3868 
/* SEB: BFP short (32-bit) subtract via helper. */
static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3874 
/* SDB: BFP long (64-bit) subtract via helper. */
static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3880 
/* SXB: BFP extended (128-bit) subtract via helper. */
static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
3886 
/* SQEB: BFP short square root via helper. */
static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
3892 
/* SQDB: BFP long square root via helper. */
static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
3898 
/* SQXB: BFP extended square root via helper. */
static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out_128, cpu_env, o->in2_128);
    return DISAS_NEXT;
}
3904 
3905 #ifndef CONFIG_USER_ONLY
/* SERVC: service call (SCLP); helper sets the condition code. */
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}
3912 
3913 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
3914 {
3915     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
3916     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
3917 
3918     gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
3919     set_cc_static(s);
3920     return DISAS_NEXT;
3921 }
3922 #endif
3923 
/* STOC/STOCG/STOCFH: store on condition; insn->data selects the variant. */
static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }

    r1 = get_field(s, r1);
    a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUQ);
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUL);
        break;
    case 2: /* STOCFH */
        /* Store the high 32 bits of the register. */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st_i64(h, a, get_mem_index(s), MO_TEUL);
        break;
    default:
        g_assert_not_reached();
    }

    gen_set_label(lab);
    return DISAS_NEXT;
}
3965 
/* SLA/SLAG: shift left single (arithmetic); insn->data is the sign-bit
   position (31 for the 32-bit form, 63 for the 64-bit form). */
static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;
    uint64_t sign = 1ull << s->insn->data;
    if (s->insn->data == 31) {
        /* For the 32-bit form, compute overflow CC over the value shifted
           into the high half. */
        t = tcg_temp_new_i64();
        tcg_gen_shli_i64(t, o->in1, 32);
    } else {
        t = o->in1;
    }
    gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}
3985 
/* Logical shift left. */
static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3991 
3992 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
3993 {
3994     tcg_gen_sar_i64(o->out, o->in1, o->in2);
3995     return DISAS_NEXT;
3996 }
3997 
3998 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
3999 {
4000     tcg_gen_shr_i64(o->out, o->in1, o->in2);
4001     return DISAS_NEXT;
4002 }
4003 
/* SET FPC: load the floating-point control register via helper.  */
static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return DISAS_NEXT;
}

/* SET FPC AND SIGNAL: as SFPC, but may raise a simulated IEEE exception
   (delegated to the helper).  */
static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return DISAS_NEXT;
}
4015 
/* SET BFP ROUNDING MODE (2-bit form): rounding mode from the low two
   bits of the second-operand address.  */
static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
{
    /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
    gen_helper_srnm(cpu_env, o->addr1);
    return DISAS_NEXT;
}

/* SET BFP ROUNDING MODE (3-bit form): rounding mode from the low byte
   of the second-operand address.  */
static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
{
    /* Bits 0-55 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
    gen_helper_srnm(cpu_env, o->addr1);
    return DISAS_NEXT;
}

/* SET DFP ROUNDING MODE: deposit the 3-bit mode into the FPC DRM field
   (3 bits at offset 4) directly, without a helper.  */
static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bits other than 61-63 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);

    /* No need to call a helper, we don't implement dfp */
    tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
    tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
4045 
/* SET PROGRAM MASK: set the CC from bits 28-29 of r1's low word, and
   the PSW program-mask field from the adjacent 4 bits.  */
static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
{
    /* CC comes from bits 28-29 of the 32-bit operand.  */
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    /* Program mask: 4 bits deposited at PSW_SHIFT_MASK_PM.  */
    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return DISAS_NEXT;
}
4056 
/* ECTG (EXTRACT CPU TIME): GR0 = first operand - CPU timer,
   GR1 = second-operand address, r3 = doubleword at r3's address.  */
static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s, b1);
    int d1 = get_field(s, d1);
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);
    int r3 = get_field(s, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld_i64(regs[r3], o->addr1, get_mem_index(s), MO_TEUQ);

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, cpu_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);
    return DISAS_NEXT;
}
4085 
4086 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: PSW key = bits of the second-operand
   address, deposited into the PSW mask key field.  */
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return DISAS_NEXT;
}

/* SET STORAGE KEY EXTENDED: delegated to helper.  */
static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* Raise a specification exception at runtime if any reserved bit of the
   (just modified) PSW mask is set; otherwise fall through.  */
static void gen_check_psw_mask(DisasContext *s)
{
    TCGv_i64 reserved = tcg_temp_new_i64();
    TCGLabel *ok = gen_new_label();

    tcg_gen_andi_i64(reserved, psw_mask, PSW_MASK_RESERVED);
    tcg_gen_brcondi_i64(TCG_COND_EQ, reserved, 0, ok);
    gen_program_exception(s, PGM_SPECIFICATION);
    gen_set_label(ok);
}
4110 
/* SET SYSTEM MASK: replace PSW mask bits 0-7 (the system mask) with the
   operand byte, then validate the result.  */
static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);

    gen_check_psw_mask(s);

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}

/* STORE CPU ADDRESS: the core id serves as the CPU address.  */
static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
    return DISAS_NEXT;
}
4127 #endif
4128 
/* STORE CLOCK: TOD clock value via helper; CC always 0.  */
static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}

/* STORE CLOCK EXTENDED: 16-byte result = zero-extended 104-bit clock
   value plus the TOD programmable field in the low bytes.  */
static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* 16 bit value store in an uint32_t (only valid bits set) */
    tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    tcg_gen_qemu_st_i64(c1, o->in2, get_mem_index(s), MO_TEUQ);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st_i64(c2, o->in2, get_mem_index(s), MO_TEUQ);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4160 
4161 #ifndef CONFIG_USER_ONLY
/* SET CLOCK: helper sets the TOD clock and yields the CC.  */
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
    gen_helper_sck(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET CLOCK COMPARATOR.  */
static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
{
    gen_helper_sckc(cpu_env, o->in2);
    return DISAS_NEXT;
}

/* SET CLOCK PROGRAMMABLE FIELD: value taken from GR0.  */
static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
{
    gen_helper_sckpf(cpu_env, regs[0]);
    return DISAS_NEXT;
}

/* STORE CLOCK COMPARATOR.  */
static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
{
    gen_helper_stckc(o->out, cpu_env);
    return DISAS_NEXT;
}
4186 
/* STORE CONTROL (64-bit): store control registers r1..r3 via helper.  */
static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    return DISAS_NEXT;
}

/* STORE CONTROL (32-bit): store control registers r1..r3 via helper.  */
static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    return DISAS_NEXT;
}

/* STORE CPU ID.  */
static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
    return DISAS_NEXT;
}

/* SET CPU TIMER.  */
static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
    gen_helper_spt(cpu_env, o->in2);
    return DISAS_NEXT;
}

/* STORE FACILITY LIST.  */
static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
    gen_helper_stfl(cpu_env);
    return DISAS_NEXT;
}

/* STORE CPU TIMER.  */
static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
    gen_helper_stpt(o->out, cpu_env);
    return DISAS_NEXT;
}
4228 
/* STORE SYSTEM INFORMATION: function code in GR0, selectors in GR1.  */
static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
{
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET PREFIX.  */
static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
    gen_helper_spx(cpu_env, o->in2);
    return DISAS_NEXT;
}
4241 
/* Channel-subsystem instructions: each takes the subchannel id (or other
   operand) in GR1 and delegates to a helper that also produces the CC.  */

/* CANCEL SUBCHANNEL.  */
static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
{
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CLEAR SUBCHANNEL.  */
static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
{
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* HALT SUBCHANNEL.  */
static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
{
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MODIFY SUBCHANNEL.  */
static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
{
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* RESET CHANNEL PATH.  */
static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
{
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* RESUME SUBCHANNEL.  */
static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
{
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET ADDRESS LIMIT.  */
static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
{
    gen_helper_sal(cpu_env, regs[1]);
    return DISAS_NEXT;
}

/* SET CHANNEL MONITOR.  */
static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
{
    gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
    return DISAS_NEXT;
}

/* SIGNAL ADAPTER.  */
static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
{
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return DISAS_NEXT;
}
4302 
/* STORE CHANNEL PATH STATUS.  */
static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    /* The instruction is suppressed if not provided. */
    return DISAS_NEXT;
}

/* START SUBCHANNEL.  */
static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE SUBCHANNEL.  */
static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE CHANNEL REPORT WORD.  */
static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
    gen_helper_stcrw(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST PENDING INTERRUPTION.  */
static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
    gen_helper_tpi(cc_op, cpu_env, o->addr1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST SUBCHANNEL.  */
static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CHANNEL SUBSYSTEM CALL.  */
static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4350 
/* STORE PREFIX: prefix register masked to its architected bits.  */
static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}
4357 
/* STORE THEN AND/OR SYSTEM MASK: store the current system-mask byte,
   then AND (opcode 0xac, STNSM) or OR (STOSM) the immediate into it.  */
static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s, i2);
    TCGv_i64 t;

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st_i64(t, o->addr1, get_mem_index(s), MO_UB);

    if (s->fields.op == 0xac) {
        /* STNSM: AND the immediate into the high byte only.  */
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        /* STOSM: OR the immediate into the high byte.  */
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    gen_check_psw_mask(s);

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
4383 
/* STORE USING REAL ADDRESS: store via MMU_REAL_IDX; s->insn->data holds
   the MemOp size.  Notify PER of the real-address store if enabled.  */
static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        update_psw_addr(s);
        gen_helper_per_store_real(cpu_env);
    }
    return DISAS_NEXT;
}
4394 #endif
4395 
/* STORE FACILITY LIST EXTENDED: helper produces the CC.  */
static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Store the low byte of in1 at address in2.  */
static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_UB);
    return DISAS_NEXT;
}

/* Store the low halfword of in1 at address in2 (big-endian).  */
static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_TEUW);
    return DISAS_NEXT;
}

/* 32-bit store; s->insn->data supplies extra MemOp flags.  */
static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, get_mem_index(s),
                       MO_TEUL | s->insn->data);
    return DISAS_NEXT;
}

/* 64-bit store; s->insn->data supplies extra MemOp flags.  */
static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s),
                        MO_TEUQ | s->insn->data);
    return DISAS_NEXT;
}
4428 
/* STORE ACCESS MULTIPLE: store access registers r1..r3 via helper.  */
static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stam(cpu_env, r1, o->in2, r3);
    return DISAS_NEXT;
}
4437 
/* STORE CHARACTERS UNDER MASK: store the r1 bytes selected by mask m3.
   s->insn->data is the bit position of the source word within r1 (so
   the high-half variants can select the upper word).  Contiguous masks
   become a single store; anything else becomes byte stores.  */
static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Lowest selected byte determines the shift amount.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    return DISAS_NEXT;
}
4485 
/* STORE MULTIPLE: store registers r1..r3 (wrapping at 15) to successive
   words/doublewords; s->insn->data is the element size (4 or 8).  */
static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_constant_i64(size);

    while (1) {
        tcg_gen_qemu_st_i64(regs[r1], o->in2, get_mem_index(s),
                            size == 8 ? MO_TEUQ : MO_TEUL);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    return DISAS_NEXT;
}

/* STORE MULTIPLE HIGH: store the high words of r1..r3 to successive
   32-bit locations.  */
static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_constant_i64(4);
    TCGv_i64 t32 = tcg_constant_i64(32);

    while (1) {
        /* The 32-bit store takes the LOW half of t, so shift the high
           word down by shifting left then storing the (big-endian) top;
           here: shl by 32 puts the high word where MO_TEUL reads it.  */
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st_i64(t, o->in2, get_mem_index(s), MO_TEUL);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }
    return DISAS_NEXT;
}
4525 
/* STORE PAIR TO QUADWORD: aligned atomic 128-bit store of the even/odd
   register pair.  */
static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t16 = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(t16, o->out2, o->out);
    tcg_gen_qemu_st_i128(t16, o->in2, get_mem_index(s),
                         MO_TE | MO_128 | MO_ALIGN);
    return DISAS_NEXT;
}

/* SEARCH STRING: helper produces CC and updates r1/r2.  */
static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_srst(cpu_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SEARCH STRING UNICODE.  */
static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_srstu(cpu_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4555 
/* Plain 64-bit subtraction: out = in1 - in2.  */
static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* Unsigned subtract that also leaves the borrow-out in cc_src for the
   CC_OP_SUBU condition-code computation.  */
static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
4568 
/* Compute borrow (0, -1) into cc_src. */
/* Normalizes whatever the current CC state encodes into a borrow value
   usable by SUBTRACT WITH BORROW.  The case fallthroughs are deliberate:
   default computes the static CC, CC_OP_STATIC extracts the carry from
   it, and CC_OP_ADDU converts carry (1,0) into borrow (0,-1).  */
static void compute_borrow(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_SUBU:
        /* The borrow value is already in cc_src (0,-1). */
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        /* fall through */
    case CC_OP_ADDU:
        /* Convert carry (1,0) to borrow (0,-1). */
        tcg_gen_subi_i64(cc_src, cc_src, 1);
        break;
    }
}
4590 
/* SUBTRACT WITH BORROW (32-bit): out = in1 + borrow - in2.  */
static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /* Borrow is {0, -1}, so add to subtract. */
    tcg_gen_add_i64(o->out, o->in1, cc_src);
    tcg_gen_sub_i64(o->out, o->out, o->in2);
    return DISAS_NEXT;
}

/* SUBTRACT WITH BORROW (64-bit): 128-bit arithmetic so the borrow-out
   lands back in cc_src for the next CC computation.  */
static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /*
     * Borrow is {0, -1}, so add to subtract; replicate the
     * borrow input to produce 128-bit -1 for the addition.
     */
    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
    tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}
4615 
/* SUPERVISOR CALL: record the SVC code and instruction length in the
   CPU state, then raise EXCP_SVC.  Ends the TB.  */
static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    /* PSW address/cc must be up to date before taking the exception.  */
    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_constant_i32(get_field(s, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));

    t = tcg_constant_i32(s->ilen);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));

    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
}
4632 
4633 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4634 {
4635     int cc = 0;
4636 
4637     cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4638     cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4639     gen_op_movi_cc(s, cc);
4640     return DISAS_NEXT;
4641 }
4642 
/* TEST DATA CLASS (short BFP): helper yields the CC.  */
static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST DATA CLASS (long BFP).  */
static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST DATA CLASS (extended BFP): 128-bit first operand.  */
static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, cpu_env, o->in1_128, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4663 
4664 #ifndef CONFIG_USER_ONLY
4665 
/* TEST BLOCK: helper clears/tests the page and yields the CC.  */
static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
    gen_helper_testblock(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST PROTECTION: helper yields the CC.  */
static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4679 
4680 #endif
4681 
/* TEST DECIMAL: length is l1 + 1 bytes; helper yields the CC.  */
static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_constant_i32(get_field(s, l1) + 1);

    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE: replace bytes at addr1 via the table at in2.  */
static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE EXTENDED: helper returns the updated r1/r1+1 pair.  */
static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
{
    TCGv_i128 pair = tcg_temp_new_i128();

    gen_helper_tre(pair, cpu_env, o->out, o->out2, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, pair);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE AND TEST.  */
static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE AND TEST REVERSE.  */
static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4727 
/* TRANSLATE ONE/TWO TO ONE/TWO (TROO/TROT/TRTO/TRTT): the low opcode
   bits select the source/destination character sizes.  The test byte
   comes from GR0 unless m3 bit 1 (ETF2-ENH facility) disables it.  */
static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
    TCGv_i32 sizes = tcg_constant_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s, m3);

    /* m3 is only honored when the ETF2-enhancement facility exists.  */
    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        /* -1 means "no test character" to the helper — TODO confirm.  */
        tcg_gen_movi_i32(tst, -1);
    } else {
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);

    set_cc_static(s);
    return DISAS_NEXT;
}
4754 
/* TEST AND SET: atomically exchange the byte with 0xff; the CC is the
   former leftmost bit of that byte.  */
static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_constant_i32(0xff);

    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* UNPACK: delegated to helper.  */
static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
4772 
/* UNPACK ASCII: specification exception for over-long operands, else
   delegated to helper which also yields the CC.  */
static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l1);
    gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* UNPACK UNICODE: as UNPKA but with an even length up to 64 bytes.  */
static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l1);
    gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4804 
4805 
/* EXCLUSIVE OR (character): XOR of two storage operands.  The common
   "clear storage" idiom (identical operands) is inlined as zero stores;
   everything else goes through the helper.  */
static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s, d1);
    int d2 = get_field(s, d2);
    int b1 = get_field(s, b1);
    int b2 = get_field(s, b2);
    int l = get_field(s, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_constant_i64(0);

        /* Emit the widest stores the remaining length permits.  */
        l++;
        while (l >= 8) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UQ);
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UL);
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UW);
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UB);
        }
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_constant_i32(l);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4857 
/* Plain 64-bit XOR: out = in1 ^ in2.  */
static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* XOR IMMEDIATE: s->insn->data encodes the field size (high byte) and
   shift (low byte) of the immediate within the register.  */
static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
4879 
/* EXCLUSIVE OR immediate-to-storage: atomic when the interlocked-access
   facility is present, else load/modify/store.  s->insn->data is the
   MemOp.  */
static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
4900 
/* Produce a zero output operand.  */
static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_constant_i64(0);
    return DISAS_NEXT;
}

/* Produce a zero output operand pair.  */
static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_constant_i64(0);
    o->out2 = o->out;
    return DISAS_NEXT;
}
4913 
4914 #ifndef CONFIG_USER_ONLY
/* zPCI instructions: all delegate to helpers, most of which produce the
   CC.  */

/* CALL LOGICAL PROCESSOR.  */
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_clp(cpu_env, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCI LOAD.  */
static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_pcilg(cpu_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCI STORE.  */
static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_pcistg(cpu_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE PCI FUNCTION CONTROLS.  */
static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET INTERRUPTION CONTROLS.  */
static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
{
    gen_helper_sic(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* REFRESH PCI TRANSLATIONS.  */
static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_rpcit(cpu_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCI STORE BLOCK.  */
static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MODIFY PCI FUNCTION CONTROLS.  */
static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}
4990 #endif
4991 
4992 #include "translate_vx.c.inc"
4993 
4994 /* ====================================================================== */
4995 /* The "Cc OUTput" generators.  Given the generated output (and in some cases
4996    the original inputs), update the various cc data structures in order to
4997    be able to compute the new condition code.  */
4998 
/* CC from a 32-bit absolute-value result.  */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

/* CC from a 64-bit absolute-value result.  */
static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

/* CC from a 32-bit signed addition (inputs and result needed for
   overflow detection).  */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

/* CC from a 64-bit signed addition.  */
static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

/* CC from a 32-bit unsigned addition: carry is taken from the high
   half of the 64-bit result.  */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
}

/* CC from a 64-bit unsigned addition: carry already in cc_src.  */
static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
}

/* CC from a 32-bit signed comparison.  */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

/* CC from a 64-bit signed comparison.  */
static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

/* CC from a 32-bit unsigned comparison.  */
static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

/* CC from a 64-bit unsigned comparison.  */
static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

/* CC from a short BFP result.  */
static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

/* CC from a long BFP result.  */
static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

/* CC from an extended BFP result (both halves).  */
static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

/* CC from a 32-bit negative-absolute-value result.  */
static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

/* CC from a 64-bit negative-absolute-value result.  */
static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

/* CC from a 32-bit complement (load complement) result.  */
static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}
5080 
5081 static void cout_neg64(DisasContext *s, DisasOps *o)
5082 {
5083     gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
5084 }
5085 
5086 static void cout_nz32(DisasContext *s, DisasOps *o)
5087 {
5088     tcg_gen_ext32u_i64(cc_dst, o->out);
5089     gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
5090 }
5091 
5092 static void cout_nz64(DisasContext *s, DisasOps *o)
5093 {
5094     gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5095 }
5096 
5097 static void cout_s32(DisasContext *s, DisasOps *o)
5098 {
5099     gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5100 }
5101 
5102 static void cout_s64(DisasContext *s, DisasOps *o)
5103 {
5104     gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5105 }
5106 
5107 static void cout_subs32(DisasContext *s, DisasOps *o)
5108 {
5109     gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5110 }
5111 
5112 static void cout_subs64(DisasContext *s, DisasOps *o)
5113 {
5114     gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5115 }
5116 
5117 static void cout_subu32(DisasContext *s, DisasOps *o)
5118 {
5119     tcg_gen_sari_i64(cc_src, o->out, 32);
5120     tcg_gen_ext32u_i64(cc_dst, o->out);
5121     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
5122 }
5123 
5124 static void cout_subu64(DisasContext *s, DisasOps *o)
5125 {
5126     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
5127 }
5128 
5129 static void cout_tm32(DisasContext *s, DisasOps *o)
5130 {
5131     gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5132 }
5133 
5134 static void cout_tm64(DisasContext *s, DisasOps *o)
5135 {
5136     gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5137 }
5138 
5139 static void cout_muls32(DisasContext *s, DisasOps *o)
5140 {
5141     gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
5142 }
5143 
5144 static void cout_muls64(DisasContext *s, DisasOps *o)
5145 {
5146     /* out contains "high" part, out2 contains "low" part of 128 bit result */
5147     gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
5148 }
5149 
5150 /* ====================================================================== */
5151 /* The "PREParation" generators.  These initialize the DisasOps.OUT fields
5152    with the TCG register to which we will write.  Used in combination with
5153    the "wout" generators, in some cases we need a new temporary, and in
5154    some cases we can write to a TCG global.  */
5155 
/* Allocate a fresh 64-bit temporary for the output. */
static void prep_new(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Allocate a fresh pair of 64-bit temporaries (out/out2). */
static void prep_new_P(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Allocate a fresh 128-bit temporary for the output. */
static void prep_new_x(DisasContext *s, DisasOps *o)
{
    o->out_128 = tcg_temp_new_i128();
}
#define SPEC_prep_new_x 0

/* Write directly into the r1 global; no copy-back needed. */
static void prep_r1(DisasContext *s, DisasOps *o)
{
    o->out = regs[get_field(s, r1)];
}
#define SPEC_prep_r1 0

/* Write directly into the even/odd register pair r1/r1+1. */
static void prep_r1_P(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
}
#define SPEC_prep_r1_P SPEC_r1_even
5188 
5189 /* ====================================================================== */
5190 /* The "Write OUTput" generators.  These generally perform some non-trivial
5191    copy of data to TCG globals, or to main memory.  The trivial cases are
5192    generally handled by having a "prep" generator install the TCG global
5193    as the destination of the operation.  */
5194 
static void wout_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out);
}
#define SPEC_wout_r1 0

static void wout_out2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out2);
}
#define SPEC_wout_out2_r1 0

/* Insert the low 8 bits of the result, leaving the rest of r1 intact. */
static void wout_r1_8(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Insert the low 16 bits of the result, leaving the rest of r1 intact. */
static void wout_r1_16(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32 0

static void wout_r1_32h(DisasContext *s, DisasOps *o)
{
    store_reg32h_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32h 0

/* Two 32-bit results into the low halves of the even/odd pair. */
static void wout_r1_P32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/*
 * Split one 64-bit result across the even/odd pair: high half to r1,
 * low half to r1 + 1.
 */
static void wout_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i64 t = tcg_temp_new_i64();
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(t, o->out, 32);
    store_reg32_i64(r1, t);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

/* Split a 128-bit result across the even/odd pair (high in r1). */
static void wout_r1_D64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_extr_i128_i64(regs[r1 + 1], regs[r1], o->out_128);
}
#define SPEC_wout_r1_D64 SPEC_r1_even

static void wout_r3_P32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even
5273 
/* Short (32-bit) FP result into the high half of FPR r1. */
static void wout_e1(DisasContext *s, DisasOps *o)
{
    store_freg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasOps *o)
{
    store_freg(get_field(s, r1), o->out);
}
#define SPEC_wout_f1 0

/* Extended (128-bit) FP results use the FPR pair f1/f1+2 (SPEC_r1_f128). */
static void wout_x1(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);

    /* Split out_128 into out+out2 for cout_f128. */
    tcg_debug_assert(o->out == NULL);
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(o->out2, o->out, o->out_128);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

/* As wout_x1, but the halves are already present in out/out2. */
static void wout_x1_P(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1_P SPEC_r1_f128

/* Store only when r1 and r2 name different registers. */
static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_reg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

/* Store only when r1 and r2 name different registers. */
static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_freg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0
5324 
/* Store the result to the operand-1 address, in increasing widths. */
static void wout_m1_8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_UB);
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUW);
}
#define SPEC_wout_m1_16 0

#ifndef CONFIG_USER_ONLY
/* Alignment-checked variant; only used in system mode. */
static void wout_m1_16a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

static void wout_m1_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUL);
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
/* Alignment-checked variant; only used in system mode. */
static void wout_m1_32a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

static void wout_m1_64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ);
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
/* Alignment-checked variant; only used in system mode. */
static void wout_m1_64a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

/* Store to the operand-2 address (held in in2) instead of addr1. */
static void wout_m2_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
}
#define SPEC_wout_m2_32 0

/* Copy the *input* operand 2 into r1 (e.g. for load-style insns). */
static void wout_in2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
5390 
5391 /* ====================================================================== */
5392 /* The "INput 1" generators.  These load the first operand to an insn.  */
5393 
/* Load a modifiable copy of r1. */
static void in1_r1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1));
}
#define SPEC_in1_r1 0

/* Alias the r1 global directly -- no copy is made. */
static void in1_r1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1)];
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32u 0

/* The high 32 bits of r1, shifted down. */
static void in1_r1_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* The odd member of the even/odd pair r1/r1+1. */
static void in1_r1p1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

/* As above, but aliasing the global (no copy). */
static void in1_r1p1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1) + 1];
}
#define SPEC_in1_r1p1_o SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/*
 * Assemble one 64-bit value from the even/odd pair: low half from
 * r1 + 1, high half from r1.
 */
static void in1_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
}
#define SPEC_in1_r2_32u 0

static void in1_r3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r3)];
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32);
}
#define SPEC_in1_r3_sr32 0
5521 
/* Short (32-bit) FP operand from FPR r1. */
static void in1_e1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(s, r1));
}
#define SPEC_in1_e1 0

static void in1_f1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r1));
}
#define SPEC_in1_f1 0

/* Extended (128-bit) FP operand from the FPR pair named by r1. */
static void in1_x1(DisasContext *s, DisasOps *o)
{
    o->in1_128 = load_freg_128(get_field(s, r1));
}
#define SPEC_in1_x1 SPEC_r1_f128

/* Load the high double word of an extended (128-bit) format FP number */
static void in1_x2h(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r2));
}
#define SPEC_in1_x2h SPEC_r2_f128

static void in1_f3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r3));
}
#define SPEC_in1_f3 0

/* Effective address of storage operand 1: base b1 + displacement d1. */
static void in1_la1(DisasContext *s, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
}
#define SPEC_in1_la1 0

/* As above for operand 2, with the optional index register x2. */
static void in1_la2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in1_la2 0

/* Memory loads from the operand-1 address, in increasing widths. */
static void in1_m1_8u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_UB);
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESW);
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUW);
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESL);
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUL);
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUQ);
}
#define SPEC_in1_m1_64 0
5613 
5614 /* ====================================================================== */
5615 /* The "INput 2" generators.  These load the second operand to an insn.  */
5616 
/* Alias the r1 global directly -- no copy is made. */
static void in2_r1_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r1)];
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_32u 0

/* Even/odd pair r1/r1+1 assembled into one 64-bit value (r1 high). */
static void in2_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r2)];
}
#define SPEC_in2_r2_o 0

/* r2 == 0 leaves o->in2 NULL; the op generator must handle that case. */
static void in2_r2_nz(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r3));
}
#define SPEC_in2_r3 0

/* Even/odd pair r3/r3+1 assembled into one 128-bit value (r3 high). */
static void in2_r3_D64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in2_128 = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(o->in2_128, regs[r3 + 1], regs[r3]);
}
#define SPEC_in2_r3_D64 SPEC_r3_even

static void in2_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
}
#define SPEC_in2_r3_32u 0

static void in2_r2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
}
#define SPEC_in2_r2_sr32 0
5742 
static void in2_e2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(s, r2));
}
#define SPEC_in2_e2 0

static void in2_f2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2));
}
#define SPEC_in2_f2 0

static void in2_x2(DisasContext *s, DisasOps *o)
{
    o->in2_128 = load_freg_128(get_field(s, r2));
}
#define SPEC_in2_x2 SPEC_r2_f128

/* Load the low double word of an extended (128-bit) format FP number */
static void in2_x2l(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2) + 2);
}
#define SPEC_in2_x2l SPEC_r2_f128

/* Register value used as an address, wrapped per the addressing mode. */
static void in2_ra2(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);

    /* Note: *don't* treat !r2 as 0, use the reg value. */
    o->in2 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
}
#define SPEC_in2_ra2 0

/* Effective address of operand 2: optional index x2 + base b2 + disp d2. */
static void in2_a2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in2_a2 0

/*
 * The branch destination named by i2: either a pc-relative immediate
 * (turned into a constant here) or a register value, as decided by
 * disas_jdest.
 */
static TCGv gen_ri2(DisasContext *s)
{
    TCGv ri2 = NULL;
    bool is_imm;
    int imm;

    disas_jdest(s, i2, is_imm, imm, ri2);
    if (is_imm) {
        ri2 = tcg_constant_i64(s->base.pc_next + imm * 2);
    }

    return ri2;
}

static void in2_ri2(DisasContext *s, DisasOps *o)
{
    o->in2 = gen_ri2(s);
}
#define SPEC_in2_ri2 0

/*
 * Shift count: the displacement alone if there is no base register,
 * otherwise the computed address; in both cases masked to 6 bits.
 */
static void in2_sh(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);

    if (b2 == 0) {
        o->in2 = tcg_constant_i64(d2 & 0x3f);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
    }
}
#define SPEC_in2_sh 0
5818 
/*
 * Memory loads from the operand-2 address.  in2_a2 leaves the address
 * in o->in2; the load then overwrites that same temp with the value.
 */
static void in2_m2_8u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_UB);
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESW);
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUW);
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESL);
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUL);
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
/* Alignment-checked variant; only used in system mode. */
static void in2_m2_32ua(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

static void in2_m2_64(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
}
#define SPEC_in2_m2_64 0

/* As above, but the loaded value is then wrapped per the addressing mode. */
static void in2_m2_64w(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
    gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
}
#define SPEC_in2_m2_64w 0

#ifndef CONFIG_USER_ONLY
/* Alignment-checked variant; only used in system mode. */
static void in2_m2_64a(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif

/* Loads from the pc-relative address produced by gen_ri2. */
static void in2_mri2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TESW);
}
#define SPEC_in2_mri2_16s 0

static void in2_mri2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TEUW);
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
                       MO_TESL | MO_ALIGN);
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
                       MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s),
                        MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_mri2_64 0
5924 
/* Immediate operand i2, sign-extended as stored in the field. */
static void in2_i2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64(get_field(s, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint8_t)get_field(s, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint16_t)get_field(s, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint32_t)get_field(s, i2));
}
#define SPEC_in2_i2_32u 0

/* Unsigned immediate shifted left by the per-insn data amount. */
static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(s, i2);
    o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(s, i2);
    o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
/* The raw instruction bits themselves, for system-mode helpers. */
static void in2_insn(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64(s->fields.raw_insn);
}
#define SPEC_in2_insn 0
#endif
5970 
5971 /* ====================================================================== */
5972 
5973 /* Find opc within the table of insns.  This is formulated as a switch
5974    statement so that (1) we get compile-time notice of cut-paste errors
5975    for duplicated opcodes, and (2) the compiler generates the binary
5976    search tree, rather than us having to post-process the table.  */
5977 
/* C/D/F are shorthands that expand to the full E form with default
   data (D) and/or flags (FL) arguments.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)

#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)

#define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)

/* First expansion of insn-data.h.inc: one enum symbol per insn.  */
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.h.inc"
};

/* Second expansion: build a DisasInsn initializer per insn, wiring the
   helper functions by token pasting (in1_##I1, wout_##W, etc.).  */
#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
    .opc = OPC,                                                             \
    .flags = FL,                                                            \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
 },

/* Allow 0 to be used for NULL in the table below.  */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0
6022 
/* Give smaller names to the various facilities.  These FAC_* shorthands
   are referenced from the facility column of the instruction table.  */
#define FAC_Z           S390_FEAT_ZARCH /* z/architecture mode */
#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP         S390_FEAT_DFP
#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* DFP-rounding */
#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE          S390_FEAT_EXECUTE_EXT
#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE        S390_FEAT_STFLE
#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
#define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
#define FAC_V           S390_FEAT_VECTOR /* vector facility */
#define FAC_VE          S390_FEAT_VECTOR_ENH  /* vector enhancements facility 1 */
#define FAC_VE2         S390_FEAT_VECTOR_ENH2 /* vector enhancements facility 2 */
#define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
#define FAC_MIE3        S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */
6068 
/* The full table of decode info, generated by expanding the E() entries
   of insn-data.h.inc with the initializer macro defined above.  */
static const DisasInsn insn_info[] = {
#include "insn-data.h.inc"
};
6072 
#undef E
/*
 * Redefine E() to emit one switch case mapping the combined opcode
 * (major << 8 | minor) to its insn_info[] entry, then re-include the
 * instruction table to generate the body of lookup_opc().
 */
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
    case OPC: return &insn_info[insn_ ## NM];

/* Map a 16-bit opcode to its decode info; NULL if unknown/unimplemented. */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.h.inc"
    default:
        return NULL;
    }
}

/* Done with the instruction table; drop the generator macros. */
#undef F
#undef E
#undef D
#undef C
6090 
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principals of Operation.  */

static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* A zero-sized field means "operand not present in this format".  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        /* Sign-extend via the xor/subtract trick on the sign bit.  */
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* R currently holds DL(12):DH(8) in encoding order; reassemble
           into the signed 20-bit displacement DH:DL.  */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    case 3: /* MSB stored in RXB */
        g_assert(f->size == 4);
        /* Vector register numbers are 5 bits wide: the low 4 bits come
           from the regular field, the MSB from the RXB byte (insn bits
           36-39), one RXB bit per operand position.  */
        switch (f->beg) {
        case 8:
            r |= extract64(insn, 63 - 36, 1) << 4;
            break;
        case 12:
            r |= extract64(insn, 63 - 37, 1) << 4;
            break;
        case 16:
            r |= extract64(insn, 63 - 38, 1) << 4;
            break;
        case 32:
            r |= extract64(insn, 63 - 39, 1) << 4;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        abort();
    }

    /*
     * Validate that the "compressed" encoding we selected above is valid.
     * I.e. we haven't made two different original fields overlap.
     */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
6151 
/* Lookup the insn at the current PC, extracting the operands into
   S->fields and returning the info struct for the insn.  Returns NULL
   for an invalid/unimplemented insn.  Also sets s->ilen and s->pc_tmp.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        tcg_gen_st_i64(tcg_constant_i64(0), cpu_env,
                       offsetof(CPUS390XState, ex_value));

        /* Extract the values saved by EXECUTE.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;

        /* Register insn bytes with translator so plugins work. */
        for (int i = 0; i < ilen; i++) {
            uint8_t byte = extract64(insn, 56 - (i * 8), 8);
            translator_fake_ldb(byte, pc + i);
        }
        op = insn >> 56;
    } else {
        /* The first two opcode bits encode the total length (2, 4 or 6
           bytes); read the remainder and left-align the insn in the
           64-bit word for extract_field().  */
        insn = ld_code2(env, s, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, s, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;     /* secondary opcode in byte 1 */
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;    /* secondary opcode in bits 12-15 */
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;                     /* no secondary opcode */
        break;
    default:
        op2 = (insn << 40) >> 56;    /* default: secondary opcode at bit 40 */
        break;
    }

    memset(&s->fields, 0, sizeof(s->fields));
    s->fields.raw_insn = insn;
    s->fields.op = op;
    s->fields.op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);
    s->insn = info;

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(&s->fields, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
6260 
/*
 * Return true if REG is an additional-floating-point register, i.e. any
 * FP register other than the four base registers f0, f2, f4 and f6
 * (every odd-numbered register, or any register above 6).
 */
static bool is_afp_reg(int reg)
{
    return (reg & 1) != 0 || reg > 6;
}
6265 
/*
 * Return true if REG may start a 128-bit floating-point register pair.
 * Valid pair starts are 0,1,4,5,8,9,12,13 — exactly the registers with
 * bit 1 clear.
 */
static bool is_fp_pair(int reg)
{
    return (reg & 0x2) == 0;
}
6271 
/*
 * Decode and translate the single instruction at s->base.pc_next.
 * Performs privilege/AFP/vector/specification checks, then runs the
 * in/prep/op/wout/cout helper pipeline from the decode table.
 * Advances s->base.pc_next past the insn and returns the resulting
 * DisasJumpType (DISAS_NORETURN on any generated exception).
 */
static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasOps o = {};
    bool icount = false;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s);

    /* Update insn_start now that we know the ILEN.  */
    tcg_set_insn_start_param(s->insn_start, 2, s->ilen);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      s->fields.op, s->fields.op2);
        gen_illegal_opcode(s);
        ret = DISAS_NORETURN;
        goto out;
    }

#ifndef CONFIG_USER_ONLY
    /* With PER enabled, report the instruction fetch to the helper.  */
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_constant_i64(s->base.pc_next);
        gen_helper_per_ifetch(cpu_env, addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            ret = DISAS_NORETURN;
            goto out;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            /* dxc selects the data-exception code: 1 for an AFP register,
               2 for BFP, 3 for DFP, 0xfe for vector instructions.  */
            uint8_t dxc = 0;

            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (insn->flags & IF_VEC) {
                dxc = 0xfe;
            }
            if (dxc) {
                gen_data_exception(dxc);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* if vector instructions not enabled, executing them is forbidden */
        if (insn->flags & IF_VEC) {
            if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
                gen_data_exception(0xfe);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* input/output is the special case for icount mode */
        if (unlikely(insn->flags & IF_IO)) {
            icount = translator_io_start(&s->base);
        }
    }

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            ret = DISAS_NORETURN;
            goto out;
        }
    }

    /* Implement the instruction.  Each helper stage is optional; a NULL
       entry in the decode table simply skips that stage.  */
    if (insn->help_in1) {
        insn->help_in1(s, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (ret != DISAS_NORETURN) {
        /* Write back outputs and set the condition code only if the
           operation itself did not raise an exception.  */
        if (insn->help_wout) {
            insn->help_wout(s, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* io should be the last instruction in tb when icount is enabled */
    if (unlikely(icount && ret == DISAS_NEXT)) {
        ret = DISAS_TOO_MANY;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == DISAS_NEXT || ret == DISAS_TOO_MANY) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

out:
    /* Advance to the next instruction.  */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
6411 
/*
 * TranslatorOps hook: set up the per-TB DisasContext before translation.
 * Applies the 31-bit address mask when not in 64-bit mode and records
 * whether this TB must always return to the main loop (PER or EXECUTE).
 */
static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* 31-bit mode */
    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
        dc->base.pc_first &= 0x7fffffff;
        dc->base.pc_next = dc->base.pc_first;
    }

    dc->cc_op = CC_OP_DYNAMIC;
    /* For a TB run under EXECUTE, cs_base carries the executed insn.  */
    dc->ex_value = dc->base.tb->cs_base;
    dc->exit_to_mainloop = (dc->base.tb->flags & FLAG_MASK_PER) || dc->ex_value;
}
6426 
/* TranslatorOps hook: nothing to do at TB start for s390x.  */
static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
6430 
/*
 * TranslatorOps hook: emit the insn_start op recording (pc, cc_op, ilen).
 * These three values are what s390x_restore_state_to_opc reads back.
 */
static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* Delay the set of ilen until we've read the insn. */
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
    /* Remember the op so translate_one can patch in the real ilen.  */
    dc->insn_start = tcg_last_op();
}
6439 
6440 static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
6441                                 uint64_t pc)
6442 {
6443     uint64_t insn = cpu_lduw_code(env, pc);
6444 
6445     return pc + get_ilen((insn >> 8) & 0xff);
6446 }
6447 
/*
 * TranslatorOps hook: translate one instruction.  Ends the TB early when
 * running an EXECUTE target, or when this insn or the next one would
 * cross a page boundary.
 */
static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    CPUS390XState *env = cs->env_ptr;
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = translate_one(env, dc);
    if (dc->base.is_jmp == DISAS_NEXT) {
        if (dc->ex_value ||
            !is_same_page(dcbase, dc->base.pc_next) ||
            !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
6462 
/*
 * TranslatorOps hook: emit the TB epilogue.  Depending on how translation
 * stopped, flush psw_addr and/or cc_op back to env, then either exit to
 * the main loop or chain to the next TB.
 */
static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        /* Exception already raised; nothing more to emit.  */
        break;
    case DISAS_TOO_MANY:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (dc->exit_to_mainloop) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
6490 
6491 static void s390x_tr_disas_log(const DisasContextBase *dcbase,
6492                                CPUState *cs, FILE *logfile)
6493 {
6494     DisasContext *dc = container_of(dcbase, DisasContext, base);
6495 
6496     if (unlikely(dc->ex_value)) {
6497         /* ??? Unfortunately target_disas can't use host memory.  */
6498         fprintf(logfile, "IN: EXECUTE %016" PRIx64, dc->ex_value);
6499     } else {
6500         fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
6501         target_disas(logfile, cs, dc->base.pc_first, dc->base.tb->size);
6502     }
6503 }
6504 
/* Hook table handed to the generic translator loop.  */
static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};
6513 
/*
 * Target entry point for TB translation: drive the generic translator
 * loop with the s390x hooks over the code starting at PC.
 */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
}
6521 
/*
 * Restore CPU state from the insn_start data recorded at translation
 * time.  DATA layout matches s390x_tr_insn_start / translate_one:
 * data[0] = psw.addr, data[1] = cc_op, data[2] = ilen.
 */
void s390x_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    int cc_op = data[1];

    env->psw.addr = data[0];

    /* Update the CC opcode if it is not already up-to-date.  */
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }

    /* Record ILEN.  */
    env->int_pgm_ilen = data[2];
}
6540