xref: /qemu/target/s390x/tcg/translate.c (revision 003f1536)
1 /*
2  *  S/390 translation
3  *
4  *  Copyright (c) 2009 Ulrich Hecht
5  *  Copyright (c) 2010 Alexander Graf
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24 
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 #  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 #  define LOG_DISAS(...) do { } while (0)
29 #endif
30 
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "s390x-internal.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
36 #include "tcg/tcg-op.h"
37 #include "tcg/tcg-op-gvec.h"
38 #include "qemu/log.h"
39 #include "qemu/host-utils.h"
40 #include "exec/cpu_ldst.h"
41 #include "exec/helper-proto.h"
42 #include "exec/helper-gen.h"
43 
44 #include "exec/translator.h"
45 #include "exec/log.h"
46 #include "qemu/atomic128.h"
47 
48 #define HELPER_H "helper.h"
49 #include "exec/helper-info.c.inc"
50 #undef  HELPER_H
51 
52 
53 /* Information that (most) every instruction needs to manipulate.  */
54 typedef struct DisasContext DisasContext;
55 typedef struct DisasInsn DisasInsn;
56 typedef struct DisasFields DisasFields;
57 
58 /*
59  * Define a structure to hold the decoded fields.  We'll store each inside
60  * an array indexed by an enum.  In order to conserve memory, we'll arrange
61  * for fields that do not exist at the same time to overlap, thus the "C"
62  * for compact.  For checking purposes there is an "O" for original index
63  * as well that will be applied to availability bitmaps.
64  */
65 
/*
 * "Original" field indices: one enumerator per architectural field
 * name.  These are the bit positions used in the presentO
 * availability bitmap of DisasFields.
 */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};

/*
 * "Compact" field indices: fields that never occur in the same
 * instruction format share a slot in DisasFields.c[], keeping the
 * storage down to NUM_C_FIELD entries.
 */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};
131 
/* Decoded fields of the instruction currently being translated.  */
struct DisasFields {
    uint64_t raw_insn;      /* the raw instruction bits */
    unsigned op:8;          /* opcode byte */
    unsigned op2:8;         /* extended-opcode byte, where the format has one */
    unsigned presentC:16;   /* bitmap over compact (FLD_C_*) indices */
    unsigned int presentO;  /* bitmap over original (FLD_O_*) indices */
    int c[NUM_C_FIELD];     /* field values, indexed by FLD_C_* */
};

struct DisasContext {
    DisasContextBase base;  /* common translator state; base.pc_next is
                               the address of the current instruction */
    const DisasInsn *insn;  /* decode-table entry for the current insn */
    TCGOp *insn_start;
    DisasFields fields;
    /* NOTE(review): appears to hold the instruction value supplied by
       EXECUTE when non-zero - confirm against the decode path.  */
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;          /* length of the current insn in bytes */
    enum cc_op cc_op;       /* how the condition code is currently computed */
    bool exit_to_mainloop;  /* force exit to the main loop after this TB */
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;             /* selects between u.s64 and u.s32 below */
    union {
        struct { TCGv_i64 a, b; } s64;  /* compare a <cond> b, 64-bit */
        struct { TCGv_i32 a, b; } s32;  /* compare a <cond> b, 32-bit */
    } u;
} DisasCompare;
167 
168 #ifdef DEBUG_INLINE_BRANCHES
169 static uint64_t inline_branch_hit[CC_OP_MAX];
170 static uint64_t inline_branch_miss[CC_OP_MAX];
171 #endif
172 
/*
 * Store into OUT the link information for PC:
 * - in 64-bit mode (FLAG_MASK_32 and FLAG_MASK_64): the full address;
 * - in 31-bit mode: the address with the high bit (0x80000000) set,
 *   deposited into the low 32 bits of OUT (upper half preserved);
 * - in 24-bit mode: just the address in the low 32 bits of OUT.
 */
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    /* Only 24- and 31-bit modes reach the deposit below.  */
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tcg_gen_deposit_i64(out, out, tcg_constant_i64(pc), 0, 32);
}
185 
186 static TCGv_i64 psw_addr;
187 static TCGv_i64 psw_mask;
188 static TCGv_i64 gbea;
189 
190 static TCGv_i32 cc_op;
191 static TCGv_i64 cc_src;
192 static TCGv_i64 cc_dst;
193 static TCGv_i64 cc_vr;
194 
195 static char cpu_reg_names[16][4];
196 static TCGv_i64 regs[16];
197 
/*
 * Allocate the TCG globals that back the architectural state used by
 * the translator: the PSW address and mask, the breaking-event
 * address (gbea), the condition-code state (cc_op/cc_src/cc_dst/cc_vr)
 * and the 16 general registers.
 */
void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(tcg_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(tcg_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(tcg_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(tcg_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* The names ("r0".."r15") must outlive the globals referring to them.  */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(tcg_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}
228 
/* Byte offset into CPUS390XState of vector register REG (0..31). */
static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}
234 
/*
 * Byte offset into CPUS390XState of element ENR, of size ES, within
 * vector register REG, adjusted for host endianness.
 */
static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
     * of the 16 byte vector, on both, little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#if !HOST_BIG_ENDIAN
    /* Swap the element position within its 8-byte half (see table above).  */
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
269 
/* Byte offset of FP register REG: the 64-bit element 0 of vector REG. */
static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

/* Byte offset of the 32-bit (short) FP register REG: word 0 of vector REG. */
static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}
281 
282 static TCGv_i64 load_reg(int reg)
283 {
284     TCGv_i64 r = tcg_temp_new_i64();
285     tcg_gen_mov_i64(r, regs[reg]);
286     return r;
287 }
288 
289 static TCGv_i64 load_freg(int reg)
290 {
291     TCGv_i64 r = tcg_temp_new_i64();
292 
293     tcg_gen_ld_i64(r, tcg_env, freg64_offset(reg));
294     return r;
295 }
296 
297 static TCGv_i64 load_freg32_i64(int reg)
298 {
299     TCGv_i64 r = tcg_temp_new_i64();
300 
301     tcg_gen_ld32u_i64(r, tcg_env, freg32_offset(reg));
302     return r;
303 }
304 
305 static TCGv_i128 load_freg_128(int reg)
306 {
307     TCGv_i64 h = load_freg(reg);
308     TCGv_i64 l = load_freg(reg + 2);
309     TCGv_i128 r = tcg_temp_new_i128();
310 
311     tcg_gen_concat_i64_i128(r, l, h);
312     return r;
313 }
314 
315 static void store_reg(int reg, TCGv_i64 v)
316 {
317     tcg_gen_mov_i64(regs[reg], v);
318 }
319 
320 static void store_freg(int reg, TCGv_i64 v)
321 {
322     tcg_gen_st_i64(v, tcg_env, freg64_offset(reg));
323 }
324 
325 static void store_reg32_i64(int reg, TCGv_i64 v)
326 {
327     /* 32 bit register writes keep the upper half */
328     tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
329 }
330 
331 static void store_reg32h_i64(int reg, TCGv_i64 v)
332 {
333     tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
334 }
335 
336 static void store_freg32_i64(int reg, TCGv_i64 v)
337 {
338     tcg_gen_st32_i64(v, tcg_env, freg32_offset(reg));
339 }
340 
/* Flush the current instruction address into the psw.addr global. */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}
346 
/*
 * Record a taken branch at the current instruction: update the
 * breaking-event address (gbea) and, if PER is enabled, invoke the
 * per_branch helper.  TO_NEXT selects s->pc_tmp (the sequential next
 * address) as the branch target instead of the runtime psw_addr.
 */
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_constant_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(tcg_env, gbea, next_pc);
    }
#endif
}
358 
/*
 * Conditional variant of per_branch: the branch is taken iff
 * COND(ARG1, ARG2) holds at runtime.  With PER enabled, skip the
 * helper call when the condition is false; without PER, update gbea
 * conditionally via movcond.
 */
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        /* Branch around the PER helper on the inverted condition.  */
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(tcg_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_constant_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
    }
#endif
}
377 
/* Record the current instruction address as a PER breaking event. */
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

/*
 * Flush the translation-time cc_op into the cc_op global.  DYNAMIC
 * and STATIC mean env->cc_op is already up to date, so skip the store.
 */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}
389 
/* Fetch 2 bytes of instruction text at PC, zero-extended to 64 bits. */
static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)translator_lduw(env, &s->base, pc);
}

/* Fetch 4 bytes of instruction text at PC, zero-extended to 64 bits. */
static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    /* Double cast: truncate to 32 bits first so the extension is zero.  */
    return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
}
401 
402 static int get_mem_index(DisasContext *s)
403 {
404 #ifdef CONFIG_USER_ONLY
405     return MMU_USER_IDX;
406 #else
407     if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
408         return MMU_REAL_IDX;
409     }
410 
411     switch (s->base.tb->flags & FLAG_MASK_ASC) {
412     case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
413         return MMU_PRIMARY_IDX;
414     case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
415         return MMU_SECONDARY_IDX;
416     case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
417         return MMU_HOME_IDX;
418     default:
419         g_assert_not_reached();
420         break;
421     }
422 #endif
423 }
424 
425 static void gen_exception(int excp)
426 {
427     gen_helper_exception(tcg_env, tcg_constant_i32(excp));
428 }
429 
/*
 * Raise a program exception with the given interruption CODE.
 * Stores the code and instruction length into env, synchronizes
 * psw.addr and the cc state, then triggers EXCP_PGM.
 */
static void gen_program_exception(DisasContext *s, int code)
{
    /* Remember what pgm exception this was.  */
    tcg_gen_st_i32(tcg_constant_i32(code), tcg_env,
                   offsetof(CPUS390XState, int_pgm_code));

    tcg_gen_st_i32(tcg_constant_i32(s->ilen), tcg_env,
                   offsetof(CPUS390XState, int_pgm_ilen));

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}
448 
/* Raise a PGM_OPERATION program exception (illegal opcode). */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

/* Raise a data exception with data-exception code DXC. */
static inline void gen_data_exception(uint8_t dxc)
{
    gen_helper_data_exception(tcg_env, tcg_constant_i32(dxc));
}

/* Raise a trap: a data exception with DXC 0xff. */
static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}
464 
/*
 * DST = SRC + IMM, wrapped to the current addressing mode:
 * masked to 31 bits in 31-bit mode, to 24 bits in 24-bit mode,
 * and kept in full in 64-bit mode.
 */
static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}
477 
/*
 * Compute the effective address base(B2) + index(X2) + displacement(D2)
 * into a fresh temporary, wrapped to the current addressing mode.
 * B2/X2 of zero mean "no register", per the architecture.
 */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        /* Constant-only address: apply the mode mask directly.  */
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}
505 
/*
 * True if cc_src/cc_dst/cc_vr may hold live data for the current cc
 * computation.  DYNAMIC/STATIC keep the value in env->cc_op instead.
 * NOTE(review): the "> 3" test assumes the CC_OP_CONST0..CONST3
 * enumerators are the values 0..3 - confirm against the cc_op enum.
 */
static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}
512 
/*
 * Set the condition code to the constant VAL by selecting the
 * corresponding CC_OP_CONSTn state; stale cc data can be discarded.
 */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}
522 
/* Switch to cc computation OP, consuming only cc_dst. */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Switch to cc computation OP, consuming cc_src and cc_dst. */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Switch to cc computation OP, consuming all of cc_src/cc_dst/cc_vr. */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

/* Set the cc to "VAL is (non)zero" via CC_OP_NZ. */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}
557 
/* CC value is in env->cc_op: mark the state static and drop any
   stale cc_* data. */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
568 
/*
 * Calculates cc into cc_op and marks the state CC_OP_STATIC.
 * Simple cases are computed inline; the rest call the calc_cc helper,
 * passing only the cc_* globals the given operation consumes (unused
 * slots receive a constant-zero dummy).
 */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    /* First pass: decide which helper arguments are needed.  */
    switch (s->cc_op) {
    default:
        dummy = tcg_constant_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_constant_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper call: neither local_cc_op nor dummy needed.  */
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
        /* cc = (cc_dst != 0), computed inline.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, cc_dst, cc_dst, 0);
        tcg_gen_extrl_i64_i32(cc_op, cc_dst);
        break;
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, tcg_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        g_assert_not_reached();
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
659 
/*
 * Whether a direct goto_tb to DEST is permitted.  Never chain TBs
 * while PER is active; otherwise defer to the generic translator check.
 */
static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}
667 
/* Statistics hook: count a branch that had to go through calc_cc. */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

/* Statistics hook: count a branch compiled to an inline comparison. */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
681 
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible; entries come in pairs because
   the low mask bit (the cc=3 "x" column) is a don't-care.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible; the two low mask
   bits (cc=2/cc=3) are don't-cares, hence runs of four.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
707 
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  MASK is the 4-bit
   branch mask: bit 8 selects cc=0, 4 selects cc=1, 2 selects cc=2,
   1 selects cc=3.  Where possible the comparison is built directly
   from the live cc_src/cc_dst data; otherwise the cc is materialized
   via gen_op_calc_cc and compared against constants.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Branch-always / branch-never: no operands needed.  */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        /* Same table as above, but with unsigned comparisons.  */
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_constant_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_constant_i64(0);
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        /* Compare (cc_src & cc_dst) against zero.  */
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_constant_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_constant_i64(0);
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        /* The cc value is in cc_op; compare it against constants,
           special-casing the masks with a single efficient test.  */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, tcg_constant_i32(8), cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
961 
962 /* ====================================================================== */
963 /* Define the insn format enumeration.  */
964 #define F0(N)                         FMT_##N,
965 #define F1(N, X1)                     F0(N)
966 #define F2(N, X1, X2)                 F0(N)
967 #define F3(N, X1, X2, X3)             F0(N)
968 #define F4(N, X1, X2, X3, X4)         F0(N)
969 #define F5(N, X1, X2, X3, X4, X5)     F0(N)
970 #define F6(N, X1, X2, X3, X4, X5, X6) F0(N)
971 
972 typedef enum {
973 #include "insn-format.h.inc"
974 } DisasFormat;
975 
976 #undef F0
977 #undef F1
978 #undef F2
979 #undef F3
980 #undef F4
981 #undef F5
982 #undef F6
983 
984 /* This is the way fields are to be accessed out of DisasFields.  */
985 #define have_field(S, F)  have_field1((S), FLD_O_##F)
986 #define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)
987 
/* Return whether original field index C was decoded for this insn. */
static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

/*
 * Return the value of the field with original index O, stored in
 * compact slot C.  The field must be present (see have_field1).
 */
static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}
999 
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;           /* first bit of the field in the insn */
    unsigned int size:8;          /* width of the field in bits */
    /* Extraction type; per the R/M/V/BD/I/L macros below: 0 plain,
       1 immediate, 2 long displacement, 3 vector register.  The exact
       interpretation lives in the extraction code, not visible here. */
    unsigned int type:2;
    unsigned int indexC:6;        /* compact storage slot (FLD_C_*) */
    enum DisasFieldIndexO indexO:8;  /* original field index (FLD_O_*) */
} DisasField;

/* Per-format list of fields; at most NUM_C_FIELD per instruction. */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
1012 
1013 #define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
1014 #define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
1015 #define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
1016 #define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
1017                       { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1018 #define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
1019                       { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
1020                       { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1021 #define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
1022                       { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1023 #define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
1024                       { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
1025                       { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1026 #define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
1027 #define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }
1028 
1029 #define F0(N)                     { { } },
1030 #define F1(N, X1)                 { { X1 } },
1031 #define F2(N, X1, X2)             { { X1, X2 } },
1032 #define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
1033 #define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
1034 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1035 #define F6(N, X1, X2, X3, X4, X5, X6)       { { X1, X2, X3, X4, X5, X6 } },
1036 
1037 static const DisasFormatInfo format_info[] = {
1038 #include "insn-format.h.inc"
1039 };
1040 
1041 #undef F0
1042 #undef F1
1043 #undef F2
1044 #undef F3
1045 #undef F4
1046 #undef F5
1047 #undef F6
1048 #undef R
1049 #undef M
1050 #undef V
1051 #undef BD
1052 #undef BXD
1053 #undef BDL
1054 #undef BXDL
1055 #undef I
1056 #undef L
1057 
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    TCGv_i64 out, out2, in1, in2;        /* 64-bit operand values */
    TCGv_i64 addr1;                      /* address of the first operand */
    TCGv_i128 out_128, in1_128, in2_128; /* 128-bit operand values */
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

/* Power-of-two bit flags, so several constraints can be OR'd together.  */
#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */
1099 
/* Static decode-table entry describing one instruction.  */
struct DisasInsn {
    unsigned opc:16;    /* opcode bits used for decoding */
    unsigned flags:16;  /* IF_* flags above */
    DisasFormat fmt:8;  /* instruction format, indexes format_info[] */
    unsigned fac:8;     /* required facility -- TODO confirm encoding */
    unsigned spec:8;    /* SPEC_* operand constraints */

    const char *name;   /* instruction name */

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    /* Per-insn constant, available to the helpers as s->insn->data.  */
    uint64_t data;
};
1126 
1127 /* ====================================================================== */
1128 /* Miscellaneous helpers, used by several operations.  */
1129 
/* Emit an unconditional branch to DEST.  A branch to the next sequential
   insn needs no code at all; otherwise chain with goto_tb when the target
   allows it, else just update psw_addr and leave the TB.  */
static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}
1149 
/* Emit a conditional branch: comparison C selects between the taken
   target (relative offset IMM when IS_IMM, else the address in CDEST)
   and the next sequential instruction.  Never/always conditions and
   branch-to-next are folded away; otherwise prefer goto_tb exits when
   the involved addresses permit them.  */
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + (int64_t)imm * 2;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_NORETURN;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_constant_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_constant_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            /* Widen the 32-bit comparison result to select on it in 64 bit. */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_constant_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
        }

        ret = DISAS_PC_UPDATED;
    }

 egress:
    return ret;
}
1271 
1272 /* ====================================================================== */
1273 /* The operations.  These perform the bulk of the work for any insn,
1274    usually after the operands have been loaded and output initialized.  */
1275 
/* Absolute value of a 64-bit operand.  */
static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

/* Absolute value of a float32: clear the sign bit (bit 31).  */
static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

/* Absolute value of a float64: clear the sign bit (bit 63).  */
static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

/* Absolute value of a float128: clear the sign bit in the high half,
   copy the low half through unchanged.  */
static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

/* 64-bit addition.  */
static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* Unsigned 64-bit addition, leaving the carry-out (0 or 1) in cc_src
   for the CC_OP_ADDU condition-code computation.  */
static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
1313 
/* Compute carry into cc_src. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        /* Convert the SUBU borrow in cc_src into a carry by adding 1
           (cc_src is presumably -1/0 there -- TODO confirm).  */
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}
1334 
/* 32-bit add with carry: out = in1 + in2 + carry, the carry coming from
   the current condition code via compute_carry().  */
static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}
1342 
1343 static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
1344 {
1345     compute_carry(s);
1346 
1347     TCGv_i64 zero = tcg_constant_i64(0);
1348     tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
1349     tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
1350 
1351     return DISAS_NEXT;
1352 }
1353 
1354 static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
1355 {
1356     bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);
1357 
1358     o->in1 = tcg_temp_new_i64();
1359     if (non_atomic) {
1360         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
1361     } else {
1362         /* Perform the atomic addition in memory. */
1363         tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
1364                                      s->insn->data);
1365     }
1366 
1367     /* Recompute also for atomic case: needed for setting CC. */
1368     tcg_gen_add_i64(o->out, o->in1, o->in2);
1369 
1370     if (non_atomic) {
1371         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
1372     }
1373     return DISAS_NEXT;
1374 }
1375 
1376 static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
1377 {
1378     bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);
1379 
1380     o->in1 = tcg_temp_new_i64();
1381     if (non_atomic) {
1382         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
1383     } else {
1384         /* Perform the atomic addition in memory. */
1385         tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
1386                                      s->insn->data);
1387     }
1388 
1389     /* Recompute also for atomic case: needed for setting CC. */
1390     tcg_gen_movi_i64(cc_src, 0);
1391     tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
1392 
1393     if (non_atomic) {
1394         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
1395     }
1396     return DISAS_NEXT;
1397 }
1398 
/* ADD (AEB): short-format BFP add via helper.  */
static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* ADD (ADB): long-format BFP add via helper.  */
static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* ADD (AXB): extended-format (128-bit) BFP add via helper.  */
static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}

/* Bitwise AND of two 64-bit operands.  */
static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1422 
1423 static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
1424 {
1425     int shift = s->insn->data & 0xff;
1426     int size = s->insn->data >> 8;
1427     uint64_t mask = ((1ull << size) - 1) << shift;
1428     TCGv_i64 t = tcg_temp_new_i64();
1429 
1430     tcg_gen_shli_i64(t, o->in2, shift);
1431     tcg_gen_ori_i64(t, t, ~mask);
1432     tcg_gen_and_i64(o->out, o->in1, t);
1433 
1434     /* Produce the CC from only the bits manipulated.  */
1435     tcg_gen_andi_i64(cc_dst, o->out, mask);
1436     set_cc_nz_u64(s, cc_dst);
1437     return DISAS_NEXT;
1438 }
1439 
/* AND with complement: out = in1 & ~in2.  */
static DisasJumpType op_andc(DisasContext *s, DisasOps *o)
{
    tcg_gen_andc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* OR with complement: out = in1 | ~in2.  */
static DisasJumpType op_orc(DisasContext *s, DisasOps *o)
{
    tcg_gen_orc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* NAND: out = ~(in1 & in2).  */
static DisasJumpType op_nand(DisasContext *s, DisasOps *o)
{
    tcg_gen_nand_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* NOR: out = ~(in1 | in2).  */
static DisasJumpType op_nor(DisasContext *s, DisasOps *o)
{
    tcg_gen_nor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* NOT XOR (equivalence): out = ~(in1 ^ in2).  */
static DisasJumpType op_nxor(DisasContext *s, DisasOps *o)
{
    tcg_gen_eqv_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1469 
1470 static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
1471 {
1472     o->in1 = tcg_temp_new_i64();
1473 
1474     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
1475         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
1476     } else {
1477         /* Perform the atomic operation in memory. */
1478         tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
1479                                      s->insn->data);
1480     }
1481 
1482     /* Recompute also for atomic case: needed for setting CC. */
1483     tcg_gen_and_i64(o->out, o->in1, o->in2);
1484 
1485     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
1486         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
1487     }
1488     return DISAS_NEXT;
1489 }
1490 
/* Branch-and-save: store the return address into the output operand,
   then branch to the address in in2.  A NULL in2 means no branch is
   taken (register operand 0).  */
static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}
1502 
/* Build the link information for BAL-style linkage.  In 32/64-bit
   addressing modes this is just pc_to_link_info(); otherwise the low
   word is assembled from ILC, CC, program mask and the next PC.  */
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    /* Keep the high word; insert ILC (bits 30-31) and the next PC.  */
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    /* Insert the program mask from the PSW into bits 24-27.  */
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    /* Insert the condition code into bits 28-29.  */
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
}
1522 
/* Branch-and-link: like op_bas, but the saved word also carries ILC,
   CC and program mask when not in 32/64-bit mode (see save_link_info).  */
static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}
1534 
1535 /*
1536  * Disassemble the target of a branch. The results are returned in a form
1537  * suitable for passing into help_branch():
1538  *
1539  * - bool IS_IMM reflects whether the target is fixed or computed. Non-EXECUTEd
1540  *   branches, whose DisasContext *S contains the relative immediate field RI,
1541  *   are considered fixed. All the other branches are considered computed.
1542  * - int IMM is the value of RI.
1543  * - TCGv_i64 CDEST is the address of the computed target.
1544  */
1545 #define disas_jdest(s, ri, is_imm, imm, cdest) do {                            \
1546     if (have_field(s, ri)) {                                                   \
1547         if (unlikely(s->ex_value)) {                                           \
1548             cdest = tcg_temp_new_i64();                                        \
1549             tcg_gen_ld_i64(cdest, tcg_env, offsetof(CPUS390XState, ex_target));\
1550             tcg_gen_addi_i64(cdest, cdest, (int64_t)get_field(s, ri) * 2);     \
1551             is_imm = false;                                                    \
1552         } else {                                                               \
1553             is_imm = true;                                                     \
1554         }                                                                      \
1555     } else {                                                                   \
1556         is_imm = false;                                                        \
1557     }                                                                          \
1558     imm = is_imm ? get_field(s, ri) : 0;                                       \
1559 } while (false)
1560 
/* Branch-and-save with an i2 target: save the link information, then
   branch unconditionally (condition mask 0xf).  */
static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    bool is_imm;
    int imm;

    pc_to_link_info(o->out, s, s->pc_tmp);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    disas_jcc(s, &c, 0xf);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1573 
/* BRANCH ON CONDITION: branch to the i2 target per condition mask m1.
   The no-branch BCR forms with masks 14/15 act as serialization points.  */
static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s, m1);
    DisasCompare c;
    bool is_imm;
    int imm;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s, r2) && get_field(s, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jdest(s, i2, is_imm, imm, o->in2);
    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1600 
1601 static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
1602 {
1603     int r1 = get_field(s, r1);
1604     DisasCompare c;
1605     bool is_imm;
1606     TCGv_i64 t;
1607     int imm;
1608 
1609     c.cond = TCG_COND_NE;
1610     c.is_64 = false;
1611 
1612     t = tcg_temp_new_i64();
1613     tcg_gen_subi_i64(t, regs[r1], 1);
1614     store_reg32_i64(r1, t);
1615     c.u.s32.a = tcg_temp_new_i32();
1616     c.u.s32.b = tcg_constant_i32(0);
1617     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1618 
1619     disas_jdest(s, i2, is_imm, imm, o->in2);
1620     return help_branch(s, &c, is_imm, imm, o->in2);
1621 }
1622 
1623 static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
1624 {
1625     int r1 = get_field(s, r1);
1626     int imm = get_field(s, i2);
1627     DisasCompare c;
1628     TCGv_i64 t;
1629 
1630     c.cond = TCG_COND_NE;
1631     c.is_64 = false;
1632 
1633     t = tcg_temp_new_i64();
1634     tcg_gen_shri_i64(t, regs[r1], 32);
1635     tcg_gen_subi_i64(t, t, 1);
1636     store_reg32h_i64(r1, t);
1637     c.u.s32.a = tcg_temp_new_i32();
1638     c.u.s32.b = tcg_constant_i32(0);
1639     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1640 
1641     return help_branch(s, &c, 1, imm, o->in2);
1642 }
1643 
/* Branch on count (64-bit): decrement r1 and branch to the i2 target
   while the result is nonzero.  */
static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    DisasCompare c;
    bool is_imm;
    int imm;

    c.cond = TCG_COND_NE;
    c.is_64 = true;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_constant_i64(0);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1661 
/* Branch on index (32-bit): r1 += r3, then compare the new low word of
   r1 against the odd register of the r3 pair.  insn->data selects the
   low-or-equal (LE) vs high (GT) comparison.  */
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    DisasCompare c;
    bool is_imm;
    TCGv_i64 t;
    int imm;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1685 
/* Branch on index (64-bit): r1 += r3, then compare r1 against the odd
   register of the r3 pair.  insn->data selects LE vs GT as above.  */
static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    DisasCompare c;
    bool is_imm;
    int imm;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    /* r1 may alias the comparand register r3|1: copy its value before
       r1 is updated below.  */
    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
    } else {
        c.u.s64.b = regs[r3 | 1];
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1709 
/* Compare and branch: compare in1 with in2 using the relation selected
   by m3 (made unsigned when insn->data is set), branching to an
   immediate, register, or base+displacement (b4/d4) target.  */
static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    o->out = NULL;
    disas_jdest(s, i4, is_imm, imm, o->out);
    if (!is_imm && !o->out) {
        /* No i4 field: the target is the b4/d4 address.  */
        imm = 0;
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1734 
/* COMPARE (CEB): short-format BFP compare; the helper yields the CC.  */
static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* COMPARE (CDB): long-format BFP compare; the helper yields the CC.  */
static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* COMPARE (CXB): extended-format BFP compare; the helper yields the CC.  */
static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, tcg_env, o->in1_128, o->in2_128);
    set_cc_static(s);
    return DISAS_NEXT;
}
1755 
1756 static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
1757                                    bool m4_with_fpe)
1758 {
1759     const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
1760     uint8_t m3 = get_field(s, m3);
1761     uint8_t m4 = get_field(s, m4);
1762 
1763     /* m3 field was introduced with FPE */
1764     if (!fpe && m3_with_fpe) {
1765         m3 = 0;
1766     }
1767     /* m4 field was introduced with FPE */
1768     if (!fpe && m4_with_fpe) {
1769         m4 = 0;
1770     }
1771 
1772     /* Check for valid rounding modes. Mode 3 was introduced later. */
1773     if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
1774         gen_program_exception(s, PGM_SPECIFICATION);
1775         return NULL;
1776     }
1777 
1778     return tcg_constant_i32(deposit32(m3, 4, 4, m4));
1779 }
1780 
/* CONVERT TO FIXED family: FP to signed integer, rounding controlled by
   the packed m3/m4 fields; the helpers also produce the CC.  */

/* CFEB: float32 source.  */
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CFDB: float64 source.  */
static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CFXB: float128 source.  */
static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CGEB: float32 source, 64-bit result.  */
static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CGDB: float64 source, 64-bit result.  */
static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CGXB: float128 source, 64-bit result.  */
static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1852 
/* CONVERT TO LOGICAL family: FP to unsigned integer, rounding controlled
   by the packed m3/m4 fields; the helpers also produce the CC.  */

/* CLFEB: float32 source.  */
static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CLFDB: float64 source.  */
static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CLFXB: float128 source.  */
static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CLGEB: float32 source, 64-bit result.  */
static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CLGDB: float64 source, 64-bit result.  */
static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, tcg_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CLGXB: float128 source, 64-bit result.  */
static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, tcg_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1924 
/* CONVERT FROM FIXED / FROM LOGICAL families: integer to FP, rounding
   controlled by the packed m3/m4 fields.  No CC is set by these.  */

/* CEGB: signed 64-bit to float32.  */
static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* CDGB: signed 64-bit to float64.  */
static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* CXGB: signed 64-bit to float128.  */
static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out_128, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* CELGB: unsigned 64-bit to float32.  */
static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* CDLGB: unsigned 64-bit to float64.  */
static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* CXLGB: unsigned 64-bit to float128.  */
static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out_128, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}
1990 
/* CHECKSUM.  The helper returns a 128-bit pair: the checksum result
   (low half, written to R1) and the number of bytes processed (high
   half), which is used to advance R2 and decrement the length R2+1.  */
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    TCGv_i128 pair = tcg_temp_new_i128();
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(pair, tcg_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    tcg_gen_extr_i128_i64(o->out, len, pair);

    /* Advance the address by the processed length; shrink the count.  */
    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);

    return DISAS_NEXT;
}
2006 
/* COMPARE LOGICAL (character).  For power-of-two lengths up to 8 bytes
   the comparison is inlined as two loads plus an unsigned compare;
   otherwise it falls back to the byte-loop helper.  */
static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s, l1);
    TCGv_i64 src;
    TCGv_i32 vl;
    MemOp mop;

    switch (l + 1) {
    case 1:
    case 2:
    case 4:
    case 8:
        /* ctz32 of the length gives the MO_8/16/32/64 size code.  */
        mop = ctz32(l + 1) | MO_TE;
        /* Do not update cc_src yet: loading cc_dst may cause an exception. */
        src = tcg_temp_new_i64();
        tcg_gen_qemu_ld_tl(src, o->addr1, get_mem_index(s), mop);
        tcg_gen_qemu_ld_tl(cc_dst, o->in2, get_mem_index(s), mop);
        gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, src, cc_dst);
        return DISAS_NEXT;
    default:
        vl = tcg_constant_i32(l);
        gen_helper_clc(cc_op, tcg_env, vl, o->addr1, o->in2);
        set_cc_static(s);
        return DISAS_NEXT;
    }
}
2033 
2034 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2035 {
2036     int r1 = get_field(s, r1);
2037     int r2 = get_field(s, r2);
2038     TCGv_i32 t1, t2;
2039 
2040     /* r1 and r2 must be even.  */
2041     if (r1 & 1 || r2 & 1) {
2042         gen_program_exception(s, PGM_SPECIFICATION);
2043         return DISAS_NORETURN;
2044     }
2045 
2046     t1 = tcg_constant_i32(r1);
2047     t2 = tcg_constant_i32(r2);
2048     gen_helper_clcl(cc_op, tcg_env, t1, t2);
2049     set_cc_static(s);
2050     return DISAS_NEXT;
2051 }
2052 
2053 static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
2054 {
2055     int r1 = get_field(s, r1);
2056     int r3 = get_field(s, r3);
2057     TCGv_i32 t1, t3;
2058 
2059     /* r1 and r3 must be even.  */
2060     if (r1 & 1 || r3 & 1) {
2061         gen_program_exception(s, PGM_SPECIFICATION);
2062         return DISAS_NORETURN;
2063     }
2064 
2065     t1 = tcg_constant_i32(r1);
2066     t3 = tcg_constant_i32(r3);
2067     gen_helper_clcle(cc_op, tcg_env, t1, o->in2, t3);
2068     set_cc_static(s);
2069     return DISAS_NEXT;
2070 }
2071 
2072 static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
2073 {
2074     int r1 = get_field(s, r1);
2075     int r3 = get_field(s, r3);
2076     TCGv_i32 t1, t3;
2077 
2078     /* r1 and r3 must be even.  */
2079     if (r1 & 1 || r3 & 1) {
2080         gen_program_exception(s, PGM_SPECIFICATION);
2081         return DISAS_NORETURN;
2082     }
2083 
2084     t1 = tcg_constant_i32(r1);
2085     t3 = tcg_constant_i32(r3);
2086     gen_helper_clclu(cc_op, tcg_env, t1, o->in2, t3);
2087     set_cc_static(s);
2088     return DISAS_NEXT;
2089 }
2090 
/* COMPARE LOGICAL CHARACTERS UNDER MASK.  The helper compares the bytes
   of the low 32 bits of R1 selected by mask m3 against memory.  */
static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_constant_i32(get_field(s, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, tcg_env, t1, m3, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* COMPARE LOGICAL STRING.  The helper returns the updated operand
   addresses as a 128-bit pair, unpacked back into in1/in2.  */
static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    TCGv_i128 pair = tcg_temp_new_i128();

    gen_helper_clst(pair, tcg_env, regs[0], o->in1, o->in2);
    tcg_gen_extr_i128_i64(o->in2, o->in1, pair);

    set_cc_static(s);
    return DISAS_NEXT;
}
2112 
/* COPY SIGN: out = sign bit of in1 combined with magnitude of in2.  */
static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    /* Isolate the sign bit of in1 and the non-sign bits of in2.  */
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    return DISAS_NEXT;
}
2121 
/* COMPARE AND SWAP.  Performs an atomic cmpxchg at the second-operand
   address; CC is 0 on a successful exchange, 1 otherwise.  */
static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    set_cc_static(s);

    return DISAS_NEXT;
}
2144 
/* COMPARE DOUBLE AND SWAP (64-bit pair) via a 128-bit atomic cmpxchg.  */
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);

    o->out_128 = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(o->out_128, regs[r1 + 1], regs[r1]);

    /* Note out (R1:R1+1) = expected value and in2 (R3:R3+1) = new value.  */
    tcg_gen_atomic_cmpxchg_i128(o->out_128, o->addr1, o->out_128, o->in2_128,
                                get_mem_index(s), MO_BE | MO_128 | MO_ALIGN);

    /*
     * Extract result into cc_dst:cc_src, compare vs the expected value
     * in the as yet unmodified input registers, then update CC_OP.
     */
    tcg_gen_extr_i128_i64(cc_src, cc_dst, o->out_128);
    tcg_gen_xor_i64(cc_dst, cc_dst, regs[r1]);
    tcg_gen_xor_i64(cc_src, cc_src, regs[r1 + 1]);
    tcg_gen_or_i64(cc_dst, cc_dst, cc_src);
    set_cc_nz_u64(s, cc_dst);

    return DISAS_NEXT;
}
2168 
/* COMPARE AND SWAP AND STORE.  A parallel-context helper variant is
   used when translating with CF_PARALLEL so atomicity is preserved.  */
static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    TCGv_i32 t_r3 = tcg_constant_i32(r3);

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, tcg_env, t_r3, o->addr1, o->in2);
    } else {
        gen_helper_csst(cc_op, tcg_env, t_r3, o->addr1, o->in2);
    }

    set_cc_static(s);
    return DISAS_NEXT;
}
2183 
2184 #ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE.  Atomic cmpxchg at the (masked) operand
   address; on a successful exchange with the low bit of R2 set, the
   TLB is purged via the helper.  */
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    MemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    /* Mask the address down to the access size alignment.  */
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);

    gen_helper_purge(tcg_env);
    gen_set_label(lab);

    return DISAS_NEXT;
}
2224 #endif
2225 
/* CONVERT TO BINARY: load an 8-byte packed-decimal operand and let the
   helper convert it into R1.  */
static DisasJumpType op_cvb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t, o->addr1, get_mem_index(s), MO_TEUQ);
    gen_helper_cvb(tcg_env, tcg_constant_i32(get_field(s, r1)), t);
    return DISAS_NEXT;
}

/* CONVERT TO BINARY (64-bit result): 16-byte packed-decimal operand.  */
static DisasJumpType op_cvbg(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();
    tcg_gen_qemu_ld_i128(t, o->addr1, get_mem_index(s), MO_TE | MO_128);
    gen_helper_cvbg(o->out, tcg_env, t);
    return DISAS_NEXT;
}
2241 
2242 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2243 {
2244     TCGv_i64 t1 = tcg_temp_new_i64();
2245     TCGv_i32 t2 = tcg_temp_new_i32();
2246     tcg_gen_extrl_i64_i32(t2, o->in1);
2247     gen_helper_cvd(t1, t2);
2248     tcg_gen_qemu_st_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
2249     return DISAS_NEXT;
2250 }
2251 
/* CONVERT TO DECIMAL (64-bit source): 16-byte packed-decimal store.  */
static DisasJumpType op_cvdg(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();
    gen_helper_cvdg(t, o->in1);
    tcg_gen_qemu_st_i128(t, o->in2, get_mem_index(s), MO_TE | MO_128);
    return DISAS_NEXT;
}
2259 
2260 static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2261 {
2262     int m3 = get_field(s, m3);
2263     TCGLabel *lab = gen_new_label();
2264     TCGCond c;
2265 
2266     c = tcg_invert_cond(ltgt_cond[m3]);
2267     if (s->insn->data) {
2268         c = tcg_unsigned_cond(c);
2269     }
2270     tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2271 
2272     /* Trap.  */
2273     gen_trap(s);
2274 
2275     gen_set_label(lab);
2276     return DISAS_NEXT;
2277 }
2278 
/* CONVERT UTF-xx TO UTF-yy.  insn->data encodes source/destination
   widths (e.g. 12 = UTF-8 to UTF-16); helpers do the conversion.  */
static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    /* Without the ETF3 enhancement, the well-formedness check bit
       in m3 is not available and is treated as zero.  */
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_constant_i32(r1);
    tr2 = tcg_constant_i32(r2);
    chk = tcg_constant_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, tcg_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, tcg_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    set_cc_static(s);
    return DISAS_NEXT;
}
2325 
2326 #ifndef CONFIG_USER_ONLY
2327 static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2328 {
2329     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2330     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2331     TCGv_i32 func_code = tcg_constant_i32(get_field(s, i2));
2332 
2333     gen_helper_diag(tcg_env, r1, r3, func_code);
2334     return DISAS_NEXT;
2335 }
2336 #endif
2337 
/* DIVIDE (32-bit signed).  The helper packs remainder/quotient into a
   single i64; unpack into the even/odd output register pair.  */
static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out, tcg_env, o->in1, o->in2);
    tcg_gen_extr32_i64(o->out2, o->out, o->out);
    return DISAS_NEXT;
}

/* DIVIDE LOGICAL (32-bit unsigned), same result packing as above.  */
static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out, tcg_env, o->in1, o->in2);
    tcg_gen_extr32_i64(o->out2, o->out, o->out);
    return DISAS_NEXT;
}

/* DIVIDE SINGLE (64-bit signed): 128-bit helper result split into
   remainder (out2) and quotient (out).  */
static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();

    gen_helper_divs64(t, tcg_env, o->in1, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, t);
    return DISAS_NEXT;
}

/* DIVIDE LOGICAL (64-bit unsigned, 128-bit dividend in out:out2).  */
static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();

    gen_helper_divu64(t, tcg_env, o->out, o->out2, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, t);
    return DISAS_NEXT;
}
2369 
/* DIVIDE (short BFP).  */
static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* DIVIDE (long BFP).  */
static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* DIVIDE (extended BFP, 128-bit operands).  */
static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}

/* EXTRACT ACCESS REGISTER: copy access register r2 into the output.  */
static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, aregs[r2]));
    return DISAS_NEXT;
}

/* EXTRACT CPU ATTRIBUTE: always returns -1 (no cache topology data).  */
static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}

/* EXTRACT FPC: read the floating-point-control register.  */
static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
2407 
/* EXTRACT PSW.  R1 receives the high PSW word with the current CC
   deposited into bits 12-13; R2 (if nonzero) receives the low word.  */
static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t_cc = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    gen_op_calc_cc(s);
    tcg_gen_extu_i32_i64(t_cc, cc_op);
    tcg_gen_shri_i64(t, psw_mask, 32);
    tcg_gen_deposit_i64(t, t, t_cc, 12, 2);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }
    return DISAS_NEXT;
}
2427 
/* EXECUTE.  The helper fetches and prepares the target instruction;
   translation restarts with s->ex_value set, hence PC_CC_UPDATED.  */
static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return DISAS_NORETURN;
    }

    /* Sync psw.addr and cc_op before handing control to the helper.  */
    update_psw_addr(s);
    update_cc_op(s);

    /* R0 means "no modification" and contributes zero.  */
    if (r1 == 0) {
        v1 = tcg_constant_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_constant_i32(s->ilen);
    gen_helper_ex(tcg_env, ilen, v1, o->in2);

    return DISAS_PC_CC_UPDATED;
}
2454 
/* LOAD FP INTEGER (short BFP): round to an integral value; m34 holds
   the rounding-mode/inexact-suppression fields.  */
static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fieb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* LOAD FP INTEGER (long BFP).  */
static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fidb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* LOAD FP INTEGER (extended BFP, 128-bit).  */
static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fixb(o->out_128, tcg_env, o->in2_128, m34);
    return DISAS_NEXT;
}
2487 
/* FIND LEFTMOST ONE.  R1 = leading-zero count (64 for zero input);
   R1+1 = input with the found bit cleared.  */
static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return DISAS_NEXT;
}
2507 
/* INSERT CHARACTERS UNDER MASK.  Contiguous masks collapse to a single
   wide load + deposit; sparse masks use a byte-by-byte loop.  ccm
   accumulates the mask of inserted bits for the CC computation.  */
static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, len, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t ccm;

    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit load.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
        len = 32;
        goto one_insert;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit load.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
        len = 16;
        goto one_insert;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit load.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        len = 8;
        goto one_insert;

    one_insert:
        /* The lowest set mask bit determines the insertion position.  */
        pos = base + ctz32(m3) * 8;
        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
        ccm = ((1ull << len) - 1) << pos;
        break;

    case 0:
        /* Recognize access exceptions for the first byte.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;

    default:
        /* This is going to be a sequence of loads and inserts.  */
        pos = base + 32 - 8;
        ccm = 0;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
                tcg_gen_addi_i64(o->in2, o->in2, 1);
                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
                ccm |= 0xffull << pos;
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }

    tcg_gen_movi_i64(tmp, ccm);
    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
    return DISAS_NEXT;
}
2572 
2573 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2574 {
2575     int shift = s->insn->data & 0xff;
2576     int size = s->insn->data >> 8;
2577     tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2578     return DISAS_NEXT;
2579 }
2580 
/* INSERT PROGRAM MASK.  Deposit the program mask (PSW bits) and the
   current CC into bits 24-31 of the output register.  */
static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    gen_op_calc_cc(s);
    t1 = tcg_temp_new_i64();
    /* Extract the 4-bit program mask from the PSW.  */
    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
    t2 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t2, cc_op);
    /* Combine CC (bits 4-5 of the byte) with the program mask.  */
    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
    return DISAS_NEXT;
}
2594 
2595 #ifndef CONFIG_USER_ONLY
2596 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2597 {
2598     TCGv_i32 m4;
2599 
2600     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2601         m4 = tcg_constant_i32(get_field(s, m4));
2602     } else {
2603         m4 = tcg_constant_i32(0);
2604     }
2605     gen_helper_idte(tcg_env, o->in1, o->in2, m4);
2606     return DISAS_NEXT;
2607 }
2608 
2609 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2610 {
2611     TCGv_i32 m4;
2612 
2613     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2614         m4 = tcg_constant_i32(get_field(s, m4));
2615     } else {
2616         m4 = tcg_constant_i32(0);
2617     }
2618     gen_helper_ipte(tcg_env, o->in1, o->in2, m4);
2619     return DISAS_NEXT;
2620 }
2621 
/* INSERT STORAGE KEY EXTENDED: fetch the storage key for the address.  */
static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
    gen_helper_iske(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}
2627 #endif
2628 
2629 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2630 {
2631     int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2632     int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2633     int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2634     TCGv_i32 t_r1, t_r2, t_r3, type;
2635 
2636     switch (s->insn->data) {
2637     case S390_FEAT_TYPE_KMA:
2638         if (r3 == r1 || r3 == r2) {
2639             gen_program_exception(s, PGM_SPECIFICATION);
2640             return DISAS_NORETURN;
2641         }
2642         /* FALL THROUGH */
2643     case S390_FEAT_TYPE_KMCTR:
2644         if (r3 & 1 || !r3) {
2645             gen_program_exception(s, PGM_SPECIFICATION);
2646             return DISAS_NORETURN;
2647         }
2648         /* FALL THROUGH */
2649     case S390_FEAT_TYPE_PPNO:
2650     case S390_FEAT_TYPE_KMF:
2651     case S390_FEAT_TYPE_KMC:
2652     case S390_FEAT_TYPE_KMO:
2653     case S390_FEAT_TYPE_KM:
2654         if (r1 & 1 || !r1) {
2655             gen_program_exception(s, PGM_SPECIFICATION);
2656             return DISAS_NORETURN;
2657         }
2658         /* FALL THROUGH */
2659     case S390_FEAT_TYPE_KMAC:
2660     case S390_FEAT_TYPE_KIMD:
2661     case S390_FEAT_TYPE_KLMD:
2662         if (r2 & 1 || !r2) {
2663             gen_program_exception(s, PGM_SPECIFICATION);
2664             return DISAS_NORETURN;
2665         }
2666         /* FALL THROUGH */
2667     case S390_FEAT_TYPE_PCKMO:
2668     case S390_FEAT_TYPE_PCC:
2669         break;
2670     default:
2671         g_assert_not_reached();
2672     };
2673 
2674     t_r1 = tcg_constant_i32(r1);
2675     t_r2 = tcg_constant_i32(r2);
2676     t_r3 = tcg_constant_i32(r3);
2677     type = tcg_constant_i32(s->insn->data);
2678     gen_helper_msa(cc_op, tcg_env, t_r1, t_r2, t_r3, type);
2679     set_cc_static(s);
2680     return DISAS_NEXT;
2681 }
2682 
/* COMPARE AND SIGNAL (short BFP): like compare but signals on QNaN.  */
static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* COMPARE AND SIGNAL (long BFP).  */
static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* COMPARE AND SIGNAL (extended BFP, 128-bit operands).  */
static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, tcg_env, o->in1_128, o->in2_128);
    set_cc_static(s);
    return DISAS_NEXT;
}
2703 
/* LOAD AND ADD family.  The atomic fetch-add returns the original
   memory value (the instruction's real output); the sum is then
   recomputed locally because the CC is derived from the addition.
   addu64 selects the 64-bit logical (carry-producing) variant.  */
static DisasJumpType help_laa(DisasContext *s, DisasOps *o, bool addu64)
{
    /* in2 now holds the original memory value; in1 is the addend.  */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* Recompute the addition so the CC helpers see the sum (and, for
       the logical variant, the carry in cc_src).  */
    if (addu64) {
        tcg_gen_movi_i64(cc_src, 0);
        tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    } else {
        tcg_gen_add_i64(o->out, o->in1, o->in2);
    }
    return DISAS_NEXT;
}

/* LOAD AND ADD (signed CC semantics).  */
static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
{
    return help_laa(s, o, false);
}

/* LOAD AND ADD LOGICAL (64-bit unsigned CC semantics).  */
static DisasJumpType op_laa_addu64(DisasContext *s, DisasOps *o)
{
    return help_laa(s, o, true);
}
2729 
/* LOAD AND AND.  The atomic fetch-and returns the original memory
   value (the real output); the AND is redone locally for the CC.  */
static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
{
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* Recompute the operation for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* LOAD AND OR, same structure as LOAD AND AND.  */
static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
{
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* Recompute the operation for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* LOAD AND EXCLUSIVE OR, same structure as LOAD AND AND.  */
static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
{
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* Recompute the operation for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2762 
/* LOAD LENGTHENED: short BFP widened to long BFP (always exact).  */
static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}

/* LOAD ROUNDED: long BFP narrowed to short; m34 holds rounding mode.  */
static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ledb(o->out, tcg_env, o->in2, m34);
    return DISAS_NEXT;
}

/* LOAD ROUNDED: extended BFP narrowed to long.  */
static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ldxb(o->out, tcg_env, o->in2_128, m34);
    return DISAS_NEXT;
}

/* LOAD ROUNDED: extended BFP narrowed to short.  */
static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_lexb(o->out, tcg_env, o->in2_128, m34);
    return DISAS_NEXT;
}
2801 
/* LOAD LENGTHENED: long BFP widened to extended (128-bit result).  */
static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out_128, tcg_env, o->in2);
    return DISAS_NEXT;
}

/* LOAD LENGTHENED: short BFP widened to extended.  */
static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out_128, tcg_env, o->in2);
    return DISAS_NEXT;
}

/* Place a 32-bit FP value into the high half of the 64-bit register.  */
static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
{
    tcg_gen_shli_i64(o->out, o->in2, 32);
    return DISAS_NEXT;
}

/* LOAD 31-bit: mask to the low 31 bits (clears the high 33).  */
static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return DISAS_NEXT;
}
2825 
/* Sign-extending 8-bit load.  */
static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_SB);
    return DISAS_NEXT;
}

/* Zero-extending 8-bit load.  */
static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_UB);
    return DISAS_NEXT;
}

/* Sign-extending 16-bit load.  */
static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TESW);
    return DISAS_NEXT;
}

/* Zero-extending 16-bit load.  */
static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUW);
    return DISAS_NEXT;
}

/* Sign-extending 32-bit load; insn->data may add alignment bits.  */
static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
                       MO_TESL | s->insn->data);
    return DISAS_NEXT;
}

/* Zero-extending 32-bit load; insn->data may add alignment bits.  */
static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
                       MO_TEUL | s->insn->data);
    return DISAS_NEXT;
}

/* 64-bit load; insn->data may add alignment bits.  */
static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s),
                        MO_TEUQ | s->insn->data);
    return DISAS_NEXT;
}
2870 
/* LOAD AND TRAP (32-bit): store the value, then trap if it is zero.  */
static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

/* LOAD AND TRAP (64-bit): load from memory, then trap if zero.  */
static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUQ);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

/* LOAD HIGH AND TRAP: store into the register's high 32 bits.  */
static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

/* LOAD LOGICAL AND TRAP (32-bit zero-extended).  */
static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();

    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

/* LOAD LOGICAL 31-BIT AND TRAP: mask to 31 bits, trap if zero.  */
static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2926 
/* LOAD/STORE/SELECT ON CONDITION: out = condition ? in2 : in1.  The
   condition mask comes from m3 (LOAD * ON CONDITION) or m4 (SELECT).  */
static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    if (have_field(s, m3)) {
        /* LOAD * ON CONDITION */
        disas_jcc(s, &c, get_field(s, m3));
    } else {
        /* SELECT */
        disas_jcc(s, &c, get_field(s, m4));
    }

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
    } else {
        /* Widen the 32-bit comparison result so a 64-bit movcond works.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);

        z = tcg_constant_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
    }

    return DISAS_NEXT;
}
2957 
2958 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit control registers r1..r3).  */
static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lctl(tcg_env, r1, o->in2, r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}

/* LOAD CONTROL (64-bit control registers r1..r3).  */
static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lctlg(tcg_env, r1, o->in2, r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
2980 
/* LOAD REAL ADDRESS: translate via helper; CC is set by the helper. */
static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
    gen_helper_lra(o->out, tcg_env, o->out, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2987 
/* LOAD PROGRAM PARAMETER: store the operand into env->pp. */
static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
{
    tcg_gen_st_i64(o->in2, tcg_env, offsetof(CPUS390XState, pp));
    return DISAS_NEXT;
}
2993 
/* LOAD PSW: load an 8-byte short-format PSW and install it. */
static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 mask, addr;

    per_breaking_event(s);

    /*
     * Convert the short PSW into the normal PSW, similar to what
     * s390_cpu_load_normal() does.
     */
    mask = tcg_temp_new_i64();
    addr = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(mask, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN_8);
    tcg_gen_andi_i64(addr, mask, PSW_MASK_SHORT_ADDR);
    tcg_gen_andi_i64(mask, mask, PSW_MASK_SHORT_CTRL);
    tcg_gen_xori_i64(mask, mask, PSW_MASK_SHORTPSW);
    gen_helper_load_psw(tcg_env, mask, addr);
    return DISAS_NORETURN;
}
3013 
/* LOAD PSW EXTENDED: load the 16-byte (mask, address) PSW pair. */
static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    /* Only the first doubleword requires alignment checking.  */
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUQ | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld_i64(t2, o->in2, get_mem_index(s), MO_TEUQ);
    gen_helper_load_psw(tcg_env, t1, t2);
    return DISAS_NORETURN;
}
3029 #endif
3030 
3031 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
3032 {
3033     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
3034     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
3035 
3036     gen_helper_lam(tcg_env, r1, o->in2, r3);
3037     return DISAS_NEXT;
3038 }
3039 
/*
 * LOAD MULTIPLE (32-bit): load registers r1..r3 (register numbers wrap
 * modulo 16) from consecutive words starting at the address in in2.
 */
static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32_i64(r1, t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32_i64(r1, t1);
    }
    return DISAS_NEXT;
}
3079 
/*
 * LOAD MULTIPLE HIGH: like op_lm32, but the loaded words go into the
 * high 32-bit halves of registers r1..r3 (wrapping modulo 16).
 */
static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32h_i64(r1, t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32h_i64(r1, t1);
    }
    return DISAS_NEXT;
}
3119 
/*
 * LOAD MULTIPLE (64-bit): load registers r1..r3 (wrapping modulo 16)
 * from consecutive doublewords starting at the address in in2.
 */
static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(regs[r3], t2, get_mem_index(s), MO_TEUQ);
    /* Commit r1 only after both loads succeeded.  */
    tcg_gen_mov_i64(regs[r1], t1);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
    }
    return DISAS_NEXT;
}
3156 
/*
 * LOAD PAIR DISJOINT: perform the two loads as if interlocked.  Under
 * CF_PARALLEL this cannot be done inline, so stop the world and retry
 * the instruction serially via EXCP_ATOMIC.
 */
static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    MemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step.  */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return DISAS_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
    a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);

    /* ... and indicate that we performed them while interlocked.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
3180 
/* LOAD PAIR FROM QUADWORD: one aligned 16-byte load into out_128. */
static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
{
    o->out_128 = tcg_temp_new_i128();
    tcg_gen_qemu_ld_i128(o->out_128, o->in2, get_mem_index(s),
                         MO_TE | MO_128 | MO_ALIGN);
    return DISAS_NEXT;
}
3188 
3189 #ifndef CONFIG_USER_ONLY
/* LOAD USING REAL ADDRESS: load through the real-address MMU index. */
static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
    return DISAS_NEXT;
}
3195 #endif
3196 
/* LOAD AND ZERO RIGHTMOST BYTE: copy in2 with the low 8 bits cleared. */
static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, -256);
    return DISAS_NEXT;
}
3202 
/*
 * LOAD COUNT TO BLOCK BOUNDARY: out = min(16, bytes remaining in the
 * 2^(m3+6)-byte block containing addr1).  m3 > 6 raises a
 * specification exception.
 */
static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
{
    const int64_t block_size = (1ull << (get_field(s, m3) + 6));

    if (get_field(s, m3) > 6) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* -(addr | -block_size) == block_size - (addr % block_size) */
    tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
    tcg_gen_neg_i64(o->addr1, o->addr1);
    tcg_gen_movi_i64(o->out, 16);
    tcg_gen_umin_i64(o->out, o->out, o->addr1);
    gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
    return DISAS_NEXT;
}
3219 
/*
 * MONITOR CALL: monitor classes above 15 are a specification
 * exception.  In system emulation the monitor helper is notified;
 * in user mode the instruction is a NOP.
 */
static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
{
    const uint8_t monitor_class = get_field(s, i2);

    if (monitor_class & 0xf0) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

#if !defined(CONFIG_USER_ONLY)
    gen_helper_monitor_call(tcg_env, o->addr1,
                            tcg_constant_i32(monitor_class));
#endif
    /* Defaults to a NOP. */
    return DISAS_NEXT;
}
3236 
/* Move in2 to out by stealing the temporary; no TCG copy is emitted. */
static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->in2 = NULL;
    return DISAS_NEXT;
}
3243 
/*
 * Move in2 to out and additionally set access register r1 according
 * to the current address-space-control mode; in secondary-space mode
 * the content of access register b2 is copied (0 if b2 is 0).
 */
static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    TCGv ar1 = tcg_temp_new_i64();
    int r1 = get_field(s, r1);

    /* Steal in2 as the result, as op_mov2 does.  */
    o->out = o->in2;
    o->in2 = NULL;

    /* All four ASC values are covered, so ar1 is always written.  */
    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        if (b2) {
            tcg_gen_ld32u_i64(ar1, tcg_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, tcg_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}
3275 
/* Move the in1/in2 pair into out/out2 by stealing the temporaries. */
static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->in1 = NULL;
    o->in2 = NULL;
    return DISAS_NEXT;
}
3284 
3285 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3286 {
3287     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3288 
3289     gen_helper_mvc(tcg_env, l, o->addr1, o->in2);
3290     return DISAS_NEXT;
3291 }
3292 
/* MOVE RIGHT TO LEFT: helper call; the length is taken from r0. */
static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
{
    gen_helper_mvcrl(tcg_env, regs[0], o->addr1, o->in2);
    return DISAS_NEXT;
}
3298 
3299 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3300 {
3301     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3302 
3303     gen_helper_mvcin(tcg_env, l, o->addr1, o->in2);
3304     return DISAS_NEXT;
3305 }
3306 
/*
 * MOVE LONG: r1 and r2 each designate an even/odd register pair, so
 * odd register numbers are a specification exception.  CC from helper.
 */
static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even.  */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t2 = tcg_constant_i32(r2);
    gen_helper_mvcl(cc_op, tcg_env, t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3325 
/*
 * MOVE LONG EXTENDED: r1 and r3 each designate an even/odd register
 * pair, so odd register numbers are a specification exception.
 */
static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t3 = tcg_constant_i32(r3);
    gen_helper_mvcle(cc_op, tcg_env, t1, o->in2, t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
3344 
/*
 * MOVE LONG UNICODE: r1 and r3 each designate an even/odd register
 * pair, so odd register numbers are a specification exception.
 */
static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t3 = tcg_constant_i32(r3);
    gen_helper_mvclu(cc_op, tcg_env, t1, o->in2, t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
3363 
/* MOVE WITH OPTIONAL SPECIFICATIONS: control value taken from regs[r3]. */
static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    gen_helper_mvcos(cc_op, tcg_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3371 
3372 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: cross-space move via helper; CC from helper.
   Note the length register number comes from the l1 field. */
static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    int r3 = get_field(s, r3);
    gen_helper_mvcp(cc_op, tcg_env, regs[r1], o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3381 
/* MOVE TO SECONDARY: cross-space move via helper; CC from helper.
   Note the length register number comes from the l1 field. */
static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    int r3 = get_field(s, r3);
    gen_helper_mvcs(cc_op, tcg_env, regs[r1], o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3390 #endif
3391 
3392 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3393 {
3394     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3395 
3396     gen_helper_mvn(tcg_env, l, o->addr1, o->in2);
3397     return DISAS_NEXT;
3398 }
3399 
3400 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3401 {
3402     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3403 
3404     gen_helper_mvo(tcg_env, l, o->addr1, o->in2);
3405     return DISAS_NEXT;
3406 }
3407 
3408 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3409 {
3410     TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
3411     TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
3412 
3413     gen_helper_mvpg(cc_op, tcg_env, regs[0], t1, t2);
3414     set_cc_static(s);
3415     return DISAS_NEXT;
3416 }
3417 
3418 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3419 {
3420     TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
3421     TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
3422 
3423     gen_helper_mvst(cc_op, tcg_env, t1, t2);
3424     set_cc_static(s);
3425     return DISAS_NEXT;
3426 }
3427 
3428 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3429 {
3430     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3431 
3432     gen_helper_mvz(tcg_env, l, o->addr1, o->in2);
3433     return DISAS_NEXT;
3434 }
3435 
/* 64x64 -> 64-bit multiply. */
static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3441 
/* 64x64 -> 128-bit unsigned multiply: high half in out, low in out2. */
static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3447 
/* 64x64 -> 128-bit signed multiply: high half in out, low in out2. */
static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
{
    tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3453 
/* MULTIPLY (short BFP) via helper. */
static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3459 
/* MULTIPLY (short BFP sources, long BFP result) via helper. */
static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3465 
/* MULTIPLY (long BFP) via helper. */
static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3471 
/* MULTIPLY (extended, 128-bit BFP) via helper. */
static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
3477 
/* MULTIPLY (long BFP sources, extended BFP result) via helper. */
static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out_128, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3483 
/* MULTIPLY AND ADD (short BFP): out = in1 * in2 + f(r3), via helper. */
static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_maeb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3490 
/* MULTIPLY AND ADD (long BFP): out = in1 * in2 + f(r3), via helper. */
static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_madb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3497 
/* MULTIPLY AND SUBTRACT (short BFP) via helper. */
static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_mseb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3504 
/* MULTIPLY AND SUBTRACT (long BFP) via helper. */
static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_msdb(o->out, tcg_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3511 
/* LOAD NEGATIVE: out = -|in2| (always non-positive). */
static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z = tcg_constant_i64(0);
    TCGv_i64 n = tcg_temp_new_i64();

    tcg_gen_neg_i64(n, o->in2);
    /* out = (in2 >= 0 ? -in2 : in2) */
    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
    return DISAS_NEXT;
}
3521 
/* LOAD NEGATIVE (32-bit float): force the sign bit on. */
static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
3527 
/* LOAD NEGATIVE (64-bit float): force the sign bit on. */
static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
3533 
/* LOAD NEGATIVE (128-bit float): sign bit is in the high doubleword. */
static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3540 
3541 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3542 {
3543     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3544 
3545     gen_helper_nc(cc_op, tcg_env, l, o->addr1, o->in2);
3546     set_cc_static(s);
3547     return DISAS_NEXT;
3548 }
3549 
/* LOAD COMPLEMENT: out = -in2. */
static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3555 
/* LOAD COMPLEMENT (32-bit float): flip the sign bit. */
static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
3561 
/* LOAD COMPLEMENT (64-bit float): flip the sign bit. */
static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
3567 
/* LOAD COMPLEMENT (128-bit float): sign bit is in the high doubleword. */
static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3574 
3575 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3576 {
3577     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3578 
3579     gen_helper_oc(cc_op, tcg_env, l, o->addr1, o->in2);
3580     set_cc_static(s);
3581     return DISAS_NEXT;
3582 }
3583 
/* Bitwise OR: out = in1 | in2. */
static DisasJumpType op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3589 
/*
 * OR immediate into a sub-field of the register: insn->data packs
 * (field size << 8) | shift.  CC reflects only the targeted bits.
 */
static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
3605 
/*
 * OR to storage: without the interlocked-access-2 facility this is a
 * plain load/modify/store; with it, a single atomic fetch-or.  The
 * result is recomputed either way so the CC can be set.
 */
static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
3626 
3627 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3628 {
3629     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3630 
3631     gen_helper_pack(tcg_env, l, o->addr1, o->in2);
3632     return DISAS_NEXT;
3633 }
3634 
/* PACK ASCII: l2 encodes operand-2 length minus one; max 32 bytes. */
static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l2 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l2);
    gen_helper_pka(tcg_env, o->addr1, o->in2, l);
    return DISAS_NEXT;
}
3649 
/* PACK UNICODE: l2 encodes operand-2 length minus one; even, max 64. */
static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l2 & 1) || (l2 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l2);
    gen_helper_pku(tcg_env, o->addr1, o->in2, l);
    return DISAS_NEXT;
}
3664 
/*
 * POPULATION COUNT: with m3 bit 8 set and the misc-instruction-ext3
 * facility, a full 64-bit ctpop; otherwise delegate to the helper.
 */
static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
{
    const uint8_t m3 = get_field(s, m3);

    if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) {
        tcg_gen_ctpop_i64(o->out, o->in2);
    } else {
        gen_helper_popcnt(o->out, o->in2);
    }
    return DISAS_NEXT;
}
3676 
3677 #ifndef CONFIG_USER_ONLY
/* PURGE TLB via helper. */
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
    gen_helper_ptlb(tcg_env);
    return DISAS_NEXT;
}
3683 #endif
3684 
/*
 * ROTATE THEN INSERT SELECTED BITS (RISBG/RISBGN/RISBHG/RISBLG):
 * rotate in2 left by i5 and insert the i3..i4 bit range into out,
 * optionally zeroing the remaining bits (i4 bit 0x80).  Uses
 * extract/deposit fast paths when the masks allow.
 */
static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  */
    switch (s->fields.op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 = (i3 & 31) + 32;
        i4 = (i4 & 31) + 32;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2. */
    if (i3 <= i4) {
        /* [0...i3---i4...63] */
        mask = (-1ull >> i3) & (-1ull << (63 - i4));
    } else {
        /* [0---i4...i3---63] */
        mask = (-1ull >> i3) | (-1ull << (63 - i4));
    }
    /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register.  */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;

    /* In some cases we can implement this with extract.  */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return DISAS_NEXT;
    }

    /* In some cases we can implement this with deposit.  */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO.  */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output.  */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return DISAS_NEXT;
}
3772 
3773 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3774 {
3775     int i3 = get_field(s, i3);
3776     int i4 = get_field(s, i4);
3777     int i5 = get_field(s, i5);
3778     TCGv_i64 orig_out;
3779     uint64_t mask;
3780 
3781     /* If this is a test-only form, arrange to discard the result.  */
3782     if (i3 & 0x80) {
3783         tcg_debug_assert(o->out != NULL);
3784         orig_out = o->out;
3785         o->out = tcg_temp_new_i64();
3786         tcg_gen_mov_i64(o->out, orig_out);
3787     }
3788 
3789     i3 &= 63;
3790     i4 &= 63;
3791     i5 &= 63;
3792 
3793     /* MASK is the set of bits to be operated on from R2.
3794        Take care for I3/I4 wraparound.  */
3795     mask = ~0ull >> i3;
3796     if (i3 <= i4) {
3797         mask ^= ~0ull >> i4 >> 1;
3798     } else {
3799         mask |= ~(~0ull >> i4 >> 1);
3800     }
3801 
3802     /* Rotate the input as necessary.  */
3803     tcg_gen_rotli_i64(o->in2, o->in2, i5);
3804 
3805     /* Operate.  */
3806     switch (s->fields.op2) {
3807     case 0x54: /* AND */
3808         tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3809         tcg_gen_and_i64(o->out, o->out, o->in2);
3810         break;
3811     case 0x56: /* OR */
3812         tcg_gen_andi_i64(o->in2, o->in2, mask);
3813         tcg_gen_or_i64(o->out, o->out, o->in2);
3814         break;
3815     case 0x57: /* XOR */
3816         tcg_gen_andi_i64(o->in2, o->in2, mask);
3817         tcg_gen_xor_i64(o->out, o->out, o->in2);
3818         break;
3819     default:
3820         abort();
3821     }
3822 
3823     /* Set the CC.  */
3824     tcg_gen_andi_i64(cc_dst, o->out, mask);
3825     set_cc_nz_u64(s, cc_dst);
3826     return DISAS_NEXT;
3827 }
3828 
/* Byte-swap the low 16 bits; upper bits of out are zeroed. */
static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}
3834 
/* Byte-swap the low 32 bits; upper bits of out are zeroed. */
static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}
3840 
/* Byte-swap the full 64-bit operand. */
static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3846 
/* ROTATE LEFT (32-bit): rotate the low half of in1 by in2; the result
   is zero-extended into the 64-bit output. */
static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    tcg_gen_extrl_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    return DISAS_NEXT;
}
3858 
/* ROTATE LEFT (64-bit). */
static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3864 
3865 #ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED via helper; CC from helper. */
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
    gen_helper_rrbe(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3872 
/* SET ADDRESS SPACE CONTROL (FAST) via helper. */
static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
    gen_helper_sacf(tcg_env, o->in2);
    /* Addressing mode has changed, so end the block.  */
    return DISAS_TOO_MANY;
}
3879 #endif
3880 
/*
 * SET ADDRESSING MODE: insn->data selects 24-bit (0), 31-bit (1) or
 * 64-bit (other) mode; the mode is deposited into PSW mask bits 31-32.
 */
static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    /* MASK is the set of addresses representable in the target mode.  */
    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    s->pc_tmp &= mask;

    tsam = tcg_constant_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);

    /* Always exit the TB, since we (may have) changed execution mode.  */
    return DISAS_TOO_MANY;
}
3914 
/* SET ACCESS REGISTER: store the low 32 bits of in2 into aregs[r1]. */
static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_st32_i64(o->in2, tcg_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}
3921 
/* SUBTRACT (short BFP) via helper. */
static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3927 
/* SUBTRACT (long BFP) via helper. */
static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3933 
/* SUBTRACT (extended, 128-bit BFP) via helper. */
static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
3939 
/* SQUARE ROOT (short BFP) via helper. */
static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}
3945 
/* SQUARE ROOT (long BFP) via helper. */
static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, tcg_env, o->in2);
    return DISAS_NEXT;
}
3951 
/* SQUARE ROOT (extended, 128-bit BFP) via helper. */
static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out_128, tcg_env, o->in2_128);
    return DISAS_NEXT;
}
3957 
3958 #ifndef CONFIG_USER_ONLY
/* SERVICE CALL via helper; CC from helper. */
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
    gen_helper_servc(cc_op, tcg_env, o->in2, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}
3965 
/* SIGNAL PROCESSOR via helper; CC from helper. */
static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_sigp(cc_op, tcg_env, o->in2, r1, r3);
    set_cc_static(s);
    return DISAS_NEXT;
}
3975 #endif
3976 
/*
 * STORE ON CONDITION (STOC/STOCG/STOCFH, selected by insn->data):
 * branch around the store when the m3 condition is not fulfilled.
 */
static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }

    r1 = get_field(s, r1);
    a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUQ);
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUL);
        break;
    case 2: /* STOCFH */
        /* Store the high half of the register.  */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st_i64(h, a, get_mem_index(s), MO_TEUL);
        break;
    default:
        g_assert_not_reached();
    }

    gen_set_label(lab);
    return DISAS_NEXT;
}
4018 
/*
 * SHIFT LEFT SINGLE (arithmetic): s->insn->data is the sign-bit position
 * (31 for the 32-bit form, 63 for the 64-bit form).  For the 32-bit form
 * the value is pre-shifted into the high half so the CC helper can examine
 * the bits that would overflow.
 */
static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;
    uint64_t sign = 1ull << s->insn->data;
    if (s->insn->data == 31) {
        t = tcg_temp_new_i64();
        tcg_gen_shli_i64(t, o->in1, 32);
    } else {
        t = o->in1;
    }
    gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}
4038 
/* SHIFT LEFT SINGLE LOGICAL: out = in1 << in2. */
static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SHIFT RIGHT SINGLE (arithmetic): out = in1 >> in2, sign-propagating. */
static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SHIFT RIGHT SINGLE LOGICAL: out = in1 >> in2, zero-filling. */
static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
4056 
/* SET FPC: load the floating-point control register from in2 via helper. */
static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(tcg_env, o->in2);
    return DISAS_NEXT;
}

/* SET FPC AND SIGNAL: like SFPC, but the helper may also raise signals. */
static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(tcg_env, o->in2);
    return DISAS_NEXT;
}

/* SET BFP ROUNDING MODE (2 bit). */
static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
{
    /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
    gen_helper_srnm(tcg_env, o->addr1);
    return DISAS_NEXT;
}
4076 
/* SET BFP ROUNDING MODE (3 bit). */
static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
{
    /* Bits 0-55 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
    gen_helper_srnm(tcg_env, o->addr1);
    return DISAS_NEXT;
}
4084 
/* SET DFP ROUNDING MODE: deposit the 3-bit mode directly into the FPC. */
static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bits other than 61-63 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);

    /* No need to call a helper, we don't implement dfp */
    tcg_gen_ld32u_i64(tmp, tcg_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
    tcg_gen_st32_i64(tmp, tcg_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
4098 
/*
 * SET PROGRAM MASK: bits 2-3 of the first operand become the CC and
 * bits 4-7 become the program mask in the PSW.
 */
static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
{
    /* Extract the new CC from bits 28-29 of the 32-bit operand view.  */
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    /* Deposit the 4-bit program mask into the PSW.  */
    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return DISAS_NEXT;
}
4109 
/*
 * EXTRACT CPU TIME: loads the third operand into r3, stores
 * (first operand - CPU timer) into GR0 and the second operand
 * address into GR1.
 */
static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s, b1);
    int d1 = get_field(s, d1);
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);
    int r3 = get_field(s, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld_i64(regs[r3], o->addr1, get_mem_index(s), MO_TEUQ);

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, tcg_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);
    return DISAS_NEXT;
}
4138 
4139 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: bits of in2 become the PSW access key. */
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return DISAS_NEXT;
}

/* SET STORAGE KEY EXTENDED: helper updates the storage key for in2. */
static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
    gen_helper_sske(tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/*
 * Raise a specification exception if any reserved bit is set in the
 * (just modified) PSW mask; used after instructions that load it.
 */
static void gen_check_psw_mask(DisasContext *s)
{
    TCGv_i64 reserved = tcg_temp_new_i64();
    TCGLabel *ok = gen_new_label();

    tcg_gen_andi_i64(reserved, psw_mask, PSW_MASK_RESERVED);
    tcg_gen_brcondi_i64(TCG_COND_EQ, reserved, 0, ok);
    gen_program_exception(s, PGM_SPECIFICATION);
    gen_set_label(ok);
}
4163 
/* SET SYSTEM MASK: replace the top byte of the PSW mask with in2. */
static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);

    gen_check_psw_mask(s);

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}

/* STORE CPU ADDRESS: out = this CPU's core_id. */
static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, core_id));
    return DISAS_NEXT;
}
4180 #endif
4181 
/* STORE CLOCK: out = TOD clock value from helper; CC forced to 0. */
static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, tcg_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}

/* STORE CLOCK EXTENDED: store a 16-byte extended TOD value at in2. */
static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, tcg_env);
    /* The TOD programmable register: a 16-bit value stored in a uint32_t
       field (only valid bits set). */
    tcg_gen_ld32u_i64(todpr, tcg_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    tcg_gen_qemu_st_i64(c1, o->in2, get_mem_index(s), MO_TEUQ);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st_i64(c2, o->in2, get_mem_index(s), MO_TEUQ);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4213 
4214 #ifndef CONFIG_USER_ONLY
/* SET CLOCK: helper sets the TOD clock from in2; CC comes back in cc_op. */
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
    gen_helper_sck(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET CLOCK COMPARATOR from in2. */
static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
{
    gen_helper_sckc(tcg_env, o->in2);
    return DISAS_NEXT;
}

/* SET CLOCK PROGRAMMABLE FIELD from GR0. */
static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
{
    gen_helper_sckpf(tcg_env, regs[0]);
    return DISAS_NEXT;
}

/* STORE CLOCK COMPARATOR into out. */
static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
{
    gen_helper_stckc(o->out, tcg_env);
    return DISAS_NEXT;
}

/* STORE CONTROL (64-bit): store control regs r1..r3 at in2. */
static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stctg(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}

/* STORE CONTROL (32-bit): store control regs r1..r3 at in2. */
static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stctl(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}

/* STORE CPU ID: out = env->cpuid. */
static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, tcg_env, offsetof(CPUS390XState, cpuid));
    return DISAS_NEXT;
}

/* SET CPU TIMER from in2. */
static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
    gen_helper_spt(tcg_env, o->in2);
    return DISAS_NEXT;
}

/* STORE FACILITY LIST: helper writes the facility bits to memory. */
static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
    gen_helper_stfl(tcg_env);
    return DISAS_NEXT;
}

/* STORE CPU TIMER into out. */
static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
    gen_helper_stpt(o->out, tcg_env);
    return DISAS_NEXT;
}

/* STORE SYSTEM INFORMATION: function code in GR0, selector in GR1. */
static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
{
    gen_helper_stsi(cc_op, tcg_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET PREFIX from in2. */
static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
    gen_helper_spx(tcg_env, o->in2);
    return DISAS_NEXT;
}
4294 
/*
 * Channel-subsystem instructions.  By convention these take the
 * subchannel id in GR1; the helpers set cc_op where a CC is defined.
 */

/* CANCEL SUBCHANNEL. */
static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
{
    gen_helper_xsch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CLEAR SUBCHANNEL. */
static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
{
    gen_helper_csch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* HALT SUBCHANNEL. */
static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
{
    gen_helper_hsch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MODIFY SUBCHANNEL: SCHIB address in in2. */
static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
{
    gen_helper_msch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* RESET CHANNEL PATH. */
static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
{
    gen_helper_rchp(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* RESUME SUBCHANNEL. */
static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
{
    gen_helper_rsch(tcg_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET ADDRESS LIMIT. */
static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
{
    gen_helper_sal(tcg_env, regs[1]);
    return DISAS_NEXT;
}

/* SET CHANNEL MONITOR. */
static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
{
    gen_helper_schm(tcg_env, regs[1], regs[2], o->in2);
    return DISAS_NEXT;
}
4348 
/* SIGNAL ADAPTER: not implemented, report subchannel not operational. */
static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
{
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return DISAS_NEXT;
}

/* STORE CHANNEL PATH STATUS: intentionally a no-op here. */
static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    /* The instruction is suppressed if not provided. */
    return DISAS_NEXT;
}

/* START SUBCHANNEL: subchannel id in GR1, ORB address in in2. */
static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
    gen_helper_ssch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE SUBCHANNEL: subchannel id in GR1, SCHIB address in in2. */
static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
    gen_helper_stsch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE CHANNEL REPORT WORD at in2. */
static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
    gen_helper_stcrw(tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST PENDING INTERRUPTION: store interruption code at addr1. */
static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
    gen_helper_tpi(cc_op, tcg_env, o->addr1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST SUBCHANNEL: subchannel id in GR1, IRB address in in2. */
static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
    gen_helper_tsch(tcg_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CHANNEL SUBSYSTEM CALL: command block address in in2. */
static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
    gen_helper_chsc(tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4403 
/* STORE PREFIX: out = prefix register, masked to its valid bits. */
static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, tcg_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}
4410 
/*
 * STORE THEN {AND,OR} SYSTEM MASK: store the current system mask byte,
 * then AND (opcode 0xac) or OR (otherwise) the immediate into it.
 */
static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s, i2);
    TCGv_i64 t;

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st_i64(t, o->addr1, get_mem_index(s), MO_UB);

    if (s->fields.op == 0xac) {
        /* AND form: clear the mask bits not set in i2.  */
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        /* OR form: set the mask bits present in i2.  */
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    gen_check_psw_mask(s);

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
4436 
/*
 * STORE USING REAL ADDRESS: store in1 at real address in2; the access
 * size comes from s->insn->data.  Notify PER if enabled.
 */
static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        update_psw_addr(s);
        gen_helper_per_store_real(tcg_env);
    }
    return DISAS_NEXT;
}
4447 #endif
4448 
/* STORE FACILITY LIST EXTENDED: destination address in in2, CC in cc_op. */
static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Store the low byte of in1 at address in2. */
static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_UB);
    return DISAS_NEXT;
}

/* Store the low 16 bits of in1 at address in2, big-endian. */
static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_TEUW);
    return DISAS_NEXT;
}

/* Store 32 bits; s->insn->data supplies extra MemOp flags (e.g. alignment). */
static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, get_mem_index(s),
                       MO_TEUL | s->insn->data);
    return DISAS_NEXT;
}

/* Store 64 bits; s->insn->data supplies extra MemOp flags (e.g. alignment). */
static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s),
                        MO_TEUQ | s->insn->data);
    return DISAS_NEXT;
}
4481 
/* STORE ACCESS MULTIPLE: store access regs r1..r3 at address in2. */
static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stam(tcg_env, r1, o->in2, r3);
    return DISAS_NEXT;
}
4490 
/*
 * STORE CHARACTERS UNDER MASK: store the bytes of in1 selected by the
 * m3 mask to successive bytes at in2.  s->insn->data is the bit offset
 * of the source field within the register (selects low/high word).
 * Contiguous masks are turned into a single wider store.
 */
static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bit position of the least significant selected byte.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    return DISAS_NEXT;
}
4538 
/*
 * STORE MULTIPLE: store registers r1 through r3 (wrapping mod 16) to
 * successive locations at in2.  s->insn->data is the element size
 * (4 for STM, 8 for STMG).
 */
static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_constant_i64(size);

    while (1) {
        tcg_gen_qemu_st_i64(regs[r1], o->in2, get_mem_index(s),
                            size == 8 ? MO_TEUQ : MO_TEUL);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    return DISAS_NEXT;
}
4558 
/*
 * STORE MULTIPLE HIGH: store the high 32 bits of registers r1 through
 * r3 (wrapping mod 16) to successive words at in2.
 */
static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_constant_i64(4);
    TCGv_i64 t32 = tcg_constant_i64(32);

    while (1) {
        /* MO_TEUL stores the low half, so shift the high half down...
           actually up: the store takes the low 32 bits of t after the
           shift left?  No — see below: shl by 32 then 32-bit store keeps
           the low half of the shifted value, i.e. zero.  The store uses
           the low 32 bits of t, which after shl(32) are 0; the MO_TEUL
           store of a shl(32) value stores the ORIGINAL low bits shifted
           out — NOTE(review): verify intended semantics against STMH. */
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st_i64(t, o->in2, get_mem_index(s), MO_TEUL);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }
    return DISAS_NEXT;
}
4578 
/* STORE PAIR TO QUADWORD: aligned 128-bit store of out:out2 at in2. */
static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t16 = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(t16, o->out2, o->out);
    tcg_gen_qemu_st_i128(t16, o->in2, get_mem_index(s),
                         MO_TE | MO_128 | MO_ALIGN);
    return DISAS_NEXT;
}
4588 
/* SEARCH STRING: helper scans memory, updating r1/r2; CC in cc_op. */
static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_srst(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SEARCH STRING UNICODE: like SRST but on 16-bit units. */
static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_srstu(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4608 
/* SUBTRACT: out = in1 - in2; CC handled by the cout hook. */
static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SUBTRACT LOGICAL (64-bit): also compute the borrow (0,-1) into cc_src. */
static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
4621 
/* Compute borrow (0, -1) into cc_src.
 * Fast paths exist when the previous instruction already left the
 * carry/borrow in cc_src (CC_OP_SUBU/CC_OP_ADDU); otherwise the CC is
 * materialized and the carry extracted from its msb.  Note the
 * deliberate fall-throughs: default -> CC_OP_STATIC -> CC_OP_ADDU. */
static void compute_borrow(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_SUBU:
        /* The borrow value is already in cc_src (0,-1). */
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        /* fall through */
    case CC_OP_ADDU:
        /* Convert carry (1,0) to borrow (0,-1). */
        tcg_gen_subi_i64(cc_src, cc_src, 1);
        break;
    }
}
4643 
/* SUBTRACT LOGICAL WITH BORROW (32-bit): out = in1 - in2 + borrow. */
static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /* Borrow is {0, -1}, so add to subtract. */
    tcg_gen_add_i64(o->out, o->in1, cc_src);
    tcg_gen_sub_i64(o->out, o->out, o->in2);
    return DISAS_NEXT;
}

/* SUBTRACT LOGICAL WITH BORROW (64-bit): tracks the new borrow in cc_src. */
static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /*
     * Borrow is {0, -1}, so add to subtract; replicate the
     * borrow input to produce 128-bit -1 for the addition.
     */
    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
    tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}
4668 
/*
 * SUPERVISOR CALL: record the SVC code and instruction length in env,
 * then raise the SVC exception.  Ends the TB (DISAS_NORETURN).
 */
static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    /* The exception handler needs an up-to-date PSW and CC.  */
    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_constant_i32(get_field(s, i1) & 0xff);
    tcg_gen_st_i32(t, tcg_env, offsetof(CPUS390XState, int_svc_code));

    t = tcg_constant_i32(s->ilen);
    tcg_gen_st_i32(t, tcg_env, offsetof(CPUS390XState, int_svc_ilen));

    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
}
4685 
4686 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4687 {
4688     int cc = 0;
4689 
4690     cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4691     cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4692     gen_op_movi_cc(s, cc);
4693     return DISAS_NEXT;
4694 }
4695 
/* TEST DATA CLASS (short BFP): CC from helper based on mask in2. */
static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST DATA CLASS (long BFP). */
static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, tcg_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST DATA CLASS (extended BFP): 128-bit first operand. */
static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, tcg_env, o->in1_128, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4716 
4717 #ifndef CONFIG_USER_ONLY
4718 
/* TEST BLOCK: helper tests/clears the page at in2; CC in cc_op. */
static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
    gen_helper_testblock(cc_op, tcg_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST PROTECTION: helper probes access with key in2; CC in cc_op. */
static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, tcg_env, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4732 
4733 #endif
4734 
/* TEST DECIMAL: validate a packed decimal field of l1+1 bytes at addr1. */
static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_constant_i32(get_field(s, l1) + 1);

    gen_helper_tp(cc_op, tcg_env, o->addr1, l1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE: translate l bytes at addr1 through the table at in2. */
static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_tr(tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE EXTENDED: helper returns updated address/length as a pair. */
static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
{
    TCGv_i128 pair = tcg_temp_new_i128();

    gen_helper_tre(pair, tcg_env, o->out, o->out2, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, pair);
    set_cc_static(s);
    return DISAS_NEXT;
}
4762 
/* TRANSLATE AND TEST: scan l bytes at addr1 against the table at in2. */
static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_trt(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE AND TEST REVERSE: like TRT, scanning right-to-left. */
static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_trtr(cc_op, tcg_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4780 
/*
 * TRANSLATE ONE/TWO TO ONE/TWO (TROO/TROT/TRTO/TRTT): the low two
 * opcode bits select source/destination unit sizes.  With the ETF2
 * enhancement and m3 bit 0 set, no test character is used (tst = -1);
 * otherwise the test character comes from GR0, truncated to the
 * source unit size.
 */
static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
    TCGv_i32 sizes = tcg_constant_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s, m3);

    /* Without the ETF2 enhancement, m3 is ignored.  */
    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        tcg_gen_movi_i32(tst, -1);
    } else {
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, tcg_env, r1, r2, tst, sizes);

    set_cc_static(s);
    return DISAS_NEXT;
}
4807 
/*
 * TEST AND SET: atomically exchange the byte at in2 with 0xff; the CC
 * is the old leftmost bit of that byte.
 */
static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_constant_i32(0xff);

    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* UNPACK: expand packed decimal at in2 into zoned format at addr1. */
static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_unpk(tcg_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
4825 
/* UNPACK ASCII: program check on over-long operands at translate time. */
static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l1);
    gen_helper_unpka(cc_op, tcg_env, o->addr1, l, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* UNPACK UNICODE: like UNPKA, but lengths must be even and <= 64. */
static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l1);
    gen_helper_unpku(cc_op, tcg_env, o->addr1, l, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4857 
4858 
/*
 * EXCLUSIVE OR (character): XOR l1+1 bytes at operand 2 into operand 1.
 * The common x XOR x == 0 idiom (identical operands) is open-coded as
 * inline zero-stores for short lengths; everything else goes to the
 * helper.
 */
static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s, d1);
    int d2 = get_field(s, d2);
    int b1 = get_field(s, b1);
    int b2 = get_field(s, b2);
    int l = get_field(s, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_constant_i64(0);

        /* Emit the widest stores first, then progressively narrower
           ones for the remainder.  */
        l++;
        while (l >= 8) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UQ);
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UL);
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UW);
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UB);
        }
        /* Result is all zeros, so CC is 0.  */
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_constant_i32(l);
    gen_helper_xc(cc_op, tcg_env, t32, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4910 
/* EXCLUSIVE OR: out = in1 ^ in2; CC handled by the cout hook. */
static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/*
 * EXCLUSIVE OR IMMEDIATE: s->insn->data packs the target field as
 * (size << 8) | shift; the immediate is XORed into that field only.
 */
static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
4932 
/*
 * EXCLUSIVE OR (XI/XIY): XOR an immediate into storage at addr1.
 * With the interlocked-access facility this is done atomically in
 * memory; otherwise as a plain load/modify/store.  The access size
 * comes from s->insn->data.
 */
static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
4953 
/* Produce a constant zero result. */
static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_constant_i64(0);
    return DISAS_NEXT;
}

/* Produce a constant zero result pair (out and out2). */
static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_constant_i64(0);
    o->out2 = o->out;
    return DISAS_NEXT;
}
4966 
4967 #ifndef CONFIG_USER_ONLY
/*
 * zPCI instructions: thin wrappers around helpers that take register
 * numbers (and, for the memory forms, an address plus access register)
 * and report their result via cc_op.
 */

/* CALL LOGICAL PROCESSOR. */
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_clp(tcg_env, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCI LOAD. */
static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_pcilg(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCI STORE. */
static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_pcistg(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE PCI FUNCTION CONTROLS. */
static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_stpcifc(tcg_env, r1, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET INTERRUPTION CONTROLS. */
static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
{
    gen_helper_sic(tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* REFRESH PCI TRANSLATIONS. */
static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_rpcit(tcg_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCI STORE BLOCK. */
static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_pcistb(tcg_env, r1, r3, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MODIFY PCI FUNCTION CONTROLS. */
static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_mpcifc(tcg_env, r1, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}
5043 #endif
5044 
5045 #include "translate_vx.c.inc"
5046 
5047 /* ====================================================================== */
5048 /* The "Cc OUTput" generators.  Given the generated output (and in some cases
5049    the original inputs), update the various cc data structures in order to
5050    be able to compute the new condition code.  */
5051 
5052 static void cout_abs32(DisasContext *s, DisasOps *o)
5053 {
5054     gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
5055 }
5056 
5057 static void cout_abs64(DisasContext *s, DisasOps *o)
5058 {
5059     gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
5060 }
5061 
5062 static void cout_adds32(DisasContext *s, DisasOps *o)
5063 {
5064     gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
5065 }
5066 
5067 static void cout_adds64(DisasContext *s, DisasOps *o)
5068 {
5069     gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
5070 }
5071 
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    /*
     * The 32-bit logical add was performed in a 64-bit temp, so the
     * carry-out sits in the high half of o->out: extract it into cc_src
     * and keep the 32-bit sum proper in cc_dst.
     */
    tcg_gen_shri_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
}
5078 
5079 static void cout_addu64(DisasContext *s, DisasOps *o)
5080 {
5081     gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
5082 }
5083 
5084 static void cout_cmps32(DisasContext *s, DisasOps *o)
5085 {
5086     gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
5087 }
5088 
5089 static void cout_cmps64(DisasContext *s, DisasOps *o)
5090 {
5091     gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
5092 }
5093 
5094 static void cout_cmpu32(DisasContext *s, DisasOps *o)
5095 {
5096     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
5097 }
5098 
5099 static void cout_cmpu64(DisasContext *s, DisasOps *o)
5100 {
5101     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
5102 }
5103 
5104 static void cout_f32(DisasContext *s, DisasOps *o)
5105 {
5106     gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
5107 }
5108 
5109 static void cout_f64(DisasContext *s, DisasOps *o)
5110 {
5111     gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
5112 }
5113 
5114 static void cout_f128(DisasContext *s, DisasOps *o)
5115 {
5116     gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
5117 }
5118 
5119 static void cout_nabs32(DisasContext *s, DisasOps *o)
5120 {
5121     gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
5122 }
5123 
5124 static void cout_nabs64(DisasContext *s, DisasOps *o)
5125 {
5126     gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
5127 }
5128 
5129 static void cout_neg32(DisasContext *s, DisasOps *o)
5130 {
5131     gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
5132 }
5133 
5134 static void cout_neg64(DisasContext *s, DisasOps *o)
5135 {
5136     gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
5137 }
5138 
5139 static void cout_nz32(DisasContext *s, DisasOps *o)
5140 {
5141     tcg_gen_ext32u_i64(cc_dst, o->out);
5142     gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
5143 }
5144 
5145 static void cout_nz64(DisasContext *s, DisasOps *o)
5146 {
5147     gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5148 }
5149 
5150 static void cout_s32(DisasContext *s, DisasOps *o)
5151 {
5152     gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5153 }
5154 
5155 static void cout_s64(DisasContext *s, DisasOps *o)
5156 {
5157     gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5158 }
5159 
5160 static void cout_subs32(DisasContext *s, DisasOps *o)
5161 {
5162     gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5163 }
5164 
5165 static void cout_subs64(DisasContext *s, DisasOps *o)
5166 {
5167     gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5168 }
5169 
static void cout_subu32(DisasContext *s, DisasOps *o)
{
    /*
     * The 32-bit logical subtract was performed in a 64-bit temp; an
     * *arithmetic* right shift of the high half yields 0 (no borrow)
     * or -1 (borrow) in cc_src, while cc_dst keeps the 32-bit result.
     */
    tcg_gen_sari_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
}
5176 
5177 static void cout_subu64(DisasContext *s, DisasOps *o)
5178 {
5179     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
5180 }
5181 
5182 static void cout_tm32(DisasContext *s, DisasOps *o)
5183 {
5184     gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5185 }
5186 
5187 static void cout_tm64(DisasContext *s, DisasOps *o)
5188 {
5189     gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5190 }
5191 
5192 static void cout_muls32(DisasContext *s, DisasOps *o)
5193 {
5194     gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
5195 }
5196 
5197 static void cout_muls64(DisasContext *s, DisasOps *o)
5198 {
5199     /* out contains "high" part, out2 contains "low" part of 128 bit result */
5200     gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
5201 }
5202 
5203 /* ====================================================================== */
5204 /* The "PREParation" generators.  These initialize the DisasOps.OUT fields
5205    with the TCG register to which we will write.  Used in combination with
5206    the "wout" generators, in some cases we need a new temporary, and in
5207    some cases we can write to a TCG global.  */
5208 
5209 static void prep_new(DisasContext *s, DisasOps *o)
5210 {
5211     o->out = tcg_temp_new_i64();
5212 }
5213 #define SPEC_prep_new 0
5214 
5215 static void prep_new_P(DisasContext *s, DisasOps *o)
5216 {
5217     o->out = tcg_temp_new_i64();
5218     o->out2 = tcg_temp_new_i64();
5219 }
5220 #define SPEC_prep_new_P 0
5221 
5222 static void prep_new_x(DisasContext *s, DisasOps *o)
5223 {
5224     o->out_128 = tcg_temp_new_i128();
5225 }
5226 #define SPEC_prep_new_x 0
5227 
5228 static void prep_r1(DisasContext *s, DisasOps *o)
5229 {
5230     o->out = regs[get_field(s, r1)];
5231 }
5232 #define SPEC_prep_r1 0
5233 
/* Direct the output pair at the even/odd register pair r1 / r1+1.
   SPEC_r1_even enforces that r1 is even before this runs. */
static void prep_r1_P(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
}
#define SPEC_prep_r1_P SPEC_r1_even
5241 
5242 /* ====================================================================== */
5243 /* The "Write OUTput" generators.  These generally perform some non-trivial
5244    copy of data to TCG globals, or to main memory.  The trivial cases are
5245    generally handled by having a "prep" generator install the TCG global
5246    as the destination of the operation.  */
5247 
5248 static void wout_r1(DisasContext *s, DisasOps *o)
5249 {
5250     store_reg(get_field(s, r1), o->out);
5251 }
5252 #define SPEC_wout_r1 0
5253 
5254 static void wout_out2_r1(DisasContext *s, DisasOps *o)
5255 {
5256     store_reg(get_field(s, r1), o->out2);
5257 }
5258 #define SPEC_wout_out2_r1 0
5259 
5260 static void wout_r1_8(DisasContext *s, DisasOps *o)
5261 {
5262     int r1 = get_field(s, r1);
5263     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
5264 }
5265 #define SPEC_wout_r1_8 0
5266 
5267 static void wout_r1_16(DisasContext *s, DisasOps *o)
5268 {
5269     int r1 = get_field(s, r1);
5270     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
5271 }
5272 #define SPEC_wout_r1_16 0
5273 
5274 static void wout_r1_32(DisasContext *s, DisasOps *o)
5275 {
5276     store_reg32_i64(get_field(s, r1), o->out);
5277 }
5278 #define SPEC_wout_r1_32 0
5279 
5280 static void wout_r1_32h(DisasContext *s, DisasOps *o)
5281 {
5282     store_reg32h_i64(get_field(s, r1), o->out);
5283 }
5284 #define SPEC_wout_r1_32h 0
5285 
5286 static void wout_r1_P32(DisasContext *s, DisasOps *o)
5287 {
5288     int r1 = get_field(s, r1);
5289     store_reg32_i64(r1, o->out);
5290     store_reg32_i64(r1 + 1, o->out2);
5291 }
5292 #define SPEC_wout_r1_P32 SPEC_r1_even
5293 
/* Write a 64-bit output as a 32-bit register pair: low word to the odd
   register r1+1, high word to the even register r1.  The low word is
   stored before the shift in case o->out aliases a register. */
static void wout_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i64 t = tcg_temp_new_i64();
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(t, o->out, 32);
    store_reg32_i64(r1, t);
}
#define SPEC_wout_r1_D32 SPEC_r1_even
5303 
5304 static void wout_r1_D64(DisasContext *s, DisasOps *o)
5305 {
5306     int r1 = get_field(s, r1);
5307     tcg_gen_extr_i128_i64(regs[r1 + 1], regs[r1], o->out_128);
5308 }
5309 #define SPEC_wout_r1_D64 SPEC_r1_even
5310 
5311 static void wout_r3_P32(DisasContext *s, DisasOps *o)
5312 {
5313     int r3 = get_field(s, r3);
5314     store_reg32_i64(r3, o->out);
5315     store_reg32_i64(r3 + 1, o->out2);
5316 }
5317 #define SPEC_wout_r3_P32 SPEC_r3_even
5318 
5319 static void wout_r3_P64(DisasContext *s, DisasOps *o)
5320 {
5321     int r3 = get_field(s, r3);
5322     store_reg(r3, o->out);
5323     store_reg(r3 + 1, o->out2);
5324 }
5325 #define SPEC_wout_r3_P64 SPEC_r3_even
5326 
5327 static void wout_e1(DisasContext *s, DisasOps *o)
5328 {
5329     store_freg32_i64(get_field(s, r1), o->out);
5330 }
5331 #define SPEC_wout_e1 0
5332 
5333 static void wout_f1(DisasContext *s, DisasOps *o)
5334 {
5335     store_freg(get_field(s, r1), o->out);
5336 }
5337 #define SPEC_wout_f1 0
5338 
/* Write a 128-bit FP result to the f1/f1+2 register pair, additionally
   leaving the two 64-bit halves in out/out2 for CC computation. */
static void wout_x1(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);

    /* Split out_128 into out+out2 for cout_f128. */
    tcg_debug_assert(o->out == NULL);
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(o->out2, o->out, o->out_128);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128
5353 
5354 static void wout_x1_P(DisasContext *s, DisasOps *o)
5355 {
5356     int f1 = get_field(s, r1);
5357     store_freg(f1, o->out);
5358     store_freg(f1 + 2, o->out2);
5359 }
5360 #define SPEC_wout_x1_P SPEC_r1_f128
5361 
5362 static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
5363 {
5364     if (get_field(s, r1) != get_field(s, r2)) {
5365         store_reg32_i64(get_field(s, r1), o->out);
5366     }
5367 }
5368 #define SPEC_wout_cond_r1r2_32 0
5369 
5370 static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
5371 {
5372     if (get_field(s, r1) != get_field(s, r2)) {
5373         store_freg32_i64(get_field(s, r1), o->out);
5374     }
5375 }
5376 #define SPEC_wout_cond_e1e2 0
5377 
5378 static void wout_m1_8(DisasContext *s, DisasOps *o)
5379 {
5380     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_UB);
5381 }
5382 #define SPEC_wout_m1_8 0
5383 
5384 static void wout_m1_16(DisasContext *s, DisasOps *o)
5385 {
5386     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUW);
5387 }
5388 #define SPEC_wout_m1_16 0
5389 
5390 #ifndef CONFIG_USER_ONLY
5391 static void wout_m1_16a(DisasContext *s, DisasOps *o)
5392 {
5393     tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
5394 }
5395 #define SPEC_wout_m1_16a 0
5396 #endif
5397 
5398 static void wout_m1_32(DisasContext *s, DisasOps *o)
5399 {
5400     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUL);
5401 }
5402 #define SPEC_wout_m1_32 0
5403 
5404 #ifndef CONFIG_USER_ONLY
5405 static void wout_m1_32a(DisasContext *s, DisasOps *o)
5406 {
5407     tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
5408 }
5409 #define SPEC_wout_m1_32a 0
5410 #endif
5411 
5412 static void wout_m1_64(DisasContext *s, DisasOps *o)
5413 {
5414     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ);
5415 }
5416 #define SPEC_wout_m1_64 0
5417 
5418 #ifndef CONFIG_USER_ONLY
5419 static void wout_m1_64a(DisasContext *s, DisasOps *o)
5420 {
5421     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
5422 }
5423 #define SPEC_wout_m1_64a 0
5424 #endif
5425 
5426 static void wout_m2_32(DisasContext *s, DisasOps *o)
5427 {
5428     tcg_gen_qemu_st_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
5429 }
5430 #define SPEC_wout_m2_32 0
5431 
5432 static void wout_in2_r1(DisasContext *s, DisasOps *o)
5433 {
5434     store_reg(get_field(s, r1), o->in2);
5435 }
5436 #define SPEC_wout_in2_r1 0
5437 
5438 static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
5439 {
5440     store_reg32_i64(get_field(s, r1), o->in2);
5441 }
5442 #define SPEC_wout_in2_r1_32 0
5443 
5444 /* ====================================================================== */
5445 /* The "INput 1" generators.  These load the first operand to an insn.  */
5446 
5447 static void in1_r1(DisasContext *s, DisasOps *o)
5448 {
5449     o->in1 = load_reg(get_field(s, r1));
5450 }
5451 #define SPEC_in1_r1 0
5452 
5453 static void in1_r1_o(DisasContext *s, DisasOps *o)
5454 {
5455     o->in1 = regs[get_field(s, r1)];
5456 }
5457 #define SPEC_in1_r1_o 0
5458 
5459 static void in1_r1_32s(DisasContext *s, DisasOps *o)
5460 {
5461     o->in1 = tcg_temp_new_i64();
5462     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
5463 }
5464 #define SPEC_in1_r1_32s 0
5465 
5466 static void in1_r1_32u(DisasContext *s, DisasOps *o)
5467 {
5468     o->in1 = tcg_temp_new_i64();
5469     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
5470 }
5471 #define SPEC_in1_r1_32u 0
5472 
5473 static void in1_r1_sr32(DisasContext *s, DisasOps *o)
5474 {
5475     o->in1 = tcg_temp_new_i64();
5476     tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
5477 }
5478 #define SPEC_in1_r1_sr32 0
5479 
5480 static void in1_r1p1(DisasContext *s, DisasOps *o)
5481 {
5482     o->in1 = load_reg(get_field(s, r1) + 1);
5483 }
5484 #define SPEC_in1_r1p1 SPEC_r1_even
5485 
5486 static void in1_r1p1_o(DisasContext *s, DisasOps *o)
5487 {
5488     o->in1 = regs[get_field(s, r1) + 1];
5489 }
5490 #define SPEC_in1_r1p1_o SPEC_r1_even
5491 
5492 static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
5493 {
5494     o->in1 = tcg_temp_new_i64();
5495     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
5496 }
5497 #define SPEC_in1_r1p1_32s SPEC_r1_even
5498 
5499 static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
5500 {
5501     o->in1 = tcg_temp_new_i64();
5502     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
5503 }
5504 #define SPEC_in1_r1p1_32u SPEC_r1_even
5505 
5506 static void in1_r1_D32(DisasContext *s, DisasOps *o)
5507 {
5508     int r1 = get_field(s, r1);
5509     o->in1 = tcg_temp_new_i64();
5510     tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5511 }
5512 #define SPEC_in1_r1_D32 SPEC_r1_even
5513 
5514 static void in1_r2(DisasContext *s, DisasOps *o)
5515 {
5516     o->in1 = load_reg(get_field(s, r2));
5517 }
5518 #define SPEC_in1_r2 0
5519 
5520 static void in1_r2_sr32(DisasContext *s, DisasOps *o)
5521 {
5522     o->in1 = tcg_temp_new_i64();
5523     tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
5524 }
5525 #define SPEC_in1_r2_sr32 0
5526 
5527 static void in1_r2_32u(DisasContext *s, DisasOps *o)
5528 {
5529     o->in1 = tcg_temp_new_i64();
5530     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
5531 }
5532 #define SPEC_in1_r2_32u 0
5533 
5534 static void in1_r3(DisasContext *s, DisasOps *o)
5535 {
5536     o->in1 = load_reg(get_field(s, r3));
5537 }
5538 #define SPEC_in1_r3 0
5539 
5540 static void in1_r3_o(DisasContext *s, DisasOps *o)
5541 {
5542     o->in1 = regs[get_field(s, r3)];
5543 }
5544 #define SPEC_in1_r3_o 0
5545 
5546 static void in1_r3_32s(DisasContext *s, DisasOps *o)
5547 {
5548     o->in1 = tcg_temp_new_i64();
5549     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
5550 }
5551 #define SPEC_in1_r3_32s 0
5552 
5553 static void in1_r3_32u(DisasContext *s, DisasOps *o)
5554 {
5555     o->in1 = tcg_temp_new_i64();
5556     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
5557 }
5558 #define SPEC_in1_r3_32u 0
5559 
5560 static void in1_r3_D32(DisasContext *s, DisasOps *o)
5561 {
5562     int r3 = get_field(s, r3);
5563     o->in1 = tcg_temp_new_i64();
5564     tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5565 }
5566 #define SPEC_in1_r3_D32 SPEC_r3_even
5567 
5568 static void in1_r3_sr32(DisasContext *s, DisasOps *o)
5569 {
5570     o->in1 = tcg_temp_new_i64();
5571     tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32);
5572 }
5573 #define SPEC_in1_r3_sr32 0
5574 
5575 static void in1_e1(DisasContext *s, DisasOps *o)
5576 {
5577     o->in1 = load_freg32_i64(get_field(s, r1));
5578 }
5579 #define SPEC_in1_e1 0
5580 
5581 static void in1_f1(DisasContext *s, DisasOps *o)
5582 {
5583     o->in1 = load_freg(get_field(s, r1));
5584 }
5585 #define SPEC_in1_f1 0
5586 
5587 static void in1_x1(DisasContext *s, DisasOps *o)
5588 {
5589     o->in1_128 = load_freg_128(get_field(s, r1));
5590 }
5591 #define SPEC_in1_x1 SPEC_r1_f128
5592 
5593 /* Load the high double word of an extended (128-bit) format FP number */
5594 static void in1_x2h(DisasContext *s, DisasOps *o)
5595 {
5596     o->in1 = load_freg(get_field(s, r2));
5597 }
5598 #define SPEC_in1_x2h SPEC_r2_f128
5599 
5600 static void in1_f3(DisasContext *s, DisasOps *o)
5601 {
5602     o->in1 = load_freg(get_field(s, r3));
5603 }
5604 #define SPEC_in1_f3 0
5605 
5606 static void in1_la1(DisasContext *s, DisasOps *o)
5607 {
5608     o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
5609 }
5610 #define SPEC_in1_la1 0
5611 
5612 static void in1_la2(DisasContext *s, DisasOps *o)
5613 {
5614     int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5615     o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5616 }
5617 #define SPEC_in1_la2 0
5618 
5619 static void in1_m1_8u(DisasContext *s, DisasOps *o)
5620 {
5621     in1_la1(s, o);
5622     o->in1 = tcg_temp_new_i64();
5623     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_UB);
5624 }
5625 #define SPEC_in1_m1_8u 0
5626 
5627 static void in1_m1_16s(DisasContext *s, DisasOps *o)
5628 {
5629     in1_la1(s, o);
5630     o->in1 = tcg_temp_new_i64();
5631     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESW);
5632 }
5633 #define SPEC_in1_m1_16s 0
5634 
5635 static void in1_m1_16u(DisasContext *s, DisasOps *o)
5636 {
5637     in1_la1(s, o);
5638     o->in1 = tcg_temp_new_i64();
5639     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUW);
5640 }
5641 #define SPEC_in1_m1_16u 0
5642 
5643 static void in1_m1_32s(DisasContext *s, DisasOps *o)
5644 {
5645     in1_la1(s, o);
5646     o->in1 = tcg_temp_new_i64();
5647     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESL);
5648 }
5649 #define SPEC_in1_m1_32s 0
5650 
5651 static void in1_m1_32u(DisasContext *s, DisasOps *o)
5652 {
5653     in1_la1(s, o);
5654     o->in1 = tcg_temp_new_i64();
5655     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUL);
5656 }
5657 #define SPEC_in1_m1_32u 0
5658 
5659 static void in1_m1_64(DisasContext *s, DisasOps *o)
5660 {
5661     in1_la1(s, o);
5662     o->in1 = tcg_temp_new_i64();
5663     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUQ);
5664 }
5665 #define SPEC_in1_m1_64 0
5666 
5667 /* ====================================================================== */
5668 /* The "INput 2" generators.  These load the second operand to an insn.  */
5669 
5670 static void in2_r1_o(DisasContext *s, DisasOps *o)
5671 {
5672     o->in2 = regs[get_field(s, r1)];
5673 }
5674 #define SPEC_in2_r1_o 0
5675 
5676 static void in2_r1_16u(DisasContext *s, DisasOps *o)
5677 {
5678     o->in2 = tcg_temp_new_i64();
5679     tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
5680 }
5681 #define SPEC_in2_r1_16u 0
5682 
5683 static void in2_r1_32u(DisasContext *s, DisasOps *o)
5684 {
5685     o->in2 = tcg_temp_new_i64();
5686     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
5687 }
5688 #define SPEC_in2_r1_32u 0
5689 
5690 static void in2_r1_D32(DisasContext *s, DisasOps *o)
5691 {
5692     int r1 = get_field(s, r1);
5693     o->in2 = tcg_temp_new_i64();
5694     tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5695 }
5696 #define SPEC_in2_r1_D32 SPEC_r1_even
5697 
5698 static void in2_r2(DisasContext *s, DisasOps *o)
5699 {
5700     o->in2 = load_reg(get_field(s, r2));
5701 }
5702 #define SPEC_in2_r2 0
5703 
5704 static void in2_r2_o(DisasContext *s, DisasOps *o)
5705 {
5706     o->in2 = regs[get_field(s, r2)];
5707 }
5708 #define SPEC_in2_r2_o 0
5709 
5710 static void in2_r2_nz(DisasContext *s, DisasOps *o)
5711 {
5712     int r2 = get_field(s, r2);
5713     if (r2 != 0) {
5714         o->in2 = load_reg(r2);
5715     }
5716 }
5717 #define SPEC_in2_r2_nz 0
5718 
5719 static void in2_r2_8s(DisasContext *s, DisasOps *o)
5720 {
5721     o->in2 = tcg_temp_new_i64();
5722     tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
5723 }
5724 #define SPEC_in2_r2_8s 0
5725 
5726 static void in2_r2_8u(DisasContext *s, DisasOps *o)
5727 {
5728     o->in2 = tcg_temp_new_i64();
5729     tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
5730 }
5731 #define SPEC_in2_r2_8u 0
5732 
5733 static void in2_r2_16s(DisasContext *s, DisasOps *o)
5734 {
5735     o->in2 = tcg_temp_new_i64();
5736     tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
5737 }
5738 #define SPEC_in2_r2_16s 0
5739 
5740 static void in2_r2_16u(DisasContext *s, DisasOps *o)
5741 {
5742     o->in2 = tcg_temp_new_i64();
5743     tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
5744 }
5745 #define SPEC_in2_r2_16u 0
5746 
5747 static void in2_r3(DisasContext *s, DisasOps *o)
5748 {
5749     o->in2 = load_reg(get_field(s, r3));
5750 }
5751 #define SPEC_in2_r3 0
5752 
5753 static void in2_r3_D64(DisasContext *s, DisasOps *o)
5754 {
5755     int r3 = get_field(s, r3);
5756     o->in2_128 = tcg_temp_new_i128();
5757     tcg_gen_concat_i64_i128(o->in2_128, regs[r3 + 1], regs[r3]);
5758 }
5759 #define SPEC_in2_r3_D64 SPEC_r3_even
5760 
5761 static void in2_r3_sr32(DisasContext *s, DisasOps *o)
5762 {
5763     o->in2 = tcg_temp_new_i64();
5764     tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
5765 }
5766 #define SPEC_in2_r3_sr32 0
5767 
5768 static void in2_r3_32u(DisasContext *s, DisasOps *o)
5769 {
5770     o->in2 = tcg_temp_new_i64();
5771     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
5772 }
5773 #define SPEC_in2_r3_32u 0
5774 
5775 static void in2_r2_32s(DisasContext *s, DisasOps *o)
5776 {
5777     o->in2 = tcg_temp_new_i64();
5778     tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
5779 }
5780 #define SPEC_in2_r2_32s 0
5781 
5782 static void in2_r2_32u(DisasContext *s, DisasOps *o)
5783 {
5784     o->in2 = tcg_temp_new_i64();
5785     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
5786 }
5787 #define SPEC_in2_r2_32u 0
5788 
5789 static void in2_r2_sr32(DisasContext *s, DisasOps *o)
5790 {
5791     o->in2 = tcg_temp_new_i64();
5792     tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
5793 }
5794 #define SPEC_in2_r2_sr32 0
5795 
5796 static void in2_e2(DisasContext *s, DisasOps *o)
5797 {
5798     o->in2 = load_freg32_i64(get_field(s, r2));
5799 }
5800 #define SPEC_in2_e2 0
5801 
5802 static void in2_f2(DisasContext *s, DisasOps *o)
5803 {
5804     o->in2 = load_freg(get_field(s, r2));
5805 }
5806 #define SPEC_in2_f2 0
5807 
5808 static void in2_x2(DisasContext *s, DisasOps *o)
5809 {
5810     o->in2_128 = load_freg_128(get_field(s, r2));
5811 }
5812 #define SPEC_in2_x2 SPEC_r2_f128
5813 
5814 /* Load the low double word of an extended (128-bit) format FP number */
5815 static void in2_x2l(DisasContext *s, DisasOps *o)
5816 {
5817     o->in2 = load_freg(get_field(s, r2) + 2);
5818 }
5819 #define SPEC_in2_x2l SPEC_r2_f128
5820 
/* Load the r2 register value, wrapped like an address for the current
   addressing mode (the add of 0 exists only to apply the wrap). */
static void in2_ra2(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);

    /* Note: *don't* treat !r2 as 0, use the reg value. */
    o->in2 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
}
#define SPEC_in2_ra2 0
5830 
5831 static void in2_ra2_E(DisasContext *s, DisasOps *o)
5832 {
5833     return in2_ra2(s, o);
5834 }
5835 #define SPEC_in2_ra2_E SPEC_r2_even
5836 
5837 static void in2_a2(DisasContext *s, DisasOps *o)
5838 {
5839     int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5840     o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5841 }
5842 #define SPEC_in2_a2 0
5843 
/*
 * Resolve the i2 field of a relative-addressing insn.  When disas_jdest
 * reports an immediate, the value is pc_next + 2 * imm (i2 counts
 * halfwords); otherwise disas_jdest has already produced the TCGv in ri2.
 */
static TCGv gen_ri2(DisasContext *s)
{
    TCGv ri2 = NULL;
    bool is_imm;
    int imm;

    disas_jdest(s, i2, is_imm, imm, ri2);
    if (is_imm) {
        ri2 = tcg_constant_i64(s->base.pc_next + (int64_t)imm * 2);
    }

    return ri2;
}
5857 
5858 static void in2_ri2(DisasContext *s, DisasOps *o)
5859 {
5860     o->in2 = gen_ri2(s);
5861 }
5862 #define SPEC_in2_ri2 0
5863 
5864 static void in2_sh(DisasContext *s, DisasOps *o)
5865 {
5866     int b2 = get_field(s, b2);
5867     int d2 = get_field(s, d2);
5868 
5869     if (b2 == 0) {
5870         o->in2 = tcg_constant_i64(d2 & 0x3f);
5871     } else {
5872         o->in2 = get_address(s, 0, b2, d2);
5873         tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
5874     }
5875 }
5876 #define SPEC_in2_sh 0
5877 
5878 static void in2_m2_8u(DisasContext *s, DisasOps *o)
5879 {
5880     in2_a2(s, o);
5881     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_UB);
5882 }
5883 #define SPEC_in2_m2_8u 0
5884 
5885 static void in2_m2_16s(DisasContext *s, DisasOps *o)
5886 {
5887     in2_a2(s, o);
5888     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESW);
5889 }
5890 #define SPEC_in2_m2_16s 0
5891 
5892 static void in2_m2_16u(DisasContext *s, DisasOps *o)
5893 {
5894     in2_a2(s, o);
5895     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUW);
5896 }
5897 #define SPEC_in2_m2_16u 0
5898 
5899 static void in2_m2_32s(DisasContext *s, DisasOps *o)
5900 {
5901     in2_a2(s, o);
5902     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESL);
5903 }
5904 #define SPEC_in2_m2_32s 0
5905 
5906 static void in2_m2_32u(DisasContext *s, DisasOps *o)
5907 {
5908     in2_a2(s, o);
5909     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUL);
5910 }
5911 #define SPEC_in2_m2_32u 0
5912 
5913 #ifndef CONFIG_USER_ONLY
5914 static void in2_m2_32ua(DisasContext *s, DisasOps *o)
5915 {
5916     in2_a2(s, o);
5917     tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
5918 }
5919 #define SPEC_in2_m2_32ua 0
5920 #endif
5921 
5922 static void in2_m2_64(DisasContext *s, DisasOps *o)
5923 {
5924     in2_a2(s, o);
5925     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
5926 }
5927 #define SPEC_in2_m2_64 0
5928 
/* Load a 64-bit operand from memory, then wrap the loaded value like an
   address (the add of 0 exists only to apply the wrap). */
static void in2_m2_64w(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
    gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
}
#define SPEC_in2_m2_64w 0
5936 
5937 #ifndef CONFIG_USER_ONLY
5938 static void in2_m2_64a(DisasContext *s, DisasOps *o)
5939 {
5940     in2_a2(s, o);
5941     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
5942 }
5943 #define SPEC_in2_m2_64a 0
5944 #endif
5945 
5946 static void in2_mri2_16s(DisasContext *s, DisasOps *o)
5947 {
5948     o->in2 = tcg_temp_new_i64();
5949     tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TESW);
5950 }
5951 #define SPEC_in2_mri2_16s 0
5952 
5953 static void in2_mri2_16u(DisasContext *s, DisasOps *o)
5954 {
5955     o->in2 = tcg_temp_new_i64();
5956     tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TEUW);
5957 }
5958 #define SPEC_in2_mri2_16u 0
5959 
5960 static void in2_mri2_32s(DisasContext *s, DisasOps *o)
5961 {
5962     o->in2 = tcg_temp_new_i64();
5963     tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
5964                        MO_TESL | MO_ALIGN);
5965 }
5966 #define SPEC_in2_mri2_32s 0
5967 
5968 static void in2_mri2_32u(DisasContext *s, DisasOps *o)
5969 {
5970     o->in2 = tcg_temp_new_i64();
5971     tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
5972                        MO_TEUL | MO_ALIGN);
5973 }
5974 #define SPEC_in2_mri2_32u 0
5975 
5976 static void in2_mri2_64(DisasContext *s, DisasOps *o)
5977 {
5978     o->in2 = tcg_temp_new_i64();
5979     tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s),
5980                         MO_TEUQ | MO_ALIGN);
5981 }
5982 #define SPEC_in2_mri2_64 0
5983 
5984 static void in2_i2(DisasContext *s, DisasOps *o)
5985 {
5986     o->in2 = tcg_constant_i64(get_field(s, i2));
5987 }
5988 #define SPEC_in2_i2 0
5989 
5990 static void in2_i2_8u(DisasContext *s, DisasOps *o)
5991 {
5992     o->in2 = tcg_constant_i64((uint8_t)get_field(s, i2));
5993 }
5994 #define SPEC_in2_i2_8u 0
5995 
5996 static void in2_i2_16u(DisasContext *s, DisasOps *o)
5997 {
5998     o->in2 = tcg_constant_i64((uint16_t)get_field(s, i2));
5999 }
6000 #define SPEC_in2_i2_16u 0
6001 
6002 static void in2_i2_32u(DisasContext *s, DisasOps *o)
6003 {
6004     o->in2 = tcg_constant_i64((uint32_t)get_field(s, i2));
6005 }
6006 #define SPEC_in2_i2_32u 0
6007 
static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
{
    /* Zero-extended 16-bit immediate, shifted left by the per-insn amount
       stored in insn->data (widened to 64 bits before the shift). */
    uint64_t i2 = (uint16_t)get_field(s, i2);
    o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0
6014 
6015 static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
6016 {
6017     uint64_t i2 = (uint32_t)get_field(s, i2);
6018     o->in2 = tcg_constant_i64(i2 << s->insn->data);
6019 }
6020 #define SPEC_in2_i2_32u_shl 0
6021 
6022 #ifndef CONFIG_USER_ONLY
6023 static void in2_insn(DisasContext *s, DisasOps *o)
6024 {
6025     o->in2 = tcg_constant_i64(s->fields.raw_insn);
6026 }
6027 #define SPEC_in2_insn 0
6028 #endif
6029 
6030 /* ====================================================================== */
6031 
6032 /* Find opc within the table of insns.  This is formulated as a switch
6033    statement so that (1) we get compile-time notice of cut-paste errors
6034    for duplicated opcodes, and (2) the compiler generates the binary
6035    search tree, rather than us having to post-process the table.  */
6036 
6037 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
6038     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
6039 
6040 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
6041     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
6042 
6043 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
6044     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
6045 
6046 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
6047 
6048 enum DisasInsnEnum {
6049 #include "insn-data.h.inc"
6050 };
6051 
#undef E
/*
 * Second expansion of the insn table: one DisasInsn initializer per
 * entry.  Note that .spec only collects the in1/in2/prep/wout
 * specification bits; cout and op helpers have no SPEC_* counterparts.
 */
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
    .opc = OPC,                                                             \
    .flags = FL,                                                            \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
 },
6068 
/* Allow 0 to be used for NULL in the table below.  */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

/* A "0" (absent) helper contributes no specification-exception bits. */
#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0
6081 
/* Give smaller names to the various facilities.  */
/* Several aliases map to the same STFLE feature bit; the trailing
   comments name the specific facility each alias stands for. */
#define FAC_Z           S390_FEAT_ZARCH
#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP         S390_FEAT_DFP
#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* DFP-rounding */
#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE          S390_FEAT_EXECUTE_EXT
#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE        S390_FEAT_STFLE
#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
#define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
#define FAC_V           S390_FEAT_VECTOR /* vector facility */
#define FAC_VE          S390_FEAT_VECTOR_ENH  /* vector enhancements facility 1 */
#define FAC_VE2         S390_FEAT_VECTOR_ENH2 /* vector enhancements facility 2 */
#define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
#define FAC_MIE3        S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */
6127 
/* The compiled-in table of all implemented instructions, indexed by
   the insn_* enumerators generated above. */
static const DisasInsn insn_info[] = {
#include "insn-data.h.inc"
};
6131 
#undef E
/*
 * Third expansion of the insn table: one switch case per entry,
 * mapping the 16-bit (op << 8 | op2) opcode to its table element.
 */
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
    case OPC: return &insn_info[insn_ ## NM];

/* Return the DisasInsn descriptor for OPC, or NULL if unimplemented. */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.h.inc"
    default:
        return NULL;
    }
}

/* The insn-table macros are not needed beyond this point. */
#undef F
#undef E
#undef D
#undef C
6149 
6150 /* Extract a field from the insn.  The INSN should be left-aligned in
6151    the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
6153 
/*
 * Place one decoded operand field into O.  F describes the field's
 * big-bit-endian position within the left-aligned insn image INSN,
 * its size, and its encoding type.  A zero-size F is a hole in the
 * format's field array and is skipped.
 */
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        /* XOR/subtract trick: flipping the sign bit and subtracting it
           back sign-extends R from f->size bits to the full 32 bits. */
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* The high 8 bits (dh) are encoded after the low 12 bits (dl);
           reassemble as dh:dl, sign-extending from the int8_t dh. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    case 3: /* MSB stored in RXB */
        g_assert(f->size == 4);
        /* Bit 4 of the register number lives in the RXB byte (insn
           bits 36-39); merge in the RXB bit that corresponds to the
           register field starting at f->beg. */
        switch (f->beg) {
        case 8:
            r |= extract64(insn, 63 - 36, 1) << 4;
            break;
        case 12:
            r |= extract64(insn, 63 - 37, 1) << 4;
            break;
        case 16:
            r |= extract64(insn, 63 - 38, 1) << 4;
            break;
        case 32:
            r |= extract64(insn, 63 - 39, 1) << 4;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        abort();
    }

    /*
     * Validate that the "compressed" encoding we selected above is valid.
     * I.e. we haven't made two different original fields overlap.
     */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
6210 
6211 /* Lookup the insn at the current PC, extracting the operands into O and
6212    returning the info struct for the insn.  Returns NULL for invalid insn.  */
6213 
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        tcg_gen_st_i64(tcg_constant_i64(0), tcg_env,
                       offsetof(CPUS390XState, ex_value));

        /* Extract the values saved by EXECUTE: the left-aligned insn
           image in the upper bytes and the ilen in the low nibble.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;

        /* Register insn bytes with translator so plugins work. */
        for (int i = 0; i < ilen; i++) {
            uint8_t byte = extract64(insn, 56 - (i * 8), 8);
            translator_fake_ldb(byte, pc + i);
        }
        op = insn >> 56;
    } else {
        /* The major opcode (first byte) determines the insn length.  */
        insn = ld_code2(env, s, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        /* Read the remaining bytes and left-align the full image.  */
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, s, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode is the second byte.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode is the low nibble of the second byte.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* No secondary opcode at all.  */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(&s->fields, 0, sizeof(s->fields));
    s->fields.raw_insn = insn;
    s->fields.op = op;
    s->fields.op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);
    s->insn = info;

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(&s->fields, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
6319 
/*
 * Return true if REG is an additional-floating-point register, i.e.
 * anything other than f0, f2, f4 or f6 (odd-numbered or above 6).
 */
static bool is_afp_reg(int reg)
{
    return (reg & 1) != 0 || reg > 6;
}
6324 
/*
 * Return true if REG can start a 128-bit floating point register pair:
 * 0,1,4,5,8,9,12,13 -- i.e. every register with bit 1 clear.
 */
static bool is_fp_pair(int reg)
{
    return (reg & 0x2) == 0;
}
6330 
/*
 * Decode and emit TCG code for the single instruction at
 * s->base.pc_next.  Returns the resulting disposition (DISAS_NEXT,
 * DISAS_NORETURN, ...) and always advances s->base.pc_next past the
 * instruction, even on the exception paths.
 */
static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasOps o = {};
    bool icount = false;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s);

    /* Update insn_start now that we know the ILEN.  */
    tcg_set_insn_start_param(s->insn_start, 2, s->ilen);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      s->fields.op, s->fields.op2);
        gen_illegal_opcode(s);
        ret = DISAS_NORETURN;
        goto out;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* Record a PER instruction-fetch event for this insn.  */
        TCGv_i64 addr = tcg_constant_i64(s->base.pc_next);
        gen_helper_per_ifetch(tcg_env, addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            ret = DISAS_NORETURN;
            goto out;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            /* dxc selects the data-exception code to raise; the later
               checks take precedence over the earlier ones. */
            uint8_t dxc = 0;

            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (insn->flags & IF_VEC) {
                dxc = 0xfe;
            }
            if (dxc) {
                gen_data_exception(dxc);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* if vector instructions not enabled, executing them is forbidden */
        if (insn->flags & IF_VEC) {
            if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
                gen_data_exception(0xfe);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* input/output is the special case for icount mode */
        if (unlikely(insn->flags & IF_IO)) {
            icount = translator_io_start(&s->base);
        }
    }

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            ret = DISAS_NORETURN;
            goto out;
        }
    }

    /* Implement the instruction.  NULL helpers in the table are
       simply skipped.  */
    if (insn->help_in1) {
        insn->help_in1(s, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (ret != DISAS_NORETURN) {
        /* Write back results and compute CC only when the operation
           itself did not end the translation (e.g. via exception).  */
        if (insn->help_wout) {
            insn->help_wout(s, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* io should be the last instruction in tb when icount is enabled */
    if (unlikely(icount && ret == DISAS_NEXT)) {
        ret = DISAS_TOO_MANY;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == DISAS_NEXT || ret == DISAS_TOO_MANY) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(tcg_env);
    }
#endif

out:
    /* Advance to the next instruction.  */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
6470 
6471 static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6472 {
6473     DisasContext *dc = container_of(dcbase, DisasContext, base);
6474 
6475     /* 31-bit mode */
6476     if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6477         dc->base.pc_first &= 0x7fffffff;
6478         dc->base.pc_next = dc->base.pc_first;
6479     }
6480 
6481     dc->cc_op = CC_OP_DYNAMIC;
6482     dc->ex_value = dc->base.tb->cs_base;
6483     dc->exit_to_mainloop = (dc->base.tb->flags & FLAG_MASK_PER) || dc->ex_value;
6484 }
6485 
/* No additional per-TB work is needed before the first insn. */
static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
6489 
/*
 * Emit the insn_start op for the next instruction.  The third argument
 * (ilen) is left as 0 here and patched in by translate_one() via
 * tcg_set_insn_start_param() once the insn has actually been read.
 */
static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* Delay the set of ilen until we've read the insn. */
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
    dc->insn_start = tcg_last_op();
}
6498 
6499 static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
6500                                 uint64_t pc)
6501 {
6502     uint64_t insn = cpu_lduw_code(env, pc);
6503 
6504     return pc + get_ilen((insn >> 8) & 0xff);
6505 }
6506 
6507 static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6508 {
6509     CPUS390XState *env = cpu_env(cs);
6510     DisasContext *dc = container_of(dcbase, DisasContext, base);
6511 
6512     dc->base.is_jmp = translate_one(env, dc);
6513     if (dc->base.is_jmp == DISAS_NEXT) {
6514         if (dc->ex_value ||
6515             !is_same_page(dcbase, dc->base.pc_next) ||
6516             !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) {
6517             dc->base.is_jmp = DISAS_TOO_MANY;
6518         }
6519     }
6520 }
6521 
/*
 * Finalize the TB.  Note the deliberate case fallthroughs: each later
 * case performs a subset of the state flushing of the one before it.
 */
static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        /* exit_to_mainloop is set when PER is active or we are inside
           an EXECUTE; otherwise chain to the next TB directly.  */
        if (dc->exit_to_mainloop) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
6549 
6550 static void s390x_tr_disas_log(const DisasContextBase *dcbase,
6551                                CPUState *cs, FILE *logfile)
6552 {
6553     DisasContext *dc = container_of(dcbase, DisasContext, base);
6554 
6555     if (unlikely(dc->ex_value)) {
6556         /* ??? Unfortunately target_disas can't use host memory.  */
6557         fprintf(logfile, "IN: EXECUTE %016" PRIx64, dc->ex_value);
6558     } else {
6559         fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
6560         target_disas(logfile, cs, dc->base.pc_first, dc->base.tb->size);
6561     }
6562 }
6563 
/* Hooks through which the generic translator loop drives the s390x
   front end (see gen_intermediate_code below). */
static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};
6572 
/* Entry point: translate the TB at PC via the generic translator loop. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
}
6580 
6581 void s390x_restore_state_to_opc(CPUState *cs,
6582                                 const TranslationBlock *tb,
6583                                 const uint64_t *data)
6584 {
6585     CPUS390XState *env = cpu_env(cs);
6586     int cc_op = data[1];
6587 
6588     env->psw.addr = data[0];
6589 
6590     /* Update the CC opcode if it is not already up-to-date.  */
6591     if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6592         env->cc_op = cc_op;
6593     }
6594 
6595     /* Record ILEN.  */
6596     env->int_pgm_ilen = data[2];
6597 }
6598