/*
 *  S/390 translation
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

#include "qemu/osdep.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"


/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/*
 * Define a structure to hold the decoded fields.  We'll store each field
 * in an array indexed by an enum.  In order to conserve memory, we'll
 * arrange for fields that do not exist at the same time to overlap, thus
 * the "C" for compact.  For checking purposes there is also an "O" for
 * the original index, which is applied to the availability bitmaps.
 */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};
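
/*
 * For example, r1, m1, b1, i1 and v1 all share compact slot 0: no
 * instruction format uses more than one of them at the same time.
 */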

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    TCGOp *insn_start;
    DisasFields fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
    bool exit_to_mainloop;
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif

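/*
 * Compute the link information for PC: the full address in 64-bit mode;
 * otherwise the low 32 bits of OUT are replaced with PC, with the
 * addressing-mode bit set on top in 31-bit mode.
 */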
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tcg_gen_deposit_i64(out, out, tcg_constant_i64(pc), 0, 32);
}

static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];

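/* Allocate the TCG globals that mirror the architected CPU state.  */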
void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}

static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}

static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 bytes and vregs[n][1] the highest 8 bytes
     * of the 16-byte vector, on both little- and big-endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#if !HOST_BIG_ENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}

static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}

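/* Return a fresh temp holding a copy of general register REG.  */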
static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}

static TCGv_i128 load_freg_128(int reg)
{
    TCGv_i64 h = load_freg(reg);
    TCGv_i64 l = load_freg(reg + 2);
    TCGv_i128 r = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(r, l, h);
    tcg_temp_free_i64(h);
    tcg_temp_free_i64(l);
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}

static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

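/*
 * PER (Program Event Recording) support: gbea tracks the
 * breaking-event address; the per_branch helper raises the branch
 * event when PER is enabled in the TB flags.
 */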
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_constant_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_constant_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

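/* Fetch 2 or 4 instruction bytes through the translator loop.  */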
static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)translator_lduw(env, &s->base, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
}

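/*
 * Pick the MMU index for memory accesses: real mode while DAT is off,
 * otherwise according to the PSW address-space control.
 */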
static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        tcg_abort();
        break;
    }
#endif
}

static void gen_exception(int excp)
{
    gen_helper_exception(cpu_env, tcg_constant_i32(excp));
}

static void gen_program_exception(DisasContext *s, int code)
{
    /* Remember what pgm exception this was.  */
    tcg_gen_st_i32(tcg_constant_i32(code), cpu_env,
                   offsetof(CPUS390XState, int_pgm_code));

    tcg_gen_st_i32(tcg_constant_i32(s->ilen), cpu_env,
                   offsetof(CPUS390XState, int_pgm_ilen));

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    gen_helper_data_exception(cpu_env, tcg_constant_i32(dxc));
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}

static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}

static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}

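/*
 * True when cc_src/cc_dst/cc_vr hold live inputs for the current cc_op,
 * i.e. the cc is neither static, dynamic, nor one of the constants 0-3.
 */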
static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}

/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_constant_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_constant_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
        tcg_gen_setcondi_i64(TCG_COND_NE, cc_dst, cc_dst, 0);
        tcg_gen_extrl_i64_i32(cc_op, cc_dst);
        break;
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}

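/*
 * With PER enabled, every branch may have to raise a branch event at
 * run time, so direct TB chaining is disabled.
 */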
static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}

static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For such an input, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such an input, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};

/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_constant_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_constant_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_constant_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_constant_i64(0);
        c->g1 = true;
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, tcg_constant_i32(8), cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}

static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}

/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.h.inc"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}

/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6)       { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.h.inc"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L

/* Generally, we'll extract operands into this structure, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
    TCGv_i128 out_128, in1_128, in2_128;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */

struct DisasInsn {
    unsigned opc:16;
    unsigned flags:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};

/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

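/* Emit an unconditional branch to DEST, chaining TBs when possible.  */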
static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}

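/*
 * Emit a conditional branch: taken to the immediate target
 * (base.pc_next + 2 * IMM) or to CDEST, not taken to the next
 * sequential instruction.
 */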
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + (int64_t)imm * 2;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_NORETURN;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_constant_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_constant_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_constant_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
        }

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}

/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}

/* Compute carry into cc_src. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}

static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}

static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}

static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out_128, cpu_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}

static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_andc(DisasContext *s, DisasOps *o)
{
    tcg_gen_andc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_orc(DisasContext *s, DisasOps *o)
{
    tcg_gen_orc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nand(DisasContext *s, DisasOps *o)
{
    tcg_gen_nand_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nor(DisasContext *s, DisasOps *o)
{
    tcg_gen_nor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nxor(DisasContext *s, DisasOps *o)
{
    tcg_gen_eqv_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

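/*
 * Save the BAL-style link information.  In 24-bit mode this packs the
 * ILC, condition code and program mask above the return address;
 * otherwise it is the same as pc_to_link_info().
 */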
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
}

static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    return help_goto_direct(s, s->base.pc_next + (int64_t)get_field(s, i2) * 2);
}

static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s, m1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s, r2) && get_field(s, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int imm = get_field(s, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}

static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_constant_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s, i4);
    if (is_imm) {
        imm = get_field(s, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}

static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->in1_128, o->in2_128);
    set_cc_static(s);
    return DISAS_NEXT;
}

1771 static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
1772                                    bool m4_with_fpe)
1773 {
1774     const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
1775     uint8_t m3 = get_field(s, m3);
1776     uint8_t m4 = get_field(s, m4);
1777 
1778     /* m3 field was introduced with FPE */
1779     if (!fpe && m3_with_fpe) {
1780         m3 = 0;
1781     }
1782     /* m4 field was introduced with FPE */
1783     if (!fpe && m4_with_fpe) {
1784         m4 = 0;
1785     }
1786 
1787     /* Check for valid rounding modes.  Mode 3 was introduced with FPE. */
1788     if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
1789         gen_program_exception(s, PGM_SPECIFICATION);
1790         return NULL;
1791     }
1792 
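    /* Pack both fields into one immediate: m3 in bits 0-3, m4 in
       bits 4-7.  */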
1793     return tcg_constant_i32(deposit32(m3, 4, 4, m4));
1794 }
1795 
1796 static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
1797 {
1798     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1799 
1800     if (!m34) {
1801         return DISAS_NORETURN;
1802     }
1803     gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
1804     set_cc_static(s);
1805     return DISAS_NEXT;
1806 }
1807 
1808 static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
1809 {
1810     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1811 
1812     if (!m34) {
1813         return DISAS_NORETURN;
1814     }
1815     gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
1816     set_cc_static(s);
1817     return DISAS_NEXT;
1818 }
1819 
1820 static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
1821 {
1822     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1823 
1824     if (!m34) {
1825         return DISAS_NORETURN;
1826     }
1827     gen_helper_cfxb(o->out, cpu_env, o->in2_128, m34);
1828     set_cc_static(s);
1829     return DISAS_NEXT;
1830 }
1831 
1832 static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
1833 {
1834     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1835 
1836     if (!m34) {
1837         return DISAS_NORETURN;
1838     }
1839     gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
1840     set_cc_static(s);
1841     return DISAS_NEXT;
1842 }
1843 
1844 static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
1845 {
1846     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1847 
1848     if (!m34) {
1849         return DISAS_NORETURN;
1850     }
1851     gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
1852     set_cc_static(s);
1853     return DISAS_NEXT;
1854 }
1855 
1856 static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
1857 {
1858     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1859 
1860     if (!m34) {
1861         return DISAS_NORETURN;
1862     }
1863     gen_helper_cgxb(o->out, cpu_env, o->in2_128, m34);
1864     set_cc_static(s);
1865     return DISAS_NEXT;
1866 }
1867 
1868 static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
1869 {
1870     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1871 
1872     if (!m34) {
1873         return DISAS_NORETURN;
1874     }
1875     gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
1876     set_cc_static(s);
1877     return DISAS_NEXT;
1878 }
1879 
1880 static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
1881 {
1882     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1883 
1884     if (!m34) {
1885         return DISAS_NORETURN;
1886     }
1887     gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
1888     set_cc_static(s);
1889     return DISAS_NEXT;
1890 }
1891 
1892 static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
1893 {
1894     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1895 
1896     if (!m34) {
1897         return DISAS_NORETURN;
1898     }
1899     gen_helper_clfxb(o->out, cpu_env, o->in2_128, m34);
1900     set_cc_static(s);
1901     return DISAS_NEXT;
1902 }
1903 
1904 static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
1905 {
1906     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1907 
1908     if (!m34) {
1909         return DISAS_NORETURN;
1910     }
1911     gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
1912     set_cc_static(s);
1913     return DISAS_NEXT;
1914 }
1915 
1916 static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
1917 {
1918     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1919 
1920     if (!m34) {
1921         return DISAS_NORETURN;
1922     }
1923     gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
1924     set_cc_static(s);
1925     return DISAS_NEXT;
1926 }
1927 
1928 static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
1929 {
1930     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1931 
1932     if (!m34) {
1933         return DISAS_NORETURN;
1934     }
1935     gen_helper_clgxb(o->out, cpu_env, o->in2_128, m34);
1936     set_cc_static(s);
1937     return DISAS_NEXT;
1938 }
1939 
1940 static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
1941 {
1942     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1943 
1944     if (!m34) {
1945         return DISAS_NORETURN;
1946     }
1947     gen_helper_cegb(o->out, cpu_env, o->in2, m34);
1948     return DISAS_NEXT;
1949 }
1950 
1951 static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
1952 {
1953     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1954 
1955     if (!m34) {
1956         return DISAS_NORETURN;
1957     }
1958     gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
1959     return DISAS_NEXT;
1960 }
1961 
1962 static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
1963 {
1964     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1965 
1966     if (!m34) {
1967         return DISAS_NORETURN;
1968     }
1969     gen_helper_cxgb(o->out_128, cpu_env, o->in2, m34);
1970     return DISAS_NEXT;
1971 }
1972 
1973 static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
1974 {
1975     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1976 
1977     if (!m34) {
1978         return DISAS_NORETURN;
1979     }
1980     gen_helper_celgb(o->out, cpu_env, o->in2, m34);
1981     return DISAS_NEXT;
1982 }
1983 
1984 static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
1985 {
1986     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1987 
1988     if (!m34) {
1989         return DISAS_NORETURN;
1990     }
1991     gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
1992     return DISAS_NEXT;
1993 }
1994 
1995 static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
1996 {
1997     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1998 
1999     if (!m34) {
2000         return DISAS_NORETURN;
2001     }
2002     gen_helper_cxlgb(o->out_128, cpu_env, o->in2, m34);
2003     return DISAS_NEXT;
2004 }
2005 
2006 static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
2007 {
2008     int r2 = get_field(s, r2);
2009     TCGv_i128 pair = tcg_temp_new_i128();
2010     TCGv_i64 len = tcg_temp_new_i64();
2011 
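    /* The helper returns the checksum in the low half of the pair and
       the number of bytes processed in the high half; the r2
       address/length pair is advanced by that amount below.  */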
2012     gen_helper_cksm(pair, cpu_env, o->in1, o->in2, regs[r2 + 1]);
2013     set_cc_static(s);
2014     tcg_gen_extr_i128_i64(o->out, len, pair);
2015     tcg_temp_free_i128(pair);
2016 
2017     tcg_gen_add_i64(regs[r2], regs[r2], len);
2018     tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
2019     tcg_temp_free_i64(len);
2020 
2021     return DISAS_NEXT;
2022 }
2023 
2024 static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
2025 {
2026     int l = get_field(s, l1);
2027     TCGv_i32 vl;
2028 
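    /* l1 is a length-minus-one field; inline the power-of-two sizes
       and defer all other lengths to the helper.  */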
2029     switch (l + 1) {
2030     case 1:
2031         tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
2032         tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
2033         break;
2034     case 2:
2035         tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
2036         tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
2037         break;
2038     case 4:
2039         tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
2040         tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
2041         break;
2042     case 8:
2043         tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
2044         tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
2045         break;
2046     default:
2047         vl = tcg_constant_i32(l);
2048         gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
2049         set_cc_static(s);
2050         return DISAS_NEXT;
2051     }
2052     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
2053     return DISAS_NEXT;
2054 }
2055 
2056 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2057 {
2058     int r1 = get_field(s, r1);
2059     int r2 = get_field(s, r2);
2060     TCGv_i32 t1, t2;
2061 
2062     /* r1 and r2 must be even.  */
2063     if (r1 & 1 || r2 & 1) {
2064         gen_program_exception(s, PGM_SPECIFICATION);
2065         return DISAS_NORETURN;
2066     }
2067 
2068     t1 = tcg_constant_i32(r1);
2069     t2 = tcg_constant_i32(r2);
2070     gen_helper_clcl(cc_op, cpu_env, t1, t2);
2071     set_cc_static(s);
2072     return DISAS_NEXT;
2073 }
2074 
2075 static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
2076 {
2077     int r1 = get_field(s, r1);
2078     int r3 = get_field(s, r3);
2079     TCGv_i32 t1, t3;
2080 
2081     /* r1 and r3 must be even.  */
2082     if (r1 & 1 || r3 & 1) {
2083         gen_program_exception(s, PGM_SPECIFICATION);
2084         return DISAS_NORETURN;
2085     }
2086 
2087     t1 = tcg_constant_i32(r1);
2088     t3 = tcg_constant_i32(r3);
2089     gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
2090     set_cc_static(s);
2091     return DISAS_NEXT;
2092 }
2093 
2094 static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
2095 {
2096     int r1 = get_field(s, r1);
2097     int r3 = get_field(s, r3);
2098     TCGv_i32 t1, t3;
2099 
2100     /* r1 and r3 must be even.  */
2101     if (r1 & 1 || r3 & 1) {
2102         gen_program_exception(s, PGM_SPECIFICATION);
2103         return DISAS_NORETURN;
2104     }
2105 
2106     t1 = tcg_constant_i32(r1);
2107     t3 = tcg_constant_i32(r3);
2108     gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
2109     set_cc_static(s);
2110     return DISAS_NEXT;
2111 }
2112 
2113 static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
2114 {
2115     TCGv_i32 m3 = tcg_constant_i32(get_field(s, m3));
2116     TCGv_i32 t1 = tcg_temp_new_i32();
2117 
2118     tcg_gen_extrl_i64_i32(t1, o->in1);
2119     gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
2120     set_cc_static(s);
2121     tcg_temp_free_i32(t1);
2122     return DISAS_NEXT;
2123 }
2124 
2125 static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
2126 {
2127     TCGv_i128 pair = tcg_temp_new_i128();
2128 
2129     gen_helper_clst(pair, cpu_env, regs[0], o->in1, o->in2);
2130     tcg_gen_extr_i128_i64(o->in2, o->in1, pair);
2131     tcg_temp_free_i128(pair);
2132 
2133     set_cc_static(s);
2134     return DISAS_NEXT;
2135 }
2136 
2137 static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2138 {
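    /* COPY SIGN: take the sign bit from in1 and the magnitude
       from in2.  */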
2139     TCGv_i64 t = tcg_temp_new_i64();
2140     tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2141     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2142     tcg_gen_or_i64(o->out, o->out, t);
2143     tcg_temp_free_i64(t);
2144     return DISAS_NEXT;
2145 }
2146 
2147 static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
2148 {
2149     int d2 = get_field(s, d2);
2150     int b2 = get_field(s, b2);
2151     TCGv_i64 addr, cc;
2152 
2153     /* Note that in1 = R3 (new value) and
2154        in2 = (zero-extended) R1 (expected value).  */
2155 
2156     addr = get_address(s, 0, b2, d2);
2157     tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
2158                                get_mem_index(s), s->insn->data | MO_ALIGN);
2159     tcg_temp_free_i64(addr);
2160 
2161     /* Are the memory and expected values (un)equal?  Note that this setcond
2162        produces the output CC value, thus the NE sense of the test.  */
2163     cc = tcg_temp_new_i64();
2164     tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
2165     tcg_gen_extrl_i64_i32(cc_op, cc);
2166     tcg_temp_free_i64(cc);
2167     set_cc_static(s);
2168 
2169     return DISAS_NEXT;
2170 }
2171 
2172 static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
2173 {
2174     int r1 = get_field(s, r1);
2175 
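    /* Assemble the expected value from the r1:r1+1 pair; the even
       register supplies the most significant doubleword.  */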
2176     o->out_128 = tcg_temp_new_i128();
2177     tcg_gen_concat_i64_i128(o->out_128, regs[r1 + 1], regs[r1]);
2178 
2179     /* Note out (R1:R1+1) = expected value and in2 (R3:R3+1) = new value.  */
2180     tcg_gen_atomic_cmpxchg_i128(o->out_128, o->addr1, o->out_128, o->in2_128,
2181                                 get_mem_index(s), MO_BE | MO_128 | MO_ALIGN);
2182 
2183     /*
2184      * Extract result into cc_dst:cc_src, compare vs the expected value
2185      * in the as yet unmodified input registers, then update CC_OP.
2186      */
2187     tcg_gen_extr_i128_i64(cc_src, cc_dst, o->out_128);
2188     tcg_gen_xor_i64(cc_dst, cc_dst, regs[r1]);
2189     tcg_gen_xor_i64(cc_src, cc_src, regs[r1 + 1]);
2190     tcg_gen_or_i64(cc_dst, cc_dst, cc_src);
2191     set_cc_nz_u64(s, cc_dst);
2192 
2193     return DISAS_NEXT;
2194 }
2195 
2196 static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
2197 {
2198     int r3 = get_field(s, r3);
2199     TCGv_i32 t_r3 = tcg_constant_i32(r3);
2200 
2201     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2202         gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2203     } else {
2204         gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2205     }
2206 
2207     set_cc_static(s);
2208     return DISAS_NEXT;
2209 }
2210 
2211 #ifndef CONFIG_USER_ONLY
2212 static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
2213 {
2214     MemOp mop = s->insn->data;
2215     TCGv_i64 addr, old, cc;
2216     TCGLabel *lab = gen_new_label();
2217 
2218     /* Note that in1 = R1 (zero-extended expected value),
2219        out = R1 (original reg), out2 = R1+1 (new value).  */
2220 
2221     addr = tcg_temp_new_i64();
2222     old = tcg_temp_new_i64();
2223     tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2224     tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2225                                get_mem_index(s), mop | MO_ALIGN);
2226     tcg_temp_free_i64(addr);
2227 
2228     /* Are the memory and expected values (un)equal?  */
2229     cc = tcg_temp_new_i64();
2230     tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2231     tcg_gen_extrl_i64_i32(cc_op, cc);
2232 
2233     /* Write back the output now, before the following branch,
2234        so that we don't need local temps.  */
2235     if ((mop & MO_SIZE) == MO_32) {
2236         tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2237     } else {
2238         tcg_gen_mov_i64(o->out, old);
2239     }
2240     tcg_temp_free_i64(old);
2241 
2242     /* If the comparison was equal, and the LSB of R2 was set,
2243        then we need to flush the TLB (for all cpus).  */
2244     tcg_gen_xori_i64(cc, cc, 1);
2245     tcg_gen_and_i64(cc, cc, o->in2);
2246     tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2247     tcg_temp_free_i64(cc);
2248 
2249     gen_helper_purge(cpu_env);
2250     gen_set_label(lab);
2251 
2252     return DISAS_NEXT;
2253 }
2254 #endif
2255 
2256 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2257 {
2258     TCGv_i64 t1 = tcg_temp_new_i64();
2259     TCGv_i32 t2 = tcg_temp_new_i32();
2260     tcg_gen_extrl_i64_i32(t2, o->in1);
2261     gen_helper_cvd(t1, t2);
2262     tcg_temp_free_i32(t2);
2263     tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2264     tcg_temp_free_i64(t1);
2265     return DISAS_NEXT;
2266 }
2267 
2268 static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2269 {
2270     int m3 = get_field(s, m3);
2271     TCGLabel *lab = gen_new_label();
2272     TCGCond c;
2273 
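    /* Invert the condition: we branch around the trap, so the trap is
       taken exactly when the m3-encoded relation holds.  */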
2274     c = tcg_invert_cond(ltgt_cond[m3]);
2275     if (s->insn->data) {
2276         c = tcg_unsigned_cond(c);
2277     }
2278     tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2279 
2280     /* Trap.  */
2281     gen_trap(s);
2282 
2283     gen_set_label(lab);
2284     return DISAS_NEXT;
2285 }
2286 
2287 static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
2288 {
2289     int m3 = get_field(s, m3);
2290     int r1 = get_field(s, r1);
2291     int r2 = get_field(s, r2);
2292     TCGv_i32 tr1, tr2, chk;
2293 
2294     /* R1 and R2 must both be even.  */
2295     if ((r1 | r2) & 1) {
2296         gen_program_exception(s, PGM_SPECIFICATION);
2297         return DISAS_NORETURN;
2298     }
2299     if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2300         m3 = 0;
2301     }
2302 
2303     tr1 = tcg_constant_i32(r1);
2304     tr2 = tcg_constant_i32(r2);
2305     chk = tcg_constant_i32(m3);
2306 
2307     switch (s->insn->data) {
2308     case 12:
2309         gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
2310         break;
2311     case 14:
2312         gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
2313         break;
2314     case 21:
2315         gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
2316         break;
2317     case 24:
2318         gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
2319         break;
2320     case 41:
2321         gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
2322         break;
2323     case 42:
2324         gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
2325         break;
2326     default:
2327         g_assert_not_reached();
2328     }
2329 
2330     set_cc_static(s);
2331     return DISAS_NEXT;
2332 }
2333 
2334 #ifndef CONFIG_USER_ONLY
2335 static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2336 {
2337     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2338     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2339     TCGv_i32 func_code = tcg_constant_i32(get_field(s, i2));
2340 
2341     gen_helper_diag(cpu_env, r1, r3, func_code);
2342     return DISAS_NEXT;
2343 }
2344 #endif
2345 
2346 static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
2347 {
2348     gen_helper_divs32(o->out, cpu_env, o->in1, o->in2);
2349     tcg_gen_extr32_i64(o->out2, o->out, o->out);
2350     return DISAS_NEXT;
2351 }
2352 
2353 static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
2354 {
2355     gen_helper_divu32(o->out, cpu_env, o->in1, o->in2);
2356     tcg_gen_extr32_i64(o->out2, o->out, o->out);
2357     return DISAS_NEXT;
2358 }
2359 
2360 static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
2361 {
2362     TCGv_i128 t = tcg_temp_new_i128();
2363 
2364     gen_helper_divs64(t, cpu_env, o->in1, o->in2);
2365     tcg_gen_extr_i128_i64(o->out2, o->out, t);
2366     tcg_temp_free_i128(t);
2367     return DISAS_NEXT;
2368 }
2369 
2370 static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
2371 {
2372     TCGv_i128 t = tcg_temp_new_i128();
2373 
2374     gen_helper_divu64(t, cpu_env, o->out, o->out2, o->in2);
2375     tcg_gen_extr_i128_i64(o->out2, o->out, t);
2376     tcg_temp_free_i128(t);
2377     return DISAS_NEXT;
2378 }
2379 
2380 static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
2381 {
2382     gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2383     return DISAS_NEXT;
2384 }
2385 
2386 static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
2387 {
2388     gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2389     return DISAS_NEXT;
2390 }
2391 
2392 static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
2393 {
2394     gen_helper_dxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
2395     return DISAS_NEXT;
2396 }
2397 
2398 static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
2399 {
2400     int r2 = get_field(s, r2);
2401     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2402     return DISAS_NEXT;
2403 }
2404 
2405 static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
2406 {
2407     /* No cache information provided.  */
2408     tcg_gen_movi_i64(o->out, -1);
2409     return DISAS_NEXT;
2410 }
2411 
2412 static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
2413 {
2414     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2415     return DISAS_NEXT;
2416 }
2417 
2418 static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
2419 {
2420     int r1 = get_field(s, r1);
2421     int r2 = get_field(s, r2);
2422     TCGv_i64 t = tcg_temp_new_i64();
2423 
2424     /* Note the "subsequently" in the PoO, which implies a defined result
2425        if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
2426     tcg_gen_shri_i64(t, psw_mask, 32);
2427     store_reg32_i64(r1, t);
2428     if (r2 != 0) {
2429         store_reg32_i64(r2, psw_mask);
2430     }
2431 
2432     tcg_temp_free_i64(t);
2433     return DISAS_NEXT;
2434 }
2435 
2436 static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2437 {
2438     int r1 = get_field(s, r1);
2439     TCGv_i32 ilen;
2440     TCGv_i64 v1;
2441 
2442     /* Nested EXECUTE is not allowed.  */
2443     if (unlikely(s->ex_value)) {
2444         gen_program_exception(s, PGM_EXECUTE);
2445         return DISAS_NORETURN;
2446     }
2447 
2448     update_psw_addr(s);
2449     update_cc_op(s);
2450 
2451     if (r1 == 0) {
2452         v1 = tcg_constant_i64(0);
2453     } else {
2454         v1 = regs[r1];
2455     }
2456 
2457     ilen = tcg_constant_i32(s->ilen);
2458     gen_helper_ex(cpu_env, ilen, v1, o->in2);
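    /* The helper latches the (possibly modified) target instruction in
       env->ex_value; translation then restarts at the current address
       with that value set, which is why nesting is rejected above.  */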
2459 
2460     return DISAS_PC_CC_UPDATED;
2461 }
2462 
2463 static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2464 {
2465     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2466 
2467     if (!m34) {
2468         return DISAS_NORETURN;
2469     }
2470     gen_helper_fieb(o->out, cpu_env, o->in2, m34);
2471     return DISAS_NEXT;
2472 }
2473 
2474 static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2475 {
2476     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2477 
2478     if (!m34) {
2479         return DISAS_NORETURN;
2480     }
2481     gen_helper_fidb(o->out, cpu_env, o->in2, m34);
2482     return DISAS_NEXT;
2483 }
2484 
2485 static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2486 {
2487     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2488 
2489     if (!m34) {
2490         return DISAS_NORETURN;
2491     }
2492     gen_helper_fixb(o->out_128, cpu_env, o->in2_128, m34);
2493     return DISAS_NEXT;
2494 }
2495 
2496 static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
2497 {
2498     /* We'll use the original input for cc computation, since we get to
2499        compare that against 0, which ought to be better than comparing
2500        the real output against 64.  It also lets cc_dst be a convenient
2501        temporary during our computation.  */
2502     gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2503 
2504     /* R1 = IN ? CLZ(IN) : 64.  */
2505     tcg_gen_clzi_i64(o->out, o->in2, 64);
2506 
2507     /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
2508        value by 64, which is undefined.  But since the shift is 64 iff the
2509        input is zero, we still get the correct result after and'ing.  */
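    /* E.g. IN = 0x0000800000000000: R1 = 16, the shifted mask recovers
       exactly that bit, and thus R1+1 = 0.  */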
2510     tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2511     tcg_gen_shr_i64(o->out2, o->out2, o->out);
2512     tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2513     return DISAS_NEXT;
2514 }
2515 
2516 static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2517 {
2518     int m3 = get_field(s, m3);
2519     int pos, len, base = s->insn->data;
2520     TCGv_i64 tmp = tcg_temp_new_i64();
2521     uint64_t ccm;
2522 
2523     switch (m3) {
2524     case 0xf:
2525         /* Effectively a 32-bit load.  */
2526         tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2527         len = 32;
2528         goto one_insert;
2529 
2530     case 0xc:
2531     case 0x6:
2532     case 0x3:
2533         /* Effectively a 16-bit load.  */
2534         tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2535         len = 16;
2536         goto one_insert;
2537 
2538     case 0x8:
2539     case 0x4:
2540     case 0x2:
2541     case 0x1:
2542         /* Effectively an 8-bit load.  */
2543         tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2544         len = 8;
2545         goto one_insert;
2546 
2547     one_insert:
2548         pos = base + ctz32(m3) * 8;
2549         tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2550         ccm = ((1ull << len) - 1) << pos;
2551         break;
2552 
2553     default:
2554         /* This is going to be a sequence of loads and inserts.  */
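        /* E.g. m3 == 0x5 with base 0 loads two bytes, depositing them
           at bit positions 16-23 and 0-7 respectively.  */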
2555         pos = base + 32 - 8;
2556         ccm = 0;
2557         while (m3) {
2558             if (m3 & 0x8) {
2559                 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2560                 tcg_gen_addi_i64(o->in2, o->in2, 1);
2561                 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2562                 ccm |= 0xffull << pos;
2563             }
2564             m3 = (m3 << 1) & 0xf;
2565             pos -= 8;
2566         }
2567         break;
2568     }
2569 
2570     tcg_gen_movi_i64(tmp, ccm);
2571     gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2572     tcg_temp_free_i64(tmp);
2573     return DISAS_NEXT;
2574 }
2575 
2576 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2577 {
2578     int shift = s->insn->data & 0xff;
2579     int size = s->insn->data >> 8;
2580     tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2581     return DISAS_NEXT;
2582 }
2583 
2584 static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
2585 {
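    /* Deposit (cc << 4) | program_mask into bits 24-31 of r1, leaving
       the other bits intact.  */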
2586     TCGv_i64 t1, t2;
2587 
2588     gen_op_calc_cc(s);
2589     t1 = tcg_temp_new_i64();
2590     tcg_gen_extract_i64(t1, psw_mask, 40, 4);
2591     t2 = tcg_temp_new_i64();
2592     tcg_gen_extu_i32_i64(t2, cc_op);
2593     tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
2594     tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
2595     tcg_temp_free_i64(t1);
2596     tcg_temp_free_i64(t2);
2597     return DISAS_NEXT;
2598 }
2599 
2600 #ifndef CONFIG_USER_ONLY
2601 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2602 {
2603     TCGv_i32 m4;
2604 
2605     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2606         m4 = tcg_constant_i32(get_field(s, m4));
2607     } else {
2608         m4 = tcg_constant_i32(0);
2609     }
2610     gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2611     return DISAS_NEXT;
2612 }
2613 
2614 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2615 {
2616     TCGv_i32 m4;
2617 
2618     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2619         m4 = tcg_constant_i32(get_field(s, m4));
2620     } else {
2621         m4 = tcg_constant_i32(0);
2622     }
2623     gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2624     return DISAS_NEXT;
2625 }
2626 
2627 static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
2628 {
2629     gen_helper_iske(o->out, cpu_env, o->in2);
2630     return DISAS_NEXT;
2631 }
2632 #endif
2633 
2634 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2635 {
2636     int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2637     int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2638     int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2639     TCGv_i32 t_r1, t_r2, t_r3, type;
2640 
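    /* Validate the function-specific register constraints; the cases
       fall through from the strictest to the weakest requirements.  */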
2641     switch (s->insn->data) {
2642     case S390_FEAT_TYPE_KMA:
2643         if (r3 == r1 || r3 == r2) {
2644             gen_program_exception(s, PGM_SPECIFICATION);
2645             return DISAS_NORETURN;
2646         }
2647         /* FALL THROUGH */
2648     case S390_FEAT_TYPE_KMCTR:
2649         if (r3 & 1 || !r3) {
2650             gen_program_exception(s, PGM_SPECIFICATION);
2651             return DISAS_NORETURN;
2652         }
2653         /* FALL THROUGH */
2654     case S390_FEAT_TYPE_PPNO:
2655     case S390_FEAT_TYPE_KMF:
2656     case S390_FEAT_TYPE_KMC:
2657     case S390_FEAT_TYPE_KMO:
2658     case S390_FEAT_TYPE_KM:
2659         if (r1 & 1 || !r1) {
2660             gen_program_exception(s, PGM_SPECIFICATION);
2661             return DISAS_NORETURN;
2662         }
2663         /* FALL THROUGH */
2664     case S390_FEAT_TYPE_KMAC:
2665     case S390_FEAT_TYPE_KIMD:
2666     case S390_FEAT_TYPE_KLMD:
2667         if (r2 & 1 || !r2) {
2668             gen_program_exception(s, PGM_SPECIFICATION);
2669             return DISAS_NORETURN;
2670         }
2671         /* FALL THROUGH */
2672     case S390_FEAT_TYPE_PCKMO:
2673     case S390_FEAT_TYPE_PCC:
2674         break;
2675     default:
2676         g_assert_not_reached();
2677     }
2678 
2679     t_r1 = tcg_constant_i32(r1);
2680     t_r2 = tcg_constant_i32(r2);
2681     t_r3 = tcg_constant_i32(r3);
2682     type = tcg_constant_i32(s->insn->data);
2683     gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2684     set_cc_static(s);
2685     return DISAS_NEXT;
2686 }
2687 
2688 static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2689 {
2690     gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2691     set_cc_static(s);
2692     return DISAS_NEXT;
2693 }
2694 
2695 static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2696 {
2697     gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2698     set_cc_static(s);
2699     return DISAS_NEXT;
2700 }
2701 
2702 static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2703 {
2704     gen_helper_kxb(cc_op, cpu_env, o->in1_128, o->in2_128);
2705     set_cc_static(s);
2706     return DISAS_NEXT;
2707 }
2708 
2709 static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2710 {
2711     /* The real output is indeed the original value in memory.  */
2713     tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2714                                  s->insn->data | MO_ALIGN);
2715     /* However, we need to recompute the addition for setting CC.  */
2716     tcg_gen_add_i64(o->out, o->in1, o->in2);
2717     return DISAS_NEXT;
2718 }
2719 
2720 static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2721 {
2722     /* The real output is indeed the original value in memory.  */
2724     tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2725                                  s->insn->data | MO_ALIGN);
2726     /* However, we need to recompute the operation for setting CC.  */
2727     tcg_gen_and_i64(o->out, o->in1, o->in2);
2728     return DISAS_NEXT;
2729 }
2730 
2731 static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2732 {
2733     /* The real output is indeed the original value in memory.  */
2735     tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2736                                 s->insn->data | MO_ALIGN);
2737     /* However, we need to recompute the operation for setting CC.  */
2738     tcg_gen_or_i64(o->out, o->in1, o->in2);
2739     return DISAS_NEXT;
2740 }
2741 
2742 static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2743 {
2744     /* The real output is indeed the original value in memory.  */
2746     tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2747                                  s->insn->data | MO_ALIGN);
2748     /* However, we need to recompute the operation for setting CC.  */
2749     tcg_gen_xor_i64(o->out, o->in1, o->in2);
2750     return DISAS_NEXT;
2751 }
2752 
2753 static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2754 {
2755     gen_helper_ldeb(o->out, cpu_env, o->in2);
2756     return DISAS_NEXT;
2757 }
2758 
2759 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2760 {
2761     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2762 
2763     if (!m34) {
2764         return DISAS_NORETURN;
2765     }
2766     gen_helper_ledb(o->out, cpu_env, o->in2, m34);
2767     return DISAS_NEXT;
2768 }
2769 
2770 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2771 {
2772     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2773 
2774     if (!m34) {
2775         return DISAS_NORETURN;
2776     }
2777     gen_helper_ldxb(o->out, cpu_env, o->in2_128, m34);
2778     return DISAS_NEXT;
2779 }
2780 
2781 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2782 {
2783     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2784 
2785     if (!m34) {
2786         return DISAS_NORETURN;
2787     }
2788     gen_helper_lexb(o->out, cpu_env, o->in2_128, m34);
2789     return DISAS_NEXT;
2790 }
2791 
2792 static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2793 {
2794     gen_helper_lxdb(o->out_128, cpu_env, o->in2);
2795     return DISAS_NEXT;
2796 }
2797 
2798 static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2799 {
2800     gen_helper_lxeb(o->out_128, cpu_env, o->in2);
2801     return DISAS_NEXT;
2802 }
2803 
2804 static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
2805 {
2806     tcg_gen_shli_i64(o->out, o->in2, 32);
2807     return DISAS_NEXT;
2808 }
2809 
2810 static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2811 {
2812     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2813     return DISAS_NEXT;
2814 }
2815 
2816 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2817 {
2818     tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2819     return DISAS_NEXT;
2820 }
2821 
2822 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2823 {
2824     tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2825     return DISAS_NEXT;
2826 }
2827 
2828 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2829 {
2830     tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2831     return DISAS_NEXT;
2832 }
2833 
2834 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2835 {
2836     tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2837     return DISAS_NEXT;
2838 }
2839 
2840 static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2841 {
2842     tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2843     return DISAS_NEXT;
2844 }
2845 
2846 static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2847 {
2848     tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2849     return DISAS_NEXT;
2850 }
2851 
2852 static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2853 {
2854     tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2855     return DISAS_NEXT;
2856 }
2857 
2858 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2859 {
2860     TCGLabel *lab = gen_new_label();
2861     store_reg32_i64(get_field(s, r1), o->in2);
2862     /* The value is stored even in case of trap. */
2863     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2864     gen_trap(s);
2865     gen_set_label(lab);
2866     return DISAS_NEXT;
2867 }
2868 
2869 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2870 {
2871     TCGLabel *lab = gen_new_label();
2872     tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2873     /* The value is stored even in case of trap. */
2874     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2875     gen_trap(s);
2876     gen_set_label(lab);
2877     return DISAS_NEXT;
2878 }
2879 
2880 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2881 {
2882     TCGLabel *lab = gen_new_label();
2883     store_reg32h_i64(get_field(s, r1), o->in2);
2884     /* The value is stored even in case of trap. */
2885     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2886     gen_trap(s);
2887     gen_set_label(lab);
2888     return DISAS_NEXT;
2889 }
2890 
2891 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2892 {
2893     TCGLabel *lab = gen_new_label();
2894     tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2895     /* The value is stored even in case of trap. */
2896     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2897     gen_trap(s);
2898     gen_set_label(lab);
2899     return DISAS_NEXT;
2900 }
2901 
2902 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2903 {
2904     TCGLabel *lab = gen_new_label();
2905     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2906     /* The value is stored even in case of trap. */
2907     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2908     gen_trap(s);
2909     gen_set_label(lab);
2910     return DISAS_NEXT;
2911 }
2912 
2913 static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
2914 {
2915     DisasCompare c;
2916 
2917     if (have_field(s, m3)) {
2918         /* LOAD * ON CONDITION */
2919         disas_jcc(s, &c, get_field(s, m3));
2920     } else {
2921         /* SELECT */
2922         disas_jcc(s, &c, get_field(s, m4));
2923     }
2924 
2925     if (c.is_64) {
2926         tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2927                             o->in2, o->in1);
2928         free_compare(&c);
2929     } else {
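        /* The comparison operands are 32-bit but the move is 64-bit;
           materialize the condition as a 0/1 value and select on that
           instead.  */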
2930         TCGv_i32 t32 = tcg_temp_new_i32();
2931         TCGv_i64 t, z;
2932 
2933         tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2934         free_compare(&c);
2935 
2936         t = tcg_temp_new_i64();
2937         tcg_gen_extu_i32_i64(t, t32);
2938         tcg_temp_free_i32(t32);
2939 
2940         z = tcg_constant_i64(0);
2941         tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2942         tcg_temp_free_i64(t);
2943     }
2944 
2945     return DISAS_NEXT;
2946 }
2947 
2948 #ifndef CONFIG_USER_ONLY
2949 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
2950 {
2951     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2952     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2953 
2954     gen_helper_lctl(cpu_env, r1, o->in2, r3);
2955     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
2956     s->exit_to_mainloop = true;
2957     return DISAS_TOO_MANY;
2958 }
2959 
2960 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
2961 {
2962     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2963     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2964 
2965     gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2966     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
2967     s->exit_to_mainloop = true;
2968     return DISAS_TOO_MANY;
2969 }
2970 
2971 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
2972 {
2973     gen_helper_lra(o->out, cpu_env, o->in2);
2974     set_cc_static(s);
2975     return DISAS_NEXT;
2976 }
2977 
2978 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
2979 {
2980     tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
2981     return DISAS_NEXT;
2982 }
2983 
2984 static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
2985 {
2986     TCGv_i64 t1, t2;
2987 
2988     per_breaking_event(s);
2989 
2990     t1 = tcg_temp_new_i64();
2991     t2 = tcg_temp_new_i64();
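    /* The operand must be aligned on a doubleword boundary, hence
       MO_ALIGN_8 on the first (4-byte) load.  */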
2992     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
2993                         MO_TEUL | MO_ALIGN_8);
2994     tcg_gen_addi_i64(o->in2, o->in2, 4);
2995     tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2996     /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
2997     tcg_gen_shli_i64(t1, t1, 32);
2998     gen_helper_load_psw(cpu_env, t1, t2);
2999     tcg_temp_free_i64(t1);
3000     tcg_temp_free_i64(t2);
3001     return DISAS_NORETURN;
3002 }
3003 
3004 static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
3005 {
3006     TCGv_i64 t1, t2;
3007 
3008     per_breaking_event(s);
3009 
3010     t1 = tcg_temp_new_i64();
3011     t2 = tcg_temp_new_i64();
3012     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3013                         MO_TEUQ | MO_ALIGN_8);
3014     tcg_gen_addi_i64(o->in2, o->in2, 8);
3015     tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
3016     gen_helper_load_psw(cpu_env, t1, t2);
3017     tcg_temp_free_i64(t1);
3018     tcg_temp_free_i64(t2);
3019     return DISAS_NORETURN;
3020 }
3021 #endif
3022 
3023 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
3024 {
3025     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
3026     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
3027 
3028     gen_helper_lam(cpu_env, r1, o->in2, r3);
3029     return DISAS_NEXT;
3030 }
3031 
3032 static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
3033 {
3034     int r1 = get_field(s, r1);
3035     int r3 = get_field(s, r3);
3036     TCGv_i64 t1, t2;
3037 
3038     /* Only one register to read. */
3039     t1 = tcg_temp_new_i64();
3040     if (unlikely(r1 == r3)) {
3041         tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3042         store_reg32_i64(r1, t1);
3043         tcg_temp_free(t1);
3044         return DISAS_NEXT;
3045     }
3046 
3047     /* First load the values of the first and last registers to trigger
3048        possible page faults. */
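    /* Register numbers wrap modulo 16, hence the "& 15" arithmetic.  */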
3049     t2 = tcg_temp_new_i64();
3050     tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3051     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3052     tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3053     store_reg32_i64(r1, t1);
3054     store_reg32_i64(r3, t2);
3055 
3056     /* Only two registers to read. */
3057     if (((r1 + 1) & 15) == r3) {
3058         tcg_temp_free(t2);
3059         tcg_temp_free(t1);
3060         return DISAS_NEXT;
3061     }
3062 
3063     /* Then load the remaining registers. A page fault can't occur. */
3064     r3 = (r3 - 1) & 15;
3065     tcg_gen_movi_i64(t2, 4);
3066     while (r1 != r3) {
3067         r1 = (r1 + 1) & 15;
3068         tcg_gen_add_i64(o->in2, o->in2, t2);
3069         tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3070         store_reg32_i64(r1, t1);
3071     }
3072     tcg_temp_free(t2);
3073     tcg_temp_free(t1);
3074 
3075     return DISAS_NEXT;
3076 }
3077 
3078 static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
3079 {
3080     int r1 = get_field(s, r1);
3081     int r3 = get_field(s, r3);
3082     TCGv_i64 t1, t2;
3083 
3084     /* Only one register to read. */
3085     t1 = tcg_temp_new_i64();
3086     if (unlikely(r1 == r3)) {
3087         tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3088         store_reg32h_i64(r1, t1);
3089         tcg_temp_free(t1);
3090         return DISAS_NEXT;
3091     }
3092 
3093     /* First load the values of the first and last registers to trigger
3094        possible page faults. */
3095     t2 = tcg_temp_new_i64();
3096     tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3097     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3098     tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3099     store_reg32h_i64(r1, t1);
3100     store_reg32h_i64(r3, t2);
3101 
3102     /* Only two registers to read. */
3103     if (((r1 + 1) & 15) == r3) {
3104         tcg_temp_free(t2);
3105         tcg_temp_free(t1);
3106         return DISAS_NEXT;
3107     }
3108 
3109     /* Then load the remaining registers. A page fault can't occur. */
3110     r3 = (r3 - 1) & 15;
3111     tcg_gen_movi_i64(t2, 4);
3112     while (r1 != r3) {
3113         r1 = (r1 + 1) & 15;
3114         tcg_gen_add_i64(o->in2, o->in2, t2);
3115         tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3116         store_reg32h_i64(r1, t1);
3117     }
3118     tcg_temp_free(t2);
3119     tcg_temp_free(t1);
3120 
3121     return DISAS_NEXT;
3122 }
3123 
3124 static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3125 {
3126     int r1 = get_field(s, r1);
3127     int r3 = get_field(s, r3);
3128     TCGv_i64 t1, t2;
3129 
3130     /* Only one register to read. */
3131     if (unlikely(r1 == r3)) {
3132         tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3133         return DISAS_NEXT;
3134     }
3135 
3136     /* First load the values of the first and last registers to trigger
3137        possible page faults. */
3138     t1 = tcg_temp_new_i64();
3139     t2 = tcg_temp_new_i64();
3140     tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
3141     tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3142     tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
3143     tcg_gen_mov_i64(regs[r1], t1);
3144     tcg_temp_free(t2);
3145 
3146     /* Only two registers to read. */
3147     if (((r1 + 1) & 15) == r3) {
3148         tcg_temp_free(t1);
3149         return DISAS_NEXT;
3150     }
3151 
3152     /* Then load the remaining registers. A page fault can't occur. */
3153     r3 = (r3 - 1) & 15;
3154     tcg_gen_movi_i64(t1, 8);
3155     while (r1 != r3) {
3156         r1 = (r1 + 1) & 15;
3157         tcg_gen_add_i64(o->in2, o->in2, t1);
3158         tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3159     }
3160     tcg_temp_free(t1);
3161 
3162     return DISAS_NEXT;
3163 }
3164 
3165 static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3166 {
3167     TCGv_i64 a1, a2;
3168     MemOp mop = s->insn->data;
3169 
3170     /* In a parallel context, stop the world and single step.  */
3171     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3172         update_psw_addr(s);
3173         update_cc_op(s);
3174         gen_exception(EXCP_ATOMIC);
3175         return DISAS_NORETURN;
3176     }
3177 
3178     /* In a serial context, perform the two loads ... */
3179     a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
3180     a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3181     tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3182     tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3183     tcg_temp_free_i64(a1);
3184     tcg_temp_free_i64(a2);
3185 
3186     /* ... and indicate that we performed them while interlocked.  */
3187     gen_op_movi_cc(s, 0);
3188     return DISAS_NEXT;
3189 }
3190 
3191 static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3192 {
3193     if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3194         gen_helper_lpq(o->out, cpu_env, o->in2);
3195     } else if (HAVE_ATOMIC128) {
3196         gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
3197     } else {
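        /* Parallel context but no usable 128-bit atomic operation:
           punt to the exclusive-execution slow path and let the
           instruction be re-executed serially.  */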
3198         gen_helper_exit_atomic(cpu_env);
3199         return DISAS_NORETURN;
3200     }
3201     return_low128(o->out2);
3202     return DISAS_NEXT;
3203 }
3204 
3205 #ifndef CONFIG_USER_ONLY
3206 static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3207 {
3208     tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
3209     return DISAS_NEXT;
3210 }
3211 #endif
3212 
3213 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3214 {
3215     tcg_gen_andi_i64(o->out, o->in2, -256);
3216     return DISAS_NEXT;
3217 }
3218 
3219 static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3220 {
3221     const int64_t block_size = (1ull << (get_field(s, m3) + 6));
3222 
3223     if (get_field(s, m3) > 6) {
3224         gen_program_exception(s, PGM_SPECIFICATION);
3225         return DISAS_NORETURN;
3226     }
3227 
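    /* Bytes to the next block boundary: -(addr | -block_size) equals
       block_size - (addr % block_size); cap the result at 16.  */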
3228     tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3229     tcg_gen_neg_i64(o->addr1, o->addr1);
3230     tcg_gen_movi_i64(o->out, 16);
3231     tcg_gen_umin_i64(o->out, o->out, o->addr1);
3232     gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
3233     return DISAS_NEXT;
3234 }
3235 
3236 static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
3237 {
3238     const uint16_t monitor_class = get_field(s, i2);
3239 
3240     if (monitor_class & 0xff00) {
3241         gen_program_exception(s, PGM_SPECIFICATION);
3242         return DISAS_NORETURN;
3243     }
3244 
3245 #if !defined(CONFIG_USER_ONLY)
3246     gen_helper_monitor_call(cpu_env, o->addr1,
3247                             tcg_constant_i32(monitor_class));
3248 #endif
3249     /* Defaults to a NOP. */
3250     return DISAS_NEXT;
3251 }
3252 
3253 static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3254 {
3255     o->out = o->in2;
3256     o->g_out = o->g_in2;
3257     o->in2 = NULL;
3258     o->g_in2 = false;
3259     return DISAS_NEXT;
3260 }
3261 
3262 static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3263 {
3264     int b2 = get_field(s, b2);
3265     TCGv ar1 = tcg_temp_new_i64();
3266 
3267     o->out = o->in2;
3268     o->g_out = o->g_in2;
3269     o->in2 = NULL;
3270     o->g_in2 = false;
3271 
3272     switch (s->base.tb->flags & FLAG_MASK_ASC) {
3273     case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3274         tcg_gen_movi_i64(ar1, 0);
3275         break;
3276     case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3277         tcg_gen_movi_i64(ar1, 1);
3278         break;
3279     case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3280         if (b2) {
3281             tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3282         } else {
3283             tcg_gen_movi_i64(ar1, 0);
3284         }
3285         break;
3286     case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3287         tcg_gen_movi_i64(ar1, 2);
3288         break;
3289     }
3290 
3291     tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3292     tcg_temp_free_i64(ar1);
3293 
3294     return DISAS_NEXT;
3295 }
3296 
3297 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3298 {
3299     o->out = o->in1;
3300     o->out2 = o->in2;
3301     o->g_out = o->g_in1;
3302     o->g_out2 = o->g_in2;
3303     o->in1 = NULL;
3304     o->in2 = NULL;
3305     o->g_in1 = o->g_in2 = false;
3306     return DISAS_NEXT;
3307 }
3308 
3309 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3310 {
3311     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3312 
3313     gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3314     return DISAS_NEXT;
3315 }
3316 
3317 static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
3318 {
3319     gen_helper_mvcrl(cpu_env, regs[0], o->addr1, o->in2);
3320     return DISAS_NEXT;
3321 }
3322 
3323 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3324 {
3325     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3326 
3327     gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3328     return DISAS_NEXT;
3329 }
3330 
3331 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3332 {
3333     int r1 = get_field(s, r1);
3334     int r2 = get_field(s, r2);
3335     TCGv_i32 t1, t2;
3336 
3337     /* r1 and r2 must be even.  */
3338     if (r1 & 1 || r2 & 1) {
3339         gen_program_exception(s, PGM_SPECIFICATION);
3340         return DISAS_NORETURN;
3341     }
3342 
3343     t1 = tcg_constant_i32(r1);
3344     t2 = tcg_constant_i32(r2);
3345     gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3346     set_cc_static(s);
3347     return DISAS_NEXT;
3348 }
3349 
3350 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3351 {
3352     int r1 = get_field(s, r1);
3353     int r3 = get_field(s, r3);
3354     TCGv_i32 t1, t3;
3355 
3356     /* r1 and r3 must be even.  */
3357     if (r1 & 1 || r3 & 1) {
3358         gen_program_exception(s, PGM_SPECIFICATION);
3359         return DISAS_NORETURN;
3360     }
3361 
3362     t1 = tcg_constant_i32(r1);
3363     t3 = tcg_constant_i32(r3);
3364     gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3365     set_cc_static(s);
3366     return DISAS_NEXT;
3367 }
3368 
3369 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3370 {
3371     int r1 = get_field(s, r1);
3372     int r3 = get_field(s, r3);
3373     TCGv_i32 t1, t3;
3374 
3375     /* r1 and r3 must be even.  */
3376     if (r1 & 1 || r3 & 1) {
3377         gen_program_exception(s, PGM_SPECIFICATION);
3378         return DISAS_NORETURN;
3379     }
3380 
3381     t1 = tcg_constant_i32(r1);
3382     t3 = tcg_constant_i32(r3);
3383     gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3384     set_cc_static(s);
3385     return DISAS_NEXT;
3386 }
3387 
3388 static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3389 {
3390     int r3 = get_field(s, r3);
3391     gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3392     set_cc_static(s);
3393     return DISAS_NEXT;
3394 }
3395 
3396 #ifndef CONFIG_USER_ONLY
3397 static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3398 {
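    /* MVCP is SS-d format: its r1 operand occupies the l1 field
       position, hence get_field(s, l1).  */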
3399     int r1 = get_field(s, l1);
3400     int r3 = get_field(s, r3);
3401     gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]);
3402     set_cc_static(s);
3403     return DISAS_NEXT;
3404 }
3405 
3406 static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3407 {
3408     int r1 = get_field(s, l1);
3409     int r3 = get_field(s, r3);
3410     gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]);
3411     set_cc_static(s);
3412     return DISAS_NEXT;
3413 }
3414 #endif
3415 
3416 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3417 {
3418     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3419 
3420     gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3421     return DISAS_NEXT;
3422 }
3423 
3424 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3425 {
3426     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3427 
3428     gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3429     return DISAS_NEXT;
3430 }
3431 
3432 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3433 {
3434     TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
3435     TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
3436 
3437     gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);
3438     set_cc_static(s);
3439     return DISAS_NEXT;
3440 }
3441 
3442 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3443 {
3444     TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
3445     TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
3446 
3447     gen_helper_mvst(cc_op, cpu_env, t1, t2);
3448     set_cc_static(s);
3449     return DISAS_NEXT;
3450 }
3451 
3452 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3453 {
3454     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3455 
3456     gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3457     return DISAS_NEXT;
3458 }
3459 
3460 static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3461 {
3462     tcg_gen_mul_i64(o->out, o->in1, o->in2);
3463     return DISAS_NEXT;
3464 }
3465 
3466 static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3467 {
3468     tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3469     return DISAS_NEXT;
3470 }
3471 
3472 static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
3473 {
3474     tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
3475     return DISAS_NEXT;
3476 }
3477 
3478 static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3479 {
3480     gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3481     return DISAS_NEXT;
3482 }
3483 
3484 static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3485 {
3486     gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3487     return DISAS_NEXT;
3488 }
3489 
3490 static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3491 {
3492     gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3493     return DISAS_NEXT;
3494 }
3495 
3496 static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3497 {
3498     gen_helper_mxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
3499     return DISAS_NEXT;
3500 }
3501 
3502 static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3503 {
3504     gen_helper_mxdb(o->out_128, cpu_env, o->in1_128, o->in2);
3505     return DISAS_NEXT;
3506 }
3507 
3508 static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3509 {
3510     TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3511     gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3512     tcg_temp_free_i64(r3);
3513     return DISAS_NEXT;
3514 }
3515 
3516 static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3517 {
3518     TCGv_i64 r3 = load_freg(get_field(s, r3));
3519     gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
3520     tcg_temp_free_i64(r3);
3521     return DISAS_NEXT;
3522 }
3523 
3524 static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3525 {
3526     TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3527     gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3528     tcg_temp_free_i64(r3);
3529     return DISAS_NEXT;
3530 }
3531 
3532 static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3533 {
3534     TCGv_i64 r3 = load_freg(get_field(s, r3));
3535     gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
3536     tcg_temp_free_i64(r3);
3537     return DISAS_NEXT;
3538 }
3539 
3540 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3541 {
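    /* LOAD NEGATIVE: out = -|in2|, i.e. force the sign negative.  */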
3542     TCGv_i64 z = tcg_constant_i64(0);
3543     TCGv_i64 n = tcg_temp_new_i64();
3544 
3545     tcg_gen_neg_i64(n, o->in2);
3546     tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3547     tcg_temp_free_i64(n);
3548     return DISAS_NEXT;
3549 }
3550 
3551 static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3552 {
3553     tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3554     return DISAS_NEXT;
3555 }
3556 
3557 static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3558 {
3559     tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3560     return DISAS_NEXT;
3561 }
3562 
3563 static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3564 {
3565     tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3566     tcg_gen_mov_i64(o->out2, o->in2);
3567     return DISAS_NEXT;
3568 }
3569 
3570 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3571 {
3572     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3573 
3574     gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3575     set_cc_static(s);
3576     return DISAS_NEXT;
3577 }
3578 
3579 static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3580 {
3581     tcg_gen_neg_i64(o->out, o->in2);
3582     return DISAS_NEXT;
3583 }
3584 
3585 static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3586 {
3587     tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3588     return DISAS_NEXT;
3589 }
3590 
3591 static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3592 {
3593     tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3594     return DISAS_NEXT;
3595 }
3596 
3597 static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3598 {
3599     tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3600     tcg_gen_mov_i64(o->out2, o->in2);
3601     return DISAS_NEXT;
3602 }
3603 
3604 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3605 {
3606     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3607 
3608     gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3609     set_cc_static(s);
3610     return DISAS_NEXT;
3611 }
3612 
3613 static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3614 {
3615     tcg_gen_or_i64(o->out, o->in1, o->in2);
3616     return DISAS_NEXT;
3617 }
3618 
3619 static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3620 {
3621     int shift = s->insn->data & 0xff;
3622     int size = s->insn->data >> 8;
3623     uint64_t mask = ((1ull << size) - 1) << shift;
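    /*
     * As decoded above, insn->data packs the immediate field's size in
     * its high byte and its left-shift amount in the low byte; e.g. a
     * variant touching the leftmost halfword of the register would use
     * size 16, shift 48.
     */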
3624 
3625     assert(!o->g_in2);
3626     tcg_gen_shli_i64(o->in2, o->in2, shift);
3627     tcg_gen_or_i64(o->out, o->in1, o->in2);
3628 
3629     /* Produce the CC from only the bits manipulated.  */
3630     tcg_gen_andi_i64(cc_dst, o->out, mask);
3631     set_cc_nz_u64(s, cc_dst);
3632     return DISAS_NEXT;
3633 }
3634 
3635 static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
3636 {
3637     o->in1 = tcg_temp_new_i64();
3638 
3639     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3640         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3641     } else {
3642         /* Perform the atomic operation in memory. */
3643         tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3644                                     s->insn->data);
3645     }
3646 
3647     /* Recompute also for atomic case: needed for setting CC. */
3648     tcg_gen_or_i64(o->out, o->in1, o->in2);
3649 
3650     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3651         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3652     }
3653     return DISAS_NEXT;
3654 }
3655 
3656 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3657 {
3658     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3659 
3660     gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3661     return DISAS_NEXT;
3662 }
3663 
3664 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3665 {
3666     int l2 = get_field(s, l2) + 1;
3667     TCGv_i32 l;
3668 
3669     /* The length must not exceed 32 bytes.  */
3670     if (l2 > 32) {
3671         gen_program_exception(s, PGM_SPECIFICATION);
3672         return DISAS_NORETURN;
3673     }
3674     l = tcg_constant_i32(l2);
3675     gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3676     return DISAS_NEXT;
3677 }
3678 
3679 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3680 {
3681     int l2 = get_field(s, l2) + 1;
3682     TCGv_i32 l;
3683 
    /* The length must be even and must not exceed 64 bytes.  */
3685     if ((l2 & 1) || (l2 > 64)) {
3686         gen_program_exception(s, PGM_SPECIFICATION);
3687         return DISAS_NORETURN;
3688     }
3689     l = tcg_constant_i32(l2);
3690     gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3691     return DISAS_NEXT;
3692 }
3693 
3694 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3695 {
3696     const uint8_t m3 = get_field(s, m3);
3697 
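    /*
     * With m3 bit 0 set (and the misc-instruction-extensions facility 3
     * installed), POPCNT counts bits across the whole doubleword;
     * otherwise each result byte receives the population count of the
     * corresponding input byte.
     */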
3698     if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) {
3699         tcg_gen_ctpop_i64(o->out, o->in2);
3700     } else {
3701         gen_helper_popcnt(o->out, o->in2);
3702     }
3703     return DISAS_NEXT;
3704 }
3705 
3706 #ifndef CONFIG_USER_ONLY
3707 static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
3708 {
3709     gen_helper_ptlb(cpu_env);
3710     return DISAS_NEXT;
3711 }
3712 #endif
3713 
3714 static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
3715 {
3716     int i3 = get_field(s, i3);
3717     int i4 = get_field(s, i4);
3718     int i5 = get_field(s, i5);
3719     int do_zero = i4 & 0x80;
3720     uint64_t mask, imask, pmask;
3721     int pos, len, rot;
3722 
3723     /* Adjust the arguments for the specific insn.  */
3724     switch (s->fields.op2) {
3725     case 0x55: /* risbg */
3726     case 0x59: /* risbgn */
3727         i3 &= 63;
3728         i4 &= 63;
3729         pmask = ~0;
3730         break;
3731     case 0x5d: /* risbhg */
3732         i3 &= 31;
3733         i4 &= 31;
3734         pmask = 0xffffffff00000000ull;
3735         break;
3736     case 0x51: /* risblg */
3737         i3 = (i3 & 31) + 32;
3738         i4 = (i4 & 31) + 32;
3739         pmask = 0x00000000ffffffffull;
3740         break;
3741     default:
3742         g_assert_not_reached();
3743     }
3744 
3745     /* MASK is the set of bits to be inserted from R2. */
3746     if (i3 <= i4) {
3747         /* [0...i3---i4...63] */
3748         mask = (-1ull >> i3) & (-1ull << (63 - i4));
3749     } else {
3750         /* [0---i4...i3---63] */
3751         mask = (-1ull >> i3) | (-1ull << (63 - i4));
3752     }
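    /* E.g. i3 = 40, i4 = 47 selects bits 40-47: mask = 0x0000000000ff0000. */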
3753     /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
3754     mask &= pmask;
3755 
3756     /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
3757        insns, we need to keep the other half of the register.  */
3758     imask = ~mask | ~pmask;
3759     if (do_zero) {
3760         imask = ~pmask;
3761     }
3762 
3763     len = i4 - i3 + 1;
3764     pos = 63 - i4;
3765     rot = i5 & 63;
3766 
3767     /* In some cases we can implement this with extract.  */
3768     if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3769         tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3770         return DISAS_NEXT;
3771     }
3772 
3773     /* In some cases we can implement this with deposit.  */
3774     if (len > 0 && (imask == 0 || ~mask == imask)) {
3775         /* Note that we rotate the bits to be inserted to the lsb, not to
3776            the position as described in the PoO.  */
3777         rot = (rot - pos) & 63;
3778     } else {
3779         pos = -1;
3780     }
3781 
3782     /* Rotate the input as necessary.  */
3783     tcg_gen_rotli_i64(o->in2, o->in2, rot);
3784 
3785     /* Insert the selected bits into the output.  */
3786     if (pos >= 0) {
3787         if (imask == 0) {
3788             tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3789         } else {
3790             tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3791         }
3792     } else if (imask == 0) {
3793         tcg_gen_andi_i64(o->out, o->in2, mask);
3794     } else {
3795         tcg_gen_andi_i64(o->in2, o->in2, mask);
3796         tcg_gen_andi_i64(o->out, o->out, imask);
3797         tcg_gen_or_i64(o->out, o->out, o->in2);
3798     }
3799     return DISAS_NEXT;
3800 }
3801 
3802 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3803 {
3804     int i3 = get_field(s, i3);
3805     int i4 = get_field(s, i4);
3806     int i5 = get_field(s, i5);
3807     uint64_t mask;
3808 
3809     /* If this is a test-only form, arrange to discard the result.  */
3810     if (i3 & 0x80) {
3811         o->out = tcg_temp_new_i64();
3812         o->g_out = false;
3813     }
3814 
3815     i3 &= 63;
3816     i4 &= 63;
3817     i5 &= 63;
3818 
3819     /* MASK is the set of bits to be operated on from R2.
3820        Take care for I3/I4 wraparound.  */
3821     mask = ~0ull >> i3;
3822     if (i3 <= i4) {
3823         mask ^= ~0ull >> i4 >> 1;
3824     } else {
3825         mask |= ~(~0ull >> i4 >> 1);
3826     }
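    /* E.g. i3 = 8, i4 = 15 yields mask = 0x00ff000000000000. */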
3827 
3828     /* Rotate the input as necessary.  */
3829     tcg_gen_rotli_i64(o->in2, o->in2, i5);
3830 
3831     /* Operate.  */
3832     switch (s->fields.op2) {
3833     case 0x54: /* AND */
3834         tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3835         tcg_gen_and_i64(o->out, o->out, o->in2);
3836         break;
3837     case 0x56: /* OR */
3838         tcg_gen_andi_i64(o->in2, o->in2, mask);
3839         tcg_gen_or_i64(o->out, o->out, o->in2);
3840         break;
3841     case 0x57: /* XOR */
3842         tcg_gen_andi_i64(o->in2, o->in2, mask);
3843         tcg_gen_xor_i64(o->out, o->out, o->in2);
3844         break;
3845     default:
        g_assert_not_reached();
3847     }
3848 
3849     /* Set the CC.  */
3850     tcg_gen_andi_i64(cc_dst, o->out, mask);
3851     set_cc_nz_u64(s, cc_dst);
3852     return DISAS_NEXT;
3853 }
3854 
3855 static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
3856 {
3857     tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
3858     return DISAS_NEXT;
3859 }
3860 
3861 static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
3862 {
3863     tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
3864     return DISAS_NEXT;
3865 }
3866 
3867 static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
3868 {
3869     tcg_gen_bswap64_i64(o->out, o->in2);
3870     return DISAS_NEXT;
3871 }
3872 
3873 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3874 {
3875     TCGv_i32 t1 = tcg_temp_new_i32();
3876     TCGv_i32 t2 = tcg_temp_new_i32();
3877     TCGv_i32 to = tcg_temp_new_i32();
3878     tcg_gen_extrl_i64_i32(t1, o->in1);
3879     tcg_gen_extrl_i64_i32(t2, o->in2);
3880     tcg_gen_rotl_i32(to, t1, t2);
3881     tcg_gen_extu_i32_i64(o->out, to);
3882     tcg_temp_free_i32(t1);
3883     tcg_temp_free_i32(t2);
3884     tcg_temp_free_i32(to);
3885     return DISAS_NEXT;
3886 }
3887 
3888 static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
3889 {
3890     tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3891     return DISAS_NEXT;
3892 }
3893 
3894 #ifndef CONFIG_USER_ONLY
3895 static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
3896 {
3897     gen_helper_rrbe(cc_op, cpu_env, o->in2);
3898     set_cc_static(s);
3899     return DISAS_NEXT;
3900 }
3901 
3902 static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
3903 {
3904     gen_helper_sacf(cpu_env, o->in2);
3905     /* Addressing mode has changed, so end the block.  */
3906     return DISAS_TOO_MANY;
3907 }
3908 #endif
3909 
3910 static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
3911 {
3912     int sam = s->insn->data;
3913     TCGv_i64 tsam;
3914     uint64_t mask;
3915 
3916     switch (sam) {
3917     case 0:
3918         mask = 0xffffff;
3919         break;
3920     case 1:
3921         mask = 0x7fffffff;
3922         break;
3923     default:
3924         mask = -1;
3925         break;
3926     }
3927 
3928     /* Bizarre but true, we check the address of the current insn for the
3929        specification exception, not the next to be executed.  Thus the PoO
3930        documents that Bad Things Happen two bytes before the end.  */
3931     if (s->base.pc_next & ~mask) {
3932         gen_program_exception(s, PGM_SPECIFICATION);
3933         return DISAS_NORETURN;
3934     }
3935     s->pc_tmp &= mask;
3936 
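    /* Update the addressing-mode bits, PSW bits 31 and 32. */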
3937     tsam = tcg_constant_i64(sam);
3938     tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3939 
3940     /* Always exit the TB, since we (may have) changed execution mode.  */
3941     return DISAS_TOO_MANY;
3942 }
3943 
3944 static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
3945 {
3946     int r1 = get_field(s, r1);
3947     tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3948     return DISAS_NEXT;
3949 }
3950 
3951 static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
3952 {
3953     gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3954     return DISAS_NEXT;
3955 }
3956 
3957 static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
3958 {
3959     gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3960     return DISAS_NEXT;
3961 }
3962 
3963 static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
3964 {
3965     gen_helper_sxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
3966     return DISAS_NEXT;
3967 }
3968 
3969 static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
3970 {
3971     gen_helper_sqeb(o->out, cpu_env, o->in2);
3972     return DISAS_NEXT;
3973 }
3974 
3975 static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
3976 {
3977     gen_helper_sqdb(o->out, cpu_env, o->in2);
3978     return DISAS_NEXT;
3979 }
3980 
3981 static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
3982 {
3983     gen_helper_sqxb(o->out_128, cpu_env, o->in2_128);
3984     return DISAS_NEXT;
3985 }
3986 
3987 #ifndef CONFIG_USER_ONLY
3988 static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
3989 {
3990     gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
3991     set_cc_static(s);
3992     return DISAS_NEXT;
3993 }
3994 
3995 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
3996 {
3997     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
3998     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
3999 
4000     gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
4001     set_cc_static(s);
4002     return DISAS_NEXT;
4003 }
4004 #endif
4005 
4006 static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
4007 {
4008     DisasCompare c;
4009     TCGv_i64 a, h;
4010     TCGLabel *lab;
4011     int r1;
4012 
4013     disas_jcc(s, &c, get_field(s, m3));
4014 
    /* We want to store when the condition is fulfilled, so branch
       out when it's not.  */
4017     c.cond = tcg_invert_cond(c.cond);
4018 
4019     lab = gen_new_label();
4020     if (c.is_64) {
4021         tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
4022     } else {
4023         tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
4024     }
4025     free_compare(&c);
4026 
4027     r1 = get_field(s, r1);
4028     a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
4029     switch (s->insn->data) {
4030     case 1: /* STOCG */
4031         tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
4032         break;
4033     case 0: /* STOC */
4034         tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
4035         break;
4036     case 2: /* STOCFH */
4037         h = tcg_temp_new_i64();
4038         tcg_gen_shri_i64(h, regs[r1], 32);
4039         tcg_gen_qemu_st32(h, a, get_mem_index(s));
4040         tcg_temp_free_i64(h);
4041         break;
4042     default:
4043         g_assert_not_reached();
4044     }
4045     tcg_temp_free_i64(a);
4046 
4047     gen_set_label(lab);
4048     return DISAS_NEXT;
4049 }
4050 
4051 static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
4052 {
4053     TCGv_i64 t;
4054     uint64_t sign = 1ull << s->insn->data;
4055     if (s->insn->data == 31) {
4056         t = tcg_temp_new_i64();
4057         tcg_gen_shli_i64(t, o->in1, 32);
4058     } else {
4059         t = o->in1;
4060     }
4061     gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
4062     if (s->insn->data == 31) {
4063         tcg_temp_free_i64(t);
4064     }
4065     tcg_gen_shl_i64(o->out, o->in1, o->in2);
4066     /* The arithmetic left shift is curious in that it does not affect
4067        the sign bit.  Copy that over from the source unchanged.  */
4068     tcg_gen_andi_i64(o->out, o->out, ~sign);
4069     tcg_gen_andi_i64(o->in1, o->in1, sign);
4070     tcg_gen_or_i64(o->out, o->out, o->in1);
4071     return DISAS_NEXT;
4072 }
4073 
4074 static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
4075 {
4076     tcg_gen_shl_i64(o->out, o->in1, o->in2);
4077     return DISAS_NEXT;
4078 }
4079 
4080 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
4081 {
4082     tcg_gen_sar_i64(o->out, o->in1, o->in2);
4083     return DISAS_NEXT;
4084 }
4085 
4086 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
4087 {
4088     tcg_gen_shr_i64(o->out, o->in1, o->in2);
4089     return DISAS_NEXT;
4090 }
4091 
4092 static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
4093 {
4094     gen_helper_sfpc(cpu_env, o->in2);
4095     return DISAS_NEXT;
4096 }
4097 
4098 static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
4099 {
4100     gen_helper_sfas(cpu_env, o->in2);
4101     return DISAS_NEXT;
4102 }
4103 
4104 static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
4105 {
4106     /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
4107     tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
4108     gen_helper_srnm(cpu_env, o->addr1);
4109     return DISAS_NEXT;
4110 }
4111 
4112 static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
4113 {
    /* Bits 0-55 are ignored. */
4115     tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
4116     gen_helper_srnm(cpu_env, o->addr1);
4117     return DISAS_NEXT;
4118 }
4119 
4120 static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
4121 {
4122     TCGv_i64 tmp = tcg_temp_new_i64();
4123 
4124     /* Bits other than 61-63 are ignored. */
4125     tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);
4126 
    /* No need to call a helper, we don't implement DFP.  */
4128     tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4129     tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
4130     tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4131 
4132     tcg_temp_free_i64(tmp);
4133     return DISAS_NEXT;
4134 }
4135 
4136 static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
4137 {
4138     tcg_gen_extrl_i64_i32(cc_op, o->in1);
4139     tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
4140     set_cc_static(s);
4141 
4142     tcg_gen_shri_i64(o->in1, o->in1, 24);
4143     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
4144     return DISAS_NEXT;
4145 }
4146 
4147 static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
4148 {
4149     int b1 = get_field(s, b1);
4150     int d1 = get_field(s, d1);
4151     int b2 = get_field(s, b2);
4152     int d2 = get_field(s, d2);
4153     int r3 = get_field(s, r3);
4154     TCGv_i64 tmp = tcg_temp_new_i64();
4155 
4156     /* fetch all operands first */
4157     o->in1 = tcg_temp_new_i64();
4158     tcg_gen_addi_i64(o->in1, regs[b1], d1);
4159     o->in2 = tcg_temp_new_i64();
4160     tcg_gen_addi_i64(o->in2, regs[b2], d2);
4161     o->addr1 = tcg_temp_new_i64();
4162     gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);
4163 
4164     /* load the third operand into r3 before modifying anything */
4165     tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
4166 
4167     /* subtract CPU timer from first operand and store in GR0 */
4168     gen_helper_stpt(tmp, cpu_env);
4169     tcg_gen_sub_i64(regs[0], o->in1, tmp);
4170 
4171     /* store second operand in GR1 */
4172     tcg_gen_mov_i64(regs[1], o->in2);
4173 
4174     tcg_temp_free_i64(tmp);
4175     return DISAS_NEXT;
4176 }
4177 
4178 #ifndef CONFIG_USER_ONLY
4179 static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
4180 {
4181     tcg_gen_shri_i64(o->in2, o->in2, 4);
4182     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
4183     return DISAS_NEXT;
4184 }
4185 
4186 static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
4187 {
4188     gen_helper_sske(cpu_env, o->in1, o->in2);
4189     return DISAS_NEXT;
4190 }
4191 
4192 static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
4193 {
4194     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
4195     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4196     s->exit_to_mainloop = true;
4197     return DISAS_TOO_MANY;
4198 }
4199 
4200 static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
4201 {
4202     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
4203     return DISAS_NEXT;
4204 }
4205 #endif
4206 
4207 static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
4208 {
4209     gen_helper_stck(o->out, cpu_env);
4210     /* ??? We don't implement clock states.  */
4211     gen_op_movi_cc(s, 0);
4212     return DISAS_NEXT;
4213 }
4214 
4215 static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
4216 {
4217     TCGv_i64 c1 = tcg_temp_new_i64();
4218     TCGv_i64 c2 = tcg_temp_new_i64();
4219     TCGv_i64 todpr = tcg_temp_new_i64();
4220     gen_helper_stck(c1, cpu_env);
    /* 16-bit value stored in a uint32_t (only valid bits set) */
4222     tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
4223     /* Shift the 64-bit value into its place as a zero-extended
4224        104-bit value.  Note that "bit positions 64-103 are always
4225        non-zero so that they compare differently to STCK"; we set
4226        the least significant bit to 1.  */
4227     tcg_gen_shli_i64(c2, c1, 56);
4228     tcg_gen_shri_i64(c1, c1, 8);
4229     tcg_gen_ori_i64(c2, c2, 0x10000);
4230     tcg_gen_or_i64(c2, c2, todpr);
4231     tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
4232     tcg_gen_addi_i64(o->in2, o->in2, 8);
4233     tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
4234     tcg_temp_free_i64(c1);
4235     tcg_temp_free_i64(c2);
4236     tcg_temp_free_i64(todpr);
4237     /* ??? We don't implement clock states.  */
4238     gen_op_movi_cc(s, 0);
4239     return DISAS_NEXT;
4240 }
4241 
4242 #ifndef CONFIG_USER_ONLY
4243 static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
4244 {
4245     gen_helper_sck(cc_op, cpu_env, o->in2);
4246     set_cc_static(s);
4247     return DISAS_NEXT;
4248 }
4249 
4250 static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
4251 {
4252     gen_helper_sckc(cpu_env, o->in2);
4253     return DISAS_NEXT;
4254 }
4255 
4256 static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
4257 {
4258     gen_helper_sckpf(cpu_env, regs[0]);
4259     return DISAS_NEXT;
4260 }
4261 
4262 static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
4263 {
4264     gen_helper_stckc(o->out, cpu_env);
4265     return DISAS_NEXT;
4266 }
4267 
4268 static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
4269 {
4270     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4271     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4272 
4273     gen_helper_stctg(cpu_env, r1, o->in2, r3);
4274     return DISAS_NEXT;
4275 }
4276 
4277 static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
4278 {
4279     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4280     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4281 
4282     gen_helper_stctl(cpu_env, r1, o->in2, r3);
4283     return DISAS_NEXT;
4284 }
4285 
4286 static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
4287 {
4288     tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
4289     return DISAS_NEXT;
4290 }
4291 
4292 static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
4293 {
4294     gen_helper_spt(cpu_env, o->in2);
4295     return DISAS_NEXT;
4296 }
4297 
4298 static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
4299 {
4300     gen_helper_stfl(cpu_env);
4301     return DISAS_NEXT;
4302 }
4303 
4304 static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
4305 {
4306     gen_helper_stpt(o->out, cpu_env);
4307     return DISAS_NEXT;
4308 }
4309 
4310 static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
4311 {
4312     gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
4313     set_cc_static(s);
4314     return DISAS_NEXT;
4315 }
4316 
4317 static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
4318 {
4319     gen_helper_spx(cpu_env, o->in2);
4320     return DISAS_NEXT;
4321 }
4322 
4323 static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
4324 {
4325     gen_helper_xsch(cpu_env, regs[1]);
4326     set_cc_static(s);
4327     return DISAS_NEXT;
4328 }
4329 
4330 static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
4331 {
4332     gen_helper_csch(cpu_env, regs[1]);
4333     set_cc_static(s);
4334     return DISAS_NEXT;
4335 }
4336 
4337 static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
4338 {
4339     gen_helper_hsch(cpu_env, regs[1]);
4340     set_cc_static(s);
4341     return DISAS_NEXT;
4342 }
4343 
4344 static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
4345 {
4346     gen_helper_msch(cpu_env, regs[1], o->in2);
4347     set_cc_static(s);
4348     return DISAS_NEXT;
4349 }
4350 
4351 static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
4352 {
4353     gen_helper_rchp(cpu_env, regs[1]);
4354     set_cc_static(s);
4355     return DISAS_NEXT;
4356 }
4357 
4358 static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
4359 {
4360     gen_helper_rsch(cpu_env, regs[1]);
4361     set_cc_static(s);
4362     return DISAS_NEXT;
4363 }
4364 
4365 static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
4366 {
4367     gen_helper_sal(cpu_env, regs[1]);
4368     return DISAS_NEXT;
4369 }
4370 
4371 static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
4372 {
4373     gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
4374     return DISAS_NEXT;
4375 }
4376 
4377 static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
4378 {
4379     /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4380     gen_op_movi_cc(s, 3);
4381     return DISAS_NEXT;
4382 }
4383 
4384 static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
4385 {
4386     /* The instruction is suppressed if not provided. */
4387     return DISAS_NEXT;
4388 }
4389 
4390 static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
4391 {
4392     gen_helper_ssch(cpu_env, regs[1], o->in2);
4393     set_cc_static(s);
4394     return DISAS_NEXT;
4395 }
4396 
4397 static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
4398 {
4399     gen_helper_stsch(cpu_env, regs[1], o->in2);
4400     set_cc_static(s);
4401     return DISAS_NEXT;
4402 }
4403 
4404 static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
4405 {
4406     gen_helper_stcrw(cpu_env, o->in2);
4407     set_cc_static(s);
4408     return DISAS_NEXT;
4409 }
4410 
4411 static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
4412 {
4413     gen_helper_tpi(cc_op, cpu_env, o->addr1);
4414     set_cc_static(s);
4415     return DISAS_NEXT;
4416 }
4417 
4418 static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
4419 {
4420     gen_helper_tsch(cpu_env, regs[1], o->in2);
4421     set_cc_static(s);
4422     return DISAS_NEXT;
4423 }
4424 
4425 static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
4426 {
4427     gen_helper_chsc(cpu_env, o->in2);
4428     set_cc_static(s);
4429     return DISAS_NEXT;
4430 }
4431 
4432 static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
4433 {
4434     tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4435     tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4436     return DISAS_NEXT;
4437 }
4438 
4439 static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
4440 {
4441     uint64_t i2 = get_field(s, i2);
4442     TCGv_i64 t;
4443 
    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store, then on a fault and
       restart we would have the wrong SYSTEM MASK in place.  */
4447     t = tcg_temp_new_i64();
4448     tcg_gen_shri_i64(t, psw_mask, 56);
4449     tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
4450     tcg_temp_free_i64(t);
4451 
4452     if (s->fields.op == 0xac) {
4453         tcg_gen_andi_i64(psw_mask, psw_mask,
4454                          (i2 << 56) | 0x00ffffffffffffffull);
4455     } else {
4456         tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4457     }
4458 
4459     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4460     s->exit_to_mainloop = true;
4461     return DISAS_TOO_MANY;
4462 }
4463 
4464 static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
4465 {
4466     tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);
4467 
4468     if (s->base.tb->flags & FLAG_MASK_PER) {
4469         update_psw_addr(s);
4470         gen_helper_per_store_real(cpu_env);
4471     }
4472     return DISAS_NEXT;
4473 }
4474 #endif
4475 
4476 static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
4477 {
4478     gen_helper_stfle(cc_op, cpu_env, o->in2);
4479     set_cc_static(s);
4480     return DISAS_NEXT;
4481 }
4482 
4483 static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
4484 {
4485     tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
4486     return DISAS_NEXT;
4487 }
4488 
4489 static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
4490 {
4491     tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
4492     return DISAS_NEXT;
4493 }
4494 
4495 static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
4496 {
4497     tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
4498     return DISAS_NEXT;
4499 }
4500 
4501 static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
4502 {
4503     tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
4504     return DISAS_NEXT;
4505 }
4506 
4507 static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
4508 {
4509     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4510     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4511 
4512     gen_helper_stam(cpu_env, r1, o->in2, r3);
4513     return DISAS_NEXT;
4514 }
4515 
4516 static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
4517 {
4518     int m3 = get_field(s, m3);
4519     int pos, base = s->insn->data;
4520     TCGv_i64 tmp = tcg_temp_new_i64();
4521 
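    /*
     * pos is the bit offset of the rightmost selected byte within the
     * field, so a contiguous mask can be handled by a single wider
     * store of the shifted value.
     */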
4522     pos = base + ctz32(m3) * 8;
4523     switch (m3) {
4524     case 0xf:
4525         /* Effectively a 32-bit store.  */
4526         tcg_gen_shri_i64(tmp, o->in1, pos);
4527         tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
4528         break;
4529 
4530     case 0xc:
4531     case 0x6:
4532     case 0x3:
4533         /* Effectively a 16-bit store.  */
4534         tcg_gen_shri_i64(tmp, o->in1, pos);
4535         tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4536         break;
4537 
4538     case 0x8:
4539     case 0x4:
4540     case 0x2:
4541     case 0x1:
4542         /* Effectively an 8-bit store.  */
4543         tcg_gen_shri_i64(tmp, o->in1, pos);
4544         tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4545         break;
4546 
4547     default:
4548         /* This is going to be a sequence of shifts and stores.  */
4549         pos = base + 32 - 8;
4550         while (m3) {
4551             if (m3 & 0x8) {
4552                 tcg_gen_shri_i64(tmp, o->in1, pos);
4553                 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4554                 tcg_gen_addi_i64(o->in2, o->in2, 1);
4555             }
4556             m3 = (m3 << 1) & 0xf;
4557             pos -= 8;
4558         }
4559         break;
4560     }
4561     tcg_temp_free_i64(tmp);
4562     return DISAS_NEXT;
4563 }
4564 
4565 static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4566 {
4567     int r1 = get_field(s, r1);
4568     int r3 = get_field(s, r3);
4569     int size = s->insn->data;
4570     TCGv_i64 tsize = tcg_constant_i64(size);
4571 
4572     while (1) {
4573         if (size == 8) {
4574             tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4575         } else {
4576             tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4577         }
4578         if (r1 == r3) {
4579             break;
4580         }
4581         tcg_gen_add_i64(o->in2, o->in2, tsize);
4582         r1 = (r1 + 1) & 15;
4583     }
4584 
4585     return DISAS_NEXT;
4586 }
4587 
4588 static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4589 {
4590     int r1 = get_field(s, r1);
4591     int r3 = get_field(s, r3);
4592     TCGv_i64 t = tcg_temp_new_i64();
4593     TCGv_i64 t4 = tcg_constant_i64(4);
4594     TCGv_i64 t32 = tcg_constant_i64(32);
4595 
4596     while (1) {
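        /* STMH stores the high-order halves; bring them down for st32. */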
        tcg_gen_shr_i64(t, regs[r1], t32);
4598         tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4599         if (r1 == r3) {
4600             break;
4601         }
4602         tcg_gen_add_i64(o->in2, o->in2, t4);
4603         r1 = (r1 + 1) & 15;
4604     }
4605 
4606     tcg_temp_free_i64(t);
4607     return DISAS_NEXT;
4608 }
4609 
4610 static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
4611 {
4612     if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4613         gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
4614     } else if (HAVE_ATOMIC128) {
4615         gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
4616     } else {
4617         gen_helper_exit_atomic(cpu_env);
4618         return DISAS_NORETURN;
4619     }
4620     return DISAS_NEXT;
4621 }
4622 
4623 static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
4624 {
4625     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4626     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4627 
4628     gen_helper_srst(cpu_env, r1, r2);
4629     set_cc_static(s);
4630     return DISAS_NEXT;
4631 }
4632 
4633 static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
4634 {
4635     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4636     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4637 
4638     gen_helper_srstu(cpu_env, r1, r2);
4639     set_cc_static(s);
4640     return DISAS_NEXT;
4641 }
4642 
4643 static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
4644 {
4645     tcg_gen_sub_i64(o->out, o->in1, o->in2);
4646     return DISAS_NEXT;
4647 }
4648 
4649 static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
4650 {
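    /*
     * Compute the 128-bit difference (0:in1) - (0:in2): the high half
     * leaves 0 in cc_src when there is no borrow and -1 when there is,
     * which is what CC_OP_SUBU expects.
     */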
4651     tcg_gen_movi_i64(cc_src, 0);
4652     tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
4653     return DISAS_NEXT;
4654 }
4655 
4656 /* Compute borrow (0, -1) into cc_src. */
4657 static void compute_borrow(DisasContext *s)
4658 {
4659     switch (s->cc_op) {
4660     case CC_OP_SUBU:
4661         /* The borrow value is already in cc_src (0,-1). */
4662         break;
4663     default:
4664         gen_op_calc_cc(s);
4665         /* fall through */
4666     case CC_OP_STATIC:
4667         /* The carry flag is the msb of CC; compute into cc_src. */
4668         tcg_gen_extu_i32_i64(cc_src, cc_op);
4669         tcg_gen_shri_i64(cc_src, cc_src, 1);
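        /* E.g. CC 2 or 3 (carry set) yields 1; CC 0 or 1 yields 0. */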
4670         /* fall through */
4671     case CC_OP_ADDU:
4672         /* Convert carry (1,0) to borrow (0,-1). */
4673         tcg_gen_subi_i64(cc_src, cc_src, 1);
4674         break;
4675     }
4676 }
4677 
4678 static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
4679 {
4680     compute_borrow(s);
4681 
4682     /* Borrow is {0, -1}, so add to subtract. */
4683     tcg_gen_add_i64(o->out, o->in1, cc_src);
4684     tcg_gen_sub_i64(o->out, o->out, o->in2);
4685     return DISAS_NEXT;
4686 }
4687 
4688 static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
4689 {
4690     compute_borrow(s);
4691 
4692     /*
4693      * Borrow is {0, -1}, so add to subtract; replicate the
4694      * borrow input to produce 128-bit -1 for the addition.
4695      */
4696     TCGv_i64 zero = tcg_constant_i64(0);
4697     tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
4698     tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
4699 
4700     return DISAS_NEXT;
4701 }
4702 
4703 static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
4704 {
4705     TCGv_i32 t;
4706 
4707     update_psw_addr(s);
4708     update_cc_op(s);
4709 
4710     t = tcg_constant_i32(get_field(s, i1) & 0xff);
4711     tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4712 
4713     t = tcg_constant_i32(s->ilen);
4714     tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4715 
4716     gen_exception(EXCP_SVC);
4717     return DISAS_NORETURN;
4718 }
4719 
4720 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4721 {
4722     int cc = 0;
4723 
4724     cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4725     cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4726     gen_op_movi_cc(s, cc);
4727     return DISAS_NEXT;
4728 }
4729 
4730 static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
4731 {
4732     gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4733     set_cc_static(s);
4734     return DISAS_NEXT;
4735 }
4736 
4737 static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
4738 {
4739     gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4740     set_cc_static(s);
4741     return DISAS_NEXT;
4742 }
4743 
4744 static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
4745 {
4746     gen_helper_tcxb(cc_op, cpu_env, o->in1_128, o->in2);
4747     set_cc_static(s);
4748     return DISAS_NEXT;
4749 }
4750 
4751 #ifndef CONFIG_USER_ONLY
4752 
4753 static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
4754 {
4755     gen_helper_testblock(cc_op, cpu_env, o->in2);
4756     set_cc_static(s);
4757     return DISAS_NEXT;
4758 }
4759 
4760 static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
4761 {
4762     gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
4763     set_cc_static(s);
4764     return DISAS_NEXT;
4765 }
4766 
4767 #endif
4768 
4769 static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
4770 {
4771     TCGv_i32 l1 = tcg_constant_i32(get_field(s, l1) + 1);
4772 
4773     gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4774     set_cc_static(s);
4775     return DISAS_NEXT;
4776 }
4777 
4778 static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
4779 {
4780     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4781 
4782     gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4783     set_cc_static(s);
4784     return DISAS_NEXT;
4785 }
4786 
4787 static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
4788 {
4789     TCGv_i128 pair = tcg_temp_new_i128();
4790 
4791     gen_helper_tre(pair, cpu_env, o->out, o->out2, o->in2);
4792     tcg_gen_extr_i128_i64(o->out2, o->out, pair);
4793     tcg_temp_free_i128(pair);
4794     set_cc_static(s);
4795     return DISAS_NEXT;
4796 }
4797 
4798 static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
4799 {
4800     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4801 
4802     gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4803     set_cc_static(s);
4804     return DISAS_NEXT;
4805 }
4806 
4807 static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
4808 {
4809     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4810 
4811     gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4812     set_cc_static(s);
4813     return DISAS_NEXT;
4814 }
4815 
4816 static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
4817 {
4818     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4819     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4820     TCGv_i32 sizes = tcg_constant_i32(s->insn->opc & 3);
4821     TCGv_i32 tst = tcg_temp_new_i32();
4822     int m3 = get_field(s, m3);
4823 
4824     if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4825         m3 = 0;
4826     }
4827     if (m3 & 1) {
4828         tcg_gen_movi_i32(tst, -1);
4829     } else {
4830         tcg_gen_extrl_i64_i32(tst, regs[0]);
4831         if (s->insn->opc & 3) {
4832             tcg_gen_ext8u_i32(tst, tst);
4833         } else {
4834             tcg_gen_ext16u_i32(tst, tst);
4835         }
4836     }
4837     gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4838 
4839     tcg_temp_free_i32(tst);
4840     set_cc_static(s);
4841     return DISAS_NEXT;
4842 }
4843 
4844 static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
4845 {
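    /*
     * TEST AND SET: atomically exchange the byte with all ones; CC is
     * the leftmost bit of the old value.
     */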
    TCGv_i32 ff = tcg_constant_i32(0xff);
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_atomic_xchg_i32(t1, o->in2, ff, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    tcg_temp_free_i32(t1);
4850     set_cc_static(s);
4851     return DISAS_NEXT;
4852 }
4853 
4854 static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
4855 {
4856     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4857 
4858     gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4859     return DISAS_NEXT;
4860 }
4861 
4862 static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4863 {
4864     int l1 = get_field(s, l1) + 1;
4865     TCGv_i32 l;
4866 
4867     /* The length must not exceed 32 bytes.  */
4868     if (l1 > 32) {
4869         gen_program_exception(s, PGM_SPECIFICATION);
4870         return DISAS_NORETURN;
4871     }
4872     l = tcg_constant_i32(l1);
4873     gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4874     set_cc_static(s);
4875     return DISAS_NEXT;
4876 }
4877 
4878 static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
4879 {
4880     int l1 = get_field(s, l1) + 1;
4881     TCGv_i32 l;
4882 
4883     /* The length must be even and should not exceed 64 bytes.  */
4884     if ((l1 & 1) || (l1 > 64)) {
4885         gen_program_exception(s, PGM_SPECIFICATION);
4886         return DISAS_NORETURN;
4887     }
4888     l = tcg_constant_i32(l1);
4889     gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4890     set_cc_static(s);
4891     return DISAS_NEXT;
4892 }
4893 
static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
4896 {
4897     int d1 = get_field(s, d1);
4898     int d2 = get_field(s, d2);
4899     int b1 = get_field(s, b1);
4900     int b2 = get_field(s, b2);
4901     int l = get_field(s, l1);
4902     TCGv_i32 t32;
4903 
4904     o->addr1 = get_address(s, 0, b1, d1);
4905 
4906     /* If the addresses are identical, this is a store/memset of zero.  */
4907     if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4908         o->in2 = tcg_constant_i64(0);
4909 
4910         l++;
4911         while (l >= 8) {
4912             tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4913             l -= 8;
4914             if (l > 0) {
4915                 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4916             }
4917         }
4918         if (l >= 4) {
4919             tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4920             l -= 4;
4921             if (l > 0) {
4922                 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4923             }
4924         }
4925         if (l >= 2) {
4926             tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4927             l -= 2;
4928             if (l > 0) {
4929                 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4930             }
4931         }
4932         if (l) {
4933             tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4934         }
4935         gen_op_movi_cc(s, 0);
4936         return DISAS_NEXT;
4937     }
4938 
4939     /* But in general we'll defer to a helper.  */
4940     o->in2 = get_address(s, 0, b2, d2);
4941     t32 = tcg_constant_i32(l);
4942     gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4943     set_cc_static(s);
4944     return DISAS_NEXT;
4945 }
4946 
4947 static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
4948 {
4949     tcg_gen_xor_i64(o->out, o->in1, o->in2);
4950     return DISAS_NEXT;
4951 }
4952 
4953 static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
4954 {
4955     int shift = s->insn->data & 0xff;
4956     int size = s->insn->data >> 8;
4957     uint64_t mask = ((1ull << size) - 1) << shift;
4958 
4959     assert(!o->g_in2);
4960     tcg_gen_shli_i64(o->in2, o->in2, shift);
4961     tcg_gen_xor_i64(o->out, o->in1, o->in2);
4962 
4963     /* Produce the CC from only the bits manipulated.  */
4964     tcg_gen_andi_i64(cc_dst, o->out, mask);
4965     set_cc_nz_u64(s, cc_dst);
4966     return DISAS_NEXT;
4967 }
4968 
4969 static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
4970 {
4971     o->in1 = tcg_temp_new_i64();
4972 
4973     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4974         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
4975     } else {
4976         /* Perform the atomic operation in memory. */
4977         tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
4978                                      s->insn->data);
4979     }
4980 
4981     /* Recompute also for atomic case: needed for setting CC. */
4982     tcg_gen_xor_i64(o->out, o->in1, o->in2);
4983 
4984     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4985         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
4986     }
4987     return DISAS_NEXT;
4988 }
4989 
4990 static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
4991 {
4992     o->out = tcg_const_i64(0);
4993     return DISAS_NEXT;
4994 }
4995 
4996 static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
4997 {
4998     o->out = tcg_const_i64(0);
4999     o->out2 = o->out;
5000     o->g_out2 = true;
5001     return DISAS_NEXT;
5002 }
5003 
5004 #ifndef CONFIG_USER_ONLY
5005 static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
5006 {
5007     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
5008 
5009     gen_helper_clp(cpu_env, r2);
5010     set_cc_static(s);
5011     return DISAS_NEXT;
5012 }
5013 
5014 static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
5015 {
5016     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
5017     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
5018 
5019     gen_helper_pcilg(cpu_env, r1, r2);
5020     set_cc_static(s);
5021     return DISAS_NEXT;
5022 }
5023 
5024 static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
5025 {
5026     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
5027     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
5028 
5029     gen_helper_pcistg(cpu_env, r1, r2);
5030     set_cc_static(s);
5031     return DISAS_NEXT;
5032 }
5033 
5034 static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
5035 {
5036     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
5037     TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
5038 
5039     gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
5040     set_cc_static(s);
5041     return DISAS_NEXT;
5042 }
5043 
5044 static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
5045 {
5046     gen_helper_sic(cpu_env, o->in1, o->in2);
5047     return DISAS_NEXT;
5048 }
5049 
5050 static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
5051 {
5052     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
5053     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
5054 
5055     gen_helper_rpcit(cpu_env, r1, r2);
5056     set_cc_static(s);
5057     return DISAS_NEXT;
5058 }
5059 
5060 static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
5061 {
5062     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
5063     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
5064     TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
5065 
5066     gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
5067     set_cc_static(s);
5068     return DISAS_NEXT;
5069 }
5070 
5071 static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
5072 {
5073     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
5074     TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
5075 
5076     gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
5077     set_cc_static(s);
5078     return DISAS_NEXT;
5079 }
5080 #endif
5081 
5082 #include "translate_vx.c.inc"
5083 
5084 /* ====================================================================== */
5085 /* The "Cc OUTput" generators.  Given the generated output (and in some cases
5086    the original inputs), update the various cc data structures in order to
5087    be able to compute the new condition code.  */
5088 
5089 static void cout_abs32(DisasContext *s, DisasOps *o)
5090 {
5091     gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
5092 }
5093 
5094 static void cout_abs64(DisasContext *s, DisasOps *o)
5095 {
5096     gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
5097 }
5098 
5099 static void cout_adds32(DisasContext *s, DisasOps *o)
5100 {
5101     gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
5102 }
5103 
5104 static void cout_adds64(DisasContext *s, DisasOps *o)
5105 {
5106     gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
5107 }
5108 
5109 static void cout_addu32(DisasContext *s, DisasOps *o)
5110 {
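    /* The carry is bit 32 of the 64-bit sum of the zero-extended operands. */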
5111     tcg_gen_shri_i64(cc_src, o->out, 32);
5112     tcg_gen_ext32u_i64(cc_dst, o->out);
5113     gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
5114 }
5115 
5116 static void cout_addu64(DisasContext *s, DisasOps *o)
5117 {
5118     gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
5119 }
5120 
5121 static void cout_cmps32(DisasContext *s, DisasOps *o)
5122 {
5123     gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
5124 }
5125 
5126 static void cout_cmps64(DisasContext *s, DisasOps *o)
5127 {
5128     gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
5129 }
5130 
5131 static void cout_cmpu32(DisasContext *s, DisasOps *o)
5132 {
5133     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
5134 }
5135 
5136 static void cout_cmpu64(DisasContext *s, DisasOps *o)
5137 {
5138     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
5139 }
5140 
5141 static void cout_f32(DisasContext *s, DisasOps *o)
5142 {
5143     gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
5144 }
5145 
5146 static void cout_f64(DisasContext *s, DisasOps *o)
5147 {
5148     gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
5149 }
5150 
5151 static void cout_f128(DisasContext *s, DisasOps *o)
5152 {
5153     gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
5154 }
5155 
5156 static void cout_nabs32(DisasContext *s, DisasOps *o)
5157 {
5158     gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
5159 }
5160 
5161 static void cout_nabs64(DisasContext *s, DisasOps *o)
5162 {
5163     gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
5164 }
5165 
5166 static void cout_neg32(DisasContext *s, DisasOps *o)
5167 {
5168     gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
5169 }
5170 
5171 static void cout_neg64(DisasContext *s, DisasOps *o)
5172 {
5173     gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
5174 }
5175 
5176 static void cout_nz32(DisasContext *s, DisasOps *o)
5177 {
5178     tcg_gen_ext32u_i64(cc_dst, o->out);
5179     gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
5180 }
5181 
5182 static void cout_nz64(DisasContext *s, DisasOps *o)
5183 {
5184     gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5185 }
5186 
5187 static void cout_s32(DisasContext *s, DisasOps *o)
5188 {
5189     gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5190 }
5191 
5192 static void cout_s64(DisasContext *s, DisasOps *o)
5193 {
5194     gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5195 }
5196 
5197 static void cout_subs32(DisasContext *s, DisasOps *o)
5198 {
5199     gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5200 }
5201 
5202 static void cout_subs64(DisasContext *s, DisasOps *o)
5203 {
5204     gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5205 }
5206 
5207 static void cout_subu32(DisasContext *s, DisasOps *o)
5208 {
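    /* On borrow, bits 63-32 of the difference are all ones, so the
       arithmetic shift leaves 0 or -1 (the borrow) in cc_src. */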
5209     tcg_gen_sari_i64(cc_src, o->out, 32);
5210     tcg_gen_ext32u_i64(cc_dst, o->out);
5211     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
5212 }
5213 
5214 static void cout_subu64(DisasContext *s, DisasOps *o)
5215 {
5216     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
5217 }
5218 
5219 static void cout_tm32(DisasContext *s, DisasOps *o)
5220 {
5221     gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5222 }
5223 
5224 static void cout_tm64(DisasContext *s, DisasOps *o)
5225 {
5226     gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5227 }
5228 
5229 static void cout_muls32(DisasContext *s, DisasOps *o)
5230 {
5231     gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
5232 }
5233 
5234 static void cout_muls64(DisasContext *s, DisasOps *o)
5235 {
5236     /* out contains "high" part, out2 contains "low" part of 128 bit result */
5237     gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
5238 }
5239 
5240 /* ====================================================================== */
5241 /* The "PREParation" generators.  These initialize the DisasOps.OUT fields
5242    with the TCG register to which we will write.  Used in combination with
5243    the "wout" generators, in some cases we need a new temporary, and in
5244    some cases we can write to a TCG global.  */
5245 
5246 static void prep_new(DisasContext *s, DisasOps *o)
5247 {
5248     o->out = tcg_temp_new_i64();
5249 }
5250 #define SPEC_prep_new 0
5251 
5252 static void prep_new_P(DisasContext *s, DisasOps *o)
5253 {
5254     o->out = tcg_temp_new_i64();
5255     o->out2 = tcg_temp_new_i64();
5256 }
5257 #define SPEC_prep_new_P 0
5258 
5259 static void prep_new_x(DisasContext *s, DisasOps *o)
5260 {
5261     o->out_128 = tcg_temp_new_i128();
5262 }
5263 #define SPEC_prep_new_x 0
5264 
5265 static void prep_r1(DisasContext *s, DisasOps *o)
5266 {
5267     o->out = regs[get_field(s, r1)];
5268     o->g_out = true;
5269 }
5270 #define SPEC_prep_r1 0
5271 
5272 static void prep_r1_P(DisasContext *s, DisasOps *o)
5273 {
5274     int r1 = get_field(s, r1);
5275     o->out = regs[r1];
5276     o->out2 = regs[r1 + 1];
5277     o->g_out = o->g_out2 = true;
5278 }
5279 #define SPEC_prep_r1_P SPEC_r1_even
5280 
5281 static void prep_x1(DisasContext *s, DisasOps *o)
5282 {
5283     o->out_128 = load_freg_128(get_field(s, r1));
5284 }
5285 #define SPEC_prep_x1 SPEC_r1_f128
5286 
5287 /* ====================================================================== */
5288 /* The "Write OUTput" generators.  These generally perform some non-trivial
5289    copy of data to TCG globals, or to main memory.  The trivial cases are
5290    generally handled by having a "prep" generator install the TCG global
5291    as the destination of the operation.  */
5292 
5293 static void wout_r1(DisasContext *s, DisasOps *o)
5294 {
5295     store_reg(get_field(s, r1), o->out);
5296 }
5297 #define SPEC_wout_r1 0
5298 
5299 static void wout_out2_r1(DisasContext *s, DisasOps *o)
5300 {
5301     store_reg(get_field(s, r1), o->out2);
5302 }
5303 #define SPEC_wout_out2_r1 0
5304 
5305 static void wout_r1_8(DisasContext *s, DisasOps *o)
5306 {
5307     int r1 = get_field(s, r1);
5308     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
5309 }
5310 #define SPEC_wout_r1_8 0
5311 
5312 static void wout_r1_16(DisasContext *s, DisasOps *o)
5313 {
5314     int r1 = get_field(s, r1);
5315     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
5316 }
5317 #define SPEC_wout_r1_16 0
5318 
5319 static void wout_r1_32(DisasContext *s, DisasOps *o)
5320 {
5321     store_reg32_i64(get_field(s, r1), o->out);
5322 }
5323 #define SPEC_wout_r1_32 0
5324 
5325 static void wout_r1_32h(DisasContext *s, DisasOps *o)
5326 {
5327     store_reg32h_i64(get_field(s, r1), o->out);
5328 }
5329 #define SPEC_wout_r1_32h 0
5330 
5331 static void wout_r1_P32(DisasContext *s, DisasOps *o)
5332 {
5333     int r1 = get_field(s, r1);
5334     store_reg32_i64(r1, o->out);
5335     store_reg32_i64(r1 + 1, o->out2);
5336 }
5337 #define SPEC_wout_r1_P32 SPEC_r1_even
5338 
5339 static void wout_r1_D32(DisasContext *s, DisasOps *o)
5340 {
5341     int r1 = get_field(s, r1);
5342     TCGv_i64 t = tcg_temp_new_i64();
5343     store_reg32_i64(r1 + 1, o->out);
5344     tcg_gen_shri_i64(t, o->out, 32);
5345     store_reg32_i64(r1, t);
5346     tcg_temp_free_i64(t);
5347 }
5348 #define SPEC_wout_r1_D32 SPEC_r1_even
5349 
5350 static void wout_r1_D64(DisasContext *s, DisasOps *o)
5351 {
5352     int r1 = get_field(s, r1);
5353     tcg_gen_extr_i128_i64(regs[r1 + 1], regs[r1], o->out_128);
5354 }
5355 #define SPEC_wout_r1_D64 SPEC_r1_even
5356 
5357 static void wout_r3_P32(DisasContext *s, DisasOps *o)
5358 {
5359     int r3 = get_field(s, r3);
5360     store_reg32_i64(r3, o->out);
5361     store_reg32_i64(r3 + 1, o->out2);
5362 }
5363 #define SPEC_wout_r3_P32 SPEC_r3_even
5364 
5365 static void wout_r3_P64(DisasContext *s, DisasOps *o)
5366 {
5367     int r3 = get_field(s, r3);
5368     store_reg(r3, o->out);
5369     store_reg(r3 + 1, o->out2);
5370 }
5371 #define SPEC_wout_r3_P64 SPEC_r3_even
5372 
5373 static void wout_e1(DisasContext *s, DisasOps *o)
5374 {
5375     store_freg32_i64(get_field(s, r1), o->out);
5376 }
5377 #define SPEC_wout_e1 0
5378 
5379 static void wout_f1(DisasContext *s, DisasOps *o)
5380 {
5381     store_freg(get_field(s, r1), o->out);
5382 }
5383 #define SPEC_wout_f1 0
5384 
5385 static void wout_x1(DisasContext *s, DisasOps *o)
5386 {
5387     int f1 = get_field(s, r1);
5388 
5389     /* Split out_128 into out+out2 for cout_f128. */
5390     tcg_debug_assert(o->out == NULL);
5391     o->out = tcg_temp_new_i64();
5392     o->out2 = tcg_temp_new_i64();
5393 
5394     tcg_gen_extr_i128_i64(o->out2, o->out, o->out_128);
5395     store_freg(f1, o->out);
5396     store_freg(f1 + 2, o->out2);
5397 }
5398 #define SPEC_wout_x1 SPEC_r1_f128
5399 
5400 static void wout_x1_P(DisasContext *s, DisasOps *o)
5401 {
5402     int f1 = get_field(s, r1);
5403     store_freg(f1, o->out);
5404     store_freg(f1 + 2, o->out2);
5405 }
5406 #define SPEC_wout_x1_P SPEC_r1_f128
5407 
5408 static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
5409 {
5410     if (get_field(s, r1) != get_field(s, r2)) {
5411         store_reg32_i64(get_field(s, r1), o->out);
5412     }
5413 }
5414 #define SPEC_wout_cond_r1r2_32 0
5415 
5416 static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
5417 {
5418     if (get_field(s, r1) != get_field(s, r2)) {
5419         store_freg32_i64(get_field(s, r1), o->out);
5420     }
5421 }
5422 #define SPEC_wout_cond_e1e2 0
5423 
5424 static void wout_m1_8(DisasContext *s, DisasOps *o)
5425 {
5426     tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
5427 }
5428 #define SPEC_wout_m1_8 0
5429 
5430 static void wout_m1_16(DisasContext *s, DisasOps *o)
5431 {
5432     tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
5433 }
5434 #define SPEC_wout_m1_16 0
5435 
5436 #ifndef CONFIG_USER_ONLY
5437 static void wout_m1_16a(DisasContext *s, DisasOps *o)
5438 {
5439     tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
5440 }
5441 #define SPEC_wout_m1_16a 0
5442 #endif
5443 
5444 static void wout_m1_32(DisasContext *s, DisasOps *o)
5445 {
5446     tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
5447 }
5448 #define SPEC_wout_m1_32 0
5449 
5450 #ifndef CONFIG_USER_ONLY
5451 static void wout_m1_32a(DisasContext *s, DisasOps *o)
5452 {
5453     tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
5454 }
5455 #define SPEC_wout_m1_32a 0
5456 #endif
5457 
5458 static void wout_m1_64(DisasContext *s, DisasOps *o)
5459 {
5460     tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
5461 }
5462 #define SPEC_wout_m1_64 0
5463 
5464 #ifndef CONFIG_USER_ONLY
5465 static void wout_m1_64a(DisasContext *s, DisasOps *o)
5466 {
5467     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
5468 }
5469 #define SPEC_wout_m1_64a 0
5470 #endif
5471 
5472 static void wout_m2_32(DisasContext *s, DisasOps *o)
5473 {
5474     tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
5475 }
5476 #define SPEC_wout_m2_32 0
5477 
5478 static void wout_in2_r1(DisasContext *s, DisasOps *o)
5479 {
5480     store_reg(get_field(s, r1), o->in2);
5481 }
5482 #define SPEC_wout_in2_r1 0
5483 
5484 static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
5485 {
5486     store_reg32_i64(get_field(s, r1), o->in2);
5487 }
5488 #define SPEC_wout_in2_r1_32 0
5489 
5490 /* ====================================================================== */
5491 /* The "INput 1" generators.  These load the first operand to an insn.  */
5492 
5493 static void in1_r1(DisasContext *s, DisasOps *o)
5494 {
5495     o->in1 = load_reg(get_field(s, r1));
5496 }
5497 #define SPEC_in1_r1 0
5498 
5499 static void in1_r1_o(DisasContext *s, DisasOps *o)
5500 {
5501     o->in1 = regs[get_field(s, r1)];
5502     o->g_in1 = true;
5503 }
5504 #define SPEC_in1_r1_o 0
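/*
 * The _o ("original") variants install the TCG global itself rather than
 * a copy; g_in1 tells translate_one not to free it as a temporary.
 */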
5505 
5506 static void in1_r1_32s(DisasContext *s, DisasOps *o)
5507 {
5508     o->in1 = tcg_temp_new_i64();
5509     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
5510 }
5511 #define SPEC_in1_r1_32s 0
5512 
5513 static void in1_r1_32u(DisasContext *s, DisasOps *o)
5514 {
5515     o->in1 = tcg_temp_new_i64();
5516     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
5517 }
5518 #define SPEC_in1_r1_32u 0
5519 
5520 static void in1_r1_sr32(DisasContext *s, DisasOps *o)
5521 {
5522     o->in1 = tcg_temp_new_i64();
5523     tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
5524 }
5525 #define SPEC_in1_r1_sr32 0
5526 
5527 static void in1_r1p1(DisasContext *s, DisasOps *o)
5528 {
5529     o->in1 = load_reg(get_field(s, r1) + 1);
5530 }
5531 #define SPEC_in1_r1p1 SPEC_r1_even
5532 
5533 static void in1_r1p1_o(DisasContext *s, DisasOps *o)
5534 {
5535     o->in1 = regs[get_field(s, r1) + 1];
5536     o->g_in1 = true;
5537 }
5538 #define SPEC_in1_r1p1_o SPEC_r1_even
5539 
5540 static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
5541 {
5542     o->in1 = tcg_temp_new_i64();
5543     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
5544 }
5545 #define SPEC_in1_r1p1_32s SPEC_r1_even
5546 
5547 static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
5548 {
5549     o->in1 = tcg_temp_new_i64();
5550     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
5551 }
5552 #define SPEC_in1_r1p1_32u SPEC_r1_even
5553 
5554 static void in1_r1_D32(DisasContext *s, DisasOps *o)
5555 {
5556     int r1 = get_field(s, r1);
5557     o->in1 = tcg_temp_new_i64();
5558     tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5559 }
5560 #define SPEC_in1_r1_D32 SPEC_r1_even
5561 
5562 static void in1_r2(DisasContext *s, DisasOps *o)
5563 {
5564     o->in1 = load_reg(get_field(s, r2));
5565 }
5566 #define SPEC_in1_r2 0
5567 
5568 static void in1_r2_sr32(DisasContext *s, DisasOps *o)
5569 {
5570     o->in1 = tcg_temp_new_i64();
5571     tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
5572 }
5573 #define SPEC_in1_r2_sr32 0
5574 
5575 static void in1_r2_32u(DisasContext *s, DisasOps *o)
5576 {
5577     o->in1 = tcg_temp_new_i64();
5578     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
5579 }
5580 #define SPEC_in1_r2_32u 0
5581 
5582 static void in1_r3(DisasContext *s, DisasOps *o)
5583 {
5584     o->in1 = load_reg(get_field(s, r3));
5585 }
5586 #define SPEC_in1_r3 0
5587 
5588 static void in1_r3_o(DisasContext *s, DisasOps *o)
5589 {
5590     o->in1 = regs[get_field(s, r3)];
5591     o->g_in1 = true;
5592 }
5593 #define SPEC_in1_r3_o 0
5594 
5595 static void in1_r3_32s(DisasContext *s, DisasOps *o)
5596 {
5597     o->in1 = tcg_temp_new_i64();
5598     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
5599 }
5600 #define SPEC_in1_r3_32s 0
5601 
5602 static void in1_r3_32u(DisasContext *s, DisasOps *o)
5603 {
5604     o->in1 = tcg_temp_new_i64();
5605     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
5606 }
5607 #define SPEC_in1_r3_32u 0
5608 
5609 static void in1_r3_D32(DisasContext *s, DisasOps *o)
5610 {
5611     int r3 = get_field(s, r3);
5612     o->in1 = tcg_temp_new_i64();
5613     tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5614 }
5615 #define SPEC_in1_r3_D32 SPEC_r3_even
5616 
5617 static void in1_r3_sr32(DisasContext *s, DisasOps *o)
5618 {
5619     o->in1 = tcg_temp_new_i64();
5620     tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32);
5621 }
5622 #define SPEC_in1_r3_sr32 0
5623 
5624 static void in1_e1(DisasContext *s, DisasOps *o)
5625 {
5626     o->in1 = load_freg32_i64(get_field(s, r1));
5627 }
5628 #define SPEC_in1_e1 0
5629 
5630 static void in1_f1(DisasContext *s, DisasOps *o)
5631 {
5632     o->in1 = load_freg(get_field(s, r1));
5633 }
5634 #define SPEC_in1_f1 0
5635 
5636 static void in1_x1(DisasContext *s, DisasOps *o)
5637 {
5638     o->in1_128 = load_freg_128(get_field(s, r1));
5639 }
5640 #define SPEC_in1_x1 SPEC_r1_f128
5641 
5642 /* Load the high double word of an extended (128-bit) format FP number */
5643 static void in1_x2h(DisasContext *s, DisasOps *o)
5644 {
5645     o->in1 = load_freg(get_field(s, r2));
5646 }
5647 #define SPEC_in1_x2h SPEC_r2_f128
5648 
5649 static void in1_f3(DisasContext *s, DisasOps *o)
5650 {
5651     o->in1 = load_freg(get_field(s, r3));
5652 }
5653 #define SPEC_in1_f3 0
5654 
5655 static void in1_la1(DisasContext *s, DisasOps *o)
5656 {
5657     o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
5658 }
5659 #define SPEC_in1_la1 0
5660 
5661 static void in1_la2(DisasContext *s, DisasOps *o)
5662 {
5663     int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5664     o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5665 }
5666 #define SPEC_in1_la2 0
5667 
5668 static void in1_m1_8u(DisasContext *s, DisasOps *o)
5669 {
5670     in1_la1(s, o);
5671     o->in1 = tcg_temp_new_i64();
5672     tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
5673 }
5674 #define SPEC_in1_m1_8u 0
5675 
5676 static void in1_m1_16s(DisasContext *s, DisasOps *o)
5677 {
5678     in1_la1(s, o);
5679     o->in1 = tcg_temp_new_i64();
5680     tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
5681 }
5682 #define SPEC_in1_m1_16s 0
5683 
5684 static void in1_m1_16u(DisasContext *s, DisasOps *o)
5685 {
5686     in1_la1(s, o);
5687     o->in1 = tcg_temp_new_i64();
5688     tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
5689 }
5690 #define SPEC_in1_m1_16u 0
5691 
5692 static void in1_m1_32s(DisasContext *s, DisasOps *o)
5693 {
5694     in1_la1(s, o);
5695     o->in1 = tcg_temp_new_i64();
5696     tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
5697 }
5698 #define SPEC_in1_m1_32s 0
5699 
5700 static void in1_m1_32u(DisasContext *s, DisasOps *o)
5701 {
5702     in1_la1(s, o);
5703     o->in1 = tcg_temp_new_i64();
5704     tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
5705 }
5706 #define SPEC_in1_m1_32u 0
5707 
5708 static void in1_m1_64(DisasContext *s, DisasOps *o)
5709 {
5710     in1_la1(s, o);
5711     o->in1 = tcg_temp_new_i64();
5712     tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
5713 }
5714 #define SPEC_in1_m1_64 0
5715 
5716 /* ====================================================================== */
5717 /* The "INput 2" generators.  These load the second operand to an insn.  */
5718 
5719 static void in2_r1_o(DisasContext *s, DisasOps *o)
5720 {
5721     o->in2 = regs[get_field(s, r1)];
5722     o->g_in2 = true;
5723 }
5724 #define SPEC_in2_r1_o 0
5725 
5726 static void in2_r1_16u(DisasContext *s, DisasOps *o)
5727 {
5728     o->in2 = tcg_temp_new_i64();
5729     tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
5730 }
5731 #define SPEC_in2_r1_16u 0
5732 
5733 static void in2_r1_32u(DisasContext *s, DisasOps *o)
5734 {
5735     o->in2 = tcg_temp_new_i64();
5736     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
5737 }
5738 #define SPEC_in2_r1_32u 0
5739 
5740 static void in2_r1_D32(DisasContext *s, DisasOps *o)
5741 {
5742     int r1 = get_field(s, r1);
5743     o->in2 = tcg_temp_new_i64();
5744     tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5745 }
5746 #define SPEC_in2_r1_D32 SPEC_r1_even
5747 
5748 static void in2_r2(DisasContext *s, DisasOps *o)
5749 {
5750     o->in2 = load_reg(get_field(s, r2));
5751 }
5752 #define SPEC_in2_r2 0
5753 
5754 static void in2_r2_o(DisasContext *s, DisasOps *o)
5755 {
5756     o->in2 = regs[get_field(s, r2)];
5757     o->g_in2 = true;
5758 }
5759 #define SPEC_in2_r2_o 0
5760 
5761 static void in2_r2_nz(DisasContext *s, DisasOps *o)
5762 {
5763     int r2 = get_field(s, r2);
5764     if (r2 != 0) {
5765         o->in2 = load_reg(r2);
5766     }
5767 }
5768 #define SPEC_in2_r2_nz 0
5769 
5770 static void in2_r2_8s(DisasContext *s, DisasOps *o)
5771 {
5772     o->in2 = tcg_temp_new_i64();
5773     tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
5774 }
5775 #define SPEC_in2_r2_8s 0
5776 
5777 static void in2_r2_8u(DisasContext *s, DisasOps *o)
5778 {
5779     o->in2 = tcg_temp_new_i64();
5780     tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
5781 }
5782 #define SPEC_in2_r2_8u 0
5783 
5784 static void in2_r2_16s(DisasContext *s, DisasOps *o)
5785 {
5786     o->in2 = tcg_temp_new_i64();
5787     tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
5788 }
5789 #define SPEC_in2_r2_16s 0
5790 
5791 static void in2_r2_16u(DisasContext *s, DisasOps *o)
5792 {
5793     o->in2 = tcg_temp_new_i64();
5794     tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
5795 }
5796 #define SPEC_in2_r2_16u 0
5797 
5798 static void in2_r3(DisasContext *s, DisasOps *o)
5799 {
5800     o->in2 = load_reg(get_field(s, r3));
5801 }
5802 #define SPEC_in2_r3 0
5803 
5804 static void in2_r3_D64(DisasContext *s, DisasOps *o)
5805 {
5806     int r3 = get_field(s, r3);
5807     o->in2_128 = tcg_temp_new_i128();
5808     tcg_gen_concat_i64_i128(o->in2_128, regs[r3 + 1], regs[r3]);
5809 }
5810 #define SPEC_in2_r3_D64 SPEC_r3_even
5811 
5812 static void in2_r3_sr32(DisasContext *s, DisasOps *o)
5813 {
5814     o->in2 = tcg_temp_new_i64();
5815     tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
5816 }
5817 #define SPEC_in2_r3_sr32 0
5818 
5819 static void in2_r3_32u(DisasContext *s, DisasOps *o)
5820 {
5821     o->in2 = tcg_temp_new_i64();
5822     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
5823 }
5824 #define SPEC_in2_r3_32u 0
5825 
5826 static void in2_r2_32s(DisasContext *s, DisasOps *o)
5827 {
5828     o->in2 = tcg_temp_new_i64();
5829     tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
5830 }
5831 #define SPEC_in2_r2_32s 0
5832 
5833 static void in2_r2_32u(DisasContext *s, DisasOps *o)
5834 {
5835     o->in2 = tcg_temp_new_i64();
5836     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
5837 }
5838 #define SPEC_in2_r2_32u 0
5839 
5840 static void in2_r2_sr32(DisasContext *s, DisasOps *o)
5841 {
5842     o->in2 = tcg_temp_new_i64();
5843     tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
5844 }
5845 #define SPEC_in2_r2_sr32 0
5846 
5847 static void in2_e2(DisasContext *s, DisasOps *o)
5848 {
5849     o->in2 = load_freg32_i64(get_field(s, r2));
5850 }
5851 #define SPEC_in2_e2 0
5852 
5853 static void in2_f2(DisasContext *s, DisasOps *o)
5854 {
5855     o->in2 = load_freg(get_field(s, r2));
5856 }
5857 #define SPEC_in2_f2 0
5858 
5859 static void in2_x2(DisasContext *s, DisasOps *o)
5860 {
5861     o->in2_128 = load_freg_128(get_field(s, r2));
5862 }
5863 #define SPEC_in2_x2 SPEC_r2_f128
5864 
5865 /* Load the low double word of an extended (128-bit) format FP number */
5866 static void in2_x2l(DisasContext *s, DisasOps *o)
5867 {
5868     o->in2 = load_freg(get_field(s, r2) + 2);
5869 }
5870 #define SPEC_in2_x2l SPEC_r2_f128
5871 
5872 static void in2_ra2(DisasContext *s, DisasOps *o)
5873 {
5874     int r2 = get_field(s, r2);
5875 
5876     /* Note: *don't* treat !r2 as 0, use the reg value. */
5877     o->in2 = tcg_temp_new_i64();
5878     gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
5879 }
5880 #define SPEC_in2_ra2 0
5881 
5882 static void in2_a2(DisasContext *s, DisasOps *o)
5883 {
5884     int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5885     o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5886 }
5887 #define SPEC_in2_a2 0
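/*
 * E.g. for an RX-format operand D2(X2,B2), this folds the index and base
 * registers (each reading as zero when the field is 0) and the
 * displacement into a single address, wrapped for the addressing mode.
 */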
5888 
5889 static void in2_ri2(DisasContext *s, DisasOps *o)
5890 {
5891     o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2);
5892 }
5893 #define SPEC_in2_ri2 0
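/*
 * Relative-long operands count halfwords, hence the * 2 above; e.g. an
 * i2 of -1 addresses s->base.pc_next - 2.
 */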
5894 
5895 static void in2_sh(DisasContext *s, DisasOps *o)
5896 {
5897     int b2 = get_field(s, b2);
5898     int d2 = get_field(s, d2);
5899 
5900     if (b2 == 0) {
5901         o->in2 = tcg_const_i64(d2 & 0x3f);
5902     } else {
5903         o->in2 = get_address(s, 0, b2, d2);
5904         tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
5905     }
5906 }
5907 #define SPEC_in2_sh 0
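/*
 * E.g. with b2 = 4 and d2 = 8 the shift count becomes
 * (regs[4] + 8) & 0x3f; only the low six bits of the computed
 * address are significant.
 */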
5908 
5909 static void in2_m2_8u(DisasContext *s, DisasOps *o)
5910 {
5911     in2_a2(s, o);
5912     tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
5913 }
5914 #define SPEC_in2_m2_8u 0
5915 
5916 static void in2_m2_16s(DisasContext *s, DisasOps *o)
5917 {
5918     in2_a2(s, o);
5919     tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
5920 }
5921 #define SPEC_in2_m2_16s 0
5922 
5923 static void in2_m2_16u(DisasContext *s, DisasOps *o)
5924 {
5925     in2_a2(s, o);
5926     tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5927 }
5928 #define SPEC_in2_m2_16u 0
5929 
5930 static void in2_m2_32s(DisasContext *s, DisasOps *o)
5931 {
5932     in2_a2(s, o);
5933     tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5934 }
5935 #define SPEC_in2_m2_32s 0
5936 
5937 static void in2_m2_32u(DisasContext *s, DisasOps *o)
5938 {
5939     in2_a2(s, o);
5940     tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5941 }
5942 #define SPEC_in2_m2_32u 0
5943 
5944 #ifndef CONFIG_USER_ONLY
5945 static void in2_m2_32ua(DisasContext *s, DisasOps *o)
5946 {
5947     in2_a2(s, o);
5948     tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
5949 }
5950 #define SPEC_in2_m2_32ua 0
5951 #endif
5952 
5953 static void in2_m2_64(DisasContext *s, DisasOps *o)
5954 {
5955     in2_a2(s, o);
5956     tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5957 }
5958 #define SPEC_in2_m2_64 0
5959 
5960 static void in2_m2_64w(DisasContext *s, DisasOps *o)
5961 {
5962     in2_a2(s, o);
5963     tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5964     gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
5965 }
5966 #define SPEC_in2_m2_64w 0
5967 
5968 #ifndef CONFIG_USER_ONLY
5969 static void in2_m2_64a(DisasContext *s, DisasOps *o)
5970 {
5971     in2_a2(s, o);
5972     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
5973 }
5974 #define SPEC_in2_m2_64a 0
5975 #endif
5976 
5977 static void in2_mri2_16u(DisasContext *s, DisasOps *o)
5978 {
5979     in2_ri2(s, o);
5980     tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5981 }
5982 #define SPEC_in2_mri2_16u 0
5983 
5984 static void in2_mri2_32s(DisasContext *s, DisasOps *o)
5985 {
5986     in2_ri2(s, o);
5987     tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5988 }
5989 #define SPEC_in2_mri2_32s 0
5990 
5991 static void in2_mri2_32u(DisasContext *s, DisasOps *o)
5992 {
5993     in2_ri2(s, o);
5994     tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5995 }
5996 #define SPEC_in2_mri2_32u 0
5997 
5998 static void in2_mri2_64(DisasContext *s, DisasOps *o)
5999 {
6000     in2_ri2(s, o);
6001     tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
6002 }
6003 #define SPEC_in2_mri2_64 0
6004 
6005 static void in2_i2(DisasContext *s, DisasOps *o)
6006 {
6007     o->in2 = tcg_const_i64(get_field(s, i2));
6008 }
6009 #define SPEC_in2_i2 0
6010 
6011 static void in2_i2_8u(DisasContext *s, DisasOps *o)
6012 {
6013     o->in2 = tcg_const_i64((uint8_t)get_field(s, i2));
6014 }
6015 #define SPEC_in2_i2_8u 0
6016 
6017 static void in2_i2_16u(DisasContext *s, DisasOps *o)
6018 {
6019     o->in2 = tcg_const_i64((uint16_t)get_field(s, i2));
6020 }
6021 #define SPEC_in2_i2_16u 0
6022 
6023 static void in2_i2_32u(DisasContext *s, DisasOps *o)
6024 {
6025     o->in2 = tcg_const_i64((uint32_t)get_field(s, i2));
6026 }
6027 #define SPEC_in2_i2_32u 0
6028 
6029 static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
6030 {
6031     uint64_t i2 = (uint16_t)get_field(s, i2);
6032     o->in2 = tcg_const_i64(i2 << s->insn->data);
6033 }
6034 #define SPEC_in2_i2_16u_shl 0
6035 
6036 static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
6037 {
6038     uint64_t i2 = (uint32_t)get_field(s, i2);
6039     o->in2 = tcg_const_i64(i2 << s->insn->data);
6040 }
6041 #define SPEC_in2_i2_32u_shl 0
6042 
6043 #ifndef CONFIG_USER_ONLY
6044 static void in2_insn(DisasContext *s, DisasOps *o)
6045 {
6046     o->in2 = tcg_const_i64(s->fields.raw_insn);
6047 }
6048 #define SPEC_in2_insn 0
6049 #endif
6050 
6051 /* ====================================================================== */
6052 
6053 /* Find opc within the table of insns.  This is formulated as a switch
6054    statement so that (1) we get compile-time notice of cut-and-paste errors
6055    for duplicated opcodes, and (2) the compiler generates the binary
6056    search tree, rather than us having to post-process the table.  */
6057 
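/*
 * For instance, a table line of the form
 *     C(0x1a00, AR, RR_a, Z, r1, r2, new, r1_32, add, adds32)
 * forwards to E(), which the enum definition below turns into "insn_AR,"
 * and the later struct definition into a DisasInsn initializer wiring up
 * in1_r1, in2_r2, prep_new, wout_r1_32, op_add and cout_adds32.
 */
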
6058 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
6059     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
6060 
6061 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
6062     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
6063 
6064 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
6065     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
6066 
6067 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
6068 
6069 enum DisasInsnEnum {
6070 #include "insn-data.h.inc"
6071 };
6072 
6073 #undef E
6074 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
6075     .opc = OPC,                                                             \
6076     .flags = FL,                                                            \
6077     .fmt = FMT_##FT,                                                        \
6078     .fac = FAC_##FC,                                                        \
6079     .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
6080     .name = #NM,                                                            \
6081     .help_in1 = in1_##I1,                                                   \
6082     .help_in2 = in2_##I2,                                                   \
6083     .help_prep = prep_##P,                                                  \
6084     .help_wout = wout_##W,                                                  \
6085     .help_cout = cout_##CC,                                                 \
6086     .help_op = op_##OP,                                                     \
6087     .data = D                                                               \
6088  },
6089 
6090 /* Allow 0 to be used for NULL in the table below.  */
6091 #define in1_0  NULL
6092 #define in2_0  NULL
6093 #define prep_0  NULL
6094 #define wout_0  NULL
6095 #define cout_0  NULL
6096 #define op_0  NULL
6097 
6098 #define SPEC_in1_0 0
6099 #define SPEC_in2_0 0
6100 #define SPEC_prep_0 0
6101 #define SPEC_wout_0 0
6102 
6103 /* Give smaller names to the various facilities.  */
6104 #define FAC_Z           S390_FEAT_ZARCH
6105 #define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6106 #define FAC_DFP         S390_FEAT_DFP
6107 #define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* DFP-rounding */
6108 #define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
6109 #define FAC_EE          S390_FEAT_EXECUTE_EXT
6110 #define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
6111 #define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
6112 #define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPS-sign-handling */
6113 #define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPR-GR-transfer */
6114 #define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6115 #define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
6116 #define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
6117 #define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* IEEE-exception-simulation */
6118 #define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6119 #define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
6120 #define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
6121 #define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
6122 #define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
6123 #define FAC_PC          S390_FEAT_STFLE_45 /* population count */
6124 #define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
6125 #define FAC_SFLE        S390_FEAT_STFLE
6126 #define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6127 #define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6128 #define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6129 #define FAC_DAT_ENH     S390_FEAT_DAT_ENH
6130 #define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
6131 #define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
6132 #define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
6133 #define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6134 #define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
6135 #define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
6136 #define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6137 #define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6138 #define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6139 #define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
6140 #define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
6141 #define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
6142 #define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
6143 #define FAC_V           S390_FEAT_VECTOR /* vector facility */
6144 #define FAC_VE          S390_FEAT_VECTOR_ENH  /* vector enhancements facility 1 */
6145 #define FAC_VE2         S390_FEAT_VECTOR_ENH2 /* vector enhancements facility 2 */
6146 #define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
6147 #define FAC_MIE3        S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */
6148 
6149 static const DisasInsn insn_info[] = {
6150 #include "insn-data.h.inc"
6151 };
6152 
6153 #undef E
6154 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6155     case OPC: return &insn_info[insn_ ## NM];
6156 
6157 static const DisasInsn *lookup_opc(uint16_t opc)
6158 {
6159     switch (opc) {
6160 #include "insn-data.h.inc"
6161     default:
6162         return NULL;
6163     }
6164 }
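
/*
 * Each table line thus becomes one case; a hypothetical entry named FOO
 * with opcode 0x1234 would expand to
 *     case 0x1234: return &insn_info[insn_FOO];
 */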
6165 
6166 #undef F
6167 #undef E
6168 #undef D
6169 #undef C
6170 
6171 /* Extract a field from the insn.  The INSN should be left-aligned in
6172    the uint64_t so that we can more easily utilize the big-bit-endian
6173    definitions we extract from the Principles of Operation.  */
6174 
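/*
 * E.g. a 4-bit field beginning at instruction bit 8 is isolated from the
 * left-aligned doubleword as (insn << 8) >> (64 - 4), keeping only bits
 * 8..11 of the encoding.
 */
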
6175 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
6176 {
6177     uint32_t r, m;
6178 
6179     if (f->size == 0) {
6180         return;
6181     }
6182 
6183     /* Zero extract the field from the insn.  */
6184     r = (insn << f->beg) >> (64 - f->size);
6185 
6186     /* Sign-extend, or un-swap the field as necessary.  */
6187     switch (f->type) {
6188     case 0: /* unsigned */
6189         break;
6190     case 1: /* signed */
6191         assert(f->size <= 32);
6192         m = 1u << (f->size - 1);
6193         r = (r ^ m) - m;
6194         break;
6195     case 2: /* dl+dh split, signed 20 bit. */
6196         r = ((int8_t)r << 12) | (r >> 8);
6197         break;
6198     case 3: /* MSB stored in RXB */
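        /*
         * Vector insns store bit 4 of each register number in the RXB
         * byte at instruction bits 36-39; extract64() counts from the
         * least significant bit, hence the 63 - N positions below.
         */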
6199         g_assert(f->size == 4);
6200         switch (f->beg) {
6201         case 8:
6202             r |= extract64(insn, 63 - 36, 1) << 4;
6203             break;
6204         case 12:
6205             r |= extract64(insn, 63 - 37, 1) << 4;
6206             break;
6207         case 16:
6208             r |= extract64(insn, 63 - 38, 1) << 4;
6209             break;
6210         case 32:
6211             r |= extract64(insn, 63 - 39, 1) << 4;
6212             break;
6213         default:
6214             g_assert_not_reached();
6215         }
6216         break;
6217     default:
6218         abort();
6219     }
6220 
6221     /*
6222      * Check that the "compressed" encoding we selected above is valid,
6223      * i.e. that we haven't made two different original fields overlap.
6224      */
6225     assert(((o->presentC >> f->indexC) & 1) == 0);
6226     o->presentC |= 1 << f->indexC;
6227     o->presentO |= 1 << f->indexO;
6228 
6229     o->c[f->indexC] = r;
6230 }
6231 
6232 /* Look up the insn at the current PC, extracting the operands into O and
6233    returning the info struct for the insn.  Returns NULL for an invalid insn.  */
6234 
6235 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
6236 {
6237     uint64_t insn, pc = s->base.pc_next;
6238     int op, op2, ilen;
6239     const DisasInsn *info;
6240 
6241     if (unlikely(s->ex_value)) {
6242         /* Drop the EX data now, so that it's clear on exception paths.  */
6243         tcg_gen_st_i64(tcg_constant_i64(0), cpu_env,
6244                        offsetof(CPUS390XState, ex_value));
6245 
6246         /* Extract the values saved by EXECUTE.  */
6247         insn = s->ex_value & 0xffffffffffff0000ull;
6248         ilen = s->ex_value & 0xf;
6249 
6250         /* Register insn bytes with translator so plugins work. */
6251         for (int i = 0; i < ilen; i++) {
6252             uint8_t byte = extract64(insn, 56 - (i * 8), 8);
6253             translator_fake_ldb(byte, pc + i);
6254         }
6255         op = insn >> 56;
6256     } else {
6257         insn = ld_code2(env, s, pc);
6258         op = (insn >> 8) & 0xff;
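        /* The top two bits of the first opcode byte encode the length:
           00 -> 2 bytes, 01/10 -> 4 bytes, 11 -> 6 bytes. */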
6259         ilen = get_ilen(op);
6260         switch (ilen) {
6261         case 2:
6262             insn = insn << 48;
6263             break;
6264         case 4:
6265             insn = ld_code4(env, s, pc) << 32;
6266             break;
6267         case 6:
6268             insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
6269             break;
6270         default:
6271             g_assert_not_reached();
6272         }
6273     }
6274     s->pc_tmp = s->base.pc_next + ilen;
6275     s->ilen = ilen;
6276 
6277     /* We can't actually determine the insn format until we've looked up
6278        the full insn opcode, which we can't do without locating the
6279        secondary opcode.  Assume by default that OP2 is at bit 40; for
6280        those smaller insns that don't actually have a secondary opcode
6281        this will correctly result in OP2 = 0. */
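    /*
     * E.g. 0xb2xx insns keep a full second opcode byte in bits 8-15
     * ((insn << 8) >> 56), while RIL insns such as 0xc0 keep a 4-bit
     * OP2 in bits 12-15 ((insn << 12) >> 60).
     */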
6282     switch (op) {
6283     case 0x01: /* E */
6284     case 0x80: /* S */
6285     case 0x82: /* S */
6286     case 0x93: /* S */
6287     case 0xb2: /* S, RRF, RRE, IE */
6288     case 0xb3: /* RRE, RRD, RRF */
6289     case 0xb9: /* RRE, RRF */
6290     case 0xe5: /* SSE, SIL */
6291         op2 = (insn << 8) >> 56;
6292         break;
6293     case 0xa5: /* RI */
6294     case 0xa7: /* RI */
6295     case 0xc0: /* RIL */
6296     case 0xc2: /* RIL */
6297     case 0xc4: /* RIL */
6298     case 0xc6: /* RIL */
6299     case 0xc8: /* SSF */
6300     case 0xcc: /* RIL */
6301         op2 = (insn << 12) >> 60;
6302         break;
6303     case 0xc5: /* MII */
6304     case 0xc7: /* SMI */
6305     case 0xd0 ... 0xdf: /* SS */
6306     case 0xe1: /* SS */
6307     case 0xe2: /* SS */
6308     case 0xe8: /* SS */
6309     case 0xe9: /* SS */
6310     case 0xea: /* SS */
6311     case 0xee ... 0xf3: /* SS */
6312     case 0xf8 ... 0xfd: /* SS */
6313         op2 = 0;
6314         break;
6315     default:
6316         op2 = (insn << 40) >> 56;
6317         break;
6318     }
6319 
6320     memset(&s->fields, 0, sizeof(s->fields));
6321     s->fields.raw_insn = insn;
6322     s->fields.op = op;
6323     s->fields.op2 = op2;
6324 
6325     /* Look up the instruction.  */
6326     info = lookup_opc(op << 8 | op2);
6327     s->insn = info;
6328 
6329     /* If we found it, extract the operands.  */
6330     if (info != NULL) {
6331         DisasFormat fmt = info->fmt;
6332         int i;
6333 
6334         for (i = 0; i < NUM_C_FIELD; ++i) {
6335             extract_field(&s->fields, &format_info[fmt].op[i], insn);
6336         }
6337     }
6338     return info;
6339 }
6340 
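/* Only FP registers 0, 2, 4 and 6 are usable without AFP; anything odd
   or above 6 is an additional-FP register. */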
6341 static bool is_afp_reg(int reg)
6342 {
6343     return reg % 2 || reg > 6;
6344 }
6345 
6346 static bool is_fp_pair(int reg)
6347 {
6348     /* Valid pairs are 0,1,4,5,8,9,12,13: i.e. bit 1 of the reg number is clear */
6349     return !(reg & 0x2);
6350 }
6351 
6352 static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
6353 {
6354     const DisasInsn *insn;
6355     DisasJumpType ret = DISAS_NEXT;
6356     DisasOps o = {};
6357     bool icount = false;
6358 
6359     /* Search for the insn in the table.  */
6360     insn = extract_insn(env, s);
6361 
6362     /* Update insn_start now that we know the ILEN.  */
6363     tcg_set_insn_start_param(s->insn_start, 2, s->ilen);
6364 
6365     /* Not found means unimplemented/illegal opcode.  */
6366     if (insn == NULL) {
6367         qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
6368                       s->fields.op, s->fields.op2);
6369         gen_illegal_opcode(s);
6370         ret = DISAS_NORETURN;
6371         goto out;
6372     }
6373 
6374 #ifndef CONFIG_USER_ONLY
6375     if (s->base.tb->flags & FLAG_MASK_PER) {
6376         TCGv_i64 addr = tcg_constant_i64(s->base.pc_next);
6377         gen_helper_per_ifetch(cpu_env, addr);
6378     }
6379 #endif
6380 
6381     /* process flags */
6382     if (insn->flags) {
6383         /* privileged instruction */
6384         if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
6385             gen_program_exception(s, PGM_PRIVILEGED);
6386             ret = DISAS_NORETURN;
6387             goto out;
6388         }
6389 
6390         /* if AFP is not enabled, AFP instructions and registers are forbidden */
6391         if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
6392             uint8_t dxc = 0;
6393 
6394             if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
6395                 dxc = 1;
6396             }
6397             if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
6398                 dxc = 1;
6399             }
6400             if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
6401                 dxc = 1;
6402             }
6403             if (insn->flags & IF_BFP) {
6404                 dxc = 2;
6405             }
6406             if (insn->flags & IF_DFP) {
6407                 dxc = 3;
6408             }
6409             if (insn->flags & IF_VEC) {
6410                 dxc = 0xfe;
6411             }
6412             if (dxc) {
6413                 gen_data_exception(dxc);
6414                 ret = DISAS_NORETURN;
6415                 goto out;
6416             }
6417         }
6418 
6419         /* if vector instructions are not enabled, executing them is forbidden */
6420         if (insn->flags & IF_VEC) {
6421             if (!(s->base.tb->flags & FLAG_MASK_VECTOR)) {
6422                 gen_data_exception(0xfe);
6423                 ret = DISAS_NORETURN;
6424                 goto out;
6425             }
6426         }
6427 
6428         /* input/output insns are the special case for icount mode */
6429         if (unlikely(insn->flags & IF_IO)) {
6430             icount = tb_cflags(s->base.tb) & CF_USE_ICOUNT;
6431             if (icount) {
6432                 gen_io_start();
6433             }
6434         }
6435     }
6436 
6437     /* Check for insn specification exceptions.  */
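    /* E.g. writebacks such as wout_r1_D32 carry SPEC_r1_even, so an odd
       r1 in such insns raises PGM_SPECIFICATION below. */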
6438     if (insn->spec) {
6439         if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
6440             (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
6441             (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
6442             (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
6443             (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
6444             gen_program_exception(s, PGM_SPECIFICATION);
6445             ret = DISAS_NORETURN;
6446             goto out;
6447         }
6448     }
6449 
6450     /* Implement the instruction.  */
6451     if (insn->help_in1) {
6452         insn->help_in1(s, &o);
6453     }
6454     if (insn->help_in2) {
6455         insn->help_in2(s, &o);
6456     }
6457     if (insn->help_prep) {
6458         insn->help_prep(s, &o);
6459     }
6460     if (insn->help_op) {
6461         ret = insn->help_op(s, &o);
6462     }
6463     if (ret != DISAS_NORETURN) {
6464         if (insn->help_wout) {
6465             insn->help_wout(s, &o);
6466         }
6467         if (insn->help_cout) {
6468             insn->help_cout(s, &o);
6469         }
6470     }
6471 
6472     /* Free any temporaries created by the helpers.  */
6473     if (o.out && !o.g_out) {
6474         tcg_temp_free_i64(o.out);
6475     }
6476     if (o.out2 && !o.g_out2) {
6477         tcg_temp_free_i64(o.out2);
6478     }
6479     if (o.in1 && !o.g_in1) {
6480         tcg_temp_free_i64(o.in1);
6481     }
6482     if (o.in2 && !o.g_in2) {
6483         tcg_temp_free_i64(o.in2);
6484     }
6485     if (o.addr1) {
6486         tcg_temp_free_i64(o.addr1);
6487     }
6488     if (o.out_128) {
6489         tcg_temp_free_i128(o.out_128);
6490     }
6491     if (o.in1_128) {
6492         tcg_temp_free_i128(o.in1_128);
6493     }
6494     if (o.in2_128) {
6495         tcg_temp_free_i128(o.in2_128);
6496     }
6497     /* An I/O insn should be the last instruction in the TB when icount is enabled */
6498     if (unlikely(icount && ret == DISAS_NEXT)) {
6499         ret = DISAS_TOO_MANY;
6500     }
6501 
6502 #ifndef CONFIG_USER_ONLY
6503     if (s->base.tb->flags & FLAG_MASK_PER) {
6504         /* An exception might be triggered; save the PSW if not already done.  */
6505         if (ret == DISAS_NEXT || ret == DISAS_TOO_MANY) {
6506             tcg_gen_movi_i64(psw_addr, s->pc_tmp);
6507         }
6508 
6509         /* Call the helper to check for a possible PER exception.  */
6510         gen_helper_per_check_exception(cpu_env);
6511     }
6512 #endif
6513 
6514 out:
6515     /* Advance to the next instruction.  */
6516     s->base.pc_next = s->pc_tmp;
6517     return ret;
6518 }
6519 
6520 static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6521 {
6522     DisasContext *dc = container_of(dcbase, DisasContext, base);
6523 
6524     /* 31-bit mode */
6525     if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6526         dc->base.pc_first &= 0x7fffffff;
6527         dc->base.pc_next = dc->base.pc_first;
6528     }
6529 
6530     dc->cc_op = CC_OP_DYNAMIC;
6531     dc->ex_value = dc->base.tb->cs_base;
6532     dc->exit_to_mainloop = (dc->base.tb->flags & FLAG_MASK_PER) || dc->ex_value;
6533 }
6534 
6535 static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
6536 {
6537 }
6538 
6539 static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
6540 {
6541     DisasContext *dc = container_of(dcbase, DisasContext, base);
6542 
6543     /* Delay setting ilen until we've read the insn. */
6544     tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
6545     dc->insn_start = tcg_last_op();
6546 }
6547 
6548 static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
6549                                 uint64_t pc)
6550 {
6551     uint64_t insn = cpu_lduw_code(env, pc);
6552 
6553     return pc + get_ilen((insn >> 8) & 0xff);
6554 }
6555 
6556 static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6557 {
6558     CPUS390XState *env = cs->env_ptr;
6559     DisasContext *dc = container_of(dcbase, DisasContext, base);
6560 
6561     dc->base.is_jmp = translate_one(env, dc);
6562     if (dc->base.is_jmp == DISAS_NEXT) {
6563         if (dc->ex_value ||
6564             !is_same_page(dcbase, dc->base.pc_next) ||
6565             !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) {
6566             dc->base.is_jmp = DISAS_TOO_MANY;
6567         }
6568     }
6569 }
6570 
6571 static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
6572 {
6573     DisasContext *dc = container_of(dcbase, DisasContext, base);
6574 
6575     switch (dc->base.is_jmp) {
6576     case DISAS_NORETURN:
6577         break;
6578     case DISAS_TOO_MANY:
6579         update_psw_addr(dc);
6580         /* FALLTHRU */
6581     case DISAS_PC_UPDATED:
6582         /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
6583            cc op type is in env */
6584         update_cc_op(dc);
6585         /* FALLTHRU */
6586     case DISAS_PC_CC_UPDATED:
6587         /* Exit the TB, either by raising a debug exception or by return.  */
6588         if (dc->exit_to_mainloop) {
6589             tcg_gen_exit_tb(NULL, 0);
6590         } else {
6591             tcg_gen_lookup_and_goto_ptr();
6592         }
6593         break;
6594     default:
6595         g_assert_not_reached();
6596     }
6597 }
6598 
6599 static void s390x_tr_disas_log(const DisasContextBase *dcbase,
6600                                CPUState *cs, FILE *logfile)
6601 {
6602     DisasContext *dc = container_of(dcbase, DisasContext, base);
6603 
6604     if (unlikely(dc->ex_value)) {
6605         /* ??? Unfortunately target_disas can't use host memory.  */
6606         fprintf(logfile, "IN: EXECUTE %016" PRIx64, dc->ex_value);
6607     } else {
6608         fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
6609         target_disas(logfile, cs, dc->base.pc_first, dc->base.tb->size);
6610     }
6611 }
6612 
6613 static const TranslatorOps s390x_tr_ops = {
6614     .init_disas_context = s390x_tr_init_disas_context,
6615     .tb_start           = s390x_tr_tb_start,
6616     .insn_start         = s390x_tr_insn_start,
6617     .translate_insn     = s390x_tr_translate_insn,
6618     .tb_stop            = s390x_tr_tb_stop,
6619     .disas_log          = s390x_tr_disas_log,
6620 };
6621 
6622 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
6623                            target_ulong pc, void *host_pc)
6624 {
6625     DisasContext dc;
6626 
6627     translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
6628 }
6629 
6630 void s390x_restore_state_to_opc(CPUState *cs,
6631                                 const TranslationBlock *tb,
6632                                 const uint64_t *data)
6633 {
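    /* data[] holds the three insn_start parameters recorded at translate
       time: the PC, the CC op, and the ILEN patched in by translate_one. */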
6634     S390CPU *cpu = S390_CPU(cs);
6635     CPUS390XState *env = &cpu->env;
6636     int cc_op = data[1];
6637 
6638     env->psw.addr = data[0];
6639 
6640     /* Update the CC opcode if it is not already up-to-date.  */
6641     if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6642         env->cc_op = cc_op;
6643     }
6644 
6645     /* Record ILEN.  */
6646     env->int_pgm_ilen = data[2];
6647 }
6648