xref: /qemu/target/s390x/tcg/translate.c (revision 86d063fa)
1 /*
2  *  S/390 translation
3  *
4  *  Copyright (c) 2009 Ulrich Hecht
5  *  Copyright (c) 2010 Alexander Graf
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24 
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 #  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 #  define LOG_DISAS(...) do { } while (0)
29 #endif
30 
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "s390x-internal.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
36 #include "tcg/tcg-op.h"
37 #include "tcg/tcg-op-gvec.h"
38 #include "qemu/log.h"
39 #include "qemu/host-utils.h"
40 #include "exec/cpu_ldst.h"
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
44 
45 #include "exec/translator.h"
46 #include "exec/log.h"
47 #include "qemu/atomic128.h"
48 
49 
/* Information that (most) every instruction needs to manipulate.  */
/* Forward typedefs so the decode tables and helpers below can refer
   to each other before the full definitions appear.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;
54 
/*
 * Define a structure to hold the decoded fields.  We'll store each inside
 * an array indexed by an enum.  In order to conserve memory, we'll arrange
 * for fields that do not exist at the same time to overlap, thus the "C"
 * for compact.  For checking purposes there is an "O" for original index
 * as well that will be applied to availability bitmaps.
 */

/* "Original" field index: one distinct value per architectural operand
   field name; used as a bit index into DisasFields.presentO.  */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};
91 
/* "Compact" field index: fields that never co-exist in one instruction
   format share a slot in DisasFields.c[], keeping the array small.  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};
128 
/* Decoded operand fields of the current instruction.  */
struct DisasFields {
    uint64_t raw_insn;        /* the raw instruction bytes, left-aligned */
    unsigned op:8;            /* primary opcode */
    unsigned op2:8;           /* secondary/extended opcode */
    unsigned presentC:16;     /* bitmap over compact slots that are valid */
    unsigned int presentO;    /* bitmap over FLD_O_* fields that are valid */
    int c[NUM_C_FIELD];       /* field values, indexed by FLD_C_* */
};
137 
/* Per-translation-block state threaded through the whole translator.  */
struct DisasContext {
    DisasContextBase base;    /* common translator state (pc_next, tb, ...) */
    const DisasInsn *insn;    /* decode-table entry for the current insn */
    TCGOp *insn_start;        /* the insn_start op, for late argument fixup */
    DisasFields fields;       /* decoded fields of the current insn */
    uint64_t ex_value;        /* EXECUTE target value, 0 if not in EXECUTE */
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;            /* length in bytes of the current insn */
    enum cc_op cc_op;         /* tracked condition-code computation state */
    bool exit_to_mainloop;    /* force exit to the main loop after this TB */
};
154 
/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;           /* the TCG comparison to apply */
    bool is_64;               /* true: use u.s64; false: use u.s32 */
    union {
        struct { TCGv_i64 a, b; } s64;   /* 64-bit comparison operands */
        struct { TCGv_i32 a, b; } s32;   /* 32-bit comparison operands */
    } u;
} DisasCompare;
164 
#ifdef DEBUG_INLINE_BRANCHES
/* Per-cc_op counters of branches we could / could not fold inline.  */
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
169 
/*
 * Store the link information for PC into OUT, honoring the current
 * addressing mode: in 64-bit mode the full address is written; in 31-bit
 * mode the address is or'ed with the high bit and only the low 32 bits of
 * OUT are replaced; in 24-bit mode likewise only the low 32 bits change.
 */
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            /* 64-bit mode: the whole register receives the address.  */
            tcg_gen_movi_i64(out, pc);
            return;
        }
        /* 31-bit mode: the AMODE bit is set in the saved address.  */
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    /* 24/31-bit mode: keep the upper half of OUT intact.  */
    tcg_gen_deposit_i64(out, out, tcg_constant_i64(pc), 0, 32);
}
182 
/* TCG globals mapped onto CPUS390XState fields (see s390x_translate_init). */
static TCGv_i64 psw_addr;     /* psw.addr */
static TCGv_i64 psw_mask;     /* psw.mask */
static TCGv_i64 gbea;         /* breaking-event address (PER) */

static TCGv_i32 cc_op;        /* current cc computation kind / value */
static TCGv_i64 cc_src;       /* cc operand 1 */
static TCGv_i64 cc_dst;       /* cc operand 2 / result */
static TCGv_i64 cc_vr;        /* cc operand 3 (result for 3-input ops) */

static char cpu_reg_names[16][4];   /* backing storage for "r0".."r15" */
static TCGv_i64 regs[16];           /* general registers r0..r15 */
194 
/*
 * One-time registration of the TCG globals above against their slots in
 * CPUS390XState.  Called once at accelerator initialization.
 */
void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* The register names must outlive the TCG context, hence the static
       cpu_reg_names backing array.  */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}
225 
/* Byte offset of the full 16-byte vector register REG within the env.  */
static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}
231 
/*
 * Byte offset within the env of element ENR (of size ES) of vector
 * register REG, adjusted for the host's endianness.
 */
static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
     * of the 16 byte vector, on both, little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#if !HOST_BIG_ENDIAN
    /* Mirror the element position within its 8-byte half (see table).  */
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
266 
/* Offset of the 64-bit float register REG (element 0 of the vector).  */
static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}
272 
/* Offset of the 32-bit float register REG (high word of element 0).  */
static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}
278 
279 static TCGv_i64 load_reg(int reg)
280 {
281     TCGv_i64 r = tcg_temp_new_i64();
282     tcg_gen_mov_i64(r, regs[reg]);
283     return r;
284 }
285 
286 static TCGv_i64 load_freg(int reg)
287 {
288     TCGv_i64 r = tcg_temp_new_i64();
289 
290     tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
291     return r;
292 }
293 
294 static TCGv_i64 load_freg32_i64(int reg)
295 {
296     TCGv_i64 r = tcg_temp_new_i64();
297 
298     tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
299     return r;
300 }
301 
302 static TCGv_i128 load_freg_128(int reg)
303 {
304     TCGv_i64 h = load_freg(reg);
305     TCGv_i64 l = load_freg(reg + 2);
306     TCGv_i128 r = tcg_temp_new_i128();
307 
308     tcg_gen_concat_i64_i128(r, l, h);
309     return r;
310 }
311 
/* Store V into the full 64 bits of general register REG.  */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}
316 
/* Store V into the 64-bit float register REG.  */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}
321 
/* Store the low 32 bits of V into general register REG.  */
static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}
327 
/* Store the low 32 bits of V into the HIGH half of register REG,
   leaving the low half intact.  */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}
332 
/* Store the low 32 bits of V into the 32-bit float register REG.  */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}
337 
/* Fetch the low 64 bits of a helper's 128-bit result from env->retxl.  */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
342 
/* Synchronize the architectural psw.addr with the translator's pc_next.  */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}
348 
/*
 * Record a taken branch for PER: update the breaking-event address and,
 * if PER is active, call the per_branch helper.  TO_NEXT selects whether
 * the branch target is the next sequential insn (pc_tmp) or psw_addr.
 */
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_constant_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
    }
#endif
}
360 
/*
 * Conditional variant of per_branch: only if COND holds for ARG1/ARG2 is
 * the breaking-event address updated (and, under PER, the helper called).
 */
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* Skip over the PER call when the condition does NOT hold.  */
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        /* No PER: just conditionally update gbea, branch-free.  */
        TCGv_i64 pc = tcg_constant_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
    }
#endif
}
379 
/* Unconditionally record the current insn as a PER breaking event.  */
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}
384 
385 static void update_cc_op(DisasContext *s)
386 {
387     if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
388         tcg_gen_movi_i32(cc_op, s->cc_op);
389     }
390 }
391 
392 static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
393                                 uint64_t pc)
394 {
395     return (uint64_t)translator_lduw(env, &s->base, pc);
396 }
397 
398 static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
399                                 uint64_t pc)
400 {
401     return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
402 }
403 
/*
 * Map the current translation flags to the MMU index to use for memory
 * accesses: real mode when DAT is off, otherwise per the PSW ASC bits.
 */
static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        /* All valid ASC encodings are handled above.  */
        tcg_abort();
        break;
    }
#endif
}
426 
/* Raise exception EXCP via the generic exception helper.  */
static void gen_exception(int excp)
{
    gen_helper_exception(cpu_env, tcg_constant_i32(excp));
}
431 
/*
 * Raise a program exception with code CODE: record the code and insn
 * length in the env, sync psw.addr and the cc state, then trap.
 */
static void gen_program_exception(DisasContext *s, int code)
{
    /* Remember what pgm exception this was.  */
    tcg_gen_st_i32(tcg_constant_i32(code), cpu_env,
                   offsetof(CPUS390XState, int_pgm_code));

    tcg_gen_st_i32(tcg_constant_i32(s->ilen), cpu_env,
                   offsetof(CPUS390XState, int_pgm_ilen));

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}
450 
/* Raise an operation (illegal opcode) program exception.  */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}
455 
/* Raise a data exception with data-exception code DXC.  */
static inline void gen_data_exception(uint8_t dxc)
{
    gen_helper_data_exception(cpu_env, tcg_constant_i32(dxc));
}
460 
/* Raise the trap data exception (DXC 0xff).  */
static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}
466 
467 static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
468                                   int64_t imm)
469 {
470     tcg_gen_addi_i64(dst, src, imm);
471     if (!(s->base.tb->flags & FLAG_MASK_64)) {
472         if (s->base.tb->flags & FLAG_MASK_32) {
473             tcg_gen_andi_i64(dst, dst, 0x7fffffff);
474         } else {
475             tcg_gen_andi_i64(dst, dst, 0x00ffffff);
476         }
477     }
478 }
479 
/*
 * Compute the effective address base(B2) + index(X2) + displacement(D2)
 * into a new temporary, wrapped to the current addressing mode.  A zero
 * register number means "no register" per the architecture.
 */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        /* Pure displacement: mask at translate time, no runtime wrap.  */
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}
507 
508 static inline bool live_cc_data(DisasContext *s)
509 {
510     return (s->cc_op != CC_OP_DYNAMIC
511             && s->cc_op != CC_OP_STATIC
512             && s->cc_op > 3);
513 }
514 
/* Set the cc to the constant VAL (0..3), discarding any pending data.  */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        /* Tell TCG the old cc inputs are dead so it can drop them.  */
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}
524 
/* Record a one-operand cc computation OP with input DST.  */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
534 
/* Record a two-operand cc computation OP with inputs SRC and DST.  */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
545 
/* Record a three-operand cc computation OP with inputs SRC, DST, VR.  */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
554 
/* Record a zero/non-zero cc computation on VAL.  */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}
559 
/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        /* The cc inputs are no longer needed; let TCG drop them.  */
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
570 
/* calculates cc into cc_op */
/*
 * Materialize the tracked cc computation into the cc_op global (0..3).
 * The first switch prepares the helper arguments (the constant cc_op
 * value and a zero placeholder for unused operands); the second switch
 * dispatches by operand count.  Afterwards s->cc_op becomes CC_OP_STATIC.
 */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_constant_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_constant_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* These need neither a helper op constant nor a dummy operand.  */
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
        /* Inline: cc = (cc_dst != 0).  */
        tcg_gen_setcondi_i64(TCG_COND_NE, cc_dst, cc_dst, 0);
        tcg_gen_extrl_i64_i32(cc_op, cc_dst);
        break;
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
661 
662 static bool use_goto_tb(DisasContext *s, uint64_t dest)
663 {
664     if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
665         return false;
666     }
667     return translator_use_goto_tb(&s->base, dest);
668 }
669 
/* Debug statistics: count a branch we could NOT fold inline.  */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}
676 
/* Debug statistics: count a branch we folded inline.  */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
683 
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
/* Indexed by the 4-bit branch mask; bit 0 (the CC=3 position) is a
   don't-care, hence each condition appears twice.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
696 
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
/* Indexed by the 4-bit branch mask; bits 0-1 (CC=2/3) are don't-cares.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
709 
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
/*
 * Two phases: first pick the TCG condition the (cc_op, mask) pair maps
 * to, folding to a dynamic cc computation when no direct mapping exists;
 * then load the comparison operands.  Where a condition can be tested
 * directly on the cc inputs (cc_src/cc_dst) no cc value is materialized.
 */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Trivial masks: branch always / branch never.  */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        /* Same table as above, but with unsigned comparisons.  */
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_constant_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_constant_i64(0);
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        /* Test under mask: compare (value & mask) against zero.  */
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_constant_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_constant_i64(0);
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        /* Compare directly against the materialized cc value; pick the
           cheapest test for each mask pattern.  */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, tcg_constant_i32(8), cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
963 
/* ====================================================================== */
/* Define the insn format enumeration.  */
/* Each Fn macro collapses an n-field format description from
   insn-format.h.inc into a single FMT_<name> enumerator.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.h.inc"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
985 
/* This is the way fields are to be accessed out of DisasFields.  */
/* The F token is an operand name (r1, d2, ...); it is pasted onto both
   the original and the compact field-index enumerators.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)
989 
990 static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
991 {
992     return (s->fields.presentO >> c) & 1;
993 }
994 
/* Fetch the value of field O from its compact slot C, asserting that the
   field was actually decoded for this insn.  */
static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}
1001 
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;           /* bit position of the field in the insn */
    unsigned int size:8;          /* field width in bits */
    unsigned int type:2;          /* 0=unsigned, 1=signed, 2=displacement,
                                     3=vector (split encoding) */
    unsigned int indexC:6;        /* compact storage slot (FLD_C_*) */
    enum DisasFieldIndexO indexO:8;   /* original field index (FLD_O_*) */
} DisasField;

/* One field-layout record per compact slot, per instruction format.  */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
1014 
/* Field-layout constructors used inside the format table below:
   register, mask, vector, base+disp (12/20-bit), base+index+disp,
   immediate, and length fields.  N is the operand number, B the bit
   position, S the width.  */
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }
1030 
1031 #define F0(N)                     { { } },
1032 #define F1(N, X1)                 { { X1 } },
1033 #define F2(N, X1, X2)             { { X1, X2 } },
1034 #define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
1035 #define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
1036 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1037 #define F6(N, X1, X2, X3, X4, X5, X6)       { { X1, X2, X3, X4, X5, X6 } },
1038 
1039 static const DisasFormatInfo format_info[] = {
1040 #include "insn-format.h.inc"
1041 };
1042 
1043 #undef F0
1044 #undef F1
1045 #undef F2
1046 #undef F3
1047 #undef F4
1048 #undef F5
1049 #undef F6
1050 #undef R
1051 #undef M
1052 #undef V
1053 #undef BD
1054 #undef BXD
1055 #undef BDL
1056 #undef BXDL
1057 #undef I
1058 #undef L
1059 
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;          /* effective address of a storage operand */
    TCGv_i128 out_128, in1_128, in2_128;  /* 128-bit operand variants */
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have updated the PC and CC values.  */
/* NOTE(review): DISAS_TARGET_1 is not used in this chunk -- presumably
   claimed elsewhere in the file; confirm before reusing.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */
1101 
/* Static description of one translatable instruction: decode info plus
   the helper callbacks that implement it.  */
struct DisasInsn {
    unsigned opc:16;        /* opcode bits used for decode */
    unsigned flags:16;      /* IF_* instruction flags */
    DisasFormat fmt:8;      /* FMT_* decode format */
    unsigned fac:8;         /* required facility -- checked by the decoder
                               outside this chunk; TODO confirm */
    unsigned spec:8;        /* SPEC_* operand constraints */

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;          /* per-insn constant passed via s->insn->data */
};
1128 
1129 /* ====================================================================== */
1130 /* Miscellaneous helpers, used by several operations.  */
1131 
1132 static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
1133 {
1134     if (dest == s->pc_tmp) {
1135         per_branch(s, true);
1136         return DISAS_NEXT;
1137     }
1138     if (use_goto_tb(s, dest)) {
1139         update_cc_op(s);
1140         per_breaking_event(s);
1141         tcg_gen_goto_tb(0);
1142         tcg_gen_movi_i64(psw_addr, dest);
1143         tcg_gen_exit_tb(s->base.tb, 0);
1144         return DISAS_NORETURN;
1145     } else {
1146         tcg_gen_movi_i64(psw_addr, dest);
1147         per_branch(s, false);
1148         return DISAS_PC_UPDATED;
1149     }
1150 }
1151 
/*
 * Emit a conditional branch described by C.
 *
 * If IS_IMM, the target is relative: base.pc_next + imm * 2 (halfword
 * offsets).  Otherwise CDEST holds the computed target address, or is
 * NULL for a branch that can never be taken (e.g. bcr cond,%r0).
 * C->cond may have been folded to TCG_COND_NEVER/ALWAYS by the caller.
 */
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + (int64_t)imm * 2;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    /* A genuinely conditional branch.  Pick the best exit strategy
       depending on which sides may use a chained goto_tb.  */
    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_NORETURN;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_constant_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_constant_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            /* Widen the 32-bit comparison result so a single 64-bit
               movcond can select between the two targets.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_constant_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
        }

        ret = DISAS_PC_UPDATED;
    }

 egress:
    return ret;
}
1273 
1274 /* ====================================================================== */
1275 /* The operations.  These perform the bulk of the work for any insn,
1276    usually after the operands have been loaded and output initialized.  */
1277 
1278 static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
1279 {
1280     tcg_gen_abs_i64(o->out, o->in2);
1281     return DISAS_NEXT;
1282 }
1283 
1284 static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
1285 {
1286     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1287     return DISAS_NEXT;
1288 }
1289 
1290 static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
1291 {
1292     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1293     return DISAS_NEXT;
1294 }
1295 
1296 static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
1297 {
1298     tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1299     tcg_gen_mov_i64(o->out2, o->in2);
1300     return DISAS_NEXT;
1301 }
1302 
/* ADD (signed): out = in1 + in2.  CC is computed by the cout helper.  */
static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* ADD LOGICAL (64-bit): out = in1 + in2, carry-out left in cc_src
   (0 or 1) for the CC_OP_ADDU condition-code computation.  */
static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
1315 
/* Compute carry into cc_src. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        /* Convert the borrow stored for SUBU into a carry by adding 1;
           presumably cc_src holds -1/0 here -- confirm against the
           SUBU cc computation.  */
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        /* Materialize the CC first, then fall into the static case. */
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}
1336 
/* ADD LOGICAL WITH CARRY (32-bit): out = in1 + in2 + carry.  The
   32-bit carry-out is recovered later from the 64-bit sum.  */
static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}

/* ADD LOGICAL WITH CARRY (64-bit): two add2 steps accumulate both the
   carry-in and the carry-out of the full 64-bit addition in cc_src.  */
static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}
1355 
/* ADD (signed) to storage (ASI et al): addr1 addresses the memory
   operand, in2 is the addend, insn->data holds the MemOp.  Uses an
   interlocked update when the facility is installed.  */
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1377 
/* ADD LOGICAL to storage (ALSI/ALGSI): like op_asi but also leaves the
   carry-out in cc_src for the CC_OP_ADDU computation.  */
static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1400 
/* ADD (short BFP): delegated to the softfloat helper.  */
static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* ADD (long BFP).  */
static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* ADD (extended BFP): 128-bit operands.  */
static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out_128, cpu_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}

/* AND: out = in1 & in2.  */
static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1424 
/* AND IMMEDIATE (NIHH et al): insn->data packs the target bit-slice as
   (size << 8) | shift.  Only the selected slice of in1 is ANDed with
   in2; bits outside the slice pass through unchanged.  */
static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    /* NOTE(review): assumes size < 64, else 1ull << size is UB; the
       insn table presumably only uses 16/32-bit slices -- confirm.  */
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    /* Position the immediate, fill the rest with ones, then AND.  */
    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_ori_i64(t, t, ~mask);
    tcg_gen_and_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
1441 
/* AND WITH COMPLEMENT: out = in1 & ~in2.  */
static DisasJumpType op_andc(DisasContext *s, DisasOps *o)
{
    tcg_gen_andc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* OR WITH COMPLEMENT: out = in1 | ~in2.  */
static DisasJumpType op_orc(DisasContext *s, DisasOps *o)
{
    tcg_gen_orc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* NAND: out = ~(in1 & in2).  */
static DisasJumpType op_nand(DisasContext *s, DisasOps *o)
{
    tcg_gen_nand_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* NOR: out = ~(in1 | in2).  */
static DisasJumpType op_nor(DisasContext *s, DisasOps *o)
{
    tcg_gen_nor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* NOT XOR (equivalence): out = ~(in1 ^ in2).  */
static DisasJumpType op_nxor(DisasContext *s, DisasOps *o)
{
    tcg_gen_eqv_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1471 
1472 static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
1473 {
1474     o->in1 = tcg_temp_new_i64();
1475 
1476     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
1477         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
1478     } else {
1479         /* Perform the atomic operation in memory. */
1480         tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
1481                                      s->insn->data);
1482     }
1483 
1484     /* Recompute also for atomic case: needed for setting CC. */
1485     tcg_gen_and_i64(o->out, o->in1, o->in2);
1486 
1487     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
1488         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
1489     }
1490     return DISAS_NEXT;
1491 }
1492 
/* BRANCH AND SAVE: store the return address into out (r1), then branch
   to in2 if it is non-NULL (r2 != 0), else fall through.  */
static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}
1504 
/* Build the BAL-style link information in o->out.  In 31/64-bit modes
   this is just the ordinary link address; in 24-bit mode the high byte
   additionally carries the ILC, CC and program mask taken from the PSW
   -- presumably matching the legacy BAL format; confirm against PoO.  */
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    /* Keep the upper half, insert ILC (bits 62-63 -> 30) and address.  */
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    /* Program mask from the PSW.  */
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    /* Condition code.  */
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
}
1524 
/* BRANCH AND LINK: like BAS but saves the legacy link info format.  */
static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

/* BRANCH RELATIVE AND SAVE: save link info, branch by halfword offset.  */
static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    return help_goto_direct(s, s->base.pc_next + (int64_t)get_field(s, i2) * 2);
}
1542 
/* BRANCH ON CONDITION (BC/BCR/BRC): mask m1 selects the CC values that
   take the branch; a register form with r2 == 0 only serializes.  */
static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s, m1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s, r2) && get_field(s, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    /* Translate the CC mask into a TCG comparison and branch on it.  */
    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1568 
/* BRANCH ON COUNT (32-bit, BCT/BCTR/BRCT): decrement the low word of
   r1 and branch if the result is non-zero.  */
static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;

    /* Decrement in 64 bits, store back only the low 32.  */
    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1589 
/* BRANCH RELATIVE ON COUNT HIGH (BRCTH): decrement the high word of r1
   and branch if the result is non-zero.  Always immediate form.  */
static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int imm = get_field(s, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;

    /* Extract the high word, decrement, store back into bits 32-63.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);

    return help_branch(s, &c, 1, imm, o->in2);
}
1610 
/* BRANCH ON COUNT (64-bit, BCTG et al): decrement r1 in place and
   branch if the result is non-zero.  */
static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_constant_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1627 
/* BRANCH ON INDEX (32-bit, BXH/BXLE): add r3 to r1, compare against the
   odd register of the r3 pair.  insn->data selects LE (BXLE) vs GT (BXH).  */
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    /* The comparand is r3|1: r3 itself when r3 is odd.  */
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1650 
/* BRANCH ON INDEX (64-bit, BXHG/BXLEG): as op_bx32 on full registers.  */
static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    /* If the addition below would clobber the comparand, snapshot it.  */
    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
    } else {
        c.u.s64.b = regs[r3 | 1];
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1673 
/* COMPARE AND BRANCH family (CRJ/CGRJ/CIJ/...): compare in1 with in2
   using the m3 condition; insn->data selects the unsigned (CL*) forms.
   The target is either a relative immediate (i4) or a base+disp
   address (b4/d4).  */
static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s, i4);
    if (is_imm) {
        imm = get_field(s, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1699 
/* COMPARE (short BFP): helper sets cc_op directly.  */
static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* COMPARE (long BFP).  */
static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* COMPARE (extended BFP): 128-bit operands.  */
static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->in1_128, o->in2_128);
    set_cc_static(s);
    return DISAS_NEXT;
}
1720 
/*
 * Extract and validate the m3 (rounding mode) and m4 fields of an FP
 * instruction, packed as deposit32(m3, 4, 4, m4) for the helpers.
 * Fields that predate the floating-point-extension facility are forced
 * to 0 when the facility is absent.  Returns NULL after raising a
 * specification exception for an invalid rounding mode.
 */
static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
                                   bool m4_with_fpe)
{
    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
    uint8_t m3 = get_field(s, m3);
    uint8_t m4 = get_field(s, m4);

    /* m3 field was introduced with FPE */
    if (!fpe && m3_with_fpe) {
        m3 = 0;
    }
    /* m4 field was introduced with FPE */
    if (!fpe && m4_with_fpe) {
        m4 = 0;
    }

    /* Check for valid rounding modes. Mode 3 was introduced later. */
    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return NULL;
    }

    return tcg_constant_i32(deposit32(m3, 4, 4, m4));
}
1745 
/* CONVERT TO FIXED (BFP -> signed 32/64-bit): rounding mode in m3,
   CC set by the helper.  A NULL m34 means a specification exception
   was already raised.  */
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CONVERT TO FIXED (long BFP -> 32-bit).  */
static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CONVERT TO FIXED (extended BFP -> 32-bit).  */
static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, cpu_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CONVERT TO FIXED (short BFP -> 64-bit).  */
static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CONVERT TO FIXED (long BFP -> 64-bit).  */
static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CONVERT TO FIXED (extended BFP -> 64-bit).  */
static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, cpu_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1817 
/* CONVERT TO LOGICAL (BFP -> unsigned 32/64-bit): rounding mode in m3,
   CC set by the helper.  */
static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CONVERT TO LOGICAL (long BFP -> 32-bit).  */
static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CONVERT TO LOGICAL (extended BFP -> 32-bit).  */
static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, cpu_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CONVERT TO LOGICAL (short BFP -> 64-bit).  */
static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CONVERT TO LOGICAL (long BFP -> 64-bit).  */
static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CONVERT TO LOGICAL (extended BFP -> 64-bit).  */
static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, cpu_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1889 
/* CONVERT FROM FIXED / FROM LOGICAL (integer -> BFP): rounding mode in
   m3; no CC is set by these conversions.  */
static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, cpu_env, o->in2, m34);
    return DISAS_NEXT;
}

/* CONVERT FROM FIXED (signed 64-bit -> long BFP).  */
static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
    return DISAS_NEXT;
}

/* CONVERT FROM FIXED (signed 64-bit -> extended BFP).  */
static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out_128, cpu_env, o->in2, m34);
    return DISAS_NEXT;
}

/* CONVERT FROM LOGICAL (unsigned 64-bit -> short BFP).  */
static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, cpu_env, o->in2, m34);
    return DISAS_NEXT;
}

/* CONVERT FROM LOGICAL (unsigned 64-bit -> long BFP).  */
static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
    return DISAS_NEXT;
}

/* CONVERT FROM LOGICAL (unsigned 64-bit -> extended BFP).  */
static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out_128, cpu_env, o->in2, m34);
    return DISAS_NEXT;
}
1954 }
1955 
/* CHECKSUM (CKSM): the helper returns {result, processed length} as a
   128-bit pair; advance the r2 address/length pair accordingly.  */
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    TCGv_i128 pair = tcg_temp_new_i128();
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(pair, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    tcg_gen_extr_i128_i64(o->out, len, pair);

    /* Consume LEN bytes from the second-operand address/length pair.  */
    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);

    return DISAS_NEXT;
}
1971 
/* COMPARE LOGICAL (character, CLC): operand length is l1 + 1 bytes.
   Power-of-two lengths up to 8 are inlined as two loads plus an
   unsigned compare; everything else goes through the helper.  */
static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        vl = tcg_constant_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        set_cc_static(s);
        return DISAS_NEXT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return DISAS_NEXT;
}
2003 
2004 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2005 {
2006     int r1 = get_field(s, r1);
2007     int r2 = get_field(s, r2);
2008     TCGv_i32 t1, t2;
2009 
2010     /* r1 and r2 must be even.  */
2011     if (r1 & 1 || r2 & 1) {
2012         gen_program_exception(s, PGM_SPECIFICATION);
2013         return DISAS_NORETURN;
2014     }
2015 
2016     t1 = tcg_constant_i32(r1);
2017     t2 = tcg_constant_i32(r2);
2018     gen_helper_clcl(cc_op, cpu_env, t1, t2);
2019     set_cc_static(s);
2020     return DISAS_NEXT;
2021 }
2022 
2023 static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
2024 {
2025     int r1 = get_field(s, r1);
2026     int r3 = get_field(s, r3);
2027     TCGv_i32 t1, t3;
2028 
2029     /* r1 and r3 must be even.  */
2030     if (r1 & 1 || r3 & 1) {
2031         gen_program_exception(s, PGM_SPECIFICATION);
2032         return DISAS_NORETURN;
2033     }
2034 
2035     t1 = tcg_constant_i32(r1);
2036     t3 = tcg_constant_i32(r3);
2037     gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
2038     set_cc_static(s);
2039     return DISAS_NEXT;
2040 }
2041 
2042 static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
2043 {
2044     int r1 = get_field(s, r1);
2045     int r3 = get_field(s, r3);
2046     TCGv_i32 t1, t3;
2047 
2048     /* r1 and r3 must be even.  */
2049     if (r1 & 1 || r3 & 1) {
2050         gen_program_exception(s, PGM_SPECIFICATION);
2051         return DISAS_NORETURN;
2052     }
2053 
2054     t1 = tcg_constant_i32(r1);
2055     t3 = tcg_constant_i32(r3);
2056     gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
2057     set_cc_static(s);
2058     return DISAS_NEXT;
2059 }
2060 
2061 static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
2062 {
2063     TCGv_i32 m3 = tcg_constant_i32(get_field(s, m3));
2064     TCGv_i32 t1 = tcg_temp_new_i32();
2065 
2066     tcg_gen_extrl_i64_i32(t1, o->in1);
2067     gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
2068     set_cc_static(s);
2069     return DISAS_NEXT;
2070 }
2071 
/*
 * COMPARE LOGICAL STRING.  The helper (terminator byte in regs[0])
 * returns the two updated operand addresses packed in a 128-bit pair,
 * which are unpacked back into in1/in2 for register write-back.
 */
static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    TCGv_i128 pair = tcg_temp_new_i128();

    gen_helper_clst(pair, cpu_env, regs[0], o->in1, o->in2);
    tcg_gen_extr_i128_i64(o->in2, o->in1, pair);

    set_cc_static(s);
    return DISAS_NEXT;
}
2082 
2083 static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2084 {
2085     TCGv_i64 t = tcg_temp_new_i64();
2086     tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2087     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2088     tcg_gen_or_i64(o->out, o->out, t);
2089     return DISAS_NEXT;
2090 }
2091 
/*
 * COMPARE AND SWAP (CS/CSY/CSG).  The access size comes from
 * s->insn->data; the access must be aligned.
 */
static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    set_cc_static(s);

    return DISAS_NEXT;
}
2114 
/*
 * COMPARE DOUBLE AND SWAP (CDSG): 128-bit aligned atomic compare-and-swap
 * on the register pairs R1:R1+1 (expected) and R3:R3+1 (new).
 */
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);

    o->out_128 = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(o->out_128, regs[r1 + 1], regs[r1]);

    /* Note out (R1:R1+1) = expected value and in2 (R3:R3+1) = new value.  */
    tcg_gen_atomic_cmpxchg_i128(o->out_128, o->addr1, o->out_128, o->in2_128,
                                get_mem_index(s), MO_BE | MO_128 | MO_ALIGN);

    /*
     * Extract result into cc_dst:cc_src, compare vs the expected value
     * in the as yet unmodified input registers, then update CC_OP.
     */
    tcg_gen_extr_i128_i64(cc_src, cc_dst, o->out_128);
    tcg_gen_xor_i64(cc_dst, cc_dst, regs[r1]);
    tcg_gen_xor_i64(cc_src, cc_src, regs[r1 + 1]);
    tcg_gen_or_i64(cc_dst, cc_dst, cc_src);
    set_cc_nz_u64(s, cc_dst);

    return DISAS_NEXT;
}
2138 
/*
 * COMPARE AND SWAP AND STORE.  Use the serialized (exclusive) helper
 * variant when translating for a parallel (MTTCG) context.
 */
static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    TCGv_i32 t_r3 = tcg_constant_i32(r3);

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    } else {
        gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    }

    set_cc_static(s);
    return DISAS_NEXT;
}
2153 
2154 #ifndef CONFIG_USER_ONLY
/*
 * COMPARE AND SWAP AND PURGE.  Privileged: atomically swap, and if the
 * swap succeeded and bit 63 of R2 is set, purge the TLB on all CPUs.
 * Access size (word/doubleword) comes from s->insn->data.
 */
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    MemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    /* Mask off the low address bits used as the purge flag.  */
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);

    gen_helper_purge(cpu_env);
    gen_set_label(lab);

    return DISAS_NEXT;
}
2194 #endif
2195 
/*
 * CONVERT TO DECIMAL.  Convert the low 32 bits of the first operand to
 * packed decimal in the helper and store the 8-byte result at in2.
 */
static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2205 
/*
 * COMPARE AND TRAP.  Branch past the trap when the inverted m3
 * comparison holds; s->insn->data selects the unsigned (logical) form.
 */
static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    /* Invert so the branch skips the trap when the condition is false.  */
    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return DISAS_NEXT;
}
2224 
/*
 * The CONVERT UTF instructions (CU12/CU14/CU21/CU24/CU41/CU42), selected
 * by s->insn->data.  R1 and R2 are register pairs and must be even.  The
 * m3 well-formedness-check flag is only honored with the ETF3 enhancement
 * facility; otherwise it is ignored.
 */
static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_constant_i32(r1);
    tr2 = tcg_constant_i32(r2);
    chk = tcg_constant_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    set_cc_static(s);
    return DISAS_NEXT;
}
2271 
2272 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged): pass r1, r3 and the i2 function code to the helper. */
static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
    TCGv_i32 func_code = tcg_constant_i32(get_field(s, i2));

    gen_helper_diag(cpu_env, r1, r3, func_code);
    return DISAS_NEXT;
}
2282 #endif
2283 
/*
 * 32-bit signed divide.  The helper packs both 32-bit results into one
 * 64-bit value, split here into out (high half) and out2 (low half) —
 * presumably remainder/quotient for the R1/R1+1 pair; see the helper.
 */
static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out, cpu_env, o->in1, o->in2);
    tcg_gen_extr32_i64(o->out2, o->out, o->out);
    return DISAS_NEXT;
}
2290 
/*
 * 32-bit unsigned divide.  As for op_divs32, the helper packs both
 * 32-bit results into one 64-bit value, split into out/out2 here.
 */
static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out, cpu_env, o->in1, o->in2);
    tcg_gen_extr32_i64(o->out2, o->out, o->out);
    return DISAS_NEXT;
}
2297 
/*
 * 64-bit signed divide.  The helper returns both 64-bit results as a
 * 128-bit pair, unpacked into out2 (low) and out (high).
 */
static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();

    gen_helper_divs64(t, cpu_env, o->in1, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, t);
    return DISAS_NEXT;
}
2306 
/*
 * 64-bit unsigned divide.  Note the 128-bit dividend is taken from the
 * output pair out/out2 (the R1:R1+1 register pair), not from in1.
 */
static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();

    gen_helper_divu64(t, cpu_env, o->out, o->out2, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, t);
    return DISAS_NEXT;
}
2315 
/* DIVIDE (short BFP): out = in1 / in2 via the softfloat helper.  */
static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
2321 
/* DIVIDE (long BFP): out = in1 / in2 via the softfloat helper.  */
static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
2327 
/* DIVIDE (extended BFP): 128-bit operands and result via the helper.  */
static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
2333 
/* EXTRACT ACCESS: load access register r2 (zero-extended) into out.  */
static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return DISAS_NEXT;
}
2340 
/* EXTRACT CPU ATTRIBUTE: QEMU models no cache topology, so return all ones. */
static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}
2347 
/* EXTRACT FPC: load the floating-point-control register into out.  */
static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
2353 
/*
 * EXTRACT PSW: store the high half of the PSW mask into r1 and, if r2 is
 * nonzero, its low half into r2.
 */
static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }
    return DISAS_NEXT;
}
2369 
/*
 * EXECUTE (EX/EXRL).  The helper fetches and stages the target
 * instruction (modified by byte 1 of R1 unless r1 is 0); translation
 * restarts at the current PC, so the PSW address and CC must be synced
 * before the call.
 */
static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return DISAS_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    /* r1 == 0 means "no modification" — use a zero value.  */
    if (r1 == 0) {
        v1 = tcg_constant_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_constant_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);

    return DISAS_PC_CC_UPDATED;
}
2396 
/*
 * LOAD FP INTEGER (short BFP).  fpinst_extract_m34 returns NULL when the
 * m3/m4 rounding specification is invalid (exception already generated).
 */
static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fieb(o->out, cpu_env, o->in2, m34);
    return DISAS_NEXT;
}
2407 
/*
 * LOAD FP INTEGER (long BFP).  NULL m34 means the rounding specification
 * was invalid and an exception has already been generated.
 */
static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fidb(o->out, cpu_env, o->in2, m34);
    return DISAS_NEXT;
}
2418 
/*
 * LOAD FP INTEGER (extended BFP).  NULL m34 means the rounding
 * specification was invalid and an exception has already been generated.
 */
static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fixb(o->out_128, cpu_env, o->in2_128, m34);
    return DISAS_NEXT;
}
2429 
/* FIND LEFTMOST ONE: R1 = leading-zero count (64 if zero), R1+1 = residue. */
static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return DISAS_NEXT;
}
2449 
/*
 * INSERT CHARACTERS UNDER MASK.  For contiguous masks this is a single
 * load plus a deposit; otherwise a byte-at-a-time sequence.  The base bit
 * position of the inserted field comes from s->insn->data (ICM vs ICMH).
 * ccm accumulates the mask of inserted bits for the CC computation.
 */
static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, len, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t ccm;

    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit load.  */
        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
        len = 32;
        goto one_insert;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit load.  */
        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
        len = 16;
        goto one_insert;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit load.  */
        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
        len = 8;
        goto one_insert;

    one_insert:
        pos = base + ctz32(m3) * 8;
        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
        ccm = ((1ull << len) - 1) << pos;
        break;

    default:
        /* This is going to be a sequence of loads and inserts.  */
        pos = base + 32 - 8;
        ccm = 0;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
                ccm |= 0xffull << pos;
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }

    tcg_gen_movi_i64(tmp, ccm);
    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
    return DISAS_NEXT;
}
2508 
2509 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2510 {
2511     int shift = s->insn->data & 0xff;
2512     int size = s->insn->data >> 8;
2513     tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2514     return DISAS_NEXT;
2515 }
2516 
2517 static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
2518 {
2519     TCGv_i64 t1, t2;
2520 
2521     gen_op_calc_cc(s);
2522     t1 = tcg_temp_new_i64();
2523     tcg_gen_extract_i64(t1, psw_mask, 40, 4);
2524     t2 = tcg_temp_new_i64();
2525     tcg_gen_extu_i32_i64(t2, cc_op);
2526     tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
2527     tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
2528     return DISAS_NEXT;
2529 }
2530 
2531 #ifndef CONFIG_USER_ONLY
2532 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2533 {
2534     TCGv_i32 m4;
2535 
2536     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2537         m4 = tcg_constant_i32(get_field(s, m4));
2538     } else {
2539         m4 = tcg_constant_i32(0);
2540     }
2541     gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2542     return DISAS_NEXT;
2543 }
2544 
2545 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2546 {
2547     TCGv_i32 m4;
2548 
2549     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2550         m4 = tcg_constant_i32(get_field(s, m4));
2551     } else {
2552         m4 = tcg_constant_i32(0);
2553     }
2554     gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2555     return DISAS_NEXT;
2556 }
2557 
/* INSERT STORAGE KEY EXTENDED: fetch the storage key for the address in in2. */
static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
    gen_helper_iske(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
2563 #endif
2564 
/*
 * The message-security-assist family (KM, KMC, KIMD, KMAC, KMA, ...),
 * dispatched on the feature type in s->insn->data.  Each case adds the
 * register-pair constraints required by that function type (registers
 * even and nonzero where applicable), with deliberate fallthrough so
 * later cases inherit the earlier checks.
 */
static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
{
    int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
    int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
    int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
    TCGv_i32 t_r1, t_r2, t_r3, type;

    switch (s->insn->data) {
    case S390_FEAT_TYPE_KMA:
        if (r3 == r1 || r3 == r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_KMCTR:
        if (r3 & 1 || !r3) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PPNO:
    case S390_FEAT_TYPE_KMF:
    case S390_FEAT_TYPE_KMC:
    case S390_FEAT_TYPE_KMO:
    case S390_FEAT_TYPE_KM:
        if (r1 & 1 || !r1) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_KMAC:
    case S390_FEAT_TYPE_KIMD:
    case S390_FEAT_TYPE_KLMD:
        if (r2 & 1 || !r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PCKMO:
    case S390_FEAT_TYPE_PCC:
        break;
    default:
        g_assert_not_reached();
    };

    t_r1 = tcg_constant_i32(r1);
    t_r2 = tcg_constant_i32(r2);
    t_r3 = tcg_constant_i32(r3);
    type = tcg_constant_i32(s->insn->data);
    gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
    set_cc_static(s);
    return DISAS_NEXT;
}
2618 
/* COMPARE AND SIGNAL (short BFP): helper sets the CC, may raise traps.  */
static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2625 
/* COMPARE AND SIGNAL (long BFP): helper sets the CC, may raise traps.  */
static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2632 
/* COMPARE AND SIGNAL (extended BFP): helper sets the CC, may raise traps. */
static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, cpu_env, o->in1_128, o->in2_128);
    set_cc_static(s);
    return DISAS_NEXT;
}
2639 
/* LOAD AND ADD: atomic fetch-add; the original memory value is the output. */
static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory, which the
       atomic fetch-add returns into in2.  */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC.  */
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2650 
/* LOAD AND AND: atomic fetch-and; the original memory value is the output. */
static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory, which the
       atomic fetch-and returns into in2.  */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2661 
/* LOAD AND OR: atomic fetch-or; the original memory value is the output. */
static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory, which the
       atomic fetch-or returns into in2.  */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2672 
/* LOAD AND EXCLUSIVE OR: atomic fetch-xor; original memory value is output. */
static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory, which the
       atomic fetch-xor returns into in2.  */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2683 
/* LOAD LENGTHENED (short BFP to long): exact widening, no rounding mode.  */
static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
2689 
/*
 * LOAD ROUNDED (long BFP to short).  NULL m34 means the rounding
 * specification was invalid and an exception has already been generated.
 */
static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ledb(o->out, cpu_env, o->in2, m34);
    return DISAS_NEXT;
}
2700 
/*
 * LOAD ROUNDED (extended BFP to long).  NULL m34 means the rounding
 * specification was invalid and an exception has already been generated.
 */
static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ldxb(o->out, cpu_env, o->in2_128, m34);
    return DISAS_NEXT;
}
2711 
/*
 * LOAD ROUNDED (extended BFP to short).  NULL m34 means the rounding
 * specification was invalid and an exception has already been generated.
 */
static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_lexb(o->out, cpu_env, o->in2_128, m34);
    return DISAS_NEXT;
}
2722 
/* LOAD LENGTHENED (long BFP to extended): exact widening via helper.  */
static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out_128, cpu_env, o->in2);
    return DISAS_NEXT;
}
2728 
/* LOAD LENGTHENED (short BFP to extended): exact widening via helper.  */
static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out_128, cpu_env, o->in2);
    return DISAS_NEXT;
}
2734 
/*
 * LOAD (short FP): shift the 32-bit value into the leftmost bits, since
 * short FP values occupy the high half of the 64-bit FP register.
 */
static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
{
    tcg_gen_shli_i64(o->out, o->in2, 32);
    return DISAS_NEXT;
}
2740 
/* LOAD LOGICAL THIRTY ONE BITS: keep only the low 31 bits of the operand. */
static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return DISAS_NEXT;
}
2746 
/* Sign-extending 8-bit load from the address in in2.  */
static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2752 
/* Zero-extending 8-bit load from the address in in2.  */
static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2758 
/* Sign-extending 16-bit load from the address in in2.  */
static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2764 
/* Zero-extending 16-bit load from the address in in2.  */
static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2770 
/*
 * Sign-extending 32-bit load; s->insn->data may carry extra MemOp flags
 * (e.g. alignment requirements) for specific instructions.
 */
static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
                       MO_TESL | s->insn->data);
    return DISAS_NEXT;
}
2777 
/*
 * Zero-extending 32-bit load; s->insn->data may carry extra MemOp flags
 * (e.g. alignment requirements) for specific instructions.
 */
static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
                       MO_TEUL | s->insn->data);
    return DISAS_NEXT;
}
2784 
/*
 * 64-bit load; s->insn->data may carry extra MemOp flags (e.g. alignment
 * requirements) for specific instructions.
 */
static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s),
                        MO_TEUQ | s->insn->data);
    return DISAS_NEXT;
}
2791 
/* LOAD AND TRAP (32-bit): store the value, then trap if it was zero.  */
static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2802 
/* LOAD AND TRAP (64-bit): load the value, then trap if it was zero.  */
static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2813 
/* LOAD HIGH AND TRAP: store into the high half of r1, trap if zero.  */
static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2824 
/* LOAD LOGICAL AND TRAP (32->64): zero-extend the load, trap if zero.  */
static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2835 
/* LOAD LOGICAL THIRTY ONE BITS AND TRAP: mask to 31 bits, trap if zero.  */
static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2846 
/*
 * LOAD ON CONDITION / SELECT: out = condition ? in2 : in1, implemented
 * with movcond.  The condition mask comes from m3 (LOC*) or m4 (SELECT).
 */
static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    if (have_field(s, m3)) {
        /* LOAD * ON CONDITION */
        disas_jcc(s, &c, get_field(s, m3));
    } else {
        /* SELECT */
        disas_jcc(s, &c, get_field(s, m4));
    }

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
    } else {
        /* 32-bit compare: materialize the condition as 0/1, widen, and
           select on it being nonzero.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);

        z = tcg_constant_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
    }

    return DISAS_NEXT;
}
2877 
2878 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): load control registers r1..r3 from storage.  */
static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
2889 
/* LOAD CONTROL (64-bit): load control registers r1..r3 from storage.  */
static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
2900 
/* LOAD REAL ADDRESS: translate the virtual address in in2; CC from helper. */
static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2907 
/* LOAD PROGRAM PARAMETER: store the operand into env->pp.  */
static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
{
    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
    return DISAS_NEXT;
}
2913 
/* LOAD PSW: load a short (64-bit) PSW and expand it to the normal format. */
static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 mask, addr;

    per_breaking_event(s);

    /*
     * Convert the short PSW into the normal PSW, similar to what
     * s390_cpu_load_normal() does.
     */
    mask = tcg_temp_new_i64();
    addr = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(mask, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN_8);
    tcg_gen_andi_i64(addr, mask, PSW_MASK_SHORT_ADDR);
    tcg_gen_andi_i64(mask, mask, PSW_MASK_SHORT_CTRL);
    /* Flip the short-PSW indicator bit while building the normal mask.  */
    tcg_gen_xori_i64(mask, mask, PSW_MASK_SHORTPSW);
    gen_helper_load_psw(cpu_env, mask, addr);
    return DISAS_NORETURN;
}
2933 
/* LOAD PSW EXTENDED: load the 16-byte PSW (mask then address).  */
static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    /* Only the first doubleword carries the alignment requirement.  */
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUQ | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    return DISAS_NORETURN;
}
2949 #endif
2950 
/* LOAD ACCESS MULTIPLE: load access registers r1..r3 from storage.  */
static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lam(cpu_env, r1, o->in2, r3);
    return DISAS_NEXT;
}
2959 
/*
 * LOAD MULTIPLE (32-bit).  Load first and last registers first so any
 * page fault happens before architectural state is partially updated;
 * the remaining loads then cannot fault.  Register numbers wrap mod 16.
 */
static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
    }
    return DISAS_NEXT;
}
2999 
/* Load multiple high: same structure as op_lm32, but the loaded words go
   into the high halves of GRs r1..r3 (store_reg32h_i64).  Load order again
   guarantees all faults happen before any register is modified. */
static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
    }
    return DISAS_NEXT;
}
3039 
/* Load multiple (64-bit): load full GRs r1..r3 (wrapping mod 16) from
   consecutive doublewords at in2.  First and last loads are done into
   temporaries / the last register so all faults precede any visible
   register update. */
static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
    /* Commit the first register only after the last load succeeded. */
    tcg_gen_mov_i64(regs[r1], t1);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
    }
    return DISAS_NEXT;
}
3076 
/* Load pair disjoint: two aligned loads from independent addresses that
   must appear interlocked.  With parallel CPUs we cannot provide that
   guarantee inline, so we stop the world and retry serially. */
static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    MemOp mop = s->insn->data;  /* operand size comes from the insn table */

    /* In a parallel context, stop the world and single step.  */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return DISAS_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
    a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);

    /* ... and indicate that we performed them while interlocked.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
3100 
/* Load pair from quadword: serial contexts use the plain helper; parallel
   contexts need a true 16-byte atomic load, available only when the host
   has 128-bit atomics, otherwise we exit to a serial context. */
static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
{
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_lpq(o->out, cpu_env, o->in2);
    } else if (HAVE_ATOMIC128) {
        gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
    } else {
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    /* Second half of the 128-bit result is returned out of band. */
    return_low128(o->out2);
    return DISAS_NEXT;
}
3114 
3115 #ifndef CONFIG_USER_ONLY
/* Load using real address: bypass normal translation via MMU_REAL_IDX;
   the access size/extension is encoded in insn->data. */
static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
    return DISAS_NEXT;
}
3121 #endif
3122 
3123 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3124 {
3125     tcg_gen_andi_i64(o->out, o->in2, -256);
3126     return DISAS_NEXT;
3127 }
3128 
/* Load count to block boundary: out = min(16, number of bytes from addr1
   up to the next block_size boundary), where block_size = 64 << m3.
   m3 > 6 is a specification exception. */
static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
{
    const int64_t block_size = (1ull << (get_field(s, m3) + 6));

    if (get_field(s, m3) > 6) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* -(addr | -block_size) == block_size - (addr % block_size),
       i.e. the distance to the next boundary. */
    tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
    tcg_gen_neg_i64(o->addr1, o->addr1);
    tcg_gen_movi_i64(o->out, 16);
    tcg_gen_umin_i64(o->out, o->out, o->addr1);
    gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
    return DISAS_NEXT;
}
3145 
/* Monitor call: the class number in i2 must fit in 8 bits.  In system
   mode the helper decides whether a monitor event is raised; in user
   mode the insn is a NOP. */
static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
{
    const uint16_t monitor_class = get_field(s, i2);

    if (monitor_class & 0xff00) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

#if !defined(CONFIG_USER_ONLY)
    gen_helper_monitor_call(cpu_env, o->addr1,
                            tcg_constant_i32(monitor_class));
#endif
    /* Defaults to a NOP. */
    return DISAS_NEXT;
}
3162 
/* Move in2 to out by stealing the TCGv, avoiding a copy; clearing in2
   tells the common wout code not to free/reuse it. */
static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->in2 = NULL;
    return DISAS_NEXT;
}
3169 
/* As op_mov2, but additionally set AR1 according to the current
   address-space control, so access-register translation addresses the
   expected space (0 = primary, 1 = ALET for secondary via AR b2,
   2 = home). */
static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    TCGv ar1 = tcg_temp_new_i64();

    o->out = o->in2;
    o->in2 = NULL;

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        /* In secondary space, copy the ALET from AR b2 (0 if no base). */
        if (b2) {
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    return DISAS_NEXT;
}
3200 
/* Move the in1:in2 pair to out:out2 by stealing both TCGvs (no copies). */
static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->in1 = NULL;
    o->in2 = NULL;
    return DISAS_NEXT;
}
3209 
/* Move character: helper copies the storage operand; l1 is the length
   field (length - 1, per the usual SS-format encoding). */
static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3217 
/* Move right to left: helper takes the byte count from GR0. */
static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
{
    gen_helper_mvcrl(cpu_env, regs[0], o->addr1, o->in2);
    return DISAS_NEXT;
}
3223 
/* Move inverse: helper copies with descending source addresses;
   l1 is the SS-format length field. */
static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3231 
/* Move character long: operands are even/odd register pairs, so odd
   register numbers are a specification exception; the helper does the
   (interruptible) move and sets the CC. */
static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even.  */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t2 = tcg_constant_i32(r2);
    gen_helper_mvcl(cc_op, cpu_env, t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3250 
/* Move long extended: like op_mvcl, but the pad byte address comes in
   through in2; register pairs must be even. */
static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t3 = tcg_constant_i32(r3);
    gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
3269 
/* Move long unicode: same shape as op_mvcle with a different helper. */
static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t3 = tcg_constant_i32(r3);
    gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
3288 
/* Move with optional specifications: GR r3 carries the operand-access
   controls; the helper sets the CC. */
static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3296 
3297 #ifndef CONFIG_USER_ONLY
/* Move to primary: note that for this SSE-like format the "l1" field
   actually holds the register number carrying the true length. */
static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    int r3 = get_field(s, r3);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3306 
/* Move to secondary: mirror of op_mvcp; "l1" again names the length
   register for this format. */
static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    int r3 = get_field(s, r3);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3315 #endif
3316 
/* Move numerics: helper combines digit nibbles; l1 is the length field. */
static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3324 
/* Move with offset: helper shifts decimal digits; l1 is the length field. */
static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3332 
/* Move page: GR0 carries the access controls; helper sets the CC. */
static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3342 
/* Move string: helper copies until the ending character (in GR0) and
   sets the CC. */
static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_mvst(cc_op, cpu_env, t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3352 
/* Move zones: helper combines zone nibbles; l1 is the length field. */
static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3360 
/* Low 64 bits of in1 * in2 (same for signed and unsigned). */
static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3366 
/* Unsigned 64x64->128 multiply: low half to out2, high half to out. */
static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3372 
/* Signed 64x64->128 multiply: low half to out2, high half to out. */
static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
{
    tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3378 
/* BFP multiply, 32-bit operands: delegated to the softfloat helper. */
static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3384 
/* BFP multiply, 32-bit operands widening to a 64-bit result. */
static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3390 
/* BFP multiply, 64-bit operands. */
static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3396 
/* BFP multiply, 128-bit operands. */
static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
3402 
/* BFP multiply, 64-bit second operand widening to a 128-bit result. */
static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out_128, cpu_env, o->in1_128, o->in2);
    return DISAS_NEXT;
}
3408 
/* BFP multiply-and-add, 32-bit: out = in1 * in2 + f(r3). */
static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3415 
/* BFP multiply-and-add, 64-bit: out = in1 * in2 + f(r3). */
static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3422 
/* BFP multiply-and-subtract, 32-bit: out = in1 * in2 - f(r3). */
static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3429 
/* BFP multiply-and-subtract, 64-bit: out = in1 * in2 - f(r3). */
static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3436 
3437 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3438 {
3439     TCGv_i64 z = tcg_constant_i64(0);
3440     TCGv_i64 n = tcg_temp_new_i64();
3441 
3442     tcg_gen_neg_i64(n, o->in2);
3443     tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3444     return DISAS_NEXT;
3445 }
3446 
/* Negative absolute, 32-bit float: force the sign bit on. */
static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
3452 
/* Negative absolute, 64-bit float: force the sign bit on. */
static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
3458 
/* Negative absolute, 128-bit float: sign bit lives in the high half. */
static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3465 
/* AND character: storage-to-storage AND via helper; CC set from result. */
static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3474 
/* Two's complement negate. */
static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3480 
/* Negate 32-bit float: flip the sign bit. */
static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
3486 
/* Negate 64-bit float: flip the sign bit. */
static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
3492 
/* Negate 128-bit float: flip the sign bit in the high half. */
static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3499 
/* OR character: storage-to-storage OR via helper; CC set from result. */
static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3508 
/* Bitwise OR. */
static DisasJumpType op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3514 
/* OR immediate into a sub-field of the register: insn->data packs the
   field's bit position (low byte) and width (high byte).  The CC reflects
   only the manipulated bits. */
static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
3530 
/* OR immediate to storage.  Without interlocked-access-2 this is a plain
   load/modify/store; with it, the OR is performed atomically in memory
   and the result is recomputed locally just to derive the CC. */
static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
3551 
/* Pack: decimal pack via helper; l1 is the SS-format length field. */
static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_pack(cpu_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3559 
/* Pack ASCII: second-operand length is l2 + 1 bytes, capped at 32. */
static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l2 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l2);
    gen_helper_pka(cpu_env, o->addr1, o->in2, l);
    return DISAS_NEXT;
}
3574 
/* Pack Unicode: second-operand length is l2 + 1 bytes, must be even and
   at most 64. */
static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l2 & 1) || (l2 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l2);
    gen_helper_pku(cpu_env, o->addr1, o->in2, l);
    return DISAS_NEXT;
}
3589 
/* Population count: with m3 bit 8 and the misc-ext3 facility, a full
   64-bit count; otherwise the helper computes a per-byte count. */
static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
{
    const uint8_t m3 = get_field(s, m3);

    if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) {
        tcg_gen_ctpop_i64(o->out, o->in2);
    } else {
        gen_helper_popcnt(o->out, o->in2);
    }
    return DISAS_NEXT;
}
3601 
3602 #ifndef CONFIG_USER_ONLY
/* Purge TLB: delegated entirely to the helper. */
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
    gen_helper_ptlb(cpu_env);
    return DISAS_NEXT;
}
3608 #endif
3609 
/* Rotate-then-insert selected bits (RISBG and its high/low variants).
   Rotate in2 left by i5, then insert the bit range i3..i4 into out,
   keeping the bits of out selected by IMASK.  Fast paths use TCG
   extract/deposit when the masks line up. */
static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    int do_zero = i4 & 0x80;    /* "zero remaining bits" flag in i4 */
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  */
    switch (s->fields.op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 = (i3 & 31) + 32;
        i4 = (i4 & 31) + 32;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2. */
    if (i3 <= i4) {
        /* [0...i3---i4...63] */
        mask = (-1ull >> i3) & (-1ull << (63 - i4));
    } else {
        /* [0---i4...i3---63] */
        mask = (-1ull >> i3) | (-1ull << (63 - i4));
    }
    /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register.  */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;

    /* In some cases we can implement this with extract.  */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return DISAS_NEXT;
    }

    /* In some cases we can implement this with deposit.  */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO.  */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output.  */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        /* General case: mask both sides and merge. */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return DISAS_NEXT;
}
3697 
3698 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3699 {
3700     int i3 = get_field(s, i3);
3701     int i4 = get_field(s, i4);
3702     int i5 = get_field(s, i5);
3703     TCGv_i64 orig_out;
3704     uint64_t mask;
3705 
3706     /* If this is a test-only form, arrange to discard the result.  */
3707     if (i3 & 0x80) {
3708         tcg_debug_assert(o->out != NULL);
3709         orig_out = o->out;
3710         o->out = tcg_temp_new_i64();
3711         tcg_gen_mov_i64(o->out, orig_out);
3712     }
3713 
3714     i3 &= 63;
3715     i4 &= 63;
3716     i5 &= 63;
3717 
3718     /* MASK is the set of bits to be operated on from R2.
3719        Take care for I3/I4 wraparound.  */
3720     mask = ~0ull >> i3;
3721     if (i3 <= i4) {
3722         mask ^= ~0ull >> i4 >> 1;
3723     } else {
3724         mask |= ~(~0ull >> i4 >> 1);
3725     }
3726 
3727     /* Rotate the input as necessary.  */
3728     tcg_gen_rotli_i64(o->in2, o->in2, i5);
3729 
3730     /* Operate.  */
3731     switch (s->fields.op2) {
3732     case 0x54: /* AND */
3733         tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3734         tcg_gen_and_i64(o->out, o->out, o->in2);
3735         break;
3736     case 0x56: /* OR */
3737         tcg_gen_andi_i64(o->in2, o->in2, mask);
3738         tcg_gen_or_i64(o->out, o->out, o->in2);
3739         break;
3740     case 0x57: /* XOR */
3741         tcg_gen_andi_i64(o->in2, o->in2, mask);
3742         tcg_gen_xor_i64(o->out, o->out, o->in2);
3743         break;
3744     default:
3745         abort();
3746     }
3747 
3748     /* Set the CC.  */
3749     tcg_gen_andi_i64(cc_dst, o->out, mask);
3750     set_cc_nz_u64(s, cc_dst);
3751     return DISAS_NEXT;
3752 }
3753 
/* Byte-swap the low 16 bits; input/output zero-extended. */
static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}
3759 
/* Byte-swap the low 32 bits; input/output zero-extended. */
static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}
3765 
/* Byte-swap all 64 bits. */
static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3771 
3772 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3773 {
3774     TCGv_i32 t1 = tcg_temp_new_i32();
3775     TCGv_i32 t2 = tcg_temp_new_i32();
3776     TCGv_i32 to = tcg_temp_new_i32();
3777     tcg_gen_extrl_i64_i32(t1, o->in1);
3778     tcg_gen_extrl_i64_i32(t2, o->in2);
3779     tcg_gen_rotl_i32(to, t1, t2);
3780     tcg_gen_extu_i32_i64(o->out, to);
3781     return DISAS_NEXT;
3782 }
3783 
/* 64-bit rotate left. */
static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3789 
3790 #ifndef CONFIG_USER_ONLY
/* Reset reference bit extended: helper updates the storage key and CC. */
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3797 
/* Set address-space control (fast): helper changes the ASC mode. */
static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block.  */
    return DISAS_TOO_MANY;
}
3804 #endif
3805 
/* Set addressing mode (SAM24/31/64, selected by insn->data): deposit the
   AM bits into the PSW mask and end the TB since execution mode may have
   changed.  The current PC must fit in the new mode's address range. */
static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        /* 24-bit addressing */
        mask = 0xffffff;
        break;
    case 1:
        /* 31-bit addressing */
        mask = 0x7fffffff;
        break;
    default:
        /* 64-bit addressing */
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    s->pc_tmp &= mask;

    tsam = tcg_constant_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);

    /* Always exit the TB, since we (may have) changed execution mode.  */
    return DISAS_TOO_MANY;
}
3839 
/* Set access register: store the low 32 bits of in2 into AR r1. */
static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}
3846 
/* BFP subtract, 32-bit operands. */
static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3852 
/* BFP subtract, 64-bit operands. */
static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3858 
/* BFP subtract, 128-bit operands. */
static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
3864 
/* BFP square root, 32-bit operand. */
static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
3870 
/* BFP square root, 64-bit operand. */
static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
3876 
/* BFP square root, 128-bit operand. */
static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out_128, cpu_env, o->in2_128);
    return DISAS_NEXT;
}
3882 
3883 #ifndef CONFIG_USER_ONLY
/* Service call (SCLP): helper performs the call and sets the CC. */
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}
3890 
/* Signal processor: helper performs the order and sets the CC. */
static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
    set_cc_static(s);
    return DISAS_NEXT;
}
3900 #endif
3901 
/* Store on condition (STOC/STOCG/STOCFH, selected by insn->data):
   branch around the store when the condition from m3 is NOT met. */
static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }

    r1 = get_field(s, r1);
    a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
        break;
    case 2: /* STOCFH */
        /* Store the high half of the register. */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st32(h, a, get_mem_index(s));
        break;
    default:
        g_assert_not_reached();
    }

    gen_set_label(lab);
    return DISAS_NEXT;
}
3943 
/* Shift left single (arithmetic): insn->data gives the sign-bit position
   (31 or 63).  The CC is computed from the pre-shift value; the sign bit
   of the result is copied unchanged from the source. */
static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;
    uint64_t sign = 1ull << s->insn->data;
    if (s->insn->data == 31) {
        /* 32-bit form: position the value so the CC helper sees the
           sign bit at bit 63. */
        t = tcg_temp_new_i64();
        tcg_gen_shli_i64(t, o->in1, 32);
    } else {
        t = o->in1;
    }
    gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}
3963 
/* Logical shift left. */
static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3969 
/* Arithmetic shift right. */
static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3975 
/* Logical shift right. */
static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3981 
/* Set FPC register from in2 via helper. */
static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return DISAS_NEXT;
}
3987 
/* Set FPC and signal (may raise a simulated IEEE exception) via helper. */
static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return DISAS_NEXT;
}
3993 
/* Set BFP rounding mode from the 2-bit field in the operand address. */
static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
{
    /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
    gen_helper_srnm(cpu_env, o->addr1);
    return DISAS_NEXT;
}
4001 
/* Set BFP rounding mode (3-bit form); shares the srnm helper. */
static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
{
    /* Bits 0-55 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
    gen_helper_srnm(cpu_env, o->addr1);
    return DISAS_NEXT;
}
4009 
/* Set DFP rounding mode: deposit the 3-bit mode directly into the FPC
   (bits 4-6); no helper needed since DFP itself is not implemented. */
static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bits other than 61-63 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);

    /* No need to call a helper, we don't implement dfp */
    tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
    tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
4023 
/* SET PROGRAM MASK: take CC from bits 28-29 of in1 and the program
   mask from bits 24-27, depositing the latter into the PSW mask. */
static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
{
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return DISAS_NEXT;
}
4034 
/* EXTRACT CPU TIME: computes first operand minus CPU timer into GR0,
   copies the second operand to GR1, and loads r3 from memory.  The
   ordering below is deliberate so a fault leaves state consistent. */
static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s, b1);
    int d1 = get_field(s, d1);
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);
    int r3 = get_field(s, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, cpu_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);
    return DISAS_NEXT;
}
4063 
4064 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: deposit bits of the operand address into
   the PSW key field. */
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return DISAS_NEXT;
}
4071 
/* SET STORAGE KEY EXTENDED: delegated entirely to the helper. */
static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
4077 
/* Raise a specification exception if any reserved PSW-mask bit is set;
   otherwise fall through to the generated "ok" label. */
static void gen_check_psw_mask(DisasContext *s)
{
    TCGv_i64 reserved = tcg_temp_new_i64();
    TCGLabel *ok = gen_new_label();

    tcg_gen_andi_i64(reserved, psw_mask, PSW_MASK_RESERVED);
    tcg_gen_brcondi_i64(TCG_COND_EQ, reserved, 0, ok);
    gen_program_exception(s, PGM_SPECIFICATION);
    gen_set_label(ok);
}
4088 
/* SET SYSTEM MASK: replace the high byte of the PSW mask, validate the
   result, then force an exit so pending interrupts are re-checked. */
static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);

    gen_check_psw_mask(s);

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
4099 
/* STORE CPU ADDRESS: the core id serves as the CPU address. */
static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
    return DISAS_NEXT;
}
4105 #endif
4106 
/* STORE CLOCK: helper produces the TOD value; CC forced to 0. */
static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4114 
/* STORE CLOCK EXTENDED: store the 104-bit extended TOD (clock value
   plus TOD programmable register) as two 64-bit memory words. */
static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* 16-bit value stored in a uint32_t (only valid bits set) */
    tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4138 
4139 #ifndef CONFIG_USER_ONLY
/* SET CLOCK: helper sets the TOD clock and returns the CC. */
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
    gen_helper_sck(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4146 
/* SET CLOCK COMPARATOR. */
static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
{
    gen_helper_sckc(cpu_env, o->in2);
    return DISAS_NEXT;
}
4152 
/* SET CLOCK PROGRAMMABLE FIELD: takes its operand from GR0. */
static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
{
    gen_helper_sckpf(cpu_env, regs[0]);
    return DISAS_NEXT;
}
4158 
/* STORE CLOCK COMPARATOR. */
static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
{
    gen_helper_stckc(o->out, cpu_env);
    return DISAS_NEXT;
}
4164 
/* STORE CONTROL (64-bit): store control registers r1..r3 to memory. */
static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    return DISAS_NEXT;
}
4173 
/* STORE CONTROL (32-bit): store control registers r1..r3 to memory. */
static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    return DISAS_NEXT;
}
4182 
/* STORE CPU ID: read the precomputed cpuid from the env. */
static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
    return DISAS_NEXT;
}
4188 
/* SET CPU TIMER. */
static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
    gen_helper_spt(cpu_env, o->in2);
    return DISAS_NEXT;
}
4194 
/* STORE FACILITY LIST: helper writes the facility bits to low memory. */
static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
    gen_helper_stfl(cpu_env);
    return DISAS_NEXT;
}
4200 
/* STORE CPU TIMER. */
static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
    gen_helper_stpt(o->out, cpu_env);
    return DISAS_NEXT;
}
4206 
/* STORE SYSTEM INFORMATION: function code and selectors come from
   GR0/GR1; the helper returns the CC. */
static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
{
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4213 
/* SET PREFIX. */
static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
    gen_helper_spx(cpu_env, o->in2);
    return DISAS_NEXT;
}
4219 
/* CANCEL SUBCHANNEL: subchannel id in GR1; helper sets the CC. */
static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
{
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4226 
/* CLEAR SUBCHANNEL: subchannel id in GR1; helper sets the CC. */
static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
{
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4233 
/* HALT SUBCHANNEL: subchannel id in GR1; helper sets the CC. */
static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
{
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4240 
/* MODIFY SUBCHANNEL: subchannel id in GR1, SCHIB address in in2. */
static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
{
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4247 
/* RESET CHANNEL PATH: channel path id in GR1; helper sets the CC. */
static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
{
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4254 
/* RESUME SUBCHANNEL: subchannel id in GR1; helper sets the CC. */
static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
{
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4261 
/* SET ADDRESS LIMIT: operand in GR1. */
static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
{
    gen_helper_sal(cpu_env, regs[1]);
    return DISAS_NEXT;
}
4267 
/* SET CHANNEL MONITOR: operands in GR1, GR2 and the effective address. */
static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
{
    gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
    return DISAS_NEXT;
}
4273 
/* SIGNAL ADAPTER: not implemented; report "not operational". */
static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
{
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return DISAS_NEXT;
}
4280 
/* STORE CHANNEL PATH STATUS: intentionally a no-op here. */
static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    /* The instruction is suppressed if not provided. */
    return DISAS_NEXT;
}
4286 
/* START SUBCHANNEL: subchannel id in GR1, ORB address in in2. */
static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4293 
/* STORE SUBCHANNEL: subchannel id in GR1, SCHIB address in in2. */
static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4300 
/* STORE CHANNEL REPORT WORD: helper stores the CRW and sets the CC. */
static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
    gen_helper_stcrw(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4307 
/* TEST PENDING INTERRUPTION: helper returns the CC. */
static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
    gen_helper_tpi(cc_op, cpu_env, o->addr1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4314 
/* TEST SUBCHANNEL: subchannel id in GR1, IRB address in in2. */
static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4321 
/* CHANNEL SUBSYSTEM CALL: helper processes the command block. */
static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4328 
/* STORE PREFIX: read the prefix register, masking to its valid bits. */
static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}
4335 
/* STORE THEN AND/OR SYSTEM MASK (STNSM is opcode 0xac, STOSM 0xad):
   store the current system-mask byte, then AND or OR the immediate
   into the high byte of the PSW mask. */
static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s, i2);
    TCGv_i64 t;

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));

    if (s->fields.op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    gen_check_psw_mask(s);

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
4361 
/* STORE USING REAL ADDRESS: store via the real-address MMU index;
   insn->data carries the MemOp for the access size. */
static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        update_psw_addr(s);
        gen_helper_per_store_real(cpu_env);
    }
    return DISAS_NEXT;
}
4372 #endif
4373 
/* STORE FACILITY LIST EXTENDED: helper returns the CC. */
static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4380 
/* Store the low byte of in1 at address in2. */
static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
4386 
/* Store the low halfword of in1 at address in2. */
static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
4392 
/* 32-bit store; insn->data may add MemOp flags (e.g. alignment). */
static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, get_mem_index(s),
                       MO_TEUL | s->insn->data);
    return DISAS_NEXT;
}
4399 
/* 64-bit store; insn->data may add MemOp flags (e.g. alignment). */
static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s),
                        MO_TEUQ | s->insn->data);
    return DISAS_NEXT;
}
4406 
/* STORE ACCESS MULTIPLE: store access registers r1..r3. */
static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stam(cpu_env, r1, o->in2, r3);
    return DISAS_NEXT;
}
4415 
4416 static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
4417 {
4418     int m3 = get_field(s, m3);
4419     int pos, base = s->insn->data;
4420     TCGv_i64 tmp = tcg_temp_new_i64();
4421 
4422     pos = base + ctz32(m3) * 8;
4423     switch (m3) {
4424     case 0xf:
4425         /* Effectively a 32-bit store.  */
4426         tcg_gen_shri_i64(tmp, o->in1, pos);
4427         tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
4428         break;
4429 
4430     case 0xc:
4431     case 0x6:
4432     case 0x3:
4433         /* Effectively a 16-bit store.  */
4434         tcg_gen_shri_i64(tmp, o->in1, pos);
4435         tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4436         break;
4437 
4438     case 0x8:
4439     case 0x4:
4440     case 0x2:
4441     case 0x1:
4442         /* Effectively an 8-bit store.  */
4443         tcg_gen_shri_i64(tmp, o->in1, pos);
4444         tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4445         break;
4446 
4447     default:
4448         /* This is going to be a sequence of shifts and stores.  */
4449         pos = base + 32 - 8;
4450         while (m3) {
4451             if (m3 & 0x8) {
4452                 tcg_gen_shri_i64(tmp, o->in1, pos);
4453                 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4454                 tcg_gen_addi_i64(o->in2, o->in2, 1);
4455             }
4456             m3 = (m3 << 1) & 0xf;
4457             pos -= 8;
4458         }
4459         break;
4460     }
4461     return DISAS_NEXT;
4462 }
4463 
4464 static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4465 {
4466     int r1 = get_field(s, r1);
4467     int r3 = get_field(s, r3);
4468     int size = s->insn->data;
4469     TCGv_i64 tsize = tcg_constant_i64(size);
4470 
4471     while (1) {
4472         if (size == 8) {
4473             tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4474         } else {
4475             tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4476         }
4477         if (r1 == r3) {
4478             break;
4479         }
4480         tcg_gen_add_i64(o->in2, o->in2, tsize);
4481         r1 = (r1 + 1) & 15;
4482     }
4483 
4484     return DISAS_NEXT;
4485 }
4486 
4487 static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4488 {
4489     int r1 = get_field(s, r1);
4490     int r3 = get_field(s, r3);
4491     TCGv_i64 t = tcg_temp_new_i64();
4492     TCGv_i64 t4 = tcg_constant_i64(4);
4493     TCGv_i64 t32 = tcg_constant_i64(32);
4494 
4495     while (1) {
4496         tcg_gen_shl_i64(t, regs[r1], t32);
4497         tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4498         if (r1 == r3) {
4499             break;
4500         }
4501         tcg_gen_add_i64(o->in2, o->in2, t4);
4502         r1 = (r1 + 1) & 15;
4503     }
4504     return DISAS_NEXT;
4505 }
4506 
/* STORE PAIR TO QUADWORD: atomic 128-bit store.  Falls back to
   exit_atomic when the host lacks 128-bit atomics in parallel mode. */
static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
{
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
    } else if (HAVE_ATOMIC128) {
        gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
    } else {
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    return DISAS_NEXT;
}
4519 
/* SEARCH STRING: helper updates r1/r2 and produces the CC. */
static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_srst(cpu_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4529 
/* SEARCH STRING UNICODE: helper updates r1/r2 and produces the CC. */
static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_srstu(cpu_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4539 
/* SUBTRACT: out = in1 - in2.  CC handled by the cout hook. */
static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
4545 
/* SUBTRACT LOGICAL (64-bit): capture the borrow (0/-1) in cc_src via
   a double-word subtract. */
static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
4552 
/* Compute borrow (0, -1) into cc_src.  Note the deliberate case
   fall-throughs: the generic path computes the CC, extracts the carry
   from its msb, then converts carry (1,0) into borrow (0,-1). */
static void compute_borrow(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_SUBU:
        /* The borrow value is already in cc_src (0,-1). */
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        /* fall through */
    case CC_OP_ADDU:
        /* Convert carry (1,0) to borrow (0,-1). */
        tcg_gen_subi_i64(cc_src, cc_src, 1);
        break;
    }
}
4574 
/* SUBTRACT LOGICAL WITH BORROW (32-bit). */
static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /* Borrow is {0, -1}, so add to subtract. */
    tcg_gen_add_i64(o->out, o->in1, cc_src);
    tcg_gen_sub_i64(o->out, o->out, o->in2);
    return DISAS_NEXT;
}
4584 
/* SUBTRACT LOGICAL WITH BORROW (64-bit): 128-bit arithmetic keeps the
   outgoing borrow in cc_src. */
static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /*
     * Borrow is {0, -1}, so add to subtract; replicate the
     * borrow input to produce 128-bit -1 for the addition.
     */
    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
    tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}
4599 
/* SUPERVISOR CALL: record the SVC code and instruction length in the
   env, then raise the SVC exception. */
static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_constant_i32(get_field(s, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));

    t = tcg_constant_i32(s->ilen);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));

    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
}
4616 
4617 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4618 {
4619     int cc = 0;
4620 
4621     cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4622     cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4623     gen_op_movi_cc(s, cc);
4624     return DISAS_NEXT;
4625 }
4626 
/* TEST DATA CLASS (short BFP): helper returns the CC. */
static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4633 
/* TEST DATA CLASS (long BFP): helper returns the CC. */
static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4640 
/* TEST DATA CLASS (extended BFP): 128-bit input; helper returns the CC. */
static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, cpu_env, o->in1_128, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4647 
4648 #ifndef CONFIG_USER_ONLY
4649 
/* TEST BLOCK: helper clears/tests the block and returns the CC. */
static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
    gen_helper_testblock(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4656 
/* TEST PROTECTION: helper returns the CC. */
static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4663 
4664 #endif
4665 
/* TEST DECIMAL: l1 is length-minus-one in the encoding, hence +1. */
static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_constant_i32(get_field(s, l1) + 1);

    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4674 
/* TRANSLATE: helper performs the table translation. */
static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4683 
/* TRANSLATE EXTENDED: helper returns updated address/length as an
   i128 pair, unpacked back into out2/out. */
static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
{
    TCGv_i128 pair = tcg_temp_new_i128();

    gen_helper_tre(pair, cpu_env, o->out, o->out2, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, pair);
    set_cc_static(s);
    return DISAS_NEXT;
}
4693 
/* TRANSLATE AND TEST: helper returns the CC. */
static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4702 
/* TRANSLATE AND TEST REVERSE: helper returns the CC. */
static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4711 
/* TROO/TROT/TRTO/TRTT family: the low two opcode bits select the
   source/destination character sizes.  The test character comes from
   GR0 unless the ETF2-enhancement m3 bit requests the full range. */
static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
    TCGv_i32 sizes = tcg_constant_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s, m3);

    /* Without the ETF2 enhancement the m3 field is ignored. */
    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        /* -1 disables the test-character comparison in the helper. */
        tcg_gen_movi_i32(tst, -1);
    } else {
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);

    set_cc_static(s);
    return DISAS_NEXT;
}
4738 
/* TEST AND SET: atomically exchange the byte with 0xff; the CC is the
   leftmost bit of the old value. */
static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_constant_i32(0xff);

    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4748 
/* UNPACK: helper converts packed to zoned format. */
static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
4756 
4757 static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4758 {
4759     int l1 = get_field(s, l1) + 1;
4760     TCGv_i32 l;
4761 
4762     /* The length must not exceed 32 bytes.  */
4763     if (l1 > 32) {
4764         gen_program_exception(s, PGM_SPECIFICATION);
4765         return DISAS_NORETURN;
4766     }
4767     l = tcg_constant_i32(l1);
4768     gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4769     set_cc_static(s);
4770     return DISAS_NEXT;
4771 }
4772 
4773 static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
4774 {
4775     int l1 = get_field(s, l1) + 1;
4776     TCGv_i32 l;
4777 
4778     /* The length must be even and should not exceed 64 bytes.  */
4779     if ((l1 & 1) || (l1 > 64)) {
4780         gen_program_exception(s, PGM_SPECIFICATION);
4781         return DISAS_NORETURN;
4782     }
4783     l = tcg_constant_i32(l1);
4784     gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4785     set_cc_static(s);
4786     return DISAS_NEXT;
4787 }
4788 
4789 
/* EXCLUSIVE OR (character): XOR two memory operands of up to 256
   bytes.  When both operands are the same location the result is all
   zeros, so short lengths are inlined as zero stores; everything else
   goes through the helper. */
static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s, d1);
    int d2 = get_field(s, d2);
    int b1 = get_field(s, b1);
    int b2 = get_field(s, b2);
    int l = get_field(s, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_constant_i64(0);

        /* l encodes length-minus-one; emit descending power-of-two
           stores to cover exactly l bytes. */
        l++;
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_constant_i32(l);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4841 
/* EXCLUSIVE OR: out = in1 ^ in2.  CC handled by the cout hook. */
static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
4847 
/* EXCLUSIVE OR IMMEDIATE: XOR a shifted immediate field; insn->data
   packs the field size (high byte) and bit shift (low byte). */
static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
4863 
/* EXCLUSIVE OR on a memory operand: non-atomic load/modify/store, or
   a single atomic fetch-xor when interlocked-access-2 is available. */
static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
4884 
/* Produce a constant zero output. */
static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_constant_i64(0);
    return DISAS_NEXT;
}
4890 
/* Produce a constant zero in both output halves. */
static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_constant_i64(0);
    o->out2 = o->out;
    return DISAS_NEXT;
}
4897 
4898 #ifndef CONFIG_USER_ONLY
/* CLP (PCI list/query): helper sets the CC. */
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_clp(cpu_env, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4907 
/* PCI LOAD: helper sets the CC. */
static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_pcilg(cpu_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4917 
/* PCI STORE: helper sets the CC. */
static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_pcistg(cpu_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4927 
/* STORE PCI FUNCTION CONTROLS: helper sets the CC. */
static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}
4937 
/* SET INTERRUPTION CONTROLS. */
static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
{
    gen_helper_sic(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
4943 
/* REFRESH PCI TRANSLATIONS: helper sets the CC. */
static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_rpcit(cpu_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4953 
/* PCI STORE BLOCK: helper sets the CC. */
static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}
4964 
/* MODIFY PCI FUNCTION CONTROLS: helper sets the CC. */
static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}
4974 #endif
4975 
4976 #include "translate_vx.c.inc"
4977 
4978 /* ====================================================================== */
4979 /* The "Cc OUTput" generators.  Given the generated output (and in some cases
4980    the original inputs), update the various cc data structures in order to
4981    be able to compute the new condition code.  */
4982 
/* CC from a 32-bit absolute-value result. */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}
4987 
/* CC from a 64-bit absolute-value result. */
static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}
4992 
/* CC from a 32-bit signed addition (needs both inputs for overflow). */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}
4997 
/* CC from a 64-bit signed addition (needs both inputs for overflow). */
static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}
5002 
/* CC from a 32-bit unsigned addition: split the 64-bit out into carry
   (high half) and result (low half). */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
}
5009 
/* CC from a 64-bit unsigned addition; carry already in cc_src. */
static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
}
5014 
/* CC from a 32-bit signed comparison. */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}
5019 
/* CC from a 64-bit signed comparison. */
static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}
5024 
/* CC from a 32-bit unsigned comparison. */
static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}
5029 
/* CC from a 64-bit unsigned comparison. */
static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}
5034 
/* CC from a short BFP result. */
static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}
5039 
/* CC from a long BFP result. */
static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}
5044 
/* Set CC from a 128-bit floating-point result, split across out/out2. */
static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}
5049 
/* Set CC from the negative absolute value of a 32-bit result. */
static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}
5054 
/* Set CC from the negative absolute value of a 64-bit result. */
static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}
5059 
/* Set CC from a negated (complemented) 32-bit result. */
static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}
5064 
/* Set CC from a negated (complemented) 64-bit result. */
static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}
5069 
/* Set CC on zero/nonzero of the low 32 bits of the result; the
   truncation must be made explicit before handing it to CC_OP_NZ. */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}
5075 
/* Set CC on zero/nonzero of the 64-bit result. */
static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}
5080 
/* Set CC from a signed 32-bit result compared against zero. */
static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}
5085 
/* Set CC from a signed 64-bit result compared against zero. */
static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}
5090 
/* Set CC for signed 32-bit subtraction; needs both inputs and the output. */
static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}
5095 
/* Set CC for signed 64-bit subtraction; needs both inputs and the output. */
static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}
5100 
/* Set CC for unsigned 32-bit subtraction.  The arithmetic shift puts
   the borrow (0 or -1) into cc_src; cc_dst gets the truncated result. */
static void cout_subu32(DisasContext *s, DisasOps *o)
{
    tcg_gen_sari_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
}
5107 
/* Set CC for unsigned 64-bit subtraction; the op has already left the
   borrow in cc_src. */
static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
}
5112 
/* Set CC for a 32-bit test-under-mask of the two inputs. */
static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}
5117 
/* Set CC for a 64-bit test-under-mask of the two inputs. */
static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
5122 
/* Set CC from a signed 32-bit multiplication result. */
static void cout_muls32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
}
5127 
/* Set CC from a signed 64x64->128-bit multiplication result. */
static void cout_muls64(DisasContext *s, DisasOps *o)
{
    /* out contains "high" part, out2 contains "low" part of 128 bit result */
    gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
}
5133 
5134 /* ====================================================================== */
5135 /* The "PREParation" generators.  These initialize the DisasOps.OUT fields
5136    with the TCG register to which we will write.  Used in combination with
5137    the "wout" generators, in some cases we need a new temporary, and in
5138    some cases we can write to a TCG global.  */
5139 
/* Output goes into a fresh 64-bit temporary. */
static void prep_new(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0
5145 
/* Output pair goes into two fresh 64-bit temporaries. */
static void prep_new_P(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0
5152 
/* Output goes into a fresh 128-bit temporary. */
static void prep_new_x(DisasContext *s, DisasOps *o)
{
    o->out_128 = tcg_temp_new_i128();
}
#define SPEC_prep_new_x 0
5158 
/* Output is written directly into the r1 general register global. */
static void prep_r1(DisasContext *s, DisasOps *o)
{
    o->out = regs[get_field(s, r1)];
}
#define SPEC_prep_r1 0
5164 
/* Output pair is written directly into the even/odd pair r1, r1+1. */
static void prep_r1_P(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
}
#define SPEC_prep_r1_P SPEC_r1_even
5172 
/* Output is the 128-bit FP register pair named by r1, preloaded. */
static void prep_x1(DisasContext *s, DisasOps *o)
{
    o->out_128 = load_freg_128(get_field(s, r1));
}
#define SPEC_prep_x1 SPEC_r1_f128
5178 
5179 /* ====================================================================== */
5180 /* The "Write OUTput" generators.  These generally perform some non-trivial
5181    copy of data to TCG globals, or to main memory.  The trivial cases are
5182    generally handled by having a "prep" generator install the TCG global
5183    as the destination of the operation.  */
5184 
/* Store the 64-bit output into general register r1. */
static void wout_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out);
}
#define SPEC_wout_r1 0
5190 
/* Store the secondary output (out2) into general register r1. */
static void wout_out2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out2);
}
#define SPEC_wout_out2_r1 0
5196 
/* Insert the low 8 bits of the output into r1, preserving the rest. */
static void wout_r1_8(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0
5203 
/* Insert the low 16 bits of the output into r1, preserving the rest. */
static void wout_r1_16(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0
5210 
/* Store the output into the low 32 bits of r1. */
static void wout_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32 0
5216 
/* Store the output into the high 32 bits of r1. */
static void wout_r1_32h(DisasContext *s, DisasOps *o)
{
    store_reg32h_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32h 0
5222 
/* Store out/out2 into the low 32 bits of the even/odd pair r1, r1+1. */
static void wout_r1_P32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even
5230 
/* Split a 64-bit output across the even/odd pair: the low 32 bits go
   into r1+1 and the high 32 bits into r1. */
static void wout_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i64 t = tcg_temp_new_i64();
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(t, o->out, 32);
    store_reg32_i64(r1, t);
}
#define SPEC_wout_r1_D32 SPEC_r1_even
5240 
/* Split a 128-bit output across the even/odd pair: low half to r1+1,
   high half to r1. */
static void wout_r1_D64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_extr_i128_i64(regs[r1 + 1], regs[r1], o->out_128);
}
#define SPEC_wout_r1_D64 SPEC_r1_even
5247 
/* Store out/out2 into the low 32 bits of the even/odd pair r3, r3+1. */
static void wout_r3_P32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even
5255 
/* Store out/out2 as full 64-bit values into the pair r3, r3+1. */
static void wout_r3_P64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even
5263 
/* Store a short (32-bit) FP output into float register r1. */
static void wout_e1(DisasContext *s, DisasOps *o)
{
    store_freg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_e1 0
5269 
/* Store a long (64-bit) FP output into float register r1. */
static void wout_f1(DisasContext *s, DisasOps *o)
{
    store_freg(get_field(s, r1), o->out);
}
#define SPEC_wout_f1 0
5275 
/* Store a 128-bit FP output into the register pair f1/f1+2, while also
   splitting it into out/out2 so that cout_f128 can compute the CC. */
static void wout_x1(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);

    /* Split out_128 into out+out2 for cout_f128. */
    tcg_debug_assert(o->out == NULL);
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(o->out2, o->out, o->out_128);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128
5290 
/* Store an extended FP output already split in out/out2 into f1/f1+2. */
static void wout_x1_P(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1_P SPEC_r1_f128
5298 
5299 static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
5300 {
5301     if (get_field(s, r1) != get_field(s, r2)) {
5302         store_reg32_i64(get_field(s, r1), o->out);
5303     }
5304 }
5305 #define SPEC_wout_cond_r1r2_32 0
5306 
5307 static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
5308 {
5309     if (get_field(s, r1) != get_field(s, r2)) {
5310         store_freg32_i64(get_field(s, r1), o->out);
5311     }
5312 }
5313 #define SPEC_wout_cond_e1e2 0
5314 
5315 static void wout_m1_8(DisasContext *s, DisasOps *o)
5316 {
5317     tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
5318 }
5319 #define SPEC_wout_m1_8 0
5320 
5321 static void wout_m1_16(DisasContext *s, DisasOps *o)
5322 {
5323     tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
5324 }
5325 #define SPEC_wout_m1_16 0
5326 
5327 #ifndef CONFIG_USER_ONLY
/* Aligned 16-bit store of the output to memory at addr1. */
static void wout_m1_16a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
5333 #endif
5334 
5335 static void wout_m1_32(DisasContext *s, DisasOps *o)
5336 {
5337     tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
5338 }
5339 #define SPEC_wout_m1_32 0
5340 
5341 #ifndef CONFIG_USER_ONLY
/* Aligned 32-bit store of the output to memory at addr1. */
static void wout_m1_32a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
5347 #endif
5348 
5349 static void wout_m1_64(DisasContext *s, DisasOps *o)
5350 {
5351     tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
5352 }
5353 #define SPEC_wout_m1_64 0
5354 
5355 #ifndef CONFIG_USER_ONLY
/* Aligned 64-bit store of the output to memory at addr1. */
static void wout_m1_64a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
5361 #endif
5362 
5363 static void wout_m2_32(DisasContext *s, DisasOps *o)
5364 {
5365     tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
5366 }
5367 #define SPEC_wout_m2_32 0
5368 
/* Store the second input operand into general register r1. */
static void wout_in2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1 0
5374 
/* Store the second input operand into the low 32 bits of r1. */
static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
5380 
5381 /* ====================================================================== */
5382 /* The "INput 1" generators.  These load the first operand to an insn.  */
5383 
/* First input: a copy of general register r1. */
static void in1_r1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1));
}
#define SPEC_in1_r1 0
5389 
/* First input: the r1 register global itself (no copy). */
static void in1_r1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1)];
}
#define SPEC_in1_r1_o 0
5395 
/* First input: low 32 bits of r1, sign-extended to 64 bits. */
static void in1_r1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32s 0
5402 
/* First input: low 32 bits of r1, zero-extended to 64 bits. */
static void in1_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32u 0
5409 
/* First input: high 32 bits of r1, shifted down into the low half. */
static void in1_r1_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
}
#define SPEC_in1_r1_sr32 0
5416 
/* First input: a copy of the odd register of the pair (r1+1). */
static void in1_r1p1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even
5422 
/* First input: the r1+1 register global itself (no copy). */
static void in1_r1p1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1) + 1];
}
#define SPEC_in1_r1p1_o SPEC_r1_even
5428 
/* First input: low 32 bits of r1+1, sign-extended to 64 bits. */
static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even
5435 
/* First input: low 32 bits of r1+1, zero-extended to 64 bits. */
static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even
5442 
/* First input: the even/odd pair as one 64-bit value — r1 supplies the
   high 32 bits, r1+1 the low 32 bits. */
static void in1_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even
5450 
/* First input: a copy of general register r2. */
static void in1_r2(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r2));
}
#define SPEC_in1_r2 0
5456 
/* First input: high 32 bits of r2, shifted down into the low half. */
static void in1_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
}
#define SPEC_in1_r2_sr32 0
5463 
/* First input: low 32 bits of r2, zero-extended to 64 bits. */
static void in1_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
}
#define SPEC_in1_r2_32u 0
5470 
/* First input: a copy of general register r3. */
static void in1_r3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r3));
}
#define SPEC_in1_r3 0
5476 
/* First input: the r3 register global itself (no copy). */
static void in1_r3_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r3)];
}
#define SPEC_in1_r3_o 0
5482 
/* First input: low 32 bits of r3, sign-extended to 64 bits. */
static void in1_r3_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32s 0
5489 
/* First input: low 32 bits of r3, zero-extended to 64 bits. */
static void in1_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32u 0
5496 
/* First input: the even/odd pair r3/r3+1 as one 64-bit value — r3 high,
   r3+1 low. */
static void in1_r3_D32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even
5504 
/* First input: high 32 bits of r3, shifted down into the low half. */
static void in1_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32);
}
#define SPEC_in1_r3_sr32 0
5511 
/* First input: short (32-bit) FP value from float register r1. */
static void in1_e1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(s, r1));
}
#define SPEC_in1_e1 0
5517 
/* First input: long (64-bit) FP value from float register r1. */
static void in1_f1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r1));
}
#define SPEC_in1_f1 0
5523 
/* First input: extended (128-bit) FP value from the r1 register pair. */
static void in1_x1(DisasContext *s, DisasOps *o)
{
    o->in1_128 = load_freg_128(get_field(s, r1));
}
#define SPEC_in1_x1 SPEC_r1_f128
5529 
/* Load the high double word of an extended (128-bit) format FP number */
static void in1_x2h(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r2));
}
#define SPEC_in1_x2h SPEC_r2_f128
5536 
/* First input: long (64-bit) FP value from float register r3. */
static void in1_f3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r3));
}
#define SPEC_in1_f3 0
5542 
/* Compute the first-operand effective address from b1 + d1 (no index). */
static void in1_la1(DisasContext *s, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
}
#define SPEC_in1_la1 0
5548 
5549 static void in1_la2(DisasContext *s, DisasOps *o)
5550 {
5551     int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5552     o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5553 }
5554 #define SPEC_in1_la2 0
5555 
5556 static void in1_m1_8u(DisasContext *s, DisasOps *o)
5557 {
5558     in1_la1(s, o);
5559     o->in1 = tcg_temp_new_i64();
5560     tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
5561 }
5562 #define SPEC_in1_m1_8u 0
5563 
5564 static void in1_m1_16s(DisasContext *s, DisasOps *o)
5565 {
5566     in1_la1(s, o);
5567     o->in1 = tcg_temp_new_i64();
5568     tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
5569 }
5570 #define SPEC_in1_m1_16s 0
5571 
5572 static void in1_m1_16u(DisasContext *s, DisasOps *o)
5573 {
5574     in1_la1(s, o);
5575     o->in1 = tcg_temp_new_i64();
5576     tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
5577 }
5578 #define SPEC_in1_m1_16u 0
5579 
5580 static void in1_m1_32s(DisasContext *s, DisasOps *o)
5581 {
5582     in1_la1(s, o);
5583     o->in1 = tcg_temp_new_i64();
5584     tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
5585 }
5586 #define SPEC_in1_m1_32s 0
5587 
5588 static void in1_m1_32u(DisasContext *s, DisasOps *o)
5589 {
5590     in1_la1(s, o);
5591     o->in1 = tcg_temp_new_i64();
5592     tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
5593 }
5594 #define SPEC_in1_m1_32u 0
5595 
5596 static void in1_m1_64(DisasContext *s, DisasOps *o)
5597 {
5598     in1_la1(s, o);
5599     o->in1 = tcg_temp_new_i64();
5600     tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
5601 }
5602 #define SPEC_in1_m1_64 0
5603 
5604 /* ====================================================================== */
5605 /* The "INput 2" generators.  These load the second operand to an insn.  */
5606 
/* Second input: the r1 register global itself (no copy). */
static void in2_r1_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r1)];
}
#define SPEC_in2_r1_o 0
5612 
/* Second input: low 16 bits of r1, zero-extended to 64 bits. */
static void in2_r1_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_16u 0
5619 
/* Second input: low 32 bits of r1, zero-extended to 64 bits. */
static void in2_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_32u 0
5626 
/* Second input: the even/odd pair r1/r1+1 as one 64-bit value — r1
   high, r1+1 low. */
static void in2_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even
5634 
/* Second input: a copy of general register r2. */
static void in2_r2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r2));
}
#define SPEC_in2_r2 0
5640 
/* Second input: the r2 register global itself (no copy). */
static void in2_r2_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r2)];
}
#define SPEC_in2_r2_o 0
5646 
5647 static void in2_r2_nz(DisasContext *s, DisasOps *o)
5648 {
5649     int r2 = get_field(s, r2);
5650     if (r2 != 0) {
5651         o->in2 = load_reg(r2);
5652     }
5653 }
5654 #define SPEC_in2_r2_nz 0
5655 
/* Second input: low 8 bits of r2, sign-extended to 64 bits. */
static void in2_r2_8s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8s 0
5662 
/* Second input: low 8 bits of r2, zero-extended to 64 bits. */
static void in2_r2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8u 0
5669 
/* Second input: low 16 bits of r2, sign-extended to 64 bits. */
static void in2_r2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16s 0
5676 
/* Second input: low 16 bits of r2, zero-extended to 64 bits. */
static void in2_r2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16u 0
5683 
/* Second input: a copy of general register r3. */
static void in2_r3(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r3));
}
#define SPEC_in2_r3 0
5689 
/* Second input: the even/odd pair r3/r3+1 as one 128-bit value — r3
   supplies the high half, r3+1 the low half. */
static void in2_r3_D64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in2_128 = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(o->in2_128, regs[r3 + 1], regs[r3]);
}
#define SPEC_in2_r3_D64 SPEC_r3_even
5697 
/* Second input: high 32 bits of r3, shifted down into the low half. */
static void in2_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
}
#define SPEC_in2_r3_sr32 0
5704 
/* Second input: low 32 bits of r3, zero-extended to 64 bits. */
static void in2_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
}
#define SPEC_in2_r3_32u 0
5711 
/* Second input: low 32 bits of r2, sign-extended to 64 bits. */
static void in2_r2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32s 0
5718 
/* Second input: low 32 bits of r2, zero-extended to 64 bits. */
static void in2_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32u 0
5725 
/* Second input: high 32 bits of r2, shifted down into the low half. */
static void in2_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
}
#define SPEC_in2_r2_sr32 0
5732 
/* Second input: short (32-bit) FP value from float register r2. */
static void in2_e2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(s, r2));
}
#define SPEC_in2_e2 0
5738 
/* Second input: long (64-bit) FP value from float register r2. */
static void in2_f2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2));
}
#define SPEC_in2_f2 0
5744 
/* Second input: extended (128-bit) FP value from the r2 register pair. */
static void in2_x2(DisasContext *s, DisasOps *o)
{
    o->in2_128 = load_freg_128(get_field(s, r2));
}
#define SPEC_in2_x2 SPEC_r2_f128
5750 
/* Load the low double word of an extended (128-bit) format FP number */
static void in2_x2l(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2) + 2);
}
#define SPEC_in2_x2l SPEC_r2_f128
5757 
/* Second input: the address held in r2, wrapped for the current
   addressing mode (24/31/64-bit). */
static void in2_ra2(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);

    /* Note: *don't* treat !r2 as 0, use the reg value. */
    o->in2 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
}
#define SPEC_in2_ra2 0
5767 
5768 static void in2_a2(DisasContext *s, DisasOps *o)
5769 {
5770     int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5771     o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5772 }
5773 #define SPEC_in2_a2 0
5774 
5775 static TCGv gen_ri2(DisasContext *s)
5776 {
5777     int64_t delta = (int64_t)get_field(s, i2) * 2;
5778     TCGv ri2;
5779 
5780     if (unlikely(s->ex_value)) {
5781         ri2 = tcg_temp_new_i64();
5782         tcg_gen_ld_i64(ri2, cpu_env, offsetof(CPUS390XState, ex_target));
5783         tcg_gen_addi_i64(ri2, ri2, delta);
5784     } else {
5785         ri2 = tcg_constant_i64(s->base.pc_next + delta);
5786     }
5787 
5788     return ri2;
5789 }
5790 
/* Second input: the PC-relative address encoded in i2. */
static void in2_ri2(DisasContext *s, DisasOps *o)
{
    o->in2 = gen_ri2(s);
}
#define SPEC_in2_ri2 0
5796 
5797 static void in2_sh(DisasContext *s, DisasOps *o)
5798 {
5799     int b2 = get_field(s, b2);
5800     int d2 = get_field(s, d2);
5801 
5802     if (b2 == 0) {
5803         o->in2 = tcg_constant_i64(d2 & 0x3f);
5804     } else {
5805         o->in2 = get_address(s, 0, b2, d2);
5806         tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
5807     }
5808 }
5809 #define SPEC_in2_sh 0
5810 
5811 static void in2_m2_8u(DisasContext *s, DisasOps *o)
5812 {
5813     in2_a2(s, o);
5814     tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
5815 }
5816 #define SPEC_in2_m2_8u 0
5817 
5818 static void in2_m2_16s(DisasContext *s, DisasOps *o)
5819 {
5820     in2_a2(s, o);
5821     tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
5822 }
5823 #define SPEC_in2_m2_16s 0
5824 
5825 static void in2_m2_16u(DisasContext *s, DisasOps *o)
5826 {
5827     in2_a2(s, o);
5828     tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5829 }
5830 #define SPEC_in2_m2_16u 0
5831 
5832 static void in2_m2_32s(DisasContext *s, DisasOps *o)
5833 {
5834     in2_a2(s, o);
5835     tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5836 }
5837 #define SPEC_in2_m2_32s 0
5838 
5839 static void in2_m2_32u(DisasContext *s, DisasOps *o)
5840 {
5841     in2_a2(s, o);
5842     tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5843 }
5844 #define SPEC_in2_m2_32u 0
5845 
5846 #ifndef CONFIG_USER_ONLY
/* Second input: aligned zero-extended word from the a2 address. */
static void in2_m2_32ua(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
5853 #endif
5854 
5855 static void in2_m2_64(DisasContext *s, DisasOps *o)
5856 {
5857     in2_a2(s, o);
5858     tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5859 }
5860 #define SPEC_in2_m2_64 0
5861 
5862 static void in2_m2_64w(DisasContext *s, DisasOps *o)
5863 {
5864     in2_a2(s, o);
5865     tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5866     gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
5867 }
5868 #define SPEC_in2_m2_64w 0
5869 
5870 #ifndef CONFIG_USER_ONLY
/* Second input: aligned doubleword from the a2 address. */
static void in2_m2_64a(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
5877 #endif
5878 
5879 static void in2_mri2_16s(DisasContext *s, DisasOps *o)
5880 {
5881     o->in2 = tcg_temp_new_i64();
5882     tcg_gen_qemu_ld16s(o->in2, gen_ri2(s), get_mem_index(s));
5883 }
5884 #define SPEC_in2_mri2_16s 0
5885 
5886 static void in2_mri2_16u(DisasContext *s, DisasOps *o)
5887 {
5888     o->in2 = tcg_temp_new_i64();
5889     tcg_gen_qemu_ld16u(o->in2, gen_ri2(s), get_mem_index(s));
5890 }
5891 #define SPEC_in2_mri2_16u 0
5892 
/* Second input: aligned, sign-extended word from the ri2 address. */
static void in2_mri2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
                       MO_TESL | MO_ALIGN);
}
#define SPEC_in2_mri2_32s 0
5900 
/* Second input: aligned, zero-extended word from the ri2 address. */
static void in2_mri2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
                       MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_mri2_32u 0
5908 
/* Second input: aligned doubleword from the ri2 address. */
static void in2_mri2_64(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s),
                        MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_mri2_64 0
5916 
/* Second input: the i2 immediate, sign behavior as decoded. */
static void in2_i2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64(get_field(s, i2));
}
#define SPEC_in2_i2 0
5922 
/* Second input: the i2 immediate truncated to 8 unsigned bits. */
static void in2_i2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint8_t)get_field(s, i2));
}
#define SPEC_in2_i2_8u 0
5928 
/* Second input: the i2 immediate truncated to 16 unsigned bits. */
static void in2_i2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint16_t)get_field(s, i2));
}
#define SPEC_in2_i2_16u 0
5934 
/* Second input: the i2 immediate truncated to 32 unsigned bits. */
static void in2_i2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint32_t)get_field(s, i2));
}
#define SPEC_in2_i2_32u 0
5940 
5941 static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
5942 {
5943     uint64_t i2 = (uint16_t)get_field(s, i2);
5944     o->in2 = tcg_constant_i64(i2 << s->insn->data);
5945 }
5946 #define SPEC_in2_i2_16u_shl 0
5947 
5948 static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
5949 {
5950     uint64_t i2 = (uint32_t)get_field(s, i2);
5951     o->in2 = tcg_constant_i64(i2 << s->insn->data);
5952 }
5953 #define SPEC_in2_i2_32u_shl 0
5954 
5955 #ifndef CONFIG_USER_ONLY
/* Second input: the raw instruction bytes themselves (system only). */
static void in2_insn(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64(s->fields.raw_insn);
}
#define SPEC_in2_insn 0
5961 #endif
5962 
5963 /* ====================================================================== */
5964 
5965 /* Find opc within the table of insns.  This is formulated as a switch
5966    statement so that (1) we get compile-time notice of cut-paste errors
5967    for duplicated opcodes, and (2) the compiler generates the binary
5968    search tree, rather than us having to post-process the table.  */
5969 
5970 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5971     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
5972 
5973 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5974     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
5975 
5976 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
5977     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
5978 
5979 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
5980 
enum DisasInsnEnum {
#include "insn-data.h.inc"   /* the E macro above expands each entry to insn_<NM>, */
};
5984 
5985 #undef E
5986 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
5987     .opc = OPC,                                                             \
5988     .flags = FL,                                                            \
5989     .fmt = FMT_##FT,                                                        \
5990     .fac = FAC_##FC,                                                        \
5991     .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
5992     .name = #NM,                                                            \
5993     .help_in1 = in1_##I1,                                                   \
5994     .help_in2 = in2_##I2,                                                   \
5995     .help_prep = prep_##P,                                                  \
5996     .help_wout = wout_##W,                                                  \
5997     .help_cout = cout_##CC,                                                 \
5998     .help_op = op_##OP,                                                     \
5999     .data = D                                                               \
6000  },
6001 
6002 /* Allow 0 to be used for NULL in the table below.  */
6003 #define in1_0  NULL
6004 #define in2_0  NULL
6005 #define prep_0  NULL
6006 #define wout_0  NULL
6007 #define cout_0  NULL
6008 #define op_0  NULL
6009 
6010 #define SPEC_in1_0 0
6011 #define SPEC_in2_0 0
6012 #define SPEC_prep_0 0
6013 #define SPEC_wout_0 0
6014 
6015 /* Give smaller names to the various facilities.  */
6016 #define FAC_Z           S390_FEAT_ZARCH
6017 #define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6018 #define FAC_DFP         S390_FEAT_DFP
6019 #define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* DFP-rounding */
6020 #define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
6021 #define FAC_EE          S390_FEAT_EXECUTE_EXT
6022 #define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
6023 #define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
6024 #define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPS-sign-handling */
6025 #define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPR-GR-transfer */
6026 #define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6027 #define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
6028 #define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
6029 #define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* IEEE-exception-simulation */
6030 #define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6031 #define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
6032 #define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
6033 #define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
6034 #define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
6035 #define FAC_PC          S390_FEAT_STFLE_45 /* population count */
6036 #define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
6037 #define FAC_SFLE        S390_FEAT_STFLE
6038 #define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6039 #define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6040 #define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6041 #define FAC_DAT_ENH     S390_FEAT_DAT_ENH
6042 #define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
6043 #define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
6044 #define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
6045 #define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6046 #define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
6047 #define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
6048 #define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6049 #define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6050 #define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6051 #define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
6052 #define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
6053 #define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
6054 #define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
6055 #define FAC_V           S390_FEAT_VECTOR /* vector facility */
6056 #define FAC_VE          S390_FEAT_VECTOR_ENH  /* vector enhancements facility 1 */
6057 #define FAC_VE2         S390_FEAT_VECTOR_ENH2 /* vector enhancements facility 2 */
6058 #define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
6059 #define FAC_MIE3        S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */
6060 
/* Table of all implemented instructions, one entry per line of
   insn-data.h.inc (expanded with the decode macros in effect here). */
static const DisasInsn insn_info[] = {
#include "insn-data.h.inc"
};

/* Redefine E so that a second expansion of insn-data.h.inc produces one
   switch case per instruction, mapping its combined opcode to the
   matching insn_info[] entry. */
#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
    case OPC: return &insn_info[insn_ ## NM];

/* Look up the DisasInsn for a combined (major << 8 | minor) opcode.
   Returns NULL for an unknown/unimplemented opcode. */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.h.inc"
    default:
        return NULL;
    }
}

/* Done with the instruction-table helper macros. */
#undef F
#undef E
#undef D
#undef C
6082 
6083 /* Extract a field from the insn.  The INSN should be left-aligned in
6084    the uint64_t so that we can more easily utilize the big-bit-endian
6085    definitions we extract from the Principals of Operation.  */
6086 
6087 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
6088 {
6089     uint32_t r, m;
6090 
6091     if (f->size == 0) {
6092         return;
6093     }
6094 
6095     /* Zero extract the field from the insn.  */
6096     r = (insn << f->beg) >> (64 - f->size);
6097 
6098     /* Sign-extend, or un-swap the field as necessary.  */
6099     switch (f->type) {
6100     case 0: /* unsigned */
6101         break;
6102     case 1: /* signed */
6103         assert(f->size <= 32);
6104         m = 1u << (f->size - 1);
6105         r = (r ^ m) - m;
6106         break;
6107     case 2: /* dl+dh split, signed 20 bit. */
6108         r = ((int8_t)r << 12) | (r >> 8);
6109         break;
6110     case 3: /* MSB stored in RXB */
6111         g_assert(f->size == 4);
6112         switch (f->beg) {
6113         case 8:
6114             r |= extract64(insn, 63 - 36, 1) << 4;
6115             break;
6116         case 12:
6117             r |= extract64(insn, 63 - 37, 1) << 4;
6118             break;
6119         case 16:
6120             r |= extract64(insn, 63 - 38, 1) << 4;
6121             break;
6122         case 32:
6123             r |= extract64(insn, 63 - 39, 1) << 4;
6124             break;
6125         default:
6126             g_assert_not_reached();
6127         }
6128         break;
6129     default:
6130         abort();
6131     }
6132 
6133     /*
6134      * Validate that the "compressed" encoding we selected above is valid.
6135      * I.e. we haven't made two different original fields overlap.
6136      */
6137     assert(((o->presentC >> f->indexC) & 1) == 0);
6138     o->presentC |= 1 << f->indexC;
6139     o->presentO |= 1 << f->indexO;
6140 
6141     o->c[f->indexC] = r;
6142 }
6143 
6144 /* Lookup the insn at the current PC, extracting the operands into O and
6145    returning the info struct for the insn.  Returns NULL for invalid insn.  */
6146 
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    /* With a pending EXECUTE, the target insn was stashed in ex_value:
       the high 48 bits hold the (left-aligned) insn bytes and the low
       4 bits hold its length. */
    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        tcg_gen_st_i64(tcg_constant_i64(0), cpu_env,
                       offsetof(CPUS390XState, ex_value));

        /* Extract the values saved by EXECUTE.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;

        /* Register insn bytes with translator so plugins work. */
        for (int i = 0; i < ilen; i++) {
            uint8_t byte = extract64(insn, 56 - (i * 8), 8);
            translator_fake_ldb(byte, pc + i);
        }
        op = insn >> 56;
    } else {
        /* The first halfword's major opcode determines the total insn
           length (2, 4 or 6 bytes); read the remainder accordingly and
           keep the whole insn left-aligned in the 64-bit word. */
        insn = ld_code2(env, s, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, s, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    /* pc_tmp is the address of the next sequential insn. */
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    /* Record the raw decode results for later use (e.g. error reporting). */
    memset(&s->fields, 0, sizeof(s->fields));
    s->fields.raw_insn = insn;
    s->fields.op = op;
    s->fields.op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);
    s->insn = info;

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(&s->fields, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
6252 
/* True if REG is an additional floating-point register, i.e. anything
   other than registers 0, 2, 4 and 6. */
static bool is_afp_reg(int reg)
{
    return (reg & 1) != 0 || reg >= 7;
}
6257 
/* True if REG can name a 128-bit FP register pair.
   Valid pair numbers are 0,1,4,5,8,9,12,13 — exactly those with bit 1 clear. */
static bool is_fp_pair(int reg)
{
    return (reg & 0x2) == 0;
}
6263 
/*
 * Decode and translate a single instruction at s->base.pc_next.
 * Returns the resulting disas status; DISAS_NORETURN indicates that an
 * exception (illegal opcode, privileged operation, data exception,
 * specification exception, ...) has been generated instead.
 */
static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasOps o = {};
    bool icount = false;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s);

    /* Update insn_start now that we know the ILEN.  */
    tcg_set_insn_start_param(s->insn_start, 2, s->ilen);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      s->fields.op, s->fields.op2);
        gen_illegal_opcode(s);
        ret = DISAS_NORETURN;
        goto out;
    }

#ifndef CONFIG_USER_ONLY
    /* With PER tracing enabled, report the instruction fetch. */
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_constant_i64(s->base.pc_next);
        gen_helper_per_ifetch(cpu_env, addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            ret = DISAS_NORETURN;
            goto out;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            /* DXC code for the data exception; 0 means "no exception". */
            uint8_t dxc = 0;

            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (insn->flags & IF_VEC) {
                dxc = 0xfe;
            }
            if (dxc) {
                gen_data_exception(dxc);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* if vector instructions not enabled, executing them is forbidden */
        if (insn->flags & IF_VEC) {
            if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
                gen_data_exception(0xfe);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* input/output is the special case for icount mode */
        if (unlikely(insn->flags & IF_IO)) {
            icount = tb_cflags(s->base.tb) & CF_USE_ICOUNT;
            if (icount) {
                gen_io_start();
            }
        }
    }

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            ret = DISAS_NORETURN;
            goto out;
        }
    }

    /* Implement the instruction: run the per-insn helper pipeline —
       load inputs (in1/in2), prepare (prep), perform the operation (op),
       then, unless an exception path was taken, write back the outputs
       (wout) and the condition code (cout). */
    if (insn->help_in1) {
        insn->help_in1(s, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (ret != DISAS_NORETURN) {
        if (insn->help_wout) {
            insn->help_wout(s, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* io should be the last instruction in tb when icount is enabled */
    if (unlikely(icount && ret == DISAS_NEXT)) {
        ret = DISAS_TOO_MANY;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == DISAS_NEXT || ret == DISAS_TOO_MANY) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

out:
    /* Advance to the next instruction.  */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
6406 
6407 static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6408 {
6409     DisasContext *dc = container_of(dcbase, DisasContext, base);
6410 
6411     /* 31-bit mode */
6412     if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6413         dc->base.pc_first &= 0x7fffffff;
6414         dc->base.pc_next = dc->base.pc_first;
6415     }
6416 
6417     dc->cc_op = CC_OP_DYNAMIC;
6418     dc->ex_value = dc->base.tb->cs_base;
6419     dc->exit_to_mainloop = (dc->base.tb->flags & FLAG_MASK_PER) || dc->ex_value;
6420 }
6421 
/* Translator hook: no per-TB prologue is needed for s390x. */
static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
6425 
/* Translator hook: emit the insn_start op recording pc and cc_op.
   The op is remembered in dc->insn_start so that translate_one can
   patch in the real ILEN (third parameter) once the insn is decoded. */
static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* Delay the set of ilen until we've read the insn. */
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
    dc->insn_start = tcg_last_op();
}
6434 
6435 static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
6436                                 uint64_t pc)
6437 {
6438     uint64_t insn = cpu_lduw_code(env, pc);
6439 
6440     return pc + get_ilen((insn >> 8) & 0xff);
6441 }
6442 
6443 static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6444 {
6445     CPUS390XState *env = cs->env_ptr;
6446     DisasContext *dc = container_of(dcbase, DisasContext, base);
6447 
6448     dc->base.is_jmp = translate_one(env, dc);
6449     if (dc->base.is_jmp == DISAS_NEXT) {
6450         if (dc->ex_value ||
6451             !is_same_page(dcbase, dc->base.pc_next) ||
6452             !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) {
6453             dc->base.is_jmp = DISAS_TOO_MANY;
6454         }
6455     }
6456 }
6457 
/* Translator hook: finalize the TB according to how translation ended.
   Note the deliberate fallthrough chain: TOO_MANY additionally flushes
   psw_addr, PC_UPDATED additionally flushes cc_op, and all three fall
   into the common TB-exit code. */
static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        /* An exception was raised; nothing more to emit. */
        break;
    case DISAS_TOO_MANY:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (dc->exit_to_mainloop) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
6485 
6486 static void s390x_tr_disas_log(const DisasContextBase *dcbase,
6487                                CPUState *cs, FILE *logfile)
6488 {
6489     DisasContext *dc = container_of(dcbase, DisasContext, base);
6490 
6491     if (unlikely(dc->ex_value)) {
6492         /* ??? Unfortunately target_disas can't use host memory.  */
6493         fprintf(logfile, "IN: EXECUTE %016" PRIx64, dc->ex_value);
6494     } else {
6495         fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
6496         target_disas(logfile, cs, dc->base.pc_first, dc->base.tb->size);
6497     }
6498 }
6499 
/* Hook table consumed by the generic translator_loop. */
static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};
6508 
/* Target entry point for code generation: drive the generic translator
   loop with the s390x-specific hooks above. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
}
6516 
6517 void s390x_restore_state_to_opc(CPUState *cs,
6518                                 const TranslationBlock *tb,
6519                                 const uint64_t *data)
6520 {
6521     S390CPU *cpu = S390_CPU(cs);
6522     CPUS390XState *env = &cpu->env;
6523     int cc_op = data[1];
6524 
6525     env->psw.addr = data[0];
6526 
6527     /* Update the CC opcode if it is not already up-to-date.  */
6528     if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6529         env->cc_op = cc_op;
6530     }
6531 
6532     /* Record ILEN.  */
6533     env->int_pgm_ilen = data[2];
6534 }
6535