xref: /qemu/target/s390x/tcg/translate.c (revision 370ed600)
1 /*
2  *  S/390 translation
3  *
4  *  Copyright (c) 2009 Ulrich Hecht
5  *  Copyright (c) 2010 Alexander Graf
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
24 
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 #  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 #  define LOG_DISAS(...) do { } while (0)
29 #endif
30 
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "s390x-internal.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
36 #include "tcg/tcg-op.h"
37 #include "tcg/tcg-op-gvec.h"
38 #include "qemu/log.h"
39 #include "qemu/host-utils.h"
40 #include "exec/cpu_ldst.h"
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
44 
45 #include "exec/translator.h"
46 #include "exec/log.h"
47 #include "qemu/atomic128.h"
48 
49 
50 /* Information that (most) every instruction needs to manipulate.  */
51 typedef struct DisasContext DisasContext;
52 typedef struct DisasInsn DisasInsn;
53 typedef struct DisasFields DisasFields;
54 
55 /*
56  * Define a structure to hold the decoded fields.  We'll store each inside
57  * an array indexed by an enum.  In order to conserve memory, we'll arrange
58  * for fields that do not exist at the same time to overlap, thus the "C"
59  * for compact.  For checking purposes there is an "O" for original index
60  * as well that will be applied to availability bitmaps.
61  */
62 
/*
 * "Original" (O) field indices: one entry per distinct field name used
 * by the instruction format descriptions.  These values are used as bit
 * positions in the DisasFields.presentO availability bitmap.
 */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};
91 
/*
 * "Compact" (C) field indices: the storage slot in DisasFields.c[] for
 * each field.  Fields that share a slot never appear in the same
 * instruction format, which is what makes the overlap safe.
 */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};
128 
/* The decoded fields of the current instruction.  */
struct DisasFields {
    uint64_t raw_insn;      /* raw instruction bits, as fetched */
    unsigned op:8;          /* primary opcode byte */
    unsigned op2:8;         /* secondary opcode, where the format has one */
    unsigned presentC:16;   /* bitmap of valid compact (C) slots */
    unsigned int presentO;  /* bitmap of valid original (O) indices */
    int c[NUM_C_FIELD];     /* field values, indexed by compact index */
};
137 
/* Per-translation-block state for the s390x frontend.  */
struct DisasContext {
    DisasContextBase base;      /* common translator state; must be first */
    const DisasInsn *insn;      /* decode-table entry for the current insn */
    TCGOp *insn_start;          /* the insn_start op of the current insn */
    DisasFields fields;         /* decoded operand fields of current insn */
    uint64_t ex_value;          /* pending EXECUTE target value, if any */
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;              /* length in bytes of the current insn */
    enum cc_op cc_op;           /* how the CC is currently computed */
    bool exit_to_mainloop;      /* force exit to main loop after this TB */
};
154 
/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;     /* TCG comparison to apply to the operands */
    bool is_64;         /* true: use u.s64 operands; false: u.s32 */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
164 
#ifdef DEBUG_INLINE_BRANCHES
/* Per-cc_op counters of branches we could / could not inline.  */
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
169 
170 static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
171 {
172     if (s->base.tb->flags & FLAG_MASK_32) {
173         if (s->base.tb->flags & FLAG_MASK_64) {
174             tcg_gen_movi_i64(out, pc);
175             return;
176         }
177         pc |= 0x80000000;
178     }
179     assert(!(s->base.tb->flags & FLAG_MASK_64));
180     tcg_gen_deposit_i64(out, out, tcg_constant_i64(pc), 0, 32);
181 }
182 
/* TCG globals mapping onto the architectural CPU state.  */
static TCGv_i64 psw_addr;       /* PSW address (current PC) */
static TCGv_i64 psw_mask;       /* PSW mask */
static TCGv_i64 gbea;           /* breaking-event address (PER) */

static TCGv_i32 cc_op;          /* current cc computation selector / value */
static TCGv_i64 cc_src;         /* cc helper operand 1 */
static TCGv_i64 cc_dst;         /* cc helper operand 2 */
static TCGv_i64 cc_vr;          /* cc helper operand 3 */

static char cpu_reg_names[16][4];   /* "r0" .. "r15" */
static TCGv_i64 regs[16];           /* general purpose registers */
194 
195 void s390x_translate_init(void)
196 {
197     int i;
198 
199     psw_addr = tcg_global_mem_new_i64(cpu_env,
200                                       offsetof(CPUS390XState, psw.addr),
201                                       "psw_addr");
202     psw_mask = tcg_global_mem_new_i64(cpu_env,
203                                       offsetof(CPUS390XState, psw.mask),
204                                       "psw_mask");
205     gbea = tcg_global_mem_new_i64(cpu_env,
206                                   offsetof(CPUS390XState, gbea),
207                                   "gbea");
208 
209     cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
210                                    "cc_op");
211     cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
212                                     "cc_src");
213     cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
214                                     "cc_dst");
215     cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
216                                    "cc_vr");
217 
218     for (i = 0; i < 16; i++) {
219         snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
220         regs[i] = tcg_global_mem_new(cpu_env,
221                                      offsetof(CPUS390XState, regs[i]),
222                                      cpu_reg_names[i]);
223     }
224 }
225 
/* Return the env offset of the first byte of vector register REG.  */
static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}
231 
/*
 * Return the env offset of element ENR (of size ES) within vector
 * register REG, accounting for host endianness.
 */
static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
     * of the 16 byte vector, on both, little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#if !HOST_BIG_ENDIAN
    /* Mirror the element's position within its 8-byte half (see above).  */
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
266 
/* Return the env offset of FP register REG, which overlays the high
   8 bytes (element 0) of vector register REG.  */
static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

/* Return the env offset of the short (32-bit) form of FP register REG,
   which overlays the high 4 bytes (element 0) of vector register REG.  */
static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}
278 
/* Return a fresh temporary holding a copy of GPR REG.  */
static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

/* Return a fresh temporary holding FP register REG (64-bit form).  */
static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}

/* Return a fresh temporary holding the 32-bit FP register REG,
   zero-extended to 64 bits.  */
static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}

/* Return a fresh 128-bit temporary holding the FP register pair
   REG:REG+2 (REG holds the high half).  */
static TCGv_i128 load_freg_128(int reg)
{
    TCGv_i64 h = load_freg(reg);
    TCGv_i64 l = load_freg(reg + 2);
    TCGv_i128 r = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(r, l, h);
    return r;
}

/* Store V into GPR REG.  */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

/* Store V into FP register REG (64-bit form).  */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

/* Store the low 32 bits of V into the HIGH half of GPR REG.  */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

/* Store the low 32 bits of V into the 32-bit FP register REG.  */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}

/* Fetch the low 64 bits of a 128-bit helper result from env->retxl.  */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
342 
/* Synchronize psw.addr with the translator's current PC.  */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

/*
 * Record an unconditional branch as a breaking event and, when PER is
 * enabled, invoke the PER branch helper.  TO_NEXT selects the next
 * sequential instruction (pc_tmp) as the branch target instead of the
 * already-updated psw_addr.
 */
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_constant_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
    }
#endif
}

/*
 * As per_branch, but for a branch taken only when COND holds for
 * ARG1/ARG2.  Without PER we still conditionally track the
 * breaking-event address in gbea via movcond.
 */
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        /* Skip the helper when the branch is not taken.  */
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_constant_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
    }
#endif
}

/* Record the current PC as a PER breaking-event address.  */
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

/* Flush the translator's compile-time cc_op value to env, unless the
   runtime value is already authoritative (DYNAMIC/STATIC).  */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}
391 
/* Fetch 2 instruction bytes at PC, zero-extended.  */
static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)translator_lduw(env, &s->base, pc);
}

/* Fetch 4 instruction bytes at PC, zero-extended.  */
static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
}
403 
/* Select the MMU index for memory accesses, based on whether DAT is
   enabled and on the PSW address-space-control bits.  */
static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        /* DAT off: real addressing.  */
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        /* PSW_ASC_ACCREG is not reachable here (handled elsewhere,
           presumably) — confirm against the PSW flag handling.  */
        g_assert_not_reached();
        break;
    }
#endif
}
426 
/* Raise exception EXCP; does not return to the generated code.  */
static void gen_exception(int excp)
{
    gen_helper_exception(cpu_env, tcg_constant_i32(excp));
}

/* Raise a program exception with code CODE, after recording the
   exception details and synchronizing PSW address and cc.  */
static void gen_program_exception(DisasContext *s, int code)
{
    /* Remember what pgm exception this was.  */
    tcg_gen_st_i32(tcg_constant_i32(code), cpu_env,
                   offsetof(CPUS390XState, int_pgm_code));

    tcg_gen_st_i32(tcg_constant_i32(s->ilen), cpu_env,
                   offsetof(CPUS390XState, int_pgm_ilen));

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

/* Raise an operation (illegal opcode) program exception.  */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

/* Raise a data exception with data-exception code DXC.  */
static inline void gen_data_exception(uint8_t dxc)
{
    gen_helper_data_exception(cpu_env, tcg_constant_i32(dxc));
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}
466 
467 static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
468                                   int64_t imm)
469 {
470     tcg_gen_addi_i64(dst, src, imm);
471     if (!(s->base.tb->flags & FLAG_MASK_64)) {
472         if (s->base.tb->flags & FLAG_MASK_32) {
473             tcg_gen_andi_i64(dst, dst, 0x7fffffff);
474         } else {
475             tcg_gen_andi_i64(dst, dst, 0x00ffffff);
476         }
477     }
478 }
479 
480 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
481 {
482     TCGv_i64 tmp = tcg_temp_new_i64();
483 
484     /*
485      * Note that d2 is limited to 20 bits, signed.  If we crop negative
486      * displacements early we create larger immediate addends.
487      */
488     if (b2 && x2) {
489         tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
490         gen_addi_and_wrap_i64(s, tmp, tmp, d2);
491     } else if (b2) {
492         gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
493     } else if (x2) {
494         gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
495     } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
496         if (s->base.tb->flags & FLAG_MASK_32) {
497             tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
498         } else {
499             tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
500         }
501     } else {
502         tcg_gen_movi_i64(tmp, d2);
503     }
504 
505     return tmp;
506 }
507 
/* True if cc_src/cc_dst/cc_vr may hold live data for the current cc_op.
   Values <= 3 (presumably CC_OP_CONST0..CONST3 — confirm against the
   cc_op enum) encode the cc directly and carry no operand data.  */
static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

/* Set the cc to the compile-time constant VAL (0..3).  */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

/* Defer cc computation OP with the single operand DST.  */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Defer cc computation OP with operands SRC and DST.  */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Defer cc computation OP with operands SRC, DST and VR.  */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

/* Set cc from the zero/non-zero test of VAL.  */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
570 
/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    /* First pass: decide which constants the calc_cc helper will need:
       a dummy operand for ops with fewer than 3 arguments, and the
       cc_op selector for everything but the constant/static cases.  */
    switch (s->cc_op) {
    default:
        dummy = tcg_constant_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_constant_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    /* Second pass: materialize the cc value into cc_op.  */
    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
        /* NZ can be computed inline: cc = (cc_dst != 0).  */
        tcg_gen_setcondi_i64(TCG_COND_NE, cc_dst, cc_dst, 0);
        tcg_gen_extrl_i64_i32(cc_op, cc_dst);
        break;
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        g_assert_not_reached();
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
661 
/* Whether a direct goto_tb to DEST is permissible.  PER needs to see
   every branch, so it disables TB chaining.  */
static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}

/* Bookkeeping for DEBUG_INLINE_BRANCHES: branch NOT inlined.  */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

/* Bookkeeping for DEBUG_INLINE_BRANCHES: branch inlined.  */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
683 
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.
   The index is the 4-bit branch mask: bit 8 selects CC=0 (EQ),
   bit 4 selects CC=1 (LT), bit 2 selects CC=2 (GT), bit 1 selects CC=3.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
709 
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* All or no mask bits set: branch taken always / never.  */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        /* Same table, but with the unsigned flavor of each condition.  */
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        /* TEST UNDER MASK: only the all-zeros / not-all-zeros masks
           can be expressed as a simple comparison.  */
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_constant_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_constant_i64(0);
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        /* Compare (value & mask) against zero.  */
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_constant_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_constant_i64(0);
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        /* The cc value is already in cc_op; pick the comparison that
           matches the requested mask of CC values (8=cc0 .. 1=cc3).  */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, tcg_constant_i32(8), cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
963 
964 /* ====================================================================== */
965 /* Define the insn format enumeration.  */
966 #define F0(N)                         FMT_##N,
967 #define F1(N, X1)                     F0(N)
968 #define F2(N, X1, X2)                 F0(N)
969 #define F3(N, X1, X2, X3)             F0(N)
970 #define F4(N, X1, X2, X3, X4)         F0(N)
971 #define F5(N, X1, X2, X3, X4, X5)     F0(N)
972 #define F6(N, X1, X2, X3, X4, X5, X6) F0(N)
973 
974 typedef enum {
975 #include "insn-format.h.inc"
976 } DisasFormat;
977 
978 #undef F0
979 #undef F1
980 #undef F2
981 #undef F3
982 #undef F4
983 #undef F5
984 #undef F6
985 
/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

/* True if field O was decoded for the current instruction.  */
static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

/* Return the decoded value of field O (stored in compact slot C);
   asserts that the field is present.  */
static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}
1001 
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;         /* first bit of the field in the insn */
    unsigned int size:8;        /* width of the field in bits */
    unsigned int type:2;        /* extraction type (see the field macros) */
    unsigned int indexC:6;      /* compact storage slot */
    enum DisasFieldIndexO indexO:8;     /* original field index */
} DisasField;

/* Field layout of one instruction format.  */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
1014 
/* Field descriptor macros: bit position, width, extraction type, and the
   compact/original indices.  BD/BXD expand to several DisasFields for the
   base/index/displacement parts of an address operand.  */
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6)       { { X1, X2, X3, X4, X5, X6 } },

/* Field layouts, indexed by DisasFormat; generated from the same
   insn-format.h.inc as the DisasFormat enum, so they stay in sync.  */
static const DisasFormatInfo format_info[] = {
#include "insn-format.h.inc"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
1059 
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    TCGv_i64 out, out2, in1, in2;   /* 64-bit operand/result temps */
    TCGv_i64 addr1;                 /* effective address of operand 1 */
    TCGv_i128 out_128, in1_128, in2_128;  /* 128-bit operands (e.g. f128) */
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */
1101 
/* Static description of one instruction: how to decode it and the set
   of helper callbacks that implement its translation.  */
struct DisasInsn {
    unsigned opc:16;            /* opcode used for table lookup */
    unsigned flags:16;          /* IF_* instruction flags */
    DisasFormat fmt:8;          /* field layout, indexes format_info[] */
    unsigned fac:8;             /* required facility */
    unsigned spec:8;            /* SPEC_* operand constraints */

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;              /* per-insn constant passed to the helpers */
};
1128 
1129 /* ====================================================================== */
1130 /* Miscellaneous helpers, used by several operations.  */
1131 
/*
 * Emit an unconditional branch to a destination known at translate time.
 * Returns DISAS_NEXT when the branch merely falls through to the next
 * insn, DISAS_NORETURN when a goto_tb chain was emitted, or
 * DISAS_PC_UPDATED when only psw_addr could be updated.
 */
static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        /* Branch to the next sequential insn: no control transfer,
           but PER still observes a taken branch.  */
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        /* Cannot chain; just update the PC and exit to the main loop.  */
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}
1151 
/*
 * Emit a (possibly conditional) branch described by C.
 *
 * If IS_IMM, the target is PC-relative: base.pc_next + IMM * 2 halfwords;
 * otherwise CDEST holds the computed target address (NULL meaning "no
 * branch", e.g. a zero base register).  Chooses between dual goto_tb
 * exits, a mixed goto_tb/indirect exit, and a movcond-based PC update,
 * depending on what use_goto_tb allows.
 */
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + (int64_t)imm * 2;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_NORETURN;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_constant_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_constant_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            /* Widen the 32-bit comparison result to select the new PC.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_constant_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
        }

        ret = DISAS_PC_UPDATED;
    }

 egress:
    return ret;
}
1273 
1274 /* ====================================================================== */
1275 /* The operations.  These perform the bulk of the work for any insn,
1276    usually after the operands have been loaded and output initialized.  */
1277 
/* LOAD POSITIVE: out = |in2| (64-bit integer absolute value).  */
static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}
1283 
/* LOAD POSITIVE (short BFP): clear the sign bit of a 32-bit float.  */
static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}
1289 
/* LOAD POSITIVE (long BFP): clear the sign bit of a 64-bit float.  */
static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}
1295 
/* LOAD POSITIVE (extended BFP): clear the sign bit in the high half of
   the 128-bit value; the low half passes through unchanged.  */
static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
1302 
/* Signed addition: out = in1 + in2 (CC handled by the cout helper).  */
static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1308 
/* Unsigned 64-bit addition; the carry-out lands in cc_src (0 or 1).
   cc_src is zeroed first so it can double as the zero high-part inputs
   to add2.  */
static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
1315 
1316 /* Compute carry into cc_src. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        /* NOTE(review): cc_src appears to hold the borrow as 0/-1 here,
           so +1 converts it to a 1/0 carry — confirm against CC_OP_SUBU
           producers.  */
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        /* Materialize the CC first, then fall through to extract it.  */
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}
1336 
/* ADD WITH CARRY (32-bit): out = in1 + in2 + carry.  Only the low 32
   bits are meaningful; the cout helper derives the CC.  */
static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}
1344 
/* ADD WITH CARRY (64-bit): out = in1 + in2 + carry, leaving the new
   carry in cc_src.  Done as two add2 steps; the two partial carries can
   never both be set (if the first carries, its sum is 0), so cc_src
   ends up 0 or 1.  */
static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}
1355 
/* ADD (SIGNED) IMMEDIATE to storage (ASI et al).  With facility 45 the
   addition is performed atomically in memory; otherwise it is a plain
   load/add/store sequence.  */
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1377 
/* Unsigned variant of op_asi: same atomic/non-atomic memory add, but the
   recomputation uses add2 so the carry ends up in cc_src for the CC.  */
static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1400 
/* ADD (short BFP): defer to the softfloat helper.  */
static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
1406 
/* ADD (long BFP): defer to the softfloat helper.  */
static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
1412 
/* ADD (extended BFP): 128-bit operands via the softfloat helper.  */
static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out_128, cpu_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
1418 
/* Bitwise AND: out = in1 & in2 (CC handled by the cout helper).  */
static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1424 
/* AND IMMEDIATE against one aligned sub-field of a register (NIHH etc.).
   insn->data packs the field: low byte = bit shift, high byte = width.
   Bits outside the field are preserved by OR-ing ~mask into the shifted
   immediate before the AND.  */
static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_ori_i64(t, t, ~mask);
    tcg_gen_and_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
1441 
/* AND WITH COMPLEMENT: out = in1 & ~in2.  */
static DisasJumpType op_andc(DisasContext *s, DisasOps *o)
{
    tcg_gen_andc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1447 
/* OR WITH COMPLEMENT: out = in1 | ~in2.  */
static DisasJumpType op_orc(DisasContext *s, DisasOps *o)
{
    tcg_gen_orc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1453 
/* NAND: out = ~(in1 & in2).  */
static DisasJumpType op_nand(DisasContext *s, DisasOps *o)
{
    tcg_gen_nand_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1459 
/* NOR: out = ~(in1 | in2).  */
static DisasJumpType op_nor(DisasContext *s, DisasOps *o)
{
    tcg_gen_nor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1465 
/* NOT XOR (equivalence): out = ~(in1 ^ in2).  */
static DisasJumpType op_nxor(DisasContext *s, DisasOps *o)
{
    tcg_gen_eqv_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1471 
/* AND to storage (NI et al).  With the interlocked-access facility the
   AND happens atomically in memory; otherwise load/AND/store.  */
static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1492 
/* BRANCH AND SAVE: store the link info, then branch to in2.  A NULL in2
   (r2 == 0 in the register form) means "save only, no branch".  */
static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}
1504 
/*
 * Build the BAL/BALR link information in o->out.  In 31/64-bit modes
 * this is just the next-insn address with mode bits (pc_to_link_info).
 * In 24-bit mode the high byte is additionally filled with the ILC
 * (ilen/2 in bits 62-63 of the low word), four bits taken from psw_mask
 * (presumably the program mask — confirm against PSW layout) and the
 * condition code shifted into bits 28-29.
 */
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
}
1524 
/* BRANCH AND LINK: like op_bas but saves the richer 24-bit-mode link
   info via save_link_info.  NULL in2 means no branch.  */
static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}
1536 
/* BRANCH RELATIVE AND SAVE: store link info, then take a direct
   PC-relative branch of i2 halfwords.  */
static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    return help_goto_direct(s, s->base.pc_next + (int64_t)get_field(s, i2) * 2);
}
1542 
/* BRANCH ON CONDITION (BC/BCR/BRC): branch when the CC matches mask m1.
   BCR with r2 == 0 never branches but may act as a serialization point
   for masks 14/15.  */
static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s, m1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s, r2) && get_field(s, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    /* Translate the CC mask into a TCG comparison and emit the branch.  */
    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1568 
/* BRANCH ON COUNT (32-bit): decrement the low 32 bits of r1 and branch
   if the result is non-zero.  */
static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;

    /* Decrement and write back only the low 32 bits of r1.  */
    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1589 
/* BRANCH ON COUNT HIGH: decrement the high 32 bits of r1 and branch if
   non-zero.  Only the immediate (relative) form exists.  */
static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int imm = get_field(s, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;

    /* Decrement and write back only the high 32 bits of r1.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);

    return help_branch(s, &c, 1, imm, o->in2);
}
1610 
/* BRANCH ON COUNT (64-bit): decrement r1 in place, branch if non-zero. */
static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_constant_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1627 
/* BRANCH ON INDEX (32-bit, BXH/BXLE): r1 += r3, then compare the 32-bit
   sum against the odd register of the r3 pair.  insn->data selects the
   sense: non-zero = branch on low-or-equal, zero = branch on high.  */
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;

    /* Compute the new index in a temp; write back only the low 32 bits. */
    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1650 
/* BRANCH ON INDEX (64-bit, BXHG/BXLEG): r1 += r3, compare against the
   odd register of the r3 pair.  When r1 aliases that comparand, a copy
   of its pre-update value is taken first.  */
static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        /* The add below would clobber the comparand; snapshot it.  */
        c.u.s64.b = load_reg(r3 | 1);
    } else {
        c.u.s64.b = regs[r3 | 1];
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1673 
/* COMPARE AND BRANCH family: compare in1 with in2 using the relation
   selected by mask m3 (unsigned variant when insn->data is set), then
   branch either PC-relative (i4 present) or to the b4/d4 address.  */
static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s, i4);
    if (is_imm) {
        imm = get_field(s, i4);
    } else {
        /* Register/storage form: compute the branch target address.  */
        imm = 0;
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1699 
/* COMPARE (short BFP): helper sets the CC directly.  */
static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
1706 
/* COMPARE (long BFP): helper sets the CC directly.  */
static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
1713 
/* COMPARE (extended BFP): 128-bit operands; helper sets the CC.  */
static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->in1_128, o->in2_128);
    set_cc_static(s);
    return DISAS_NEXT;
}
1720 
/*
 * Extract and validate the m3 (rounding mode) and m4 mask fields of a
 * floating-point instruction.  Fields that only exist with the
 * floating-point-extension facility are forced to 0 without it.
 * Returns m3/m4 packed into one i32 (m3 in bits 0-3, m4 in bits 4-7),
 * or NULL after raising a specification exception for an invalid
 * rounding mode — callers must then return DISAS_NORETURN.
 */
static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
                                   bool m4_with_fpe)
{
    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
    uint8_t m3 = get_field(s, m3);
    uint8_t m4 = get_field(s, m4);

    /* m3 field was introduced with FPE */
    if (!fpe && m3_with_fpe) {
        m3 = 0;
    }
    /* m4 field was introduced with FPE */
    if (!fpe && m4_with_fpe) {
        m4 = 0;
    }

    /* Check for valid rounding modes. Mode 3 was introduced later. */
    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return NULL;
    }

    return tcg_constant_i32(deposit32(m3, 4, 4, m4));
}
1745 
/*
 * CONVERT TO FIXED: BFP to signed 32/64-bit integer, for short (e),
 * long (d) and extended (x) formats.  All validate the m3/m4 mask
 * fields first and set the CC from the helper.
 */
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, cpu_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, cpu_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1817 
/*
 * CONVERT TO LOGICAL: BFP to unsigned 32/64-bit integer, for short (e),
 * long (d) and extended (x) formats.  Same structure as the signed
 * conversions above; CC comes from the helper.
 */
static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, cpu_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, cpu_env, o->in2_128, m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1889 
/*
 * CONVERT FROM FIXED / CONVERT FROM LOGICAL: signed (c*gb) and unsigned
 * (c*lgb) 64-bit integer to short/long/extended BFP.  These conversions
 * do not set the CC.
 */
static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, cpu_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out_128, cpu_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, cpu_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out_128, cpu_env, o->in2, m34);
    return DISAS_NEXT;
}
1955 
/* CHECKSUM: the helper returns (checksum, bytes-processed) as a 128-bit
   pair; the address/length register pair r2/r2+1 is advanced by the
   processed length.  */
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    TCGv_i128 pair = tcg_temp_new_i128();
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(pair, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    tcg_gen_extr_i128_i64(o->out, len, pair);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);

    return DISAS_NEXT;
}
1971 
/* COMPARE LOGICAL (character): when the operand length (l1 + 1) is a
   power of two up to 8 bytes, inline the two loads and compare; any
   other length goes through the byte-loop helper.  */
static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s, l1);
    TCGv_i32 vl;
    MemOp mop;

    switch (l + 1) {
    case 1:
    case 2:
    case 4:
    case 8:
        /* ctz32 of the length gives the MO_8..MO_64 size code.  */
        mop = ctz32(l + 1) | MO_TE;
        tcg_gen_qemu_ld_tl(cc_src, o->addr1, get_mem_index(s), mop);
        tcg_gen_qemu_ld_tl(cc_dst, o->in2, get_mem_index(s), mop);
        gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
        return DISAS_NEXT;
    default:
        vl = tcg_constant_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        set_cc_static(s);
        return DISAS_NEXT;
    }
}
1995 
1996 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
1997 {
1998     int r1 = get_field(s, r1);
1999     int r2 = get_field(s, r2);
2000     TCGv_i32 t1, t2;
2001 
2002     /* r1 and r2 must be even.  */
2003     if (r1 & 1 || r2 & 1) {
2004         gen_program_exception(s, PGM_SPECIFICATION);
2005         return DISAS_NORETURN;
2006     }
2007 
2008     t1 = tcg_constant_i32(r1);
2009     t2 = tcg_constant_i32(r2);
2010     gen_helper_clcl(cc_op, cpu_env, t1, t2);
2011     set_cc_static(s);
2012     return DISAS_NEXT;
2013 }
2014 
2015 static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
2016 {
2017     int r1 = get_field(s, r1);
2018     int r3 = get_field(s, r3);
2019     TCGv_i32 t1, t3;
2020 
2021     /* r1 and r3 must be even.  */
2022     if (r1 & 1 || r3 & 1) {
2023         gen_program_exception(s, PGM_SPECIFICATION);
2024         return DISAS_NORETURN;
2025     }
2026 
2027     t1 = tcg_constant_i32(r1);
2028     t3 = tcg_constant_i32(r3);
2029     gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
2030     set_cc_static(s);
2031     return DISAS_NEXT;
2032 }
2033 
2034 static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
2035 {
2036     int r1 = get_field(s, r1);
2037     int r3 = get_field(s, r3);
2038     TCGv_i32 t1, t3;
2039 
2040     /* r1 and r3 must be even.  */
2041     if (r1 & 1 || r3 & 1) {
2042         gen_program_exception(s, PGM_SPECIFICATION);
2043         return DISAS_NORETURN;
2044     }
2045 
2046     t1 = tcg_constant_i32(r1);
2047     t3 = tcg_constant_i32(r3);
2048     gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
2049     set_cc_static(s);
2050     return DISAS_NEXT;
2051 }
2052 
/*
 * Presumably COMPARE LOGICAL CHARACTERS UNDER MASK (CLM): the low 32
 * bits of in1 are compared against storage at in2 for the bytes
 * selected by m3; the helper computes the CC.
 */
static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_constant_i32(get_field(s, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2063 
/*
 * Presumably COMPARE LOGICAL STRING (CLST): regs[0] holds the ending
 * character.  The helper returns the updated operand addresses as a
 * 128-bit pair, which is unpacked back into in2 (low) and in1 (high).
 */
static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    TCGv_i128 pair = tcg_temp_new_i128();

    gen_helper_clst(pair, cpu_env, regs[0], o->in1, o->in2);
    tcg_gen_extr_i128_i64(o->in2, o->in1, pair);

    set_cc_static(s);
    return DISAS_NEXT;
}
2074 
/*
 * COPY SIGN: combine the sign bit of in1 with the magnitude of in2.
 * The sign is extracted first in case o->out aliases o->in1.
 */
static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    return DISAS_NEXT;
}
2083 
/* COMPARE AND SWAP: atomic cmpxchg at b2+d2; insn->data supplies the
   operand size as a MemOp.  */
static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    set_cc_static(s);

    return DISAS_NEXT;
}
2106 
/* COMPARE DOUBLE AND SWAP (128-bit): atomic cmpxchg of the R1:R1+1 pair
   against the R3:R3+1 pair at addr1.  */
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);

    o->out_128 = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(o->out_128, regs[r1 + 1], regs[r1]);

    /* Note out (R1:R1+1) = expected value and in2 (R3:R3+1) = new value.  */
    tcg_gen_atomic_cmpxchg_i128(o->out_128, o->addr1, o->out_128, o->in2_128,
                                get_mem_index(s), MO_BE | MO_128 | MO_ALIGN);

    /*
     * Extract result into cc_dst:cc_src, compare vs the expected value
     * in the as yet unmodified input registers, then update CC_OP.
     */
    tcg_gen_extr_i128_i64(cc_src, cc_dst, o->out_128);
    tcg_gen_xor_i64(cc_dst, cc_dst, regs[r1]);
    tcg_gen_xor_i64(cc_src, cc_src, regs[r1 + 1]);
    tcg_gen_or_i64(cc_dst, cc_dst, cc_src);
    set_cc_nz_u64(s, cc_dst);

    return DISAS_NEXT;
}
2130 
/* COMPARE AND SWAP AND STORE: entirely in the helper; the _parallel
   variant is used when translating for a parallel context.  */
static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    TCGv_i32 t_r3 = tcg_constant_i32(r3);

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    } else {
        gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    }

    set_cc_static(s);
    return DISAS_NEXT;
}
2145 
2146 #ifndef CONFIG_USER_ONLY
/* Presumably COMPARE AND SWAP AND PURGE (privileged): cmpxchg at the
   masked address in2, then purge the TLB when the swap succeeded and
   the LSB of R2 was set.  insn->data supplies the operand size.  */
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    MemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    /* Clear the low address bits per the operand size.  */
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);

    gen_helper_purge(cpu_env);
    gen_set_label(lab);

    return DISAS_NEXT;
}
2186 #endif
2187 
/* CONVERT TO DECIMAL: the helper converts the low 32 bits of in1; the
   8-byte result is stored at the address in in2.  */
static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_gen_qemu_st_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
    return DISAS_NEXT;
}
2197 
/* COMPARE AND TRAP family: trap when the relation selected by m3 holds
   between in1 and in2.  insn->data selects the unsigned (logical)
   comparison variant.  */
static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    /* Branch around the trap when the condition does NOT hold.  */
    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return DISAS_NEXT;
}
2216 
/*
 * Unicode conversion instructions (CU12/CU14/CU21/CU24/CU41/CU42):
 * insn->data encodes the source/destination widths, e.g. 12 converts
 * UTF-8 to UTF-16.  m3 (well-formedness checking) is honored only with
 * the ETF3-enhancement facility.
 */
static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_constant_i32(r1);
    tr2 = tcg_constant_i32(r2);
    chk = tcg_constant_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    set_cc_static(s);
    return DISAS_NEXT;
}
2263 
2264 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged): i2 carries the function code; all handling is
   in the helper.  */
static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
    TCGv_i32 func_code = tcg_constant_i32(get_field(s, i2));

    gen_helper_diag(cpu_env, r1, r3, func_code);
    return DISAS_NEXT;
}
2274 #endif
2275 
/* 32-bit signed divide: the helper packs remainder/quotient into one
   64-bit value, split here into out2 (low 32) and out (high 32).  */
static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out, cpu_env, o->in1, o->in2);
    tcg_gen_extr32_i64(o->out2, o->out, o->out);
    return DISAS_NEXT;
}
2282 
/* 32-bit unsigned divide: same packed-result split as op_divs32.  */
static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out, cpu_env, o->in1, o->in2);
    tcg_gen_extr32_i64(o->out2, o->out, o->out);
    return DISAS_NEXT;
}
2289 
/* 64-bit signed divide: the helper returns a 128-bit pair, unpacked into
   out2 (low half) and out (high half).  */
static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();

    gen_helper_divs64(t, cpu_env, o->in1, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, t);
    return DISAS_NEXT;
}
2298 
/* 64-bit unsigned divide: note the dividend comes in as o->out/o->out2
   (presumably the R1:R1+1 pair); results unpacked as in op_divs64.  */
static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
{
    TCGv_i128 t = tcg_temp_new_i128();

    gen_helper_divu64(t, cpu_env, o->out, o->out2, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, t);
    return DISAS_NEXT;
}
2307 
/* Short-precision BFP divide (DEB): entirely in the helper.  */
static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
2313 
/* Long-precision BFP divide (DDB): entirely in the helper.  */
static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
2319 
/* Extended-precision BFP divide (DXB): 128-bit operands via the helper.  */
static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}
2325 
/* EXTRACT ACCESS (EAR): read access register r2 into out.  */
static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return DISAS_NEXT;
}
2332 
/* EXTRACT CPU ATTRIBUTE presumably (ECAG): we model no cache topology,
   so always return all-ones.  */
static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}
2339 
/* EXTRACT FPC: read the floating-point control register into out.  */
static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
2345 
/* EXTRACT PSW: R1 gets the high word of the PSW mask, R2 (if nonzero)
   the low word.  */
static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }
    return DISAS_NEXT;
}
2361 
/*
 * EXECUTE: run the instruction at address in2, with its second byte
 * OR-ed with bits of R1.  The helper stashes the modified instruction
 * in ex_value for the next translation pass; hence the PC/CC update
 * here and the PC_CC_UPDATED return.
 */
static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return DISAS_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    /* R0 means "no modification", modeled as a zero mask.  */
    if (r1 == 0) {
        v1 = tcg_constant_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_constant_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);

    return DISAS_PC_CC_UPDATED;
}
2388 
/* Presumably LOAD FP INTEGER (short BFP, FIEB): round in2 to an integral
   value per the m34 rounding-mode fields.  */
static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
{
    /* NULL indicates invalid rounding-mode fields (exception raised).  */
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fieb(o->out, cpu_env, o->in2, m34);
    return DISAS_NEXT;
}
2399 
/* Presumably LOAD FP INTEGER (long BFP, FIDB): see op_fieb.  */
static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fidb(o->out, cpu_env, o->in2, m34);
    return DISAS_NEXT;
}
2410 
/* Presumably LOAD FP INTEGER (extended BFP, FIXB): 128-bit operand.  */
static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fixb(o->out_128, cpu_env, o->in2_128, m34);
    return DISAS_NEXT;
}
2421 
/* FIND LEFTMOST ONE (FLOGR): R1 = count of leading zeros (64 if input
   is zero), R1+1 = input with the found bit cleared.  */
static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return DISAS_NEXT;
}
2441 
/*
 * INSERT CHARACTERS UNDER MASK (ICM): load the bytes selected by m3
 * from in2 and deposit them into out; insn->data gives the bit offset
 * of the 32-bit field being targeted.  Contiguous masks become a single
 * wide load; sparse masks become a byte-by-byte sequence.  ccm collects
 * the bit positions actually modified, for CC computation.
 */
static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, len, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t ccm;

    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit load.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
        len = 32;
        goto one_insert;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit load.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
        len = 16;
        goto one_insert;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit load.  */
        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        len = 8;
        goto one_insert;

    one_insert:
        pos = base + ctz32(m3) * 8;
        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
        ccm = ((1ull << len) - 1) << pos;
        break;

    default:
        /* This is going to be a sequence of loads and inserts.  */
        pos = base + 32 - 8;
        ccm = 0;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
                tcg_gen_addi_i64(o->in2, o->in2, 1);
                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
                ccm |= 0xffull << pos;
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }

    tcg_gen_movi_i64(tmp, ccm);
    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
    return DISAS_NEXT;
}
2500 
/* Insert immediate: deposit in2 into in1 at the bit position and width
   packed into insn->data (low byte = shift, high bits = size).  */
static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return DISAS_NEXT;
}
2508 
/*
 * INSERT PROGRAM MASK presumably (IPM): build the byte
 * (cc << 4) | program-mask-nibble (PSW mask bits 40-43) and deposit it
 * into bits 24-31 of out.
 */
static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    /* Materialize the CC so cc_op holds its numeric value.  */
    gen_op_calc_cc(s);
    t1 = tcg_temp_new_i64();
    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
    t2 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t2, cc_op);
    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
    return DISAS_NEXT;
}
2522 
2523 #ifndef CONFIG_USER_ONLY
/* INVALIDATE DAT TABLE ENTRY presumably (IDTE, privileged): m4 is only
   honored when the local-TLB-clearing facility is present.  */
static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_constant_i32(get_field(s, m4));
    } else {
        m4 = tcg_constant_i32(0);
    }
    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
    return DISAS_NEXT;
}
2536 
/* INVALIDATE PAGE TABLE ENTRY presumably (IPTE, privileged): same m4
   handling as op_idte.  */
static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_constant_i32(get_field(s, m4));
    } else {
        m4 = tcg_constant_i32(0);
    }
    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
    return DISAS_NEXT;
}
2549 
/* INSERT STORAGE KEY EXTENDED presumably (ISKE, privileged).  */
static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
    gen_helper_iske(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
2555 #endif
2556 
/*
 * Message-security-assist instructions (KM, KMC, KMA, KIMD, ...):
 * insn->data carries the function type.  The switch validates the
 * register operands required by each type -- the deliberate fallthroughs
 * accumulate the checks from most to least restrictive -- then the
 * helper does all the work.
 */
static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
{
    int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
    int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
    int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
    TCGv_i32 t_r1, t_r2, t_r3, type;

    switch (s->insn->data) {
    case S390_FEAT_TYPE_KMA:
        /* r3 must not overlap r1 or r2.  */
        if (r3 == r1 || r3 == r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_KMCTR:
        /* r3 must be a nonzero even register.  */
        if (r3 & 1 || !r3) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PPNO:
    case S390_FEAT_TYPE_KMF:
    case S390_FEAT_TYPE_KMC:
    case S390_FEAT_TYPE_KMO:
    case S390_FEAT_TYPE_KM:
        /* r1 must be a nonzero even register.  */
        if (r1 & 1 || !r1) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_KMAC:
    case S390_FEAT_TYPE_KIMD:
    case S390_FEAT_TYPE_KLMD:
        /* r2 must be a nonzero even register.  */
        if (r2 & 1 || !r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PCKMO:
    case S390_FEAT_TYPE_PCC:
        break;
    default:
        g_assert_not_reached();
    };

    t_r1 = tcg_constant_i32(r1);
    t_r2 = tcg_constant_i32(r2);
    t_r3 = tcg_constant_i32(r3);
    type = tcg_constant_i32(s->insn->data);
    gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
    set_cc_static(s);
    return DISAS_NEXT;
}
2610 
/* Presumably COMPARE AND SIGNAL (short BFP, KEB): helper sets the CC.  */
static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2617 
/* Presumably COMPARE AND SIGNAL (long BFP, KDB): helper sets the CC.  */
static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2624 
/* Presumably COMPARE AND SIGNAL (extended BFP, KXB): 128-bit operands.  */
static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, cpu_env, o->in1_128, o->in2_128);
    set_cc_static(s);
    return DISAS_NEXT;
}
2631 
static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory, which the atomic
       fetch-add returns into in2.  */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC.  */
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2642 
static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory, which the atomic
       fetch-and returns into in2.  */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2653 
static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory, which the atomic
       fetch-or returns into in2.  */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2664 
static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory, which the atomic
       fetch-xor returns into in2.  */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2675 
/* Presumably LOAD LENGTHENED short->long BFP (LDEB).  */
static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
2681 
/* Presumably LOAD ROUNDED long->short BFP (LEDB); m34 holds the
   rounding-mode fields, NULL meaning they were rejected.  */
static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ledb(o->out, cpu_env, o->in2, m34);
    return DISAS_NEXT;
}
2692 
/* Presumably LOAD ROUNDED extended->long BFP (LDXB).  */
static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ldxb(o->out, cpu_env, o->in2_128, m34);
    return DISAS_NEXT;
}
2703 
/* Presumably LOAD ROUNDED extended->short BFP (LEXB).  */
static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_lexb(o->out, cpu_env, o->in2_128, m34);
    return DISAS_NEXT;
}
2714 
/* Presumably LOAD LENGTHENED long->extended BFP (LXDB).  */
static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out_128, cpu_env, o->in2);
    return DISAS_NEXT;
}
2720 
/* Presumably LOAD LENGTHENED short->extended BFP (LXEB).  */
static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out_128, cpu_env, o->in2);
    return DISAS_NEXT;
}
2726 
/* Place a 32-bit value in the high half of the 64-bit register image
   (short FP values live in the upper word).  */
static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
{
    tcg_gen_shli_i64(o->out, o->in2, 32);
    return DISAS_NEXT;
}
2732 
/* LOAD LOGICAL THIRTY ONE BITS: keep only the low 31 bits.  */
static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return DISAS_NEXT;
}
2738 
/* Load a sign-extended byte from the address in in2.  */
static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_SB);
    return DISAS_NEXT;
}
2744 
/* Load a zero-extended byte from the address in in2.  */
static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_UB);
    return DISAS_NEXT;
}
2750 
/* Load a sign-extended 16-bit value from the address in in2.  */
static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TESW);
    return DISAS_NEXT;
}
2756 
/* Load a zero-extended 16-bit value from the address in in2.  */
static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUW);
    return DISAS_NEXT;
}
2762 
/* Load a sign-extended 32-bit value; insn->data may add MemOp flags
   (e.g. alignment).  */
static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
                       MO_TESL | s->insn->data);
    return DISAS_NEXT;
}
2769 
/* Load a zero-extended 32-bit value; insn->data may add MemOp flags.  */
static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
                       MO_TEUL | s->insn->data);
    return DISAS_NEXT;
}
2776 
/* Load a 64-bit value; insn->data may add MemOp flags.  */
static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s),
                        MO_TEUQ | s->insn->data);
    return DISAS_NEXT;
}
2783 
/* LOAD AND TRAP (32-bit): store the loaded word, then trap if it is
   zero.  */
static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2794 
/* LOAD AND TRAP (64-bit): load a quadword into out, trap if zero.  */
static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUQ);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2805 
/* LOAD HIGH AND TRAP presumably (LFHAT): store into the high half of
   R1, trap if the value is zero.  */
static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2816 
/* LOAD LOGICAL AND TRAP presumably (LLGFAT): zero-extended 32-bit load
   into out, trap if zero.  */
static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();

    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2828 
/* LOAD LOGICAL THIRTY ONE BITS AND TRAP presumably (LLGTAT): mask to
   31 bits, trap if the result is zero.  */
static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2839 
/*
 * LOAD ON CONDITION / SELECT: out = condition ? in2 : in1, where the
 * condition comes from the m3 (LOC*) or m4 (SELECT) mask via disas_jcc.
 */
static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    if (have_field(s, m3)) {
        /* LOAD * ON CONDITION */
        disas_jcc(s, &c, get_field(s, m3));
    } else {
        /* SELECT */
        disas_jcc(s, &c, get_field(s, m4));
    }

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
    } else {
        /* 32-bit comparison: materialize it as a 0/1 value and widen,
           so a single 64-bit movcond can be used.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);

        z = tcg_constant_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
    }

    return DISAS_NEXT;
}
2870 
2871 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit, privileged): helper loads control registers
   r1..r3 from the address in in2.  */
static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
2882 
/* LOAD CONTROL (64-bit, privileged): as op_lctl but full registers.  */
static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
2893 
/* LOAD REAL ADDRESS (privileged): translation done in the helper,
   which also produces the CC.  */
static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2900 
/* LOAD PROGRAM PARAMETER presumably (LPP, privileged): store in2 into
   env->pp.  */
static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
{
    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
    return DISAS_NEXT;
}
2906 
/* LOAD PSW (privileged): load an 8-byte short-format PSW and expand it
   before installing.  Never returns to this TB.  */
static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 mask, addr;

    per_breaking_event(s);

    /*
     * Convert the short PSW into the normal PSW, similar to what
     * s390_cpu_load_normal() does.
     */
    mask = tcg_temp_new_i64();
    addr = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(mask, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN_8);
    tcg_gen_andi_i64(addr, mask, PSW_MASK_SHORT_ADDR);
    tcg_gen_andi_i64(mask, mask, PSW_MASK_SHORT_CTRL);
    tcg_gen_xori_i64(mask, mask, PSW_MASK_SHORTPSW);
    gen_helper_load_psw(cpu_env, mask, addr);
    return DISAS_NORETURN;
}
2926 
/* LOAD PSW EXTENDED (privileged): load the 16-byte PSW (mask then
   address) and install it.  Never returns to this TB.  */
static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUQ | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld_i64(t2, o->in2, get_mem_index(s), MO_TEUQ);
    gen_helper_load_psw(cpu_env, t1, t2);
    return DISAS_NORETURN;
}
2942 #endif
2943 
/* LOAD ACCESS MULTIPLE (LAM): helper loads access registers r1..r3
   from the address in in2.  */
static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_lam(cpu_env, r1, o->in2, r3);
    return DISAS_NEXT;
}
2952 
/*
 * LOAD MULTIPLE (32-bit): load registers r1..r3 (wrapping mod 16) from
 * consecutive words at in2.  The first and last words are loaded first
 * so any page fault happens before partial register updates.
 */
static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32_i64(r1, t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32_i64(r1, t1);
    }
    return DISAS_NEXT;
}
2992 
/*
 * LOAD MULTIPLE HIGH: like op_lm32, but the loaded words go to the
 * high halves of registers r1..r3 (wrapping modulo 16).
 */
static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32h_i64(r1, t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
    /* Registers are only modified after both loads succeeded. */
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32h_i64(r1, t1);
    }
    return DISAS_NEXT;
}
3032 
/*
 * LOAD MULTIPLE (64-bit): load registers r1..r3 (wrapping modulo 16)
 * from consecutive doublewords at the second-operand address.
 */
static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(regs[r3], t2, get_mem_index(s), MO_TEUQ);
    /* r1 is staged through t1 in case it is also the base register. */
    tcg_gen_mov_i64(regs[r1], t1);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
    }
    return DISAS_NEXT;
}
3069 
/*
 * LOAD PAIR DISJOINT: load two operands "interlocked".  In a parallel
 * context we cannot make the pair of loads atomic cheaply, so we fall
 * back to stop-the-world single stepping; serially, two plain aligned
 * loads suffice.  insn->data carries the MemOp size.
 */
static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    MemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step.  */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return DISAS_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
    a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);

    /* ... and indicate that we performed them while interlocked.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
3093 
/*
 * LOAD PAIR FROM QUADWORD.  Serial contexts use the plain helper;
 * parallel contexts need a true 16-byte atomic load, which is only
 * possible when the host provides atomic128 support — otherwise we
 * punt to exit_atomic and retry under the exclusive lock.
 */
static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
{
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_lpq(o->out, cpu_env, o->in2);
    } else if (HAVE_ATOMIC128) {
        gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
    } else {
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    return_low128(o->out2);
    return DISAS_NEXT;
}
3107 
3108 #ifndef CONFIG_USER_ONLY
/* LOAD USING REAL ADDRESS: load through the real-address MMU index,
   bypassing DAT; insn->data carries the MemOp for the access size. */
static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
    return DISAS_NEXT;
}
3114 #endif
3115 
3116 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3117 {
3118     tcg_gen_andi_i64(o->out, o->in2, -256);
3119     return DISAS_NEXT;
3120 }
3121 
/*
 * LOAD COUNT TO BLOCK BOUNDARY: out = min(16, bytes remaining until the
 * next 2^(m3+6)-byte boundary after addr1).  m3 > 6 is a specification
 * exception.  CC is set from the result via CC_OP_LCBB.
 */
static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
{
    const int64_t block_size = (1ull << (get_field(s, m3) + 6));

    if (get_field(s, m3) > 6) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* -(addr | -block_size) == block_size - (addr % block_size):
       the distance from addr1 to the next block boundary. */
    tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
    tcg_gen_neg_i64(o->addr1, o->addr1);
    tcg_gen_movi_i64(o->out, 16);
    tcg_gen_umin_i64(o->out, o->out, o->addr1);
    gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
    return DISAS_NEXT;
}
3138 
/*
 * MONITOR CALL.  A monitor class with any of bits 0-7 set is a
 * specification exception; otherwise the system-mode helper decides
 * whether to raise a monitor event.  For user-only builds this is a NOP.
 */
static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
{
    const uint16_t monitor_class = get_field(s, i2);

    if (monitor_class & 0xff00) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

#if !defined(CONFIG_USER_ONLY)
    gen_helper_monitor_call(cpu_env, o->addr1,
                            tcg_constant_i32(monitor_class));
#endif
    /* Defaults to a NOP. */
    return DISAS_NEXT;
}
3155 
/* Generic move: hand the in2 temporary over to out.  Clearing o->in2
   transfers ownership so the generic epilogue does not double-use it. */
static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->in2 = NULL;
    return DISAS_NEXT;
}

/*
 * Move with access-register update (MVCDK-style epilogue): besides the
 * value move, set access register 1 according to the current address
 * space control in the PSW.
 */
static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    TCGv ar1 = tcg_temp_new_i64();

    o->out = o->in2;
    o->in2 = NULL;

    /* All four ASC values are covered; no default case is needed. */
    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        if (b2) {
            /* Copy the access register associated with base b2. */
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    return DISAS_NEXT;
}

/* Move a 128-bit pair: transfer ownership of both input temporaries. */
static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->in1 = NULL;
    o->in2 = NULL;
    return DISAS_NEXT;
}
3202 
/* MOVE (character): copy l1+1 bytes from in2 to addr1 via helper. */
static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}

/* MOVE RIGHT TO LEFT: length comes from GR0 at runtime. */
static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
{
    gen_helper_mvcrl(cpu_env, regs[0], o->addr1, o->in2);
    return DISAS_NEXT;
}

/* MOVE INVERSE: copy l1+1 bytes with reversed byte order. */
static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3224 
/*
 * MOVE LONG: interruptible move using register pairs r1/r1+1 and
 * r2/r2+1; odd register numbers are a specification exception.
 * The helper sets the CC.
 */
static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even.  */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t2 = tcg_constant_i32(r2);
    gen_helper_mvcl(cc_op, cpu_env, t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3243 
/*
 * MOVE LONG EXTENDED: like MVCL but with a padding byte taken from
 * o->in2; register pairs r1/r1+1 and r3/r3+1 must be even.
 */
static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t3 = tcg_constant_i32(r3);
    gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

/*
 * MOVE LONG UNICODE: two-byte (UTF-16) variant of MVCLE; the same
 * even-register constraint applies.
 */
static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_constant_i32(r1);
    t3 = tcg_constant_i32(r3);
    gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
3281 
/* MOVE WITH OPTIONAL SPECIFICATIONS: GR r3 supplies the length and the
   operand-access controls; the helper sets the CC. */
static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3289 
3290 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: cross-address-space move, length in GR r1, key in
   GR r3.  NOTE(review): the register number is taken from the l1 field
   — this appears deliberate for this instruction format; confirm
   against the decode table. */
static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    int r3 = get_field(s, r3);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MOVE TO SECONDARY: mirror image of op_mvcp; see the note there. */
static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    int r3 = get_field(s, r3);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3308 #endif
3309 
/* MOVE NUMERICS: copy only the low (digit) nibbles of l1+1 bytes. */
static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}

/* MOVE WITH OFFSET: shift-and-merge move used for packed decimals. */
static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}

/* MOVE PAGE: GR0 carries the operand specifications; CC from helper. */
static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MOVE STRING: copy up to the terminator byte in GR0; CC from helper. */
static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_mvst(cc_op, cpu_env, t1, t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MOVE ZONES: copy only the high (zone) nibbles of l1+1 bytes. */
static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3353 
/* Integer multiply, low 64 bits of the product. */
static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* Unsigned 64x64 -> 128 multiply: high half to out, low half to out2. */
static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* Signed 64x64 -> 128 multiply: high half to out, low half to out2. */
static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
{
    tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3371 
/* MULTIPLY (short BFP): 32-bit float multiply via helper. */
static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* MULTIPLY (short -> long BFP): widening 32->64-bit float multiply. */
static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* MULTIPLY (long BFP): 64-bit float multiply via helper. */
static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* MULTIPLY (extended BFP): 128-bit float multiply via helper. */
static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}

/* MULTIPLY (long -> extended BFP): widening 64->128-bit multiply. */
static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out_128, cpu_env, o->in1_128, o->in2);
    return DISAS_NEXT;
}
3401 
/* MULTIPLY AND ADD (short BFP): out = in1 * in2 + f[r3]. */
static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}

/* MULTIPLY AND ADD (long BFP): out = in1 * in2 + f[r3]. */
static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}

/* MULTIPLY AND SUBTRACT (short BFP): out = in1 * in2 - f[r3]. */
static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}

/* MULTIPLY AND SUBTRACT (long BFP): out = in1 * in2 - f[r3]. */
static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
    return DISAS_NEXT;
}
3429 
3430 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3431 {
3432     TCGv_i64 z = tcg_constant_i64(0);
3433     TCGv_i64 n = tcg_temp_new_i64();
3434 
3435     tcg_gen_neg_i64(n, o->in2);
3436     tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3437     return DISAS_NEXT;
3438 }
3439 
/* LOAD NEGATIVE (short BFP): force the sign bit of the 32-bit value
   kept in the high-less i64 container. */
static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}

/* LOAD NEGATIVE (long BFP): force the 64-bit sign bit. */
static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}

/* LOAD NEGATIVE (extended BFP): sign bit lives in the high half;
   the low half is copied unchanged. */
static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3458 
/* AND (character): byte-wise AND of l1+1 bytes in storage; CC from
   the helper result. */
static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3467 
/* LOAD COMPLEMENT: two's complement negation. */
static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return DISAS_NEXT;
}

/* LOAD COMPLEMENT (short BFP): flip the 32-bit sign bit. */
static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}

/* LOAD COMPLEMENT (long BFP): flip the 64-bit sign bit. */
static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}

/* LOAD COMPLEMENT (extended BFP): flip the sign bit in the high half,
   pass the low half through. */
static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3492 
/* OR (character): byte-wise OR of l1+1 bytes in storage; CC from the
   helper result. */
static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Bitwise OR of the two register/immediate operands. */
static DisasJumpType op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3507 
/*
 * OR immediate into a sub-field of the register (OIHH/OIHL/... family).
 * insn->data packs the field: low byte = shift, next byte = width.
 */
static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    /* Position the immediate at the target field before ORing. */
    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
3523 
/*
 * OR immediate to storage (OI and friends).  With the
 * interlocked-access facility the update is done with an atomic
 * fetch-or; otherwise with a plain load/modify/store.  insn->data is
 * the MemOp for the access size.
 */
static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
3544 
/* PACK: convert zoned decimal to packed decimal, length l1+1. */
static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_pack(cpu_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
3552 
/* PACK ASCII: second-operand length l2+1, capped at 32 bytes. */
static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l2 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l2);
    gen_helper_pka(cpu_env, o->addr1, o->in2, l);
    return DISAS_NEXT;
}

/* PACK UNICODE: length l2+1 must be even and at most 64 bytes. */
static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l2 & 1) || (l2 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l2);
    gen_helper_pku(cpu_env, o->addr1, o->in2, l);
    return DISAS_NEXT;
}
3582 
/*
 * POPULATION COUNT.  With m3 bit 8 and the miscellaneous-extensions-3
 * facility, count over the whole 64-bit register; otherwise the helper
 * produces the legacy per-byte counts.
 */
static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
{
    const uint8_t m3 = get_field(s, m3);

    if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) {
        tcg_gen_ctpop_i64(o->out, o->in2);
    } else {
        gen_helper_popcnt(o->out, o->in2);
    }
    return DISAS_NEXT;
}
3594 
3595 #ifndef CONFIG_USER_ONLY
/* PURGE TLB: delegate entirely to the system-mode helper. */
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
    gen_helper_ptlb(cpu_env);
    return DISAS_NEXT;
}
3601 #endif
3602 
/*
 * ROTATE THEN INSERT SELECTED BITS (RISBG/RISBGN/RISBHG/RISBLG).
 * Rotate R2 left by I5, then insert bit range I3..I4 into R1, either
 * keeping (default) or zeroing (I4 bit 0x80) the unselected bits.
 * Where possible the operation is lowered to a single extract or
 * deposit TCG op.
 */
static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  */
    switch (s->fields.op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 = (i3 & 31) + 32;
        i4 = (i4 & 31) + 32;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2. */
    if (i3 <= i4) {
        /* [0...i3---i4...63] */
        mask = (-1ull >> i3) & (-1ull << (63 - i4));
    } else {
        /* [0---i4...i3---63] */
        mask = (-1ull >> i3) | (-1ull << (63 - i4));
    }
    /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register.  */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;

    /* In some cases we can implement this with extract.  */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return DISAS_NEXT;
    }

    /* In some cases we can implement this with deposit.  */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO.  */
        rot = (rot - pos) & 63;
    } else {
        /* pos < 0 flags the generic mask-and-merge path below. */
        pos = -1;
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output.  */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return DISAS_NEXT;
}
3690 
3691 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3692 {
3693     int i3 = get_field(s, i3);
3694     int i4 = get_field(s, i4);
3695     int i5 = get_field(s, i5);
3696     TCGv_i64 orig_out;
3697     uint64_t mask;
3698 
3699     /* If this is a test-only form, arrange to discard the result.  */
3700     if (i3 & 0x80) {
3701         tcg_debug_assert(o->out != NULL);
3702         orig_out = o->out;
3703         o->out = tcg_temp_new_i64();
3704         tcg_gen_mov_i64(o->out, orig_out);
3705     }
3706 
3707     i3 &= 63;
3708     i4 &= 63;
3709     i5 &= 63;
3710 
3711     /* MASK is the set of bits to be operated on from R2.
3712        Take care for I3/I4 wraparound.  */
3713     mask = ~0ull >> i3;
3714     if (i3 <= i4) {
3715         mask ^= ~0ull >> i4 >> 1;
3716     } else {
3717         mask |= ~(~0ull >> i4 >> 1);
3718     }
3719 
3720     /* Rotate the input as necessary.  */
3721     tcg_gen_rotli_i64(o->in2, o->in2, i5);
3722 
3723     /* Operate.  */
3724     switch (s->fields.op2) {
3725     case 0x54: /* AND */
3726         tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3727         tcg_gen_and_i64(o->out, o->out, o->in2);
3728         break;
3729     case 0x56: /* OR */
3730         tcg_gen_andi_i64(o->in2, o->in2, mask);
3731         tcg_gen_or_i64(o->out, o->out, o->in2);
3732         break;
3733     case 0x57: /* XOR */
3734         tcg_gen_andi_i64(o->in2, o->in2, mask);
3735         tcg_gen_xor_i64(o->out, o->out, o->in2);
3736         break;
3737     default:
3738         abort();
3739     }
3740 
3741     /* Set the CC.  */
3742     tcg_gen_andi_i64(cc_dst, o->out, mask);
3743     set_cc_nz_u64(s, cc_dst);
3744     return DISAS_NEXT;
3745 }
3746 
/* Byte-swap the low 16 bits; upper bits of out are zeroed. */
static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}

/* Byte-swap the low 32 bits; upper bits of out are zeroed. */
static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}

/* Byte-swap all 64 bits. */
static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3764 
3765 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3766 {
3767     TCGv_i32 t1 = tcg_temp_new_i32();
3768     TCGv_i32 t2 = tcg_temp_new_i32();
3769     TCGv_i32 to = tcg_temp_new_i32();
3770     tcg_gen_extrl_i64_i32(t1, o->in1);
3771     tcg_gen_extrl_i64_i32(t2, o->in2);
3772     tcg_gen_rotl_i32(to, t1, t2);
3773     tcg_gen_extu_i32_i64(o->out, to);
3774     return DISAS_NEXT;
3775 }
3776 
/* ROTATE LEFT (64-bit). */
static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3782 
3783 #ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED: helper manipulates the storage key
   and returns the CC. */
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET ADDRESS SPACE CONTROL (FAST). */
static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block.  */
    return DISAS_TOO_MANY;
}
3797 #endif
3798 
/*
 * SET ADDRESSING MODE (SAM24/SAM31/SAM64).  insn->data selects the
 * mode (0/1/3); the corresponding address mask is checked against the
 * current instruction address and then deposited into the PSW mask.
 */
static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        /* 24-bit addressing. */
        mask = 0xffffff;
        break;
    case 1:
        /* 31-bit addressing. */
        mask = 0x7fffffff;
        break;
    default:
        /* 64-bit addressing. */
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    s->pc_tmp &= mask;

    tsam = tcg_constant_i64(sam);
    /* Update the addressing-mode bits (31-32) of the PSW mask. */
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);

    /* Always exit the TB, since we (may have) changed execution mode.  */
    return DISAS_TOO_MANY;
}
3832 
/* SET ACCESS REGISTER: store the low 32 bits of in2 into AR r1. */
static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}
3839 
/* SUBTRACT (short BFP). */
static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SUBTRACT (long BFP). */
static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SUBTRACT (extended BFP). */
static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}

/* SQUARE ROOT (short BFP). */
static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

/* SQUARE ROOT (long BFP). */
static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

/* SQUARE ROOT (extended BFP). */
static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out_128, cpu_env, o->in2_128);
    return DISAS_NEXT;
}
3875 
3876 #ifndef CONFIG_USER_ONLY
/* SERVICE CALL (SCLP): helper performs the call and returns the CC. */
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SIGNAL PROCESSOR: order code and CPU address handled in the helper. */
static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
    set_cc_static(s);
    return DISAS_NEXT;
}
3893 #endif
3894 
/*
 * STORE ON CONDITION (STOC/STOCG/STOCFH): store r1 to the
 * second-operand address only when the m3 condition holds.
 * insn->data selects the variant (0/1/2).
 */
static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }

    r1 = get_field(s, r1);
    a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUQ);
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUL);
        break;
    case 2: /* STOCFH */
        /* Store the high word of the register. */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st_i64(h, a, get_mem_index(s), MO_TEUL);
        break;
    default:
        g_assert_not_reached();
    }

    gen_set_label(lab);
    return DISAS_NEXT;
}
3936 
/*
 * SHIFT LEFT SINGLE (arithmetic).  insn->data is the sign-bit
 * position (31 or 63).  CC_OP_SLA detects overflow; for the 32-bit
 * form the value is pre-shifted so the CC logic sees bit 63.
 */
static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;
    uint64_t sign = 1ull << s->insn->data;
    if (s->insn->data == 31) {
        t = tcg_temp_new_i64();
        tcg_gen_shli_i64(t, o->in1, 32);
    } else {
        t = o->in1;
    }
    gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}
3956 
/* SHIFT LEFT SINGLE LOGICAL. */
static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SHIFT RIGHT SINGLE (arithmetic). */
static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SHIFT RIGHT SINGLE LOGICAL. */
static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3974 
/* SET FPC: load the floating-point control register from in2. */
static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return DISAS_NEXT;
}

/* SET FPC AND SIGNAL: like SET FPC but may raise a simulated
   IEEE exception; handled in the helper. */
static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return DISAS_NEXT;
}
3986 
/* SET BFP ROUNDING MODE (2-bit form). */
static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
{
    /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
    gen_helper_srnm(cpu_env, o->addr1);
    return DISAS_NEXT;
}
3994 
/* SET BFP ROUNDING MODE (3-bit form). */
static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
{
    /* Bits 0-55 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
    gen_helper_srnm(cpu_env, o->addr1);
    return DISAS_NEXT;
}
4002 
/* SET DFP ROUNDING MODE: update FPC bits 25-27 directly. */
static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bits other than 61-63 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);

    /* No need to call a helper, we don't implement dfp */
    tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
    tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
4016 
/* SET PROGRAM MASK: CC comes from bits 28-29 of in1; the 4-bit program
   mask (bits 24-27 of in1) is deposited into the PSW mask. */
static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
{
    /* Extract the new condition code from bits 28-29 of the register. */
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    /* Deposit the 4-bit program mask into the PSW. */
    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return DISAS_NEXT;
}
4027 
/* EXTRACT CPU TIME: loads r3 from memory, puts (first operand - CPU timer)
   into GR0 and the second-operand address into GR1.  Operand addresses are
   computed by hand here because the generic in/out hooks don't cover this
   three-address form. */
static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s, b1);
    int d1 = get_field(s, d1);
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);
    int r3 = get_field(s, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld_i64(regs[r3], o->addr1, get_mem_index(s), MO_TEUQ);

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, cpu_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);
    return DISAS_NEXT;
}
4056 
4057 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: bits 24-27 of the address become the PSW key. */
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return DISAS_NEXT;
}

/* SET STORAGE KEY EXTENDED: storage-key update is done in the helper. */
static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
4070 
/* Raise a specification exception at runtime if any reserved bit of the
   (just modified) PSW mask is set; otherwise fall through. */
static void gen_check_psw_mask(DisasContext *s)
{
    TCGv_i64 reserved = tcg_temp_new_i64();
    TCGLabel *ok = gen_new_label();

    tcg_gen_andi_i64(reserved, psw_mask, PSW_MASK_RESERVED);
    tcg_gen_brcondi_i64(TCG_COND_EQ, reserved, 0, ok);
    gen_program_exception(s, PGM_SPECIFICATION);
    gen_set_label(ok);
}
4081 
/* SET SYSTEM MASK: replace PSW bits 0-7 with the byte in in2, then
   validate the resulting mask. */
static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);

    gen_check_psw_mask(s);

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}

/* STORE CPU ADDRESS: out = this CPU's core id. */
static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
    return DISAS_NEXT;
}
4098 #endif
4099 
/* STORE CLOCK: read the TOD clock into out; the output hook performs the
   actual store to memory. */
static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}

/* STORE CLOCK EXTENDED: store a 16-byte value built from the TOD clock
   and the TOD programmable register. */
static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* 16 bit value store in an uint32_t (only valid bits set) */
    tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    tcg_gen_qemu_st_i64(c1, o->in2, get_mem_index(s), MO_TEUQ);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st_i64(c2, o->in2, get_mem_index(s), MO_TEUQ);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4131 
4132 #ifndef CONFIG_USER_ONLY
/* SET CLOCK: helper sets the TOD clock and returns the CC. */
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
    gen_helper_sck(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET CLOCK COMPARATOR. */
static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
{
    gen_helper_sckc(cpu_env, o->in2);
    return DISAS_NEXT;
}

/* SET CLOCK PROGRAMMABLE FIELD: value taken from GR0. */
static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
{
    gen_helper_sckpf(cpu_env, regs[0]);
    return DISAS_NEXT;
}

/* STORE CLOCK COMPARATOR: out is stored by the output hook. */
static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
{
    gen_helper_stckc(o->out, cpu_env);
    return DISAS_NEXT;
}
4157 
/* STORE CONTROL (64-bit): store control registers r1..r3 at in2. */
static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    return DISAS_NEXT;
}

/* STORE CONTROL (32-bit): store control registers r1..r3 at in2. */
static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    return DISAS_NEXT;
}

/* STORE CPU ID: out = env->cpuid; stored by the output hook. */
static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
    return DISAS_NEXT;
}
4181 
/* SET CPU TIMER. */
static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
    gen_helper_spt(cpu_env, o->in2);
    return DISAS_NEXT;
}

/* STORE FACILITY LIST: helper writes the facility bits to low memory. */
static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
    gen_helper_stfl(cpu_env);
    return DISAS_NEXT;
}

/* STORE CPU TIMER: out is stored by the output hook. */
static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
    gen_helper_stpt(o->out, cpu_env);
    return DISAS_NEXT;
}

/* STORE SYSTEM INFORMATION: function code/selectors in GR0/GR1. */
static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
{
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET PREFIX. */
static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
    gen_helper_spx(cpu_env, o->in2);
    return DISAS_NEXT;
}
4212 
/* Channel-subsystem instructions below all take the subchannel id in GR1
   (per the architecture) and set the CC from the helper's return. */

/* CANCEL SUBCHANNEL. */
static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
{
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CLEAR SUBCHANNEL. */
static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
{
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* HALT SUBCHANNEL. */
static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
{
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MODIFY SUBCHANNEL: in2 addresses the SCHIB. */
static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
{
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* RESET CHANNEL PATH. */
static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
{
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* RESUME SUBCHANNEL. */
static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
{
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET ADDRESS LIMIT. */
static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
{
    gen_helper_sal(cpu_env, regs[1]);
    return DISAS_NEXT;
}

/* SET CHANNEL MONITOR: operands in GR1, GR2 and in2. */
static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
{
    gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
    return DISAS_NEXT;
}
4266 
/* SIGNAL ADAPTER: not implemented. */
static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
{
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return DISAS_NEXT;
}

/* STORE CHANNEL PATH STATUS: not implemented; no-op. */
static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    /* The instruction is suppressed if not provided. */
    return DISAS_NEXT;
}

/* START SUBCHANNEL: in2 addresses the ORB. */
static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE SUBCHANNEL: in2 addresses the SCHIB to fill in. */
static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE CHANNEL REPORT WORD. */
static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
    gen_helper_stcrw(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST PENDING INTERRUPTION: addr1 addresses the interruption code area. */
static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
    gen_helper_tpi(cc_op, cpu_env, o->addr1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST SUBCHANNEL: in2 addresses the IRB to fill in. */
static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CHANNEL SUBSYSTEM CALL: in2 addresses the command block. */
static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4321 
/* STORE PREFIX: out = prefix register, masked to the architected bits. */
static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}
4328 
/* STORE THEN AND/OR SYSTEM MASK: store the current system-mask byte at
   addr1, then AND (opcode 0xac, STNSM) or OR (STOSM) the immediate into
   PSW bits 0-7. */
static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s, i2);
    TCGv_i64 t;

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st_i64(t, o->addr1, get_mem_index(s), MO_UB);

    if (s->fields.op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    gen_check_psw_mask(s);

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
4354 
/* STORE USING REAL ADDRESS: store via MMU_REAL_IDX (bypassing DAT);
   the access size comes from the decode table via s->insn->data. */
static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* PER tracing of storage using real address. */
        update_psw_addr(s);
        gen_helper_per_store_real(cpu_env);
    }
    return DISAS_NEXT;
}
4365 #endif
4366 
/* STORE FACILITY LIST EXTENDED: helper stores the list at in2, CC set
   from its return value. */
static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Store low 8 bits of in1 at address in2. */
static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_UB);
    return DISAS_NEXT;
}

/* Store low 16 bits of in1 at address in2 (big-endian). */
static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_TEUW);
    return DISAS_NEXT;
}

/* Store 32 bits; s->insn->data may add alignment requirements. */
static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, get_mem_index(s),
                       MO_TEUL | s->insn->data);
    return DISAS_NEXT;
}

/* Store 64 bits; s->insn->data may add alignment requirements. */
static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s),
                        MO_TEUQ | s->insn->data);
    return DISAS_NEXT;
}
4399 
/* STORE ACCESS MULTIPLE: store access registers r1..r3 at in2. */
static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));

    gen_helper_stam(cpu_env, r1, o->in2, r3);
    return DISAS_NEXT;
}
4408 
/* STORE CHARACTERS UNDER MASK: store the bytes of in1 selected by the
   4-bit mask m3 to consecutive bytes at in2.  Contiguous masks become a
   single wider store; arbitrary masks fall back to byte stores.  The
   decode-table value in s->insn->data ("base") selects which word of the
   register the mask applies to (e.g. high vs low half for STCMH). */
static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bit position of the least-significant selected byte. */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            /* Walk the mask from its most-significant bit downwards. */
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    return DISAS_NEXT;
}
4456 
/* STORE MULTIPLE: store registers r1..r3 (wrapping mod 16) to consecutive
   memory starting at in2; element size (4 or 8) from s->insn->data. */
static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_constant_i64(size);

    while (1) {
        tcg_gen_qemu_st_i64(regs[r1], o->in2, get_mem_index(s),
                            size == 8 ? MO_TEUQ : MO_TEUL);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        /* Register numbers wrap from 15 back to 0. */
        r1 = (r1 + 1) & 15;
    }

    return DISAS_NEXT;
}
4476 
/* STORE MULTIPLE HIGH: store the high 32 bits of registers r1..r3
   (wrapping mod 16) to consecutive words at in2. */
static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_constant_i64(4);
    TCGv_i64 t32 = tcg_constant_i64(32);

    while (1) {
        /* Move the high half into the low half for a 32-bit store.
           (The store only uses the low 32 bits of t.)  */
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st_i64(t, o->in2, get_mem_index(s), MO_TEUL);
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }
    return DISAS_NEXT;
}
4496 
/* STORE PAIR TO QUADWORD: 16-byte store, atomic when executed in
   parallel context; falls back to exit_atomic if the host lacks
   128-bit atomics. */
static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
{
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
    } else if (HAVE_ATOMIC128) {
        gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
    } else {
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    return DISAS_NEXT;
}
4509 
/* SEARCH STRING: helper scans memory, updating r1/r2; CC from helper. */
static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_srst(cpu_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SEARCH STRING UNICODE. */
static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_srstu(cpu_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4529 
/* SUBTRACT: out = in1 - in2; CC handled by the cout hook. */
static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SUBTRACT LOGICAL (64-bit): also produce the borrow in cc_src
   (0 = no borrow, -1 = borrow) via a 128-bit subtract. */
static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
4542 
/* Compute borrow (0, -1) into cc_src, converting from whatever form the
   current cc_op keeps it in.  Used by SUBTRACT WITH BORROW. */
static void compute_borrow(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_SUBU:
        /* The borrow value is already in cc_src (0,-1). */
        break;
    default:
        /* Materialize the CC, then handle it as CC_OP_STATIC. */
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        /* fall through */
    case CC_OP_ADDU:
        /* Convert carry (1,0) to borrow (0,-1). */
        tcg_gen_subi_i64(cc_src, cc_src, 1);
        break;
    }
}
4564 
/* SUBTRACT LOGICAL WITH BORROW (32-bit). */
static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /* Borrow is {0, -1}, so add to subtract. */
    tcg_gen_add_i64(o->out, o->in1, cc_src);
    tcg_gen_sub_i64(o->out, o->out, o->in2);
    return DISAS_NEXT;
}

/* SUBTRACT LOGICAL WITH BORROW (64-bit): tracked via 128-bit arithmetic
   so the outgoing borrow lands back in cc_src. */
static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /*
     * Borrow is {0, -1}, so add to subtract; replicate the
     * borrow input to produce 128-bit -1 for the addition.
     */
    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
    tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}
4589 
/* SUPERVISOR CALL: record the SVC code and instruction length in the
   CPU state, then raise EXCP_SVC. */
static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    /* The exception unwinds from here; keep PSW address and CC current. */
    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_constant_i32(get_field(s, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));

    t = tcg_constant_i32(s->ilen);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));

    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
}
4606 
/* TEST ADDRESSING MODE: CC encodes the current AM (bit 1 = 64-bit,
   bit 0 = 31-bit), known statically from the TB flags. */
static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
{
    int cc = 0;

    cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
    cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
    gen_op_movi_cc(s, cc);
    return DISAS_NEXT;
}
4616 
/* TEST DATA CLASS (short BFP): CC from helper. */
static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST DATA CLASS (long BFP). */
static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST DATA CLASS (extended BFP): first operand is a 128-bit value. */
static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, cpu_env, o->in1_128, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4637 
4638 #ifndef CONFIG_USER_ONLY
4639 
/* TEST BLOCK: helper validates/clears the page at in2; CC from helper. */
static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
    gen_helper_testblock(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST PROTECTION: CC from helper. */
static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4653 
4654 #endif
4655 
/* TEST DECIMAL: the l1 field encodes length - 1, hence the +1. */
static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_constant_i32(get_field(s, l1) + 1);

    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE: helper translates l1+1 bytes at addr1 via table at in2. */
static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4673 
/* TRANSLATE EXTENDED: helper returns updated address/length as an i128
   pair which is unpacked back into the two output registers. */
static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
{
    TCGv_i128 pair = tcg_temp_new_i128();

    gen_helper_tre(pair, cpu_env, o->out, o->out2, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, pair);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE AND TEST: CC from helper. */
static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE AND TEST REVERSE: like TRT but scanning backwards (helper). */
static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4701 
/* TRANSLATE ONE/TWO TO ONE/TWO (TROO/TROT/TRTO/TRTT): the low two opcode
   bits select source/destination character sizes; the test character
   comes from GR0 unless m3 bit 0 disables the test (ETF2-ENH only). */
static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
    TCGv_i32 sizes = tcg_constant_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s, m3);

    /* Without the enhancement facility, m3 is ignored. */
    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        /* Test-character comparison disabled: use a sentinel of -1. */
        tcg_gen_movi_i32(tst, -1);
    } else {
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        /* Truncate the test char to the source character width. */
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);

    set_cc_static(s);
    return DISAS_NEXT;
}
4728 
/* TEST AND SET: atomically exchange the byte at in2 with 0xff; CC is the
   leftmost bit (bit 7 after the extract) of the old value. */
static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_constant_i32(0xff);

    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* UNPACK: decimal unpack handled entirely in the helper. */
static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));

    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    return DISAS_NEXT;
}
4746 
/* UNPACK ASCII: length-checked at translation time, CC from helper. */
static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l1);
    gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* UNPACK UNICODE: length-checked at translation time, CC from helper. */
static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_constant_i32(l1);
    gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4778 
4779 
/* EXCLUSIVE OR (character): XOR l1+1 bytes at (b2,d2) into (b1,d1).
   The self-XOR case (identical operands) is open-coded as a memset of
   zero; everything else goes through the helper. */
static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s, d1);
    int d2 = get_field(s, d2);
    int b1 = get_field(s, b1);
    int b2 = get_field(s, b2);
    int l = get_field(s, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_constant_i64(0);

        /* l encodes length - 1; emit the widest stores that fit. */
        l++;
        while (l >= 8) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UQ);
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UL);
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UW);
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UB);
        }
        /* x XOR x is zero, so the result is known to be all-zero: CC 0. */
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_constant_i32(l);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4831 
/* EXCLUSIVE OR (register forms): out = in1 ^ in2. */
static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* XOR IMMEDIATE (XIHF/XILF-style): the decode-table data packs the
   field width (high byte) and bit offset (low byte) of the immediate. */
static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
4853 
/* XOR IMMEDIATE against storage: with the interlocked-access facility,
   performed as an atomic fetch-xor in memory; otherwise as a plain
   load/xor/store sequence.  Access size comes from s->insn->data. */
static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
4874 
/* Produce a constant zero output (used by decode-table entries whose
   result is architecturally zero). */
static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_constant_i64(0);
    return DISAS_NEXT;
}

/* As op_zero, but for instructions with a register-pair output. */
static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_constant_i64(0);
    o->out2 = o->out;
    return DISAS_NEXT;
}
4887 
4888 #ifndef CONFIG_USER_ONLY
/* zPCI instructions: all delegate to helpers and set the CC statically. */

/* CALL LOGICAL PROCESSOR. */
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_clp(cpu_env, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCI LOAD. */
static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_pcilg(cpu_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCI STORE. */
static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_pcistg(cpu_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE PCI FUNCTION CONTROLS: b2 carries the access register number. */
static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET INTERRUPTION CONTROLS. */
static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
{
    gen_helper_sic(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* REFRESH PCI TRANSLATIONS. */
static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));

    gen_helper_rpcit(cpu_env, r1, r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCI STORE BLOCK: b2 carries the access register number. */
static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MODIFY PCI FUNCTION CONTROLS: b2 carries the access register number. */
static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));

    gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
    set_cc_static(s);
    return DISAS_NEXT;
}
4964 #endif
4965 
4966 #include "translate_vx.c.inc"
4967 
4968 /* ====================================================================== */
4969 /* The "Cc OUTput" generators.  Given the generated output (and in some cases
4970    the original inputs), update the various cc data structures in order to
4971    be able to compute the new condition code.  */
4972 
/* Each cout_* records the inputs a later lazy CC computation needs,
   tagged with the appropriate CC_OP_* discriminator. */

static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    /* Split the 64-bit result: carry-out in cc_src, 32-bit sum in cc_dst. */
    tcg_gen_shri_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    /* The 64-bit add already left the carry in cc_src. */
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

static void cout_nz32(DisasContext *s, DisasOps *o)
{
    /* Only the low 32 bits participate in the zero test. */
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    /* Arithmetic shift propagates the borrow (0 or -1) into cc_src;
       the 32-bit difference itself goes into cc_dst. */
    tcg_gen_sari_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    /* The 64-bit subtract already left the borrow in cc_src. */
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
}
5102 
5103 static void cout_tm32(DisasContext *s, DisasOps *o)
5104 {
5105     gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5106 }
5107 
5108 static void cout_tm64(DisasContext *s, DisasOps *o)
5109 {
5110     gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5111 }
5112 
5113 static void cout_muls32(DisasContext *s, DisasOps *o)
5114 {
5115     gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
5116 }
5117 
5118 static void cout_muls64(DisasContext *s, DisasOps *o)
5119 {
5120     /* out contains "high" part, out2 contains "low" part of 128 bit result */
5121     gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
5122 }
5123 
5124 /* ====================================================================== */
5125 /* The "PREParation" generators.  These initialize the DisasOps.OUT fields
5126    with the TCG register to which we will write.  Used in combination with
5127    the "wout" generators, in some cases we need a new temporary, and in
5128    some cases we can write to a TCG global.  */
5129 
/* Allocate a fresh temporary as the single output.  */
static void prep_new(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Allocate a fresh temporary pair (out, out2).  */
static void prep_new_P(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Allocate a fresh 128-bit temporary as the output.  */
static void prep_new_x(DisasContext *s, DisasOps *o)
{
    o->out_128 = tcg_temp_new_i128();
}
#define SPEC_prep_new_x 0

/* Write directly into the r1 general register (a TCG global).  */
static void prep_r1(DisasContext *s, DisasOps *o)
{
    o->out = regs[get_field(s, r1)];
}
#define SPEC_prep_r1 0

/* Write directly into the even/odd register pair r1:r1+1;
   the SPEC enforces that r1 is even.  */
static void prep_r1_P(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
}
#define SPEC_prep_r1_P SPEC_r1_even

static void prep_x1(DisasContext *s, DisasOps *o)
{
    o->out_128 = load_freg_128(get_field(s, r1));
}
#define SPEC_prep_x1 SPEC_r1_f128
5168 
5169 /* ====================================================================== */
5170 /* The "Write OUTput" generators.  These generally perform some non-trivial
5171    copy of data to TCG globals, or to main memory.  The trivial cases are
5172    generally handled by having a "prep" generator install the TCG global
5173    as the destination of the operation.  */
5174 
static void wout_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out);
}
#define SPEC_wout_r1 0

static void wout_out2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out2);
}
#define SPEC_wout_out2_r1 0

/* Insert only the low 8 bits of the output into r1.  */
static void wout_r1_8(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Insert only the low 16 bits of the output into r1.  */
static void wout_r1_16(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32 0

static void wout_r1_32h(DisasContext *s, DisasOps *o)
{
    store_reg32h_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32h 0

/* Store two 32-bit results into the even/odd pair r1:r1+1.  */
static void wout_r1_P32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split one 64-bit result across the pair: low word to r1+1 (odd),
   high word to r1 (even).  */
static void wout_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i64 t = tcg_temp_new_i64();
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(t, o->out, 32);
    store_reg32_i64(r1, t);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

/* Split a 128-bit result across the pair: low doubleword to r1+1,
   high doubleword to r1.  */
static void wout_r1_D64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_extr_i128_i64(regs[r1 + 1], regs[r1], o->out_128);
}
#define SPEC_wout_r1_D64 SPEC_r1_even

static void wout_r3_P32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

static void wout_e1(DisasContext *s, DisasOps *o)
{
    store_freg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasOps *o)
{
    store_freg(get_field(s, r1), o->out);
}
#define SPEC_wout_f1 0

static void wout_x1(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);

    /* Split out_128 into out+out2 for cout_f128. */
    tcg_debug_assert(o->out == NULL);
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(o->out2, o->out, o->out_128);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

/* 128-bit FP registers pair f1 with f1 + 2, not f1 + 1.  */
static void wout_x1_P(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1_P SPEC_r1_f128

/* No-op when r1 == r2 (the store would be redundant).  */
static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_reg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_freg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

static void wout_m1_8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_UB);
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUW);
}
#define SPEC_wout_m1_16 0

#ifndef CONFIG_USER_ONLY
/* "a" variants additionally require an aligned address.  */
static void wout_m1_16a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

static void wout_m1_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUL);
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_32a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

static void wout_m1_64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ);
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_64a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

/* Store to the address computed into in2 rather than addr1.  */
static void wout_m2_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
}
#define SPEC_wout_m2_32 0

static void wout_in2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
5370 
5371 /* ====================================================================== */
5372 /* The "INput 1" generators.  These load the first operand to an insn.  */
5373 
static void in1_r1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1));
}
#define SPEC_in1_r1 0

/* "_o" variants alias the TCG global directly instead of copying.  */
static void in1_r1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1)];
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32u 0

/* "sr32": the high 32 bits of the register, shifted down.  */
static void in1_r1_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* "r1p1": the odd register of the even/odd pair r1:r1+1.  */
static void in1_r1p1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1) + 1];
}
#define SPEC_in1_r1p1_o SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* "D32": build a 64-bit value from the pair, r1 holding the high
   word and r1+1 the low word.  */
static void in1_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
}
#define SPEC_in1_r2_32u 0

static void in1_r3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r3)];
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32);
}
#define SPEC_in1_r3_sr32 0

static void in1_e1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(s, r1));
}
#define SPEC_in1_e1 0

static void in1_f1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r1));
}
#define SPEC_in1_f1 0

static void in1_x1(DisasContext *s, DisasOps *o)
{
    o->in1_128 = load_freg_128(get_field(s, r1));
}
#define SPEC_in1_x1 SPEC_r1_f128

/* Load the high double word of an extended (128-bit) format FP number */
static void in1_x2h(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r2));
}
#define SPEC_in1_x2h SPEC_r2_f128

static void in1_f3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r3));
}
#define SPEC_in1_f3 0

/* Compute the b1+d1 effective address into addr1 (no memory access).  */
static void in1_la1(DisasContext *s, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
}
#define SPEC_in1_la1 0

/* Compute the x2+b2+d2 effective address into addr1.  */
static void in1_la2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in1_la2 0

/* The in1_m1_* generators compute the first-operand address via
   in1_la1 and then load from it with the indicated width/signedness.  */
static void in1_m1_8u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_UB);
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESW);
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUW);
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESL);
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUL);
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUQ);
}
#define SPEC_in1_m1_64 0
5593 
5594 /* ====================================================================== */
5595 /* The "INput 2" generators.  These load the second operand to an insn.  */
5596 
/* "_o" variants alias the TCG global directly instead of copying.  */
static void in2_r1_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r1)];
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_32u 0

/* "D32": 64-bit value from the pair; r1 high word, r1+1 low word.  */
static void in2_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r2)];
}
#define SPEC_in2_r2_o 0

/* Leave in2 NULL when r2 is 0 (register 0 means "no operand" here).  */
static void in2_r2_nz(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r3));
}
#define SPEC_in2_r3 0

/* Build a 128-bit value from the even/odd pair r3:r3+1
   (r3 high doubleword, r3+1 low doubleword).  */
static void in2_r3_D64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in2_128 = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(o->in2_128, regs[r3 + 1], regs[r3]);
}
#define SPEC_in2_r3_D64 SPEC_r3_even

static void in2_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
}
#define SPEC_in2_r3_32u 0

static void in2_r2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

static void in2_e2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(s, r2));
}
#define SPEC_in2_e2 0

static void in2_f2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2));
}
#define SPEC_in2_f2 0

static void in2_x2(DisasContext *s, DisasOps *o)
{
    o->in2_128 = load_freg_128(get_field(s, r2));
}
#define SPEC_in2_x2 SPEC_r2_f128

/* Load the low double word of an extended (128-bit) format FP number */
static void in2_x2l(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2) + 2);
}
#define SPEC_in2_x2l SPEC_r2_f128

/* Register value used as an address (wrapped per addressing mode).  */
static void in2_ra2(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);

    /* Note: *don't* treat !r2 as 0, use the reg value. */
    o->in2 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
}
#define SPEC_in2_ra2 0

/* Compute the x2+b2+d2 effective address into in2.  */
static void in2_a2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in2_a2 0
5764 
5765 static TCGv gen_ri2(DisasContext *s)
5766 {
5767     int64_t delta = (int64_t)get_field(s, i2) * 2;
5768     TCGv ri2;
5769 
5770     if (unlikely(s->ex_value)) {
5771         ri2 = tcg_temp_new_i64();
5772         tcg_gen_ld_i64(ri2, cpu_env, offsetof(CPUS390XState, ex_target));
5773         tcg_gen_addi_i64(ri2, ri2, delta);
5774     } else {
5775         ri2 = tcg_constant_i64(s->base.pc_next + delta);
5776     }
5777 
5778     return ri2;
5779 }
5780 
/* PC-relative address operand (RI-format).  */
static void in2_ri2(DisasContext *s, DisasOps *o)
{
    o->in2 = gen_ri2(s);
}
#define SPEC_in2_ri2 0

/* Shift count operand: b2+d2 if a base register is given, else the
   immediate displacement; in either case masked to 6 bits.  */
static void in2_sh(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);

    if (b2 == 0) {
        o->in2 = tcg_constant_i64(d2 & 0x3f);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
    }
}
#define SPEC_in2_sh 0

/* The in2_m2_* generators compute the second-operand address via
   in2_a2 and then load from it, reusing in2 for the loaded value.  */
static void in2_m2_8u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_UB);
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESW);
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUW);
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESL);
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUL);
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
/* "a" variants additionally require an aligned address.  */
static void in2_m2_32ua(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

static void in2_m2_64(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
}
#define SPEC_in2_m2_64 0

/* As in2_m2_64, but the loaded value is itself an address and is
   wrapped for the current addressing mode.  */
static void in2_m2_64w(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
    gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
}
#define SPEC_in2_m2_64w 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_64a(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif

/* The in2_mri2_* generators load from a PC-relative address.  */
static void in2_mri2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TESW);
}
#define SPEC_in2_mri2_16s 0

static void in2_mri2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TEUW);
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
                       MO_TESL | MO_ALIGN);
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
                       MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s),
                        MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_mri2_64 0

/* Immediate operands, optionally zero-extended and/or shifted.  */
static void in2_i2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64(get_field(s, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint8_t)get_field(s, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint16_t)get_field(s, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint32_t)get_field(s, i2));
}
#define SPEC_in2_i2_32u 0

/* Shift amount comes from the per-insn data field.  */
static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(s, i2);
    o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(s, i2);
    o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
/* The raw instruction image itself, as an operand (e.g. for traps).  */
static void in2_insn(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64(s->fields.raw_insn);
}
#define SPEC_in2_insn 0
#endif
5952 
5953 /* ====================================================================== */
5954 
5955 /* Find opc within the table of insns.  This is formulated as a switch
5956    statement so that (1) we get compile-time notice of cut-paste errors
5957    for duplicated opcodes, and (2) the compiler generates the binary
5958    search tree, rather than us having to post-process the table.  */
5959 
5960 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5961     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
5962 
5963 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5964     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
5965 
5966 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
5967     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
5968 
5969 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
5970 
5971 enum DisasInsnEnum {
5972 #include "insn-data.h.inc"
5973 };
5974 
5975 #undef E
5976 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
5977     .opc = OPC,                                                             \
5978     .flags = FL,                                                            \
5979     .fmt = FMT_##FT,                                                        \
5980     .fac = FAC_##FC,                                                        \
5981     .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
5982     .name = #NM,                                                            \
5983     .help_in1 = in1_##I1,                                                   \
5984     .help_in2 = in2_##I2,                                                   \
5985     .help_prep = prep_##P,                                                  \
5986     .help_wout = wout_##W,                                                  \
5987     .help_cout = cout_##CC,                                                 \
5988     .help_op = op_##OP,                                                     \
5989     .data = D                                                               \
5990  },
5991 
5992 /* Allow 0 to be used for NULL in the table below.  */
5993 #define in1_0  NULL
5994 #define in2_0  NULL
5995 #define prep_0  NULL
5996 #define wout_0  NULL
5997 #define cout_0  NULL
5998 #define op_0  NULL
5999 
6000 #define SPEC_in1_0 0
6001 #define SPEC_in2_0 0
6002 #define SPEC_prep_0 0
6003 #define SPEC_wout_0 0
6004 
6005 /* Give smaller names to the various facilities.  */
6006 #define FAC_Z           S390_FEAT_ZARCH
6007 #define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6008 #define FAC_DFP         S390_FEAT_DFP
6009 #define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* DFP-rounding */
6010 #define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
6011 #define FAC_EE          S390_FEAT_EXECUTE_EXT
6012 #define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
6013 #define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
6014 #define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPS-sign-handling */
6015 #define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPR-GR-transfer */
6016 #define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6017 #define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
6018 #define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
6019 #define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* IEEE-exception-simulation */
6020 #define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6021 #define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
6022 #define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
6023 #define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
6024 #define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
6025 #define FAC_PC          S390_FEAT_STFLE_45 /* population count */
6026 #define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
6027 #define FAC_SFLE        S390_FEAT_STFLE
6028 #define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6029 #define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6030 #define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6031 #define FAC_DAT_ENH     S390_FEAT_DAT_ENH
6032 #define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
6033 #define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
6034 #define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
6035 #define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6036 #define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
6037 #define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
6038 #define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6039 #define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6040 #define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6041 #define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
6042 #define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
6043 #define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
6044 #define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
6045 #define FAC_V           S390_FEAT_VECTOR /* vector facility */
6046 #define FAC_VE          S390_FEAT_VECTOR_ENH  /* vector enhancements facility 1 */
6047 #define FAC_VE2         S390_FEAT_VECTOR_ENH2 /* vector enhancements facility 2 */
6048 #define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
6049 #define FAC_MIE3        S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */
6050 
/* Table of all implemented instructions, generated from insn-data.h.inc. */
static const DisasInsn insn_info[] = {
#include "insn-data.h.inc"
};
6054 
#undef E
/*
 * Redefine E so that re-including insn-data.h.inc expands each entry into
 * a switch case mapping the combined opcode to its insn_info[] element.
 */
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
    case OPC: return &insn_info[insn_ ## NM];

/* Map a combined (major << 8 | secondary) opcode to its DisasInsn,
   or NULL if the opcode is not implemented.  */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.h.inc"
    default:
        return NULL;
    }
}

#undef F
#undef E
#undef D
#undef C
6072 
6073 /* Extract a field from the insn.  The INSN should be left-aligned in
6074    the uint64_t so that we can more easily utilize the big-bit-endian
6075    definitions we extract from the Principals of Operation.  */
6076 
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* A zero-sized field means "not present in this format"; skip it. */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        /* Standard sign-extension idiom: xor in the sign bit, subtract it. */
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* dh (high 8 bits, signed) is encoded after dl (low 12 bits). */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    case 3: /* MSB stored in RXB */
        g_assert(f->size == 4);
        /*
         * RXB (insn bits 36-39) supplies the fifth, most-significant bit
         * for up to four 4-bit register fields; which RXB bit applies is
         * determined by the position (f->beg) of the base field.
         */
        switch (f->beg) {
        case 8:
            r |= extract64(insn, 63 - 36, 1) << 4;
            break;
        case 12:
            r |= extract64(insn, 63 - 37, 1) << 4;
            break;
        case 16:
            r |= extract64(insn, 63 - 38, 1) << 4;
            break;
        case 32:
            r |= extract64(insn, 63 - 39, 1) << 4;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        abort();
    }

    /*
     * Validate that the "compressed" encoding we selected above is valid.
     * I.e. we haven't made two different original fields overlap.
     */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
6133 
6134 /* Lookup the insn at the current PC, extracting the operands into O and
6135    returning the info struct for the insn.  Returns NULL for invalid insn.  */
6136 
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        tcg_gen_st_i64(tcg_constant_i64(0), cpu_env,
                       offsetof(CPUS390XState, ex_value));

        /* Extract the values saved by EXECUTE.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;

        /* Register insn bytes with translator so plugins work. */
        for (int i = 0; i < ilen; i++) {
            uint8_t byte = extract64(insn, 56 - (i * 8), 8);
            translator_fake_ldb(byte, pc + i);
        }
        op = insn >> 56;
    } else {
        /* The first two bytes contain the major opcode, from which the
           total instruction length can be derived.  */
        insn = ld_code2(env, s, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        /* Left-align the full insn in the 64-bit word, as expected by
           extract_field() and the ex_value encoding above.  */
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, s, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    /* pc_tmp is the address of the following insn; ilen is also needed
       for exception delivery (ILC).  */
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    /* Stash the raw insn and the decoded opcodes for later use.  */
    memset(&s->fields, 0, sizeof(s->fields));
    s->fields.raw_insn = insn;
    s->fields.op = op;
    s->fields.op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);
    s->insn = info;

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(&s->fields, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
6242 
/*
 * Return true if REG is an additional floating-point register, i.e.
 * anything other than f0, f2, f4 and f6 (the registers that remain
 * usable when the AFP-register control is off).
 */
static bool is_afp_reg(int reg)
{
    if (reg % 2) {
        /* All odd-numbered registers are AFP registers. */
        return true;
    }
    return reg > 6;
}
6247 
/*
 * Return true if REG may serve as the base of a 128-bit floating-point
 * register pair.  Valid bases are 0, 1, 4, 5, 8, 9, 12 and 13 -- exactly
 * the register numbers whose bit 1 is clear.
 */
static bool is_fp_pair(int reg)
{
    return (reg & 0x2) == 0;
}
6253 
/*
 * Decode and translate a single instruction at s->base.pc_next.
 * Performs facility/privilege/specification checking, then dispatches
 * through the DisasInsn helper callbacks.  Returns the resulting
 * DisasJumpType and advances s->base.pc_next past the insn.
 */
static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasOps o = {};
    bool icount = false;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s);

    /* Update insn_start now that we know the ILEN.  */
    tcg_set_insn_start_param(s->insn_start, 2, s->ilen);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      s->fields.op, s->fields.op2);
        gen_illegal_opcode(s);
        ret = DISAS_NORETURN;
        goto out;
    }

#ifndef CONFIG_USER_ONLY
    /* With PER tracing active, record the fetch address for the helper. */
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_constant_i64(s->base.pc_next);
        gen_helper_per_ifetch(cpu_env, addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            ret = DISAS_NORETURN;
            goto out;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            /* dxc selects the data-exception code raised below; 0 = none. */
            uint8_t dxc = 0;

            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (insn->flags & IF_VEC) {
                dxc = 0xfe;
            }
            if (dxc) {
                gen_data_exception(dxc);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* if vector instructions not enabled, executing them is forbidden */
        if (insn->flags & IF_VEC) {
            if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
                gen_data_exception(0xfe);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* input/output is the special case for icount mode */
        if (unlikely(insn->flags & IF_IO)) {
            icount = tb_cflags(s->base.tb) & CF_USE_ICOUNT;
            if (icount) {
                gen_io_start();
            }
        }
    }

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            ret = DISAS_NORETURN;
            goto out;
        }
    }

    /* Implement the instruction.  The helper callbacks are optional and
       run in a fixed pipeline: load inputs, prepare outputs, perform the
       operation, then (unless the op already ended the TB) write outputs
       and compute the condition code.  */
    if (insn->help_in1) {
        insn->help_in1(s, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (ret != DISAS_NORETURN) {
        if (insn->help_wout) {
            insn->help_wout(s, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* io should be the last instruction in tb when icount is enabled */
    if (unlikely(icount && ret == DISAS_NEXT)) {
        ret = DISAS_TOO_MANY;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == DISAS_NEXT || ret == DISAS_TOO_MANY) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

out:
    /* Advance to the next instruction.  */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
6396 
6397 static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6398 {
6399     DisasContext *dc = container_of(dcbase, DisasContext, base);
6400 
6401     /* 31-bit mode */
6402     if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6403         dc->base.pc_first &= 0x7fffffff;
6404         dc->base.pc_next = dc->base.pc_first;
6405     }
6406 
6407     dc->cc_op = CC_OP_DYNAMIC;
6408     dc->ex_value = dc->base.tb->cs_base;
6409     dc->exit_to_mainloop = (dc->base.tb->flags & FLAG_MASK_PER) || dc->ex_value;
6410 }
6411 
static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
    /* No per-TB setup is required for s390x. */
}
6415 
/* Emit the insn_start op that records PC, cc_op and (later) the ILEN. */
static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* Delay the set of ilen until we've read the insn. */
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
    /* Remember the op so translate_one() can patch in the ILEN later. */
    dc->insn_start = tcg_last_op();
}
6424 
6425 static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
6426                                 uint64_t pc)
6427 {
6428     uint64_t insn = cpu_lduw_code(env, pc);
6429 
6430     return pc + get_ilen((insn >> 8) & 0xff);
6431 }
6432 
6433 static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6434 {
6435     CPUS390XState *env = cs->env_ptr;
6436     DisasContext *dc = container_of(dcbase, DisasContext, base);
6437 
6438     dc->base.is_jmp = translate_one(env, dc);
6439     if (dc->base.is_jmp == DISAS_NEXT) {
6440         if (dc->ex_value ||
6441             !is_same_page(dcbase, dc->base.pc_next) ||
6442             !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) {
6443             dc->base.is_jmp = DISAS_TOO_MANY;
6444         }
6445     }
6446 }
6447 
/* Emit the TB epilogue appropriate for how translation ended. */
static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        /* The insn already generated an exception or exit; nothing to do. */
        break;
    case DISAS_TOO_MANY:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (dc->exit_to_mainloop) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
6475 
6476 static void s390x_tr_disas_log(const DisasContextBase *dcbase,
6477                                CPUState *cs, FILE *logfile)
6478 {
6479     DisasContext *dc = container_of(dcbase, DisasContext, base);
6480 
6481     if (unlikely(dc->ex_value)) {
6482         /* ??? Unfortunately target_disas can't use host memory.  */
6483         fprintf(logfile, "IN: EXECUTE %016" PRIx64, dc->ex_value);
6484     } else {
6485         fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
6486         target_disas(logfile, cs, dc->base.pc_first, dc->base.tb->size);
6487     }
6488 }
6489 
/* Hooks consumed by the generic translator loop (translator_loop). */
static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};
6498 
/*
 * Translate guest code starting at PC into TCG ops for TB, driving the
 * common translator loop with the s390x-specific hooks above.
 */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
}
6506 
6507 void s390x_restore_state_to_opc(CPUState *cs,
6508                                 const TranslationBlock *tb,
6509                                 const uint64_t *data)
6510 {
6511     S390CPU *cpu = S390_CPU(cs);
6512     CPUS390XState *env = &cpu->env;
6513     int cc_op = data[1];
6514 
6515     env->psw.addr = data[0];
6516 
6517     /* Update the CC opcode if it is not already up-to-date.  */
6518     if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6519         env->cc_op = cc_op;
6520     }
6521 
6522     /* Record ILEN.  */
6523     env->int_pgm_ilen = data[2];
6524 }
6525