/***                           VSX extension                               ***/

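/*
 * Access one 64-bit doubleword of VSR n; 'high' selects the
 * most-significant half.  gen_vsr_ptr/gen_acc_ptr return pointers to a
 * full VSR or MMA accumulator in CPU state for out-of-line helpers.
 */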
static inline void get_cpu_vsr(TCGv_i64 dst, int n, bool high)
{
    tcg_gen_ld_i64(dst, tcg_env, vsr64_offset(n, high));
}

static inline void set_cpu_vsr(int n, TCGv_i64 src, bool high)
{
    tcg_gen_st_i64(src, tcg_env, vsr64_offset(n, high));
}

static inline TCGv_ptr gen_vsr_ptr(int reg)
{
    TCGv_ptr r = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(r, tcg_env, vsr_full_offset(reg));
    return r;
}

static inline TCGv_ptr gen_acc_ptr(int reg)
{
    TCGv_ptr r = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(r, tcg_env, acc_full_offset(reg));
    return r;
}

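/*
 * Scalar VSX loads: check VSX availability, compute EA = (rA|0) + rB,
 * load with the given operation and write the upper doubleword of xT.
 * The lower doubleword is left alone (architecturally undefined).
 */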
#define VSX_LOAD_SCALAR(name, operation)                      \
static void gen_##name(DisasContext *ctx)                     \
{                                                             \
    TCGv EA;                                                  \
    TCGv_i64 t0;                                              \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    t0 = tcg_temp_new_i64();                                  \
    gen_set_access_type(ctx, ACCESS_INT);                     \
    EA = tcg_temp_new();                                      \
    gen_addr_reg_index(ctx, EA);                              \
    gen_qemu_##operation(ctx, t0, EA);                        \
    set_cpu_vsr(xT(ctx->opcode), t0, true);                   \
    /* NOTE: cpu_vsrl is undefined */                         \
}

VSX_LOAD_SCALAR(lxsdx, ld64_i64)
VSX_LOAD_SCALAR(lxsiwax, ld32s_i64)
VSX_LOAD_SCALAR(lxsibzx, ld8u_i64)
VSX_LOAD_SCALAR(lxsihzx, ld16u_i64)
VSX_LOAD_SCALAR(lxsiwzx, ld32u_i64)
VSX_LOAD_SCALAR(lxsspx, ld32fs)

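/* lxvd2x: fill both doublewords of xT with two consecutive 64-bit loads */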
static void gen_lxvd2x(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    t0 = tcg_temp_new_i64();
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    gen_qemu_ld64_i64(ctx, t0, EA);
    set_cpu_vsr(xT(ctx->opcode), t0, true);
    tcg_gen_addi_tl(EA, EA, 8);
    gen_qemu_ld64_i64(ctx, t0, EA);
    set_cpu_vsr(xT(ctx->opcode), t0, false);
}

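/*
 * lxvw4x: load four words.  In little-endian mode the two words within
 * each doubleword are swapped back into element order with a shift and
 * deposit.
 */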
static void gen_lxvw4x(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 xth;
    TCGv_i64 xtl;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();

    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();

    gen_addr_reg_index(ctx, EA);
    if (ctx->le_mode) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();

        tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEUQ);
        tcg_gen_shri_i64(t1, t0, 32);
        tcg_gen_deposit_i64(xth, t1, t0, 32, 32);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEUQ);
        tcg_gen_shri_i64(t1, t0, 32);
        tcg_gen_deposit_i64(xtl, t1, t0, 32, 32);
    } else {
        tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEUQ);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ);
    }
    set_cpu_vsr(xT(ctx->opcode), xth, true);
    set_cpu_vsr(xT(ctx->opcode), xtl, false);
}

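/*
 * lxvwsx: load one word and splat it across xT.  Targets 32..63 overlap
 * the Altivec registers, so vector rather than VSX availability is
 * checked for them.
 */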
static void gen_lxvwsx(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i32 data;

    if (xT(ctx->opcode) < 32) {
        if (unlikely(!ctx->vsx_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VSXU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }

    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();

    gen_addr_reg_index(ctx, EA);

    data = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(data, EA, ctx->mem_idx, DEF_MEMOP(MO_UL));
    tcg_gen_gvec_dup_i32(MO_UL, vsr_full_offset(xT(ctx->opcode)), 16, 16, data);
}

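/* lxvdsx: load one doubleword and splat it to both halves of xT */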
static void gen_lxvdsx(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 data;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }

    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();

    gen_addr_reg_index(ctx, EA);

    data = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(data, EA, ctx->mem_idx, DEF_MEMOP(MO_UQ));
    tcg_gen_gvec_dup_i64(MO_UQ, vsr_full_offset(xT(ctx->opcode)), 16, 16, data);
}

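/* Byte-swap each of the eight 16-bit elements held in the inh:inl pair */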
static void gen_bswap16x8(TCGv_i64 outh, TCGv_i64 outl,
                          TCGv_i64 inh, TCGv_i64 inl)
{
    TCGv_i64 mask = tcg_constant_i64(0x00FF00FF00FF00FF);
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    /* outh = ((inh & mask) << 8) | ((inh >> 8) & mask) */
    tcg_gen_and_i64(t0, inh, mask);
    tcg_gen_shli_i64(t0, t0, 8);
    tcg_gen_shri_i64(t1, inh, 8);
    tcg_gen_and_i64(t1, t1, mask);
    tcg_gen_or_i64(outh, t0, t1);

    /* outl = ((inl & mask) << 8) | ((inl >> 8) & mask) */
    tcg_gen_and_i64(t0, inl, mask);
    tcg_gen_shli_i64(t0, t0, 8);
    tcg_gen_shri_i64(t1, inl, 8);
    tcg_gen_and_i64(t1, t1, mask);
    tcg_gen_or_i64(outl, t0, t1);
}

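/*
 * Byte-swap each of the four 32-bit elements held in the inh:inl pair:
 * bswap64 reverses all eight bytes of each doubleword, so the two words
 * are swapped back afterwards with a shift and deposit.
 */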
static void gen_bswap32x4(TCGv_i64 outh, TCGv_i64 outl,
                          TCGv_i64 inh, TCGv_i64 inl)
{
    TCGv_i64 hi = tcg_temp_new_i64();
    TCGv_i64 lo = tcg_temp_new_i64();

    tcg_gen_bswap64_i64(hi, inh);
    tcg_gen_bswap64_i64(lo, inl);
    tcg_gen_shri_i64(outh, hi, 32);
    tcg_gen_deposit_i64(outh, outh, hi, 32, 32);
    tcg_gen_shri_i64(outl, lo, 32);
    tcg_gen_deposit_i64(outl, outl, lo, 32, 32);
}

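/* lxvh8x: load eight halfwords, fixing up element order in LE mode */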
static void gen_lxvh8x(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 xth;
    TCGv_i64 xtl;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
    gen_set_access_type(ctx, ACCESS_INT);

    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEUQ);
    tcg_gen_addi_tl(EA, EA, 8);
    tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ);
    if (ctx->le_mode) {
        gen_bswap16x8(xth, xtl, xth, xtl);
    }
    set_cpu_vsr(xT(ctx->opcode), xth, true);
    set_cpu_vsr(xT(ctx->opcode), xtl, false);
}

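/* lxvb16x: load sixteen bytes; byte elements need no endian fixup */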
static void gen_lxvb16x(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 xth;
    TCGv_i64 xtl;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEUQ);
    tcg_gen_addi_tl(EA, EA, 8);
    tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ);
    set_cpu_vsr(xT(ctx->opcode), xth, true);
    set_cpu_vsr(xT(ctx->opcode), xtl, false);
}

#ifdef TARGET_PPC64
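/*
 * Load/store vector with length: the byte count comes from rB and the
 * variable-length access is done out of line.  Note the base address is
 * (rA|0) alone (gen_addr_register), not (rA|0) + rB.
 */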
#define VSX_VECTOR_LOAD_STORE_LENGTH(name)                         \
static void gen_##name(DisasContext *ctx)                          \
{                                                                  \
    TCGv EA;                                                       \
    TCGv_ptr xt;                                                   \
                                                                   \
    if (xT(ctx->opcode) < 32) {                                    \
        if (unlikely(!ctx->vsx_enabled)) {                         \
            gen_exception(ctx, POWERPC_EXCP_VSXU);                 \
            return;                                                \
        }                                                          \
    } else {                                                       \
        if (unlikely(!ctx->altivec_enabled)) {                     \
            gen_exception(ctx, POWERPC_EXCP_VPU);                  \
            return;                                                \
        }                                                          \
    }                                                              \
    EA = tcg_temp_new();                                           \
    xt = gen_vsr_ptr(xT(ctx->opcode));                             \
    gen_set_access_type(ctx, ACCESS_INT);                          \
    gen_addr_register(ctx, EA);                                    \
    gen_helper_##name(tcg_env, EA, xt, cpu_gpr[rB(ctx->opcode)]);  \
}

VSX_VECTOR_LOAD_STORE_LENGTH(lxvl)
VSX_VECTOR_LOAD_STORE_LENGTH(lxvll)
VSX_VECTOR_LOAD_STORE_LENGTH(stxvl)
VSX_VECTOR_LOAD_STORE_LENGTH(stxvll)
#endif

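/*
 * Scalar VSX stores: the value to store is taken from the upper
 * doubleword of xS.
 */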
#define VSX_STORE_SCALAR(name, operation)                     \
static void gen_##name(DisasContext *ctx)                     \
{                                                             \
    TCGv EA;                                                  \
    TCGv_i64 t0;                                              \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    t0 = tcg_temp_new_i64();                                  \
    gen_set_access_type(ctx, ACCESS_INT);                     \
    EA = tcg_temp_new();                                      \
    gen_addr_reg_index(ctx, EA);                              \
    get_cpu_vsr(t0, xS(ctx->opcode), true);                   \
    gen_qemu_##operation(ctx, t0, EA);                        \
}

VSX_STORE_SCALAR(stxsdx, st64_i64)

VSX_STORE_SCALAR(stxsibx, st8_i64)
VSX_STORE_SCALAR(stxsihx, st16_i64)
VSX_STORE_SCALAR(stxsiwx, st32_i64)
VSX_STORE_SCALAR(stxsspx, st32fs)

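/* stxvd2x: store both doublewords of xS with two 64-bit stores */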
static void gen_stxvd2x(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    t0 = tcg_temp_new_i64();
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    get_cpu_vsr(t0, xS(ctx->opcode), true);
    gen_qemu_st64_i64(ctx, t0, EA);
    tcg_gen_addi_tl(EA, EA, 8);
    get_cpu_vsr(t0, xS(ctx->opcode), false);
    gen_qemu_st64_i64(ctx, t0, EA);
}

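/* stxvw4x: store four words, swapping the word pairs in LE mode */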
static void gen_stxvw4x(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 xsh;
    TCGv_i64 xsl;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xsh = tcg_temp_new_i64();
    xsl = tcg_temp_new_i64();
    get_cpu_vsr(xsh, xS(ctx->opcode), true);
    get_cpu_vsr(xsl, xS(ctx->opcode), false);
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    if (ctx->le_mode) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();

        tcg_gen_shri_i64(t0, xsh, 32);
        tcg_gen_deposit_i64(t1, t0, xsh, 32, 32);
        tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEUQ);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_shri_i64(t0, xsl, 32);
        tcg_gen_deposit_i64(t1, t0, xsl, 32, 32);
        tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEUQ);
    } else {
        tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ);
    }
}

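/* stxvh8x: store eight halfwords, byte-swapping each element in LE mode */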
static void gen_stxvh8x(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 xsh;
    TCGv_i64 xsl;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xsh = tcg_temp_new_i64();
    xsl = tcg_temp_new_i64();
    get_cpu_vsr(xsh, xS(ctx->opcode), true);
    get_cpu_vsr(xsl, xS(ctx->opcode), false);
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    if (ctx->le_mode) {
        TCGv_i64 outh = tcg_temp_new_i64();
        TCGv_i64 outl = tcg_temp_new_i64();

        gen_bswap16x8(outh, outl, xsh, xsl);
        tcg_gen_qemu_st_i64(outh, EA, ctx->mem_idx, MO_BEUQ);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_qemu_st_i64(outl, EA, ctx->mem_idx, MO_BEUQ);
    } else {
        tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ);
    }
}

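/* stxvb16x: store sixteen bytes; byte elements need no endian fixup */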
static void gen_stxvb16x(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 xsh;
    TCGv_i64 xsl;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xsh = tcg_temp_new_i64();
    xsl = tcg_temp_new_i64();
    get_cpu_vsr(xsh, xS(ctx->opcode), true);
    get_cpu_vsr(xsl, xS(ctx->opcode), false);
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ);
    tcg_gen_addi_tl(EA, EA, 8);
    tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ);
}

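/*
 * Moves between GPRs and VSRs.  Registers 0..31 overlap the FPRs and
 * 32..63 overlap the Altivec registers, hence the differing availability
 * checks below.
 */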
static void gen_mfvsrwz(DisasContext *ctx)
{
    if (xS(ctx->opcode) < 32) {
        if (unlikely(!ctx->fpu_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_FPU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }
    TCGv_i64 tmp = tcg_temp_new_i64();
    TCGv_i64 xsh = tcg_temp_new_i64();
    get_cpu_vsr(xsh, xS(ctx->opcode), true);
    tcg_gen_ext32u_i64(tmp, xsh);
    tcg_gen_trunc_i64_tl(cpu_gpr[rA(ctx->opcode)], tmp);
}

static void gen_mtvsrwa(DisasContext *ctx)
{
    if (xS(ctx->opcode) < 32) {
        if (unlikely(!ctx->fpu_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_FPU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }
    TCGv_i64 tmp = tcg_temp_new_i64();
    TCGv_i64 xsh = tcg_temp_new_i64();
    tcg_gen_extu_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_ext32s_i64(xsh, tmp);
    set_cpu_vsr(xT(ctx->opcode), xsh, true);
}

static void gen_mtvsrwz(DisasContext *ctx)
{
    if (xS(ctx->opcode) < 32) {
        if (unlikely(!ctx->fpu_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_FPU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }
    TCGv_i64 tmp = tcg_temp_new_i64();
    TCGv_i64 xsh = tcg_temp_new_i64();
    tcg_gen_extu_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_ext32u_i64(xsh, tmp);
    set_cpu_vsr(xT(ctx->opcode), xsh, true);
}

#if defined(TARGET_PPC64)
static void gen_mfvsrd(DisasContext *ctx)
{
    TCGv_i64 t0;
    if (xS(ctx->opcode) < 32) {
        if (unlikely(!ctx->fpu_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_FPU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }
    t0 = tcg_temp_new_i64();
    get_cpu_vsr(t0, xS(ctx->opcode), true);
    tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], t0);
}

static void gen_mtvsrd(DisasContext *ctx)
{
    TCGv_i64 t0;
    if (xS(ctx->opcode) < 32) {
        if (unlikely(!ctx->fpu_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_FPU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }
    t0 = tcg_temp_new_i64();
    tcg_gen_mov_i64(t0, cpu_gpr[rA(ctx->opcode)]);
    set_cpu_vsr(xT(ctx->opcode), t0, true);
}

static void gen_mfvsrld(DisasContext *ctx)
{
    TCGv_i64 t0;
    if (xS(ctx->opcode) < 32) {
        if (unlikely(!ctx->vsx_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VSXU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }
    t0 = tcg_temp_new_i64();
    get_cpu_vsr(t0, xS(ctx->opcode), false);
    tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], t0);
}

static void gen_mtvsrdd(DisasContext *ctx)
{
    TCGv_i64 t0;
    if (xT(ctx->opcode) < 32) {
        if (unlikely(!ctx->vsx_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VSXU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }

    t0 = tcg_temp_new_i64();
    if (!rA(ctx->opcode)) {
        tcg_gen_movi_i64(t0, 0);
    } else {
        tcg_gen_mov_i64(t0, cpu_gpr[rA(ctx->opcode)]);
    }
    set_cpu_vsr(xT(ctx->opcode), t0, true);

    tcg_gen_mov_i64(t0, cpu_gpr[rB(ctx->opcode)]);
    set_cpu_vsr(xT(ctx->opcode), t0, false);
}

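/* mtvsrws: splat the low word of rA into all four word elements of xT */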
static void gen_mtvsrws(DisasContext *ctx)
{
    TCGv_i64 t0;
    if (xT(ctx->opcode) < 32) {
        if (unlikely(!ctx->vsx_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VSXU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }

    t0 = tcg_temp_new_i64();
    tcg_gen_deposit_i64(t0, cpu_gpr[rA(ctx->opcode)],
                        cpu_gpr[rA(ctx->opcode)], 32, 32);
    set_cpu_vsr(xT(ctx->opcode), t0, false);
    set_cpu_vsr(xT(ctx->opcode), t0, true);
}

#endif

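/*
 * Sign, exponent and fraction masks for a double-precision value and for
 * a pair of single-precision values packed into one 64-bit doubleword.
 */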
#define OP_ABS 1
#define OP_NABS 2
#define OP_NEG 3
#define OP_CPSGN 4
#define SGN_MASK_DP  0x8000000000000000ull
#define SGN_MASK_SP 0x8000000080000000ull
#define EXP_MASK_DP  0x7FF0000000000000ull
#define EXP_MASK_SP 0x7F8000007F800000ull
#define FRC_MASK_DP (~(SGN_MASK_DP | EXP_MASK_DP))
#define FRC_MASK_SP (~(SGN_MASK_SP | EXP_MASK_SP))

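/*
 * Scalar sign-bit ops (abs/nabs/neg/cpsgn) act on the sign of the upper
 * doubleword only; the lower doubleword of xT is zeroed.
 */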
#define VSX_SCALAR_MOVE(name, op, sgn_mask)                       \
static void glue(gen_, name)(DisasContext *ctx)                   \
    {                                                             \
        TCGv_i64 xb, sgm;                                         \
        if (unlikely(!ctx->vsx_enabled)) {                        \
            gen_exception(ctx, POWERPC_EXCP_VSXU);                \
            return;                                               \
        }                                                         \
        xb = tcg_temp_new_i64();                                  \
        sgm = tcg_temp_new_i64();                                 \
        get_cpu_vsr(xb, xB(ctx->opcode), true);                   \
        tcg_gen_movi_i64(sgm, sgn_mask);                          \
        switch (op) {                                             \
            case OP_ABS: {                                        \
                tcg_gen_andc_i64(xb, xb, sgm);                    \
                break;                                            \
            }                                                     \
            case OP_NABS: {                                       \
                tcg_gen_or_i64(xb, xb, sgm);                      \
                break;                                            \
            }                                                     \
            case OP_NEG: {                                        \
                tcg_gen_xor_i64(xb, xb, sgm);                     \
                break;                                            \
            }                                                     \
            case OP_CPSGN: {                                      \
                TCGv_i64 xa = tcg_temp_new_i64();                 \
                get_cpu_vsr(xa, xA(ctx->opcode), true);           \
                tcg_gen_and_i64(xa, xa, sgm);                     \
                tcg_gen_andc_i64(xb, xb, sgm);                    \
                tcg_gen_or_i64(xb, xb, xa);                       \
                break;                                            \
            }                                                     \
        }                                                         \
        set_cpu_vsr(xT(ctx->opcode), xb, true);                   \
        set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false); \
    }

VSX_SCALAR_MOVE(xsabsdp, OP_ABS, SGN_MASK_DP)
VSX_SCALAR_MOVE(xsnabsdp, OP_NABS, SGN_MASK_DP)
VSX_SCALAR_MOVE(xsnegdp, OP_NEG, SGN_MASK_DP)
VSX_SCALAR_MOVE(xscpsgndp, OP_CPSGN, SGN_MASK_DP)

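/*
 * Quad-precision variants: the operands live in VSRs 32..63 (rX + 32)
 * and the lower doubleword is copied through from xB unchanged.
 */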
#define VSX_SCALAR_MOVE_QP(name, op, sgn_mask)                    \
static void glue(gen_, name)(DisasContext *ctx)                   \
{                                                                 \
    int xa;                                                       \
    int xt = rD(ctx->opcode) + 32;                                \
    int xb = rB(ctx->opcode) + 32;                                \
    TCGv_i64 xah, xbh, xbl, sgm, tmp;                             \
                                                                  \
    if (unlikely(!ctx->vsx_enabled)) {                            \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                    \
        return;                                                   \
    }                                                             \
    xbh = tcg_temp_new_i64();                                     \
    xbl = tcg_temp_new_i64();                                     \
    sgm = tcg_temp_new_i64();                                     \
    tmp = tcg_temp_new_i64();                                     \
    get_cpu_vsr(xbh, xb, true);                                   \
    get_cpu_vsr(xbl, xb, false);                                  \
    tcg_gen_movi_i64(sgm, sgn_mask);                              \
    switch (op) {                                                 \
    case OP_ABS:                                                  \
        tcg_gen_andc_i64(xbh, xbh, sgm);                          \
        break;                                                    \
    case OP_NABS:                                                 \
        tcg_gen_or_i64(xbh, xbh, sgm);                            \
        break;                                                    \
    case OP_NEG:                                                  \
        tcg_gen_xor_i64(xbh, xbh, sgm);                           \
        break;                                                    \
    case OP_CPSGN:                                                \
        xah = tcg_temp_new_i64();                                 \
        xa = rA(ctx->opcode) + 32;                                \
        get_cpu_vsr(tmp, xa, true);                               \
        tcg_gen_and_i64(xah, tmp, sgm);                           \
        tcg_gen_andc_i64(xbh, xbh, sgm);                          \
        tcg_gen_or_i64(xbh, xbh, xah);                            \
        break;                                                    \
    }                                                             \
    set_cpu_vsr(xt, xbh, true);                                   \
    set_cpu_vsr(xt, xbl, false);                                  \
}

VSX_SCALAR_MOVE_QP(xsabsqp, OP_ABS, SGN_MASK_DP)
VSX_SCALAR_MOVE_QP(xsnabsqp, OP_NABS, SGN_MASK_DP)
VSX_SCALAR_MOVE_QP(xsnegqp, OP_NEG, SGN_MASK_DP)
VSX_SCALAR_MOVE_QP(xscpsgnqp, OP_CPSGN, SGN_MASK_DP)

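/* Generate the 64-bit integer fallbacks for the gvec sign-bit ops below */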
#define TCG_OP_IMM_i64(FUNC, OP, IMM)                           \
    static void FUNC(TCGv_i64 t, TCGv_i64 b)                    \
    {                                                           \
        OP(t, b, IMM);                                          \
    }

TCG_OP_IMM_i64(do_xvabssp_i64, tcg_gen_andi_i64, ~SGN_MASK_SP)
TCG_OP_IMM_i64(do_xvnabssp_i64, tcg_gen_ori_i64, SGN_MASK_SP)
TCG_OP_IMM_i64(do_xvnegsp_i64, tcg_gen_xori_i64, SGN_MASK_SP)
TCG_OP_IMM_i64(do_xvabsdp_i64, tcg_gen_andi_i64, ~SGN_MASK_DP)
TCG_OP_IMM_i64(do_xvnabsdp_i64, tcg_gen_ori_i64, SGN_MASK_DP)
TCG_OP_IMM_i64(do_xvnegdp_i64, tcg_gen_xori_i64, SGN_MASK_DP)
#undef TCG_OP_IMM_i64

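/* Apply a bitwise op between b and a splat of the per-element sign bit */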
static void xv_msb_op1(unsigned vece, TCGv_vec t, TCGv_vec b,
                 void (*tcg_gen_op_vec)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec))
{
    uint64_t msb = (vece == MO_32) ? SGN_MASK_SP : SGN_MASK_DP;
    tcg_gen_op_vec(vece, t, b, tcg_constant_vec_matching(t, vece, msb));
}

static void do_xvabs_vec(unsigned vece, TCGv_vec t, TCGv_vec b)
{
    xv_msb_op1(vece, t, b, tcg_gen_andc_vec);
}

static void do_xvnabs_vec(unsigned vece, TCGv_vec t, TCGv_vec b)
{
    xv_msb_op1(vece, t, b, tcg_gen_or_vec);
}

static void do_xvneg_vec(unsigned vece, TCGv_vec t, TCGv_vec b)
{
    xv_msb_op1(vece, t, b, tcg_gen_xor_vec);
}

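/*
 * Expand a sign-bit op with gvec, using the vector implementation when
 * the host supports it and the 64-bit integer fallback otherwise.
 */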
static bool do_vsx_msb_op(DisasContext *ctx, arg_XX2 *a, unsigned vece,
                          void (*vec)(unsigned, TCGv_vec, TCGv_vec),
                          void (*i64)(TCGv_i64, TCGv_i64))
{
    static const TCGOpcode vecop_list[] = {
        0
    };

    const GVecGen2 op = {
       .fni8 = i64,
       .fniv = vec,
       .opt_opc = vecop_list,
       .vece = vece
    };

    REQUIRE_INSNS_FLAGS2(ctx, VSX);
    REQUIRE_VSX(ctx);

    tcg_gen_gvec_2(vsr_full_offset(a->xt), vsr_full_offset(a->xb),
                   16, 16, &op);

    return true;
}

TRANS(XVABSDP, do_vsx_msb_op, MO_64, do_xvabs_vec, do_xvabsdp_i64)
TRANS(XVNABSDP, do_vsx_msb_op, MO_64, do_xvnabs_vec, do_xvnabsdp_i64)
TRANS(XVNEGDP, do_vsx_msb_op, MO_64, do_xvneg_vec, do_xvnegdp_i64)
TRANS(XVABSSP, do_vsx_msb_op, MO_32, do_xvabs_vec, do_xvabssp_i64)
TRANS(XVNABSSP, do_vsx_msb_op, MO_32, do_xvnabs_vec, do_xvnabssp_i64)
TRANS(XVNEGSP, do_vsx_msb_op, MO_32, do_xvneg_vec, do_xvnegsp_i64)

static void do_xvcpsgndp_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_andi_i64(a, a, SGN_MASK_DP);
    tcg_gen_andi_i64(b, b, ~SGN_MASK_DP);
    tcg_gen_or_i64(t, a, b);
}

static void do_xvcpsgnsp_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_andi_i64(a, a, SGN_MASK_SP);
    tcg_gen_andi_i64(b, b, ~SGN_MASK_SP);
    tcg_gen_or_i64(t, a, b);
}

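/* Copy-sign via bitsel: take the sign bit from a, everything else from b */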
static void do_xvcpsgn_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
{
    uint64_t msb = (vece == MO_32) ? SGN_MASK_SP : SGN_MASK_DP;
    tcg_gen_bitsel_vec(vece, t, tcg_constant_vec_matching(t, vece, msb), a, b);
}

static bool do_xvcpsgn(DisasContext *ctx, arg_XX3 *a, unsigned vece)
{
    static const TCGOpcode vecop_list[] = {
        0
    };

    static const GVecGen3 op[] = {
        {
            .fni8 = do_xvcpsgnsp_i64,
            .fniv = do_xvcpsgn_vec,
            .opt_opc = vecop_list,
            .vece = MO_32
        },
        {
            .fni8 = do_xvcpsgndp_i64,
            .fniv = do_xvcpsgn_vec,
            .opt_opc = vecop_list,
            .vece = MO_64
        },
    };

    REQUIRE_INSNS_FLAGS2(ctx, VSX);
    REQUIRE_VSX(ctx);

    tcg_gen_gvec_3(vsr_full_offset(a->xt), vsr_full_offset(a->xa),
                   vsr_full_offset(a->xb), 16, 16, &op[vece - MO_32]);

    return true;
}

TRANS(XVCPSGNSP, do_xvcpsgn, MO_32)
TRANS(XVCPSGNDP, do_xvcpsgn, MO_64)

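/*
 * Vector float compares.  With the record bit set, the helper's summary
 * result is written to CR6; otherwise it is computed and discarded.
 */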
#define VSX_CMP(name, op1, op2, inval, type)                                  \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 ignored;                                                         \
    TCGv_ptr xt, xa, xb;                                                      \
    if (unlikely(!ctx->vsx_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                                \
        return;                                                               \
    }                                                                         \
    xt = gen_vsr_ptr(xT(ctx->opcode));                                        \
    xa = gen_vsr_ptr(xA(ctx->opcode));                                        \
    xb = gen_vsr_ptr(xB(ctx->opcode));                                        \
    if ((ctx->opcode >> (31 - 21)) & 1) {                                     \
        gen_helper_##name(cpu_crf[6], tcg_env, xt, xa, xb);                   \
    } else {                                                                  \
        ignored = tcg_temp_new_i32();                                         \
        gen_helper_##name(ignored, tcg_env, xt, xa, xb);                      \
    }                                                                         \
}

VSX_CMP(xvcmpeqdp, 0x0C, 0x0C, 0, PPC2_VSX)
VSX_CMP(xvcmpgedp, 0x0C, 0x0E, 0, PPC2_VSX)
VSX_CMP(xvcmpgtdp, 0x0C, 0x0D, 0, PPC2_VSX)
VSX_CMP(xvcmpnedp, 0x0C, 0x0F, 0, PPC2_ISA300)
VSX_CMP(xvcmpeqsp, 0x0C, 0x08, 0, PPC2_VSX)
VSX_CMP(xvcmpgesp, 0x0C, 0x0A, 0, PPC2_VSX)
VSX_CMP(xvcmpgtsp, 0x0C, 0x09, 0, PPC2_VSX)
VSX_CMP(xvcmpnesp, 0x0C, 0x0B, 0, PPC2_VSX)

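/* xscvqpdp[o]: round quad to double precision; a->rc selects round-to-odd */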
static bool trans_XSCVQPDP(DisasContext *ctx, arg_X_tb_rc *a)
{
    TCGv_i32 ro;
    TCGv_ptr xt, xb;

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VSX(ctx);

    ro = tcg_constant_i32(a->rc);

    xt = gen_avr_ptr(a->rt);
    xb = gen_avr_ptr(a->rb);
    gen_helper_XSCVQPDP(tcg_env, ro, xt, xb);
    return true;
}

static bool do_helper_env_X_tb(DisasContext *ctx, arg_X_tb *a,
                               void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr))
{
    TCGv_ptr xt, xb;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);

    xt = gen_avr_ptr(a->rt);
    xb = gen_avr_ptr(a->rb);
    gen_helper(tcg_env, xt, xb);
    return true;
}

TRANS(XSCVUQQP, do_helper_env_X_tb, gen_helper_XSCVUQQP)
TRANS(XSCVSQQP, do_helper_env_X_tb, gen_helper_XSCVSQQP)
TRANS(XSCVQPUQZ, do_helper_env_X_tb, gen_helper_XSCVQPUQZ)
TRANS(XSCVQPSQZ, do_helper_env_X_tb, gen_helper_XSCVQPSQZ)

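/*
 * Boilerplate generators that defer entirely to out-of-line helpers.
 * The X* variants take VSR numbers from the XX-form fields, the R*
 * variants address the quad-precision registers in VSRs 32..63
 * (rD/rA/rB + 32).
 */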
#define GEN_VSX_HELPER_2(name, op1, op2, inval, type)                         \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 opc;                                                             \
    if (unlikely(!ctx->vsx_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                                \
        return;                                                               \
    }                                                                         \
    opc = tcg_constant_i32(ctx->opcode);                                      \
    gen_helper_##name(tcg_env, opc);                                          \
}

#define GEN_VSX_HELPER_X3(name, op1, op2, inval, type)                        \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_ptr xt, xa, xb;                                                      \
    if (unlikely(!ctx->vsx_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                                \
        return;                                                               \
    }                                                                         \
    xt = gen_vsr_ptr(xT(ctx->opcode));                                        \
    xa = gen_vsr_ptr(xA(ctx->opcode));                                        \
    xb = gen_vsr_ptr(xB(ctx->opcode));                                        \
    gen_helper_##name(tcg_env, xt, xa, xb);                                   \
}

#define GEN_VSX_HELPER_X2(name, op1, op2, inval, type)                        \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_ptr xt, xb;                                                          \
    if (unlikely(!ctx->vsx_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                                \
        return;                                                               \
    }                                                                         \
    xt = gen_vsr_ptr(xT(ctx->opcode));                                        \
    xb = gen_vsr_ptr(xB(ctx->opcode));                                        \
    gen_helper_##name(tcg_env, xt, xb);                                       \
}

#define GEN_VSX_HELPER_X2_AB(name, op1, op2, inval, type)                     \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 opc;                                                             \
    TCGv_ptr xa, xb;                                                          \
    if (unlikely(!ctx->vsx_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                                \
        return;                                                               \
    }                                                                         \
    opc = tcg_constant_i32(ctx->opcode);                                      \
    xa = gen_vsr_ptr(xA(ctx->opcode));                                        \
    xb = gen_vsr_ptr(xB(ctx->opcode));                                        \
    gen_helper_##name(tcg_env, opc, xa, xb);                                  \
}

#define GEN_VSX_HELPER_X1(name, op1, op2, inval, type)                        \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 opc;                                                             \
    TCGv_ptr xb;                                                              \
    if (unlikely(!ctx->vsx_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                                \
        return;                                                               \
    }                                                                         \
    opc = tcg_constant_i32(ctx->opcode);                                      \
    xb = gen_vsr_ptr(xB(ctx->opcode));                                        \
    gen_helper_##name(tcg_env, opc, xb);                                      \
}

#define GEN_VSX_HELPER_R3(name, op1, op2, inval, type)                        \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 opc;                                                             \
    TCGv_ptr xt, xa, xb;                                                      \
    if (unlikely(!ctx->vsx_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                                \
        return;                                                               \
    }                                                                         \
    opc = tcg_constant_i32(ctx->opcode);                                      \
    xt = gen_vsr_ptr(rD(ctx->opcode) + 32);                                   \
    xa = gen_vsr_ptr(rA(ctx->opcode) + 32);                                   \
    xb = gen_vsr_ptr(rB(ctx->opcode) + 32);                                   \
    gen_helper_##name(tcg_env, opc, xt, xa, xb);                              \
}

#define GEN_VSX_HELPER_R2(name, op1, op2, inval, type)                        \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 opc;                                                             \
    TCGv_ptr xt, xb;                                                          \
    if (unlikely(!ctx->vsx_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                                \
        return;                                                               \
    }                                                                         \
    opc = tcg_constant_i32(ctx->opcode);                                      \
    xt = gen_vsr_ptr(rD(ctx->opcode) + 32);                                   \
    xb = gen_vsr_ptr(rB(ctx->opcode) + 32);                                   \
    gen_helper_##name(tcg_env, opc, xt, xb);                                  \
}

#define GEN_VSX_HELPER_R2_AB(name, op1, op2, inval, type)                     \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 opc;                                                             \
    TCGv_ptr xa, xb;                                                          \
    if (unlikely(!ctx->vsx_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                                \
        return;                                                               \
    }                                                                         \
    opc = tcg_constant_i32(ctx->opcode);                                      \
    xa = gen_vsr_ptr(rA(ctx->opcode) + 32);                                   \
    xb = gen_vsr_ptr(rB(ctx->opcode) + 32);                                   \
    gen_helper_##name(tcg_env, opc, xa, xb);                                  \
}

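/* Two-operand form reading and writing only the upper doubleword */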
#define GEN_VSX_HELPER_XT_XB_ENV(name, op1, op2, inval, type) \
static void gen_##name(DisasContext *ctx)                     \
{                                                             \
    TCGv_i64 t0;                                              \
    TCGv_i64 t1;                                              \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    t0 = tcg_temp_new_i64();                                  \
    t1 = tcg_temp_new_i64();                                  \
    get_cpu_vsr(t0, xB(ctx->opcode), true);                   \
    gen_helper_##name(t1, tcg_env, t0);                       \
    set_cpu_vsr(xT(ctx->opcode), t1, true);                   \
    set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false); \
}

GEN_VSX_HELPER_X3(xsadddp, 0x00, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_R3(xsaddqp, 0x04, 0x00, 0, PPC2_ISA300)
GEN_VSX_HELPER_X3(xssubdp, 0x00, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xsmuldp, 0x00, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_R3(xsmulqp, 0x04, 0x01, 0, PPC2_ISA300)
GEN_VSX_HELPER_X3(xsdivdp, 0x00, 0x07, 0, PPC2_VSX)
GEN_VSX_HELPER_R3(xsdivqp, 0x04, 0x11, 0, PPC2_ISA300)
GEN_VSX_HELPER_X2(xsredp, 0x14, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xssqrtdp, 0x16, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xsrsqrtedp, 0x14, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_X2_AB(xstdivdp, 0x14, 0x07, 0, PPC2_VSX)
GEN_VSX_HELPER_X1(xstsqrtdp, 0x14, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_X2_AB(xscmpexpdp, 0x0C, 0x07, 0, PPC2_ISA300)
GEN_VSX_HELPER_R2_AB(xscmpexpqp, 0x04, 0x05, 0, PPC2_ISA300)
GEN_VSX_HELPER_X2_AB(xscmpodp, 0x0C, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_X2_AB(xscmpudp, 0x0C, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_R2_AB(xscmpoqp, 0x04, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_R2_AB(xscmpuqp, 0x04, 0x14, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xsmaxdp, 0x00, 0x14, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xsmindp, 0x00, 0x15, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xscvdphp, 0x16, 0x15, 0x11, PPC2_ISA300)
GEN_VSX_HELPER_X2(xscvdpsp, 0x12, 0x10, 0, PPC2_VSX)
GEN_VSX_HELPER_R2(xscvdpqp, 0x04, 0x1A, 0x16, PPC2_ISA300)
GEN_VSX_HELPER_XT_XB_ENV(xscvdpspn, 0x16, 0x10, 0, PPC2_VSX207)
GEN_VSX_HELPER_R2(xscvqpsdz, 0x04, 0x1A, 0x19, PPC2_ISA300)
GEN_VSX_HELPER_R2(xscvqpswz, 0x04, 0x1A, 0x09, PPC2_ISA300)
GEN_VSX_HELPER_R2(xscvqpudz, 0x04, 0x1A, 0x11, PPC2_ISA300)
GEN_VSX_HELPER_R2(xscvqpuwz, 0x04, 0x1A, 0x01, PPC2_ISA300)
GEN_VSX_HELPER_X2(xscvhpdp, 0x16, 0x15, 0x10, PPC2_ISA300)
GEN_VSX_HELPER_R2(xscvsdqp, 0x04, 0x1A, 0x0A, PPC2_ISA300)
GEN_VSX_HELPER_X2(xscvspdp, 0x12, 0x14, 0, PPC2_VSX)

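/*
 * Inline data-class tests for xvtstdc[sd]p: each produces an all-ones
 * element where the operand matches the class and zero elsewhere.
 */
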
/* test if +Inf */
static void gen_is_pos_inf(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v)
{
    uint64_t exp_msk = (vece == MO_32) ? (uint32_t)EXP_MASK_SP : EXP_MASK_DP;
    tcg_gen_cmp_vec(TCG_COND_EQ, vece, t, b,
                    tcg_constant_vec_matching(t, vece, exp_msk));
}

/* test if -Inf */
static void gen_is_neg_inf(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v)
{
    uint64_t exp_msk = (vece == MO_32) ? (uint32_t)EXP_MASK_SP : EXP_MASK_DP;
    uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP;
    tcg_gen_cmp_vec(TCG_COND_EQ, vece, t, b,
                    tcg_constant_vec_matching(t, vece, sgn_msk | exp_msk));
}

/* test if +Inf or -Inf */
static void gen_is_any_inf(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v)
{
    uint64_t exp_msk = (vece == MO_32) ? (uint32_t)EXP_MASK_SP : EXP_MASK_DP;
    uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP;
    tcg_gen_andc_vec(vece, b, b, tcg_constant_vec_matching(t, vece, sgn_msk));
    tcg_gen_cmp_vec(TCG_COND_EQ, vece, t, b,
                    tcg_constant_vec_matching(t, vece, exp_msk));
}

/* test if +0 */
static void gen_is_pos_zero(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v)
{
    tcg_gen_cmp_vec(TCG_COND_EQ, vece, t, b,
                    tcg_constant_vec_matching(t, vece, 0));
}

/* test if -0 */
static void gen_is_neg_zero(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v)
{
    uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP;
    tcg_gen_cmp_vec(TCG_COND_EQ, vece, t, b,
                    tcg_constant_vec_matching(t, vece, sgn_msk));
}

/* test if +0 or -0 */
static void gen_is_any_zero(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v)
{
    uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP;
    tcg_gen_andc_vec(vece, b, b, tcg_constant_vec_matching(t, vece, sgn_msk));
    tcg_gen_cmp_vec(TCG_COND_EQ, vece, t, b,
                    tcg_constant_vec_matching(t, vece, 0));
}

/* test if +Denormal */
static void gen_is_pos_denormal(unsigned vece, TCGv_vec t,
                                TCGv_vec b, int64_t v)
{
    uint64_t frc_msk = (vece == MO_32) ? (uint32_t)FRC_MASK_SP : FRC_MASK_DP;
    tcg_gen_cmp_vec(TCG_COND_LEU, vece, t, b,
                    tcg_constant_vec_matching(t, vece, frc_msk));
    tcg_gen_cmp_vec(TCG_COND_NE, vece, b, b,
                    tcg_constant_vec_matching(t, vece, 0));
    tcg_gen_and_vec(vece, t, t, b);
}

/* test if -Denormal */
static void gen_is_neg_denormal(unsigned vece, TCGv_vec t,
                                TCGv_vec b, int64_t v)
{
    uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP;
    uint64_t frc_msk = (vece == MO_32) ? (uint32_t)FRC_MASK_SP : FRC_MASK_DP;
    tcg_gen_cmp_vec(TCG_COND_LEU, vece, t, b,
                    tcg_constant_vec_matching(t, vece, sgn_msk | frc_msk));
    tcg_gen_cmp_vec(TCG_COND_GTU, vece, b, b,
                    tcg_constant_vec_matching(t, vece, sgn_msk));
    tcg_gen_and_vec(vece, t, t, b);
}

/* test if +Denormal or -Denormal */
static void gen_is_any_denormal(unsigned vece, TCGv_vec t,
                                TCGv_vec b, int64_t v)
{
    uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP;
    uint64_t frc_msk = (vece == MO_32) ? (uint32_t)FRC_MASK_SP : FRC_MASK_DP;
    tcg_gen_andc_vec(vece, b, b, tcg_constant_vec_matching(t, vece, sgn_msk));
    tcg_gen_cmp_vec(TCG_COND_LE, vece, t, b,
                    tcg_constant_vec_matching(t, vece, frc_msk));
    tcg_gen_cmp_vec(TCG_COND_NE, vece, b, b,
                    tcg_constant_vec_matching(t, vece, 0));
    tcg_gen_and_vec(vece, t, t, b);
}

/* test if NaN */
static void gen_is_nan(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v)
{
    uint64_t exp_msk = (vece == MO_32) ? (uint32_t)EXP_MASK_SP : EXP_MASK_DP;
    uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP;
    tcg_gen_and_vec(vece, b, b, tcg_constant_vec_matching(t, vece, ~sgn_msk));
    tcg_gen_cmp_vec(TCG_COND_GT, vece, t, b,
                    tcg_constant_vec_matching(t, vece, exp_msk));
}

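/*
 * xvtstdc[sd]p: the single-class (and paired +/-) masks are expanded
 * inline with the vector tests above; any other uim combination falls
 * back to the out-of-line helper through .fnoi.
 */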
static bool do_xvtstdc(DisasContext *ctx, arg_XX2_uim *a, unsigned vece)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, 0
    };

    GVecGen2i op = {
        .fnoi = (vece == MO_32) ? gen_helper_XVTSTDCSP : gen_helper_XVTSTDCDP,
        .vece = vece,
        .opt_opc = vecop_list
    };

    REQUIRE_VSX(ctx);

    switch (a->uim) {
    case 0:
        set_cpu_vsr(a->xt, tcg_constant_i64(0), true);
        set_cpu_vsr(a->xt, tcg_constant_i64(0), false);
        return true;
    case ((1 << 0) | (1 << 1)):
        /* test if +Denormal or -Denormal */
        op.fniv = gen_is_any_denormal;
        break;
    case (1 << 0):
        /* test if -Denormal */
        op.fniv = gen_is_neg_denormal;
        break;
    case (1 << 1):
        /* test if +Denormal */
        op.fniv = gen_is_pos_denormal;
        break;
    case ((1 << 2) | (1 << 3)):
        /* test if +0 or -0 */
        op.fniv = gen_is_any_zero;
        break;
    case (1 << 2):
        /* test if -0 */
        op.fniv = gen_is_neg_zero;
        break;
    case (1 << 3):
        /* test if +0 */
        op.fniv = gen_is_pos_zero;
        break;
    case ((1 << 4) | (1 << 5)):
        /* test if +Inf or -Inf */
        op.fniv = gen_is_any_inf;
        break;
    case (1 << 4):
        /* test if -Inf */
        op.fniv = gen_is_neg_inf;
        break;
    case (1 << 5):
        /* test if +Inf */
        op.fniv = gen_is_pos_inf;
        break;
    case (1 << 6):
        /* test if NaN */
        op.fniv = gen_is_nan;
        break;
    }
    tcg_gen_gvec_2i(vsr_full_offset(a->xt), vsr_full_offset(a->xb),
                    16, 16, a->uim, &op);

    return true;
}

TRANS_FLAGS2(VSX, XVTSTDCSP, do_xvtstdc, MO_32)
TRANS_FLAGS2(VSX, XVTSTDCDP, do_xvtstdc, MO_64)

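/*
 * Scalar data-class tests that set a CR field; xb is a VSR for the
 * SP/DP forms and a VR for the QP form.
 */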
static bool do_XX2_bf_uim(DisasContext *ctx, arg_XX2_bf_uim *a, bool vsr,
                     void (*gen_helper)(TCGv_env, TCGv_i32, TCGv_i32, TCGv_ptr))
{
    TCGv_ptr xb;

    REQUIRE_VSX(ctx);
    xb = vsr ? gen_vsr_ptr(a->xb) : gen_avr_ptr(a->xb);
    gen_helper(tcg_env, tcg_constant_i32(a->bf), tcg_constant_i32(a->uim), xb);
    return true;
}

TRANS_FLAGS2(ISA300, XSTSTDCSP, do_XX2_bf_uim, true, gen_helper_XSTSTDCSP)
TRANS_FLAGS2(ISA300, XSTSTDCDP, do_XX2_bf_uim, true, gen_helper_XSTSTDCDP)
TRANS_FLAGS2(ISA300, XSTSTDCQP, do_XX2_bf_uim, false, gen_helper_XSTSTDCQP)

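/* xscvspdpn: non-signalling single- to double-precision conversion */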
bool trans_XSCVSPDPN(DisasContext *ctx, arg_XX2 *a)
{
    TCGv_i64 tmp;

    REQUIRE_INSNS_FLAGS2(ctx, VSX207);
    REQUIRE_VSX(ctx);

    tmp = tcg_temp_new_i64();
    get_cpu_vsr(tmp, a->xb, true);

    gen_helper_XSCVSPDPN(tmp, tmp);

    set_cpu_vsr(a->xt, tmp, true);
    set_cpu_vsr(a->xt, tcg_constant_i64(0), false);
    return true;
}

GEN_VSX_HELPER_X2(xscvdpsxds, 0x10, 0x15, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xscvdpsxws, 0x10, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xscvdpuxds, 0x10, 0x14, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xscvdpuxws, 0x10, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xscvsxddp, 0x10, 0x17, 0, PPC2_VSX)
GEN_VSX_HELPER_R2(xscvudqp, 0x04, 0x1A, 0x02, PPC2_ISA300)
GEN_VSX_HELPER_X2(xscvuxddp, 0x10, 0x16, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xsrdpi, 0x12, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xsrdpic, 0x16, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xsrdpim, 0x12, 0x07, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xsrdpip, 0x12, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xsrdpiz, 0x12, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_XT_XB_ENV(xsrsp, 0x12, 0x11, 0, PPC2_VSX207)
GEN_VSX_HELPER_R2(xsrqpi, 0x05, 0x00, 0, PPC2_ISA300)
GEN_VSX_HELPER_R2(xsrqpxp, 0x05, 0x01, 0, PPC2_ISA300)
GEN_VSX_HELPER_R2(xssqrtqp, 0x04, 0x19, 0x1B, PPC2_ISA300)
GEN_VSX_HELPER_R3(xssubqp, 0x04, 0x10, 0, PPC2_ISA300)
GEN_VSX_HELPER_X3(xsaddsp, 0x00, 0x00, 0, PPC2_VSX207)
GEN_VSX_HELPER_X3(xssubsp, 0x00, 0x01, 0, PPC2_VSX207)
GEN_VSX_HELPER_X3(xsmulsp, 0x00, 0x02, 0, PPC2_VSX207)
GEN_VSX_HELPER_X3(xsdivsp, 0x00, 0x03, 0, PPC2_VSX207)
GEN_VSX_HELPER_X2(xsresp, 0x14, 0x01, 0, PPC2_VSX207)
GEN_VSX_HELPER_X2(xssqrtsp, 0x16, 0x00, 0, PPC2_VSX207)
GEN_VSX_HELPER_X2(xsrsqrtesp, 0x14, 0x00, 0, PPC2_VSX207)
GEN_VSX_HELPER_X2(xscvsxdsp, 0x10, 0x13, 0, PPC2_VSX207)
GEN_VSX_HELPER_X2(xscvuxdsp, 0x10, 0x12, 0, PPC2_VSX207)

GEN_VSX_HELPER_X3(xvadddp, 0x00, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xvsubdp, 0x00, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xvmuldp, 0x00, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xvdivdp, 0x00, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvredp, 0x14, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvsqrtdp, 0x16, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrsqrtedp, 0x14, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_X2_AB(xvtdivdp, 0x14, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_X1(xvtsqrtdp, 0x14, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xvmaxdp, 0x00, 0x1C, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xvmindp, 0x00, 0x1D, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvdpsp, 0x12, 0x18, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvdpsxds, 0x10, 0x1D, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvdpsxws, 0x10, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvdpuxds, 0x10, 0x1C, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvdpuxws, 0x10, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvsxddp, 0x10, 0x1F, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvuxddp, 0x10, 0x1E, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvsxwdp, 0x10, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvuxwdp, 0x10, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrdpi, 0x12, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrdpic, 0x16, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrdpim, 0x12, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrdpip, 0x12, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrdpiz, 0x12, 0x0D, 0, PPC2_VSX)

GEN_VSX_HELPER_X3(xvaddsp, 0x00, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xvsubsp, 0x00, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xvmulsp, 0x00, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xvdivsp, 0x00, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvresp, 0x14, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvsqrtsp, 0x16, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrsqrtesp, 0x14, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_X2_AB(xvtdivsp, 0x14, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_X1(xvtsqrtsp, 0x14, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xvmaxsp, 0x00, 0x18, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xvminsp, 0x00, 0x19, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvspdp, 0x12, 0x1C, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvhpsp, 0x16, 0x1D, 0x18, PPC2_ISA300)
GEN_VSX_HELPER_X2(xvcvsphp, 0x16, 0x1D, 0x19, PPC2_ISA300)
GEN_VSX_HELPER_X2(xvcvspsxds, 0x10, 0x19, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvspsxws, 0x10, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvspuxds, 0x10, 0x18, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvspuxws, 0x10, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvsxdsp, 0x10, 0x1B, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvuxdsp, 0x10, 0x1A, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvsxwsp, 0x10, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvuxwsp, 0x10, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrspi, 0x12, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrspic, 0x16, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrspim, 0x12, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrspip, 0x12, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrspiz, 0x12, 0x09, 0, PPC2_VSX)

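/*
 * xxperm/xxpermr reuse the Altivec permute helpers, passing xT as both
 * the destination and the middle source operand.
 */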
static bool trans_XXPERM(DisasContext *ctx, arg_XX3 *a)
{
    TCGv_ptr xt, xa, xb;

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VSX(ctx);

    xt = gen_vsr_ptr(a->xt);
    xa = gen_vsr_ptr(a->xa);
    xb = gen_vsr_ptr(a->xb);

    gen_helper_VPERM(xt, xa, xt, xb);
    return true;
}

static bool trans_XXPERMR(DisasContext *ctx, arg_XX3 *a)
{
    TCGv_ptr xt, xa, xb;

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VSX(ctx);

    xt = gen_vsr_ptr(a->xt);
    xa = gen_vsr_ptr(a->xa);
    xb = gen_vsr_ptr(a->xb);

    gen_helper_VPERMR(xt, xa, xt, xb);
    return true;
}

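/*
 * xxpermdi: dm selects which doubleword of xA and xB lands in each half
 * of xT; a second temporary is only needed when xT aliases a source.
 */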
static bool trans_XXPERMDI(DisasContext *ctx, arg_XX3_dm *a)
{
    TCGv_i64 t0, t1;

    REQUIRE_INSNS_FLAGS2(ctx, VSX);
    REQUIRE_VSX(ctx);

    t0 = tcg_temp_new_i64();

    if (unlikely(a->xt == a->xa || a->xt == a->xb)) {
        t1 = tcg_temp_new_i64();

        get_cpu_vsr(t0, a->xa, (a->dm & 2) == 0);
        get_cpu_vsr(t1, a->xb, (a->dm & 1) == 0);

        set_cpu_vsr(a->xt, t0, true);
        set_cpu_vsr(a->xt, t1, false);
    } else {
        get_cpu_vsr(t0, a->xa, (a->dm & 2) == 0);
        set_cpu_vsr(a->xt, t0, true);

        get_cpu_vsr(t0, a->xb, (a->dm & 1) == 0);
        set_cpu_vsr(a->xt, t0, false);
    }
    return true;
}

1357static bool trans_XXPERMX(DisasContext *ctx, arg_8RR_XX4_uim3 *a)
1358{
1359    TCGv_ptr xt, xa, xb, xc;
1360
1361    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1362    REQUIRE_VSX(ctx);
1363
1364    xt = gen_vsr_ptr(a->xt);
1365    xa = gen_vsr_ptr(a->xa);
1366    xb = gen_vsr_ptr(a->xb);
1367    xc = gen_vsr_ptr(a->xc);
1368
1369    gen_helper_XXPERMX(xt, xa, xb, xc, tcg_constant_tl(a->uim3));
1370    return true;
1371}
1372
1373typedef void (*xxgenpcv_genfn)(TCGv_ptr, TCGv_ptr);
1374
1375static bool do_xxgenpcv(DisasContext *ctx, arg_X_imm5 *a,
1376                        const xxgenpcv_genfn fn[4])
1377{
1378    TCGv_ptr xt, vrb;
1379
1380    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1381    REQUIRE_VSX(ctx);
1382
1383    if (a->imm & ~0x3) {
1384        gen_invalid(ctx);
1385        return true;
1386    }
1387
1388    xt = gen_vsr_ptr(a->xt);
1389    vrb = gen_avr_ptr(a->vrb);
1390
1391    fn[a->imm](xt, vrb);
1392    return true;
1393}
1394
1395#define XXGENPCV(NAME) \
1396    static bool trans_##NAME(DisasContext *ctx, arg_X_imm5 *a)  \
1397    {                                                           \
1398        static const xxgenpcv_genfn fn[4] = {                   \
1399            gen_helper_##NAME##_be_exp,                         \
1400            gen_helper_##NAME##_be_comp,                        \
1401            gen_helper_##NAME##_le_exp,                         \
1402            gen_helper_##NAME##_le_comp,                        \
1403        };                                                      \
1404        return do_xxgenpcv(ctx, a, fn);                         \
1405    }
1406
1407XXGENPCV(XXGENPCVBM)
1408XXGENPCV(XXGENPCVHM)
1409XXGENPCV(XXGENPCVWM)
1410XXGENPCV(XXGENPCVDM)
1411#undef XXGENPCV
1412
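/*
 * Scalar fused multiply-add.  The A-type forms compute T = AxB + T and
 * the M-type forms T = AxT + B; do_xsmadd_XX3 routes xt into the addend
 * or multiplicand slot of the helper accordingly.
 */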
1413static bool do_xsmadd(DisasContext *ctx, int tgt, int src1, int src2, int src3,
1414        void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
1415{
1416    TCGv_ptr t, s1, s2, s3;
1417
1418    t = gen_vsr_ptr(tgt);
1419    s1 = gen_vsr_ptr(src1);
1420    s2 = gen_vsr_ptr(src2);
1421    s3 = gen_vsr_ptr(src3);
1422
1423    gen_helper(tcg_env, t, s1, s2, s3);
1424    return true;
1425}
1426
1427static bool do_xsmadd_XX3(DisasContext *ctx, arg_XX3 *a, bool type_a,
1428        void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
1429{
1430    REQUIRE_VSX(ctx);
1431
1432    if (type_a) {
1433        return do_xsmadd(ctx, a->xt, a->xa, a->xt, a->xb, gen_helper);
1434    }
1435    return do_xsmadd(ctx, a->xt, a->xa, a->xb, a->xt, gen_helper);
1436}
1437
1438TRANS_FLAGS2(VSX, XSMADDADP, do_xsmadd_XX3, true, gen_helper_XSMADDDP)
1439TRANS_FLAGS2(VSX, XSMADDMDP, do_xsmadd_XX3, false, gen_helper_XSMADDDP)
1440TRANS_FLAGS2(VSX, XSMSUBADP, do_xsmadd_XX3, true, gen_helper_XSMSUBDP)
1441TRANS_FLAGS2(VSX, XSMSUBMDP, do_xsmadd_XX3, false, gen_helper_XSMSUBDP)
1442TRANS_FLAGS2(VSX, XSNMADDADP, do_xsmadd_XX3, true, gen_helper_XSNMADDDP)
1443TRANS_FLAGS2(VSX, XSNMADDMDP, do_xsmadd_XX3, false, gen_helper_XSNMADDDP)
1444TRANS_FLAGS2(VSX, XSNMSUBADP, do_xsmadd_XX3, true, gen_helper_XSNMSUBDP)
1445TRANS_FLAGS2(VSX, XSNMSUBMDP, do_xsmadd_XX3, false, gen_helper_XSNMSUBDP)
1446TRANS_FLAGS2(VSX207, XSMADDASP, do_xsmadd_XX3, true, gen_helper_XSMADDSP)
1447TRANS_FLAGS2(VSX207, XSMADDMSP, do_xsmadd_XX3, false, gen_helper_XSMADDSP)
1448TRANS_FLAGS2(VSX207, XSMSUBASP, do_xsmadd_XX3, true, gen_helper_XSMSUBSP)
1449TRANS_FLAGS2(VSX207, XSMSUBMSP, do_xsmadd_XX3, false, gen_helper_XSMSUBSP)
1450TRANS_FLAGS2(VSX207, XSNMADDASP, do_xsmadd_XX3, true, gen_helper_XSNMADDSP)
1451TRANS_FLAGS2(VSX207, XSNMADDMSP, do_xsmadd_XX3, false, gen_helper_XSNMADDSP)
1452TRANS_FLAGS2(VSX207, XSNMSUBASP, do_xsmadd_XX3, true, gen_helper_XSNMSUBSP)
1453TRANS_FLAGS2(VSX207, XSNMSUBMSP, do_xsmadd_XX3, false, gen_helper_XSNMSUBSP)
1454
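/*
 * Quad-precision multiply-add: Rc = 1 selects the round-to-odd variant
 * of the operation (the *QPO helpers) instead of rounding under FPSCR
 * control.
 */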
1455static bool do_xsmadd_X(DisasContext *ctx, arg_X_rc *a,
1456        void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr),
1457        void (*gen_helper_ro)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
1458{
1459    int vrt, vra, vrb;
1460
1461    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
1462    REQUIRE_VSX(ctx);
1463
1464    vrt = a->rt + 32;
1465    vra = a->ra + 32;
1466    vrb = a->rb + 32;
1467
1468    if (a->rc) {
1469        return do_xsmadd(ctx, vrt, vra, vrt, vrb, gen_helper_ro);
1470    }
1471
1472    return do_xsmadd(ctx, vrt, vra, vrt, vrb, gen_helper);
1473}
1474
1475TRANS(XSMADDQP, do_xsmadd_X, gen_helper_XSMADDQP, gen_helper_XSMADDQPO)
1476TRANS(XSMSUBQP, do_xsmadd_X, gen_helper_XSMSUBQP, gen_helper_XSMSUBQPO)
1477TRANS(XSNMADDQP, do_xsmadd_X, gen_helper_XSNMADDQP, gen_helper_XSNMADDQPO)
1478TRANS(XSNMSUBQP, do_xsmadd_X, gen_helper_XSNMSUBQP, gen_helper_XSNMSUBQPO)
1479
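/*
 * Legacy decoder for the vector multiply-add family: opcode bit 25
 * (IBM numbering) distinguishes the M-form, T = AxT + B, from the
 * A-form, T = AxB + T, and the source pointers are routed to the
 * helper accordingly.
 */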
1480#define GEN_VSX_HELPER_VSX_MADD(name, op1, aop, mop, inval, type)             \
1481static void gen_##name(DisasContext *ctx)                                     \
1482{                                                                             \
1483    TCGv_ptr xt, s1, s2, s3;                                                  \
1484    if (unlikely(!ctx->vsx_enabled)) {                                        \
1485        gen_exception(ctx, POWERPC_EXCP_VSXU);                                \
1486        return;                                                               \
1487    }                                                                         \
1488    xt = gen_vsr_ptr(xT(ctx->opcode));                                        \
1489    s1 = gen_vsr_ptr(xA(ctx->opcode));                                        \
1490    if (ctx->opcode & PPC_BIT32(25)) {                                        \
1491        /*                                                                    \
1492         * AxT + B                                                            \
1493         */                                                                   \
1494        s2 = gen_vsr_ptr(xB(ctx->opcode));                                    \
1495        s3 = gen_vsr_ptr(xT(ctx->opcode));                                    \
1496    } else {                                                                  \
1497        /*                                                                    \
1498         * AxB + T                                                            \
1499         */                                                                   \
1500        s2 = gen_vsr_ptr(xT(ctx->opcode));                                    \
1501        s3 = gen_vsr_ptr(xB(ctx->opcode));                                    \
1502    }                                                                         \
1503    gen_helper_##name(tcg_env, xt, s1, s2, s3);                               \
1504}
1505
1506GEN_VSX_HELPER_VSX_MADD(xvmadddp, 0x04, 0x0C, 0x0D, 0, PPC2_VSX)
1507GEN_VSX_HELPER_VSX_MADD(xvmsubdp, 0x04, 0x0E, 0x0F, 0, PPC2_VSX)
1508GEN_VSX_HELPER_VSX_MADD(xvnmadddp, 0x04, 0x1C, 0x1D, 0, PPC2_VSX)
1509GEN_VSX_HELPER_VSX_MADD(xvnmsubdp, 0x04, 0x1E, 0x1F, 0, PPC2_VSX)
1510GEN_VSX_HELPER_VSX_MADD(xvmaddsp, 0x04, 0x08, 0x09, 0, PPC2_VSX)
1511GEN_VSX_HELPER_VSX_MADD(xvmsubsp, 0x04, 0x0A, 0x0B, 0, PPC2_VSX)
1512GEN_VSX_HELPER_VSX_MADD(xvnmaddsp, 0x04, 0x18, 0x19, 0, PPC2_VSX)
1513GEN_VSX_HELPER_VSX_MADD(xvnmsubsp, 0x04, 0x1A, 0x1B, 0, PPC2_VSX)
1514
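/*
 * xxbrd/xxbrh/xxbrq/xxbrw reverse the bytes within each doubleword,
 * halfword, quadword or word element of VSR[XB].  xxbrq byte-swaps both
 * doublewords and also exchanges them, reversing all 16 bytes.
 */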
1515static void gen_xxbrd(DisasContext *ctx)
1516{
1517    TCGv_i64 xth;
1518    TCGv_i64 xtl;
1519    TCGv_i64 xbh;
1520    TCGv_i64 xbl;
1521
1522    if (unlikely(!ctx->vsx_enabled)) {
1523        gen_exception(ctx, POWERPC_EXCP_VSXU);
1524        return;
1525    }
1526    xth = tcg_temp_new_i64();
1527    xtl = tcg_temp_new_i64();
1528    xbh = tcg_temp_new_i64();
1529    xbl = tcg_temp_new_i64();
1530    get_cpu_vsr(xbh, xB(ctx->opcode), true);
1531    get_cpu_vsr(xbl, xB(ctx->opcode), false);
1532
1533    tcg_gen_bswap64_i64(xth, xbh);
1534    tcg_gen_bswap64_i64(xtl, xbl);
1535    set_cpu_vsr(xT(ctx->opcode), xth, true);
1536    set_cpu_vsr(xT(ctx->opcode), xtl, false);
1537}
1538
1539static void gen_xxbrh(DisasContext *ctx)
1540{
1541    TCGv_i64 xth;
1542    TCGv_i64 xtl;
1543    TCGv_i64 xbh;
1544    TCGv_i64 xbl;
1545
1546    if (unlikely(!ctx->vsx_enabled)) {
1547        gen_exception(ctx, POWERPC_EXCP_VSXU);
1548        return;
1549    }
1550    xth = tcg_temp_new_i64();
1551    xtl = tcg_temp_new_i64();
1552    xbh = tcg_temp_new_i64();
1553    xbl = tcg_temp_new_i64();
1554    get_cpu_vsr(xbh, xB(ctx->opcode), true);
1555    get_cpu_vsr(xbl, xB(ctx->opcode), false);
1556
1557    gen_bswap16x8(xth, xtl, xbh, xbl);
1558    set_cpu_vsr(xT(ctx->opcode), xth, true);
1559    set_cpu_vsr(xT(ctx->opcode), xtl, false);
1560}
1561
1562static void gen_xxbrq(DisasContext *ctx)
1563{
1564    TCGv_i64 xth;
1565    TCGv_i64 xtl;
1566    TCGv_i64 xbh;
1567    TCGv_i64 xbl;
1568    TCGv_i64 t0;
1569
1570    if (unlikely(!ctx->vsx_enabled)) {
1571        gen_exception(ctx, POWERPC_EXCP_VSXU);
1572        return;
1573    }
1574    xth = tcg_temp_new_i64();
1575    xtl = tcg_temp_new_i64();
1576    xbh = tcg_temp_new_i64();
1577    xbl = tcg_temp_new_i64();
1578    get_cpu_vsr(xbh, xB(ctx->opcode), true);
1579    get_cpu_vsr(xbl, xB(ctx->opcode), false);
1580    t0 = tcg_temp_new_i64();
1581
1582    tcg_gen_bswap64_i64(t0, xbl);
1583    tcg_gen_bswap64_i64(xtl, xbh);
1584    set_cpu_vsr(xT(ctx->opcode), xtl, false);
1585    tcg_gen_mov_i64(xth, t0);
1586    set_cpu_vsr(xT(ctx->opcode), xth, true);
1587}
1588
1589static void gen_xxbrw(DisasContext *ctx)
1590{
1591    TCGv_i64 xth;
1592    TCGv_i64 xtl;
1593    TCGv_i64 xbh;
1594    TCGv_i64 xbl;
1595
1596    if (unlikely(!ctx->vsx_enabled)) {
1597        gen_exception(ctx, POWERPC_EXCP_VSXU);
1598        return;
1599    }
1600    xth = tcg_temp_new_i64();
1601    xtl = tcg_temp_new_i64();
1602    xbh = tcg_temp_new_i64();
1603    xbl = tcg_temp_new_i64();
1604    get_cpu_vsr(xbh, xB(ctx->opcode), true);
1605    get_cpu_vsr(xbl, xB(ctx->opcode), false);
1606
1607    gen_bswap32x4(xth, xtl, xbh, xbl);
1608    set_cpu_vsr(xT(ctx->opcode), xth, true);
1609    set_cpu_vsr(xT(ctx->opcode), xtl, false);
1610}
1611
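/*
 * The 128-bit logical operations are emitted as gvec operations over
 * the full 16-byte VSR; e.g. VSX_LOGICAL(xxland, MO_64, tcg_gen_gvec_and)
 * defines a gen_xxland() that expands to a single tcg_gen_gvec_and()
 * over the vsr_full_offset() of XT, XA and XB.
 */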
1612#define VSX_LOGICAL(name, vece, tcg_op)                              \
1613static void glue(gen_, name)(DisasContext *ctx)                      \
1614    {                                                                \
1615        if (unlikely(!ctx->vsx_enabled)) {                           \
1616            gen_exception(ctx, POWERPC_EXCP_VSXU);                   \
1617            return;                                                  \
1618        }                                                            \
1619        tcg_op(vece, vsr_full_offset(xT(ctx->opcode)),               \
1620               vsr_full_offset(xA(ctx->opcode)),                     \
1621               vsr_full_offset(xB(ctx->opcode)), 16, 16);            \
1622    }
1623
1624VSX_LOGICAL(xxland, MO_64, tcg_gen_gvec_and)
1625VSX_LOGICAL(xxlandc, MO_64, tcg_gen_gvec_andc)
1626VSX_LOGICAL(xxlor, MO_64, tcg_gen_gvec_or)
1627VSX_LOGICAL(xxlxor, MO_64, tcg_gen_gvec_xor)
1628VSX_LOGICAL(xxlnor, MO_64, tcg_gen_gvec_nor)
1629VSX_LOGICAL(xxleqv, MO_64, tcg_gen_gvec_eqv)
1630VSX_LOGICAL(xxlnand, MO_64, tcg_gen_gvec_nand)
1631VSX_LOGICAL(xxlorc, MO_64, tcg_gen_gvec_orc)
1632
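/*
 * Word merges: xxmrghw interleaves the two high doublewords word by
 * word, giving T = {A.w0, B.w0, A.w1, B.w1}; xxmrglw does the same with
 * the low doublewords.
 */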
1633#define VSX_XXMRG(name, high)                               \
1634static void glue(gen_, name)(DisasContext *ctx)             \
1635    {                                                       \
1636        TCGv_i64 a0, a1, b0, b1, tmp;                       \
1637        if (unlikely(!ctx->vsx_enabled)) {                  \
1638            gen_exception(ctx, POWERPC_EXCP_VSXU);          \
1639            return;                                         \
1640        }                                                   \
1641        a0 = tcg_temp_new_i64();                            \
1642        a1 = tcg_temp_new_i64();                            \
1643        b0 = tcg_temp_new_i64();                            \
1644        b1 = tcg_temp_new_i64();                            \
1645        tmp = tcg_temp_new_i64();                           \
1646        get_cpu_vsr(a0, xA(ctx->opcode), high);             \
1647        get_cpu_vsr(a1, xA(ctx->opcode), high);             \
1648        get_cpu_vsr(b0, xB(ctx->opcode), high);             \
1649        get_cpu_vsr(b1, xB(ctx->opcode), high);             \
1650        tcg_gen_shri_i64(a0, a0, 32);                       \
1651        tcg_gen_shri_i64(b0, b0, 32);                       \
1652        tcg_gen_deposit_i64(tmp, b0, a0, 32, 32);           \
1653        set_cpu_vsr(xT(ctx->opcode), tmp, true);            \
1654        tcg_gen_deposit_i64(tmp, b1, a1, 32, 32);           \
1655        set_cpu_vsr(xT(ctx->opcode), tmp, false);           \
1656    }
1657
1658VSX_XXMRG(xxmrghw, 1)
1659VSX_XXMRG(xxmrglw, 0)
1660
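/*
 * XXSEL is a pure bit select, XT = (XB & XC) | (XA & ~XC); it maps
 * directly onto gvec's bitsel with XC as the selector.
 */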
1661static bool trans_XXSEL(DisasContext *ctx, arg_XX4 *a)
1662{
1663    REQUIRE_INSNS_FLAGS2(ctx, VSX);
1664    REQUIRE_VSX(ctx);
1665
1666    tcg_gen_gvec_bitsel(MO_64, vsr_full_offset(a->xt), vsr_full_offset(a->xc),
1667                        vsr_full_offset(a->xb), vsr_full_offset(a->xa), 16, 16);
1668
1669    return true;
1670}
1671
1672static bool trans_XXSPLTW(DisasContext *ctx, arg_XX2_uim *a)
1673{
1674    int tofs, bofs;
1675
1676    REQUIRE_VSX(ctx);
1677
1678    tofs = vsr_full_offset(a->xt);
1679    bofs = vsr_full_offset(a->xb);
1680    bofs += a->uim << MO_32;
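    /*
     * vsr_full_offset() is a host-layout byte offset, so on a
     * little-endian host the position of word uim must be mirrored
     * within the 16-byte register.
     */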
1681#if !HOST_BIG_ENDIAN
1682    bofs ^= 8 | 4;
1683#endif
1684
1685    tcg_gen_gvec_dup_mem(MO_32, tofs, bofs, 16, 16);
1686    return true;
1687}
1688
1689#define pattern(x) (((x) & 0xff) * (~(uint64_t)0 / 0xff)) /* splat byte x */
1690
1691static bool trans_XXSPLTIB(DisasContext *ctx, arg_X_imm8 *a)
1692{
1693    if (a->xt < 32) {
1694        REQUIRE_VSX(ctx);
1695    } else {
1696        REQUIRE_VECTOR(ctx);
1697    }
1698    tcg_gen_gvec_dup_imm(MO_8, vsr_full_offset(a->xt), 16, 16, a->imm);
1699    return true;
1700}
1701
1702static bool trans_XXSPLTIW(DisasContext *ctx, arg_8RR_D *a)
1703{
1704    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1705    REQUIRE_VSX(ctx);
1706
1707    tcg_gen_gvec_dup_imm(MO_32, vsr_full_offset(a->xt), 16, 16, a->si);
1708
1709    return true;
1710}
1711
1712static bool trans_XXSPLTIDP(DisasContext *ctx, arg_8RR_D *a)
1713{
1714    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1715    REQUIRE_VSX(ctx);
1716
1717    tcg_gen_gvec_dup_imm(MO_64, vsr_full_offset(a->xt), 16, 16,
1718                         helper_todouble(a->si));
1719    return true;
1720}
1721
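/*
 * XXSPLTI32DX writes the 32-bit immediate to word IX of both
 * doublewords of VSR[XT] (VsrW(IX) and VsrW(IX + 2)), leaving the other
 * two words untouched.
 */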
1722static bool trans_XXSPLTI32DX(DisasContext *ctx, arg_8RR_D_IX *a)
1723{
1724    TCGv_i32 imm;
1725
1726    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1727    REQUIRE_VSX(ctx);
1728
1729    imm = tcg_constant_i32(a->si);
1730
1731    tcg_gen_st_i32(imm, tcg_env,
1732        offsetof(CPUPPCState, vsr[a->xt].VsrW(0 + a->ix)));
1733    tcg_gen_st_i32(imm, tcg_env,
1734        offsetof(CPUPPCState, vsr[a->xt].VsrW(2 + a->ix)));
1735
1736    return true;
1737}
1738
1739static bool trans_LXVKQ(DisasContext *ctx, arg_X_uim5 *a)
1740{
1741    static const uint64_t values[32] = {
1742        0, /* Unspecified */
1743        0x3FFF000000000000llu, /* QP +1.0 */
1744        0x4000000000000000llu, /* QP +2.0 */
1745        0x4000800000000000llu, /* QP +3.0 */
1746        0x4001000000000000llu, /* QP +4.0 */
1747        0x4001400000000000llu, /* QP +5.0 */
1748        0x4001800000000000llu, /* QP +6.0 */
1749        0x4001C00000000000llu, /* QP +7.0 */
1750        0x7FFF000000000000llu, /* QP +Inf */
1751        0x7FFF800000000000llu, /* QP dQNaN */
1752        0, /* Unspecified */
1753        0, /* Unspecified */
1754        0, /* Unspecified */
1755        0, /* Unspecified */
1756        0, /* Unspecified */
1757        0, /* Unspecified */
1758        0x8000000000000000llu, /* QP -0.0 */
1759        0xBFFF000000000000llu, /* QP -1.0 */
1760        0xC000000000000000llu, /* QP -2.0 */
1761        0xC000800000000000llu, /* QP -3.0 */
1762        0xC001000000000000llu, /* QP -4.0 */
1763        0xC001400000000000llu, /* QP -5.0 */
1764        0xC001800000000000llu, /* QP -6.0 */
1765        0xC001C00000000000llu, /* QP -7.0 */
1766        0xFFFF000000000000llu, /* QP -Inf */
1767    };
1768
1769    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1770    REQUIRE_VSX(ctx);
1771
1772    if (values[a->uim]) {
1773        set_cpu_vsr(a->xt, tcg_constant_i64(0x0), false);
1774        set_cpu_vsr(a->xt, tcg_constant_i64(values[a->uim]), true);
1775    } else {
1776        gen_invalid(ctx);
1777    }
1778
1779    return true;
1780}
1781
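/*
 * XVTLSBB tests the least significant bit of each byte of VSR[XB] and
 * sets CR[BF]: the "lt" bit (0b1000) if every byte has its LSB set,
 * the "eq" bit (0b0010) if none does.
 */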
1782static bool trans_XVTLSBB(DisasContext *ctx, arg_XX2_bf_xb *a)
1783{
1784    TCGv_i64 xb, t0, t1, all_true, all_false, mask, zero;
1785
1786    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1787    REQUIRE_VSX(ctx);
1788
1789    xb = tcg_temp_new_i64();
1790    t0 = tcg_temp_new_i64();
1791    t1 = tcg_temp_new_i64();
1792    all_true = tcg_temp_new_i64();
1793    all_false = tcg_temp_new_i64();
1794    mask = tcg_constant_i64(dup_const(MO_8, 1));
1795    zero = tcg_constant_i64(0);
1796
1797    get_cpu_vsr(xb, a->xb, true);
1798    tcg_gen_and_i64(t0, mask, xb);
1799    get_cpu_vsr(xb, a->xb, false);
1800    tcg_gen_and_i64(t1, mask, xb);
1801
1802    tcg_gen_or_i64(all_false, t0, t1);
1803    tcg_gen_and_i64(all_true, t0, t1);
1804
1805    tcg_gen_setcond_i64(TCG_COND_EQ, all_false, all_false, zero);
1806    tcg_gen_shli_i64(all_false, all_false, 1);
1807    tcg_gen_setcond_i64(TCG_COND_EQ, all_true, all_true, mask);
1808    tcg_gen_shli_i64(all_true, all_true, 3);
1809
1810    tcg_gen_or_i64(t0, all_false, all_true);
1811    tcg_gen_extrl_i64_i32(cpu_crf[a->bf], t0);
1812    return true;
1813}
1814
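/*
 * xxsldwi: concatenate VSR[XA]:VSR[XB] and take the 16 bytes starting
 * SHW words into the pair; e.g. SHW = 1 yields T = {A.w1, A.w2, A.w3,
 * B.w0}.  Each case below assembles the two result doublewords with
 * shift/or pairs.
 */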
1815static void gen_xxsldwi(DisasContext *ctx)
1816{
1817    TCGv_i64 xth, xtl;
1818    if (unlikely(!ctx->vsx_enabled)) {
1819        gen_exception(ctx, POWERPC_EXCP_VSXU);
1820        return;
1821    }
1822    xth = tcg_temp_new_i64();
1823    xtl = tcg_temp_new_i64();
1824
1825    switch (SHW(ctx->opcode)) {
1826        case 0: {
1827            get_cpu_vsr(xth, xA(ctx->opcode), true);
1828            get_cpu_vsr(xtl, xA(ctx->opcode), false);
1829            break;
1830        }
1831        case 1: {
1832            TCGv_i64 t0 = tcg_temp_new_i64();
1833            get_cpu_vsr(xth, xA(ctx->opcode), true);
1834            tcg_gen_shli_i64(xth, xth, 32);
1835            get_cpu_vsr(t0, xA(ctx->opcode), false);
1836            tcg_gen_shri_i64(t0, t0, 32);
1837            tcg_gen_or_i64(xth, xth, t0);
1838            get_cpu_vsr(xtl, xA(ctx->opcode), false);
1839            tcg_gen_shli_i64(xtl, xtl, 32);
1840            get_cpu_vsr(t0, xB(ctx->opcode), true);
1841            tcg_gen_shri_i64(t0, t0, 32);
1842            tcg_gen_or_i64(xtl, xtl, t0);
1843            break;
1844        }
1845        case 2: {
1846            get_cpu_vsr(xth, xA(ctx->opcode), false);
1847            get_cpu_vsr(xtl, xB(ctx->opcode), true);
1848            break;
1849        }
1850        case 3: {
1851            TCGv_i64 t0 = tcg_temp_new_i64();
1852            get_cpu_vsr(xth, xA(ctx->opcode), false);
1853            tcg_gen_shli_i64(xth, xth, 32);
1854            get_cpu_vsr(t0, xB(ctx->opcode), true);
1855            tcg_gen_shri_i64(t0, t0, 32);
1856            tcg_gen_or_i64(xth, xth, t0);
1857            get_cpu_vsr(xtl, xB(ctx->opcode), true);
1858            tcg_gen_shli_i64(xtl, xtl, 32);
1859            get_cpu_vsr(t0, xB(ctx->opcode), false);
1860            tcg_gen_shri_i64(t0, t0, 32);
1861            tcg_gen_or_i64(xtl, xtl, t0);
1862            break;
1863        }
1864    }
1865
1866    set_cpu_vsr(xT(ctx->opcode), xth, true);
1867    set_cpu_vsr(xT(ctx->opcode), xtl, false);
1868}
1869
1870static bool do_vsx_extract_insert(DisasContext *ctx, arg_XX2_uim *a,
1871    void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i32))
1872{
1873    TCGv_i64 zero = tcg_constant_i64(0);
1874    TCGv_ptr xt, xb;
1875
1876    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
1877    REQUIRE_VSX(ctx);
1878
1879    /*
1880     * uim > 15 is out of bounds: the result is zeroed here.
1881     * 12 < uim <= 15 is handled in the helper, matching hardware.
1882     */
1883    if (a->uim > 15) {
1884        set_cpu_vsr(a->xt, zero, true);
1885        set_cpu_vsr(a->xt, zero, false);
1886    } else {
1887        xt = gen_vsr_ptr(a->xt);
1888        xb = gen_vsr_ptr(a->xb);
1889        gen_helper(xt, xb, tcg_constant_i32(a->uim));
1890    }
1891    return true;
1892}
1893
1894TRANS(XXEXTRACTUW, do_vsx_extract_insert, gen_helper_XXEXTRACTUW)
1895TRANS(XXINSERTW, do_vsx_extract_insert, gen_helper_XXINSERTW)
1896
1897#ifdef TARGET_PPC64
1898static void gen_xsxexpdp(DisasContext *ctx)
1899{
1900    TCGv rt = cpu_gpr[rD(ctx->opcode)];
1901    TCGv_i64 t0;
1902    if (unlikely(!ctx->vsx_enabled)) {
1903        gen_exception(ctx, POWERPC_EXCP_VSXU);
1904        return;
1905    }
1906    t0 = tcg_temp_new_i64();
1907    get_cpu_vsr(t0, xB(ctx->opcode), true);
1908    tcg_gen_extract_i64(rt, t0, 52, 11);
1909}
1910
1911static void gen_xsxexpqp(DisasContext *ctx)
1912{
1913    TCGv_i64 xth;
1914    TCGv_i64 xtl;
1915    TCGv_i64 xbh;
1916
1917    if (unlikely(!ctx->vsx_enabled)) {
1918        gen_exception(ctx, POWERPC_EXCP_VSXU);
1919        return;
1920    }
1921    xth = tcg_temp_new_i64();
1922    xtl = tcg_temp_new_i64();
1923    xbh = tcg_temp_new_i64();
1924    get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true);
1925
1926    tcg_gen_extract_i64(xth, xbh, 48, 15);
1927    set_cpu_vsr(rD(ctx->opcode) + 32, xth, true);
1928    tcg_gen_movi_i64(xtl, 0);
1929    set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false);
1930}
1931
1932static void gen_xsiexpdp(DisasContext *ctx)
1933{
1934    TCGv_i64 xth;
1935    TCGv ra = cpu_gpr[rA(ctx->opcode)];
1936    TCGv rb = cpu_gpr[rB(ctx->opcode)];
1937    TCGv_i64 t0;
1938
1939    if (unlikely(!ctx->vsx_enabled)) {
1940        gen_exception(ctx, POWERPC_EXCP_VSXU);
1941        return;
1942    }
1943    t0 = tcg_temp_new_i64();
1944    xth = tcg_temp_new_i64();
1945    tcg_gen_andi_i64(xth, ra, 0x800FFFFFFFFFFFFF);
1946    tcg_gen_andi_i64(t0, rb, 0x7FF);
1947    tcg_gen_shli_i64(t0, t0, 52);
1948    tcg_gen_or_i64(xth, xth, t0);
1949    set_cpu_vsr(xT(ctx->opcode), xth, true);
1950    set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false);
1951}
1952
1953static void gen_xsiexpqp(DisasContext *ctx)
1954{
1955    TCGv_i64 xth;
1956    TCGv_i64 xtl;
1957    TCGv_i64 xah;
1958    TCGv_i64 xal;
1959    TCGv_i64 xbh;
1960    TCGv_i64 t0;
1961
1962    if (unlikely(!ctx->vsx_enabled)) {
1963        gen_exception(ctx, POWERPC_EXCP_VSXU);
1964        return;
1965    }
1966    xth = tcg_temp_new_i64();
1967    xtl = tcg_temp_new_i64();
1968    xah = tcg_temp_new_i64();
1969    xal = tcg_temp_new_i64();
1970    get_cpu_vsr(xah, rA(ctx->opcode) + 32, true);
1971    get_cpu_vsr(xal, rA(ctx->opcode) + 32, false);
1972    xbh = tcg_temp_new_i64();
1973    get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true);
1974    t0 = tcg_temp_new_i64();
1975
1976    tcg_gen_andi_i64(xth, xah, 0x8000FFFFFFFFFFFF);
1977    tcg_gen_andi_i64(t0, xbh, 0x7FFF);
1978    tcg_gen_shli_i64(t0, t0, 48);
1979    tcg_gen_or_i64(xth, xth, t0);
1980    set_cpu_vsr(rD(ctx->opcode) + 32, xth, true);
1981    tcg_gen_mov_i64(xtl, xal);
1982    set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false);
1983}
1984
1985static void gen_xsxsigdp(DisasContext *ctx)
1986{
1987    TCGv rt = cpu_gpr[rD(ctx->opcode)];
1988    TCGv_i64 t0, t1, zr, nan, exp;
1989
1990    if (unlikely(!ctx->vsx_enabled)) {
1991        gen_exception(ctx, POWERPC_EXCP_VSXU);
1992        return;
1993    }
1994    exp = tcg_temp_new_i64();
1995    t0 = tcg_temp_new_i64();
1996    t1 = tcg_temp_new_i64();
1997    zr = tcg_constant_i64(0);
1998    nan = tcg_constant_i64(2047);
1999
2000    get_cpu_vsr(t1, xB(ctx->opcode), true);
2001    tcg_gen_extract_i64(exp, t1, 52, 11);
2002    tcg_gen_movi_i64(t0, 0x0010000000000000);
2003    tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0);
2004    tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
2005    get_cpu_vsr(t1, xB(ctx->opcode), true);
2006    tcg_gen_deposit_i64(rt, t0, t1, 0, 52);
2007}
2008
2009static void gen_xsxsigqp(DisasContext *ctx)
2010{
2011    TCGv_i64 t0, zr, nan, exp;
2012    TCGv_i64 xth;
2013    TCGv_i64 xtl;
2014    TCGv_i64 xbh;
2015    TCGv_i64 xbl;
2016
2017    if (unlikely(!ctx->vsx_enabled)) {
2018        gen_exception(ctx, POWERPC_EXCP_VSXU);
2019        return;
2020    }
2021    xth = tcg_temp_new_i64();
2022    xtl = tcg_temp_new_i64();
2023    xbh = tcg_temp_new_i64();
2024    xbl = tcg_temp_new_i64();
2025    get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true);
2026    get_cpu_vsr(xbl, rB(ctx->opcode) + 32, false);
2027    exp = tcg_temp_new_i64();
2028    t0 = tcg_temp_new_i64();
2029    zr = tcg_constant_i64(0);
2030    nan = tcg_constant_i64(32767);
2031
2032    tcg_gen_extract_i64(exp, xbh, 48, 15);
2033    tcg_gen_movi_i64(t0, 0x0001000000000000);
2034    tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0);
2035    tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
2036    tcg_gen_deposit_i64(xth, t0, xbh, 0, 48);
2037    set_cpu_vsr(rD(ctx->opcode) + 32, xth, true);
2038    tcg_gen_mov_i64(xtl, xbl);
2039    set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false);
2040}
2041#endif
2042
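/*
 * xviexpsp inserts an 8-bit exponent into each word element: the
 * 0x807FFFFF masks keep sign and fraction from VSR[XA], the low byte of
 * each word of VSR[XB] supplies the exponent, and the shift by 23
 * aligns it with the binary32 exponent field.
 */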
2043static void gen_xviexpsp(DisasContext *ctx)
2044{
2045    TCGv_i64 xth;
2046    TCGv_i64 xtl;
2047    TCGv_i64 xah;
2048    TCGv_i64 xal;
2049    TCGv_i64 xbh;
2050    TCGv_i64 xbl;
2051    TCGv_i64 t0;
2052
2053    if (unlikely(!ctx->vsx_enabled)) {
2054        gen_exception(ctx, POWERPC_EXCP_VSXU);
2055        return;
2056    }
2057    xth = tcg_temp_new_i64();
2058    xtl = tcg_temp_new_i64();
2059    xah = tcg_temp_new_i64();
2060    xal = tcg_temp_new_i64();
2061    xbh = tcg_temp_new_i64();
2062    xbl = tcg_temp_new_i64();
2063    get_cpu_vsr(xah, xA(ctx->opcode), true);
2064    get_cpu_vsr(xal, xA(ctx->opcode), false);
2065    get_cpu_vsr(xbh, xB(ctx->opcode), true);
2066    get_cpu_vsr(xbl, xB(ctx->opcode), false);
2067    t0 = tcg_temp_new_i64();
2068
2069    tcg_gen_andi_i64(xth, xah, 0x807FFFFF807FFFFF);
2070    tcg_gen_andi_i64(t0, xbh, 0xFF000000FF);
2071    tcg_gen_shli_i64(t0, t0, 23);
2072    tcg_gen_or_i64(xth, xth, t0);
2073    set_cpu_vsr(xT(ctx->opcode), xth, true);
2074    tcg_gen_andi_i64(xtl, xal, 0x807FFFFF807FFFFF);
2075    tcg_gen_andi_i64(t0, xbl, 0xFF000000FF);
2076    tcg_gen_shli_i64(t0, t0, 23);
2077    tcg_gen_or_i64(xtl, xtl, t0);
2078    set_cpu_vsr(xT(ctx->opcode), xtl, false);
2079}
2080
2081static void gen_xviexpdp(DisasContext *ctx)
2082{
2083    TCGv_i64 xth;
2084    TCGv_i64 xtl;
2085    TCGv_i64 xah;
2086    TCGv_i64 xal;
2087    TCGv_i64 xbh;
2088    TCGv_i64 xbl;
2089
2090    if (unlikely(!ctx->vsx_enabled)) {
2091        gen_exception(ctx, POWERPC_EXCP_VSXU);
2092        return;
2093    }
2094    xth = tcg_temp_new_i64();
2095    xtl = tcg_temp_new_i64();
2096    xah = tcg_temp_new_i64();
2097    xal = tcg_temp_new_i64();
2098    xbh = tcg_temp_new_i64();
2099    xbl = tcg_temp_new_i64();
2100    get_cpu_vsr(xah, xA(ctx->opcode), true);
2101    get_cpu_vsr(xal, xA(ctx->opcode), false);
2102    get_cpu_vsr(xbh, xB(ctx->opcode), true);
2103    get_cpu_vsr(xbl, xB(ctx->opcode), false);
2104
2105    tcg_gen_deposit_i64(xth, xah, xbh, 52, 11);
2106    set_cpu_vsr(xT(ctx->opcode), xth, true);
2107
2108    tcg_gen_deposit_i64(xtl, xal, xbl, 52, 11);
2109    set_cpu_vsr(xT(ctx->opcode), xtl, false);
2110}
2111
2112static void gen_xvxexpsp(DisasContext *ctx)
2113{
2114    TCGv_i64 xth;
2115    TCGv_i64 xtl;
2116    TCGv_i64 xbh;
2117    TCGv_i64 xbl;
2118
2119    if (unlikely(!ctx->vsx_enabled)) {
2120        gen_exception(ctx, POWERPC_EXCP_VSXU);
2121        return;
2122    }
2123    xth = tcg_temp_new_i64();
2124    xtl = tcg_temp_new_i64();
2125    xbh = tcg_temp_new_i64();
2126    xbl = tcg_temp_new_i64();
2127    get_cpu_vsr(xbh, xB(ctx->opcode), true);
2128    get_cpu_vsr(xbl, xB(ctx->opcode), false);
2129
2130    tcg_gen_shri_i64(xth, xbh, 23);
2131    tcg_gen_andi_i64(xth, xth, 0xFF000000FF);
2132    set_cpu_vsr(xT(ctx->opcode), xth, true);
2133    tcg_gen_shri_i64(xtl, xbl, 23);
2134    tcg_gen_andi_i64(xtl, xtl, 0xFF000000FF);
2135    set_cpu_vsr(xT(ctx->opcode), xtl, false);
2136}
2137
2138static void gen_xvxexpdp(DisasContext *ctx)
2139{
2140    TCGv_i64 xth;
2141    TCGv_i64 xtl;
2142    TCGv_i64 xbh;
2143    TCGv_i64 xbl;
2144
2145    if (unlikely(!ctx->vsx_enabled)) {
2146        gen_exception(ctx, POWERPC_EXCP_VSXU);
2147        return;
2148    }
2149    xth = tcg_temp_new_i64();
2150    xtl = tcg_temp_new_i64();
2151    xbh = tcg_temp_new_i64();
2152    xbl = tcg_temp_new_i64();
2153    get_cpu_vsr(xbh, xB(ctx->opcode), true);
2154    get_cpu_vsr(xbl, xB(ctx->opcode), false);
2155
2156    tcg_gen_extract_i64(xth, xbh, 52, 11);
2157    set_cpu_vsr(xT(ctx->opcode), xth, true);
2158    tcg_gen_extract_i64(xtl, xbl, 52, 11);
2159    set_cpu_vsr(xT(ctx->opcode), xtl, false);
2160}
2161
2162static bool trans_XVXSIGSP(DisasContext *ctx, arg_XX2 *a)
2163{
2164    TCGv_ptr t, b;
2165
2166    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
2167    REQUIRE_VSX(ctx);
2168
2169    t = gen_vsr_ptr(a->xt);
2170    b = gen_vsr_ptr(a->xb);
2171
2172    gen_helper_XVXSIGSP(t, b);
2173    return true;
2174}
2175
2176static void gen_xvxsigdp(DisasContext *ctx)
2177{
2178    TCGv_i64 xth;
2179    TCGv_i64 xtl;
2180    TCGv_i64 xbh;
2181    TCGv_i64 xbl;
2182    TCGv_i64 t0, zr, nan, exp;
2183
2184    if (unlikely(!ctx->vsx_enabled)) {
2185        gen_exception(ctx, POWERPC_EXCP_VSXU);
2186        return;
2187    }
2188    xth = tcg_temp_new_i64();
2189    xtl = tcg_temp_new_i64();
2190    xbh = tcg_temp_new_i64();
2191    xbl = tcg_temp_new_i64();
2192    get_cpu_vsr(xbh, xB(ctx->opcode), true);
2193    get_cpu_vsr(xbl, xB(ctx->opcode), false);
2194    exp = tcg_temp_new_i64();
2195    t0 = tcg_temp_new_i64();
2196    zr = tcg_constant_i64(0);
2197    nan = tcg_constant_i64(2047);
2198
2199    tcg_gen_extract_i64(exp, xbh, 52, 11);
2200    tcg_gen_movi_i64(t0, 0x0010000000000000);
2201    tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0);
2202    tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
2203    tcg_gen_deposit_i64(xth, t0, xbh, 0, 52);
2204    set_cpu_vsr(xT(ctx->opcode), xth, true);
2205
2206    tcg_gen_extract_i64(exp, xbl, 52, 11);
2207    tcg_gen_movi_i64(t0, 0x0010000000000000);
2208    tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0);
2209    tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
2210    tcg_gen_deposit_i64(xtl, t0, xbl, 0, 52);
2211    set_cpu_vsr(xT(ctx->opcode), xtl, false);
2212}
2213
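/*
 * VSR loads and stores are split into two 8-byte accesses.  In
 * little-endian mode the doublewords of each register, and the two
 * registers of an lxvp/stxvp pair, are accessed in reverse order,
 * keeping the in-memory image consistent with the target byte order.
 */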
2214static bool do_lstxv(DisasContext *ctx, int ra, TCGv displ,
2215                     int rt, bool store, bool paired)
2216{
2217    TCGv ea;
2218    TCGv_i64 xt;
2219    MemOp mop;
2220    int rt1, rt2;
2221
2222    xt = tcg_temp_new_i64();
2223
2224    mop = DEF_MEMOP(MO_UQ);
2225
2226    gen_set_access_type(ctx, ACCESS_INT);
2227    ea = do_ea_calc(ctx, ra, displ);
2228
2229    if (paired && ctx->le_mode) {
2230        rt1 = rt + 1;
2231        rt2 = rt;
2232    } else {
2233        rt1 = rt;
2234        rt2 = rt + 1;
2235    }
2236
2237    if (store) {
2238        get_cpu_vsr(xt, rt1, !ctx->le_mode);
2239        tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
2240        gen_addr_add(ctx, ea, ea, 8);
2241        get_cpu_vsr(xt, rt1, ctx->le_mode);
2242        tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
2243        if (paired) {
2244            gen_addr_add(ctx, ea, ea, 8);
2245            get_cpu_vsr(xt, rt2, !ctx->le_mode);
2246            tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
2247            gen_addr_add(ctx, ea, ea, 8);
2248            get_cpu_vsr(xt, rt2, ctx->le_mode);
2249            tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
2250        }
2251    } else {
2252        tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
2253        set_cpu_vsr(rt1, xt, !ctx->le_mode);
2254        gen_addr_add(ctx, ea, ea, 8);
2255        tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
2256        set_cpu_vsr(rt1, xt, ctx->le_mode);
2257        if (paired) {
2258            gen_addr_add(ctx, ea, ea, 8);
2259            tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
2260            set_cpu_vsr(rt2, xt, !ctx->le_mode);
2261            gen_addr_add(ctx, ea, ea, 8);
2262            tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
2263            set_cpu_vsr(rt2, xt, ctx->le_mode);
2264        }
2265    }
2266    return true;
2267}
2268
2269static bool do_lstxv_D(DisasContext *ctx, arg_D *a, bool store, bool paired)
2270{
2271    if (paired || a->rt < 32) {
2272        REQUIRE_VSX(ctx);
2273    } else {
2274        REQUIRE_VECTOR(ctx);
2275    }
2276
2277    return do_lstxv(ctx, a->ra, tcg_constant_tl(a->si), a->rt, store, paired);
2278}
2279
2280static bool do_lstxv_PLS_D(DisasContext *ctx, arg_PLS_D *a,
2281                           bool store, bool paired)
2282{
2283    arg_D d;
2284    REQUIRE_VSX(ctx);
2285
2286    if (!resolve_PLS_D(ctx, &d, a)) {
2287        return true;
2288    }
2289
2290    return do_lstxv(ctx, d.ra, tcg_constant_tl(d.si), d.rt, store, paired);
2291}
2292
2293static bool do_lstxv_X(DisasContext *ctx, arg_X *a, bool store, bool paired)
2294{
2295    if (paired || a->rt < 32) {
2296        REQUIRE_VSX(ctx);
2297    } else {
2298        REQUIRE_VECTOR(ctx);
2299    }
2300
2301    return do_lstxv(ctx, a->ra, cpu_gpr[a->rb], a->rt, store, paired);
2302}
2303
2304static bool do_lstxsd(DisasContext *ctx, int rt, int ra, TCGv displ, bool store)
2305{
2306    TCGv ea;
2307    TCGv_i64 xt;
2308    MemOp mop;
2309
2310    /* lxsd and stxsd address VSRs 32-63, i.e. the vector registers */
2311    REQUIRE_VECTOR(ctx);
2315
2316    xt = tcg_temp_new_i64();
2317    mop = DEF_MEMOP(MO_UQ);
2318
2319    gen_set_access_type(ctx, ACCESS_INT);
2320    ea = do_ea_calc(ctx, ra, displ);
2321
2322    if (store) {
2323        get_cpu_vsr(xt, rt + 32, true);
2324        tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
2325    } else {
2326        tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
2327        set_cpu_vsr(rt + 32, xt, true);
2328        set_cpu_vsr(rt + 32, tcg_constant_i64(0), false);
2329    }
2330    return true;
2331}
2332
2333static bool do_lstxsd_DS(DisasContext *ctx, arg_D *a, bool store)
2334{
2335    return do_lstxsd(ctx, a->rt, a->ra, tcg_constant_tl(a->si), store);
2336}
2337
2338static bool do_plstxsd_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool store)
2339{
2340    arg_D d;
2341
2342    if (!resolve_PLS_D(ctx, &d, a)) {
2343        return true;
2344    }
2345
2346    return do_lstxsd(ctx, d.rt, d.ra, tcg_constant_tl(d.si), store);
2347}
2348
2349static bool do_lstxssp(DisasContext *ctx, int rt, int ra, TCGv displ, bool store)
2350{
2351    TCGv ea;
2352    TCGv_i64 xt;
2353
2354    REQUIRE_VECTOR(ctx);
2355
2356    xt = tcg_temp_new_i64();
2357
2358    gen_set_access_type(ctx, ACCESS_INT);
2359    ea = do_ea_calc(ctx, ra, displ);
2360
2361    if (store) {
2362        get_cpu_vsr(xt, rt + 32, true);
2363        gen_qemu_st32fs(ctx, xt, ea);
2364    } else {
2365        gen_qemu_ld32fs(ctx, xt, ea);
2366        set_cpu_vsr(rt + 32, xt, true);
2367        set_cpu_vsr(rt + 32, tcg_constant_i64(0), false);
2368    }
2369    return true;
2370}
2371
2372static bool do_lstxssp_DS(DisasContext *ctx, arg_D *a, bool store)
2373{
2374    return do_lstxssp(ctx, a->rt, a->ra, tcg_constant_tl(a->si), store);
2375}
2376
2377static bool do_plstxssp_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool store)
2378{
2379    arg_D d;
2380
2381    if (!resolve_PLS_D(ctx, &d, a)) {
2382        return true;
2383    }
2384
2385    return do_lstxssp(ctx, d.rt, d.ra, tcg_constant_tl(d.si), store);
2386}
2387
2388TRANS_FLAGS2(ISA300, LXSD, do_lstxsd_DS, false)
2389TRANS_FLAGS2(ISA300, STXSD, do_lstxsd_DS, true)
2390TRANS_FLAGS2(ISA300, LXSSP, do_lstxssp_DS, false)
2391TRANS_FLAGS2(ISA300, STXSSP, do_lstxssp_DS, true)
2392TRANS_FLAGS2(ISA300, STXV, do_lstxv_D, true, false)
2393TRANS_FLAGS2(ISA300, LXV, do_lstxv_D, false, false)
2394TRANS_FLAGS2(ISA310, STXVP, do_lstxv_D, true, true)
2395TRANS_FLAGS2(ISA310, LXVP, do_lstxv_D, false, true)
2396TRANS_FLAGS2(ISA300, STXVX, do_lstxv_X, true, false)
2397TRANS_FLAGS2(ISA300, LXVX, do_lstxv_X, false, false)
2398TRANS_FLAGS2(ISA310, STXVPX, do_lstxv_X, true, true)
2399TRANS_FLAGS2(ISA310, LXVPX, do_lstxv_X, false, true)
2400TRANS64_FLAGS2(ISA310, PLXSD, do_plstxsd_PLS_D, false)
2401TRANS64_FLAGS2(ISA310, PSTXSD, do_plstxsd_PLS_D, true)
2402TRANS64_FLAGS2(ISA310, PLXSSP, do_plstxssp_PLS_D, false)
2403TRANS64_FLAGS2(ISA310, PSTXSSP, do_plstxssp_PLS_D, true)
2404TRANS64_FLAGS2(ISA310, PSTXV, do_lstxv_PLS_D, true, false)
2405TRANS64_FLAGS2(ISA310, PLXV, do_lstxv_PLS_D, false, false)
2406TRANS64_FLAGS2(ISA310, PSTXVP, do_lstxv_PLS_D, true, true)
2407TRANS64_FLAGS2(ISA310, PLXVP, do_lstxv_PLS_D, false, true)
2408
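/*
 * "Rightmost element" loads and stores (lxvr*x / stxvr*x) transfer only
 * the low doubleword of the VSR; loads zero the remaining doubleword.
 */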
2409static bool do_lstrm(DisasContext *ctx, arg_X *a, MemOp mop, bool store)
2410{
2411    TCGv ea;
2412    TCGv_i64 xt;
2413
2414    REQUIRE_VSX(ctx);
2415
2416    xt = tcg_temp_new_i64();
2417
2418    gen_set_access_type(ctx, ACCESS_INT);
2419    ea = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
2420
2421    if (store) {
2422        get_cpu_vsr(xt, a->rt, false);
2423        tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
2424    } else {
2425        tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
2426        set_cpu_vsr(a->rt, xt, false);
2427        set_cpu_vsr(a->rt, tcg_constant_i64(0), true);
2428    }
2429    return true;
2430}
2431
2432TRANS_FLAGS2(ISA310, LXVRBX, do_lstrm, DEF_MEMOP(MO_UB), false)
2433TRANS_FLAGS2(ISA310, LXVRHX, do_lstrm, DEF_MEMOP(MO_UW), false)
2434TRANS_FLAGS2(ISA310, LXVRWX, do_lstrm, DEF_MEMOP(MO_UL), false)
2435TRANS_FLAGS2(ISA310, LXVRDX, do_lstrm, DEF_MEMOP(MO_UQ), false)
2436TRANS_FLAGS2(ISA310, STXVRBX, do_lstrm, DEF_MEMOP(MO_UB), true)
2437TRANS_FLAGS2(ISA310, STXVRHX, do_lstrm, DEF_MEMOP(MO_UW), true)
2438TRANS_FLAGS2(ISA310, STXVRWX, do_lstrm, DEF_MEMOP(MO_UL), true)
2439TRANS_FLAGS2(ISA310, STXVRDX, do_lstrm, DEF_MEMOP(MO_UQ), true)
2440
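/*
 * XXEVAL's imm is an 8-entry truth table over (a, b, c): PowerISA bit i
 * of imm (bit 0 being the most significant) holds the result for
 * a = bit 2 of i, b = bit 1, c = bit 0.  The generators build a sum of
 * products with one AND term per set bit of imm; e.g. imm = 0b00010111
 * computes the bitwise majority of A, B and C.
 */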
2441static void gen_xxeval_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, TCGv_i64 c,
2442                           int64_t imm)
2443{
2444    /*
2445     * Instead of processing imm bit-by-bit, we'll skip the computation of
2446     * conjunctions whose corresponding bit is unset.
2447     */
2448    int bit;
2449    TCGv_i64 conj, disj;
2450
2451    conj = tcg_temp_new_i64();
2452    disj = tcg_temp_new_i64();
2453    tcg_gen_movi_i64(disj, 0);
2454
2455    /* Iterate over set bits from the least to the most significant bit */
2456    while (imm) {
2457        /*
2458         * Find the next set bit with ctz64 and convert its position to
2459         * the MSB-first bit numbering used by the PowerISA.
2460         */
2461        bit = 7 - ctz64(imm);
2462        if (bit & 0x4) {
2463            tcg_gen_mov_i64(conj, a);
2464        } else {
2465            tcg_gen_not_i64(conj, a);
2466        }
2467        if (bit & 0x2) {
2468            tcg_gen_and_i64(conj, conj, b);
2469        } else {
2470            tcg_gen_andc_i64(conj, conj, b);
2471        }
2472        if (bit & 0x1) {
2473            tcg_gen_and_i64(conj, conj, c);
2474        } else {
2475            tcg_gen_andc_i64(conj, conj, c);
2476        }
2477        tcg_gen_or_i64(disj, disj, conj);
2478
2479        /* Unset the least significant bit that is set */
2480        imm &= imm - 1;
2481    }
2482
2483    tcg_gen_mov_i64(t, disj);
2484}
2485
2486static void gen_xxeval_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
2487                           TCGv_vec c, int64_t imm)
2488{
2489    /*
2490     * Instead of processing imm bit-by-bit, we'll skip the computation of
2491     * conjunctions whose corresponding bit is unset.
2492     */
2493    int bit;
2494    TCGv_vec disj, conj;
2495
2496    conj = tcg_temp_new_vec_matching(t);
2497    disj = tcg_temp_new_vec_matching(t);
2498    tcg_gen_dupi_vec(vece, disj, 0);
2499
2500    /* Iterate over set bits from the least to the most significant bit */
2501    while (imm) {
2502        /*
2503         * Find the next set bit with ctz64 and convert its position to
2504         * the MSB-first bit numbering used by the PowerISA.
2505         */
2506        bit = 7 - ctz64(imm);
2507        if (bit & 0x4) {
2508            tcg_gen_mov_vec(conj, a);
2509        } else {
2510            tcg_gen_not_vec(vece, conj, a);
2511        }
2512        if (bit & 0x2) {
2513            tcg_gen_and_vec(vece, conj, conj, b);
2514        } else {
2515            tcg_gen_andc_vec(vece, conj, conj, b);
2516        }
2517        if (bit & 0x1) {
2518            tcg_gen_and_vec(vece, conj, conj, c);
2519        } else {
2520            tcg_gen_andc_vec(vece, conj, conj, c);
2521        }
2522        tcg_gen_or_vec(vece, disj, disj, conj);
2523
2524        /* Unset the least significant bit that is set */
2525        imm &= imm - 1;
2526    }
2527
2528    tcg_gen_mov_vec(t, disj);
2529}
2530
2531static bool trans_XXEVAL(DisasContext *ctx, arg_8RR_XX4_imm *a)
2532{
2533    static const TCGOpcode vecop_list[] = {
2534        INDEX_op_andc_vec, 0
2535    };
2536    static const GVecGen4i op = {
2537        .fniv = gen_xxeval_vec,
2538        .fno = gen_helper_XXEVAL,
2539        .fni8 = gen_xxeval_i64,
2540        .opt_opc = vecop_list,
2541        .vece = MO_64
2542    };
2543    int xt = vsr_full_offset(a->xt), xa = vsr_full_offset(a->xa),
2544        xb = vsr_full_offset(a->xb), xc = vsr_full_offset(a->xc);
2545
2546    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2547    REQUIRE_VSX(ctx);
2548
2549    /* Equivalent functions that can be implemented with a single gen_gvec */
2550    switch (a->imm) {
2551    case 0b00000000: /* false */
2552        set_cpu_vsr(a->xt, tcg_constant_i64(0), true);
2553        set_cpu_vsr(a->xt, tcg_constant_i64(0), false);
2554        break;
2555    case 0b00000011: /* and(B,A) */
2556        tcg_gen_gvec_and(MO_64, xt, xb, xa, 16, 16);
2557        break;
2558    case 0b00000101: /* and(C,A) */
2559        tcg_gen_gvec_and(MO_64, xt, xc, xa, 16, 16);
2560        break;
2561    case 0b00001111: /* A */
2562        tcg_gen_gvec_mov(MO_64, xt, xa, 16, 16);
2563        break;
2564    case 0b00010001: /* and(C,B) */
2565        tcg_gen_gvec_and(MO_64, xt, xc, xb, 16, 16);
2566        break;
2567    case 0b00011011: /* C?B:A */
2568        tcg_gen_gvec_bitsel(MO_64, xt, xc, xb, xa, 16, 16);
2569        break;
2570    case 0b00011101: /* B?C:A */
2571        tcg_gen_gvec_bitsel(MO_64, xt, xb, xc, xa, 16, 16);
2572        break;
2573    case 0b00100111: /* C?A:B */
2574        tcg_gen_gvec_bitsel(MO_64, xt, xc, xa, xb, 16, 16);
2575        break;
2576    case 0b00110011: /* B */
2577        tcg_gen_gvec_mov(MO_64, xt, xb, 16, 16);
2578        break;
2579    case 0b00110101: /* A?C:B */
2580        tcg_gen_gvec_bitsel(MO_64, xt, xa, xc, xb, 16, 16);
2581        break;
2582    case 0b00111100: /* xor(B,A) */
2583        tcg_gen_gvec_xor(MO_64, xt, xb, xa, 16, 16);
2584        break;
2585    case 0b00111111: /* or(B,A) */
2586        tcg_gen_gvec_or(MO_64, xt, xb, xa, 16, 16);
2587        break;
2588    case 0b01000111: /* B?A:C */
2589        tcg_gen_gvec_bitsel(MO_64, xt, xb, xa, xc, 16, 16);
2590        break;
2591    case 0b01010011: /* A?B:C */
2592        tcg_gen_gvec_bitsel(MO_64, xt, xa, xb, xc, 16, 16);
2593        break;
2594    case 0b01010101: /* C */
2595        tcg_gen_gvec_mov(MO_64, xt, xc, 16, 16);
2596        break;
2597    case 0b01011010: /* xor(C,A) */
2598        tcg_gen_gvec_xor(MO_64, xt, xc, xa, 16, 16);
2599        break;
2600    case 0b01011111: /* or(C,A) */
2601        tcg_gen_gvec_or(MO_64, xt, xc, xa, 16, 16);
2602        break;
2603    case 0b01100110: /* xor(C,B) */
2604        tcg_gen_gvec_xor(MO_64, xt, xc, xb, 16, 16);
2605        break;
2606    case 0b01110111: /* or(C,B) */
2607        tcg_gen_gvec_or(MO_64, xt, xc, xb, 16, 16);
2608        break;
2609    case 0b10001000: /* nor(C,B) */
2610        tcg_gen_gvec_nor(MO_64, xt, xc, xb, 16, 16);
2611        break;
2612    case 0b10011001: /* eqv(C,B) */
2613        tcg_gen_gvec_eqv(MO_64, xt, xc, xb, 16, 16);
2614        break;
2615    case 0b10100000: /* nor(C,A) */
2616        tcg_gen_gvec_nor(MO_64, xt, xc, xa, 16, 16);
2617        break;
2618    case 0b10100101: /* eqv(C,A) */
2619        tcg_gen_gvec_eqv(MO_64, xt, xc, xa, 16, 16);
2620        break;
2621    case 0b10101010: /* not(C) */
2622        tcg_gen_gvec_not(MO_64, xt, xc, 16, 16);
2623        break;
2624    case 0b11000000: /* nor(B,A) */
2625        tcg_gen_gvec_nor(MO_64, xt, xb, xa, 16, 16);
2626        break;
2627    case 0b11000011: /* eqv(B,A) */
2628        tcg_gen_gvec_eqv(MO_64, xt, xb, xa, 16, 16);
2629        break;
2630    case 0b11001100: /* not(B) */
2631        tcg_gen_gvec_not(MO_64, xt, xb, 16, 16);
2632        break;
2633    case 0b11101110: /* nand(C,B) */
2634        tcg_gen_gvec_nand(MO_64, xt, xc, xb, 16, 16);
2635        break;
2636    case 0b11110000: /* not(A) */
2637        tcg_gen_gvec_not(MO_64, xt, xa, 16, 16);
2638        break;
2639    case 0b11111010: /* nand(C,A) */
2640        tcg_gen_gvec_nand(MO_64, xt, xc, xa, 16, 16);
2641        break;
2642    case 0b11111100: /* nand(B,A) */
2643        tcg_gen_gvec_nand(MO_64, xt, xb, xa, 16, 16);
2644        break;
2645    case 0b11111111: /* true */
2646        set_cpu_vsr(a->xt, tcg_constant_i64(-1), true);
2647        set_cpu_vsr(a->xt, tcg_constant_i64(-1), false);
2648        break;
2649    default:
2650        /* Fallback to compute all conjunctions/disjunctions */
2651        tcg_gen_gvec_4i(xt, xa, xb, xc, 16, 16, a->imm, &op);
2652    }
2653
2654    return true;
2655}
2656
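/*
 * XXBLENDV selects per element on the sign bit of C: an arithmetic
 * right shift by the element width minus one replicates that bit across
 * the element, and bitsel then takes B where the mask is all ones and A
 * elsewhere.
 */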
2657static void gen_xxblendv_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
2658                             TCGv_vec c)
2659{
2660    TCGv_vec tmp = tcg_temp_new_vec_matching(c);
2661    tcg_gen_sari_vec(vece, tmp, c, (8 << vece) - 1);
2662    tcg_gen_bitsel_vec(vece, t, tmp, b, a);
2663}
2664
2665static bool do_xxblendv(DisasContext *ctx, arg_8RR_XX4 *a, unsigned vece)
2666{
2667    static const TCGOpcode vecop_list[] = {
2668        INDEX_op_sari_vec, 0
2669    };
2670    static const GVecGen4 ops[4] = {
2671        {
2672            .fniv = gen_xxblendv_vec,
2673            .fno = gen_helper_XXBLENDVB,
2674            .opt_opc = vecop_list,
2675            .vece = MO_8
2676        },
2677        {
2678            .fniv = gen_xxblendv_vec,
2679            .fno = gen_helper_XXBLENDVH,
2680            .opt_opc = vecop_list,
2681            .vece = MO_16
2682        },
2683        {
2684            .fniv = gen_xxblendv_vec,
2685            .fno = gen_helper_XXBLENDVW,
2686            .opt_opc = vecop_list,
2687            .vece = MO_32
2688        },
2689        {
2690            .fniv = gen_xxblendv_vec,
2691            .fno = gen_helper_XXBLENDVD,
2692            .opt_opc = vecop_list,
2693            .vece = MO_64
2694        }
2695    };
2696
2697    REQUIRE_VSX(ctx);
2698
2699    tcg_gen_gvec_4(vsr_full_offset(a->xt), vsr_full_offset(a->xa),
2700                   vsr_full_offset(a->xb), vsr_full_offset(a->xc),
2701                   16, 16, &ops[vece]);
2702
2703    return true;
2704}
2705
2706TRANS(XXBLENDVB, do_xxblendv, MO_8)
2707TRANS(XXBLENDVH, do_xxblendv, MO_16)
2708TRANS(XXBLENDVW, do_xxblendv, MO_32)
2709TRANS(XXBLENDVD, do_xxblendv, MO_64)
2710
2711static bool do_helper_XX3(DisasContext *ctx, arg_XX3 *a,
2712    void (*helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
2713{
2714    TCGv_ptr xt, xa, xb;
2715
2716    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
2717    REQUIRE_VSX(ctx);
2718
2719    xt = gen_vsr_ptr(a->xt);
2720    xa = gen_vsr_ptr(a->xa);
2721    xb = gen_vsr_ptr(a->xb);
2722
2723    helper(tcg_env, xt, xa, xb);
2724    return true;
2725}
2726
2727TRANS(XSCMPEQDP, do_helper_XX3, gen_helper_XSCMPEQDP)
2728TRANS(XSCMPGEDP, do_helper_XX3, gen_helper_XSCMPGEDP)
2729TRANS(XSCMPGTDP, do_helper_XX3, gen_helper_XSCMPGTDP)
2730TRANS(XSMAXCDP, do_helper_XX3, gen_helper_XSMAXCDP)
2731TRANS(XSMINCDP, do_helper_XX3, gen_helper_XSMINCDP)
2732TRANS(XSMAXJDP, do_helper_XX3, gen_helper_XSMAXJDP)
2733TRANS(XSMINJDP, do_helper_XX3, gen_helper_XSMINJDP)
2734
2735static bool do_helper_X(arg_X *a,
2736    void (*helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
2737{
2738    TCGv_ptr rt, ra, rb;
2739
2740    rt = gen_avr_ptr(a->rt);
2741    ra = gen_avr_ptr(a->ra);
2742    rb = gen_avr_ptr(a->rb);
2743
2744    helper(tcg_env, rt, ra, rb);
2745    return true;
2746}
2747
2748static bool do_xscmpqp(DisasContext *ctx, arg_X *a,
2749    void (*helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
2750{
2751    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2752    REQUIRE_VSX(ctx);
2753
2754    return do_helper_X(a, helper);
2755}
2756
2757TRANS(XSCMPEQQP, do_xscmpqp, gen_helper_XSCMPEQQP)
2758TRANS(XSCMPGEQP, do_xscmpqp, gen_helper_XSCMPGEQP)
2759TRANS(XSCMPGTQP, do_xscmpqp, gen_helper_XSCMPGTQP)
2760TRANS(XSMAXCQP, do_xscmpqp, gen_helper_XSMAXCQP)
2761TRANS(XSMINCQP, do_xscmpqp, gen_helper_XSMINCQP)
2762
2763static bool trans_XVCVSPBF16(DisasContext *ctx, arg_XX2 *a)
2764{
2765    TCGv_ptr xt, xb;
2766
2767    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2768    REQUIRE_VSX(ctx);
2769
2770    xt = gen_vsr_ptr(a->xt);
2771    xb = gen_vsr_ptr(a->xb);
2772
2773    gen_helper_XVCVSPBF16(tcg_env, xt, xb);
2774    return true;
2775}
2776
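/*
 * A bfloat16 value is the high half of the corresponding binary32, so
 * the non-signaling conversion to single precision is a plain 16-bit
 * left shift of each word element.
 */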
2777static bool trans_XVCVBF16SPN(DisasContext *ctx, arg_XX2 *a)
2778{
2779    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2780    REQUIRE_VSX(ctx);
2781
2782    tcg_gen_gvec_shli(MO_32, vsr_full_offset(a->xt), vsr_full_offset(a->xb),
2783                      16, 16, 16);
2784
2785    return true;
2786}
2787
2788/*
2789 * The PowerISA 3.1 mentions that for the current version of the
2790 * architecture, "the hardware implementation provides the effect of
2791 * ACC[i] and VSRs 4*i to 4*i + 3 logically containing the same data"
2792 * and "The Accumulators introduce no new logical state at this time"
2793 * (page 501). For now it seems unnecessary to create new structures,
2794 * so ACC[i] is the same as VSRs 4*i to 4*i + 3 and moves to and from
2795 * accumulators are therefore no-ops.
2796 */
2797static bool trans_XXMFACC(DisasContext *ctx, arg_X_a *a)
2798{
2799    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2800    REQUIRE_VSX(ctx);
2801    return true;
2802}
2803
2804static bool trans_XXMTACC(DisasContext *ctx, arg_X_a *a)
2805{
2806    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2807    REQUIRE_VSX(ctx);
2808    return true;
2809}
2810
2811static bool trans_XXSETACCZ(DisasContext *ctx, arg_X_a *a)
2812{
2813    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2814    REQUIRE_VSX(ctx);
2815    tcg_gen_gvec_dup_imm(MO_64, acc_full_offset(a->ra), 64, 64, 0);
2816    return true;
2817}
2818
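/*
 * The GER (outer-product) instructions accumulate into ACC[XT], i.e.
 * VSRs 4*XT to 4*XT+3, so a source VSR overlapping the target
 * accumulator is rejected as invalid.  The product/row/column masks of
 * the prefixed PM* forms are packed into a single i32 for the helper.
 */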
2819static bool do_ger(DisasContext *ctx, arg_MMIRR_XX3 *a,
2820    void (*helper)(TCGv_env, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32))
2821{
2822    uint32_t mask;
2823    TCGv_ptr xt, xa, xb;
2824    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2825    REQUIRE_VSX(ctx);
2826    if (unlikely((a->xa / 4 == a->xt) || (a->xb / 4 == a->xt))) {
2827        gen_invalid(ctx);
2828        return true;
2829    }
2830
2831    xt = gen_acc_ptr(a->xt);
2832    xa = gen_vsr_ptr(a->xa);
2833    xb = gen_vsr_ptr(a->xb);
2834
2835    mask = ger_pack_masks(a->pmsk, a->ymsk, a->xmsk);
2836    helper(tcg_env, xa, xb, xt, tcg_constant_i32(mask));
2837    return true;
2838}
2839
2840TRANS(XVI4GER8, do_ger, gen_helper_XVI4GER8)
2841TRANS(XVI4GER8PP, do_ger, gen_helper_XVI4GER8PP)
2842TRANS(XVI8GER4, do_ger, gen_helper_XVI8GER4)
2843TRANS(XVI8GER4PP, do_ger, gen_helper_XVI8GER4PP)
2844TRANS(XVI8GER4SPP, do_ger, gen_helper_XVI8GER4SPP)
2845TRANS(XVI16GER2, do_ger, gen_helper_XVI16GER2)
2846TRANS(XVI16GER2PP, do_ger, gen_helper_XVI16GER2PP)
2847TRANS(XVI16GER2S, do_ger, gen_helper_XVI16GER2S)
2848TRANS(XVI16GER2SPP, do_ger, gen_helper_XVI16GER2SPP)
2849
2850TRANS64(PMXVI4GER8, do_ger, gen_helper_XVI4GER8)
2851TRANS64(PMXVI4GER8PP, do_ger, gen_helper_XVI4GER8PP)
2852TRANS64(PMXVI8GER4, do_ger, gen_helper_XVI8GER4)
2853TRANS64(PMXVI8GER4PP, do_ger, gen_helper_XVI8GER4PP)
2854TRANS64(PMXVI8GER4SPP, do_ger, gen_helper_XVI8GER4SPP)
2855TRANS64(PMXVI16GER2, do_ger, gen_helper_XVI16GER2)
2856TRANS64(PMXVI16GER2PP, do_ger, gen_helper_XVI16GER2PP)
2857TRANS64(PMXVI16GER2S, do_ger, gen_helper_XVI16GER2S)
2858TRANS64(PMXVI16GER2SPP, do_ger, gen_helper_XVI16GER2SPP)
2859
2860TRANS(XVBF16GER2, do_ger, gen_helper_XVBF16GER2)
2861TRANS(XVBF16GER2PP, do_ger, gen_helper_XVBF16GER2PP)
2862TRANS(XVBF16GER2PN, do_ger, gen_helper_XVBF16GER2PN)
2863TRANS(XVBF16GER2NP, do_ger, gen_helper_XVBF16GER2NP)
2864TRANS(XVBF16GER2NN, do_ger, gen_helper_XVBF16GER2NN)
2865
2866TRANS(XVF16GER2, do_ger, gen_helper_XVF16GER2)
2867TRANS(XVF16GER2PP, do_ger, gen_helper_XVF16GER2PP)
2868TRANS(XVF16GER2PN, do_ger, gen_helper_XVF16GER2PN)
2869TRANS(XVF16GER2NP, do_ger, gen_helper_XVF16GER2NP)
2870TRANS(XVF16GER2NN, do_ger, gen_helper_XVF16GER2NN)
2871
2872TRANS(XVF32GER, do_ger, gen_helper_XVF32GER)
2873TRANS(XVF32GERPP, do_ger, gen_helper_XVF32GERPP)
2874TRANS(XVF32GERPN, do_ger, gen_helper_XVF32GERPN)
2875TRANS(XVF32GERNP, do_ger, gen_helper_XVF32GERNP)
2876TRANS(XVF32GERNN, do_ger, gen_helper_XVF32GERNN)
2877
2878TRANS(XVF64GER, do_ger, gen_helper_XVF64GER)
2879TRANS(XVF64GERPP, do_ger, gen_helper_XVF64GERPP)
2880TRANS(XVF64GERPN, do_ger, gen_helper_XVF64GERPN)
2881TRANS(XVF64GERNP, do_ger, gen_helper_XVF64GERNP)
2882TRANS(XVF64GERNN, do_ger, gen_helper_XVF64GERNN)
2883
2884TRANS64(PMXVBF16GER2, do_ger, gen_helper_XVBF16GER2)
2885TRANS64(PMXVBF16GER2PP, do_ger, gen_helper_XVBF16GER2PP)
2886TRANS64(PMXVBF16GER2PN, do_ger, gen_helper_XVBF16GER2PN)
2887TRANS64(PMXVBF16GER2NP, do_ger, gen_helper_XVBF16GER2NP)
2888TRANS64(PMXVBF16GER2NN, do_ger, gen_helper_XVBF16GER2NN)
2889
2890TRANS64(PMXVF16GER2, do_ger, gen_helper_XVF16GER2)
2891TRANS64(PMXVF16GER2PP, do_ger, gen_helper_XVF16GER2PP)
2892TRANS64(PMXVF16GER2PN, do_ger, gen_helper_XVF16GER2PN)
2893TRANS64(PMXVF16GER2NP, do_ger, gen_helper_XVF16GER2NP)
2894TRANS64(PMXVF16GER2NN, do_ger, gen_helper_XVF16GER2NN)
2895
2896TRANS64(PMXVF32GER, do_ger, gen_helper_XVF32GER)
2897TRANS64(PMXVF32GERPP, do_ger, gen_helper_XVF32GERPP)
2898TRANS64(PMXVF32GERPN, do_ger, gen_helper_XVF32GERPN)
2899TRANS64(PMXVF32GERNP, do_ger, gen_helper_XVF32GERNP)
2900TRANS64(PMXVF32GERNN, do_ger, gen_helper_XVF32GERNN)
2901
2902TRANS64(PMXVF64GER, do_ger, gen_helper_XVF64GER)
2903TRANS64(PMXVF64GERPP, do_ger, gen_helper_XVF64GERPP)
2904TRANS64(PMXVF64GERPN, do_ger, gen_helper_XVF64GERPN)
2905TRANS64(PMXVF64GERNP, do_ger, gen_helper_XVF64GERNP)
2906TRANS64(PMXVF64GERNN, do_ger, gen_helper_XVF64GERNN)
2907
2908#undef GEN_XX2FORM
2909#undef GEN_XX3FORM
2910#undef GEN_XX2IFORM
2911#undef GEN_XX3_RC_FORM
2912#undef GEN_XX3FORM_DM
2913#undef VSX_LOGICAL
2914