xref: /qemu/target/hexagon/gen_tcg_hvx.h (revision 78f314cf)
1 /*
2  *  Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
3  *
4  *  This program is free software; you can redistribute it and/or modify
5  *  it under the terms of the GNU General Public License as published by
6  *  the Free Software Foundation; either version 2 of the License, or
7  *  (at your option) any later version.
8  *
9  *  This program is distributed in the hope that it will be useful,
10  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
11  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  *  GNU General Public License for more details.
13  *
14  *  You should have received a copy of the GNU General Public License
15  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17 
18 #ifndef HEXAGON_GEN_TCG_HVX_H
19 #define HEXAGON_GEN_TCG_HVX_H
20 
21 /*
22  * Histogram instructions
23  *
24  * Note that these instructions operate directly on the vector registers
25  * and therefore happen after commit.
26  *
27  * The generate_<tag> function is called twice
28  *     The first time is during the normal TCG generation
29  *         ctx->pre_commit is true
30  *         In the masked cases, we save the mask to the qtmp temporary
31  *         Otherwise, there is nothing to do
32  *     The second call is at the end of gen_commit_packet
33  *         ctx->pre_commit is false
34  *         Generate the call to the helper
35  */
36 
/*
 * Sanity check shared by all vhist/vwhist generators: abort translation
 * unless the packet has defined exactly one .tmp vector register
 * (ctx->tmp_vregs_idx == 1).
 */
static inline void assert_vhist_tmp(DisasContext *ctx)
{
    /* vhist instructions require exactly one .tmp to be defined */
    g_assert(ctx->tmp_vregs_idx == 1);
}
42 
/*
 * Unmasked histogram: nothing to emit pre-commit; on the second call
 * (after commit) generate the helper.  Wrapped in do/while (0) so the
 * macro expands to a single statement — the bare "if" form is unsafe
 * in dangling-else contexts and inconsistent with the masked variants.
 */
#define fGEN_TCG_V6_vhist(SHORTCODE) \
    do { \
        if (!ctx->pre_commit) { \
            assert_vhist_tmp(ctx); \
            gen_helper_vhist(cpu_env); \
        } \
    } while (0)
#define fGEN_TCG_V6_vhistq(SHORTCODE) \
    do { \
        if (!ctx->pre_commit) { \
            /* Second call (post-commit): emit the helper */ \
            assert_vhist_tmp(ctx); \
            gen_helper_vhistq(cpu_env); \
        } else { \
            /* First call (pre-commit): save the Qv mask in qtmp */ \
            intptr_t mask_off = offsetof(CPUHexagonState, qtmp); \
            tcg_gen_gvec_mov(MO_64, mask_off, QvV_off, \
                             sizeof(MMVector), sizeof(MMVector)); \
        } \
    } while (0)
/*
 * Unmasked weighted histogram (256 bins): helper runs after commit.
 * do/while (0) wrapper added for macro hygiene (single statement).
 */
#define fGEN_TCG_V6_vwhist256(SHORTCODE) \
    do { \
        if (!ctx->pre_commit) { \
            assert_vhist_tmp(ctx); \
            gen_helper_vwhist256(cpu_env); \
        } \
    } while (0)
#define fGEN_TCG_V6_vwhist256q(SHORTCODE) \
    do { \
        if (!ctx->pre_commit) { \
            /* Post-commit: generate the helper call */ \
            assert_vhist_tmp(ctx); \
            gen_helper_vwhist256q(cpu_env); \
        } else { \
            /* Pre-commit: copy Qv into the qtmp scratch area */ \
            intptr_t qoff = offsetof(CPUHexagonState, qtmp); \
            tcg_gen_gvec_mov(MO_64, qoff, QvV_off, \
                             sizeof(MMVector), sizeof(MMVector)); \
        } \
    } while (0)
/*
 * Unmasked saturating weighted histogram (256 bins): helper runs after
 * commit.  do/while (0) wrapper added for macro hygiene.
 */
#define fGEN_TCG_V6_vwhist256_sat(SHORTCODE) \
    do { \
        if (!ctx->pre_commit) { \
            assert_vhist_tmp(ctx); \
            gen_helper_vwhist256_sat(cpu_env); \
        } \
    } while (0)
#define fGEN_TCG_V6_vwhist256q_sat(SHORTCODE) \
    do { \
        if (!ctx->pre_commit) { \
            /* Post-commit: generate the helper call */ \
            assert_vhist_tmp(ctx); \
            gen_helper_vwhist256q_sat(cpu_env); \
        } else { \
            /* Pre-commit: copy Qv into the qtmp scratch area */ \
            intptr_t qoff = offsetof(CPUHexagonState, qtmp); \
            tcg_gen_gvec_mov(MO_64, qoff, QvV_off, \
                             sizeof(MMVector), sizeof(MMVector)); \
        } \
    } while (0)
/*
 * Unmasked weighted histogram (128 bins): helper runs after commit.
 * do/while (0) wrapper added for macro hygiene.
 */
#define fGEN_TCG_V6_vwhist128(SHORTCODE) \
    do { \
        if (!ctx->pre_commit) { \
            assert_vhist_tmp(ctx); \
            gen_helper_vwhist128(cpu_env); \
        } \
    } while (0)
#define fGEN_TCG_V6_vwhist128q(SHORTCODE) \
    do { \
        if (!ctx->pre_commit) { \
            /* Post-commit: generate the helper call */ \
            assert_vhist_tmp(ctx); \
            gen_helper_vwhist128q(cpu_env); \
        } else { \
            /* Pre-commit: copy Qv into the qtmp scratch area */ \
            intptr_t qoff = offsetof(CPUHexagonState, qtmp); \
            tcg_gen_gvec_mov(MO_64, qoff, QvV_off, \
                             sizeof(MMVector), sizeof(MMVector)); \
        } \
    } while (0)
/*
 * Unmasked weighted histogram (128 bins, immediate-selected): the uiV
 * immediate is passed to the helper as a TCG constant.  Helper runs
 * after commit; do/while (0) wrapper added for macro hygiene.
 */
#define fGEN_TCG_V6_vwhist128m(SHORTCODE) \
    do { \
        if (!ctx->pre_commit) { \
            TCGv tcgv_uiV = tcg_constant_tl(uiV); \
            assert_vhist_tmp(ctx); \
            gen_helper_vwhist128m(cpu_env, tcgv_uiV); \
        } \
    } while (0)
#define fGEN_TCG_V6_vwhist128qm(SHORTCODE) \
    do { \
        if (!ctx->pre_commit) { \
            /* Post-commit: pass the uiV immediate to the helper */ \
            TCGv imm = tcg_constant_tl(uiV); \
            assert_vhist_tmp(ctx); \
            gen_helper_vwhist128qm(cpu_env, imm); \
        } else { \
            /* Pre-commit: copy Qv into the qtmp scratch area */ \
            intptr_t qoff = offsetof(CPUHexagonState, qtmp); \
            tcg_gen_gvec_mov(MO_64, qoff, QvV_off, \
                             sizeof(MMVector), sizeof(MMVector)); \
        } \
    } while (0)
125 
126 
/* Vd = Vu: whole-vector copy */
#define fGEN_TCG_V6_vassign(SHORTCODE) \
    tcg_gen_gvec_mov(MO_64, VdV_off, VuV_off, \
                     sizeof(MMVector), sizeof(MMVector))

/* .tmp variant generates the identical copy */
#define fGEN_TCG_V6_vassign_tmp(SHORTCODE) \
    tcg_gen_gvec_mov(MO_64, VdV_off, VuV_off, \
                     sizeof(MMVector), sizeof(MMVector))

/* Vdd = Vu:Vv — low half of the pair gets Vv, high half gets Vu */
#define fGEN_TCG_V6_vcombine_tmp(SHORTCODE) \
    do { \
        tcg_gen_gvec_mov(MO_64, VddV_off, VvV_off, \
                         sizeof(MMVector), sizeof(MMVector)); \
        tcg_gen_gvec_mov(MO_64, VddV_off + sizeof(MMVector), VuV_off, \
                         sizeof(MMVector), sizeof(MMVector)); \
    } while (0)
142 
143 /*
144  * Vector combine
145  *
146  * Be careful that the source and dest don't overlap
147  */
#define fGEN_TCG_V6_vcombine(SHORTCODE) \
    do { \
        if (VddV_off == VuV_off) { \
            /* Vu overlaps Vdd.lo: stage Vu in vtmp before writing */ \
            intptr_t scratch = offsetof(CPUHexagonState, vtmp); \
            tcg_gen_gvec_mov(MO_64, scratch, VuV_off, \
                             sizeof(MMVector), sizeof(MMVector)); \
            tcg_gen_gvec_mov(MO_64, VddV_off, VvV_off, \
                             sizeof(MMVector), sizeof(MMVector)); \
            tcg_gen_gvec_mov(MO_64, VddV_off + sizeof(MMVector), scratch, \
                             sizeof(MMVector), sizeof(MMVector)); \
        } else { \
            /* No overlap: copy Vv then Vu directly into the pair */ \
            tcg_gen_gvec_mov(MO_64, VddV_off, VvV_off, \
                             sizeof(MMVector), sizeof(MMVector)); \
            tcg_gen_gvec_mov(MO_64, VddV_off + sizeof(MMVector), VuV_off, \
                             sizeof(MMVector), sizeof(MMVector)); \
        } \
    } while (0)
165 
/*
 * Vector conditional move: copy Vu to Vd only when the LSB of scalar
 * predicate PsV equals PRED; otherwise branch over the copy.
 */
#define fGEN_TCG_VEC_CMOV(PRED) \
    do { \
        TCGv lsb = tcg_temp_new(); \
        TCGLabel *false_label = gen_new_label(); \
        tcg_gen_andi_tl(lsb, PsV, 1); \
        tcg_gen_brcondi_tl(TCG_COND_NE, lsb, PRED, false_label); \
        tcg_gen_gvec_mov(MO_64, VdV_off, VuV_off, \
                         sizeof(MMVector), sizeof(MMVector)); \
        gen_set_label(false_label); \
    } while (0)


/* Vector conditional move (true) */
#define fGEN_TCG_V6_vcmov(SHORTCODE) \
    fGEN_TCG_VEC_CMOV(1)

/* Vector conditional move (false) */
#define fGEN_TCG_V6_vncmov(SHORTCODE) \
    fGEN_TCG_VEC_CMOV(0)
186 
/*
 * Vector add - various forms
 * MO_8/MO_16/MO_32 select byte/halfword/word lanes; the _dv forms
 * operate on register pairs (2 * sizeof(MMVector)).
 * (Also fixes the "SHORTCYDE" typo in the unused parameter name of the
 * vaddh and vaddh_dv variants.)
 */
#define fGEN_TCG_V6_vaddb(SHORTCODE) \
    tcg_gen_gvec_add(MO_8, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vaddh(SHORTCODE) \
    tcg_gen_gvec_add(MO_16, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vaddw(SHORTCODE) \
    tcg_gen_gvec_add(MO_32, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vaddb_dv(SHORTCODE) \
    tcg_gen_gvec_add(MO_8, VddV_off, VuuV_off, VvvV_off, \
                     sizeof(MMVector) * 2, sizeof(MMVector) * 2)

#define fGEN_TCG_V6_vaddh_dv(SHORTCODE) \
    tcg_gen_gvec_add(MO_16, VddV_off, VuuV_off, VvvV_off, \
                     sizeof(MMVector) * 2, sizeof(MMVector) * 2)

#define fGEN_TCG_V6_vaddw_dv(SHORTCODE) \
    tcg_gen_gvec_add(MO_32, VddV_off, VuuV_off, VvvV_off, \
                     sizeof(MMVector) * 2, sizeof(MMVector) * 2)
211 
/*
 * Vector sub - various forms
 * MO_8/MO_16/MO_32 select byte/halfword/word lanes; the _dv forms
 * operate on register pairs (2 * sizeof(MMVector)).
 */
#define fGEN_TCG_V6_vsubb(SHORTCODE) \
    tcg_gen_gvec_sub(MO_8, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vsubh(SHORTCODE) \
    tcg_gen_gvec_sub(MO_16, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vsubw(SHORTCODE) \
    tcg_gen_gvec_sub(MO_32, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vsubb_dv(SHORTCODE) \
    tcg_gen_gvec_sub(MO_8, VddV_off, VuuV_off, VvvV_off, \
                     sizeof(MMVector) * 2, sizeof(MMVector) * 2)

#define fGEN_TCG_V6_vsubh_dv(SHORTCODE) \
    tcg_gen_gvec_sub(MO_16, VddV_off, VuuV_off, VvvV_off, \
                     sizeof(MMVector) * 2, sizeof(MMVector) * 2)

#define fGEN_TCG_V6_vsubw_dv(SHORTCODE) \
    tcg_gen_gvec_sub(MO_32, VddV_off, VuuV_off, VvvV_off, \
                     sizeof(MMVector) * 2, sizeof(MMVector) * 2)
236 
/*
 * Vector shift right - various forms
 * The shift amount comes from Rt, masked to the lane width - 1
 * (15 for halfwords, 31 for words); gvec_sars is an arithmetic
 * (sign-propagating) right shift.
 */
#define fGEN_TCG_V6_vasrh(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 15); \
        tcg_gen_gvec_sars(MO_16, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

/* Vx.h += (Vu.h >> (Rt & 15)): shift into vtmp, then accumulate */
#define fGEN_TCG_V6_vasrh_acc(SHORTCODE) \
    do { \
        intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 15); \
        tcg_gen_gvec_sars(MO_16, tmpoff, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
        tcg_gen_gvec_add(MO_16, VxV_off, VxV_off, tmpoff, \
                         sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vasrw(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 31); \
        tcg_gen_gvec_sars(MO_32, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)
264 
/*
 * Vx.w += (Vu.w >> (Rt & 31)): arithmetic shift into vtmp, then
 * accumulate into Vx.  (Continuation-line alignment fixed to match
 * vasrh_acc/vaslw_acc.)
 */
#define fGEN_TCG_V6_vasrw_acc(SHORTCODE) \
    do { \
        intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 31); \
        tcg_gen_gvec_sars(MO_32, tmpoff, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
        tcg_gen_gvec_add(MO_32, VxV_off, VxV_off, tmpoff, \
                         sizeof(MMVector), sizeof(MMVector)); \
    } while (0)
275 
/*
 * Logical (zero-fill) right shifts: gvec_shrs with the shift amount
 * from Rt masked to lane width - 1 (7/15/31 for byte/half/word).
 */
#define fGEN_TCG_V6_vlsrb(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 7); \
        tcg_gen_gvec_shrs(MO_8, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vlsrh(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 15); \
        tcg_gen_gvec_shrs(MO_16, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vlsrw(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 31); \
        tcg_gen_gvec_shrs(MO_32, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)
299 
/*
 * Vector shift left - various forms
 * gvec_shls with the shift amount from Rt masked to lane width - 1.
 * The _acc forms shift into vtmp and then add the result into Vx.
 */
#define fGEN_TCG_V6_vaslb(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 7); \
        tcg_gen_gvec_shls(MO_8, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vaslh(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 15); \
        tcg_gen_gvec_shls(MO_16, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

/* Vx.h += (Vu.h << (Rt & 15)) */
#define fGEN_TCG_V6_vaslh_acc(SHORTCODE) \
    do { \
        intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 15); \
        tcg_gen_gvec_shls(MO_16, tmpoff, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
        tcg_gen_gvec_add(MO_16, VxV_off, VxV_off, tmpoff, \
                         sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vaslw(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 31); \
        tcg_gen_gvec_shls(MO_32, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

/* Vx.w += (Vu.w << (Rt & 31)) */
#define fGEN_TCG_V6_vaslw_acc(SHORTCODE) \
    do { \
        intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 31); \
        tcg_gen_gvec_shls(MO_32, tmpoff, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
        tcg_gen_gvec_add(MO_32, VxV_off, VxV_off, tmpoff, \
                         sizeof(MMVector), sizeof(MMVector)); \
    } while (0)
346 
/*
 * Vector max - various forms
 * smax = signed per-lane maximum, umax = unsigned per-lane maximum.
 */
#define fGEN_TCG_V6_vmaxw(SHORTCODE) \
    tcg_gen_gvec_smax(MO_32, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vmaxh(SHORTCODE) \
    tcg_gen_gvec_smax(MO_16, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vmaxuh(SHORTCODE) \
    tcg_gen_gvec_umax(MO_16, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vmaxb(SHORTCODE) \
    tcg_gen_gvec_smax(MO_8, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vmaxub(SHORTCODE) \
    tcg_gen_gvec_umax(MO_8, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))

/*
 * Vector min - various forms
 * smin = signed per-lane minimum, umin = unsigned per-lane minimum.
 */
#define fGEN_TCG_V6_vminw(SHORTCODE) \
    tcg_gen_gvec_smin(MO_32, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vminh(SHORTCODE) \
    tcg_gen_gvec_smin(MO_16, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vminuh(SHORTCODE) \
    tcg_gen_gvec_umin(MO_16, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vminb(SHORTCODE) \
    tcg_gen_gvec_smin(MO_8, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vminub(SHORTCODE) \
    tcg_gen_gvec_umin(MO_8, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
380 
/* Vector logical ops (bitwise, so lane size is irrelevant: MO_64 used) */
#define fGEN_TCG_V6_vxor(SHORTCODE) \
    tcg_gen_gvec_xor(MO_64, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vand(SHORTCODE) \
    tcg_gen_gvec_and(MO_64, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vor(SHORTCODE) \
    tcg_gen_gvec_or(MO_64, VdV_off, VuV_off, VvV_off, \
                    sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vnot(SHORTCODE) \
    tcg_gen_gvec_not(MO_64, VdV_off, VuV_off, \
                     sizeof(MMVector), sizeof(MMVector))
397 
/*
 * Q register logical ops (bitwise over MMQReg):
 * _n forms use orc/andc, i.e. Qs OP ~Qt.
 */
#define fGEN_TCG_V6_pred_or(SHORTCODE) \
    tcg_gen_gvec_or(MO_64, QdV_off, QsV_off, QtV_off, \
                    sizeof(MMQReg), sizeof(MMQReg))

#define fGEN_TCG_V6_pred_and(SHORTCODE) \
    tcg_gen_gvec_and(MO_64, QdV_off, QsV_off, QtV_off, \
                     sizeof(MMQReg), sizeof(MMQReg))

#define fGEN_TCG_V6_pred_xor(SHORTCODE) \
    tcg_gen_gvec_xor(MO_64, QdV_off, QsV_off, QtV_off, \
                     sizeof(MMQReg), sizeof(MMQReg))

/* Qd = Qs | ~Qt */
#define fGEN_TCG_V6_pred_or_n(SHORTCODE) \
    tcg_gen_gvec_orc(MO_64, QdV_off, QsV_off, QtV_off, \
                     sizeof(MMQReg), sizeof(MMQReg))

/* Qd = Qs & ~Qt */
#define fGEN_TCG_V6_pred_and_n(SHORTCODE) \
    tcg_gen_gvec_andc(MO_64, QdV_off, QsV_off, QtV_off, \
                      sizeof(MMQReg), sizeof(MMQReg))

#define fGEN_TCG_V6_pred_not(SHORTCODE) \
    tcg_gen_gvec_not(MO_64, QdV_off, QsV_off, \
                     sizeof(MMQReg), sizeof(MMQReg))
422 
/*
 * Vector compares: run the gvec compare into vtmp (per-lane all-ones /
 * all-zeros result), then compress that mask into the Qd predicate
 * register via vec_to_qvec.  SIZE is the lane width in bytes.
 */
#define fGEN_TCG_VEC_CMP(COND, TYPE, SIZE) \
    do { \
        intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
        tcg_gen_gvec_cmp(COND, TYPE, tmpoff, VuV_off, VvV_off, \
                         sizeof(MMVector), sizeof(MMVector)); \
        vec_to_qvec(SIZE, QdV_off, tmpoff); \
    } while (0)

/* Signed greater-than */
#define fGEN_TCG_V6_vgtw(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_GT, MO_32, 4)
#define fGEN_TCG_V6_vgth(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_GT, MO_16, 2)
#define fGEN_TCG_V6_vgtb(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_GT, MO_8, 1)

/* Unsigned greater-than */
#define fGEN_TCG_V6_vgtuw(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_GTU, MO_32, 4)
#define fGEN_TCG_V6_vgtuh(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_GTU, MO_16, 2)
#define fGEN_TCG_V6_vgtub(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_GTU, MO_8, 1)

/* Equality */
#define fGEN_TCG_V6_veqw(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_EQ, MO_32, 4)
#define fGEN_TCG_V6_veqh(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_EQ, MO_16, 2)
#define fGEN_TCG_V6_veqb(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_EQ, MO_8, 1)
452 
/*
 * Compare-and-accumulate: like fGEN_TCG_VEC_CMP, but instead of writing
 * Qd directly, the compressed compare mask (in qtmp) is combined into
 * the existing Qx predicate with OP (and/or/xor).
 */
#define fGEN_TCG_VEC_CMP_OP(COND, TYPE, SIZE, OP) \
    do { \
        intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
        intptr_t qoff = offsetof(CPUHexagonState, qtmp); \
        tcg_gen_gvec_cmp(COND, TYPE, tmpoff, VuV_off, VvV_off, \
                         sizeof(MMVector), sizeof(MMVector)); \
        vec_to_qvec(SIZE, qoff, tmpoff); \
        OP(MO_64, QxV_off, QxV_off, qoff, sizeof(MMQReg), sizeof(MMQReg)); \
    } while (0)

/* Signed greater-than, word lanes */
#define fGEN_TCG_V6_vgtw_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_32, 4, tcg_gen_gvec_and)
#define fGEN_TCG_V6_vgtw_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_32, 4, tcg_gen_gvec_or)
#define fGEN_TCG_V6_vgtw_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_32, 4, tcg_gen_gvec_xor)

/* Unsigned greater-than, word lanes */
#define fGEN_TCG_V6_vgtuw_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_32, 4, tcg_gen_gvec_and)
#define fGEN_TCG_V6_vgtuw_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_32, 4, tcg_gen_gvec_or)
#define fGEN_TCG_V6_vgtuw_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_32, 4, tcg_gen_gvec_xor)

/* Signed greater-than, halfword lanes */
#define fGEN_TCG_V6_vgth_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_16, 2, tcg_gen_gvec_and)
#define fGEN_TCG_V6_vgth_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_16, 2, tcg_gen_gvec_or)
#define fGEN_TCG_V6_vgth_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_16, 2, tcg_gen_gvec_xor)

/* Unsigned greater-than, halfword lanes */
#define fGEN_TCG_V6_vgtuh_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_16, 2, tcg_gen_gvec_and)
#define fGEN_TCG_V6_vgtuh_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_16, 2, tcg_gen_gvec_or)
#define fGEN_TCG_V6_vgtuh_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_16, 2, tcg_gen_gvec_xor)

/* Signed greater-than, byte lanes */
#define fGEN_TCG_V6_vgtb_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_8, 1, tcg_gen_gvec_and)
#define fGEN_TCG_V6_vgtb_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_8, 1, tcg_gen_gvec_or)
#define fGEN_TCG_V6_vgtb_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_8, 1, tcg_gen_gvec_xor)

/* Unsigned greater-than, byte lanes */
#define fGEN_TCG_V6_vgtub_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_8, 1, tcg_gen_gvec_and)
#define fGEN_TCG_V6_vgtub_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_8, 1, tcg_gen_gvec_or)
#define fGEN_TCG_V6_vgtub_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_8, 1, tcg_gen_gvec_xor)

/* Equality, word lanes */
#define fGEN_TCG_V6_veqw_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_32, 4, tcg_gen_gvec_and)
#define fGEN_TCG_V6_veqw_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_32, 4, tcg_gen_gvec_or)
#define fGEN_TCG_V6_veqw_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_32, 4, tcg_gen_gvec_xor)

/* Equality, halfword lanes */
#define fGEN_TCG_V6_veqh_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_16, 2, tcg_gen_gvec_and)
#define fGEN_TCG_V6_veqh_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_16, 2, tcg_gen_gvec_or)
#define fGEN_TCG_V6_veqh_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_16, 2, tcg_gen_gvec_xor)

/* Equality, byte lanes */
#define fGEN_TCG_V6_veqb_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_8, 1, tcg_gen_gvec_and)
#define fGEN_TCG_V6_veqb_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_8, 1, tcg_gen_gvec_or)
#define fGEN_TCG_V6_veqb_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_8, 1, tcg_gen_gvec_xor)
525 
/* Vector splat - broadcast scalar Rt into every word/half/byte lane */
#define fGEN_TCG_V6_lvsplatw(SHORTCODE) \
    tcg_gen_gvec_dup_i32(MO_32, VdV_off, \
                         sizeof(MMVector), sizeof(MMVector), RtV)

#define fGEN_TCG_V6_lvsplath(SHORTCODE) \
    tcg_gen_gvec_dup_i32(MO_16, VdV_off, \
                         sizeof(MMVector), sizeof(MMVector), RtV)

#define fGEN_TCG_V6_lvsplatb(SHORTCODE) \
    tcg_gen_gvec_dup_i32(MO_8, VdV_off, \
                         sizeof(MMVector), sizeof(MMVector), RtV)
538 
/* Vector absolute value - per-lane |Vu| at byte/half/word width */
#define fGEN_TCG_V6_vabsb(SHORTCODE) \
    tcg_gen_gvec_abs(MO_8, VdV_off, VuV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vabsh(SHORTCODE) \
    tcg_gen_gvec_abs(MO_16, VdV_off, VuV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vabsw(SHORTCODE) \
    tcg_gen_gvec_abs(MO_32, VdV_off, VuV_off, \
                     sizeof(MMVector), sizeof(MMVector))
551 
/*
 * Vector loads: these macros simply expand to SHORTCODE, i.e. the
 * instruction semantics are generated directly with no extra wrapping.
 */
#define fGEN_TCG_V6_vL32b_pi(SHORTCODE)                    SHORTCODE
#define fGEN_TCG_V6_vL32Ub_pi(SHORTCODE)                   SHORTCODE
#define fGEN_TCG_V6_vL32b_cur_pi(SHORTCODE)                SHORTCODE
#define fGEN_TCG_V6_vL32b_tmp_pi(SHORTCODE)                SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_pi(SHORTCODE)                 SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_cur_pi(SHORTCODE)             SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_tmp_pi(SHORTCODE)             SHORTCODE
#define fGEN_TCG_V6_vL32b_ai(SHORTCODE)                    SHORTCODE
#define fGEN_TCG_V6_vL32Ub_ai(SHORTCODE)                   SHORTCODE
#define fGEN_TCG_V6_vL32b_cur_ai(SHORTCODE)                SHORTCODE
#define fGEN_TCG_V6_vL32b_tmp_ai(SHORTCODE)                SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_ai(SHORTCODE)                 SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_cur_ai(SHORTCODE)             SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_tmp_ai(SHORTCODE)             SHORTCODE
#define fGEN_TCG_V6_vL32b_ppu(SHORTCODE)                   SHORTCODE
#define fGEN_TCG_V6_vL32Ub_ppu(SHORTCODE)                  SHORTCODE
#define fGEN_TCG_V6_vL32b_cur_ppu(SHORTCODE)               SHORTCODE
#define fGEN_TCG_V6_vL32b_tmp_ppu(SHORTCODE)               SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_ppu(SHORTCODE)                SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_cur_ppu(SHORTCODE)            SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_tmp_ppu(SHORTCODE)            SHORTCODE
574 
/*
 * Predicated vector loads: compute the effective address (GET_EA sets
 * EA), evaluate the predicate into LSB (PRED), and skip both the load
 * and the address increment (INC) when the predicate is false.
 */
#define fGEN_TCG_PRED_VEC_LOAD(GET_EA, PRED, DSTOFF, INC) \
    do { \
        TCGv LSB = tcg_temp_new(); \
        TCGLabel *false_label = gen_new_label(); \
        GET_EA; \
        PRED; \
        tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, false_label); \
        gen_vreg_load(ctx, DSTOFF, EA, true); \
        INC; \
        gen_set_label(false_label); \
    } while (0)
587 
/*
 * Post-increment (pi) forms: EA comes from RxV, which is then advanced
 * by siV * sizeof(MMVector) via fPM_I.  "pred" tests fLSBOLD(PvV),
 * "npred" tests fLSBOLDNOT(PvV).
 */
#define fGEN_TCG_PRED_VEC_LOAD_pred_pi \
    fGEN_TCG_PRED_VEC_LOAD(fLSBOLD(PvV), \
                           fEA_REG(RxV), \
                           VdV_off, \
                           fPM_I(RxV, siV * sizeof(MMVector)))
#define fGEN_TCG_PRED_VEC_LOAD_npred_pi \
    fGEN_TCG_PRED_VEC_LOAD(fLSBOLDNOT(PvV), \
                           fEA_REG(RxV), \
                           VdV_off, \
                           fPM_I(RxV, siV * sizeof(MMVector)))

#define fGEN_TCG_V6_vL32b_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_pi
#define fGEN_TCG_V6_vL32b_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_pi
#define fGEN_TCG_V6_vL32b_cur_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_pi
#define fGEN_TCG_V6_vL32b_cur_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_pi
#define fGEN_TCG_V6_vL32b_tmp_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_pi
#define fGEN_TCG_V6_vL32b_tmp_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_pi
#define fGEN_TCG_V6_vL32b_nt_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_pi
#define fGEN_TCG_V6_vL32b_nt_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_pi
#define fGEN_TCG_V6_vL32b_nt_cur_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_pi
#define fGEN_TCG_V6_vL32b_nt_cur_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_pi
#define fGEN_TCG_V6_vL32b_nt_tmp_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_pi
#define fGEN_TCG_V6_vL32b_nt_tmp_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_pi
623 
/*
 * Absolute-immediate (ai) forms: EA = RtV + siV * sizeof(MMVector)
 * via fEA_RI; no post-increment (the INC slot is an empty statement).
 */
#define fGEN_TCG_PRED_VEC_LOAD_pred_ai \
    fGEN_TCG_PRED_VEC_LOAD(fLSBOLD(PvV), \
                           fEA_RI(RtV, siV * sizeof(MMVector)), \
                           VdV_off, \
                           do {} while (0))
#define fGEN_TCG_PRED_VEC_LOAD_npred_ai \
    fGEN_TCG_PRED_VEC_LOAD(fLSBOLDNOT(PvV), \
                           fEA_RI(RtV, siV * sizeof(MMVector)), \
                           VdV_off, \
                           do {} while (0))

#define fGEN_TCG_V6_vL32b_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ai
#define fGEN_TCG_V6_vL32b_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ai
#define fGEN_TCG_V6_vL32b_cur_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ai
#define fGEN_TCG_V6_vL32b_cur_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ai
#define fGEN_TCG_V6_vL32b_tmp_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ai
#define fGEN_TCG_V6_vL32b_tmp_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ai
#define fGEN_TCG_V6_vL32b_nt_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ai
#define fGEN_TCG_V6_vL32b_nt_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ai
#define fGEN_TCG_V6_vL32b_nt_cur_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ai
#define fGEN_TCG_V6_vL32b_nt_cur_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ai
#define fGEN_TCG_V6_vL32b_nt_tmp_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ai
#define fGEN_TCG_V6_vL32b_nt_tmp_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ai
659 
/*
 * Post-increment-by-register (ppu) forms: EA comes from RxV, which is
 * then modified by the MuV modifier register via fPM_M.
 */
#define fGEN_TCG_PRED_VEC_LOAD_pred_ppu \
    fGEN_TCG_PRED_VEC_LOAD(fLSBOLD(PvV), \
                           fEA_REG(RxV), \
                           VdV_off, \
                           fPM_M(RxV, MuV))
#define fGEN_TCG_PRED_VEC_LOAD_npred_ppu \
    fGEN_TCG_PRED_VEC_LOAD(fLSBOLDNOT(PvV), \
                           fEA_REG(RxV), \
                           VdV_off, \
                           fPM_M(RxV, MuV))

#define fGEN_TCG_V6_vL32b_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ppu
#define fGEN_TCG_V6_vL32b_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ppu
#define fGEN_TCG_V6_vL32b_cur_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ppu
#define fGEN_TCG_V6_vL32b_cur_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ppu
#define fGEN_TCG_V6_vL32b_tmp_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ppu
#define fGEN_TCG_V6_vL32b_tmp_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ppu
#define fGEN_TCG_V6_vL32b_nt_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ppu
#define fGEN_TCG_V6_vL32b_nt_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ppu
#define fGEN_TCG_V6_vL32b_nt_cur_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ppu
#define fGEN_TCG_V6_vL32b_nt_cur_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ppu
#define fGEN_TCG_V6_vL32b_nt_tmp_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ppu
#define fGEN_TCG_V6_vL32b_nt_tmp_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ppu
695 
/* Vector stores: like the plain loads, these expand to SHORTCODE directly */
#define fGEN_TCG_V6_vS32b_pi(SHORTCODE)                    SHORTCODE
#define fGEN_TCG_V6_vS32Ub_pi(SHORTCODE)                   SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_pi(SHORTCODE)                 SHORTCODE
#define fGEN_TCG_V6_vS32b_ai(SHORTCODE)                    SHORTCODE
#define fGEN_TCG_V6_vS32Ub_ai(SHORTCODE)                   SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_ai(SHORTCODE)                 SHORTCODE
#define fGEN_TCG_V6_vS32b_ppu(SHORTCODE)                   SHORTCODE
#define fGEN_TCG_V6_vS32Ub_ppu(SHORTCODE)                  SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_ppu(SHORTCODE)                SHORTCODE
706 
/*
 * New value vector stores: compute EA, store the new-value operand
 * (OsN_off) for this slot, then apply the addressing-mode increment.
 */
#define fGEN_TCG_NEWVAL_VEC_STORE(GET_EA, INC) \
    do { \
        GET_EA; \
        gen_vreg_store(ctx, EA, OsN_off, insn->slot, true); \
        INC; \
    } while (0)

/* Post-increment by immediate */
#define fGEN_TCG_NEWVAL_VEC_STORE_pi \
    fGEN_TCG_NEWVAL_VEC_STORE(fEA_REG(RxV), fPM_I(RxV, siV * sizeof(MMVector)))

#define fGEN_TCG_V6_vS32b_new_pi(SHORTCODE) \
    fGEN_TCG_NEWVAL_VEC_STORE_pi
#define fGEN_TCG_V6_vS32b_nt_new_pi(SHORTCODE) \
    fGEN_TCG_NEWVAL_VEC_STORE_pi

/* Absolute immediate offset, no increment */
#define fGEN_TCG_NEWVAL_VEC_STORE_ai \
    fGEN_TCG_NEWVAL_VEC_STORE(fEA_RI(RtV, siV * sizeof(MMVector)), \
                              do { } while (0))

#define fGEN_TCG_V6_vS32b_new_ai(SHORTCODE) \
    fGEN_TCG_NEWVAL_VEC_STORE_ai
#define fGEN_TCG_V6_vS32b_nt_new_ai(SHORTCODE) \
    fGEN_TCG_NEWVAL_VEC_STORE_ai

/* Post-increment by modifier register */
#define fGEN_TCG_NEWVAL_VEC_STORE_ppu \
    fGEN_TCG_NEWVAL_VEC_STORE(fEA_REG(RxV), fPM_M(RxV, MuV))

#define fGEN_TCG_V6_vS32b_new_ppu(SHORTCODE) \
    fGEN_TCG_NEWVAL_VEC_STORE_ppu
#define fGEN_TCG_V6_vS32b_nt_new_ppu(SHORTCODE) \
    fGEN_TCG_NEWVAL_VEC_STORE_ppu
739 
/* Predicated vector stores */
/*
 * Conditionally store a vector under a scalar-predicate bit.
 *
 * The parameters were previously named (GET_EA, PRED, ...), but every
 * call site passes the predicate computation first and the EA
 * computation second.  The names (and the body placeholders) are
 * corrected here; since macro arguments are positional and expand in
 * body order, the expansion is token-for-token identical to before.
 *
 *     PRED    - computes the tested predicate bit into LSB
 *     GET_EA  - computes the effective address into EA
 *     SRCOFF  - offset of the source vector register
 *     ALIGN   - true for aligned stores, false for unaligned (vS32Ub)
 *     INC     - address-register update; placed before the label, so a
 *               false predicate skips the update as well as the store
 */
#define fGEN_TCG_PRED_VEC_STORE(PRED, GET_EA, SRCOFF, ALIGN, INC) \
    do { \
        TCGv LSB = tcg_temp_new(); \
        TCGLabel *false_label = gen_new_label(); \
        PRED; \
        GET_EA; \
        tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, false_label); \
        gen_vreg_store(ctx, EA, SRCOFF, insn->slot, ALIGN); \
        INC; \
        gen_set_label(false_label); \
    } while (0)
752 
/*
 * Predicated stores, post-increment-by-immediate addressing.
 * Arguments passed to fGEN_TCG_PRED_VEC_STORE, in order: predicate
 * test, EA computation, source-vector offset, alignment flag,
 * address-register update.  fLSBOLD tests the predicate bit,
 * fLSBOLDNOT its complement; the "new" variants store the in-packet
 * result (OsN_off) and are always aligned.
 */
#define fGEN_TCG_PRED_VEC_STORE_pred_pi(ALIGN) \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
                            fEA_REG(RxV), \
                            VsV_off, ALIGN, \
                            fPM_I(RxV, siV * sizeof(MMVector)))
#define fGEN_TCG_PRED_VEC_STORE_npred_pi(ALIGN) \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
                            fEA_REG(RxV), \
                            VsV_off, ALIGN, \
                            fPM_I(RxV, siV * sizeof(MMVector)))
#define fGEN_TCG_PRED_VEC_STORE_new_pred_pi \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
                            fEA_REG(RxV), \
                            OsN_off, true, \
                            fPM_I(RxV, siV * sizeof(MMVector)))
#define fGEN_TCG_PRED_VEC_STORE_new_npred_pi \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
                            fEA_REG(RxV), \
                            OsN_off, true, \
                            fPM_I(RxV, siV * sizeof(MMVector)))

/* Per-instruction wrappers; unaligned (vS32Ub) variants pass ALIGN=false */
#define fGEN_TCG_V6_vS32b_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_pi(true)
#define fGEN_TCG_V6_vS32b_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_pi(true)
#define fGEN_TCG_V6_vS32Ub_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_pi(false)
#define fGEN_TCG_V6_vS32Ub_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_pi(false)
#define fGEN_TCG_V6_vS32b_nt_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_pi(true)
#define fGEN_TCG_V6_vS32b_nt_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_pi(true)
#define fGEN_TCG_V6_vS32b_new_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_pred_pi
#define fGEN_TCG_V6_vS32b_new_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_npred_pi
#define fGEN_TCG_V6_vS32b_nt_new_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_pred_pi
#define fGEN_TCG_V6_vS32b_nt_new_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_npred_pi
794 
/*
 * Predicated stores, base + immediate offset addressing
 * (EA = RtV + siV * sizeof(MMVector)); no address-register update,
 * hence the empty do { } while (0) INC argument.
 */
#define fGEN_TCG_PRED_VEC_STORE_pred_ai(ALIGN) \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
                            fEA_RI(RtV, siV * sizeof(MMVector)), \
                            VsV_off, ALIGN, \
                            do { } while (0))
#define fGEN_TCG_PRED_VEC_STORE_npred_ai(ALIGN) \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
                            fEA_RI(RtV, siV * sizeof(MMVector)), \
                            VsV_off, ALIGN, \
                            do { } while (0))
#define fGEN_TCG_PRED_VEC_STORE_new_pred_ai \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
                            fEA_RI(RtV, siV * sizeof(MMVector)), \
                            OsN_off, true, \
                            do { } while (0))
#define fGEN_TCG_PRED_VEC_STORE_new_npred_ai \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
                            fEA_RI(RtV, siV * sizeof(MMVector)), \
                            OsN_off, true, \
                            do { } while (0))

/* Per-instruction wrappers; unaligned (vS32Ub) variants pass ALIGN=false */
#define fGEN_TCG_V6_vS32b_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_ai(true)
#define fGEN_TCG_V6_vS32b_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_ai(true)
#define fGEN_TCG_V6_vS32Ub_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_ai(false)
#define fGEN_TCG_V6_vS32Ub_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_ai(false)
#define fGEN_TCG_V6_vS32b_nt_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_ai(true)
#define fGEN_TCG_V6_vS32b_nt_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_ai(true)
#define fGEN_TCG_V6_vS32b_new_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_pred_ai
#define fGEN_TCG_V6_vS32b_new_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_npred_ai
#define fGEN_TCG_V6_vS32b_nt_new_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_pred_ai
#define fGEN_TCG_V6_vS32b_nt_new_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_npred_ai
836 
/*
 * Predicated stores, post-increment by modifier register
 * (EA = RxV, then RxV updated via fPM_M with MuV).  Because the INC
 * argument sits before the false label in fGEN_TCG_PRED_VEC_STORE,
 * the register update is also skipped when the predicate is false.
 */
#define fGEN_TCG_PRED_VEC_STORE_pred_ppu(ALIGN) \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
                            fEA_REG(RxV), \
                            VsV_off, ALIGN, \
                            fPM_M(RxV, MuV))
#define fGEN_TCG_PRED_VEC_STORE_npred_ppu(ALIGN) \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
                            fEA_REG(RxV), \
                            VsV_off, ALIGN, \
                            fPM_M(RxV, MuV))
#define fGEN_TCG_PRED_VEC_STORE_new_pred_ppu \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
                            fEA_REG(RxV), \
                            OsN_off, true, \
                            fPM_M(RxV, MuV))
#define fGEN_TCG_PRED_VEC_STORE_new_npred_ppu \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
                            fEA_REG(RxV), \
                            OsN_off, true, \
                            fPM_M(RxV, MuV))

/* Per-instruction wrappers; unaligned (vS32Ub) variants pass ALIGN=false */
#define fGEN_TCG_V6_vS32b_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_ppu(true)
#define fGEN_TCG_V6_vS32b_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_ppu(true)
#define fGEN_TCG_V6_vS32Ub_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_ppu(false)
#define fGEN_TCG_V6_vS32Ub_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_ppu(false)
#define fGEN_TCG_V6_vS32b_nt_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_ppu(true)
#define fGEN_TCG_V6_vS32b_nt_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_ppu(true)
#define fGEN_TCG_V6_vS32b_new_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_pred_ppu
#define fGEN_TCG_V6_vS32b_new_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_npred_ppu
#define fGEN_TCG_V6_vS32b_nt_new_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_pred_ppu
#define fGEN_TCG_V6_vS32b_nt_new_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_npred_ppu
878 
/* Masked vector stores */
/*
 * Vector-predicate masked stores (qpred = store under Qv mask,
 * nqpred = under complemented mask).  The semantics short-code is
 * emitted unchanged; no extra TCG glue is required here.
 */
#define fGEN_TCG_V6_vS32b_qpred_pi(SHORTCODE)              SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_qpred_pi(SHORTCODE)           SHORTCODE
#define fGEN_TCG_V6_vS32b_qpred_ai(SHORTCODE)              SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_qpred_ai(SHORTCODE)           SHORTCODE
#define fGEN_TCG_V6_vS32b_qpred_ppu(SHORTCODE)             SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_qpred_ppu(SHORTCODE)          SHORTCODE
#define fGEN_TCG_V6_vS32b_nqpred_pi(SHORTCODE)             SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_nqpred_pi(SHORTCODE)          SHORTCODE
#define fGEN_TCG_V6_vS32b_nqpred_ai(SHORTCODE)             SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_nqpred_ai(SHORTCODE)          SHORTCODE
#define fGEN_TCG_V6_vS32b_nqpred_ppu(SHORTCODE)            SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_nqpred_ppu(SHORTCODE)         SHORTCODE
892 
/* Store release not modelled in qemu, but need to suppress compiler warnings */
/*
 * These instructions emit no TCG code; the self-assignments below just
 * mark the decoded operands as used so the generated translator code
 * compiles without unused-variable diagnostics.
 * NOTE(review): self-assignment (rather than a (void) cast) looks
 * deliberate -- presumably because a (void) cast does not silence
 * "set but not used" warnings on all compilers; confirm before
 * changing this pattern.
 */
#define fGEN_TCG_V6_vS32b_srls_pi(SHORTCODE) \
    do { \
        siV = siV; \
    } while (0)
#define fGEN_TCG_V6_vS32b_srls_ai(SHORTCODE) \
    do { \
        RtV = RtV; \
        siV = siV; \
    } while (0)
#define fGEN_TCG_V6_vS32b_srls_ppu(SHORTCODE) \
    do { \
        MuV = MuV; \
    } while (0)
907 
908 #endif
909