1/*
2 *
3 * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2 or later, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program.  If not, see <http://www.gnu.org/licenses/>.
16 */
17#include "tcg/tcg-op-gvec.h"
18#include "tcg/tcg-gvec-desc.h"
19#include "internals.h"
20
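/*
 * Return true if the register group [astart, astart + asize) overlaps the
 * register group [bstart, bstart + bsize).  For example, (0, 2) and (1, 1)
 * overlap because v1 belongs to both groups, while (0, 2) and (2, 1) do not.
 */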
21static inline bool is_overlapped(const int8_t astart, int8_t asize,
22                                 const int8_t bstart, int8_t bsize)
23{
24    const int8_t aend = astart + asize;
25    const int8_t bend = bstart + bsize;
26
27    return MAX(aend, bend) - MIN(astart, bstart) < asize + bsize;
28}
29
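/* Vector instructions require mstatus.VS to not be Off. */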
30static bool require_rvv(DisasContext *s)
31{
32    return s->mstatus_vs != EXT_STATUS_DISABLED;
33}
34
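/*
 * Vector floating-point instructions additionally require the scalar FP
 * state (mstatus.FS) to be enabled and an FP extension matching the current
 * SEW: Zvfh for FP16, Zve32f for FP32, Zve64d for FP64.
 */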
35static bool require_rvf(DisasContext *s)
36{
37    if (s->mstatus_fs == EXT_STATUS_DISABLED) {
38        return false;
39    }
40
41    switch (s->sew) {
42    case MO_16:
43        return s->cfg_ptr->ext_zvfh;
44    case MO_32:
45        return s->cfg_ptr->ext_zve32f;
46    case MO_64:
47        return s->cfg_ptr->ext_zve64d;
48    default:
49        return false;
50    }
51}
52
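/*
 * Like require_rvf(), but checks FP support for the double-width type
 * (2 * SEW): Zvfh for FP16, Zve32f for FP32, Zve64d for FP64.
 */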
53static bool require_scale_rvf(DisasContext *s)
54{
55    if (s->mstatus_fs == EXT_STATUS_DISABLED) {
56        return false;
57    }
58
59    switch (s->sew) {
60    case MO_8:
61        return s->cfg_ptr->ext_zvfh;
62    case MO_16:
63        return s->cfg_ptr->ext_zve32f;
64    case MO_32:
65        return s->cfg_ptr->ext_zve64d;
66    default:
67        return false;
68    }
69}
70
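/*
 * As require_scale_rvf(), but FP16 only needs the conversion-only Zvfhmin
 * subset for the double-width type.
 */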
71static bool require_scale_rvfmin(DisasContext *s)
72{
73    if (s->mstatus_fs == EXT_STATUS_DISABLED) {
74        return false;
75    }
76
77    switch (s->sew) {
78    case MO_8:
79        return s->cfg_ptr->ext_zvfhmin;
80    case MO_16:
81        return s->cfg_ptr->ext_zve32f;
82    case MO_32:
83        return s->cfg_ptr->ext_zve64d;
84    default:
85        return false;
86    }
87}
88
89/* Destination vector register group cannot overlap source mask register. */
90static bool require_vm(int vm, int vd)
91{
92    return (vm != 0 || vd != 0);
93}
94
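/*
 * Segment operands occupy nf * EMUL vector registers (at least nf for
 * fractional EMUL); the whole group must not exceed 8 registers and must
 * not extend past v31. (Section 7.8)
 */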
95static bool require_nf(int vd, int nf, int lmul)
96{
97    int size = nf << MAX(lmul, 0);
98    return size <= 8 && vd + size <= 32;
99}
100
101/*
 * A vector register must be aligned to the passed-in LMUL (EMUL).
103 * If LMUL < 0, i.e. fractional LMUL, any vector register is allowed.
104 */
105static bool require_align(const int8_t val, const int8_t lmul)
106{
107    return lmul <= 0 || extract32(val, 0, lmul) == 0;
108}
109
110/*
111 * A destination vector register group can overlap a source vector
112 * register group only if one of the following holds:
113 *  1. The destination EEW equals the source EEW.
114 *  2. The destination EEW is smaller than the source EEW and the overlap
115 *     is in the lowest-numbered part of the source register group.
116 *  3. The destination EEW is greater than the source EEW, the source EMUL
117 *     is at least 1, and the overlap is in the highest-numbered part of
118 *     the destination register group.
119 * (Section 5.2)
120 *
121 * This function returns true if one of the following holds:
122 *  * Destination vector register group does not overlap a source vector
123 *    register group.
 *  * Rule 3 is met.
 * For rule 1, overlap is always allowed, so this function does not need to
 * be called.  For rule 2, the only legal overlap is vd == vs; the caller has
 * to check that (vd != vs) before calling this function.
128 */
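/*
 * Example of rule 3: a widening op with vd = v0 (EMUL = 2, registers v0-v1)
 * and vs = v1 (EMUL = 1) is legal, because the overlap (v1) is the
 * highest-numbered register of the destination group.
 */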
129static bool require_noover(const int8_t dst, const int8_t dst_lmul,
130                           const int8_t src, const int8_t src_lmul)
131{
132    int8_t dst_size = dst_lmul <= 0 ? 1 : 1 << dst_lmul;
133    int8_t src_size = src_lmul <= 0 ? 1 : 1 << src_lmul;
134
135    /* Destination EEW is greater than the source EEW, check rule 3. */
136    if (dst_size > src_size) {
137        if (dst < src &&
138            src_lmul >= 0 &&
139            is_overlapped(dst, dst_size, src, src_size) &&
140            !is_overlapped(dst, dst_size, src + src_size, src_size)) {
141            return true;
142        }
143    }
144
145    return !is_overlapped(dst, dst_size, src, src_size);
146}
147
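/*
 * vsetvl{i} updates vtype and vl, which are reflected in the TB flags, so
 * end the current translation block and continue with a fresh TB lookup
 * using the new configuration.
 */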
148static bool do_vsetvl(DisasContext *s, int rd, int rs1, TCGv s2)
149{
150    TCGv s1, dst;
151
152    if (!require_rvv(s) || !s->cfg_ptr->ext_zve32f) {
153        return false;
154    }
155
156    dst = dest_gpr(s, rd);
157
158    if (rd == 0 && rs1 == 0) {
159        s1 = tcg_temp_new();
160        tcg_gen_mov_tl(s1, cpu_vl);
161    } else if (rs1 == 0) {
162        /* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */
163        s1 = tcg_constant_tl(RV_VLEN_MAX);
164    } else {
165        s1 = get_gpr(s, rs1, EXT_ZERO);
166    }
167
168    gen_helper_vsetvl(dst, tcg_env, s1, s2);
169    gen_set_gpr(s, rd, dst);
170    finalize_rvv_inst(s);
171
172    gen_update_pc(s, s->cur_insn_len);
173    lookup_and_goto_ptr(s);
174    s->base.is_jmp = DISAS_NORETURN;
175    return true;
176}
177
178static bool do_vsetivli(DisasContext *s, int rd, TCGv s1, TCGv s2)
179{
180    TCGv dst;
181
182    if (!require_rvv(s) || !s->cfg_ptr->ext_zve32f) {
183        return false;
184    }
185
186    dst = dest_gpr(s, rd);
187
188    gen_helper_vsetvl(dst, tcg_env, s1, s2);
189    gen_set_gpr(s, rd, dst);
190    finalize_rvv_inst(s);
191    gen_update_pc(s, s->cur_insn_len);
192    lookup_and_goto_ptr(s);
193    s->base.is_jmp = DISAS_NORETURN;
194
195    return true;
196}
197
198static bool trans_vsetvl(DisasContext *s, arg_vsetvl *a)
199{
200    TCGv s2 = get_gpr(s, a->rs2, EXT_ZERO);
201    return do_vsetvl(s, a->rd, a->rs1, s2);
202}
203
204static bool trans_vsetvli(DisasContext *s, arg_vsetvli *a)
205{
206    TCGv s2 = tcg_constant_tl(a->zimm);
207    return do_vsetvl(s, a->rd, a->rs1, s2);
208}
209
210static bool trans_vsetivli(DisasContext *s, arg_vsetivli *a)
211{
212    TCGv s1 = tcg_constant_tl(a->rs1);
213    TCGv s2 = tcg_constant_tl(a->zimm);
214    return do_vsetivli(s, a->rd, s1, s2);
215}
216
217/* vector register offset from env */
218static uint32_t vreg_ofs(DisasContext *s, int reg)
219{
220    return offsetof(CPURISCVState, vreg) + reg * s->cfg_ptr->vlenb;
221}
222
223/* check functions */
224
225/*
226 * Vector unit-stride, strided, unit-stride segment, strided segment
227 * store check function.
228 *
229 * Rules to be checked here:
 *   1. EMUL must be within the range: 1/8 <= EMUL <= 8. (Section 7.3)
 *   2. Destination vector register number is a multiple of EMUL.
232 *      (Section 3.4.2, 7.3)
233 *   3. The EMUL setting must be such that EMUL * NFIELDS ≤ 8. (Section 7.8)
234 *   4. Vector register numbers accessed by the segment load or store
235 *      cannot increment past 31. (Section 7.8)
236 */
237static bool vext_check_store(DisasContext *s, int vd, int nf, uint8_t eew)
238{
239    int8_t emul = eew - s->sew + s->lmul;
240    return (emul >= -3 && emul <= 3) &&
241           require_align(vd, emul) &&
242           require_nf(vd, nf, emul);
243}
244
245/*
246 * Vector unit-stride, strided, unit-stride segment, strided segment
247 * load check function.
248 *
249 * Rules to be checked here:
 *   1. All rules that apply to store instructions also apply
 *      to load instructions.
252 *   2. Destination vector register group for a masked vector
253 *      instruction cannot overlap the source mask register (v0).
254 *      (Section 5.3)
255 */
256static bool vext_check_load(DisasContext *s, int vd, int nf, int vm,
257                            uint8_t eew)
258{
259    return vext_check_store(s, vd, nf, eew) && require_vm(vm, vd);
260}
261
262/*
263 * Vector indexed, indexed segment store check function.
264 *
265 * Rules to be checked here:
 *   1. EMUL must be within the range: 1/8 <= EMUL <= 8. (Section 7.3)
 *   2. Index vector register number is a multiple of EMUL.
 *      (Section 3.4.2, 7.3)
 *   3. Destination vector register number is a multiple of LMUL.
270 *      (Section 3.4.2, 7.3)
271 *   4. The EMUL setting must be such that EMUL * NFIELDS ≤ 8. (Section 7.8)
272 *   5. Vector register numbers accessed by the segment load or store
273 *      cannot increment past 31. (Section 7.8)
274 */
275static bool vext_check_st_index(DisasContext *s, int vd, int vs2, int nf,
276                                uint8_t eew)
277{
278    int8_t emul = eew - s->sew + s->lmul;
279    bool ret = (emul >= -3 && emul <= 3) &&
280               require_align(vs2, emul) &&
281               require_align(vd, s->lmul) &&
282               require_nf(vd, nf, s->lmul);
283
284    /*
     * The V extension supports all vector load and store instructions,
     * except that it does not support EEW=64 index values when XLEN=32.
     * (Section 18.3)
288     */
289    if (get_xl(s) == MXL_RV32) {
290        ret &= (eew != MO_64);
291    }
292
293    return ret;
294}
295
296/*
297 * Vector indexed, indexed segment load check function.
298 *
299 * Rules to be checked here:
 *   1. All rules that apply to store instructions also apply
 *      to load instructions.
302 *   2. Destination vector register group for a masked vector
303 *      instruction cannot overlap the source mask register (v0).
304 *      (Section 5.3)
305 *   3. Destination vector register cannot overlap a source vector
306 *      register (vs2) group.
307 *      (Section 5.2)
308 *   4. Destination vector register groups cannot overlap
309 *      the source vector register (vs2) group for
310 *      indexed segment load instructions. (Section 7.8.3)
311 */
312static bool vext_check_ld_index(DisasContext *s, int vd, int vs2,
313                                int nf, int vm, uint8_t eew)
314{
315    int8_t seg_vd;
316    int8_t emul = eew - s->sew + s->lmul;
317    bool ret = vext_check_st_index(s, vd, vs2, nf, eew) &&
318               require_vm(vm, vd);
319
320    /* Each segment register group has to follow overlap rules. */
321    for (int i = 0; i < nf; ++i) {
322        seg_vd = vd + (1 << MAX(s->lmul, 0)) * i;
323
324        if (eew > s->sew) {
325            if (seg_vd != vs2) {
326                ret &= require_noover(seg_vd, s->lmul, vs2, emul);
327            }
328        } else if (eew < s->sew) {
329            ret &= require_noover(seg_vd, s->lmul, vs2, emul);
330        }
331
332        /*
333         * Destination vector register groups cannot overlap
334         * the source vector register (vs2) group for
335         * indexed segment load instructions.
336         */
337        if (nf > 1) {
338            ret &= !is_overlapped(seg_vd, 1 << MAX(s->lmul, 0),
339                                  vs2, 1 << MAX(emul, 0));
340        }
341    }
342    return ret;
343}
344
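/*
 * In the vext_check_* helpers the suffix letters denote the operand widths,
 * destination first: 's' = single-width (SEW), 'd' = double-width (2 * SEW),
 * 'm' = mask.  vext_check_ss() is the common single-width check: vd and vs
 * must be aligned to LMUL, and a masked instruction must not use v0 as vd.
 */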
345static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm)
346{
347    return require_vm(vm, vd) &&
348           require_align(vd, s->lmul) &&
349           require_align(vs, s->lmul);
350}
351
352/*
353 * Check function for vector instruction with format:
354 * single-width result and single-width sources (SEW = SEW op SEW)
355 *
356 * Rules to be checked here:
357 *   1. Destination vector register group for a masked vector
358 *      instruction cannot overlap the source mask register (v0).
359 *      (Section 5.3)
 *   2. Destination vector register number is a multiple of LMUL.
 *      (Section 3.4.2)
 *   3. Source (vs2, vs1) vector register numbers are multiples of LMUL.
363 *      (Section 3.4.2)
364 */
365static bool vext_check_sss(DisasContext *s, int vd, int vs1, int vs2, int vm)
366{
367    return vext_check_ss(s, vd, vs2, vm) &&
368           require_align(vs1, s->lmul);
369}
370
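/*
 * Common check for instructions that produce a mask result from a
 * single-width source: vs must be aligned to LMUL, and vd may only overlap
 * vs in the lowest-numbered register of the source group (i.e. vd == vs).
 */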
371static bool vext_check_ms(DisasContext *s, int vd, int vs)
372{
373    bool ret = require_align(vs, s->lmul);
374    if (vd != vs) {
375        ret &= require_noover(vd, 0, vs, s->lmul);
376    }
377    return ret;
378}
379
380/*
381 * Check function for maskable vector instruction with format:
382 * single-width result and single-width sources (SEW = SEW op SEW)
383 *
384 * Rules to be checked here:
 *   1. Source (vs2, vs1) vector register numbers are multiples of LMUL.
386 *      (Section 3.4.2)
387 *   2. Destination vector register cannot overlap a source vector
388 *      register (vs2, vs1) group.
389 *      (Section 5.2)
390 *   3. The destination vector register group for a masked vector
391 *      instruction cannot overlap the source mask register (v0),
392 *      unless the destination vector register is being written
393 *      with a mask value (e.g., comparisons) or the scalar result
394 *      of a reduction. (Section 5.3)
395 */
396static bool vext_check_mss(DisasContext *s, int vd, int vs1, int vs2)
397{
398    bool ret = vext_check_ms(s, vd, vs2) &&
399               require_align(vs1, s->lmul);
400    if (vd != vs1) {
401        ret &= require_noover(vd, 0, vs1, s->lmul);
402    }
403    return ret;
404}
405
406/*
407 * Common check function for vector widening instructions
408 * of double-width result (2*SEW).
409 *
410 * Rules to be checked here:
411 *   1. The largest vector register group used by an instruction
 *      cannot be greater than 8 vector registers (Section 5.2):
 *      => LMUL < 8.
 *      => SEW < 64.
 *   2. Double-width SEW cannot be greater than ELEN.
 *   3. Destination vector register number is a multiple of 2 * LMUL.
417 *      (Section 3.4.2)
418 *   4. Destination vector register group for a masked vector
419 *      instruction cannot overlap the source mask register (v0).
420 *      (Section 5.3)
421 */
422static bool vext_wide_check_common(DisasContext *s, int vd, int vm)
423{
424    return (s->lmul <= 2) &&
425           (s->sew < MO_64) &&
426           ((s->sew + 1) <= (s->cfg_ptr->elen >> 4)) &&
427           require_align(vd, s->lmul + 1) &&
428           require_vm(vm, vd);
429}
430
431/*
432 * Common check function for vector narrowing instructions
433 * of single-width result (SEW) and double-width source (2*SEW).
434 *
435 * Rules to be checked here:
436 *   1. The largest vector register group used by an instruction
 *      cannot be greater than 8 vector registers (Section 5.2):
 *      => LMUL < 8.
 *      => SEW < 64.
 *   2. Double-width SEW cannot be greater than ELEN.
 *   3. Source vector register number is a multiple of 2 * LMUL.
 *      (Section 3.4.2)
 *   4. Destination vector register number is a multiple of LMUL.
444 *      (Section 3.4.2)
445 *   5. Destination vector register group for a masked vector
446 *      instruction cannot overlap the source mask register (v0).
447 *      (Section 5.3)
448 */
449static bool vext_narrow_check_common(DisasContext *s, int vd, int vs2,
450                                     int vm)
451{
452    return (s->lmul <= 2) &&
453           (s->sew < MO_64) &&
454           ((s->sew + 1) <= (s->cfg_ptr->elen >> 4)) &&
455           require_align(vs2, s->lmul + 1) &&
456           require_align(vd, s->lmul) &&
457           require_vm(vm, vd);
458}
459
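/*
 * vext_check_ds(): double-width destination, single-width source.
 * vext_check_dd(): double-width destination, double-width source.
 * Both build on the common widening checks above.
 */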
460static bool vext_check_ds(DisasContext *s, int vd, int vs, int vm)
461{
462    return vext_wide_check_common(s, vd, vm) &&
463           require_align(vs, s->lmul) &&
464           require_noover(vd, s->lmul + 1, vs, s->lmul);
465}
466
467static bool vext_check_dd(DisasContext *s, int vd, int vs, int vm)
468{
469    return vext_wide_check_common(s, vd, vm) &&
470           require_align(vs, s->lmul + 1);
471}
472
473/*
474 * Check function for vector instruction with format:
475 * double-width result and single-width sources (2*SEW = SEW op SEW)
476 *
477 * Rules to be checked here:
 *   1. All rules defined in the widen common rules apply.
 *   2. Source (vs2, vs1) vector register numbers are multiples of LMUL.
480 *      (Section 3.4.2)
481 *   3. Destination vector register cannot overlap a source vector
482 *      register (vs2, vs1) group.
483 *      (Section 5.2)
484 */
485static bool vext_check_dss(DisasContext *s, int vd, int vs1, int vs2, int vm)
486{
487    return vext_check_ds(s, vd, vs2, vm) &&
488           require_align(vs1, s->lmul) &&
489           require_noover(vd, s->lmul + 1, vs1, s->lmul);
490}
491
492/*
493 * Check function for vector instruction with format:
494 * double-width result and double-width source1 and single-width
495 * source2 (2*SEW = 2*SEW op SEW)
496 *
497 * Rules to be checked here:
 *   1. All rules defined in the widen common rules apply.
 *   2. Source 1 (vs2) vector register number is a multiple of 2 * LMUL.
 *      (Section 3.4.2)
 *   3. Source 2 (vs1) vector register number is a multiple of LMUL.
502 *      (Section 3.4.2)
503 *   4. Destination vector register cannot overlap a source vector
504 *      register (vs1) group.
505 *      (Section 5.2)
506 */
507static bool vext_check_dds(DisasContext *s, int vd, int vs1, int vs2, int vm)
508{
509    return vext_check_ds(s, vd, vs1, vm) &&
510           require_align(vs2, s->lmul + 1);
511}
512
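/*
 * vext_check_sd(): single-width destination, double-width source (the
 * narrowing counterpart of vext_check_ds()).
 */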
513static bool vext_check_sd(DisasContext *s, int vd, int vs, int vm)
514{
515    bool ret = vext_narrow_check_common(s, vd, vs, vm);
516    if (vd != vs) {
517        ret &= require_noover(vd, s->lmul, vs, s->lmul + 1);
518    }
519    return ret;
520}
521
522/*
523 * Check function for vector instruction with format:
524 * single-width result and double-width source 1 and single-width
525 * source 2 (SEW = 2*SEW op SEW)
526 *
527 * Rules to be checked here:
 *   1. All rules defined in the narrow common rules apply.
529 *   2. Destination vector register cannot overlap a source vector
530 *      register (vs2) group.
531 *      (Section 5.2)
 *   3. Source 2 (vs1) vector register number is a multiple of LMUL.
533 *      (Section 3.4.2)
534 */
535static bool vext_check_sds(DisasContext *s, int vd, int vs1, int vs2, int vm)
536{
537    return vext_check_sd(s, vd, vs2, vm) &&
538           require_align(vs1, s->lmul);
539}
540
541/*
542 * Check function for vector reduction instructions.
543 *
544 * Rules to be checked here:
 *   1. Source 1 (vs2) vector register number is a multiple of LMUL.
546 *      (Section 3.4.2)
547 */
548static bool vext_check_reduction(DisasContext *s, int vs2)
549{
550    return require_align(vs2, s->lmul) && s->vstart_eq_zero;
551}
552
553/*
554 * Check function for vector slide instructions.
555 *
556 * Rules to be checked here:
 *   1. Source 1 (vs2) vector register number is a multiple of LMUL.
 *      (Section 3.4.2)
 *   2. Destination vector register number is a multiple of LMUL.
560 *      (Section 3.4.2)
561 *   3. Destination vector register group for a masked vector
562 *      instruction cannot overlap the source mask register (v0).
563 *      (Section 5.3)
564 *   4. The destination vector register group for vslideup, vslide1up,
565 *      vfslide1up, cannot overlap the source vector register (vs2) group.
566 *      (Section 5.2, 16.3.1, 16.3.3)
567 */
568static bool vext_check_slide(DisasContext *s, int vd, int vs2,
569                             int vm, bool is_over)
570{
571    bool ret = require_align(vs2, s->lmul) &&
572               require_align(vd, s->lmul) &&
573               require_vm(vm, vd);
574    if (is_over) {
575        ret &= (vd != vs2);
576    }
577    return ret;
578}
579
580/*
 * cpu_get_tb_cpu_state() sets VILL if RVV is not present, so checking
 * vill here also covers the RVV presence check.
583 */
584static bool vext_check_isa_ill(DisasContext *s)
585{
586    return !s->vill;
587}
588
589/* common translation macro */
590#define GEN_VEXT_TRANS(NAME, EEW, ARGTYPE, OP, CHECK)        \
591static bool trans_##NAME(DisasContext *s, arg_##ARGTYPE * a) \
592{                                                            \
593    if (CHECK(s, a, EEW)) {                                  \
594        return OP(s, a, EEW);                                \
595    }                                                        \
596    return false;                                            \
597}
598
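/*
 * EMUL = (EEW / SEW) * LMUL, computed here in log2 form.  For example,
 * EEW = 8 (MO_8), SEW = 32 (MO_32) and LMUL = 1 (lmul = 0) gives
 * emul = 0 - 2 + 0 = -2, i.e. EMUL = 1/4.  Negative (fractional) values
 * are clamped to 0 before being stored in the VDATA LMUL field.
 */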
599static uint8_t vext_get_emul(DisasContext *s, uint8_t eew)
600{
601    int8_t emul = eew - s->sew + s->lmul;
602    return emul < 0 ? 0 : emul;
603}
604
605/*
606 *** unit stride load and store
607 */
608typedef void gen_helper_ldst_us(TCGv_ptr, TCGv_ptr, TCGv,
609                                TCGv_env, TCGv_i32);
610
611static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
612                          gen_helper_ldst_us *fn, DisasContext *s,
613                          bool is_store)
614{
615    TCGv_ptr dest, mask;
616    TCGv base;
617    TCGv_i32 desc;
618
619    dest = tcg_temp_new_ptr();
620    mask = tcg_temp_new_ptr();
621    base = get_gpr(s, rs1, EXT_NONE);
622
623    /*
     * simd_desc() supports at most 2048 bytes, but the maximum vector
     * group length in this implementation is 4096 bytes, so the size is
     * split into two parts.
626     *
627     * The first part is vlen in bytes (vlenb), encoded in maxsz of simd_desc.
628     * The second part is lmul, encoded in data of simd_desc.
629     */
630    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
631                                      s->cfg_ptr->vlenb, data));
632
633    tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
634    tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
635
636    /*
637     * According to the specification
638     *
639     *   Additionally, if the Ztso extension is implemented, then vector memory
640     *   instructions in the V extension and Zve family of extensions follow
641     *   RVTSO at the instruction level.  The Ztso extension does not
642     *   strengthen the ordering of intra-instruction element accesses.
643     *
644     * as a result neither ordered nor unordered accesses from the V
645     * instructions need ordering within the loop but we do still need barriers
646     * around the loop.
647     */
648    if (is_store && s->ztso) {
649        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
650    }
651
652    mark_vs_dirty(s);
653
654    fn(dest, mask, base, tcg_env, desc);
655
656    if (!is_store && s->ztso) {
657        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
658    }
659
660    finalize_rvv_inst(s);
661    return true;
662}
663
664static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
665{
666    uint32_t data = 0;
667    gen_helper_ldst_us *fn;
668    static gen_helper_ldst_us * const fns[2][4] = {
669        /* masked unit stride load */
670        { gen_helper_vle8_v_mask, gen_helper_vle16_v_mask,
671          gen_helper_vle32_v_mask, gen_helper_vle64_v_mask },
672        /* unmasked unit stride load */
673        { gen_helper_vle8_v, gen_helper_vle16_v,
674          gen_helper_vle32_v, gen_helper_vle64_v }
675    };
676
677    fn =  fns[a->vm][eew];
678    if (fn == NULL) {
679        return false;
680    }
681
682    /*
683     * Vector load/store instructions have the EEW encoded
684     * directly in the instructions. The maximum vector size is
685     * calculated with EMUL rather than LMUL.
686     */
687    uint8_t emul = vext_get_emul(s, eew);
688    data = FIELD_DP32(data, VDATA, VM, a->vm);
689    data = FIELD_DP32(data, VDATA, LMUL, emul);
690    data = FIELD_DP32(data, VDATA, NF, a->nf);
691    data = FIELD_DP32(data, VDATA, VTA, s->vta);
692    data = FIELD_DP32(data, VDATA, VMA, s->vma);
693    return ldst_us_trans(a->rd, a->rs1, data, fn, s, false);
694}
695
696static bool ld_us_check(DisasContext *s, arg_r2nfvm* a, uint8_t eew)
697{
698    return require_rvv(s) &&
699           vext_check_isa_ill(s) &&
700           vext_check_load(s, a->rd, a->nf, a->vm, eew);
701}
702
703GEN_VEXT_TRANS(vle8_v,  MO_8,  r2nfvm, ld_us_op, ld_us_check)
704GEN_VEXT_TRANS(vle16_v, MO_16, r2nfvm, ld_us_op, ld_us_check)
705GEN_VEXT_TRANS(vle32_v, MO_32, r2nfvm, ld_us_op, ld_us_check)
706GEN_VEXT_TRANS(vle64_v, MO_64, r2nfvm, ld_us_op, ld_us_check)
707
708static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
709{
710    uint32_t data = 0;
711    gen_helper_ldst_us *fn;
712    static gen_helper_ldst_us * const fns[2][4] = {
713        /* masked unit stride store */
714        { gen_helper_vse8_v_mask, gen_helper_vse16_v_mask,
715          gen_helper_vse32_v_mask, gen_helper_vse64_v_mask },
716        /* unmasked unit stride store */
717        { gen_helper_vse8_v, gen_helper_vse16_v,
718          gen_helper_vse32_v, gen_helper_vse64_v }
719    };
720
721    fn =  fns[a->vm][eew];
722    if (fn == NULL) {
723        return false;
724    }
725
726    uint8_t emul = vext_get_emul(s, eew);
727    data = FIELD_DP32(data, VDATA, VM, a->vm);
728    data = FIELD_DP32(data, VDATA, LMUL, emul);
729    data = FIELD_DP32(data, VDATA, NF, a->nf);
730    return ldst_us_trans(a->rd, a->rs1, data, fn, s, true);
731}
732
733static bool st_us_check(DisasContext *s, arg_r2nfvm* a, uint8_t eew)
734{
735    return require_rvv(s) &&
736           vext_check_isa_ill(s) &&
737           vext_check_store(s, a->rd, a->nf, eew);
738}
739
740GEN_VEXT_TRANS(vse8_v,  MO_8,  r2nfvm, st_us_op, st_us_check)
741GEN_VEXT_TRANS(vse16_v, MO_16, r2nfvm, st_us_op, st_us_check)
742GEN_VEXT_TRANS(vse32_v, MO_32, r2nfvm, st_us_op, st_us_check)
743GEN_VEXT_TRANS(vse64_v, MO_64, r2nfvm, st_us_op, st_us_check)
744
745/*
746 *** unit stride mask load and store
747 */
748static bool ld_us_mask_op(DisasContext *s, arg_vlm_v *a, uint8_t eew)
749{
750    uint32_t data = 0;
751    gen_helper_ldst_us *fn = gen_helper_vlm_v;
752
753    /* EMUL = 1, NFIELDS = 1 */
754    data = FIELD_DP32(data, VDATA, LMUL, 0);
755    data = FIELD_DP32(data, VDATA, NF, 1);
    /* Mask destination register is always tail-agnostic */
757    data = FIELD_DP32(data, VDATA, VTA, s->cfg_vta_all_1s);
758    data = FIELD_DP32(data, VDATA, VMA, s->vma);
759    return ldst_us_trans(a->rd, a->rs1, data, fn, s, false);
760}
761
762static bool ld_us_mask_check(DisasContext *s, arg_vlm_v *a, uint8_t eew)
763{
764    /* EMUL = 1, NFIELDS = 1 */
765    return require_rvv(s) && vext_check_isa_ill(s);
766}
767
768static bool st_us_mask_op(DisasContext *s, arg_vsm_v *a, uint8_t eew)
769{
770    uint32_t data = 0;
771    gen_helper_ldst_us *fn = gen_helper_vsm_v;
772
773    /* EMUL = 1, NFIELDS = 1 */
774    data = FIELD_DP32(data, VDATA, LMUL, 0);
775    data = FIELD_DP32(data, VDATA, NF, 1);
776    return ldst_us_trans(a->rd, a->rs1, data, fn, s, true);
777}
778
779static bool st_us_mask_check(DisasContext *s, arg_vsm_v *a, uint8_t eew)
780{
781    /* EMUL = 1, NFIELDS = 1 */
782    return require_rvv(s) && vext_check_isa_ill(s);
783}
784
785GEN_VEXT_TRANS(vlm_v, MO_8, vlm_v, ld_us_mask_op, ld_us_mask_check)
786GEN_VEXT_TRANS(vsm_v, MO_8, vsm_v, st_us_mask_op, st_us_mask_check)
787
788/*
789 *** stride load and store
790 */
791typedef void gen_helper_ldst_stride(TCGv_ptr, TCGv_ptr, TCGv,
792                                    TCGv, TCGv_env, TCGv_i32);
793
794static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
795                              uint32_t data, gen_helper_ldst_stride *fn,
796                              DisasContext *s)
797{
798    TCGv_ptr dest, mask;
799    TCGv base, stride;
800    TCGv_i32 desc;
801
802    dest = tcg_temp_new_ptr();
803    mask = tcg_temp_new_ptr();
804    base = get_gpr(s, rs1, EXT_NONE);
805    stride = get_gpr(s, rs2, EXT_NONE);
806    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
807                                      s->cfg_ptr->vlenb, data));
808
809    tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
810    tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
811
812    mark_vs_dirty(s);
813
814    fn(dest, mask, base, stride, tcg_env, desc);
815
816    finalize_rvv_inst(s);
817    return true;
818}
819
820static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
821{
822    uint32_t data = 0;
823    gen_helper_ldst_stride *fn;
824    static gen_helper_ldst_stride * const fns[4] = {
825        gen_helper_vlse8_v, gen_helper_vlse16_v,
826        gen_helper_vlse32_v, gen_helper_vlse64_v
827    };
828
829    fn = fns[eew];
830    if (fn == NULL) {
831        return false;
832    }
833
834    uint8_t emul = vext_get_emul(s, eew);
835    data = FIELD_DP32(data, VDATA, VM, a->vm);
836    data = FIELD_DP32(data, VDATA, LMUL, emul);
837    data = FIELD_DP32(data, VDATA, NF, a->nf);
838    data = FIELD_DP32(data, VDATA, VTA, s->vta);
839    data = FIELD_DP32(data, VDATA, VMA, s->vma);
840    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
841}
842
843static bool ld_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
844{
845    return require_rvv(s) &&
846           vext_check_isa_ill(s) &&
847           vext_check_load(s, a->rd, a->nf, a->vm, eew);
848}
849
850GEN_VEXT_TRANS(vlse8_v,  MO_8,  rnfvm, ld_stride_op, ld_stride_check)
851GEN_VEXT_TRANS(vlse16_v, MO_16, rnfvm, ld_stride_op, ld_stride_check)
852GEN_VEXT_TRANS(vlse32_v, MO_32, rnfvm, ld_stride_op, ld_stride_check)
853GEN_VEXT_TRANS(vlse64_v, MO_64, rnfvm, ld_stride_op, ld_stride_check)
854
855static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
856{
857    uint32_t data = 0;
858    gen_helper_ldst_stride *fn;
859    static gen_helper_ldst_stride * const fns[4] = {
860        /* masked stride store */
861        gen_helper_vsse8_v,  gen_helper_vsse16_v,
862        gen_helper_vsse32_v,  gen_helper_vsse64_v
863    };
864
865    uint8_t emul = vext_get_emul(s, eew);
866    data = FIELD_DP32(data, VDATA, VM, a->vm);
867    data = FIELD_DP32(data, VDATA, LMUL, emul);
868    data = FIELD_DP32(data, VDATA, NF, a->nf);
869    fn = fns[eew];
870    if (fn == NULL) {
871        return false;
872    }
873
874    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
875}
876
877static bool st_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
878{
879    return require_rvv(s) &&
880           vext_check_isa_ill(s) &&
881           vext_check_store(s, a->rd, a->nf, eew);
882}
883
884GEN_VEXT_TRANS(vsse8_v,  MO_8,  rnfvm, st_stride_op, st_stride_check)
885GEN_VEXT_TRANS(vsse16_v, MO_16, rnfvm, st_stride_op, st_stride_check)
886GEN_VEXT_TRANS(vsse32_v, MO_32, rnfvm, st_stride_op, st_stride_check)
887GEN_VEXT_TRANS(vsse64_v, MO_64, rnfvm, st_stride_op, st_stride_check)
888
889/*
890 *** index load and store
891 */
892typedef void gen_helper_ldst_index(TCGv_ptr, TCGv_ptr, TCGv,
893                                   TCGv_ptr, TCGv_env, TCGv_i32);
894
895static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
896                             uint32_t data, gen_helper_ldst_index *fn,
897                             DisasContext *s)
898{
899    TCGv_ptr dest, mask, index;
900    TCGv base;
901    TCGv_i32 desc;
902
903    dest = tcg_temp_new_ptr();
904    mask = tcg_temp_new_ptr();
905    index = tcg_temp_new_ptr();
906    base = get_gpr(s, rs1, EXT_NONE);
907    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
908                                      s->cfg_ptr->vlenb, data));
909
910    tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
911    tcg_gen_addi_ptr(index, tcg_env, vreg_ofs(s, vs2));
912    tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
913
914    mark_vs_dirty(s);
915
916    fn(dest, mask, base, index, tcg_env, desc);
917
918    finalize_rvv_inst(s);
919    return true;
920}
921
922static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
923{
924    uint32_t data = 0;
925    gen_helper_ldst_index *fn;
926    static gen_helper_ldst_index * const fns[4][4] = {
927        /*
928         * offset vector register group EEW = 8,
929         * data vector register group EEW = SEW
930         */
931        { gen_helper_vlxei8_8_v,  gen_helper_vlxei8_16_v,
932          gen_helper_vlxei8_32_v, gen_helper_vlxei8_64_v },
933        /*
934         * offset vector register group EEW = 16,
935         * data vector register group EEW = SEW
936         */
937        { gen_helper_vlxei16_8_v, gen_helper_vlxei16_16_v,
938          gen_helper_vlxei16_32_v, gen_helper_vlxei16_64_v },
939        /*
940         * offset vector register group EEW = 32,
941         * data vector register group EEW = SEW
942         */
943        { gen_helper_vlxei32_8_v, gen_helper_vlxei32_16_v,
944          gen_helper_vlxei32_32_v, gen_helper_vlxei32_64_v },
945        /*
946         * offset vector register group EEW = 64,
947         * data vector register group EEW = SEW
948         */
949        { gen_helper_vlxei64_8_v, gen_helper_vlxei64_16_v,
950          gen_helper_vlxei64_32_v, gen_helper_vlxei64_64_v }
951    };
952
953    fn = fns[eew][s->sew];
954
955    uint8_t emul = vext_get_emul(s, s->sew);
956    data = FIELD_DP32(data, VDATA, VM, a->vm);
957    data = FIELD_DP32(data, VDATA, LMUL, emul);
958    data = FIELD_DP32(data, VDATA, NF, a->nf);
959    data = FIELD_DP32(data, VDATA, VTA, s->vta);
960    data = FIELD_DP32(data, VDATA, VMA, s->vma);
961    return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s);
962}
963
964static bool ld_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
965{
966    return require_rvv(s) &&
967           vext_check_isa_ill(s) &&
968           vext_check_ld_index(s, a->rd, a->rs2, a->nf, a->vm, eew);
969}
970
971GEN_VEXT_TRANS(vlxei8_v,  MO_8,  rnfvm, ld_index_op, ld_index_check)
972GEN_VEXT_TRANS(vlxei16_v, MO_16, rnfvm, ld_index_op, ld_index_check)
973GEN_VEXT_TRANS(vlxei32_v, MO_32, rnfvm, ld_index_op, ld_index_check)
974GEN_VEXT_TRANS(vlxei64_v, MO_64, rnfvm, ld_index_op, ld_index_check)
975
976static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
977{
978    uint32_t data = 0;
979    gen_helper_ldst_index *fn;
980    static gen_helper_ldst_index * const fns[4][4] = {
981        /*
982         * offset vector register group EEW = 8,
983         * data vector register group EEW = SEW
984         */
985        { gen_helper_vsxei8_8_v,  gen_helper_vsxei8_16_v,
986          gen_helper_vsxei8_32_v, gen_helper_vsxei8_64_v },
987        /*
988         * offset vector register group EEW = 16,
989         * data vector register group EEW = SEW
990         */
991        { gen_helper_vsxei16_8_v, gen_helper_vsxei16_16_v,
992          gen_helper_vsxei16_32_v, gen_helper_vsxei16_64_v },
993        /*
994         * offset vector register group EEW = 32,
995         * data vector register group EEW = SEW
996         */
997        { gen_helper_vsxei32_8_v, gen_helper_vsxei32_16_v,
998          gen_helper_vsxei32_32_v, gen_helper_vsxei32_64_v },
999        /*
1000         * offset vector register group EEW = 64,
1001         * data vector register group EEW = SEW
1002         */
1003        { gen_helper_vsxei64_8_v, gen_helper_vsxei64_16_v,
1004          gen_helper_vsxei64_32_v, gen_helper_vsxei64_64_v }
1005    };
1006
1007    fn = fns[eew][s->sew];
1008
1009    uint8_t emul = vext_get_emul(s, s->sew);
1010    data = FIELD_DP32(data, VDATA, VM, a->vm);
1011    data = FIELD_DP32(data, VDATA, LMUL, emul);
1012    data = FIELD_DP32(data, VDATA, NF, a->nf);
1013    return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s);
1014}
1015
1016static bool st_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
1017{
1018    return require_rvv(s) &&
1019           vext_check_isa_ill(s) &&
1020           vext_check_st_index(s, a->rd, a->rs2, a->nf, eew);
1021}
1022
1023GEN_VEXT_TRANS(vsxei8_v,  MO_8,  rnfvm, st_index_op, st_index_check)
1024GEN_VEXT_TRANS(vsxei16_v, MO_16, rnfvm, st_index_op, st_index_check)
1025GEN_VEXT_TRANS(vsxei32_v, MO_32, rnfvm, st_index_op, st_index_check)
1026GEN_VEXT_TRANS(vsxei64_v, MO_64, rnfvm, st_index_op, st_index_check)
1027
1028/*
1029 *** unit stride fault-only-first load
1030 */
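/*
 * Fault-only-first semantics: a trap is taken only if element 0 faults; a
 * fault on a later element instead truncates vl, so the helper may write
 * back a reduced vl. (Section 7.7)
 */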
1031static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data,
1032                       gen_helper_ldst_us *fn, DisasContext *s)
1033{
1034    TCGv_ptr dest, mask;
1035    TCGv base;
1036    TCGv_i32 desc;
1037
1038    dest = tcg_temp_new_ptr();
1039    mask = tcg_temp_new_ptr();
1040    base = get_gpr(s, rs1, EXT_NONE);
1041    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
1042                                      s->cfg_ptr->vlenb, data));
1043
1044    tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
1045    tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
1046
1047    fn(dest, mask, base, tcg_env, desc);
1048
1049    finalize_rvv_inst(s);
1050    return true;
1051}
1052
1053static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
1054{
1055    uint32_t data = 0;
1056    gen_helper_ldst_us *fn;
1057    static gen_helper_ldst_us * const fns[4] = {
1058        gen_helper_vle8ff_v, gen_helper_vle16ff_v,
1059        gen_helper_vle32ff_v, gen_helper_vle64ff_v
1060    };
1061
1062    fn = fns[eew];
1063    if (fn == NULL) {
1064        return false;
1065    }
1066
1067    uint8_t emul = vext_get_emul(s, eew);
1068    data = FIELD_DP32(data, VDATA, VM, a->vm);
1069    data = FIELD_DP32(data, VDATA, LMUL, emul);
1070    data = FIELD_DP32(data, VDATA, NF, a->nf);
1071    data = FIELD_DP32(data, VDATA, VTA, s->vta);
1072    data = FIELD_DP32(data, VDATA, VMA, s->vma);
1073    return ldff_trans(a->rd, a->rs1, data, fn, s);
1074}
1075
1076GEN_VEXT_TRANS(vle8ff_v,  MO_8,  r2nfvm, ldff_op, ld_us_check)
1077GEN_VEXT_TRANS(vle16ff_v, MO_16, r2nfvm, ldff_op, ld_us_check)
1078GEN_VEXT_TRANS(vle32ff_v, MO_32, r2nfvm, ldff_op, ld_us_check)
1079GEN_VEXT_TRANS(vle64ff_v, MO_64, r2nfvm, ldff_op, ld_us_check)
1080
1081/*
1082 * load and store whole register instructions
1083 */
1084typedef void gen_helper_ldst_whole(TCGv_ptr, TCGv, TCGv_env, TCGv_i32);
1085
1086static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf,
1087                             gen_helper_ldst_whole *fn,
1088                             DisasContext *s)
1089{
1090    TCGv_ptr dest;
1091    TCGv base;
1092    TCGv_i32 desc;
1093
1094    uint32_t data = FIELD_DP32(0, VDATA, NF, nf);
1095    dest = tcg_temp_new_ptr();
1096    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
1097                                      s->cfg_ptr->vlenb, data));
1098
1099    base = get_gpr(s, rs1, EXT_NONE);
1100    tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
1101
1102    mark_vs_dirty(s);
1103
1104    fn(dest, base, tcg_env, desc);
1105
1106    finalize_rvv_inst(s);
1107    return true;
1108}
1109
1110/*
 * Load and store whole register instructions ignore the vtype and vl
 * settings, so we don't need to check the vill bit. (Section 7.9)
1113 */
1114#define GEN_LDST_WHOLE_TRANS(NAME, ARG_NF)                                \
1115static bool trans_##NAME(DisasContext *s, arg_##NAME * a)                 \
1116{                                                                         \
1117    if (require_rvv(s) &&                                                 \
1118        QEMU_IS_ALIGNED(a->rd, ARG_NF)) {                                 \
1119        return ldst_whole_trans(a->rd, a->rs1, ARG_NF,                    \
1120                                gen_helper_##NAME, s);                    \
1121    }                                                                     \
1122    return false;                                                         \
1123}
1124
1125GEN_LDST_WHOLE_TRANS(vl1re8_v,  1)
1126GEN_LDST_WHOLE_TRANS(vl1re16_v, 1)
1127GEN_LDST_WHOLE_TRANS(vl1re32_v, 1)
1128GEN_LDST_WHOLE_TRANS(vl1re64_v, 1)
1129GEN_LDST_WHOLE_TRANS(vl2re8_v,  2)
1130GEN_LDST_WHOLE_TRANS(vl2re16_v, 2)
1131GEN_LDST_WHOLE_TRANS(vl2re32_v, 2)
1132GEN_LDST_WHOLE_TRANS(vl2re64_v, 2)
1133GEN_LDST_WHOLE_TRANS(vl4re8_v,  4)
1134GEN_LDST_WHOLE_TRANS(vl4re16_v, 4)
1135GEN_LDST_WHOLE_TRANS(vl4re32_v, 4)
1136GEN_LDST_WHOLE_TRANS(vl4re64_v, 4)
1137GEN_LDST_WHOLE_TRANS(vl8re8_v,  8)
1138GEN_LDST_WHOLE_TRANS(vl8re16_v, 8)
1139GEN_LDST_WHOLE_TRANS(vl8re32_v, 8)
1140GEN_LDST_WHOLE_TRANS(vl8re64_v, 8)
1141
1142/*
 * The vector whole register store instructions are encoded similarly to
 * unmasked unit-stride stores of elements with EEW=8.
1145 */
1146GEN_LDST_WHOLE_TRANS(vs1r_v, 1)
1147GEN_LDST_WHOLE_TRANS(vs2r_v, 2)
1148GEN_LDST_WHOLE_TRANS(vs4r_v, 4)
1149GEN_LDST_WHOLE_TRANS(vs8r_v, 8)
1150
1151/*
1152 *** Vector Integer Arithmetic Instructions
1153 */
1154
1155/*
 * MAXSZ returns the maximum vector size that can be operated on, in bytes.
 * It is used by the GVEC IR fast path when the vl_eq_vlmax flag is set to
 * true, to accelerate vector operations.
1159 */
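/*
 * For example, with VLEN = 128 bits (vlenb = 16) and LMUL = 2 (lmul = 1),
 * max_sz = 128 and MAXSZ = 128 >> 2 = 32 bytes, i.e. two full registers.
 */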
1160static inline uint32_t MAXSZ(DisasContext *s)
1161{
1162    int max_sz = s->cfg_ptr->vlenb * 8;
1163    return max_sz >> (3 - s->lmul);
1164}
1165
1166static bool opivv_check(DisasContext *s, arg_rmrr *a)
1167{
1168    return require_rvv(s) &&
1169           vext_check_isa_ill(s) &&
1170           vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
1171}
1172
1173typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
1174                        uint32_t, uint32_t, uint32_t);
1175
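/*
 * Two expansion strategies: if the operation is unmasked, vl equals VLMAX
 * and no tail-agnostic handling is needed for a fractional LMUL, expand
 * inline with a host vector (GVEC) op over the whole register group;
 * otherwise fall back to the out-of-line helper, with the VDATA fields
 * packed into simd_desc.
 */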
1176static inline bool
1177do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn,
1178              gen_helper_gvec_4_ptr *fn)
1179{
1180    if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
1181        gvec_fn(s->sew, vreg_ofs(s, a->rd),
1182                vreg_ofs(s, a->rs2), vreg_ofs(s, a->rs1),
1183                MAXSZ(s), MAXSZ(s));
1184    } else {
1185        uint32_t data = 0;
1186
1187        data = FIELD_DP32(data, VDATA, VM, a->vm);
1188        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
1189        data = FIELD_DP32(data, VDATA, VTA, s->vta);
1190        data = FIELD_DP32(data, VDATA, VMA, s->vma);
1191        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
1192                           vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
1193                           tcg_env, s->cfg_ptr->vlenb,
1194                           s->cfg_ptr->vlenb, data, fn);
1195    }
1196    finalize_rvv_inst(s);
1197    return true;
1198}
1199
1200/* OPIVV with GVEC IR */
1201#define GEN_OPIVV_GVEC_TRANS(NAME, SUF) \
1202static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
1203{                                                                  \
1204    static gen_helper_gvec_4_ptr * const fns[4] = {                \
1205        gen_helper_##NAME##_b, gen_helper_##NAME##_h,              \
1206        gen_helper_##NAME##_w, gen_helper_##NAME##_d,              \
1207    };                                                             \
1208    if (!opivv_check(s, a)) {                                      \
1209        return false;                                              \
1210    }                                                              \
1211    return do_opivv_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]);   \
1212}
1213
1214GEN_OPIVV_GVEC_TRANS(vadd_vv, add)
1215GEN_OPIVV_GVEC_TRANS(vsub_vv, sub)
1216
1217typedef void gen_helper_opivx(TCGv_ptr, TCGv_ptr, TCGv, TCGv_ptr,
1218                              TCGv_env, TCGv_i32);
1219
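/*
 * Out-of-line OPIVX expansion: pack the VDATA fields into the simd_desc
 * descriptor and pass pointers to vd, vs2 and the mask register v0, plus
 * the sign-extended scalar rs1, to the helper.
 */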
1220static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm,
1221                        gen_helper_opivx *fn, DisasContext *s)
1222{
1223    TCGv_ptr dest, src2, mask;
1224    TCGv src1;
1225    TCGv_i32 desc;
1226    uint32_t data = 0;
1227
1228    dest = tcg_temp_new_ptr();
1229    mask = tcg_temp_new_ptr();
1230    src2 = tcg_temp_new_ptr();
1231    src1 = get_gpr(s, rs1, EXT_SIGN);
1232
1233    data = FIELD_DP32(data, VDATA, VM, vm);
1234    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
1235    data = FIELD_DP32(data, VDATA, VTA, s->vta);
1236    data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);
1237    data = FIELD_DP32(data, VDATA, VMA, s->vma);
1238    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
1239                                      s->cfg_ptr->vlenb, data));
1240
1241    tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
1242    tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, vs2));
1243    tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
1244
1245    fn(dest, mask, src1, src2, tcg_env, desc);
1246
1247    finalize_rvv_inst(s);
1248    return true;
1249}
1250
1251static bool opivx_check(DisasContext *s, arg_rmrr *a)
1252{
1253    return require_rvv(s) &&
1254           vext_check_isa_ill(s) &&
1255           vext_check_ss(s, a->rd, a->rs2, a->vm);
1256}
1257
1258typedef void GVecGen2sFn(unsigned, uint32_t, uint32_t, TCGv_i64,
1259                         uint32_t, uint32_t);
1260
1261static inline bool
1262do_opivx_gvec(DisasContext *s, arg_rmrr *a, GVecGen2sFn *gvec_fn,
1263              gen_helper_opivx *fn)
1264{
1265    if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
1266        TCGv_i64 src1 = tcg_temp_new_i64();
1267
1268        tcg_gen_ext_tl_i64(src1, get_gpr(s, a->rs1, EXT_SIGN));
1269        gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
1270                src1, MAXSZ(s), MAXSZ(s));
1271
1272        finalize_rvv_inst(s);
1273        return true;
1274    }
1275    return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
1276}
1277
1278/* OPIVX with GVEC IR */
1279#define GEN_OPIVX_GVEC_TRANS(NAME, SUF) \
1280static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
1281{                                                                  \
1282    static gen_helper_opivx * const fns[4] = {                     \
1283        gen_helper_##NAME##_b, gen_helper_##NAME##_h,              \
1284        gen_helper_##NAME##_w, gen_helper_##NAME##_d,              \
1285    };                                                             \
1286    if (!opivx_check(s, a)) {                                      \
1287        return false;                                              \
1288    }                                                              \
1289    return do_opivx_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]);   \
1290}
1291
1292GEN_OPIVX_GVEC_TRANS(vadd_vx, adds)
1293GEN_OPIVX_GVEC_TRANS(vsub_vx, subs)
1294
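/*
 * vrsub computes (scalar - vector), so these expanders simply emit the
 * subtraction with its operands swapped.
 */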
1295static void gen_vec_rsub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
1296{
1297    tcg_gen_vec_sub8_i64(d, b, a);
1298}
1299
1300static void gen_vec_rsub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
1301{
1302    tcg_gen_vec_sub16_i64(d, b, a);
1303}
1304
1305static void gen_rsub_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
1306{
1307    tcg_gen_sub_i32(ret, arg2, arg1);
1308}
1309
1310static void gen_rsub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
1311{
1312    tcg_gen_sub_i64(ret, arg2, arg1);
1313}
1314
1315static void gen_rsub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
1316{
1317    tcg_gen_sub_vec(vece, r, b, a);
1318}
1319
1320static void tcg_gen_gvec_rsubs(unsigned vece, uint32_t dofs, uint32_t aofs,
1321                               TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
1322{
1323    static const TCGOpcode vecop_list[] = { INDEX_op_sub_vec, 0 };
1324    static const GVecGen2s rsub_op[4] = {
1325        { .fni8 = gen_vec_rsub8_i64,
1326          .fniv = gen_rsub_vec,
1327          .fno = gen_helper_vec_rsubs8,
1328          .opt_opc = vecop_list,
1329          .vece = MO_8 },
1330        { .fni8 = gen_vec_rsub16_i64,
1331          .fniv = gen_rsub_vec,
1332          .fno = gen_helper_vec_rsubs16,
1333          .opt_opc = vecop_list,
1334          .vece = MO_16 },
1335        { .fni4 = gen_rsub_i32,
1336          .fniv = gen_rsub_vec,
1337          .fno = gen_helper_vec_rsubs32,
1338          .opt_opc = vecop_list,
1339          .vece = MO_32 },
1340        { .fni8 = gen_rsub_i64,
1341          .fniv = gen_rsub_vec,
1342          .fno = gen_helper_vec_rsubs64,
1343          .opt_opc = vecop_list,
1344          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
1345          .vece = MO_64 },
1346    };
1347
1348    tcg_debug_assert(vece <= MO_64);
1349    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &rsub_op[vece]);
1350}
1351
1352GEN_OPIVX_GVEC_TRANS(vrsub_vx, rsubs)
1353
1354typedef enum {
1355    IMM_ZX,         /* Zero-extended */
1356    IMM_SX,         /* Sign-extended */
1357    IMM_TRUNC_SEW,  /* Truncate to log(SEW) bits */
1358    IMM_TRUNC_2SEW, /* Truncate to log(2*SEW) bits */
1359} imm_mode_t;
1360
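/*
 * Decode the 5-bit immediate according to imm_mode.  The truncating modes
 * keep only log2(SEW) or log2(2*SEW) bits, as used for vector shift amounts.
 */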
1361static int64_t extract_imm(DisasContext *s, uint32_t imm, imm_mode_t imm_mode)
1362{
1363    switch (imm_mode) {
1364    case IMM_ZX:
1365        return extract64(imm, 0, 5);
1366    case IMM_SX:
1367        return sextract64(imm, 0, 5);
1368    case IMM_TRUNC_SEW:
1369        return extract64(imm, 0, s->sew + 3);
1370    case IMM_TRUNC_2SEW:
1371        return extract64(imm, 0, s->sew + 4);
1372    default:
1373        g_assert_not_reached();
1374    }
1375}
1376
1377static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm,
1378                        gen_helper_opivx *fn, DisasContext *s,
1379                        imm_mode_t imm_mode)
1380{
1381    TCGv_ptr dest, src2, mask;
1382    TCGv src1;
1383    TCGv_i32 desc;
1384    uint32_t data = 0;
1385
1386    dest = tcg_temp_new_ptr();
1387    mask = tcg_temp_new_ptr();
1388    src2 = tcg_temp_new_ptr();
1389    src1 = tcg_constant_tl(extract_imm(s, imm, imm_mode));
1390
1391    data = FIELD_DP32(data, VDATA, VM, vm);
1392    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
1393    data = FIELD_DP32(data, VDATA, VTA, s->vta);
1394    data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);
1395    data = FIELD_DP32(data, VDATA, VMA, s->vma);
1396    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
1397                                      s->cfg_ptr->vlenb, data));
1398
1399    tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
1400    tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, vs2));
1401    tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
1402
1403    fn(dest, mask, src1, src2, tcg_env, desc);
1404
1405    finalize_rvv_inst(s);
1406    return true;
1407}
1408
1409typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
1410                         uint32_t, uint32_t);
1411
1412static inline bool
1413do_opivi_gvec(DisasContext *s, arg_rmrr *a, GVecGen2iFn *gvec_fn,
1414              gen_helper_opivx *fn, imm_mode_t imm_mode)
1415{
1416    if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
1417        gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
1418                extract_imm(s, a->rs1, imm_mode), MAXSZ(s), MAXSZ(s));
1419        finalize_rvv_inst(s);
1420        return true;
1421    }
1422    return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s, imm_mode);
1423}
1424
1425/* OPIVI with GVEC IR */
1426#define GEN_OPIVI_GVEC_TRANS(NAME, IMM_MODE, OPIVX, SUF) \
1427static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
1428{                                                                  \
1429    static gen_helper_opivx * const fns[4] = {                     \
1430        gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h,            \
1431        gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d,            \
1432    };                                                             \
1433    if (!opivx_check(s, a)) {                                      \
1434        return false;                                              \
1435    }                                                              \
1436    return do_opivi_gvec(s, a, tcg_gen_gvec_##SUF,                 \
1437                         fns[s->sew], IMM_MODE);                   \
1438}
1439
1440GEN_OPIVI_GVEC_TRANS(vadd_vi, IMM_SX, vadd_vx, addi)
1441
1442static void tcg_gen_gvec_rsubi(unsigned vece, uint32_t dofs, uint32_t aofs,
1443                               int64_t c, uint32_t oprsz, uint32_t maxsz)
1444{
1445    TCGv_i64 tmp = tcg_constant_i64(c);
1446    tcg_gen_gvec_rsubs(vece, dofs, aofs, tmp, oprsz, maxsz);
1447}
1448
1449GEN_OPIVI_GVEC_TRANS(vrsub_vi, IMM_SX, vrsub_vx, rsubi)
1450
1451/* Vector Widening Integer Add/Subtract */
1452
1453/* OPIVV with WIDEN */
1454static bool opivv_widen_check(DisasContext *s, arg_rmrr *a)
1455{
1456    return require_rvv(s) &&
1457           vext_check_isa_ill(s) &&
1458           vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
1459}
1460
1461static bool do_opivv_widen(DisasContext *s, arg_rmrr *a,
1462                           gen_helper_gvec_4_ptr *fn,
1463                           bool (*checkfn)(DisasContext *, arg_rmrr *))
1464{
1465    if (checkfn(s, a)) {
1466        uint32_t data = 0;
1467
1468        data = FIELD_DP32(data, VDATA, VM, a->vm);
1469        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
1470        data = FIELD_DP32(data, VDATA, VTA, s->vta);
1471        data = FIELD_DP32(data, VDATA, VMA, s->vma);
1472        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
1473                           vreg_ofs(s, a->rs1),
1474                           vreg_ofs(s, a->rs2),
1475                           tcg_env, s->cfg_ptr->vlenb,
1476                           s->cfg_ptr->vlenb,
1477                           data, fn);
1478        finalize_rvv_inst(s);
1479        return true;
1480    }
1481    return false;
1482}
1483
1484#define GEN_OPIVV_WIDEN_TRANS(NAME, CHECK) \
1485static bool trans_##NAME(DisasContext *s, arg_rmrr *a)       \
1486{                                                            \
1487    static gen_helper_gvec_4_ptr * const fns[3] = {          \
1488        gen_helper_##NAME##_b,                               \
1489        gen_helper_##NAME##_h,                               \
1490        gen_helper_##NAME##_w                                \
1491    };                                                       \
1492    return do_opivv_widen(s, a, fns[s->sew], CHECK);         \
1493}
1494
1495GEN_OPIVV_WIDEN_TRANS(vwaddu_vv, opivv_widen_check)
1496GEN_OPIVV_WIDEN_TRANS(vwadd_vv, opivv_widen_check)
1497GEN_OPIVV_WIDEN_TRANS(vwsubu_vv, opivv_widen_check)
1498GEN_OPIVV_WIDEN_TRANS(vwsub_vv, opivv_widen_check)
1499
1500/* OPIVX with WIDEN */
1501static bool opivx_widen_check(DisasContext *s, arg_rmrr *a)
1502{
1503    return require_rvv(s) &&
1504           vext_check_isa_ill(s) &&
1505           vext_check_ds(s, a->rd, a->rs2, a->vm);
1506}

#define GEN_OPIVX_WIDEN_TRANS(NAME, CHECK) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                    \
{                                                                         \
    if (CHECK(s, a)) {                                                    \
        static gen_helper_opivx * const fns[3] = {                        \
            gen_helper_##NAME##_b,                                        \
            gen_helper_##NAME##_h,                                        \
            gen_helper_##NAME##_w                                         \
        };                                                                \
        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s); \
    }                                                                     \
    return false;                                                         \
}

GEN_OPIVX_WIDEN_TRANS(vwaddu_vx, opivx_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwadd_vx, opivx_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwsubu_vx, opivx_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwsub_vx, opivx_widen_check)

/* WIDEN OPIVV with WIDEN */
static bool opiwv_widen_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
}

static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a,
                           gen_helper_gvec_4_ptr *fn)
{
    if (opiwv_widen_check(s, a)) {
        uint32_t data = 0;

        data = FIELD_DP32(data, VDATA, VM, a->vm);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
        data = FIELD_DP32(data, VDATA, VTA, s->vta);
        data = FIELD_DP32(data, VDATA, VMA, s->vma);
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                           vreg_ofs(s, a->rs1),
                           vreg_ofs(s, a->rs2),
                           tcg_env, s->cfg_ptr->vlenb,
                           s->cfg_ptr->vlenb, data, fn);
        finalize_rvv_inst(s);
        return true;
    }
    return false;
}

#define GEN_OPIWV_WIDEN_TRANS(NAME) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)       \
{                                                            \
    static gen_helper_gvec_4_ptr * const fns[3] = {          \
        gen_helper_##NAME##_b,                               \
        gen_helper_##NAME##_h,                               \
        gen_helper_##NAME##_w                                \
    };                                                       \
    return do_opiwv_widen(s, a, fns[s->sew]);                \
}

GEN_OPIWV_WIDEN_TRANS(vwaddu_wv)
GEN_OPIWV_WIDEN_TRANS(vwadd_wv)
GEN_OPIWV_WIDEN_TRANS(vwsubu_wv)
GEN_OPIWV_WIDEN_TRANS(vwsub_wv)

/* WIDEN OPIVX with WIDEN */
static bool opiwx_widen_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_dd(s, a->rd, a->rs2, a->vm);
}

static bool do_opiwx_widen(DisasContext *s, arg_rmrr *a,
                           gen_helper_opivx *fn)
{
    if (opiwx_widen_check(s, a)) {
        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
    }
    return false;
}

#define GEN_OPIWX_WIDEN_TRANS(NAME) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)       \
{                                                            \
    static gen_helper_opivx * const fns[3] = {               \
        gen_helper_##NAME##_b,                               \
        gen_helper_##NAME##_h,                               \
        gen_helper_##NAME##_w                                \
    };                                                       \
    return do_opiwx_widen(s, a, fns[s->sew]);                \
}

GEN_OPIWX_WIDEN_TRANS(vwaddu_wx)
GEN_OPIWX_WIDEN_TRANS(vwadd_wx)
GEN_OPIWX_WIDEN_TRANS(vwsubu_wx)
GEN_OPIWX_WIDEN_TRANS(vwsub_wx)

static bool opivv_trans(uint32_t vd, uint32_t vs1, uint32_t vs2, uint32_t vm,
                        gen_helper_gvec_4_ptr *fn, DisasContext *s)
{
    uint32_t data = 0;

    data = FIELD_DP32(data, VDATA, VM, vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, VTA, s->vta);
    data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);
    data = FIELD_DP32(data, VDATA, VMA, s->vma);
    tcg_gen_gvec_4_ptr(vreg_ofs(s, vd), vreg_ofs(s, 0), vreg_ofs(s, vs1),
                       vreg_ofs(s, vs2), tcg_env, s->cfg_ptr->vlenb,
                       s->cfg_ptr->vlenb, data, fn);
    finalize_rvv_inst(s);
    return true;
}

/* Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions */
/* OPIVV without GVEC IR */
#define GEN_OPIVV_TRANS(NAME, CHECK)                                     \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
{                                                                        \
    if (CHECK(s, a)) {                                                   \
        static gen_helper_gvec_4_ptr * const fns[4] = {                  \
            gen_helper_##NAME##_b, gen_helper_##NAME##_h,                \
            gen_helper_##NAME##_w, gen_helper_##NAME##_d,                \
        };                                                               \
        return opivv_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
    }                                                                    \
    return false;                                                        \
}

/*
 * For vadc and vsbc, an illegal instruction exception is raised if the
 * destination vector register is v0 and LMUL > 1. (Section 11.4)
 */
static bool opivv_vadc_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           (a->rd != 0) &&
           vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
}

GEN_OPIVV_TRANS(vadc_vvm, opivv_vadc_check)
GEN_OPIVV_TRANS(vsbc_vvm, opivv_vadc_check)

/*
 * For vmadc and vmsbc, an illegal instruction exception is raised if the
 * destination vector register overlaps a source vector register group.
 */
static bool opivv_vmadc_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_mss(s, a->rd, a->rs1, a->rs2);
}

GEN_OPIVV_TRANS(vmadc_vvm, opivv_vmadc_check)
GEN_OPIVV_TRANS(vmsbc_vvm, opivv_vmadc_check)

static bool opivx_vadc_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           (a->rd != 0) &&
           vext_check_ss(s, a->rd, a->rs2, a->vm);
}

/* OPIVX without GVEC IR */
#define GEN_OPIVX_TRANS(NAME, CHECK)                                     \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
{                                                                        \
    if (CHECK(s, a)) {                                                   \
        static gen_helper_opivx * const fns[4] = {                       \
            gen_helper_##NAME##_b, gen_helper_##NAME##_h,                \
            gen_helper_##NAME##_w, gen_helper_##NAME##_d,                \
        };                                                               \
                                                                         \
        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
    }                                                                    \
    return false;                                                        \
}

GEN_OPIVX_TRANS(vadc_vxm, opivx_vadc_check)
GEN_OPIVX_TRANS(vsbc_vxm, opivx_vadc_check)

static bool opivx_vmadc_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ms(s, a->rd, a->rs2);
}

GEN_OPIVX_TRANS(vmadc_vxm, opivx_vmadc_check)
GEN_OPIVX_TRANS(vmsbc_vxm, opivx_vmadc_check)

/* OPIVI without GVEC IR */
#define GEN_OPIVI_TRANS(NAME, IMM_MODE, OPIVX, CHECK)                    \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
{                                                                        \
    if (CHECK(s, a)) {                                                   \
        static gen_helper_opivx * const fns[4] = {                       \
            gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h,              \
            gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d,              \
        };                                                               \
        return opivi_trans(a->rd, a->rs1, a->rs2, a->vm,                 \
                           fns[s->sew], s, IMM_MODE);                    \
    }                                                                    \
    return false;                                                        \
}

GEN_OPIVI_TRANS(vadc_vim, IMM_SX, vadc_vxm, opivx_vadc_check)
GEN_OPIVI_TRANS(vmadc_vim, IMM_SX, vmadc_vxm, opivx_vmadc_check)

/* Vector Bitwise Logical Instructions */
GEN_OPIVV_GVEC_TRANS(vand_vv, and)
GEN_OPIVV_GVEC_TRANS(vor_vv,  or)
GEN_OPIVV_GVEC_TRANS(vxor_vv, xor)
GEN_OPIVX_GVEC_TRANS(vand_vx, ands)
GEN_OPIVX_GVEC_TRANS(vor_vx,  ors)
GEN_OPIVX_GVEC_TRANS(vxor_vx, xors)
GEN_OPIVI_GVEC_TRANS(vand_vi, IMM_SX, vand_vx, andi)
GEN_OPIVI_GVEC_TRANS(vor_vi, IMM_SX, vor_vx,  ori)
GEN_OPIVI_GVEC_TRANS(vxor_vi, IMM_SX, vxor_vx, xori)

/* Vector Single-Width Bit Shift Instructions */
GEN_OPIVV_GVEC_TRANS(vsll_vv,  shlv)
GEN_OPIVV_GVEC_TRANS(vsrl_vv,  shrv)
GEN_OPIVV_GVEC_TRANS(vsra_vv,  sarv)

typedef void GVecGen2sFn32(unsigned, uint32_t, uint32_t, TCGv_i32,
                           uint32_t, uint32_t);

static inline bool
do_opivx_gvec_shift(DisasContext *s, arg_rmrr *a, GVecGen2sFn32 *gvec_fn,
                    gen_helper_opivx *fn)
{
    if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
        TCGv_i32 src1 = tcg_temp_new_i32();

        tcg_gen_trunc_tl_i32(src1, get_gpr(s, a->rs1, EXT_NONE));
        tcg_gen_extract_i32(src1, src1, 0, s->sew + 3);
        gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
                src1, MAXSZ(s), MAXSZ(s));

        finalize_rvv_inst(s);
        return true;
    }
    return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
}
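
/*
 * Fast-path note for do_opivx_gvec_shift() above: the flat gvec expansion is
 * only used for unmasked operations with vl == vlmax (and not for the
 * tail-agnostic fractional-LMUL case), where the whole register group can be
 * treated as one contiguous vector; otherwise the out-of-line helper handles
 * masking and tail elements.  The scalar shift amount is truncated to
 * log2(SEW) bits via tcg_gen_extract_i32(..., 0, s->sew + 3), matching the
 * architectural rule that only the low log2(SEW) bits of the shift amount
 * are used, e.g. SEW = 32 keeps the low 5 bits, so shift amounts 0..31.
 */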

#define GEN_OPIVX_GVEC_SHIFT_TRANS(NAME, SUF) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                    \
{                                                                         \
    static gen_helper_opivx * const fns[4] = {                            \
        gen_helper_##NAME##_b, gen_helper_##NAME##_h,                     \
        gen_helper_##NAME##_w, gen_helper_##NAME##_d,                     \
    };                                                                    \
    if (!opivx_check(s, a)) {                                             \
        return false;                                                     \
    }                                                                     \
    return do_opivx_gvec_shift(s, a, tcg_gen_gvec_##SUF, fns[s->sew]);    \
}

GEN_OPIVX_GVEC_SHIFT_TRANS(vsll_vx,  shls)
GEN_OPIVX_GVEC_SHIFT_TRANS(vsrl_vx,  shrs)
GEN_OPIVX_GVEC_SHIFT_TRANS(vsra_vx,  sars)

GEN_OPIVI_GVEC_TRANS(vsll_vi, IMM_TRUNC_SEW, vsll_vx, shli)
GEN_OPIVI_GVEC_TRANS(vsrl_vi, IMM_TRUNC_SEW, vsrl_vx, shri)
GEN_OPIVI_GVEC_TRANS(vsra_vi, IMM_TRUNC_SEW, vsra_vx, sari)

/* Vector Narrowing Integer Right Shift Instructions */
static bool opiwv_narrow_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_sds(s, a->rd, a->rs1, a->rs2, a->vm);
}

/* OPIWV with NARROW */
#define GEN_OPIWV_NARROW_TRANS(NAME)                               \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
{                                                                  \
    if (opiwv_narrow_check(s, a)) {                                \
        uint32_t data = 0;                                         \
        static gen_helper_gvec_4_ptr * const fns[3] = {            \
            gen_helper_##NAME##_b,                                 \
            gen_helper_##NAME##_h,                                 \
            gen_helper_##NAME##_w,                                 \
        };                                                         \
                                                                   \
        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
        data = FIELD_DP32(data, VDATA, VTA, s->vta);               \
        data = FIELD_DP32(data, VDATA, VMA, s->vma);               \
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
                           vreg_ofs(s, a->rs1),                    \
                           vreg_ofs(s, a->rs2), tcg_env,           \
                           s->cfg_ptr->vlenb,                      \
                           s->cfg_ptr->vlenb, data,                \
                           fns[s->sew]);                           \
        finalize_rvv_inst(s);                                      \
        return true;                                               \
    }                                                              \
    return false;                                                  \
}
GEN_OPIWV_NARROW_TRANS(vnsra_wv)
GEN_OPIWV_NARROW_TRANS(vnsrl_wv)

static bool opiwx_narrow_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_sd(s, a->rd, a->rs2, a->vm);
}

/* OPIWX with NARROW */
#define GEN_OPIWX_NARROW_TRANS(NAME)                                     \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
{                                                                        \
    if (opiwx_narrow_check(s, a)) {                                      \
        static gen_helper_opivx * const fns[3] = {                       \
            gen_helper_##NAME##_b,                                       \
            gen_helper_##NAME##_h,                                       \
            gen_helper_##NAME##_w,                                       \
        };                                                               \
        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
    }                                                                    \
    return false;                                                        \
}

GEN_OPIWX_NARROW_TRANS(vnsra_wx)
GEN_OPIWX_NARROW_TRANS(vnsrl_wx)

/* OPIWI with NARROW */
#define GEN_OPIWI_NARROW_TRANS(NAME, IMM_MODE, OPIVX)                    \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
{                                                                        \
    if (opiwx_narrow_check(s, a)) {                                      \
        static gen_helper_opivx * const fns[3] = {                       \
            gen_helper_##OPIVX##_b,                                      \
            gen_helper_##OPIVX##_h,                                      \
            gen_helper_##OPIVX##_w,                                      \
        };                                                               \
        return opivi_trans(a->rd, a->rs1, a->rs2, a->vm,                 \
                           fns[s->sew], s, IMM_MODE);                    \
    }                                                                    \
    return false;                                                        \
}

GEN_OPIWI_NARROW_TRANS(vnsra_wi, IMM_ZX, vnsra_wx)
GEN_OPIWI_NARROW_TRANS(vnsrl_wi, IMM_ZX, vnsrl_wx)

/* Vector Integer Comparison Instructions */
/*
 * For all comparison instructions, an illegal instruction exception is raised
 * if the destination vector register overlaps a source vector register group
 * and LMUL > 1.
 */
static bool opivv_cmp_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_mss(s, a->rd, a->rs1, a->rs2);
}

GEN_OPIVV_TRANS(vmseq_vv, opivv_cmp_check)
GEN_OPIVV_TRANS(vmsne_vv, opivv_cmp_check)
GEN_OPIVV_TRANS(vmsltu_vv, opivv_cmp_check)
GEN_OPIVV_TRANS(vmslt_vv, opivv_cmp_check)
GEN_OPIVV_TRANS(vmsleu_vv, opivv_cmp_check)
GEN_OPIVV_TRANS(vmsle_vv, opivv_cmp_check)

static bool opivx_cmp_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ms(s, a->rd, a->rs2);
}

GEN_OPIVX_TRANS(vmseq_vx, opivx_cmp_check)
GEN_OPIVX_TRANS(vmsne_vx, opivx_cmp_check)
GEN_OPIVX_TRANS(vmsltu_vx, opivx_cmp_check)
GEN_OPIVX_TRANS(vmslt_vx, opivx_cmp_check)
GEN_OPIVX_TRANS(vmsleu_vx, opivx_cmp_check)
GEN_OPIVX_TRANS(vmsle_vx, opivx_cmp_check)
GEN_OPIVX_TRANS(vmsgtu_vx, opivx_cmp_check)
GEN_OPIVX_TRANS(vmsgt_vx, opivx_cmp_check)

GEN_OPIVI_TRANS(vmseq_vi, IMM_SX, vmseq_vx, opivx_cmp_check)
GEN_OPIVI_TRANS(vmsne_vi, IMM_SX, vmsne_vx, opivx_cmp_check)
GEN_OPIVI_TRANS(vmsleu_vi, IMM_SX, vmsleu_vx, opivx_cmp_check)
GEN_OPIVI_TRANS(vmsle_vi, IMM_SX, vmsle_vx, opivx_cmp_check)
GEN_OPIVI_TRANS(vmsgtu_vi, IMM_SX, vmsgtu_vx, opivx_cmp_check)
GEN_OPIVI_TRANS(vmsgt_vi, IMM_SX, vmsgt_vx, opivx_cmp_check)

/* Vector Integer Min/Max Instructions */
GEN_OPIVV_GVEC_TRANS(vminu_vv, umin)
GEN_OPIVV_GVEC_TRANS(vmin_vv,  smin)
GEN_OPIVV_GVEC_TRANS(vmaxu_vv, umax)
GEN_OPIVV_GVEC_TRANS(vmax_vv,  smax)
GEN_OPIVX_TRANS(vminu_vx, opivx_check)
GEN_OPIVX_TRANS(vmin_vx,  opivx_check)
GEN_OPIVX_TRANS(vmaxu_vx, opivx_check)
GEN_OPIVX_TRANS(vmax_vx,  opivx_check)

/* Vector Single-Width Integer Multiply Instructions */

static bool vmulh_vv_check(DisasContext *s, arg_rmrr *a)
{
    /*
     * All Zve* extensions support all vector integer instructions,
     * except that the vmulh integer multiply variants
     * that return the high word of the product
     * (vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx)
     * are not included for EEW=64 in Zve64*. (Section 18.2)
     */
    return opivv_check(s, a) &&
           (!has_ext(s, RVV) ? s->sew != MO_64 : true);
}

static bool vmulh_vx_check(DisasContext *s, arg_rmrr *a)
{
    /*
     * All Zve* extensions support all vector integer instructions,
     * except that the vmulh integer multiply variants
     * that return the high word of the product
     * (vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx)
     * are not included for EEW=64 in Zve64*. (Section 18.2)
     */
    return opivx_check(s, a) &&
           (!has_ext(s, RVV) ? s->sew != MO_64 : true);
}

GEN_OPIVV_GVEC_TRANS(vmul_vv,  mul)
GEN_OPIVV_TRANS(vmulh_vv, vmulh_vv_check)
GEN_OPIVV_TRANS(vmulhu_vv, vmulh_vv_check)
GEN_OPIVV_TRANS(vmulhsu_vv, vmulh_vv_check)
GEN_OPIVX_GVEC_TRANS(vmul_vx,  muls)
GEN_OPIVX_TRANS(vmulh_vx, vmulh_vx_check)
GEN_OPIVX_TRANS(vmulhu_vx, vmulh_vx_check)
GEN_OPIVX_TRANS(vmulhsu_vx, vmulh_vx_check)

/* Vector Integer Divide Instructions */
GEN_OPIVV_TRANS(vdivu_vv, opivv_check)
GEN_OPIVV_TRANS(vdiv_vv, opivv_check)
GEN_OPIVV_TRANS(vremu_vv, opivv_check)
GEN_OPIVV_TRANS(vrem_vv, opivv_check)
GEN_OPIVX_TRANS(vdivu_vx, opivx_check)
GEN_OPIVX_TRANS(vdiv_vx, opivx_check)
GEN_OPIVX_TRANS(vremu_vx, opivx_check)
GEN_OPIVX_TRANS(vrem_vx, opivx_check)

/* Vector Widening Integer Multiply Instructions */
GEN_OPIVV_WIDEN_TRANS(vwmul_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwmulu_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwmulsu_vv, opivv_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmul_vx, opivx_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmulu_vx, opivx_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmulsu_vx, opivx_widen_check)

/* Vector Single-Width Integer Multiply-Add Instructions */
GEN_OPIVV_TRANS(vmacc_vv, opivv_check)
GEN_OPIVV_TRANS(vnmsac_vv, opivv_check)
GEN_OPIVV_TRANS(vmadd_vv, opivv_check)
GEN_OPIVV_TRANS(vnmsub_vv, opivv_check)
GEN_OPIVX_TRANS(vmacc_vx, opivx_check)
GEN_OPIVX_TRANS(vnmsac_vx, opivx_check)
GEN_OPIVX_TRANS(vmadd_vx, opivx_check)
GEN_OPIVX_TRANS(vnmsub_vx, opivx_check)

/* Vector Widening Integer Multiply-Add Instructions */
GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx, opivx_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmacc_vx, opivx_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx, opivx_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx, opivx_widen_check)

/* Vector Integer Merge and Move Instructions */
static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
{
    if (require_rvv(s) &&
        vext_check_isa_ill(s) &&
        /* vmv.v.v has rs2 = 0 and vm = 1 */
        vext_check_sss(s, a->rd, a->rs1, 0, 1)) {
        if (s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
            tcg_gen_gvec_mov(s->sew, vreg_ofs(s, a->rd),
                             vreg_ofs(s, a->rs1),
                             MAXSZ(s), MAXSZ(s));
        } else {
            uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
            data = FIELD_DP32(data, VDATA, VTA, s->vta);
            static gen_helper_gvec_2_ptr * const fns[4] = {
                gen_helper_vmv_v_v_b, gen_helper_vmv_v_v_h,
                gen_helper_vmv_v_v_w, gen_helper_vmv_v_v_d,
            };

            tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1),
                               tcg_env, s->cfg_ptr->vlenb,
                               s->cfg_ptr->vlenb, data,
                               fns[s->sew]);
        }
        finalize_rvv_inst(s);
        return true;
    }
    return false;
}

typedef void gen_helper_vmv_vx(TCGv_ptr, TCGv_i64, TCGv_env, TCGv_i32);
static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a)
{
    if (require_rvv(s) &&
        vext_check_isa_ill(s) &&
        /* vmv.v.x has rs2 = 0 and vm = 1 */
        vext_check_ss(s, a->rd, 0, 1)) {
        TCGv s1;

        s1 = get_gpr(s, a->rs1, EXT_SIGN);

        if (s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
            if (get_xl(s) == MXL_RV32 && s->sew == MO_64) {
                TCGv_i64 s1_i64 = tcg_temp_new_i64();
                tcg_gen_ext_tl_i64(s1_i64, s1);
                tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
                                     MAXSZ(s), MAXSZ(s), s1_i64);
            } else {
                tcg_gen_gvec_dup_tl(s->sew, vreg_ofs(s, a->rd),
                                    MAXSZ(s), MAXSZ(s), s1);
            }
        } else {
            TCGv_i32 desc;
            TCGv_i64 s1_i64 = tcg_temp_new_i64();
            TCGv_ptr dest = tcg_temp_new_ptr();
            uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
            data = FIELD_DP32(data, VDATA, VTA, s->vta);
            static gen_helper_vmv_vx * const fns[4] = {
                gen_helper_vmv_v_x_b, gen_helper_vmv_v_x_h,
                gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d,
            };

            tcg_gen_ext_tl_i64(s1_i64, s1);
            desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
                                              s->cfg_ptr->vlenb, data));
            tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, a->rd));
            fns[s->sew](dest, s1_i64, tcg_env, desc);
        }

        finalize_rvv_inst(s);
        return true;
    }
    return false;
}
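
/*
 * Note for vmv.v.x above: on RV32 with SEW = 64 the scalar GPR is only
 * 32 bits wide, so it is first sign-extended to 64 bits
 * (tcg_gen_ext_tl_i64) before being broadcast, matching the sign-extension
 * of X[rs1] that the spec requires when SEW is wider than XLEN.
 */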

static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a)
{
    if (require_rvv(s) &&
        vext_check_isa_ill(s) &&
        /* vmv.v.i has rs2 = 0 and vm = 1 */
        vext_check_ss(s, a->rd, 0, 1)) {
        int64_t simm = sextract64(a->rs1, 0, 5);
        if (s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
            tcg_gen_gvec_dup_imm(s->sew, vreg_ofs(s, a->rd),
                                 MAXSZ(s), MAXSZ(s), simm);
        } else {
            TCGv_i32 desc;
            TCGv_i64 s1;
            TCGv_ptr dest;
            uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
            data = FIELD_DP32(data, VDATA, VTA, s->vta);
            static gen_helper_vmv_vx * const fns[4] = {
                gen_helper_vmv_v_x_b, gen_helper_vmv_v_x_h,
                gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d,
            };

            s1 = tcg_constant_i64(simm);
            dest = tcg_temp_new_ptr();
            desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
                                              s->cfg_ptr->vlenb, data));
            tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, a->rd));
            fns[s->sew](dest, s1, tcg_env, desc);
        }
        finalize_rvv_inst(s);
        return true;
    }
    return false;
}

GEN_OPIVV_TRANS(vmerge_vvm, opivv_vadc_check)
GEN_OPIVX_TRANS(vmerge_vxm, opivx_vadc_check)
GEN_OPIVI_TRANS(vmerge_vim, IMM_SX, vmerge_vxm, opivx_vadc_check)

/*
 *** Vector Fixed-Point Arithmetic Instructions
 */

/* Vector Single-Width Saturating Add and Subtract */
GEN_OPIVV_TRANS(vsaddu_vv, opivv_check)
GEN_OPIVV_TRANS(vsadd_vv,  opivv_check)
GEN_OPIVV_TRANS(vssubu_vv, opivv_check)
GEN_OPIVV_TRANS(vssub_vv,  opivv_check)
GEN_OPIVX_TRANS(vsaddu_vx,  opivx_check)
GEN_OPIVX_TRANS(vsadd_vx,  opivx_check)
GEN_OPIVX_TRANS(vssubu_vx,  opivx_check)
GEN_OPIVX_TRANS(vssub_vx,  opivx_check)
GEN_OPIVI_TRANS(vsaddu_vi, IMM_SX, vsaddu_vx, opivx_check)
GEN_OPIVI_TRANS(vsadd_vi, IMM_SX, vsadd_vx, opivx_check)

/* Vector Single-Width Averaging Add and Subtract */
GEN_OPIVV_TRANS(vaadd_vv, opivv_check)
GEN_OPIVV_TRANS(vaaddu_vv, opivv_check)
GEN_OPIVV_TRANS(vasub_vv, opivv_check)
GEN_OPIVV_TRANS(vasubu_vv, opivv_check)
GEN_OPIVX_TRANS(vaadd_vx,  opivx_check)
GEN_OPIVX_TRANS(vaaddu_vx,  opivx_check)
GEN_OPIVX_TRANS(vasub_vx,  opivx_check)
GEN_OPIVX_TRANS(vasubu_vx,  opivx_check)

/* Vector Single-Width Fractional Multiply with Rounding and Saturation */

static bool vsmul_vv_check(DisasContext *s, arg_rmrr *a)
{
    /*
     * All Zve* extensions support all vector fixed-point arithmetic
     * instructions, except that vsmul.vv and vsmul.vx are not supported
     * for EEW=64 in Zve64*. (Section 18.2)
     */
    return opivv_check(s, a) &&
           (!has_ext(s, RVV) ? s->sew != MO_64 : true);
}

static bool vsmul_vx_check(DisasContext *s, arg_rmrr *a)
{
    /*
     * All Zve* extensions support all vector fixed-point arithmetic
     * instructions, except that vsmul.vv and vsmul.vx are not supported
     * for EEW=64 in Zve64*. (Section 18.2)
     */
    return opivx_check(s, a) &&
           (!has_ext(s, RVV) ? s->sew != MO_64 : true);
}

GEN_OPIVV_TRANS(vsmul_vv, vsmul_vv_check)
GEN_OPIVX_TRANS(vsmul_vx,  vsmul_vx_check)

/* Vector Single-Width Scaling Shift Instructions */
GEN_OPIVV_TRANS(vssrl_vv, opivv_check)
GEN_OPIVV_TRANS(vssra_vv, opivv_check)
GEN_OPIVX_TRANS(vssrl_vx,  opivx_check)
GEN_OPIVX_TRANS(vssra_vx,  opivx_check)
GEN_OPIVI_TRANS(vssrl_vi, IMM_TRUNC_SEW, vssrl_vx, opivx_check)
GEN_OPIVI_TRANS(vssra_vi, IMM_TRUNC_SEW, vssra_vx, opivx_check)

/* Vector Narrowing Fixed-Point Clip Instructions */
GEN_OPIWV_NARROW_TRANS(vnclipu_wv)
GEN_OPIWV_NARROW_TRANS(vnclip_wv)
GEN_OPIWX_NARROW_TRANS(vnclipu_wx)
GEN_OPIWX_NARROW_TRANS(vnclip_wx)
GEN_OPIWI_NARROW_TRANS(vnclipu_wi, IMM_ZX, vnclipu_wx)
GEN_OPIWI_NARROW_TRANS(vnclip_wi, IMM_ZX, vnclip_wx)

/*
 *** Vector Floating-Point Arithmetic Instructions
 */

/*
 * As RVF-only cpus always have values NaN-boxed to 64-bits,
 * RVF and RVD can be treated equally.
 * We don't have to deal with the case of SEW > FLEN.
 *
 * If SEW < FLEN, check whether input fp register is a valid
 * NaN-boxed value, in which case the least-significant SEW bits
 * of the f register are used, else the canonical NaN value is used.
 */
static void do_nanbox(DisasContext *s, TCGv_i64 out, TCGv_i64 in)
{
    switch (s->sew) {
    case 1:
        gen_check_nanbox_h(out, in);
        break;
    case 2:
        gen_check_nanbox_s(out, in);
        break;
    case 3:
        tcg_gen_mov_i64(out, in);
        break;
    default:
        g_assert_not_reached();
    }
}
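
/*
 * Example: with SEW = 16, an f register holding 0xffffffffffff3c00 is a
 * valid NaN-boxed value, so the low 16 bits (0x3c00, fp16 1.0) are used as
 * the scalar operand; a value such as 0x0000000000003c00 fails the check
 * and the canonical NaN is substituted instead, as described above.
 */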

/* Vector Single-Width Floating-Point Add/Subtract Instructions */

/*
 * If the current SEW does not correspond to a supported IEEE floating-point
 * type, an illegal instruction exception is raised.
 */
static bool opfvv_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           require_rvf(s) &&
           vext_check_isa_ill(s) &&
           vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
}

/* OPFVV without GVEC IR */
#define GEN_OPFVV_TRANS(NAME, CHECK)                               \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
{                                                                  \
    if (CHECK(s, a)) {                                             \
        uint32_t data = 0;                                         \
        static gen_helper_gvec_4_ptr * const fns[3] = {            \
            gen_helper_##NAME##_h,                                 \
            gen_helper_##NAME##_w,                                 \
            gen_helper_##NAME##_d,                                 \
        };                                                         \
        gen_set_rm(s, RISCV_FRM_DYN);                              \
                                                                   \
        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
        data = FIELD_DP32(data, VDATA, VTA, s->vta);               \
        data =                                                     \
            FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);\
        data = FIELD_DP32(data, VDATA, VMA, s->vma);               \
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
                           vreg_ofs(s, a->rs1),                    \
                           vreg_ofs(s, a->rs2), tcg_env,           \
                           s->cfg_ptr->vlenb,                      \
                           s->cfg_ptr->vlenb, data,                \
                           fns[s->sew - 1]);                       \
        finalize_rvv_inst(s);                                      \
        return true;                                               \
    }                                                              \
    return false;                                                  \
}
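
/*
 * Note: the FP helper tables only have _h/_w/_d entries, so they are
 * indexed with fns[s->sew - 1]; require_rvf() has already rejected SEW = 8,
 * so the index is always in range.
 */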
GEN_OPFVV_TRANS(vfadd_vv, opfvv_check)
GEN_OPFVV_TRANS(vfsub_vv, opfvv_check)

typedef void gen_helper_opfvf(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_ptr,
                              TCGv_env, TCGv_i32);

static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
                        uint32_t data, gen_helper_opfvf *fn, DisasContext *s)
{
    TCGv_ptr dest, src2, mask;
    TCGv_i32 desc;
    TCGv_i64 t1;

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    src2 = tcg_temp_new_ptr();
    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
                                      s->cfg_ptr->vlenb, data));

    tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, vs2));
    tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));

    /* NaN-box f[rs1] */
    t1 = tcg_temp_new_i64();
    do_nanbox(s, t1, cpu_fpr[rs1]);

    fn(dest, mask, t1, src2, tcg_env, desc);

    finalize_rvv_inst(s);
    return true;
}

/*
 * If the current SEW does not correspond to a supported IEEE floating-point
 * type, an illegal instruction exception is raised
 */
static bool opfvf_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           require_rvf(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ss(s, a->rd, a->rs2, a->vm);
}

/* OPFVF without GVEC IR */
#define GEN_OPFVF_TRANS(NAME, CHECK)                              \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)            \
{                                                                 \
    if (CHECK(s, a)) {                                            \
        uint32_t data = 0;                                        \
        static gen_helper_opfvf *const fns[3] = {                 \
            gen_helper_##NAME##_h,                                \
            gen_helper_##NAME##_w,                                \
            gen_helper_##NAME##_d,                                \
        };                                                        \
        gen_set_rm(s, RISCV_FRM_DYN);                             \
        data = FIELD_DP32(data, VDATA, VM, a->vm);                \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);            \
        data = FIELD_DP32(data, VDATA, VTA, s->vta);              \
        data = FIELD_DP32(data, VDATA, VTA_ALL_1S,                \
                          s->cfg_vta_all_1s);                     \
        data = FIELD_DP32(data, VDATA, VMA, s->vma);              \
        return opfvf_trans(a->rd, a->rs1, a->rs2, data,           \
                           fns[s->sew - 1], s);                   \
    }                                                             \
    return false;                                                 \
}

GEN_OPFVF_TRANS(vfadd_vf,  opfvf_check)
GEN_OPFVF_TRANS(vfsub_vf,  opfvf_check)
GEN_OPFVF_TRANS(vfrsub_vf,  opfvf_check)

/* Vector Widening Floating-Point Add/Subtract Instructions */
static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           require_scale_rvf(s) &&
           (s->sew != MO_8) &&
           vext_check_isa_ill(s) &&
           vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
}

/* OPFVV with WIDEN */
#define GEN_OPFVV_WIDEN_TRANS(NAME, CHECK)                       \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)           \
{                                                                \
    if (CHECK(s, a)) {                                           \
        uint32_t data = 0;                                       \
        static gen_helper_gvec_4_ptr * const fns[2] = {          \
            gen_helper_##NAME##_h, gen_helper_##NAME##_w,        \
        };                                                       \
        gen_set_rm(s, RISCV_FRM_DYN);                            \
                                                                 \
        data = FIELD_DP32(data, VDATA, VM, a->vm);               \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);           \
        data = FIELD_DP32(data, VDATA, VTA, s->vta);             \
        data = FIELD_DP32(data, VDATA, VMA, s->vma);             \
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),   \
                           vreg_ofs(s, a->rs1),                  \
                           vreg_ofs(s, a->rs2), tcg_env,         \
                           s->cfg_ptr->vlenb,                    \
                           s->cfg_ptr->vlenb, data,              \
                           fns[s->sew - 1]);                     \
        finalize_rvv_inst(s);                                    \
        return true;                                             \
    }                                                            \
    return false;                                                \
}

GEN_OPFVV_WIDEN_TRANS(vfwadd_vv, opfvv_widen_check)
GEN_OPFVV_WIDEN_TRANS(vfwsub_vv, opfvv_widen_check)

static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           require_scale_rvf(s) &&
           (s->sew != MO_8) &&
           vext_check_isa_ill(s) &&
           vext_check_ds(s, a->rd, a->rs2, a->vm);
}

/* OPFVF with WIDEN */
#define GEN_OPFVF_WIDEN_TRANS(NAME)                              \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)           \
{                                                                \
    if (opfvf_widen_check(s, a)) {                               \
        uint32_t data = 0;                                       \
        static gen_helper_opfvf *const fns[2] = {                \
            gen_helper_##NAME##_h, gen_helper_##NAME##_w,        \
        };                                                       \
        gen_set_rm(s, RISCV_FRM_DYN);                            \
        data = FIELD_DP32(data, VDATA, VM, a->vm);               \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);           \
        data = FIELD_DP32(data, VDATA, VTA, s->vta);             \
        data = FIELD_DP32(data, VDATA, VMA, s->vma);             \
        return opfvf_trans(a->rd, a->rs1, a->rs2, data,          \
                           fns[s->sew - 1], s);                  \
    }                                                            \
    return false;                                                \
}

GEN_OPFVF_WIDEN_TRANS(vfwadd_vf)
GEN_OPFVF_WIDEN_TRANS(vfwsub_vf)

static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           require_scale_rvf(s) &&
           (s->sew != MO_8) &&
           vext_check_isa_ill(s) &&
           vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
}

/* WIDEN OPFVV with WIDEN */
#define GEN_OPFWV_WIDEN_TRANS(NAME)                                \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
{                                                                  \
    if (opfwv_widen_check(s, a)) {                                 \
        uint32_t data = 0;                                         \
        static gen_helper_gvec_4_ptr * const fns[2] = {            \
            gen_helper_##NAME##_h, gen_helper_##NAME##_w,          \
        };                                                         \
        gen_set_rm(s, RISCV_FRM_DYN);                              \
                                                                   \
        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
        data = FIELD_DP32(data, VDATA, VTA, s->vta);               \
        data = FIELD_DP32(data, VDATA, VMA, s->vma);               \
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
                           vreg_ofs(s, a->rs1),                    \
                           vreg_ofs(s, a->rs2), tcg_env,           \
                           s->cfg_ptr->vlenb,                      \
                           s->cfg_ptr->vlenb, data,                \
                           fns[s->sew - 1]);                       \
        finalize_rvv_inst(s);                                      \
        return true;                                               \
    }                                                              \
    return false;                                                  \
}

GEN_OPFWV_WIDEN_TRANS(vfwadd_wv)
GEN_OPFWV_WIDEN_TRANS(vfwsub_wv)

static bool opfwf_widen_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           require_scale_rvf(s) &&
           (s->sew != MO_8) &&
           vext_check_isa_ill(s) &&
           vext_check_dd(s, a->rd, a->rs2, a->vm);
}

/* WIDEN OPFVF with WIDEN */
#define GEN_OPFWF_WIDEN_TRANS(NAME)                              \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)           \
{                                                                \
    if (opfwf_widen_check(s, a)) {                               \
        uint32_t data = 0;                                       \
        static gen_helper_opfvf *const fns[2] = {                \
            gen_helper_##NAME##_h, gen_helper_##NAME##_w,        \
        };                                                       \
        gen_set_rm(s, RISCV_FRM_DYN);                            \
        data = FIELD_DP32(data, VDATA, VM, a->vm);               \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);           \
        data = FIELD_DP32(data, VDATA, VTA, s->vta);             \
        data = FIELD_DP32(data, VDATA, VMA, s->vma);             \
        return opfvf_trans(a->rd, a->rs1, a->rs2, data,          \
                           fns[s->sew - 1], s);                  \
    }                                                            \
    return false;                                                \
}

GEN_OPFWF_WIDEN_TRANS(vfwadd_wf)
GEN_OPFWF_WIDEN_TRANS(vfwsub_wf)

/* Vector Single-Width Floating-Point Multiply/Divide Instructions */
GEN_OPFVV_TRANS(vfmul_vv, opfvv_check)
GEN_OPFVV_TRANS(vfdiv_vv, opfvv_check)
GEN_OPFVF_TRANS(vfmul_vf,  opfvf_check)
GEN_OPFVF_TRANS(vfdiv_vf,  opfvf_check)
GEN_OPFVF_TRANS(vfrdiv_vf,  opfvf_check)

/* Vector Widening Floating-Point Multiply */
GEN_OPFVV_WIDEN_TRANS(vfwmul_vv, opfvv_widen_check)
GEN_OPFVF_WIDEN_TRANS(vfwmul_vf)

/* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */
GEN_OPFVV_TRANS(vfmacc_vv, opfvv_check)
GEN_OPFVV_TRANS(vfnmacc_vv, opfvv_check)
GEN_OPFVV_TRANS(vfmsac_vv, opfvv_check)
GEN_OPFVV_TRANS(vfnmsac_vv, opfvv_check)
GEN_OPFVV_TRANS(vfmadd_vv, opfvv_check)
GEN_OPFVV_TRANS(vfnmadd_vv, opfvv_check)
GEN_OPFVV_TRANS(vfmsub_vv, opfvv_check)
GEN_OPFVV_TRANS(vfnmsub_vv, opfvv_check)
GEN_OPFVF_TRANS(vfmacc_vf, opfvf_check)
GEN_OPFVF_TRANS(vfnmacc_vf, opfvf_check)
GEN_OPFVF_TRANS(vfmsac_vf, opfvf_check)
GEN_OPFVF_TRANS(vfnmsac_vf, opfvf_check)
GEN_OPFVF_TRANS(vfmadd_vf, opfvf_check)
GEN_OPFVF_TRANS(vfnmadd_vf, opfvf_check)
GEN_OPFVF_TRANS(vfmsub_vf, opfvf_check)
GEN_OPFVF_TRANS(vfnmsub_vf, opfvf_check)

/* Vector Widening Floating-Point Fused Multiply-Add Instructions */
GEN_OPFVV_WIDEN_TRANS(vfwmacc_vv, opfvv_widen_check)
GEN_OPFVV_WIDEN_TRANS(vfwnmacc_vv, opfvv_widen_check)
GEN_OPFVV_WIDEN_TRANS(vfwmsac_vv, opfvv_widen_check)
GEN_OPFVV_WIDEN_TRANS(vfwnmsac_vv, opfvv_widen_check)
GEN_OPFVF_WIDEN_TRANS(vfwmacc_vf)
GEN_OPFVF_WIDEN_TRANS(vfwnmacc_vf)
GEN_OPFVF_WIDEN_TRANS(vfwmsac_vf)
GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf)

/* Vector Floating-Point Square-Root Instruction */

/*
 * If the current SEW does not correspond to a supported IEEE floating-point
 * type, an illegal instruction exception is raised
 */
static bool opfv_check(DisasContext *s, arg_rmr *a)
{
    return require_rvv(s) &&
           require_rvf(s) &&
           vext_check_isa_ill(s) &&
           /* OPFV instructions ignore vs1 check */
           vext_check_ss(s, a->rd, a->rs2, a->vm);
}

static bool do_opfv(DisasContext *s, arg_rmr *a,
                    gen_helper_gvec_3_ptr *fn,
                    bool (*checkfn)(DisasContext *, arg_rmr *),
                    int rm)
{
    if (checkfn(s, a)) {
        uint32_t data = 0;
        gen_set_rm_chkfrm(s, rm);

        data = FIELD_DP32(data, VDATA, VM, a->vm);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
        data = FIELD_DP32(data, VDATA, VTA, s->vta);
        data = FIELD_DP32(data, VDATA, VMA, s->vma);
        tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                           vreg_ofs(s, a->rs2), tcg_env,
                           s->cfg_ptr->vlenb,
                           s->cfg_ptr->vlenb, data, fn);
        finalize_rvv_inst(s);
        return true;
    }
    return false;
}

#define GEN_OPFV_TRANS(NAME, CHECK, FRM)               \
static bool trans_##NAME(DisasContext *s, arg_rmr *a)  \
{                                                      \
    static gen_helper_gvec_3_ptr * const fns[3] = {    \
        gen_helper_##NAME##_h,                         \
        gen_helper_##NAME##_w,                         \
        gen_helper_##NAME##_d                          \
    };                                                 \
    return do_opfv(s, a, fns[s->sew - 1], CHECK, FRM); \
}

GEN_OPFV_TRANS(vfsqrt_v, opfv_check, RISCV_FRM_DYN)
GEN_OPFV_TRANS(vfrsqrt7_v, opfv_check, RISCV_FRM_DYN)
GEN_OPFV_TRANS(vfrec7_v, opfv_check, RISCV_FRM_DYN)

/* Vector Floating-Point MIN/MAX Instructions */
GEN_OPFVV_TRANS(vfmin_vv, opfvv_check)
GEN_OPFVV_TRANS(vfmax_vv, opfvv_check)
GEN_OPFVF_TRANS(vfmin_vf, opfvf_check)
GEN_OPFVF_TRANS(vfmax_vf, opfvf_check)

/* Vector Floating-Point Sign-Injection Instructions */
GEN_OPFVV_TRANS(vfsgnj_vv, opfvv_check)
GEN_OPFVV_TRANS(vfsgnjn_vv, opfvv_check)
GEN_OPFVV_TRANS(vfsgnjx_vv, opfvv_check)
GEN_OPFVF_TRANS(vfsgnj_vf, opfvf_check)
GEN_OPFVF_TRANS(vfsgnjn_vf, opfvf_check)
GEN_OPFVF_TRANS(vfsgnjx_vf, opfvf_check)

/* Vector Floating-Point Compare Instructions */
static bool opfvv_cmp_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           require_rvf(s) &&
           vext_check_isa_ill(s) &&
           vext_check_mss(s, a->rd, a->rs1, a->rs2);
}

GEN_OPFVV_TRANS(vmfeq_vv, opfvv_cmp_check)
GEN_OPFVV_TRANS(vmfne_vv, opfvv_cmp_check)
GEN_OPFVV_TRANS(vmflt_vv, opfvv_cmp_check)
GEN_OPFVV_TRANS(vmfle_vv, opfvv_cmp_check)

static bool opfvf_cmp_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           require_rvf(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ms(s, a->rd, a->rs2);
}

GEN_OPFVF_TRANS(vmfeq_vf, opfvf_cmp_check)
GEN_OPFVF_TRANS(vmfne_vf, opfvf_cmp_check)
GEN_OPFVF_TRANS(vmflt_vf, opfvf_cmp_check)
GEN_OPFVF_TRANS(vmfle_vf, opfvf_cmp_check)
GEN_OPFVF_TRANS(vmfgt_vf, opfvf_cmp_check)
GEN_OPFVF_TRANS(vmfge_vf, opfvf_cmp_check)

/* Vector Floating-Point Classify Instruction */
GEN_OPFV_TRANS(vfclass_v, opfv_check, RISCV_FRM_DYN)

/* Vector Floating-Point Merge Instruction */
GEN_OPFVF_TRANS(vfmerge_vfm,  opfvf_check)

static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
{
    if (require_rvv(s) &&
        require_rvf(s) &&
        vext_check_isa_ill(s) &&
        require_align(a->rd, s->lmul)) {
        gen_set_rm(s, RISCV_FRM_DYN);

        TCGv_i64 t1;

        if (s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
            t1 = tcg_temp_new_i64();
            /* NaN-box f[rs1] */
            do_nanbox(s, t1, cpu_fpr[a->rs1]);

            tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
                                 MAXSZ(s), MAXSZ(s), t1);
        } else {
            TCGv_ptr dest;
            TCGv_i32 desc;
            uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
            data = FIELD_DP32(data, VDATA, VTA, s->vta);
            data = FIELD_DP32(data, VDATA, VMA, s->vma);
            static gen_helper_vmv_vx * const fns[3] = {
                gen_helper_vmv_v_x_h,
                gen_helper_vmv_v_x_w,
                gen_helper_vmv_v_x_d,
            };

            t1 = tcg_temp_new_i64();
            /* NaN-box f[rs1] */
            do_nanbox(s, t1, cpu_fpr[a->rs1]);

            dest = tcg_temp_new_ptr();
            desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
                                              s->cfg_ptr->vlenb, data));
            tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, a->rd));

            fns[s->sew - 1](dest, t1, tcg_env, desc);
        }
        finalize_rvv_inst(s);
        return true;
    }
    return false;
}

/* Single-Width Floating-Point/Integer Type-Convert Instructions */
#define GEN_OPFV_CVT_TRANS(NAME, HELPER, FRM)               \
static bool trans_##NAME(DisasContext *s, arg_rmr *a)       \
{                                                           \
    static gen_helper_gvec_3_ptr * const fns[3] = {         \
        gen_helper_##HELPER##_h,                            \
        gen_helper_##HELPER##_w,                            \
        gen_helper_##HELPER##_d                             \
    };                                                      \
    return do_opfv(s, a, fns[s->sew - 1], opfv_check, FRM); \
}

GEN_OPFV_CVT_TRANS(vfcvt_xu_f_v, vfcvt_xu_f_v, RISCV_FRM_DYN)
GEN_OPFV_CVT_TRANS(vfcvt_x_f_v, vfcvt_x_f_v, RISCV_FRM_DYN)
GEN_OPFV_CVT_TRANS(vfcvt_f_xu_v, vfcvt_f_xu_v, RISCV_FRM_DYN)
GEN_OPFV_CVT_TRANS(vfcvt_f_x_v, vfcvt_f_x_v, RISCV_FRM_DYN)
/* Reuse the helper functions from vfcvt.xu.f.v and vfcvt.x.f.v */
GEN_OPFV_CVT_TRANS(vfcvt_rtz_xu_f_v, vfcvt_xu_f_v, RISCV_FRM_RTZ)
GEN_OPFV_CVT_TRANS(vfcvt_rtz_x_f_v, vfcvt_x_f_v, RISCV_FRM_RTZ)
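
/*
 * The _rtz_ variants above use the same helpers as the dynamic-rounding
 * forms; the only difference is that do_opfv() forces the rounding mode to
 * round-towards-zero via gen_set_rm_chkfrm(s, RISCV_FRM_RTZ) instead of
 * using the dynamic frm.
 */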

/* Widening Floating-Point/Integer Type-Convert Instructions */

/*
 * If the current SEW does not correspond to a supported IEEE floating-point
 * type, an illegal instruction exception is raised
 */
static bool opfv_widen_check(DisasContext *s, arg_rmr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ds(s, a->rd, a->rs2, a->vm);
}

static bool opxfv_widen_check(DisasContext *s, arg_rmr *a)
{
    return opfv_widen_check(s, a) &&
           require_rvf(s);
}

static bool opffv_widen_check(DisasContext *s, arg_rmr *a)
{
    return opfv_widen_check(s, a) &&
           require_scale_rvfmin(s) &&
           (s->sew != MO_8);
}

#define GEN_OPFV_WIDEN_TRANS(NAME, CHECK, HELPER, FRM)             \
static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
{                                                                  \
    if (CHECK(s, a)) {                                             \
        uint32_t data = 0;                                         \
        static gen_helper_gvec_3_ptr * const fns[2] = {            \
            gen_helper_##HELPER##_h,                               \
            gen_helper_##HELPER##_w,                               \
        };                                                         \
        gen_set_rm_chkfrm(s, FRM);                                 \
                                                                   \
        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
        data = FIELD_DP32(data, VDATA, VTA, s->vta);               \
        data = FIELD_DP32(data, VDATA, VMA, s->vma);               \
        tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
                           vreg_ofs(s, a->rs2), tcg_env,           \
                           s->cfg_ptr->vlenb,                      \
                           s->cfg_ptr->vlenb, data,                \
                           fns[s->sew - 1]);                       \
        finalize_rvv_inst(s);                                      \
        return true;                                               \
    }                                                              \
    return false;                                                  \
}

GEN_OPFV_WIDEN_TRANS(vfwcvt_xu_f_v, opxfv_widen_check, vfwcvt_xu_f_v,
                     RISCV_FRM_DYN)
GEN_OPFV_WIDEN_TRANS(vfwcvt_x_f_v, opxfv_widen_check, vfwcvt_x_f_v,
                     RISCV_FRM_DYN)
GEN_OPFV_WIDEN_TRANS(vfwcvt_f_f_v, opffv_widen_check, vfwcvt_f_f_v,
                     RISCV_FRM_DYN)
/* Reuse the helper functions from vfwcvt.xu.f.v and vfwcvt.x.f.v */
GEN_OPFV_WIDEN_TRANS(vfwcvt_rtz_xu_f_v, opxfv_widen_check, vfwcvt_xu_f_v,
                     RISCV_FRM_RTZ)
GEN_OPFV_WIDEN_TRANS(vfwcvt_rtz_x_f_v, opxfv_widen_check, vfwcvt_x_f_v,
                     RISCV_FRM_RTZ)

static bool opfxv_widen_check(DisasContext *s, arg_rmr *a)
{
    return require_rvv(s) &&
           require_scale_rvf(s) &&
           vext_check_isa_ill(s) &&
           /* OPFV widening instructions ignore vs1 check */
           vext_check_ds(s, a->rd, a->rs2, a->vm);
}

#define GEN_OPFXV_WIDEN_TRANS(NAME)                                \
static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
{                                                                  \
    if (opfxv_widen_check(s, a)) {                                 \
        uint32_t data = 0;                                         \
        static gen_helper_gvec_3_ptr * const fns[3] = {            \
            gen_helper_##NAME##_b,                                 \
            gen_helper_##NAME##_h,                                 \
            gen_helper_##NAME##_w,                                 \
        };                                                         \
        gen_set_rm(s, RISCV_FRM_DYN);                              \
                                                                   \
        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
        data = FIELD_DP32(data, VDATA, VTA, s->vta);               \
        data = FIELD_DP32(data, VDATA, VMA, s->vma);               \
        tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
                           vreg_ofs(s, a->rs2), tcg_env,           \
                           s->cfg_ptr->vlenb,                      \
                           s->cfg_ptr->vlenb, data,                \
                           fns[s->sew]);                           \
        finalize_rvv_inst(s);                                      \
        return true;                                               \
    }                                                              \
    return false;                                                  \
}

GEN_OPFXV_WIDEN_TRANS(vfwcvt_f_xu_v)
GEN_OPFXV_WIDEN_TRANS(vfwcvt_f_x_v)

/* Narrowing Floating-Point/Integer Type-Convert Instructions */

/*
 * If the current SEW does not correspond to a supported IEEE floating-point
 * type, an illegal instruction exception is raised
 */
static bool opfv_narrow_check(DisasContext *s, arg_rmr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           /* OPFV narrowing instructions ignore vs1 check */
           vext_check_sd(s, a->rd, a->rs2, a->vm);
}

static bool opfxv_narrow_check(DisasContext *s, arg_rmr *a)
{
    return opfv_narrow_check(s, a) &&
           require_rvf(s) &&
           (s->sew != MO_64);
}

static bool opffv_narrow_check(DisasContext *s, arg_rmr *a)
{
    return opfv_narrow_check(s, a) &&
           require_scale_rvfmin(s) &&
           (s->sew != MO_8);
}

static bool opffv_rod_narrow_check(DisasContext *s, arg_rmr *a)
{
    return opfv_narrow_check(s, a) &&
           require_scale_rvf(s) &&
           (s->sew != MO_8);
}

#define GEN_OPFV_NARROW_TRANS(NAME, CHECK, HELPER, FRM)            \
static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
{                                                                  \
    if (CHECK(s, a)) {                                             \
        uint32_t data = 0;                                         \
        static gen_helper_gvec_3_ptr * const fns[2] = {            \
            gen_helper_##HELPER##_h,                               \
            gen_helper_##HELPER##_w,                               \
        };                                                         \
        gen_set_rm_chkfrm(s, FRM);                                 \
                                                                   \
        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
        data = FIELD_DP32(data, VDATA, VTA, s->vta);               \
        data = FIELD_DP32(data, VDATA, VMA, s->vma);               \
        tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
                           vreg_ofs(s, a->rs2), tcg_env,           \
                           s->cfg_ptr->vlenb,                      \
                           s->cfg_ptr->vlenb, data,                \
                           fns[s->sew - 1]);                       \
        finalize_rvv_inst(s);                                      \
        return true;                                               \
    }                                                              \
    return false;                                                  \
}

GEN_OPFV_NARROW_TRANS(vfncvt_f_xu_w, opfxv_narrow_check, vfncvt_f_xu_w,
                      RISCV_FRM_DYN)
GEN_OPFV_NARROW_TRANS(vfncvt_f_x_w, opfxv_narrow_check, vfncvt_f_x_w,
                      RISCV_FRM_DYN)
GEN_OPFV_NARROW_TRANS(vfncvt_f_f_w, opffv_narrow_check, vfncvt_f_f_w,
                      RISCV_FRM_DYN)
/* Reuse the helper function from vfncvt.f.f.w */
GEN_OPFV_NARROW_TRANS(vfncvt_rod_f_f_w, opffv_rod_narrow_check, vfncvt_f_f_w,
                      RISCV_FRM_ROD)

static bool opxfv_narrow_check(DisasContext *s, arg_rmr *a)
{
    return require_rvv(s) &&
           require_scale_rvf(s) &&
           vext_check_isa_ill(s) &&
           /* OPFV narrowing instructions ignore vs1 check */
           vext_check_sd(s, a->rd, a->rs2, a->vm);
}
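
/*
 * For the float-to-integer narrowing conversions the helper is selected
 * directly by s->sew (_b/_h/_w); require_scale_rvf() in the check above
 * already excludes MO_64.
 */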

#define GEN_OPXFV_NARROW_TRANS(NAME, HELPER, FRM)                  \
static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
{                                                                  \
    if (opxfv_narrow_check(s, a)) {                                \
        uint32_t data = 0;                                         \
        static gen_helper_gvec_3_ptr * const fns[3] = {            \
            gen_helper_##HELPER##_b,                               \
            gen_helper_##HELPER##_h,                               \
            gen_helper_##HELPER##_w,                               \
        };                                                         \
        gen_set_rm_chkfrm(s, FRM);                                 \
                                                                   \
        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
        data = FIELD_DP32(data, VDATA, VTA, s->vta);               \
        data = FIELD_DP32(data, VDATA, VMA, s->vma);               \
        tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
                           vreg_ofs(s, a->rs2), tcg_env,           \
                           s->cfg_ptr->vlenb,                      \
                           s->cfg_ptr->vlenb, data,                \
                           fns[s->sew]);                           \
        finalize_rvv_inst(s);                                      \
        return true;                                               \
    }                                                              \
    return false;                                                  \
}

GEN_OPXFV_NARROW_TRANS(vfncvt_xu_f_w, vfncvt_xu_f_w, RISCV_FRM_DYN)
GEN_OPXFV_NARROW_TRANS(vfncvt_x_f_w, vfncvt_x_f_w, RISCV_FRM_DYN)
/* Reuse the helper functions from vfncvt.xu.f.w and vfncvt.x.f.w */
GEN_OPXFV_NARROW_TRANS(vfncvt_rtz_xu_f_w, vfncvt_xu_f_w, RISCV_FRM_RTZ)
GEN_OPXFV_NARROW_TRANS(vfncvt_rtz_x_f_w, vfncvt_x_f_w, RISCV_FRM_RTZ)

/*
 *** Vector Reduction Operations
 */
/* Vector Single-Width Integer Reduction Instructions */
static bool reduction_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_reduction(s, a->rs2);
}

GEN_OPIVV_TRANS(vredsum_vs, reduction_check)
GEN_OPIVV_TRANS(vredmaxu_vs, reduction_check)
GEN_OPIVV_TRANS(vredmax_vs, reduction_check)
GEN_OPIVV_TRANS(vredminu_vs, reduction_check)
GEN_OPIVV_TRANS(vredmin_vs, reduction_check)
GEN_OPIVV_TRANS(vredand_vs, reduction_check)
GEN_OPIVV_TRANS(vredor_vs, reduction_check)
GEN_OPIVV_TRANS(vredxor_vs, reduction_check)

/* Vector Widening Integer Reduction Instructions */
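/*
 * The second clause in the check below keeps the widened element
 * (2 * SEW) within ELEN: s->sew + 1 is the MO_* size of the widened
 * destination and elen >> 4 scales ELEN (in bits) to a comparable bound.
 */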
static bool reduction_widen_check(DisasContext *s, arg_rmrr *a)
{
    return reduction_check(s, a) && (s->sew < MO_64) &&
           ((s->sew + 1) <= (s->cfg_ptr->elen >> 4));
}

GEN_OPIVV_WIDEN_TRANS(vwredsum_vs, reduction_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwredsumu_vs, reduction_widen_check)

/* Vector Single-Width Floating-Point Reduction Instructions */
static bool freduction_check(DisasContext *s, arg_rmrr *a)
{
    return reduction_check(s, a) &&
           require_rvf(s);
}

GEN_OPFVV_TRANS(vfredusum_vs, freduction_check)
GEN_OPFVV_TRANS(vfredosum_vs, freduction_check)
GEN_OPFVV_TRANS(vfredmax_vs, freduction_check)
GEN_OPFVV_TRANS(vfredmin_vs, freduction_check)

/* Vector Widening Floating-Point Reduction Instructions */
static bool freduction_widen_check(DisasContext *s, arg_rmrr *a)
{
    return reduction_widen_check(s, a) &&
           require_scale_rvf(s) &&
           (s->sew != MO_8);
}

GEN_OPFVV_WIDEN_TRANS(vfwredusum_vs, freduction_widen_check)
GEN_OPFVV_WIDEN_TRANS(vfwredosum_vs, freduction_widen_check)

/*
 *** Vector Mask Operations
 */

/* Vector Mask-Register Logical Instructions */
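/*
 * Mask operands always occupy a single vector register regardless of
 * LMUL.  The destination tail is always treated as tail-agnostic, so
 * only the vta_all_1s configuration is passed down, not vtype.vta.
 */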
#define GEN_MM_TRANS(NAME)                                         \
static bool trans_##NAME(DisasContext *s, arg_r *a)                \
{                                                                  \
    if (require_rvv(s) &&                                          \
        vext_check_isa_ill(s)) {                                   \
        uint32_t data = 0;                                         \
        gen_helper_gvec_4_ptr *fn = gen_helper_##NAME;             \
                                                                   \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
        data =                                                     \
            FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);\
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
                           vreg_ofs(s, a->rs1),                    \
                           vreg_ofs(s, a->rs2), tcg_env,           \
                           s->cfg_ptr->vlenb,                      \
                           s->cfg_ptr->vlenb, data, fn);           \
        finalize_rvv_inst(s);                                      \
        return true;                                               \
    }                                                              \
    return false;                                                  \
}

GEN_MM_TRANS(vmand_mm)
GEN_MM_TRANS(vmnand_mm)
GEN_MM_TRANS(vmandn_mm)
GEN_MM_TRANS(vmxor_mm)
GEN_MM_TRANS(vmor_mm)
GEN_MM_TRANS(vmnor_mm)
GEN_MM_TRANS(vmorn_mm)
GEN_MM_TRANS(vmxnor_mm)

/* Vector count population in mask (vcpop.m) */
static bool trans_vcpop_m(DisasContext *s, arg_rmr *a)
{
    if (require_rvv(s) &&
        vext_check_isa_ill(s) &&
        s->vstart_eq_zero) {
        TCGv_ptr src2, mask;
        TCGv dst;
        TCGv_i32 desc;
        uint32_t data = 0;
        data = FIELD_DP32(data, VDATA, VM, a->vm);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);

        mask = tcg_temp_new_ptr();
        src2 = tcg_temp_new_ptr();
        dst = dest_gpr(s, a->rd);
        desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
                                          s->cfg_ptr->vlenb, data));

        tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, a->rs2));
        tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));

        gen_helper_vcpop_m(dst, mask, src2, tcg_env, desc);
        gen_set_gpr(s, a->rd, dst);
        return true;
    }
    return false;
}

/* vfirst.m find-first-set mask bit */
static bool trans_vfirst_m(DisasContext *s, arg_rmr *a)
{
    if (require_rvv(s) &&
        vext_check_isa_ill(s) &&
        s->vstart_eq_zero) {
        TCGv_ptr src2, mask;
        TCGv dst;
        TCGv_i32 desc;
        uint32_t data = 0;
        data = FIELD_DP32(data, VDATA, VM, a->vm);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);

        mask = tcg_temp_new_ptr();
        src2 = tcg_temp_new_ptr();
        dst = dest_gpr(s, a->rd);
        desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
                                          s->cfg_ptr->vlenb, data));

        tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, a->rs2));
        tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));

        gen_helper_vfirst_m(dst, mask, src2, tcg_env, desc);
        gen_set_gpr(s, a->rd, dst);
        return true;
    }
    return false;
}

/*
 * vmsbf.m set-before-first mask bit
 * vmsif.m set-including-first mask bit
 * vmsof.m set-only-first mask bit
 */
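/*
 * All three share the checks below: vd cannot overlap vs2 or, when
 * masked, v0, and an illegal instruction exception is raised when
 * vstart is non-zero.
 */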
#define GEN_M_TRANS(NAME)                                          \
static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
{                                                                  \
    if (require_rvv(s) &&                                          \
        vext_check_isa_ill(s) &&                                   \
        require_vm(a->vm, a->rd) &&                                \
        (a->rd != a->rs2) &&                                       \
        s->vstart_eq_zero) {                                       \
        uint32_t data = 0;                                         \
        gen_helper_gvec_3_ptr *fn = gen_helper_##NAME;             \
                                                                   \
        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
        data =                                                     \
            FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);\
        data = FIELD_DP32(data, VDATA, VMA, s->vma);               \
        tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd),                     \
                           vreg_ofs(s, 0), vreg_ofs(s, a->rs2),    \
                           tcg_env, s->cfg_ptr->vlenb,             \
                           s->cfg_ptr->vlenb,                      \
                           data, fn);                              \
        finalize_rvv_inst(s);                                      \
        return true;                                               \
    }                                                              \
    return false;                                                  \
}

GEN_M_TRANS(vmsbf_m)
GEN_M_TRANS(vmsif_m)
GEN_M_TRANS(vmsof_m)

/*
 * Vector Iota Instruction
 *
 * 1. The destination register cannot overlap the source register.
 * 2. If masked, cannot overlap the mask register ('v0').
 * 3. An illegal instruction exception is raised if vstart is non-zero.
 */
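/*
 * The destination group spans 1 << MAX(lmul, 0) registers while the
 * mask source vs2 always occupies a single register, hence the
 * asymmetric is_overlapped() arguments below.
 */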
static bool trans_viota_m(DisasContext *s, arg_viota_m *a)
{
    if (require_rvv(s) &&
        vext_check_isa_ill(s) &&
        !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs2, 1) &&
        require_vm(a->vm, a->rd) &&
        require_align(a->rd, s->lmul) &&
        s->vstart_eq_zero) {
        uint32_t data = 0;

        data = FIELD_DP32(data, VDATA, VM, a->vm);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
        data = FIELD_DP32(data, VDATA, VTA, s->vta);
        data = FIELD_DP32(data, VDATA, VMA, s->vma);
        static gen_helper_gvec_3_ptr * const fns[4] = {
            gen_helper_viota_m_b, gen_helper_viota_m_h,
            gen_helper_viota_m_w, gen_helper_viota_m_d,
        };
        tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                           vreg_ofs(s, a->rs2), tcg_env,
                           s->cfg_ptr->vlenb,
                           s->cfg_ptr->vlenb, data, fns[s->sew]);
        finalize_rvv_inst(s);
        return true;
    }
    return false;
}

/* Vector Element Index Instruction */
static bool trans_vid_v(DisasContext *s, arg_vid_v *a)
{
    if (require_rvv(s) &&
        vext_check_isa_ill(s) &&
        require_align(a->rd, s->lmul) &&
        require_vm(a->vm, a->rd)) {
        uint32_t data = 0;

        data = FIELD_DP32(data, VDATA, VM, a->vm);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
        data = FIELD_DP32(data, VDATA, VTA, s->vta);
        data = FIELD_DP32(data, VDATA, VMA, s->vma);
        static gen_helper_gvec_2_ptr * const fns[4] = {
            gen_helper_vid_v_b, gen_helper_vid_v_h,
            gen_helper_vid_v_w, gen_helper_vid_v_d,
        };
        tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                           tcg_env, s->cfg_ptr->vlenb,
                           s->cfg_ptr->vlenb,
                           data, fns[s->sew]);
        finalize_rvv_inst(s);
        return true;
    }
    return false;
}

/*
 *** Vector Permutation Instructions
 */

static void load_element(TCGv_i64 dest, TCGv_ptr base,
                         int ofs, int sew, bool sign)
{
    switch (sew) {
    case MO_8:
        if (!sign) {
            tcg_gen_ld8u_i64(dest, base, ofs);
        } else {
            tcg_gen_ld8s_i64(dest, base, ofs);
        }
        break;
    case MO_16:
        if (!sign) {
            tcg_gen_ld16u_i64(dest, base, ofs);
        } else {
            tcg_gen_ld16s_i64(dest, base, ofs);
        }
        break;
    case MO_32:
        if (!sign) {
            tcg_gen_ld32u_i64(dest, base, ofs);
        } else {
            tcg_gen_ld32s_i64(dest, base, ofs);
        }
        break;
    case MO_64:
        tcg_gen_ld_i64(dest, base, ofs);
        break;
    default:
        g_assert_not_reached();
        break;
    }
}

/* offset of element idx within vector register r */
static uint32_t endian_ofs(DisasContext *s, int r, int idx)
{
#if HOST_BIG_ENDIAN
    return vreg_ofs(s, r) + ((idx ^ (7 >> s->sew)) << s->sew);
#else
    return vreg_ofs(s, r) + (idx << s->sew);
#endif
}

/* adjust the index according to host endianness */
static void endian_adjust(TCGv_i32 ofs, int sew)
{
#if HOST_BIG_ENDIAN
    tcg_gen_xori_i32(ofs, ofs, 7 >> sew);
#endif
}
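
/*
 * Vector registers are held as arrays of host-endian uint64_t, so on a
 * big-endian host the SEW-sized elements within each 64-bit word are
 * laid out in reverse order.  Both helpers above compensate by XOR-ing
 * the element index with (7 >> sew), i.e. elements-per-word minus one.
 */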

/* Load idx >= VLMAX ? 0 : vreg[idx] */
static void vec_element_loadx(DisasContext *s, TCGv_i64 dest,
                              int vreg, TCGv idx, int vlmax)
{
    TCGv_i32 ofs = tcg_temp_new_i32();
    TCGv_ptr base = tcg_temp_new_ptr();
    TCGv_i64 t_idx = tcg_temp_new_i64();
    TCGv_i64 t_vlmax, t_zero;

    /*
     * Mask the index to the length so that we do
     * not produce an out-of-range load.
     */
    tcg_gen_trunc_tl_i32(ofs, idx);
    tcg_gen_andi_i32(ofs, ofs, vlmax - 1);

    /* Convert the index to an offset. */
    endian_adjust(ofs, s->sew);
    tcg_gen_shli_i32(ofs, ofs, s->sew);

    /* Convert the index to a pointer. */
    tcg_gen_ext_i32_ptr(base, ofs);
    tcg_gen_add_ptr(base, base, tcg_env);

    /* Perform the load. */
    load_element(dest, base,
                 vreg_ofs(s, vreg), s->sew, false);

    /* Flush out-of-range indexing to zero.  */
    t_vlmax = tcg_constant_i64(vlmax);
    t_zero = tcg_constant_i64(0);
    tcg_gen_extu_tl_i64(t_idx, idx);

    tcg_gen_movcond_i64(TCG_COND_LTU, dest, t_idx,
                        t_vlmax, dest, t_zero);
}

static void vec_element_loadi(DisasContext *s, TCGv_i64 dest,
                              int vreg, int idx, bool sign)
{
    load_element(dest, tcg_env, endian_ofs(s, vreg, idx), s->sew, sign);
}

/* Integer Scalar Move Instruction */

static void store_element(TCGv_i64 val, TCGv_ptr base,
                          int ofs, int sew)
{
    switch (sew) {
    case MO_8:
        tcg_gen_st8_i64(val, base, ofs);
        break;
    case MO_16:
        tcg_gen_st16_i64(val, base, ofs);
        break;
    case MO_32:
        tcg_gen_st32_i64(val, base, ofs);
        break;
    case MO_64:
        tcg_gen_st_i64(val, base, ofs);
        break;
    default:
        g_assert_not_reached();
        break;
    }
}

/*
 * Store vreg[idx] = val.
 * The index must be in range of VLMAX.
 */
static void vec_element_storei(DisasContext *s, int vreg,
                               int idx, TCGv_i64 val)
{
    store_element(val, tcg_env, endian_ofs(s, vreg, idx), s->sew);
}

/* vmv.x.s rd, vs2 # x[rd] = vs2[0] */
static bool trans_vmv_x_s(DisasContext *s, arg_vmv_x_s *a)
{
    if (require_rvv(s) &&
        vext_check_isa_ill(s)) {
        TCGv_i64 t1;
        TCGv dest;

        t1 = tcg_temp_new_i64();
        dest = tcg_temp_new();
        /*
         * load vreg and sign-extend to 64 bits,
         * then truncate to XLEN bits before storing to gpr.
         */
        vec_element_loadi(s, t1, a->rs2, 0, true);
        tcg_gen_trunc_i64_tl(dest, t1);
        gen_set_gpr(s, a->rd, dest);
        tcg_gen_movi_tl(cpu_vstart, 0);
        finalize_rvv_inst(s);
        return true;
    }
    return false;
}

/* vmv.s.x vd, rs1 # vd[0] = rs1 */
static bool trans_vmv_s_x(DisasContext *s, arg_vmv_s_x *a)
{
    if (require_rvv(s) &&
        vext_check_isa_ill(s)) {
        /* This instruction ignores LMUL and vector register groups */
        TCGv_i64 t1;
        TCGv s1;
        TCGLabel *over = gen_new_label();

        tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);

        t1 = tcg_temp_new_i64();

        /*
         * load gpr and sign-extend to 64 bits,
         * then truncate to SEW bits when storing to vreg.
         */
        s1 = get_gpr(s, a->rs1, EXT_NONE);
        tcg_gen_ext_tl_i64(t1, s1);
        vec_element_storei(s, a->rd, 0, t1);
        gen_set_label(over);
        tcg_gen_movi_tl(cpu_vstart, 0);
        finalize_rvv_inst(s);
        return true;
    }
    return false;
}

/* Floating-Point Scalar Move Instructions */
static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a)
{
    if (require_rvv(s) &&
        require_rvf(s) &&
        vext_check_isa_ill(s)) {
        gen_set_rm(s, RISCV_FRM_DYN);

        unsigned int ofs = (8 << s->sew);
        unsigned int len = 64 - ofs;
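        /*
         * ofs is the element width in bits (8 << sew); len is the number
         * of upper bits that must be NaN-boxed with 1s, and is zero when
         * SEW is already 64 bits.
         */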
        TCGv_i64 t_nan;

        vec_element_loadi(s, cpu_fpr[a->rd], a->rs2, 0, false);
        /* NaN-box f[rd] as necessary for SEW */
        if (len) {
            t_nan = tcg_constant_i64(UINT64_MAX);
            tcg_gen_deposit_i64(cpu_fpr[a->rd], cpu_fpr[a->rd],
                                t_nan, ofs, len);
        }

        mark_fs_dirty(s);
        tcg_gen_movi_tl(cpu_vstart, 0);
        finalize_rvv_inst(s);
        return true;
    }
    return false;
}

/* vfmv.s.f vd, rs1 # vd[0] = rs1 (vs2=0) */
static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
{
    if (require_rvv(s) &&
        require_rvf(s) &&
        vext_check_isa_ill(s)) {
        gen_set_rm(s, RISCV_FRM_DYN);

        /* This instruction ignores LMUL and vector register groups */
        TCGv_i64 t1;
        TCGLabel *over = gen_new_label();

        /* if vstart >= vl, skip vector register write back */
        tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);

        /* NaN-box f[rs1] */
        t1 = tcg_temp_new_i64();
        do_nanbox(s, t1, cpu_fpr[a->rs1]);

        vec_element_storei(s, a->rd, 0, t1);

        gen_set_label(over);
        tcg_gen_movi_tl(cpu_vstart, 0);
        finalize_rvv_inst(s);
        return true;
    }
    return false;
}

/* Vector Slide Instructions */
static bool slideup_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_slide(s, a->rd, a->rs2, a->vm, true);
}

GEN_OPIVX_TRANS(vslideup_vx, slideup_check)
GEN_OPIVX_TRANS(vslide1up_vx, slideup_check)
GEN_OPIVI_TRANS(vslideup_vi, IMM_ZX, vslideup_vx, slideup_check)

static bool slidedown_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_slide(s, a->rd, a->rs2, a->vm, false);
}

GEN_OPIVX_TRANS(vslidedown_vx, slidedown_check)
GEN_OPIVX_TRANS(vslide1down_vx, slidedown_check)
GEN_OPIVI_TRANS(vslidedown_vi, IMM_ZX, vslidedown_vx, slidedown_check)

/* Vector Floating-Point Slide Instructions */
static bool fslideup_check(DisasContext *s, arg_rmrr *a)
{
    return slideup_check(s, a) &&
           require_rvf(s);
}

static bool fslidedown_check(DisasContext *s, arg_rmrr *a)
{
    return slidedown_check(s, a) &&
           require_rvf(s);
}

GEN_OPFVF_TRANS(vfslide1up_vf, fslideup_check)
GEN_OPFVF_TRANS(vfslide1down_vf, fslidedown_check)

/* Vector Register Gather Instruction */
static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           require_align(a->rd, s->lmul) &&
           require_align(a->rs1, s->lmul) &&
           require_align(a->rs2, s->lmul) &&
           (a->rd != a->rs2 && a->rd != a->rs1) &&
           require_vm(a->vm, a->rd);
}

static bool vrgatherei16_vv_check(DisasContext *s, arg_rmrr *a)
{
    int8_t emul = MO_16 - s->sew + s->lmul;
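    /*
     * The index operand vs1 uses a fixed EEW of 16, so its EMUL is
     * (16 / SEW) * LMUL, i.e. MO_16 - sew + lmul in log2 terms.
     */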
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           (emul >= -3 && emul <= 3) &&
           require_align(a->rd, s->lmul) &&
           require_align(a->rs1, emul) &&
           require_align(a->rs2, s->lmul) &&
           (a->rd != a->rs2 && a->rd != a->rs1) &&
           !is_overlapped(a->rd, 1 << MAX(s->lmul, 0),
                          a->rs1, 1 << MAX(emul, 0)) &&
           !is_overlapped(a->rd, 1 << MAX(s->lmul, 0),
                          a->rs2, 1 << MAX(s->lmul, 0)) &&
           require_vm(a->vm, a->rd);
}

GEN_OPIVV_TRANS(vrgather_vv, vrgather_vv_check)
GEN_OPIVV_TRANS(vrgatherei16_vv, vrgatherei16_vv_check)

static bool vrgather_vx_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           require_align(a->rd, s->lmul) &&
           require_align(a->rs2, s->lmul) &&
           (a->rd != a->rs2) &&
           require_vm(a->vm, a->rd);
}

/* vrgather.vx vd, vs2, rs1, vm # vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[rs1] */
static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a)
{
    if (!vrgather_vx_check(s, a)) {
        return false;
    }

    if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
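        /*
         * Fast path: unmasked, vl == VLMAX and no tail-agnostic handling
         * needed, so the gather degenerates to splatting vs2[x[rs1]]
         * (or zero when the index is out of range) across vd.
         */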
        int vlmax = vext_get_vlmax(s->cfg_ptr->vlenb, s->sew, s->lmul);
        TCGv_i64 dest = tcg_temp_new_i64();

        if (a->rs1 == 0) {
            vec_element_loadi(s, dest, a->rs2, 0, false);
        } else {
            vec_element_loadx(s, dest, a->rs2, cpu_gpr[a->rs1], vlmax);
        }

        tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
                             MAXSZ(s), MAXSZ(s), dest);
        finalize_rvv_inst(s);
    } else {
        static gen_helper_opivx * const fns[4] = {
            gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h,
            gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d
        };
        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);
    }
    return true;
}

/* vrgather.vi vd, vs2, imm, vm # vd[i] = (imm >= VLMAX) ? 0 : vs2[imm] */
static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a)
{
    if (!vrgather_vx_check(s, a)) {
        return false;
    }

    if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
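        /*
         * Same fast path as vrgather.vx, but with an immediate index the
         * out-of-range check can be resolved at translation time.
         */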
        int vlmax = vext_get_vlmax(s->cfg_ptr->vlenb, s->sew, s->lmul);
        if (a->rs1 >= vlmax) {
            tcg_gen_gvec_dup_imm(MO_64, vreg_ofs(s, a->rd),
                                 MAXSZ(s), MAXSZ(s), 0);
        } else {
            tcg_gen_gvec_dup_mem(s->sew, vreg_ofs(s, a->rd),
                                 endian_ofs(s, a->rs2, a->rs1),
                                 MAXSZ(s), MAXSZ(s));
        }
        finalize_rvv_inst(s);
    } else {
        static gen_helper_opivx * const fns[4] = {
            gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h,
            gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d
        };
        return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew],
                           s, IMM_ZX);
    }
    return true;
}

/*
 * Vector Compress Instruction
 *
 * The destination vector register group cannot overlap the
 * source vector register group or the source mask register.
 */
static bool vcompress_vm_check(DisasContext *s, arg_r *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           require_align(a->rd, s->lmul) &&
           require_align(a->rs2, s->lmul) &&
           (a->rd != a->rs2) &&
           !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs1, 1) &&
           s->vstart_eq_zero;
}

static bool trans_vcompress_vm(DisasContext *s, arg_r *a)
{
    if (vcompress_vm_check(s, a)) {
        uint32_t data = 0;
        static gen_helper_gvec_4_ptr * const fns[4] = {
            gen_helper_vcompress_vm_b, gen_helper_vcompress_vm_h,
            gen_helper_vcompress_vm_w, gen_helper_vcompress_vm_d,
        };

        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
        data = FIELD_DP32(data, VDATA, VTA, s->vta);
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                           vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
                           tcg_env, s->cfg_ptr->vlenb,
                           s->cfg_ptr->vlenb, data,
                           fns[s->sew]);
        finalize_rvv_inst(s);
        return true;
    }
    return false;
}

/*
 * Whole Vector Register Move Instructions depend on the vtype register
 * (vsew), so the vill bit must be checked. (Section 16.6)
 */
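/*
 * LEN is the number of whole vector registers moved; vd and vs2 must be
 * LEN-aligned.  A non-zero vstart falls back to the vmvr_v helper, while
 * the common case is a plain gvec move of vlenb * LEN bytes.
 */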
#define GEN_VMV_WHOLE_TRANS(NAME, LEN)                             \
static bool trans_##NAME(DisasContext *s, arg_##NAME * a)               \
{                                                                       \
    if (require_rvv(s) &&                                               \
        vext_check_isa_ill(s) &&                                        \
        QEMU_IS_ALIGNED(a->rd, LEN) &&                                  \
        QEMU_IS_ALIGNED(a->rs2, LEN)) {                                 \
        uint32_t maxsz = s->cfg_ptr->vlenb * LEN;                       \
        if (s->vstart_eq_zero) {                                        \
            tcg_gen_gvec_mov(s->sew, vreg_ofs(s, a->rd),                \
                             vreg_ofs(s, a->rs2), maxsz, maxsz);        \
        } else {                                                        \
            tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2), \
                               tcg_env, maxsz, maxsz, 0, gen_helper_vmvr_v); \
        }                                                               \
        finalize_rvv_inst(s);                                           \
        return true;                                                    \
    }                                                                   \
    return false;                                                       \
}

GEN_VMV_WHOLE_TRANS(vmv1r_v, 1)
GEN_VMV_WHOLE_TRANS(vmv2r_v, 2)
GEN_VMV_WHOLE_TRANS(vmv4r_v, 4)
GEN_VMV_WHOLE_TRANS(vmv8r_v, 8)

static bool int_ext_check(DisasContext *s, arg_rmr *a, uint8_t div)
{
    uint8_t from = (s->sew + 3) - div;
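    /*
     * from is log2 of the source element width in bits: sew + 3 converts
     * the destination MO_* size to bits and div is log2 of the extension
     * factor (vf2/vf4/vf8); the source element must be at least 8 bits.
     */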
    bool ret = require_rvv(s) &&
        (from >= 3 && from <= 8) &&
        (a->rd != a->rs2) &&
        require_align(a->rd, s->lmul) &&
        require_align(a->rs2, s->lmul - div) &&
        require_vm(a->vm, a->rd) &&
        require_noover(a->rd, s->lmul, a->rs2, s->lmul - div);
    return ret;
}

static bool int_ext_op(DisasContext *s, arg_rmr *a, uint8_t seq)
{
    uint32_t data = 0;
    gen_helper_gvec_3_ptr *fn;

    static gen_helper_gvec_3_ptr * const fns[6][4] = {
        {
            NULL, gen_helper_vzext_vf2_h,
            gen_helper_vzext_vf2_w, gen_helper_vzext_vf2_d
        },
        {
            NULL, NULL,
            gen_helper_vzext_vf4_w, gen_helper_vzext_vf4_d,
        },
        {
            NULL, NULL,
            NULL, gen_helper_vzext_vf8_d
        },
        {
            NULL, gen_helper_vsext_vf2_h,
            gen_helper_vsext_vf2_w, gen_helper_vsext_vf2_d
        },
        {
            NULL, NULL,
            gen_helper_vsext_vf4_w, gen_helper_vsext_vf4_d,
        },
        {
            NULL, NULL,
            NULL, gen_helper_vsext_vf8_d
        }
    };

    fn = fns[seq][s->sew];
    if (fn == NULL) {
        return false;
    }

    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, VTA, s->vta);
    data = FIELD_DP32(data, VDATA, VMA, s->vma);

    tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                       vreg_ofs(s, a->rs2), tcg_env,
                       s->cfg_ptr->vlenb,
                       s->cfg_ptr->vlenb, data, fn);

    finalize_rvv_inst(s);
    return true;
}

/* Vector Integer Extension */
#define GEN_INT_EXT_TRANS(NAME, DIV, SEQ)             \
static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
{                                                     \
    if (int_ext_check(s, a, DIV)) {                   \
        return int_ext_op(s, a, SEQ);                 \
    }                                                 \
    return false;                                     \
}

GEN_INT_EXT_TRANS(vzext_vf2, 1, 0)
GEN_INT_EXT_TRANS(vzext_vf4, 2, 1)
GEN_INT_EXT_TRANS(vzext_vf8, 3, 2)
GEN_INT_EXT_TRANS(vsext_vf2, 1, 3)
GEN_INT_EXT_TRANS(vsext_vf4, 2, 4)
GEN_INT_EXT_TRANS(vsext_vf8, 3, 5)