/*
 * RISC-V Vector Extension Internals
 *
 * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef TARGET_RISCV_VECTOR_INTERNALS_H
#define TARGET_RISCV_VECTOR_INTERNALS_H

#include "qemu/bitops.h"
#include "cpu.h"
#include "tcg/tcg-gvec-desc.h"
#include "internals.h"

#define VSTART_CHECK_EARLY_EXIT(env) do { \
    if (env->vstart >= env->vl) {         \
        env->vstart = 0;                  \
        return;                           \
    }                                     \
} while (0)

static inline uint32_t vext_nf(uint32_t desc)
{
    return FIELD_EX32(simd_data(desc), VDATA, NF);
}

/*
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing units smaller than that needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define H1(x)   ((x) ^ 7)
#define H1_2(x) ((x) ^ 6)
#define H1_4(x) ((x) ^ 4)
#define H2(x)   ((x) ^ 3)
#define H4(x)   ((x) ^ 1)
#define H8(x)   ((x))
#else
#define H1(x)   (x)
#define H1_2(x) (x)
#define H1_4(x) (x)
#define H2(x)   (x)
#define H4(x)   (x)
#define H8(x)   (x)
#endif
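
/*
 * Worked example (illustrative): with 16-bit elements there are four
 * elements per 64-bit chunk.  On a little-endian host H2 is the identity,
 * so logical element 3 is simply ((uint16_t *)vd)[3].  On a big-endian
 * host the bytes of each chunk are stored reversed, so the same element
 * is found at H2(3) = 3 ^ 3 = 0 within that chunk.  Indices only ever
 * change inside a 64-bit chunk; H8 is the identity on both hosts because
 * whole chunks are already kept in host order.
 */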

/*
 * Encode LMUL to lmul as follows:
 *     LMUL    vlmul    lmul
 *      1       000       0
 *      2       001       1
 *      4       010       2
 *      8       011       3
 *      -       100       -
 *     1/8      101      -3
 *     1/4      110      -2
 *     1/2      111      -1
 */
static inline int32_t vext_lmul(uint32_t desc)
{
    return sextract32(FIELD_EX32(simd_data(desc), VDATA, LMUL), 0, 3);
}
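
/*
 * For example, per the table above: a vlmul field of 0b011 (LMUL = 8)
 * sign-extends to 3, while 0b101 (LMUL = 1/8) sign-extends to -3, so the
 * sextract32() above recovers log2(LMUL) directly.
 */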

static inline uint32_t vext_vm(uint32_t desc)
{
    return FIELD_EX32(simd_data(desc), VDATA, VM);
}

static inline uint32_t vext_vma(uint32_t desc)
{
    return FIELD_EX32(simd_data(desc), VDATA, VMA);
}

static inline uint32_t vext_vta(uint32_t desc)
{
    return FIELD_EX32(simd_data(desc), VDATA, VTA);
}

static inline uint32_t vext_vta_all_1s(uint32_t desc)
{
    return FIELD_EX32(simd_data(desc), VDATA, VTA_ALL_1S);
}

/*
 * Earlier designs (pre-0.9) had a varying number of bits
 * per mask value (MLEN). In the 0.9 design, MLEN=1.
 * (Section 4.5)
 */
static inline int vext_elem_mask(void *v0, int index)
{
    int idx = index / 64;
    int pos = index % 64;
    return (((uint64_t *)v0)[idx] >> pos) & 1;
}
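
/*
 * Illustrative use: the mask register is read as host-endian 64-bit
 * words, so the mask bit for element 70 is bit 6 of word 1, i.e.
 * vext_elem_mask(v0, 70) == ((((uint64_t *)v0)[1] >> 6) & 1).
 */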

/*
 * Get number of total elements, including prestart, body and tail elements.
 * Note that when LMUL < 1, the tail includes the elements past VLMAX that
 * are held in the same vector register.
 */
static inline uint32_t vext_get_total_elems(CPURISCVState *env, uint32_t desc,
                                            uint32_t esz)
{
    uint32_t vlenb = simd_maxsz(desc);
    uint32_t sew = 1 << FIELD_EX64(env->vtype, VTYPE, VSEW);
    int8_t emul = ctzl(esz) - ctzl(sew) + vext_lmul(desc) < 0 ? 0 :
                  ctzl(esz) - ctzl(sew) + vext_lmul(desc);
    return (vlenb << emul) / esz;
}
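
/*
 * Worked example (assumed parameters, for illustration only): with
 * VLEN = 128 bits, vlenb = simd_maxsz(desc) = 16.  For SEW = 32
 * (sew = 4), LMUL = 1/2 (vext_lmul() == -1) and esz = 4:
 *     ctzl(4) - ctzl(4) + (-1) = -1  ->  clamped to emul = 0
 * so the result is (16 << 0) / 4 = 4 elements, i.e. the whole register,
 * even though VLMAX is only 2; the two extra elements are the tail
 * described above.
 */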

/* set agnostic elements to 1s */
void vext_set_elems_1s(void *base, uint32_t is_agnostic, uint32_t cnt,
                       uint32_t tot);

/* expand macro args before macro */
#define RVVCALL(macro, ...)  macro(__VA_ARGS__)

/* (TD, T2, TX2) */
#define OP_UU_B uint8_t, uint8_t, uint8_t
#define OP_UU_H uint16_t, uint16_t, uint16_t
#define OP_UU_W uint32_t, uint32_t, uint32_t
#define OP_UU_D uint64_t, uint64_t, uint64_t

/* (TD, T1, T2, TX1, TX2) */
#define OP_UUU_B uint8_t, uint8_t, uint8_t, uint8_t, uint8_t
#define OP_UUU_H uint16_t, uint16_t, uint16_t, uint16_t, uint16_t
#define OP_UUU_W uint32_t, uint32_t, uint32_t, uint32_t, uint32_t
#define OP_UUU_D uint64_t, uint64_t, uint64_t, uint64_t, uint64_t

#define OPIVV1(NAME, TD, T2, TX2, HD, HS2, OP)         \
static void do_##NAME(void *vd, void *vs2, int i)      \
{                                                      \
    TX2 s2 = *((T2 *)vs2 + HS2(i));                    \
    *((TD *)vd + HD(i)) = OP(s2);                      \
}

#define GEN_VEXT_V(NAME, ESZ)                          \
void HELPER(NAME)(void *vd, void *v0, void *vs2,       \
                  CPURISCVState *env, uint32_t desc)   \
{                                                      \
    uint32_t vm = vext_vm(desc);                       \
    uint32_t vl = env->vl;                             \
    uint32_t total_elems =                             \
        vext_get_total_elems(env, desc, ESZ);          \
    uint32_t vta = vext_vta(desc);                     \
    uint32_t vma = vext_vma(desc);                     \
    uint32_t i;                                        \
                                                       \
    VSTART_CHECK_EARLY_EXIT(env);                      \
                                                       \
    for (i = env->vstart; i < vl; i++) {               \
        if (!vm && !vext_elem_mask(v0, i)) {           \
            /* set masked-off elements to 1s */        \
            vext_set_elems_1s(vd, vma, i * ESZ,        \
                              (i + 1) * ESZ);          \
            continue;                                  \
        }                                              \
        do_##NAME(vd, vs2, i);                         \
    }                                                  \
    env->vstart = 0;                                   \
    /* set tail elements to 1s */                      \
    vext_set_elems_1s(vd, vta, vl * ESZ,               \
                      total_elems * ESZ);              \
}
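
/*
 * Usage sketch (hypothetical names, for illustration only; the real
 * instantiations live in the vector helper sources): pairing OPIVV1
 * with GEN_VEXT_V produces a complete unary helper, e.g.
 *
 *     #define DO_NOT(N) (~(N))
 *     RVVCALL(OPIVV1, vnot_v_example_b, OP_UU_B, H1, H1, DO_NOT)
 *     GEN_VEXT_V(vnot_v_example_b, 1)
 *
 * which expands to a per-element do_vnot_v_example_b() and a
 * HELPER(vnot_v_example_b)() that walks the body elements while
 * honouring the mask and the tail/mask agnostic policies.
 */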

/* operation of two vector elements */
typedef void opivv2_fn(void *vd, void *vs1, void *vs2, int i);

#define OPIVV2(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP)    \
static void do_##NAME(void *vd, void *vs1, void *vs2, int i)    \
{                                                               \
    TX1 s1 = *((T1 *)vs1 + HS1(i));                             \
    TX2 s2 = *((T2 *)vs2 + HS2(i));                             \
    *((TD *)vd + HD(i)) = OP(s2, s1);                           \
}

void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
                CPURISCVState *env, uint32_t desc,
                opivv2_fn *fn, uint32_t esz);

/* generate the helpers for OPIVV */
#define GEN_VEXT_VV(NAME, ESZ)                            \
void HELPER(NAME)(void *vd, void *v0, void *vs1,          \
                  void *vs2, CPURISCVState *env,          \
                  uint32_t desc)                          \
{                                                         \
    do_vext_vv(vd, v0, vs1, vs2, env, desc,               \
               do_##NAME, ESZ);                           \
}
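
/*
 * Usage sketch (hypothetical names, for illustration only): OPIVV2
 * supplies the element operation and GEN_VEXT_VV wraps it with
 * do_vext_vv(), which handles vstart, masking and the tail policy:
 *
 *     #define DO_ADD(N, M) ((N) + (M))
 *     RVVCALL(OPIVV2, vadd_vv_example_b, OP_UUU_B, H1, H1, H1, DO_ADD)
 *     GEN_VEXT_VV(vadd_vv_example_b, 1)
 */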

typedef void opivx2_fn(void *vd, target_long s1, void *vs2, int i);

/*
 * (T1)s1 gives the real operand type.
 * (TX1)(T1)s1 then expands it to the operation type for widening or
 * narrowing operations.
 */
#define OPIVX2(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP)             \
static void do_##NAME(void *vd, target_long s1, void *vs2, int i)   \
{                                                                   \
    TX2 s2 = *((T2 *)vs2 + HS2(i));                                 \
    *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1);                      \
}

void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
                CPURISCVState *env, uint32_t desc,
                opivx2_fn fn, uint32_t esz);

/* generate the helpers for OPIVX */
#define GEN_VEXT_VX(NAME, ESZ)                            \
void HELPER(NAME)(void *vd, void *v0, target_ulong s1,    \
                  void *vs2, CPURISCVState *env,          \
                  uint32_t desc)                          \
{                                                         \
    do_vext_vx(vd, v0, s1, vs2, env, desc,                \
               do_##NAME, ESZ);                           \
}
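
/*
 * Usage sketch (hypothetical names, for illustration only): the
 * vector-scalar form reuses the same shape, with s1 taken from a GPR
 * and truncated by the (TX1)(T1) cast.  Using DO_ADD as above:
 *
 *     RVVCALL(OPIVX2, vadd_vx_example_b, OP_UUU_B, H1, H1, DO_ADD)
 *     GEN_VEXT_VX(vadd_vx_example_b, 1)
 */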

/* Three of the widening shorthand macros: */
/* (TD, T1, T2, TX1, TX2) */
#define WOP_UUU_B uint16_t, uint8_t, uint8_t, uint16_t, uint16_t
#define WOP_UUU_H uint32_t, uint16_t, uint16_t, uint32_t, uint32_t
#define WOP_UUU_W uint64_t, uint32_t, uint32_t, uint64_t, uint64_t
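
/*
 * Usage sketch (hypothetical names, for illustration only): the WOP_*
 * type lists plug into the same OPIVV2/GEN_VEXT_VV machinery to build
 * widening helpers with a double-width destination.  Using DO_ADD as
 * above:
 *
 *     RVVCALL(OPIVV2, vwaddu_vv_example_b, WOP_UUU_B, H2, H1, H1, DO_ADD)
 *     GEN_VEXT_VV(vwaddu_vv_example_b, 2)
 *
 * ESZ is 2 here because the destination elements are uint16_t.
 */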

#endif /* TARGET_RISCV_VECTOR_INTERNALS_H */