// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

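// vset_v_<part>_<whole> inserts an LMUL-<part> register group into part
// <index> of a larger LMUL-<whole> destination. In the IR checked below,
// this lowers to llvm.experimental.vector.insert at an element offset of
// index * (element count of the inserted type); e.g. vset_v_i8m1_i8m2's
// nxv8i8 value at index 1 lands at element offset 8.
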
// CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 8)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, vint8m1_t val) {
  return vset_v_i8m1_i8m2(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 24)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vset_v_i8m1_i8m4(vint8m4_t dest, vint8m1_t val) {
  return vset_v_i8m1_i8m4(dest, 3, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i8m2_i8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 16)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vset_v_i8m2_i8m4(vint8m4_t dest, vint8m2_t val) {
  return vset_v_i8m2_i8m4(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 56)
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vset_v_i8m1_i8m8(vint8m8_t dest, vint8m1_t val) {
  return vset_v_i8m1_i8m8(dest, 7, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i8m2_i8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 32)
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vset_v_i8m2_i8m8(vint8m8_t dest, vint8m2_t val) {
  return vset_v_i8m2_i8m8(dest, 2, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i8m4_i8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VAL:%.*]], i64 32)
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vset_v_i8m4_i8m8(vint8m8_t dest, vint8m4_t val) {
  return vset_v_i8m4_i8m8(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 4)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vset_v_i16m1_i16m2(vint16m2_t dest, vint16m1_t val) {
  return vset_v_i16m1_i16m2(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 12)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vset_v_i16m1_i16m4(vint16m4_t dest, vint16m1_t val) {
  return vset_v_i16m1_i16m4(dest, 3, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i16m2_i16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 8)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vset_v_i16m2_i16m4(vint16m4_t dest, vint16m2_t val) {
  return vset_v_i16m2_i16m4(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 28)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vset_v_i16m1_i16m8(vint16m8_t dest, vint16m1_t val) {
  return vset_v_i16m1_i16m8(dest, 7, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i16m2_i16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 16)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vset_v_i16m2_i16m8(vint16m8_t dest, vint16m2_t val) {
  return vset_v_i16m2_i16m8(dest, 2, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i16m4_i16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VAL:%.*]], i64 16)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vset_v_i16m4_i16m8(vint16m8_t dest, vint16m4_t val) {
  return vset_v_i16m4_i16m8(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vset_v_i32m1_i32m2(vint32m2_t dest, vint32m1_t val) {
  return vset_v_i32m1_i32m2(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 6)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vset_v_i32m1_i32m4(vint32m4_t dest, vint32m1_t val) {
  return vset_v_i32m1_i32m4(dest, 3, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i32m2_i32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 4)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vset_v_i32m2_i32m4(vint32m4_t dest, vint32m2_t val) {
  return vset_v_i32m2_i32m4(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 14)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vset_v_i32m1_i32m8(vint32m8_t dest, vint32m1_t val) {
  return vset_v_i32m1_i32m8(dest, 7, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i32m2_i32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 8)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vset_v_i32m2_i32m8(vint32m8_t dest, vint32m2_t val) {
  return vset_v_i32m2_i32m8(dest, 2, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i32m4_i32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VAL:%.*]], i64 8)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vset_v_i32m4_i32m8(vint32m8_t dest, vint32m4_t val) {
  return vset_v_i32m4_i32m8(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vset_v_i64m1_i64m2(vint64m2_t dest, vint64m1_t val) {
  return vset_v_i64m1_i64m2(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vset_v_i64m1_i64m4(vint64m4_t dest, vint64m1_t val) {
  return vset_v_i64m1_i64m4(dest, 3, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i64m2_i64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vset_v_i64m2_i64m4(vint64m4_t dest, vint64m2_t val) {
  return vset_v_i64m2_i64m4(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 7)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vset_v_i64m1_i64m8(vint64m8_t dest, vint64m1_t val) {
  return vset_v_i64m1_i64m8(dest, 7, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i64m2_i64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 4)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vset_v_i64m2_i64m8(vint64m8_t dest, vint64m2_t val) {
  return vset_v_i64m2_i64m8(dest, 2, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i64m4_i64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VAL:%.*]], i64 4)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vset_v_i64m4_i64m8(vint64m8_t dest, vint64m4_t val) {
  return vset_v_i64m4_i64m8(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 8)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vset_v_u8m1_u8m2(vuint8m2_t dest, vuint8m1_t val) {
  return vset_v_u8m1_u8m2(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 24)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vset_v_u8m1_u8m4(vuint8m4_t dest, vuint8m1_t val) {
  return vset_v_u8m1_u8m4(dest, 3, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u8m2_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 16)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vset_v_u8m2_u8m4(vuint8m4_t dest, vuint8m2_t val) {
  return vset_v_u8m2_u8m4(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 56)
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vset_v_u8m1_u8m8(vuint8m8_t dest, vuint8m1_t val) {
  return vset_v_u8m1_u8m8(dest, 7, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u8m2_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 32)
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vset_v_u8m2_u8m8(vuint8m8_t dest, vuint8m2_t val) {
  return vset_v_u8m2_u8m8(dest, 2, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u8m4_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VAL:%.*]], i64 32)
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vset_v_u8m4_u8m8(vuint8m8_t dest, vuint8m4_t val) {
  return vset_v_u8m4_u8m8(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 4)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vset_v_u16m1_u16m2(vuint16m2_t dest, vuint16m1_t val) {
  return vset_v_u16m1_u16m2(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 12)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vset_v_u16m1_u16m4(vuint16m4_t dest, vuint16m1_t val) {
  return vset_v_u16m1_u16m4(dest, 3, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u16m2_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 8)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vset_v_u16m2_u16m4(vuint16m4_t dest, vuint16m2_t val) {
  return vset_v_u16m2_u16m4(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 28)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vset_v_u16m1_u16m8(vuint16m8_t dest, vuint16m1_t val) {
  return vset_v_u16m1_u16m8(dest, 7, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u16m2_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 16)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vset_v_u16m2_u16m8(vuint16m8_t dest, vuint16m2_t val) {
  return vset_v_u16m2_u16m8(dest, 2, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u16m4_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VAL:%.*]], i64 16)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vset_v_u16m4_u16m8(vuint16m8_t dest, vuint16m4_t val) {
  return vset_v_u16m4_u16m8(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vset_v_u32m1_u32m2(vuint32m2_t dest, vuint32m1_t val) {
  return vset_v_u32m1_u32m2(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 6)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vset_v_u32m1_u32m4(vuint32m4_t dest, vuint32m1_t val) {
  return vset_v_u32m1_u32m4(dest, 3, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u32m2_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 4)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vset_v_u32m2_u32m4(vuint32m4_t dest, vuint32m2_t val) {
  return vset_v_u32m2_u32m4(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 14)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vset_v_u32m1_u32m8(vuint32m8_t dest, vuint32m1_t val) {
  return vset_v_u32m1_u32m8(dest, 7, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u32m2_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 8)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vset_v_u32m2_u32m8(vuint32m8_t dest, vuint32m2_t val) {
  return vset_v_u32m2_u32m8(dest, 2, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u32m4_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VAL:%.*]], i64 8)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vset_v_u32m4_u32m8(vuint32m8_t dest, vuint32m4_t val) {
  return vset_v_u32m4_u32m8(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vset_v_u64m1_u64m2(vuint64m2_t dest, vuint64m1_t val) {
  return vset_v_u64m1_u64m2(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vset_v_u64m1_u64m4(vuint64m4_t dest, vuint64m1_t val) {
  return vset_v_u64m1_u64m4(dest, 3, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u64m2_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vset_v_u64m2_u64m4(vuint64m4_t dest, vuint64m2_t val) {
  return vset_v_u64m2_u64m4(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 7)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vset_v_u64m1_u64m8(vuint64m8_t dest, vuint64m1_t val) {
  return vset_v_u64m1_u64m8(dest, 7, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u64m2_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 4)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vset_v_u64m2_u64m8(vuint64m8_t dest, vuint64m2_t val) {
  return vset_v_u64m2_u64m8(dest, 2, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u64m4_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VAL:%.*]], i64 4)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vset_v_u64m4_u64m8(vuint64m8_t dest, vuint64m4_t val) {
  return vset_v_u64m4_u64m8(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.nxv2f32(<vscale x 4 x float> [[DEST:%.*]], <vscale x 2 x float> [[VAL:%.*]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vset_v_f32m1_f32m2(vfloat32m2_t dest, vfloat32m1_t val) {
  return vset_v_f32m1_f32m2(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.insert.nxv8f32.nxv2f32(<vscale x 8 x float> [[DEST:%.*]], <vscale x 2 x float> [[VAL:%.*]], i64 6)
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vset_v_f32m1_f32m4(vfloat32m4_t dest, vfloat32m1_t val) {
  return vset_v_f32m1_f32m4(dest, 3, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f32m2_f32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.insert.nxv8f32.nxv4f32(<vscale x 8 x float> [[DEST:%.*]], <vscale x 4 x float> [[VAL:%.*]], i64 4)
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vset_v_f32m2_f32m4(vfloat32m4_t dest, vfloat32m2_t val) {
  return vset_v_f32m2_f32m4(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv2f32(<vscale x 16 x float> [[DEST:%.*]], <vscale x 2 x float> [[VAL:%.*]], i64 14)
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vset_v_f32m1_f32m8(vfloat32m8_t dest, vfloat32m1_t val) {
  return vset_v_f32m1_f32m8(dest, 7, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f32m2_f32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv4f32(<vscale x 16 x float> [[DEST:%.*]], <vscale x 4 x float> [[VAL:%.*]], i64 8)
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vset_v_f32m2_f32m8(vfloat32m8_t dest, vfloat32m2_t val) {
  return vset_v_f32m2_f32m8(dest, 2, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f32m4_f32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv8f32(<vscale x 16 x float> [[DEST:%.*]], <vscale x 8 x float> [[VAL:%.*]], i64 8)
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vset_v_f32m4_f32m8(vfloat32m8_t dest, vfloat32m4_t val) {
  return vset_v_f32m4_f32m8(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.nxv1f64(<vscale x 2 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vset_v_f64m1_f64m2(vfloat64m2_t dest, vfloat64m1_t val) {
  return vset_v_f64m1_f64m2(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.insert.nxv4f64.nxv1f64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vset_v_f64m1_f64m4(vfloat64m4_t dest, vfloat64m1_t val) {
  return vset_v_f64m1_f64m4(dest, 3, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f64m2_f64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.insert.nxv4f64.nxv2f64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 2 x double> [[VAL:%.*]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vset_v_f64m2_f64m4(vfloat64m4_t dest, vfloat64m2_t val) {
  return vset_v_f64m2_f64m4(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv1f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 7)
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vset_v_f64m1_f64m8(vfloat64m8_t dest, vfloat64m1_t val) {
  return vset_v_f64m1_f64m8(dest, 7, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f64m2_f64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv2f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 2 x double> [[VAL:%.*]], i64 4)
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vset_v_f64m2_f64m8(vfloat64m8_t dest, vfloat64m2_t val) {
  return vset_v_f64m2_f64m8(dest, 2, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f64m4_f64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv4f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 4 x double> [[VAL:%.*]], i64 4)
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vset_v_f64m4_f64m8(vfloat64m8_t dest, vfloat64m4_t val) {
  return vset_v_f64m4_f64m8(dest, 1, val);
}

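// The _Float16 cases below were generated with index 0 only and rely on the
// Zfh extension (+experimental-zfh in the RUN line above).
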
// CHECK-RV64-LABEL: @test_vset_v_f16m1_f16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.experimental.vector.insert.nxv8f16.nxv4f16(<vscale x 8 x half> [[DEST:%.*]], <vscale x 4 x half> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vset_v_f16m1_f16m2(vfloat16m2_t dest, vfloat16m1_t val) {
  return vset_v_f16m1_f16m2(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f16m1_f16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.experimental.vector.insert.nxv16f16.nxv4f16(<vscale x 16 x half> [[DEST:%.*]], <vscale x 4 x half> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vset_v_f16m1_f16m4(vfloat16m4_t dest, vfloat16m1_t val) {
  return vset_v_f16m1_f16m4(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f16m2_f16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.experimental.vector.insert.nxv16f16.nxv8f16(<vscale x 16 x half> [[DEST:%.*]], <vscale x 8 x half> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vset_v_f16m2_f16m4(vfloat16m4_t dest, vfloat16m2_t val) {
  return vset_v_f16m2_f16m4(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f16m1_f16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv32f16.nxv4f16(<vscale x 32 x half> [[DEST:%.*]], <vscale x 4 x half> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vset_v_f16m1_f16m8(vfloat16m8_t dest, vfloat16m1_t val) {
  return vset_v_f16m1_f16m8(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f16m2_f16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv32f16.nxv8f16(<vscale x 32 x half> [[DEST:%.*]], <vscale x 8 x half> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vset_v_f16m2_f16m8(vfloat16m8_t dest, vfloat16m2_t val) {
  return vset_v_f16m2_f16m8(dest, 0, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f16m4_f16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv32f16.nxv16f16(<vscale x 32 x half> [[DEST:%.*]], <vscale x 16 x half> [[VAL:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vset_v_f16m4_f16m8(vfloat16m8_t dest, vfloat16m4_t val) {
  return vset_v_f16m4_f16m8(dest, 0, val);
}