// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

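// Each test checks that vset_v_<part>_<whole>(dest, index, val) lowers to a
// single llvm.experimental.vector.insert call. The trailing i64 immediate is
// the element offset: index * (minimum element count of the part type). For
// example, vint8m1_t is <vscale x 8 x i8>, so inserting it at index 1 of an
// LMUL=2 destination gives offset 8, and at index 3 of an LMUL=4 destination
// gives offset 24.
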
// CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 8)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, vint8m1_t val) {
  return vset_v_i8m1_i8m2(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 24)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vset_v_i8m1_i8m4(vint8m4_t dest, vint8m1_t val) {
  return vset_v_i8m1_i8m4(dest, 3, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i8m2_i8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 16)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vset_v_i8m2_i8m4(vint8m4_t dest, vint8m2_t val) {
  return vset_v_i8m2_i8m4(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 56)
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vset_v_i8m1_i8m8(vint8m8_t dest, vint8m1_t val) {
  return vset_v_i8m1_i8m8(dest, 7, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i8m2_i8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 32)
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vset_v_i8m2_i8m8(vint8m8_t dest, vint8m2_t val) {
  return vset_v_i8m2_i8m8(dest, 2, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i8m4_i8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VAL:%.*]], i64 32)
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vset_v_i8m4_i8m8(vint8m8_t dest, vint8m4_t val) {
  return vset_v_i8m4_i8m8(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 4)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vset_v_i16m1_i16m2(vint16m2_t dest, vint16m1_t val) {
  return vset_v_i16m1_i16m2(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 12)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vset_v_i16m1_i16m4(vint16m4_t dest, vint16m1_t val) {
  return vset_v_i16m1_i16m4(dest, 3, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i16m2_i16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 8)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vset_v_i16m2_i16m4(vint16m4_t dest, vint16m2_t val) {
  return vset_v_i16m2_i16m4(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 28)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vset_v_i16m1_i16m8(vint16m8_t dest, vint16m1_t val) {
  return vset_v_i16m1_i16m8(dest, 7, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i16m2_i16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 16)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vset_v_i16m2_i16m8(vint16m8_t dest, vint16m2_t val) {
  return vset_v_i16m2_i16m8(dest, 2, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i16m4_i16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VAL:%.*]], i64 16)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vset_v_i16m4_i16m8(vint16m8_t dest, vint16m4_t val) {
  return vset_v_i16m4_i16m8(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vset_v_i32m1_i32m2(vint32m2_t dest, vint32m1_t val) {
  return vset_v_i32m1_i32m2(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 6)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vset_v_i32m1_i32m4(vint32m4_t dest, vint32m1_t val) {
  return vset_v_i32m1_i32m4(dest, 3, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i32m2_i32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 4)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vset_v_i32m2_i32m4(vint32m4_t dest, vint32m2_t val) {
  return vset_v_i32m2_i32m4(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 14)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vset_v_i32m1_i32m8(vint32m8_t dest, vint32m1_t val) {
  return vset_v_i32m1_i32m8(dest, 7, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i32m2_i32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 8)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vset_v_i32m2_i32m8(vint32m8_t dest, vint32m2_t val) {
  return vset_v_i32m2_i32m8(dest, 2, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i32m4_i32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VAL:%.*]], i64 8)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vset_v_i32m4_i32m8(vint32m8_t dest, vint32m4_t val) {
  return vset_v_i32m4_i32m8(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vset_v_i64m1_i64m2(vint64m2_t dest, vint64m1_t val) {
  return vset_v_i64m1_i64m2(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vset_v_i64m1_i64m4(vint64m4_t dest, vint64m1_t val) {
  return vset_v_i64m1_i64m4(dest, 3, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i64m2_i64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vset_v_i64m2_i64m4(vint64m4_t dest, vint64m2_t val) {
  return vset_v_i64m2_i64m4(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 7)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vset_v_i64m1_i64m8(vint64m8_t dest, vint64m1_t val) {
  return vset_v_i64m1_i64m8(dest, 7, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i64m2_i64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 4)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vset_v_i64m2_i64m8(vint64m8_t dest, vint64m2_t val) {
  return vset_v_i64m2_i64m8(dest, 2, val);
}

// CHECK-RV64-LABEL: @test_vset_v_i64m4_i64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VAL:%.*]], i64 4)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vset_v_i64m4_i64m8(vint64m8_t dest, vint64m4_t val) {
  return vset_v_i64m4_i64m8(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 8)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vset_v_u8m1_u8m2(vuint8m2_t dest, vuint8m1_t val) {
  return vset_v_u8m1_u8m2(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 24)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vset_v_u8m1_u8m4(vuint8m4_t dest, vuint8m1_t val) {
  return vset_v_u8m1_u8m4(dest, 3, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u8m2_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 16)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vset_v_u8m2_u8m4(vuint8m4_t dest, vuint8m2_t val) {
  return vset_v_u8m2_u8m4(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 56)
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vset_v_u8m1_u8m8(vuint8m8_t dest, vuint8m1_t val) {
  return vset_v_u8m1_u8m8(dest, 7, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u8m2_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 32)
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vset_v_u8m2_u8m8(vuint8m8_t dest, vuint8m2_t val) {
  return vset_v_u8m2_u8m8(dest, 2, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u8m4_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VAL:%.*]], i64 32)
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vset_v_u8m4_u8m8(vuint8m8_t dest, vuint8m4_t val) {
  return vset_v_u8m4_u8m8(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 4)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vset_v_u16m1_u16m2(vuint16m2_t dest, vuint16m1_t val) {
  return vset_v_u16m1_u16m2(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 12)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vset_v_u16m1_u16m4(vuint16m4_t dest, vuint16m1_t val) {
  return vset_v_u16m1_u16m4(dest, 3, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u16m2_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 8)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vset_v_u16m2_u16m4(vuint16m4_t dest, vuint16m2_t val) {
  return vset_v_u16m2_u16m4(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 28)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vset_v_u16m1_u16m8(vuint16m8_t dest, vuint16m1_t val) {
  return vset_v_u16m1_u16m8(dest, 7, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u16m2_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 16)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vset_v_u16m2_u16m8(vuint16m8_t dest, vuint16m2_t val) {
  return vset_v_u16m2_u16m8(dest, 2, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u16m4_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VAL:%.*]], i64 16)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vset_v_u16m4_u16m8(vuint16m8_t dest, vuint16m4_t val) {
  return vset_v_u16m4_u16m8(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vset_v_u32m1_u32m2(vuint32m2_t dest, vuint32m1_t val) {
  return vset_v_u32m1_u32m2(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 6)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vset_v_u32m1_u32m4(vuint32m4_t dest, vuint32m1_t val) {
  return vset_v_u32m1_u32m4(dest, 3, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u32m2_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 4)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vset_v_u32m2_u32m4(vuint32m4_t dest, vuint32m2_t val) {
  return vset_v_u32m2_u32m4(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 14)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vset_v_u32m1_u32m8(vuint32m8_t dest, vuint32m1_t val) {
  return vset_v_u32m1_u32m8(dest, 7, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u32m2_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 8)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vset_v_u32m2_u32m8(vuint32m8_t dest, vuint32m2_t val) {
  return vset_v_u32m2_u32m8(dest, 2, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u32m4_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VAL:%.*]], i64 8)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vset_v_u32m4_u32m8(vuint32m8_t dest, vuint32m4_t val) {
  return vset_v_u32m4_u32m8(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vset_v_u64m1_u64m2(vuint64m2_t dest, vuint64m1_t val) {
  return vset_v_u64m1_u64m2(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vset_v_u64m1_u64m4(vuint64m4_t dest, vuint64m1_t val) {
  return vset_v_u64m1_u64m4(dest, 3, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u64m2_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vset_v_u64m2_u64m4(vuint64m4_t dest, vuint64m2_t val) {
  return vset_v_u64m2_u64m4(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 7)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vset_v_u64m1_u64m8(vuint64m8_t dest, vuint64m1_t val) {
  return vset_v_u64m1_u64m8(dest, 7, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u64m2_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 4)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vset_v_u64m2_u64m8(vuint64m8_t dest, vuint64m2_t val) {
  return vset_v_u64m2_u64m8(dest, 2, val);
}

// CHECK-RV64-LABEL: @test_vset_v_u64m4_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VAL:%.*]], i64 4)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vset_v_u64m4_u64m8(vuint64m8_t dest, vuint64m4_t val) {
  return vset_v_u64m4_u64m8(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.nxv2f32(<vscale x 4 x float> [[DEST:%.*]], <vscale x 2 x float> [[VAL:%.*]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vset_v_f32m1_f32m2(vfloat32m2_t dest, vfloat32m1_t val) {
  return vset_v_f32m1_f32m2(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.insert.nxv8f32.nxv2f32(<vscale x 8 x float> [[DEST:%.*]], <vscale x 2 x float> [[VAL:%.*]], i64 6)
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vset_v_f32m1_f32m4(vfloat32m4_t dest, vfloat32m1_t val) {
  return vset_v_f32m1_f32m4(dest, 3, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f32m2_f32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.insert.nxv8f32.nxv4f32(<vscale x 8 x float> [[DEST:%.*]], <vscale x 4 x float> [[VAL:%.*]], i64 4)
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vset_v_f32m2_f32m4(vfloat32m4_t dest, vfloat32m2_t val) {
  return vset_v_f32m2_f32m4(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv2f32(<vscale x 16 x float> [[DEST:%.*]], <vscale x 2 x float> [[VAL:%.*]], i64 14)
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vset_v_f32m1_f32m8(vfloat32m8_t dest, vfloat32m1_t val) {
  return vset_v_f32m1_f32m8(dest, 7, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f32m2_f32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv4f32(<vscale x 16 x float> [[DEST:%.*]], <vscale x 4 x float> [[VAL:%.*]], i64 8)
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vset_v_f32m2_f32m8(vfloat32m8_t dest, vfloat32m2_t val) {
  return vset_v_f32m2_f32m8(dest, 2, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f32m4_f32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv8f32(<vscale x 16 x float> [[DEST:%.*]], <vscale x 8 x float> [[VAL:%.*]], i64 8)
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vset_v_f32m4_f32m8(vfloat32m8_t dest, vfloat32m4_t val) {
  return vset_v_f32m4_f32m8(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.nxv1f64(<vscale x 2 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vset_v_f64m1_f64m2(vfloat64m2_t dest, vfloat64m1_t val) {
  return vset_v_f64m1_f64m2(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.insert.nxv4f64.nxv1f64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vset_v_f64m1_f64m4(vfloat64m4_t dest, vfloat64m1_t val) {
  return vset_v_f64m1_f64m4(dest, 3, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f64m2_f64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.insert.nxv4f64.nxv2f64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 2 x double> [[VAL:%.*]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vset_v_f64m2_f64m4(vfloat64m4_t dest, vfloat64m2_t val) {
  return vset_v_f64m2_f64m4(dest, 1, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv1f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 7)
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vset_v_f64m1_f64m8(vfloat64m8_t dest, vfloat64m1_t val) {
  return vset_v_f64m1_f64m8(dest, 7, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f64m2_f64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv2f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 2 x double> [[VAL:%.*]], i64 4)
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vset_v_f64m2_f64m8(vfloat64m8_t dest, vfloat64m2_t val) {
  return vset_v_f64m2_f64m8(dest, 2, val);
}

// CHECK-RV64-LABEL: @test_vset_v_f64m4_f64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv4f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 4 x double> [[VAL:%.*]], i64 4)
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vset_v_f64m4_f64m8(vfloat64m8_t dest, vfloat64m4_t val) {
  return vset_v_f64m4_f64m8(dest, 1, val);
}

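// A hand-written usage sketch (illustrative only, no autogenerated checks;
// the function name is hypothetical): composing an LMUL=4 register group
// from two LMUL=2 halves with the same vset_v_i8m2_i8m4 intrinsic tested
// above. Index 0 selects the low half, index 1 the high half.
vint8m4_t test_vset_build_i8m4(vint8m4_t dest, vint8m2_t lo, vint8m2_t hi) {
  dest = vset_v_i8m2_i8m4(dest, 0, lo); // elements [0, vscale*16)
  dest = vset_v_i8m2_i8m4(dest, 1, hi); // elements [vscale*16, vscale*32)
  return dest;
}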