// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
7
8 //
9 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf8(
10 // CHECK-RV64-NEXT: entry:
11 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
12 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
13 //
test_vmerge_vvm_i8mf8(vbool64_t mask,vint8mf8_t op1,vint8mf8_t op2,size_t vl)14 vint8mf8_t test_vmerge_vvm_i8mf8(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2,
15 size_t vl) {
16 return vmerge_vvm_i8mf8(mask, op1, op2, vl);
17 }
18
19 //
20 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8(
21 // CHECK-RV64-NEXT: entry:
22 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
23 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
24 //
test_vmerge_vxm_i8mf8(vbool64_t mask,vint8mf8_t op1,int8_t op2,size_t vl)25 vint8mf8_t test_vmerge_vxm_i8mf8(vbool64_t mask, vint8mf8_t op1, int8_t op2,
26 size_t vl) {
27 return vmerge_vxm_i8mf8(mask, op1, op2, vl);
28 }
29
30 //
31 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4(
32 // CHECK-RV64-NEXT: entry:
33 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
34 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
35 //
test_vmerge_vvm_i8mf4(vbool32_t mask,vint8mf4_t op1,vint8mf4_t op2,size_t vl)36 vint8mf4_t test_vmerge_vvm_i8mf4(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2,
37 size_t vl) {
38 return vmerge_vvm_i8mf4(mask, op1, op2, vl);
39 }
40
41 //
42 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4(
43 // CHECK-RV64-NEXT: entry:
44 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
45 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
46 //
test_vmerge_vxm_i8mf4(vbool32_t mask,vint8mf4_t op1,int8_t op2,size_t vl)47 vint8mf4_t test_vmerge_vxm_i8mf4(vbool32_t mask, vint8mf4_t op1, int8_t op2,
48 size_t vl) {
49 return vmerge_vxm_i8mf4(mask, op1, op2, vl);
50 }
51
52 //
53 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2(
54 // CHECK-RV64-NEXT: entry:
55 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
56 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
57 //
test_vmerge_vvm_i8mf2(vbool16_t mask,vint8mf2_t op1,vint8mf2_t op2,size_t vl)58 vint8mf2_t test_vmerge_vvm_i8mf2(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
59 size_t vl) {
60 return vmerge_vvm_i8mf2(mask, op1, op2, vl);
61 }
62
63 //
64 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2(
65 // CHECK-RV64-NEXT: entry:
66 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
67 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
68 //
test_vmerge_vxm_i8mf2(vbool16_t mask,vint8mf2_t op1,int8_t op2,size_t vl)69 vint8mf2_t test_vmerge_vxm_i8mf2(vbool16_t mask, vint8mf2_t op1, int8_t op2,
70 size_t vl) {
71 return vmerge_vxm_i8mf2(mask, op1, op2, vl);
72 }
73
74 //
75 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1(
76 // CHECK-RV64-NEXT: entry:
77 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
78 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
79 //
test_vmerge_vvm_i8m1(vbool8_t mask,vint8m1_t op1,vint8m1_t op2,size_t vl)80 vint8m1_t test_vmerge_vvm_i8m1(vbool8_t mask, vint8m1_t op1, vint8m1_t op2,
81 size_t vl) {
82 return vmerge_vvm_i8m1(mask, op1, op2, vl);
83 }
84
85 //
86 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1(
87 // CHECK-RV64-NEXT: entry:
88 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
89 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
90 //
test_vmerge_vxm_i8m1(vbool8_t mask,vint8m1_t op1,int8_t op2,size_t vl)91 vint8m1_t test_vmerge_vxm_i8m1(vbool8_t mask, vint8m1_t op1, int8_t op2,
92 size_t vl) {
93 return vmerge_vxm_i8m1(mask, op1, op2, vl);
94 }
95
96 //
97 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2(
98 // CHECK-RV64-NEXT: entry:
99 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
100 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
101 //
test_vmerge_vvm_i8m2(vbool4_t mask,vint8m2_t op1,vint8m2_t op2,size_t vl)102 vint8m2_t test_vmerge_vvm_i8m2(vbool4_t mask, vint8m2_t op1, vint8m2_t op2,
103 size_t vl) {
104 return vmerge_vvm_i8m2(mask, op1, op2, vl);
105 }
106
107 //
108 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2(
109 // CHECK-RV64-NEXT: entry:
110 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
111 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
112 //
test_vmerge_vxm_i8m2(vbool4_t mask,vint8m2_t op1,int8_t op2,size_t vl)113 vint8m2_t test_vmerge_vxm_i8m2(vbool4_t mask, vint8m2_t op1, int8_t op2,
114 size_t vl) {
115 return vmerge_vxm_i8m2(mask, op1, op2, vl);
116 }
117
118 //
119 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4(
120 // CHECK-RV64-NEXT: entry:
121 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
122 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
123 //
test_vmerge_vvm_i8m4(vbool2_t mask,vint8m4_t op1,vint8m4_t op2,size_t vl)124 vint8m4_t test_vmerge_vvm_i8m4(vbool2_t mask, vint8m4_t op1, vint8m4_t op2,
125 size_t vl) {
126 return vmerge_vvm_i8m4(mask, op1, op2, vl);
127 }
128
129 //
130 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4(
131 // CHECK-RV64-NEXT: entry:
132 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
133 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
134 //
test_vmerge_vxm_i8m4(vbool2_t mask,vint8m4_t op1,int8_t op2,size_t vl)135 vint8m4_t test_vmerge_vxm_i8m4(vbool2_t mask, vint8m4_t op1, int8_t op2,
136 size_t vl) {
137 return vmerge_vxm_i8m4(mask, op1, op2, vl);
138 }
139
140 //
141 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8(
142 // CHECK-RV64-NEXT: entry:
143 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
144 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
145 //
test_vmerge_vvm_i8m8(vbool1_t mask,vint8m8_t op1,vint8m8_t op2,size_t vl)146 vint8m8_t test_vmerge_vvm_i8m8(vbool1_t mask, vint8m8_t op1, vint8m8_t op2,
147 size_t vl) {
148 return vmerge_vvm_i8m8(mask, op1, op2, vl);
149 }
150
151 //
152 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8(
153 // CHECK-RV64-NEXT: entry:
154 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
155 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
156 //
test_vmerge_vxm_i8m8(vbool1_t mask,vint8m8_t op1,int8_t op2,size_t vl)157 vint8m8_t test_vmerge_vxm_i8m8(vbool1_t mask, vint8m8_t op1, int8_t op2,
158 size_t vl) {
159 return vmerge_vxm_i8m8(mask, op1, op2, vl);
160 }
161
162 //
163 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4(
164 // CHECK-RV64-NEXT: entry:
165 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
166 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
167 //
test_vmerge_vvm_i16mf4(vbool64_t mask,vint16mf4_t op1,vint16mf4_t op2,size_t vl)168 vint16mf4_t test_vmerge_vvm_i16mf4(vbool64_t mask, vint16mf4_t op1,
169 vint16mf4_t op2, size_t vl) {
170 return vmerge_vvm_i16mf4(mask, op1, op2, vl);
171 }
172
173 //
174 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4(
175 // CHECK-RV64-NEXT: entry:
176 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
177 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
178 //
test_vmerge_vxm_i16mf4(vbool64_t mask,vint16mf4_t op1,int16_t op2,size_t vl)179 vint16mf4_t test_vmerge_vxm_i16mf4(vbool64_t mask, vint16mf4_t op1, int16_t op2,
180 size_t vl) {
181 return vmerge_vxm_i16mf4(mask, op1, op2, vl);
182 }
183
184 //
185 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2(
186 // CHECK-RV64-NEXT: entry:
187 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
188 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
189 //
test_vmerge_vvm_i16mf2(vbool32_t mask,vint16mf2_t op1,vint16mf2_t op2,size_t vl)190 vint16mf2_t test_vmerge_vvm_i16mf2(vbool32_t mask, vint16mf2_t op1,
191 vint16mf2_t op2, size_t vl) {
192 return vmerge_vvm_i16mf2(mask, op1, op2, vl);
193 }
194
195 //
196 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2(
197 // CHECK-RV64-NEXT: entry:
198 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
199 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
200 //
test_vmerge_vxm_i16mf2(vbool32_t mask,vint16mf2_t op1,int16_t op2,size_t vl)201 vint16mf2_t test_vmerge_vxm_i16mf2(vbool32_t mask, vint16mf2_t op1, int16_t op2,
202 size_t vl) {
203 return vmerge_vxm_i16mf2(mask, op1, op2, vl);
204 }
205
206 //
207 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1(
208 // CHECK-RV64-NEXT: entry:
209 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
210 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
211 //
test_vmerge_vvm_i16m1(vbool16_t mask,vint16m1_t op1,vint16m1_t op2,size_t vl)212 vint16m1_t test_vmerge_vvm_i16m1(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
213 size_t vl) {
214 return vmerge_vvm_i16m1(mask, op1, op2, vl);
215 }
216
217 //
218 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1(
219 // CHECK-RV64-NEXT: entry:
220 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
221 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
222 //
test_vmerge_vxm_i16m1(vbool16_t mask,vint16m1_t op1,int16_t op2,size_t vl)223 vint16m1_t test_vmerge_vxm_i16m1(vbool16_t mask, vint16m1_t op1, int16_t op2,
224 size_t vl) {
225 return vmerge_vxm_i16m1(mask, op1, op2, vl);
226 }
227
228 //
229 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2(
230 // CHECK-RV64-NEXT: entry:
231 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
232 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
233 //
test_vmerge_vvm_i16m2(vbool8_t mask,vint16m2_t op1,vint16m2_t op2,size_t vl)234 vint16m2_t test_vmerge_vvm_i16m2(vbool8_t mask, vint16m2_t op1, vint16m2_t op2,
235 size_t vl) {
236 return vmerge_vvm_i16m2(mask, op1, op2, vl);
237 }
238
239 //
240 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2(
241 // CHECK-RV64-NEXT: entry:
242 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
243 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
244 //
test_vmerge_vxm_i16m2(vbool8_t mask,vint16m2_t op1,int16_t op2,size_t vl)245 vint16m2_t test_vmerge_vxm_i16m2(vbool8_t mask, vint16m2_t op1, int16_t op2,
246 size_t vl) {
247 return vmerge_vxm_i16m2(mask, op1, op2, vl);
248 }
249
250 //
251 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4(
252 // CHECK-RV64-NEXT: entry:
253 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
254 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
255 //
test_vmerge_vvm_i16m4(vbool4_t mask,vint16m4_t op1,vint16m4_t op2,size_t vl)256 vint16m4_t test_vmerge_vvm_i16m4(vbool4_t mask, vint16m4_t op1, vint16m4_t op2,
257 size_t vl) {
258 return vmerge_vvm_i16m4(mask, op1, op2, vl);
259 }
260
261 //
262 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4(
263 // CHECK-RV64-NEXT: entry:
264 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
265 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
266 //
test_vmerge_vxm_i16m4(vbool4_t mask,vint16m4_t op1,int16_t op2,size_t vl)267 vint16m4_t test_vmerge_vxm_i16m4(vbool4_t mask, vint16m4_t op1, int16_t op2,
268 size_t vl) {
269 return vmerge_vxm_i16m4(mask, op1, op2, vl);
270 }
271
272 //
273 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8(
274 // CHECK-RV64-NEXT: entry:
275 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
276 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
277 //
test_vmerge_vvm_i16m8(vbool2_t mask,vint16m8_t op1,vint16m8_t op2,size_t vl)278 vint16m8_t test_vmerge_vvm_i16m8(vbool2_t mask, vint16m8_t op1, vint16m8_t op2,
279 size_t vl) {
280 return vmerge_vvm_i16m8(mask, op1, op2, vl);
281 }
282
283 //
284 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8(
285 // CHECK-RV64-NEXT: entry:
286 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
287 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
288 //
test_vmerge_vxm_i16m8(vbool2_t mask,vint16m8_t op1,int16_t op2,size_t vl)289 vint16m8_t test_vmerge_vxm_i16m8(vbool2_t mask, vint16m8_t op1, int16_t op2,
290 size_t vl) {
291 return vmerge_vxm_i16m8(mask, op1, op2, vl);
292 }
293
294 //
295 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2(
296 // CHECK-RV64-NEXT: entry:
297 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
298 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
299 //
test_vmerge_vvm_i32mf2(vbool64_t mask,vint32mf2_t op1,vint32mf2_t op2,size_t vl)300 vint32mf2_t test_vmerge_vvm_i32mf2(vbool64_t mask, vint32mf2_t op1,
301 vint32mf2_t op2, size_t vl) {
302 return vmerge_vvm_i32mf2(mask, op1, op2, vl);
303 }
304
305 //
306 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2(
307 // CHECK-RV64-NEXT: entry:
308 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
309 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
310 //
test_vmerge_vxm_i32mf2(vbool64_t mask,vint32mf2_t op1,int32_t op2,size_t vl)311 vint32mf2_t test_vmerge_vxm_i32mf2(vbool64_t mask, vint32mf2_t op1, int32_t op2,
312 size_t vl) {
313 return vmerge_vxm_i32mf2(mask, op1, op2, vl);
314 }
315
316 //
317 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1(
318 // CHECK-RV64-NEXT: entry:
319 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
320 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
321 //
test_vmerge_vvm_i32m1(vbool32_t mask,vint32m1_t op1,vint32m1_t op2,size_t vl)322 vint32m1_t test_vmerge_vvm_i32m1(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
323 size_t vl) {
324 return vmerge_vvm_i32m1(mask, op1, op2, vl);
325 }
326
327 //
328 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1(
329 // CHECK-RV64-NEXT: entry:
330 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
331 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
332 //
test_vmerge_vxm_i32m1(vbool32_t mask,vint32m1_t op1,int32_t op2,size_t vl)333 vint32m1_t test_vmerge_vxm_i32m1(vbool32_t mask, vint32m1_t op1, int32_t op2,
334 size_t vl) {
335 return vmerge_vxm_i32m1(mask, op1, op2, vl);
336 }
337
338 //
339 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2(
340 // CHECK-RV64-NEXT: entry:
341 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
342 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
343 //
test_vmerge_vvm_i32m2(vbool16_t mask,vint32m2_t op1,vint32m2_t op2,size_t vl)344 vint32m2_t test_vmerge_vvm_i32m2(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
345 size_t vl) {
346 return vmerge_vvm_i32m2(mask, op1, op2, vl);
347 }
348
349 //
350 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2(
351 // CHECK-RV64-NEXT: entry:
352 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
353 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
354 //
test_vmerge_vxm_i32m2(vbool16_t mask,vint32m2_t op1,int32_t op2,size_t vl)355 vint32m2_t test_vmerge_vxm_i32m2(vbool16_t mask, vint32m2_t op1, int32_t op2,
356 size_t vl) {
357 return vmerge_vxm_i32m2(mask, op1, op2, vl);
358 }
359
360 //
361 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4(
362 // CHECK-RV64-NEXT: entry:
363 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
364 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
365 //
test_vmerge_vvm_i32m4(vbool8_t mask,vint32m4_t op1,vint32m4_t op2,size_t vl)366 vint32m4_t test_vmerge_vvm_i32m4(vbool8_t mask, vint32m4_t op1, vint32m4_t op2,
367 size_t vl) {
368 return vmerge_vvm_i32m4(mask, op1, op2, vl);
369 }
370
371 //
372 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4(
373 // CHECK-RV64-NEXT: entry:
374 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
375 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
376 //
test_vmerge_vxm_i32m4(vbool8_t mask,vint32m4_t op1,int32_t op2,size_t vl)377 vint32m4_t test_vmerge_vxm_i32m4(vbool8_t mask, vint32m4_t op1, int32_t op2,
378 size_t vl) {
379 return vmerge_vxm_i32m4(mask, op1, op2, vl);
380 }
381
382 //
383 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8(
384 // CHECK-RV64-NEXT: entry:
385 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
386 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
387 //
test_vmerge_vvm_i32m8(vbool4_t mask,vint32m8_t op1,vint32m8_t op2,size_t vl)388 vint32m8_t test_vmerge_vvm_i32m8(vbool4_t mask, vint32m8_t op1, vint32m8_t op2,
389 size_t vl) {
390 return vmerge_vvm_i32m8(mask, op1, op2, vl);
391 }
392
393 //
394 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8(
395 // CHECK-RV64-NEXT: entry:
396 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
397 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
398 //
test_vmerge_vxm_i32m8(vbool4_t mask,vint32m8_t op1,int32_t op2,size_t vl)399 vint32m8_t test_vmerge_vxm_i32m8(vbool4_t mask, vint32m8_t op1, int32_t op2,
400 size_t vl) {
401 return vmerge_vxm_i32m8(mask, op1, op2, vl);
402 }
403
404 //
405 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1(
406 // CHECK-RV64-NEXT: entry:
407 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
408 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
409 //
test_vmerge_vvm_i64m1(vbool64_t mask,vint64m1_t op1,vint64m1_t op2,size_t vl)410 vint64m1_t test_vmerge_vvm_i64m1(vbool64_t mask, vint64m1_t op1, vint64m1_t op2,
411 size_t vl) {
412 return vmerge_vvm_i64m1(mask, op1, op2, vl);
413 }
414
415 //
416 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1(
417 // CHECK-RV64-NEXT: entry:
418 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
419 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
420 //
test_vmerge_vxm_i64m1(vbool64_t mask,vint64m1_t op1,int64_t op2,size_t vl)421 vint64m1_t test_vmerge_vxm_i64m1(vbool64_t mask, vint64m1_t op1, int64_t op2,
422 size_t vl) {
423 return vmerge_vxm_i64m1(mask, op1, op2, vl);
424 }
425
426 //
427 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2(
428 // CHECK-RV64-NEXT: entry:
429 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
430 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
431 //
test_vmerge_vvm_i64m2(vbool32_t mask,vint64m2_t op1,vint64m2_t op2,size_t vl)432 vint64m2_t test_vmerge_vvm_i64m2(vbool32_t mask, vint64m2_t op1, vint64m2_t op2,
433 size_t vl) {
434 return vmerge_vvm_i64m2(mask, op1, op2, vl);
435 }
436
437 //
438 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2(
439 // CHECK-RV64-NEXT: entry:
440 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
441 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
442 //
test_vmerge_vxm_i64m2(vbool32_t mask,vint64m2_t op1,int64_t op2,size_t vl)443 vint64m2_t test_vmerge_vxm_i64m2(vbool32_t mask, vint64m2_t op1, int64_t op2,
444 size_t vl) {
445 return vmerge_vxm_i64m2(mask, op1, op2, vl);
446 }
447
448 //
449 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4(
450 // CHECK-RV64-NEXT: entry:
451 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
452 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
453 //
test_vmerge_vvm_i64m4(vbool16_t mask,vint64m4_t op1,vint64m4_t op2,size_t vl)454 vint64m4_t test_vmerge_vvm_i64m4(vbool16_t mask, vint64m4_t op1, vint64m4_t op2,
455 size_t vl) {
456 return vmerge_vvm_i64m4(mask, op1, op2, vl);
457 }
458
459 //
460 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4(
461 // CHECK-RV64-NEXT: entry:
462 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
463 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
464 //
test_vmerge_vxm_i64m4(vbool16_t mask,vint64m4_t op1,int64_t op2,size_t vl)465 vint64m4_t test_vmerge_vxm_i64m4(vbool16_t mask, vint64m4_t op1, int64_t op2,
466 size_t vl) {
467 return vmerge_vxm_i64m4(mask, op1, op2, vl);
468 }
469
470 //
471 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8(
472 // CHECK-RV64-NEXT: entry:
473 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
474 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
475 //
test_vmerge_vvm_i64m8(vbool8_t mask,vint64m8_t op1,vint64m8_t op2,size_t vl)476 vint64m8_t test_vmerge_vvm_i64m8(vbool8_t mask, vint64m8_t op1, vint64m8_t op2,
477 size_t vl) {
478 return vmerge_vvm_i64m8(mask, op1, op2, vl);
479 }
480
481 //
482 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8(
483 // CHECK-RV64-NEXT: entry:
484 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
485 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
486 //
test_vmerge_vxm_i64m8(vbool8_t mask,vint64m8_t op1,int64_t op2,size_t vl)487 vint64m8_t test_vmerge_vxm_i64m8(vbool8_t mask, vint64m8_t op1, int64_t op2,
488 size_t vl) {
489 return vmerge_vxm_i64m8(mask, op1, op2, vl);
490 }
491
492 //
493 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8(
494 // CHECK-RV64-NEXT: entry:
495 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
496 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
497 //
test_vmerge_vvm_u8mf8(vbool64_t mask,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)498 vuint8mf8_t test_vmerge_vvm_u8mf8(vbool64_t mask, vuint8mf8_t op1,
499 vuint8mf8_t op2, size_t vl) {
500 return vmerge_vvm_u8mf8(mask, op1, op2, vl);
501 }
502
503 //
504 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8(
505 // CHECK-RV64-NEXT: entry:
506 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
507 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
508 //
test_vmerge_vxm_u8mf8(vbool64_t mask,vuint8mf8_t op1,uint8_t op2,size_t vl)509 vuint8mf8_t test_vmerge_vxm_u8mf8(vbool64_t mask, vuint8mf8_t op1, uint8_t op2,
510 size_t vl) {
511 return vmerge_vxm_u8mf8(mask, op1, op2, vl);
512 }
513
514 //
515 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4(
516 // CHECK-RV64-NEXT: entry:
517 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
518 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
519 //
test_vmerge_vvm_u8mf4(vbool32_t mask,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)520 vuint8mf4_t test_vmerge_vvm_u8mf4(vbool32_t mask, vuint8mf4_t op1,
521 vuint8mf4_t op2, size_t vl) {
522 return vmerge_vvm_u8mf4(mask, op1, op2, vl);
523 }
524
525 //
526 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4(
527 // CHECK-RV64-NEXT: entry:
528 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
529 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
530 //
test_vmerge_vxm_u8mf4(vbool32_t mask,vuint8mf4_t op1,uint8_t op2,size_t vl)531 vuint8mf4_t test_vmerge_vxm_u8mf4(vbool32_t mask, vuint8mf4_t op1, uint8_t op2,
532 size_t vl) {
533 return vmerge_vxm_u8mf4(mask, op1, op2, vl);
534 }
535
536 //
537 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2(
538 // CHECK-RV64-NEXT: entry:
539 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
540 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
541 //
test_vmerge_vvm_u8mf2(vbool16_t mask,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)542 vuint8mf2_t test_vmerge_vvm_u8mf2(vbool16_t mask, vuint8mf2_t op1,
543 vuint8mf2_t op2, size_t vl) {
544 return vmerge_vvm_u8mf2(mask, op1, op2, vl);
545 }
546
547 //
548 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2(
549 // CHECK-RV64-NEXT: entry:
550 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
551 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
552 //
test_vmerge_vxm_u8mf2(vbool16_t mask,vuint8mf2_t op1,uint8_t op2,size_t vl)553 vuint8mf2_t test_vmerge_vxm_u8mf2(vbool16_t mask, vuint8mf2_t op1, uint8_t op2,
554 size_t vl) {
555 return vmerge_vxm_u8mf2(mask, op1, op2, vl);
556 }
557
558 //
559 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1(
560 // CHECK-RV64-NEXT: entry:
561 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
562 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
563 //
test_vmerge_vvm_u8m1(vbool8_t mask,vuint8m1_t op1,vuint8m1_t op2,size_t vl)564 vuint8m1_t test_vmerge_vvm_u8m1(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2,
565 size_t vl) {
566 return vmerge_vvm_u8m1(mask, op1, op2, vl);
567 }
568
569 //
570 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1(
571 // CHECK-RV64-NEXT: entry:
572 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
573 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
574 //
test_vmerge_vxm_u8m1(vbool8_t mask,vuint8m1_t op1,uint8_t op2,size_t vl)575 vuint8m1_t test_vmerge_vxm_u8m1(vbool8_t mask, vuint8m1_t op1, uint8_t op2,
576 size_t vl) {
577 return vmerge_vxm_u8m1(mask, op1, op2, vl);
578 }
579
580 //
581 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2(
582 // CHECK-RV64-NEXT: entry:
583 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
584 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
585 //
test_vmerge_vvm_u8m2(vbool4_t mask,vuint8m2_t op1,vuint8m2_t op2,size_t vl)586 vuint8m2_t test_vmerge_vvm_u8m2(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2,
587 size_t vl) {
588 return vmerge_vvm_u8m2(mask, op1, op2, vl);
589 }
590
591 //
592 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2(
593 // CHECK-RV64-NEXT: entry:
594 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
595 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
596 //
test_vmerge_vxm_u8m2(vbool4_t mask,vuint8m2_t op1,uint8_t op2,size_t vl)597 vuint8m2_t test_vmerge_vxm_u8m2(vbool4_t mask, vuint8m2_t op1, uint8_t op2,
598 size_t vl) {
599 return vmerge_vxm_u8m2(mask, op1, op2, vl);
600 }
601
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
// Vector-vector form at LMUL=4 (nxv32i8).
vuint8m4_t test_vmerge_vvm_u8m4(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2,
                                size_t vl) {
  return vmerge_vvm_u8m4(mask, op1, op2, vl);
}
612
//
// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
// Vector-scalar form at LMUL=4 (nxv32i8, scalar i8 operand).
vuint8m4_t test_vmerge_vxm_u8m4(vbool2_t mask, vuint8m4_t op1, uint8_t op2,
                                size_t vl) {
  return vmerge_vxm_u8m4(mask, op1, op2, vl);
}
623
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
// Vector-vector form at LMUL=8 (nxv64i8).
vuint8m8_t test_vmerge_vvm_u8m8(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2,
                                size_t vl) {
  return vmerge_vvm_u8m8(mask, op1, op2, vl);
}
634
//
// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
// Vector-scalar form at LMUL=8 (nxv64i8, scalar i8 operand).
vuint8m8_t test_vmerge_vxm_u8m8(vbool1_t mask, vuint8m8_t op1, uint8_t op2,
                                size_t vl) {
  return vmerge_vxm_u8m8(mask, op1, op2, vl);
}
645
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
// u16 element type, fractional LMUL=1/4 (nxv1i16), vector-vector form.
vuint16mf4_t test_vmerge_vvm_u16mf4(vbool64_t mask, vuint16mf4_t op1,
                                    vuint16mf4_t op2, size_t vl) {
  return vmerge_vvm_u16mf4(mask, op1, op2, vl);
}
656
//
// CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
// u16 element type, fractional LMUL=1/4, scalar i16 operand.
vuint16mf4_t test_vmerge_vxm_u16mf4(vbool64_t mask, vuint16mf4_t op1,
                                    uint16_t op2, size_t vl) {
  return vmerge_vxm_u16mf4(mask, op1, op2, vl);
}
667
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
// u16 element type, fractional LMUL=1/2 (nxv2i16), vector-vector form.
vuint16mf2_t test_vmerge_vvm_u16mf2(vbool32_t mask, vuint16mf2_t op1,
                                    vuint16mf2_t op2, size_t vl) {
  return vmerge_vvm_u16mf2(mask, op1, op2, vl);
}
678
//
// CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
// u16 element type, fractional LMUL=1/2, scalar i16 operand.
vuint16mf2_t test_vmerge_vxm_u16mf2(vbool32_t mask, vuint16mf2_t op1,
                                    uint16_t op2, size_t vl) {
  return vmerge_vxm_u16mf2(mask, op1, op2, vl);
}
689
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
// u16 element type, LMUL=1 (nxv4i16), vector-vector form.
vuint16m1_t test_vmerge_vvm_u16m1(vbool16_t mask, vuint16m1_t op1,
                                  vuint16m1_t op2, size_t vl) {
  return vmerge_vvm_u16m1(mask, op1, op2, vl);
}
700
//
// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
// u16 element type, LMUL=1, scalar i16 operand.
vuint16m1_t test_vmerge_vxm_u16m1(vbool16_t mask, vuint16m1_t op1, uint16_t op2,
                                  size_t vl) {
  return vmerge_vxm_u16m1(mask, op1, op2, vl);
}
711
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// u16 element type, LMUL=2 (nxv8i16), vector-vector form.
vuint16m2_t test_vmerge_vvm_u16m2(vbool8_t mask, vuint16m2_t op1,
                                  vuint16m2_t op2, size_t vl) {
  return vmerge_vvm_u16m2(mask, op1, op2, vl);
}
722
//
// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// u16 element type, LMUL=2, scalar i16 operand.
vuint16m2_t test_vmerge_vxm_u16m2(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
                                  size_t vl) {
  return vmerge_vxm_u16m2(mask, op1, op2, vl);
}
733
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
// u16 element type, LMUL=4 (nxv16i16), vector-vector form.
vuint16m4_t test_vmerge_vvm_u16m4(vbool4_t mask, vuint16m4_t op1,
                                  vuint16m4_t op2, size_t vl) {
  return vmerge_vvm_u16m4(mask, op1, op2, vl);
}
744
//
// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
// u16 element type, LMUL=4, scalar i16 operand.
vuint16m4_t test_vmerge_vxm_u16m4(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
                                  size_t vl) {
  return vmerge_vxm_u16m4(mask, op1, op2, vl);
}
755
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
// u16 element type, LMUL=8 (nxv32i16), vector-vector form.
vuint16m8_t test_vmerge_vvm_u16m8(vbool2_t mask, vuint16m8_t op1,
                                  vuint16m8_t op2, size_t vl) {
  return vmerge_vvm_u16m8(mask, op1, op2, vl);
}
766
//
// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
// u16 element type, LMUL=8, scalar i16 operand.
vuint16m8_t test_vmerge_vxm_u16m8(vbool2_t mask, vuint16m8_t op1, uint16_t op2,
                                  size_t vl) {
  return vmerge_vxm_u16m8(mask, op1, op2, vl);
}
777
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
// u32 element type, fractional LMUL=1/2 (nxv1i32), vector-vector form.
vuint32mf2_t test_vmerge_vvm_u32mf2(vbool64_t mask, vuint32mf2_t op1,
                                    vuint32mf2_t op2, size_t vl) {
  return vmerge_vvm_u32mf2(mask, op1, op2, vl);
}
788
//
// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
// u32 element type, fractional LMUL=1/2, scalar i32 operand.
vuint32mf2_t test_vmerge_vxm_u32mf2(vbool64_t mask, vuint32mf2_t op1,
                                    uint32_t op2, size_t vl) {
  return vmerge_vxm_u32mf2(mask, op1, op2, vl);
}
799
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
// u32 element type, LMUL=1 (nxv2i32), vector-vector form.
vuint32m1_t test_vmerge_vvm_u32m1(vbool32_t mask, vuint32m1_t op1,
                                  vuint32m1_t op2, size_t vl) {
  return vmerge_vvm_u32m1(mask, op1, op2, vl);
}
810
//
// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
// u32 element type, LMUL=1, scalar i32 operand.
vuint32m1_t test_vmerge_vxm_u32m1(vbool32_t mask, vuint32m1_t op1, uint32_t op2,
                                  size_t vl) {
  return vmerge_vxm_u32m1(mask, op1, op2, vl);
}
821
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// u32 element type, LMUL=2 (nxv4i32), vector-vector form.
vuint32m2_t test_vmerge_vvm_u32m2(vbool16_t mask, vuint32m2_t op1,
                                  vuint32m2_t op2, size_t vl) {
  return vmerge_vvm_u32m2(mask, op1, op2, vl);
}
832
//
// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// u32 element type, LMUL=2, scalar i32 operand.
vuint32m2_t test_vmerge_vxm_u32m2(vbool16_t mask, vuint32m2_t op1, uint32_t op2,
                                  size_t vl) {
  return vmerge_vxm_u32m2(mask, op1, op2, vl);
}
843
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
// u32 element type, LMUL=4 (nxv8i32), vector-vector form.
vuint32m4_t test_vmerge_vvm_u32m4(vbool8_t mask, vuint32m4_t op1,
                                  vuint32m4_t op2, size_t vl) {
  return vmerge_vvm_u32m4(mask, op1, op2, vl);
}
854
//
// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
// u32 element type, LMUL=4, scalar i32 operand.
vuint32m4_t test_vmerge_vxm_u32m4(vbool8_t mask, vuint32m4_t op1, uint32_t op2,
                                  size_t vl) {
  return vmerge_vxm_u32m4(mask, op1, op2, vl);
}
865
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
// u32 element type, LMUL=8 (nxv16i32), vector-vector form.
vuint32m8_t test_vmerge_vvm_u32m8(vbool4_t mask, vuint32m8_t op1,
                                  vuint32m8_t op2, size_t vl) {
  return vmerge_vvm_u32m8(mask, op1, op2, vl);
}
876
//
// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
// u32 element type, LMUL=8, scalar i32 operand.
vuint32m8_t test_vmerge_vxm_u32m8(vbool4_t mask, vuint32m8_t op1, uint32_t op2,
                                  size_t vl) {
  return vmerge_vxm_u32m8(mask, op1, op2, vl);
}
887
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
// u64 element type, LMUL=1 (nxv1i64), vector-vector form.
vuint64m1_t test_vmerge_vvm_u64m1(vbool64_t mask, vuint64m1_t op1,
                                  vuint64m1_t op2, size_t vl) {
  return vmerge_vvm_u64m1(mask, op1, op2, vl);
}
898
//
// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
// u64 element type, LMUL=1, scalar i64 operand.
vuint64m1_t test_vmerge_vxm_u64m1(vbool64_t mask, vuint64m1_t op1, uint64_t op2,
                                  size_t vl) {
  return vmerge_vxm_u64m1(mask, op1, op2, vl);
}
909
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// u64 element type, LMUL=2 (nxv2i64), vector-vector form.
vuint64m2_t test_vmerge_vvm_u64m2(vbool32_t mask, vuint64m2_t op1,
                                  vuint64m2_t op2, size_t vl) {
  return vmerge_vvm_u64m2(mask, op1, op2, vl);
}
920
//
// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// u64 element type, LMUL=2, scalar i64 operand.
vuint64m2_t test_vmerge_vxm_u64m2(vbool32_t mask, vuint64m2_t op1, uint64_t op2,
                                  size_t vl) {
  return vmerge_vxm_u64m2(mask, op1, op2, vl);
}
931
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
// u64 element type, LMUL=4 (nxv4i64), vector-vector form.
vuint64m4_t test_vmerge_vvm_u64m4(vbool16_t mask, vuint64m4_t op1,
                                  vuint64m4_t op2, size_t vl) {
  return vmerge_vvm_u64m4(mask, op1, op2, vl);
}
942
//
// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
// u64 element type, LMUL=4, scalar i64 operand.
vuint64m4_t test_vmerge_vxm_u64m4(vbool16_t mask, vuint64m4_t op1, uint64_t op2,
                                  size_t vl) {
  return vmerge_vxm_u64m4(mask, op1, op2, vl);
}
953
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
// u64 element type, LMUL=8 (nxv8i64), vector-vector form.
vuint64m8_t test_vmerge_vvm_u64m8(vbool8_t mask, vuint64m8_t op1,
                                  vuint64m8_t op2, size_t vl) {
  return vmerge_vvm_u64m8(mask, op1, op2, vl);
}
964
//
// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
// u64 element type, LMUL=8, scalar i64 operand.
vuint64m8_t test_vmerge_vxm_u64m8(vbool8_t mask, vuint64m8_t op1, uint64_t op2,
                                  size_t vl) {
  return vmerge_vxm_u64m8(mask, op1, op2, vl);
}
975
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// Floating-point variant: note it lowers to @llvm.riscv.vfmerge (not vmerge).
vfloat32mf2_t test_vmerge_vvm_f32mf2(vbool64_t mask, vfloat32mf2_t op1,
                                     vfloat32mf2_t op2, size_t vl) {
  return vmerge_vvm_f32mf2(mask, op1, op2, vl);
}
986
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// f32 element type, LMUL=1 (nxv2f32); lowers to @llvm.riscv.vfmerge.
vfloat32m1_t test_vmerge_vvm_f32m1(vbool32_t mask, vfloat32m1_t op1,
                                   vfloat32m1_t op2, size_t vl) {
  return vmerge_vvm_f32m1(mask, op1, op2, vl);
}
997
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// f32 element type, LMUL=2 (nxv4f32); lowers to @llvm.riscv.vfmerge.
vfloat32m2_t test_vmerge_vvm_f32m2(vbool16_t mask, vfloat32m2_t op1,
                                   vfloat32m2_t op2, size_t vl) {
  return vmerge_vvm_f32m2(mask, op1, op2, vl);
}
1008
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// f32 element type, LMUL=4 (nxv8f32); lowers to @llvm.riscv.vfmerge.
vfloat32m4_t test_vmerge_vvm_f32m4(vbool8_t mask, vfloat32m4_t op1,
                                   vfloat32m4_t op2, size_t vl) {
  return vmerge_vvm_f32m4(mask, op1, op2, vl);
}
1019
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// f32 element type, LMUL=8 (nxv16f32); lowers to @llvm.riscv.vfmerge.
vfloat32m8_t test_vmerge_vvm_f32m8(vbool4_t mask, vfloat32m8_t op1,
                                   vfloat32m8_t op2, size_t vl) {
  return vmerge_vvm_f32m8(mask, op1, op2, vl);
}
1030
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// f64 element type, LMUL=1 (nxv1f64); lowers to @llvm.riscv.vfmerge.
vfloat64m1_t test_vmerge_vvm_f64m1(vbool64_t mask, vfloat64m1_t op1,
                                   vfloat64m1_t op2, size_t vl) {
  return vmerge_vvm_f64m1(mask, op1, op2, vl);
}
1041
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// f64 element type, LMUL=2 (nxv2f64); lowers to @llvm.riscv.vfmerge.
vfloat64m2_t test_vmerge_vvm_f64m2(vbool32_t mask, vfloat64m2_t op1,
                                   vfloat64m2_t op2, size_t vl) {
  return vmerge_vvm_f64m2(mask, op1, op2, vl);
}
1052
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// f64 element type, LMUL=4 (nxv4f64); lowers to @llvm.riscv.vfmerge.
vfloat64m4_t test_vmerge_vvm_f64m4(vbool16_t mask, vfloat64m4_t op1,
                                   vfloat64m4_t op2, size_t vl) {
  return vmerge_vvm_f64m4(mask, op1, op2, vl);
}
1063
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// f64 element type, LMUL=8 (nxv8f64); lowers to @llvm.riscv.vfmerge.
vfloat64m8_t test_vmerge_vvm_f64m8(vbool8_t mask, vfloat64m8_t op1,
                                   vfloat64m8_t op2, size_t vl) {
  return vmerge_vvm_f64m8(mask, op1, op2, vl);
}
1074