1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
2 // REQUIRES: riscv-registered-target
3 // RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
4 
5 #include <riscv_vector.h>
6 
7 //
8 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8(
9 // CHECK-RV64-NEXT:  entry:
10 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
11 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
12 //
test_vnclip_wv_i8mf8(vint16mf4_t op1,vuint8mf8_t shift,size_t vl)13 vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
14   return vnclip_wv_i8mf8(op1, shift, vl);
15 }
16 
17 //
18 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8(
19 // CHECK-RV64-NEXT:  entry:
20 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
21 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
22 //
test_vnclip_wx_i8mf8(vint16mf4_t op1,size_t shift,size_t vl)23 vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
24   return vnclip_wx_i8mf8(op1, shift, vl);
25 }
26 
27 //
28 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4(
29 // CHECK-RV64-NEXT:  entry:
30 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
31 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
32 //
test_vnclip_wv_i8mf4(vint16mf2_t op1,vuint8mf4_t shift,size_t vl)33 vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
34   return vnclip_wv_i8mf4(op1, shift, vl);
35 }
36 
37 //
38 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4(
39 // CHECK-RV64-NEXT:  entry:
40 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
41 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
42 //
test_vnclip_wx_i8mf4(vint16mf2_t op1,size_t shift,size_t vl)43 vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
44   return vnclip_wx_i8mf4(op1, shift, vl);
45 }
46 
47 //
48 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2(
49 // CHECK-RV64-NEXT:  entry:
50 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
51 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
52 //
test_vnclip_wv_i8mf2(vint16m1_t op1,vuint8mf2_t shift,size_t vl)53 vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
54   return vnclip_wv_i8mf2(op1, shift, vl);
55 }
56 
57 //
58 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2(
59 // CHECK-RV64-NEXT:  entry:
60 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
61 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
62 //
test_vnclip_wx_i8mf2(vint16m1_t op1,size_t shift,size_t vl)63 vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
64   return vnclip_wx_i8mf2(op1, shift, vl);
65 }
66 
67 //
68 // CHECK-RV64-LABEL: @test_vnclip_wv_i8m1(
69 // CHECK-RV64-NEXT:  entry:
70 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
71 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
72 //
test_vnclip_wv_i8m1(vint16m2_t op1,vuint8m1_t shift,size_t vl)73 vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
74   return vnclip_wv_i8m1(op1, shift, vl);
75 }
76 
77 //
78 // CHECK-RV64-LABEL: @test_vnclip_wx_i8m1(
79 // CHECK-RV64-NEXT:  entry:
80 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
81 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
82 //
test_vnclip_wx_i8m1(vint16m2_t op1,size_t shift,size_t vl)83 vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
84   return vnclip_wx_i8m1(op1, shift, vl);
85 }
86 
87 //
88 // CHECK-RV64-LABEL: @test_vnclip_wv_i8m2(
89 // CHECK-RV64-NEXT:  entry:
90 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
91 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
92 //
test_vnclip_wv_i8m2(vint16m4_t op1,vuint8m2_t shift,size_t vl)93 vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
94   return vnclip_wv_i8m2(op1, shift, vl);
95 }
96 
97 //
98 // CHECK-RV64-LABEL: @test_vnclip_wx_i8m2(
99 // CHECK-RV64-NEXT:  entry:
100 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
101 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
102 //
test_vnclip_wx_i8m2(vint16m4_t op1,size_t shift,size_t vl)103 vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
104   return vnclip_wx_i8m2(op1, shift, vl);
105 }
106 
107 //
108 // CHECK-RV64-LABEL: @test_vnclip_wv_i8m4(
109 // CHECK-RV64-NEXT:  entry:
110 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
111 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
112 //
test_vnclip_wv_i8m4(vint16m8_t op1,vuint8m4_t shift,size_t vl)113 vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
114   return vnclip_wv_i8m4(op1, shift, vl);
115 }
116 
117 //
118 // CHECK-RV64-LABEL: @test_vnclip_wx_i8m4(
119 // CHECK-RV64-NEXT:  entry:
120 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
121 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
122 //
test_vnclip_wx_i8m4(vint16m8_t op1,size_t shift,size_t vl)123 vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
124   return vnclip_wx_i8m4(op1, shift, vl);
125 }
126 
127 //
128 // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4(
129 // CHECK-RV64-NEXT:  entry:
130 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
131 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
132 //
test_vnclip_wv_i16mf4(vint32mf2_t op1,vuint16mf4_t shift,size_t vl)133 vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift,
134                                   size_t vl) {
135   return vnclip_wv_i16mf4(op1, shift, vl);
136 }
137 
138 //
139 // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4(
140 // CHECK-RV64-NEXT:  entry:
141 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
142 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
143 //
test_vnclip_wx_i16mf4(vint32mf2_t op1,size_t shift,size_t vl)144 vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
145   return vnclip_wx_i16mf4(op1, shift, vl);
146 }
147 
148 //
149 // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2(
150 // CHECK-RV64-NEXT:  entry:
151 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
152 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
153 //
test_vnclip_wv_i16mf2(vint32m1_t op1,vuint16mf2_t shift,size_t vl)154 vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift,
155                                   size_t vl) {
156   return vnclip_wv_i16mf2(op1, shift, vl);
157 }
158 
159 //
160 // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2(
161 // CHECK-RV64-NEXT:  entry:
162 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
163 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
164 //
test_vnclip_wx_i16mf2(vint32m1_t op1,size_t shift,size_t vl)165 vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
166   return vnclip_wx_i16mf2(op1, shift, vl);
167 }
168 
169 //
170 // CHECK-RV64-LABEL: @test_vnclip_wv_i16m1(
171 // CHECK-RV64-NEXT:  entry:
172 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
173 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
174 //
test_vnclip_wv_i16m1(vint32m2_t op1,vuint16m1_t shift,size_t vl)175 vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
176   return vnclip_wv_i16m1(op1, shift, vl);
177 }
178 
179 //
180 // CHECK-RV64-LABEL: @test_vnclip_wx_i16m1(
181 // CHECK-RV64-NEXT:  entry:
182 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
183 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
184 //
test_vnclip_wx_i16m1(vint32m2_t op1,size_t shift,size_t vl)185 vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
186   return vnclip_wx_i16m1(op1, shift, vl);
187 }
188 
189 //
190 // CHECK-RV64-LABEL: @test_vnclip_wv_i16m2(
191 // CHECK-RV64-NEXT:  entry:
192 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
193 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
194 //
test_vnclip_wv_i16m2(vint32m4_t op1,vuint16m2_t shift,size_t vl)195 vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
196   return vnclip_wv_i16m2(op1, shift, vl);
197 }
198 
199 //
200 // CHECK-RV64-LABEL: @test_vnclip_wx_i16m2(
201 // CHECK-RV64-NEXT:  entry:
202 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
203 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
204 //
test_vnclip_wx_i16m2(vint32m4_t op1,size_t shift,size_t vl)205 vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
206   return vnclip_wx_i16m2(op1, shift, vl);
207 }
208 
209 //
210 // CHECK-RV64-LABEL: @test_vnclip_wv_i16m4(
211 // CHECK-RV64-NEXT:  entry:
212 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
213 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
214 //
test_vnclip_wv_i16m4(vint32m8_t op1,vuint16m4_t shift,size_t vl)215 vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
216   return vnclip_wv_i16m4(op1, shift, vl);
217 }
218 
219 //
220 // CHECK-RV64-LABEL: @test_vnclip_wx_i16m4(
221 // CHECK-RV64-NEXT:  entry:
222 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
223 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
224 //
test_vnclip_wx_i16m4(vint32m8_t op1,size_t shift,size_t vl)225 vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
226   return vnclip_wx_i16m4(op1, shift, vl);
227 }
228 
229 //
230 // CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2(
231 // CHECK-RV64-NEXT:  entry:
232 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
233 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
234 //
test_vnclip_wv_i32mf2(vint64m1_t op1,vuint32mf2_t shift,size_t vl)235 vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift,
236                                   size_t vl) {
237   return vnclip_wv_i32mf2(op1, shift, vl);
238 }
239 
240 //
241 // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2(
242 // CHECK-RV64-NEXT:  entry:
243 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
244 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
245 //
test_vnclip_wx_i32mf2(vint64m1_t op1,size_t shift,size_t vl)246 vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
247   return vnclip_wx_i32mf2(op1, shift, vl);
248 }
249 
250 //
251 // CHECK-RV64-LABEL: @test_vnclip_wv_i32m1(
252 // CHECK-RV64-NEXT:  entry:
253 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
254 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
255 //
test_vnclip_wv_i32m1(vint64m2_t op1,vuint32m1_t shift,size_t vl)256 vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
257   return vnclip_wv_i32m1(op1, shift, vl);
258 }
259 
260 //
261 // CHECK-RV64-LABEL: @test_vnclip_wx_i32m1(
262 // CHECK-RV64-NEXT:  entry:
263 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
264 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
265 //
test_vnclip_wx_i32m1(vint64m2_t op1,size_t shift,size_t vl)266 vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
267   return vnclip_wx_i32m1(op1, shift, vl);
268 }
269 
270 //
271 // CHECK-RV64-LABEL: @test_vnclip_wv_i32m2(
272 // CHECK-RV64-NEXT:  entry:
273 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
274 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
275 //
test_vnclip_wv_i32m2(vint64m4_t op1,vuint32m2_t shift,size_t vl)276 vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
277   return vnclip_wv_i32m2(op1, shift, vl);
278 }
279 
280 //
281 // CHECK-RV64-LABEL: @test_vnclip_wx_i32m2(
282 // CHECK-RV64-NEXT:  entry:
283 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
284 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
285 //
test_vnclip_wx_i32m2(vint64m4_t op1,size_t shift,size_t vl)286 vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
287   return vnclip_wx_i32m2(op1, shift, vl);
288 }
289 
290 //
291 // CHECK-RV64-LABEL: @test_vnclip_wv_i32m4(
292 // CHECK-RV64-NEXT:  entry:
293 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
294 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
295 //
test_vnclip_wv_i32m4(vint64m8_t op1,vuint32m4_t shift,size_t vl)296 vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
297   return vnclip_wv_i32m4(op1, shift, vl);
298 }
299 
300 //
301 // CHECK-RV64-LABEL: @test_vnclip_wx_i32m4(
302 // CHECK-RV64-NEXT:  entry:
303 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
304 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
305 //
test_vnclip_wx_i32m4(vint64m8_t op1,size_t shift,size_t vl)306 vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
307   return vnclip_wx_i32m4(op1, shift, vl);
308 }
309 
310 //
311 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8(
312 // CHECK-RV64-NEXT:  entry:
313 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
314 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
315 //
vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift,
                                  size_t vl) {
  return vnclipu_wv_u8mf8(op1, shift, vl);
}
320 
321 //
322 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8(
323 // CHECK-RV64-NEXT:  entry:
324 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
325 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
326 //
test_vnclipu_wx_u8mf8(vuint16mf4_t op1,size_t shift,size_t vl)327 vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
328   return vnclipu_wx_u8mf8(op1, shift, vl);
329 }
330 
331 //
332 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4(
333 // CHECK-RV64-NEXT:  entry:
334 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
335 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
336 //
vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift,
                                  size_t vl) {
  return vnclipu_wv_u8mf4(op1, shift, vl);
}
341 
342 //
343 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4(
344 // CHECK-RV64-NEXT:  entry:
345 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
346 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
347 //
test_vnclipu_wx_u8mf4(vuint16mf2_t op1,size_t shift,size_t vl)348 vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
349   return vnclipu_wx_u8mf4(op1, shift, vl);
350 }
351 
352 //
353 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2(
354 // CHECK-RV64-NEXT:  entry:
355 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
356 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
357 //
vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift,
                                  size_t vl) {
  return vnclipu_wv_u8mf2(op1, shift, vl);
}
362 
363 //
364 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2(
365 // CHECK-RV64-NEXT:  entry:
366 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
367 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
368 //
test_vnclipu_wx_u8mf2(vuint16m1_t op1,size_t shift,size_t vl)369 vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
370   return vnclipu_wx_u8mf2(op1, shift, vl);
371 }
372 
373 //
374 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1(
375 // CHECK-RV64-NEXT:  entry:
376 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
377 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
378 //
vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
  return vnclipu_wv_u8m1(op1, shift, vl);
}
382 
383 //
384 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1(
385 // CHECK-RV64-NEXT:  entry:
386 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
387 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
388 //
test_vnclipu_wx_u8m1(vuint16m2_t op1,size_t shift,size_t vl)389 vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
390   return vnclipu_wx_u8m1(op1, shift, vl);
391 }
392 
393 //
394 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2(
395 // CHECK-RV64-NEXT:  entry:
396 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
397 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
398 //
vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
  return vnclipu_wv_u8m2(op1, shift, vl);
}
402 
403 //
404 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2(
405 // CHECK-RV64-NEXT:  entry:
406 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
407 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
408 //
test_vnclipu_wx_u8m2(vuint16m4_t op1,size_t shift,size_t vl)409 vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
410   return vnclipu_wx_u8m2(op1, shift, vl);
411 }
412 
413 //
414 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4(
415 // CHECK-RV64-NEXT:  entry:
416 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
417 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
418 //
vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
  return vnclipu_wv_u8m4(op1, shift, vl);
}
422 
423 //
424 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4(
425 // CHECK-RV64-NEXT:  entry:
426 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
427 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
428 //
test_vnclipu_wx_u8m4(vuint16m8_t op1,size_t shift,size_t vl)429 vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
430   return vnclipu_wx_u8m4(op1, shift, vl);
431 }
432 
433 //
434 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4(
435 // CHECK-RV64-NEXT:  entry:
436 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
437 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
438 //
vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift,
                                    size_t vl) {
  return vnclipu_wv_u16mf4(op1, shift, vl);
}
443 
444 //
445 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4(
446 // CHECK-RV64-NEXT:  entry:
447 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
448 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
449 //
test_vnclipu_wx_u16mf4(vuint32mf2_t op1,size_t shift,size_t vl)450 vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
451   return vnclipu_wx_u16mf4(op1, shift, vl);
452 }
453 
454 //
455 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2(
456 // CHECK-RV64-NEXT:  entry:
457 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
458 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
459 //
vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift,
                                    size_t vl) {
  return vnclipu_wv_u16mf2(op1, shift, vl);
}
464 
465 //
466 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2(
467 // CHECK-RV64-NEXT:  entry:
468 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
469 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
470 //
test_vnclipu_wx_u16mf2(vuint32m1_t op1,size_t shift,size_t vl)471 vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
472   return vnclipu_wx_u16mf2(op1, shift, vl);
473 }
474 
475 //
476 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1(
477 // CHECK-RV64-NEXT:  entry:
478 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
479 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
480 //
vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift,
                                  size_t vl) {
  return vnclipu_wv_u16m1(op1, shift, vl);
}
485 
486 //
487 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1(
488 // CHECK-RV64-NEXT:  entry:
489 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
490 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
491 //
test_vnclipu_wx_u16m1(vuint32m2_t op1,size_t shift,size_t vl)492 vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
493   return vnclipu_wx_u16m1(op1, shift, vl);
494 }
495 
496 //
497 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2(
498 // CHECK-RV64-NEXT:  entry:
499 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
500 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
501 //
vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift,
                                  size_t vl) {
  return vnclipu_wv_u16m2(op1, shift, vl);
}
506 
507 //
508 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2(
509 // CHECK-RV64-NEXT:  entry:
510 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
511 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
512 //
test_vnclipu_wx_u16m2(vuint32m4_t op1,size_t shift,size_t vl)513 vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
514   return vnclipu_wx_u16m2(op1, shift, vl);
515 }
516 
517 //
518 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4(
519 // CHECK-RV64-NEXT:  entry:
520 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
521 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
522 //
vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift,
                                  size_t vl) {
  return vnclipu_wv_u16m4(op1, shift, vl);
}
527 
528 //
529 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4(
530 // CHECK-RV64-NEXT:  entry:
531 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
532 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
533 //
test_vnclipu_wx_u16m4(vuint32m8_t op1,size_t shift,size_t vl)534 vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
535   return vnclipu_wx_u16m4(op1, shift, vl);
536 }
537 
538 //
539 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2(
540 // CHECK-RV64-NEXT:  entry:
541 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
542 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
543 //
vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift,
                                    size_t vl) {
  return vnclipu_wv_u32mf2(op1, shift, vl);
}
548 
549 //
550 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2(
551 // CHECK-RV64-NEXT:  entry:
552 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
553 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
554 //
test_vnclipu_wx_u32mf2(vuint64m1_t op1,size_t shift,size_t vl)555 vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
556   return vnclipu_wx_u32mf2(op1, shift, vl);
557 }
558 
559 //
560 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1(
561 // CHECK-RV64-NEXT:  entry:
562 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
563 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
564 //
vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift,
                                  size_t vl) {
  return vnclipu_wv_u32m1(op1, shift, vl);
}
569 
570 //
571 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1(
572 // CHECK-RV64-NEXT:  entry:
573 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
574 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
575 //
test_vnclipu_wx_u32m1(vuint64m2_t op1,size_t shift,size_t vl)576 vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
577   return vnclipu_wx_u32m1(op1, shift, vl);
578 }
579 
580 //
581 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2(
582 // CHECK-RV64-NEXT:  entry:
583 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
584 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
585 //
vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift,
                                  size_t vl) {
  return vnclipu_wv_u32m2(op1, shift, vl);
}
590 
591 //
592 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2(
593 // CHECK-RV64-NEXT:  entry:
594 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
595 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
596 //
// Forwards to the vnclipu_wx_u32m2 intrinsic (scalar-shift operand); the
// autogenerated CHECK-RV64 lines above pin the expected LLVM IR lowering.
vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
  return vnclipu_wx_u32m2(op1, shift, vl);
}
600 
601 //
602 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4(
603 // CHECK-RV64-NEXT:  entry:
604 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
605 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
606 //
// Forwards to the vnclipu_wv_u32m4 intrinsic (vector-shift operand); the
// autogenerated CHECK-RV64 lines above pin the expected LLVM IR lowering.
vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift,
                                  size_t vl) {
  return vnclipu_wv_u32m4(op1, shift, vl);
}
611 
612 //
613 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4(
614 // CHECK-RV64-NEXT:  entry:
615 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
616 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
617 //
// Forwards to the vnclipu_wx_u32m4 intrinsic (scalar-shift operand); the
// autogenerated CHECK-RV64 lines above pin the expected LLVM IR lowering.
vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) {
  return vnclipu_wx_u32m4(op1, shift, vl);
}
621 
622 //
623 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8_m(
624 // CHECK-RV64-NEXT:  entry:
625 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
626 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
627 //
// Forwards to the masked vnclip_wv_i8mf8_m intrinsic (mask + maskedoff
// operands); the autogenerated CHECK-RV64 lines above pin the expected IR.
vint8mf8_t test_vnclip_wv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
                                  vint16mf4_t op1, vuint8mf8_t shift,
                                  size_t vl) {
  return vnclip_wv_i8mf8_m(mask, maskedoff, op1, shift, vl);
}
633 
634 //
635 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8_m(
636 // CHECK-RV64-NEXT:  entry:
637 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
638 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
639 //
// Forwards to the masked vnclip_wx_i8mf8_m intrinsic (scalar shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vint8mf8_t test_vnclip_wx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
                                  vint16mf4_t op1, size_t shift, size_t vl) {
  return vnclip_wx_i8mf8_m(mask, maskedoff, op1, shift, vl);
}
644 
645 //
646 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4_m(
647 // CHECK-RV64-NEXT:  entry:
648 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
649 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
650 //
// Forwards to the masked vnclip_wv_i8mf4_m intrinsic (vector shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vint8mf4_t test_vnclip_wv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
                                  vint16mf2_t op1, vuint8mf4_t shift,
                                  size_t vl) {
  return vnclip_wv_i8mf4_m(mask, maskedoff, op1, shift, vl);
}
656 
657 //
658 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4_m(
659 // CHECK-RV64-NEXT:  entry:
660 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
661 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
662 //
// Forwards to the masked vnclip_wx_i8mf4_m intrinsic (scalar shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vint8mf4_t test_vnclip_wx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
                                  vint16mf2_t op1, size_t shift, size_t vl) {
  return vnclip_wx_i8mf4_m(mask, maskedoff, op1, shift, vl);
}
667 
668 //
669 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2_m(
670 // CHECK-RV64-NEXT:  entry:
671 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
672 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
673 //
// Forwards to the masked vnclip_wv_i8mf2_m intrinsic (vector shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vint8mf2_t test_vnclip_wv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
                                  vint16m1_t op1, vuint8mf2_t shift,
                                  size_t vl) {
  return vnclip_wv_i8mf2_m(mask, maskedoff, op1, shift, vl);
}
679 
680 //
681 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2_m(
682 // CHECK-RV64-NEXT:  entry:
683 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
684 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
685 //
// Forwards to the masked vnclip_wx_i8mf2_m intrinsic (scalar shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vint8mf2_t test_vnclip_wx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
                                  vint16m1_t op1, size_t shift, size_t vl) {
  return vnclip_wx_i8mf2_m(mask, maskedoff, op1, shift, vl);
}
690 
691 //
692 // CHECK-RV64-LABEL: @test_vnclip_wv_i8m1_m(
693 // CHECK-RV64-NEXT:  entry:
694 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
695 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
696 //
// Forwards to the masked vnclip_wv_i8m1_m intrinsic (vector shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vint8m1_t test_vnclip_wv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
                                vint16m2_t op1, vuint8m1_t shift, size_t vl) {
  return vnclip_wv_i8m1_m(mask, maskedoff, op1, shift, vl);
}
701 
702 //
703 // CHECK-RV64-LABEL: @test_vnclip_wx_i8m1_m(
704 // CHECK-RV64-NEXT:  entry:
705 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
706 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
707 //
// Forwards to the masked vnclip_wx_i8m1_m intrinsic (scalar shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vint8m1_t test_vnclip_wx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
                                vint16m2_t op1, size_t shift, size_t vl) {
  return vnclip_wx_i8m1_m(mask, maskedoff, op1, shift, vl);
}
712 
713 //
714 // CHECK-RV64-LABEL: @test_vnclip_wv_i8m2_m(
715 // CHECK-RV64-NEXT:  entry:
716 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
717 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
718 //
// Forwards to the masked vnclip_wv_i8m2_m intrinsic (vector shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vint8m2_t test_vnclip_wv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
                                vint16m4_t op1, vuint8m2_t shift, size_t vl) {
  return vnclip_wv_i8m2_m(mask, maskedoff, op1, shift, vl);
}
723 
724 //
725 // CHECK-RV64-LABEL: @test_vnclip_wx_i8m2_m(
726 // CHECK-RV64-NEXT:  entry:
727 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
728 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
729 //
// Forwards to the masked vnclip_wx_i8m2_m intrinsic (scalar shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vint8m2_t test_vnclip_wx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
                                vint16m4_t op1, size_t shift, size_t vl) {
  return vnclip_wx_i8m2_m(mask, maskedoff, op1, shift, vl);
}
734 
735 //
736 // CHECK-RV64-LABEL: @test_vnclip_wv_i8m4_m(
737 // CHECK-RV64-NEXT:  entry:
738 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
739 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
740 //
// Forwards to the masked vnclip_wv_i8m4_m intrinsic (vector shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vint8m4_t test_vnclip_wv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
                                vint16m8_t op1, vuint8m4_t shift, size_t vl) {
  return vnclip_wv_i8m4_m(mask, maskedoff, op1, shift, vl);
}
745 
746 //
747 // CHECK-RV64-LABEL: @test_vnclip_wx_i8m4_m(
748 // CHECK-RV64-NEXT:  entry:
749 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
750 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
751 //
// Forwards to the masked vnclip_wx_i8m4_m intrinsic (scalar shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vint8m4_t test_vnclip_wx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
                                vint16m8_t op1, size_t shift, size_t vl) {
  return vnclip_wx_i8m4_m(mask, maskedoff, op1, shift, vl);
}
756 
757 //
758 // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4_m(
759 // CHECK-RV64-NEXT:  entry:
760 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
761 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
762 //
// Forwards to the masked vnclip_wv_i16mf4_m intrinsic (vector shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vint16mf4_t test_vnclip_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
                                    vint32mf2_t op1, vuint16mf4_t shift,
                                    size_t vl) {
  return vnclip_wv_i16mf4_m(mask, maskedoff, op1, shift, vl);
}
768 
769 //
770 // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4_m(
771 // CHECK-RV64-NEXT:  entry:
772 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
773 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
774 //
// Forwards to the masked vnclip_wx_i16mf4_m intrinsic (scalar shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vint16mf4_t test_vnclip_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
                                    vint32mf2_t op1, size_t shift, size_t vl) {
  return vnclip_wx_i16mf4_m(mask, maskedoff, op1, shift, vl);
}
779 
780 //
781 // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2_m(
782 // CHECK-RV64-NEXT:  entry:
783 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
784 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
785 //
// Forwards to the masked vnclip_wv_i16mf2_m intrinsic (vector shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vint16mf2_t test_vnclip_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
                                    vint32m1_t op1, vuint16mf2_t shift,
                                    size_t vl) {
  return vnclip_wv_i16mf2_m(mask, maskedoff, op1, shift, vl);
}
791 
792 //
793 // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2_m(
794 // CHECK-RV64-NEXT:  entry:
795 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
796 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
797 //
// Forwards to the masked vnclip_wx_i16mf2_m intrinsic (scalar shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vint16mf2_t test_vnclip_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
                                    vint32m1_t op1, size_t shift, size_t vl) {
  return vnclip_wx_i16mf2_m(mask, maskedoff, op1, shift, vl);
}
802 
803 //
804 // CHECK-RV64-LABEL: @test_vnclip_wv_i16m1_m(
805 // CHECK-RV64-NEXT:  entry:
806 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
807 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
808 //
// Forwards to the masked vnclip_wv_i16m1_m intrinsic (vector shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vint16m1_t test_vnclip_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
                                  vint32m2_t op1, vuint16m1_t shift,
                                  size_t vl) {
  return vnclip_wv_i16m1_m(mask, maskedoff, op1, shift, vl);
}
814 
815 //
816 // CHECK-RV64-LABEL: @test_vnclip_wx_i16m1_m(
817 // CHECK-RV64-NEXT:  entry:
818 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
819 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
820 //
// Forwards to the masked vnclip_wx_i16m1_m intrinsic (scalar shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vint16m1_t test_vnclip_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
                                  vint32m2_t op1, size_t shift, size_t vl) {
  return vnclip_wx_i16m1_m(mask, maskedoff, op1, shift, vl);
}
825 
826 //
827 // CHECK-RV64-LABEL: @test_vnclip_wv_i16m2_m(
828 // CHECK-RV64-NEXT:  entry:
829 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
830 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
831 //
// Forwards to the masked vnclip_wv_i16m2_m intrinsic (vector shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vint16m2_t test_vnclip_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
                                  vint32m4_t op1, vuint16m2_t shift,
                                  size_t vl) {
  return vnclip_wv_i16m2_m(mask, maskedoff, op1, shift, vl);
}
837 
838 //
839 // CHECK-RV64-LABEL: @test_vnclip_wx_i16m2_m(
840 // CHECK-RV64-NEXT:  entry:
841 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
842 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
843 //
// Forwards to the masked vnclip_wx_i16m2_m intrinsic (scalar shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vint16m2_t test_vnclip_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
                                  vint32m4_t op1, size_t shift, size_t vl) {
  return vnclip_wx_i16m2_m(mask, maskedoff, op1, shift, vl);
}
848 
849 //
850 // CHECK-RV64-LABEL: @test_vnclip_wv_i16m4_m(
851 // CHECK-RV64-NEXT:  entry:
852 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
853 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
854 //
// Forwards to the masked vnclip_wv_i16m4_m intrinsic (vector shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vint16m4_t test_vnclip_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
                                  vint32m8_t op1, vuint16m4_t shift,
                                  size_t vl) {
  return vnclip_wv_i16m4_m(mask, maskedoff, op1, shift, vl);
}
860 
861 //
862 // CHECK-RV64-LABEL: @test_vnclip_wx_i16m4_m(
863 // CHECK-RV64-NEXT:  entry:
864 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
865 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
866 //
// Forwards to the masked vnclip_wx_i16m4_m intrinsic (scalar shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vint16m4_t test_vnclip_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
                                  vint32m8_t op1, size_t shift, size_t vl) {
  return vnclip_wx_i16m4_m(mask, maskedoff, op1, shift, vl);
}
871 
872 //
873 // CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_m(
874 // CHECK-RV64-NEXT:  entry:
875 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
876 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
877 //
// Forwards to the masked vnclip_wv_i32mf2_m intrinsic (vector shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vint32mf2_t test_vnclip_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
                                    vint64m1_t op1, vuint32mf2_t shift,
                                    size_t vl) {
  return vnclip_wv_i32mf2_m(mask, maskedoff, op1, shift, vl);
}
883 
884 //
885 // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_m(
886 // CHECK-RV64-NEXT:  entry:
887 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
888 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
889 //
// Forwards to the masked vnclip_wx_i32mf2_m intrinsic (scalar shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vint32mf2_t test_vnclip_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
                                    vint64m1_t op1, size_t shift, size_t vl) {
  return vnclip_wx_i32mf2_m(mask, maskedoff, op1, shift, vl);
}
894 
895 //
896 // CHECK-RV64-LABEL: @test_vnclip_wv_i32m1_m(
897 // CHECK-RV64-NEXT:  entry:
898 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
899 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
900 //
// Forwards to the masked vnclip_wv_i32m1_m intrinsic (vector shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
                                  vint64m2_t op1, vuint32m1_t shift,
                                  size_t vl) {
  return vnclip_wv_i32m1_m(mask, maskedoff, op1, shift, vl);
}
906 
907 //
908 // CHECK-RV64-LABEL: @test_vnclip_wx_i32m1_m(
909 // CHECK-RV64-NEXT:  entry:
910 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
911 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
912 //
// Forwards to the masked vnclip_wx_i32m1_m intrinsic (scalar shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
                                  vint64m2_t op1, size_t shift, size_t vl) {
  return vnclip_wx_i32m1_m(mask, maskedoff, op1, shift, vl);
}
917 
918 //
919 // CHECK-RV64-LABEL: @test_vnclip_wv_i32m2_m(
920 // CHECK-RV64-NEXT:  entry:
921 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
922 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
923 //
// Forwards to the masked vnclip_wv_i32m2_m intrinsic (vector shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vint32m2_t test_vnclip_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
                                  vint64m4_t op1, vuint32m2_t shift,
                                  size_t vl) {
  return vnclip_wv_i32m2_m(mask, maskedoff, op1, shift, vl);
}
929 
930 //
931 // CHECK-RV64-LABEL: @test_vnclip_wx_i32m2_m(
932 // CHECK-RV64-NEXT:  entry:
933 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
934 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
935 //
// Forwards to the masked vnclip_wx_i32m2_m intrinsic (scalar shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vint32m2_t test_vnclip_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
                                  vint64m4_t op1, size_t shift, size_t vl) {
  return vnclip_wx_i32m2_m(mask, maskedoff, op1, shift, vl);
}
940 
941 //
942 // CHECK-RV64-LABEL: @test_vnclip_wv_i32m4_m(
943 // CHECK-RV64-NEXT:  entry:
944 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
945 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
946 //
// Forwards to the masked vnclip_wv_i32m4_m intrinsic (vector shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vint32m4_t test_vnclip_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
                                  vint64m8_t op1, vuint32m4_t shift,
                                  size_t vl) {
  return vnclip_wv_i32m4_m(mask, maskedoff, op1, shift, vl);
}
952 
953 //
954 // CHECK-RV64-LABEL: @test_vnclip_wx_i32m4_m(
955 // CHECK-RV64-NEXT:  entry:
956 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
957 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
958 //
// Forwards to the masked vnclip_wx_i32m4_m intrinsic (scalar shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vint32m4_t test_vnclip_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
                                  vint64m8_t op1, size_t shift, size_t vl) {
  return vnclip_wx_i32m4_m(mask, maskedoff, op1, shift, vl);
}
963 
964 //
965 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8_m(
966 // CHECK-RV64-NEXT:  entry:
967 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
968 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
969 //
// Forwards to the masked vnclipu_wv_u8mf8_m intrinsic (vector shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vuint8mf8_t test_vnclipu_wv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
                                    vuint16mf4_t op1, vuint8mf8_t shift,
                                    size_t vl) {
  return vnclipu_wv_u8mf8_m(mask, maskedoff, op1, shift, vl);
}
975 
976 //
977 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8_m(
978 // CHECK-RV64-NEXT:  entry:
979 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
980 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
981 //
// Forwards to the masked vnclipu_wx_u8mf8_m intrinsic (scalar shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vuint8mf8_t test_vnclipu_wx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
                                    vuint16mf4_t op1, size_t shift, size_t vl) {
  return vnclipu_wx_u8mf8_m(mask, maskedoff, op1, shift, vl);
}
986 
987 //
988 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4_m(
989 // CHECK-RV64-NEXT:  entry:
990 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
991 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
992 //
// Forwards to the masked vnclipu_wv_u8mf4_m intrinsic (vector shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vuint8mf4_t test_vnclipu_wv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
                                    vuint16mf2_t op1, vuint8mf4_t shift,
                                    size_t vl) {
  return vnclipu_wv_u8mf4_m(mask, maskedoff, op1, shift, vl);
}
998 
999 //
1000 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4_m(
1001 // CHECK-RV64-NEXT:  entry:
1002 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1003 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
1004 //
// Forwards to the masked vnclipu_wx_u8mf4_m intrinsic (scalar shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vuint8mf4_t test_vnclipu_wx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
                                    vuint16mf2_t op1, size_t shift, size_t vl) {
  return vnclipu_wx_u8mf4_m(mask, maskedoff, op1, shift, vl);
}
1009 
1010 //
1011 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2_m(
1012 // CHECK-RV64-NEXT:  entry:
1013 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1014 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
1015 //
// Forwards to the masked vnclipu_wv_u8mf2_m intrinsic (vector shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vuint8mf2_t test_vnclipu_wv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
                                    vuint16m1_t op1, vuint8mf2_t shift,
                                    size_t vl) {
  return vnclipu_wv_u8mf2_m(mask, maskedoff, op1, shift, vl);
}
1021 
1022 //
1023 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2_m(
1024 // CHECK-RV64-NEXT:  entry:
1025 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1026 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
1027 //
// Forwards to the masked vnclipu_wx_u8mf2_m intrinsic (scalar shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vuint8mf2_t test_vnclipu_wx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
                                    vuint16m1_t op1, size_t shift, size_t vl) {
  return vnclipu_wx_u8mf2_m(mask, maskedoff, op1, shift, vl);
}
1032 
1033 //
1034 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1_m(
1035 // CHECK-RV64-NEXT:  entry:
1036 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1037 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
1038 //
// Forwards to the masked vnclipu_wv_u8m1_m intrinsic (vector shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vuint8m1_t test_vnclipu_wv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
                                  vuint16m2_t op1, vuint8m1_t shift,
                                  size_t vl) {
  return vnclipu_wv_u8m1_m(mask, maskedoff, op1, shift, vl);
}
1044 
1045 //
1046 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1_m(
1047 // CHECK-RV64-NEXT:  entry:
1048 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1049 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
1050 //
// Forwards to the masked vnclipu_wx_u8m1_m intrinsic (scalar shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vuint8m1_t test_vnclipu_wx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
                                  vuint16m2_t op1, size_t shift, size_t vl) {
  return vnclipu_wx_u8m1_m(mask, maskedoff, op1, shift, vl);
}
1055 
1056 //
1057 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2_m(
1058 // CHECK-RV64-NEXT:  entry:
1059 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1060 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
1061 //
// Forwards to the masked vnclipu_wv_u8m2_m intrinsic (vector shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vuint8m2_t test_vnclipu_wv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
                                  vuint16m4_t op1, vuint8m2_t shift,
                                  size_t vl) {
  return vnclipu_wv_u8m2_m(mask, maskedoff, op1, shift, vl);
}
1067 
1068 //
1069 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2_m(
1070 // CHECK-RV64-NEXT:  entry:
1071 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1072 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
1073 //
// Forwards to the masked vnclipu_wx_u8m2_m intrinsic (scalar shift); the
// autogenerated CHECK-RV64 lines above pin the expected IR.
vuint8m2_t test_vnclipu_wx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
                                  vuint16m4_t op1, size_t shift, size_t vl) {
  return vnclipu_wx_u8m2_m(mask, maskedoff, op1, shift, vl);
}
1078 
1079 //
1080 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4_m(
1081 // CHECK-RV64-NEXT:  entry:
1082 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1083 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
1084 //
// IR-lowering check wrapper: forwards to the masked vector-shift vnclipu
// intrinsic (u16m8 -> u8m4) and returns its result unchanged.
vuint8m4_t test_vnclipu_wv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
                                  vuint16m8_t op1, vuint8m4_t shift,
                                  size_t vl) {
  vuint8m4_t ret = vnclipu_wv_u8m4_m(mask, maskedoff, op1, shift, vl);
  return ret;
}
1090 
1091 //
1092 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4_m(
1093 // CHECK-RV64-NEXT:  entry:
1094 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1095 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
1096 //
// IR-lowering check wrapper: forwards to the masked scalar-shift vnclipu
// intrinsic (u16m8 -> u8m4) and returns its result unchanged.
vuint8m4_t test_vnclipu_wx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
                                  vuint16m8_t op1, size_t shift, size_t vl) {
  vuint8m4_t ret = vnclipu_wx_u8m4_m(mask, maskedoff, op1, shift, vl);
  return ret;
}
1101 
1102 //
1103 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4_m(
1104 // CHECK-RV64-NEXT:  entry:
1105 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1106 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
1107 //
// IR-lowering check wrapper: forwards to the masked vector-shift vnclipu
// intrinsic (u32mf2 -> u16mf4) and returns its result unchanged.
vuint16mf4_t test_vnclipu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
                                      vuint32mf2_t op1, vuint16mf4_t shift,
                                      size_t vl) {
  vuint16mf4_t ret = vnclipu_wv_u16mf4_m(mask, maskedoff, op1, shift, vl);
  return ret;
}
1113 
1114 //
1115 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4_m(
1116 // CHECK-RV64-NEXT:  entry:
1117 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1118 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
1119 //
// IR-lowering check wrapper: forwards to the masked scalar-shift vnclipu
// intrinsic (u32mf2 -> u16mf4) and returns its result unchanged.
vuint16mf4_t test_vnclipu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
                                      vuint32mf2_t op1, size_t shift,
                                      size_t vl) {
  vuint16mf4_t ret = vnclipu_wx_u16mf4_m(mask, maskedoff, op1, shift, vl);
  return ret;
}
1125 
1126 //
1127 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2_m(
1128 // CHECK-RV64-NEXT:  entry:
1129 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1130 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
1131 //
// IR-lowering check wrapper: forwards to the masked vector-shift vnclipu
// intrinsic (u32m1 -> u16mf2) and returns its result unchanged.
vuint16mf2_t test_vnclipu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
                                      vuint32m1_t op1, vuint16mf2_t shift,
                                      size_t vl) {
  vuint16mf2_t ret = vnclipu_wv_u16mf2_m(mask, maskedoff, op1, shift, vl);
  return ret;
}
1137 
1138 //
1139 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2_m(
1140 // CHECK-RV64-NEXT:  entry:
1141 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1142 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
1143 //
// IR-lowering check wrapper: forwards to the masked scalar-shift vnclipu
// intrinsic (u32m1 -> u16mf2) and returns its result unchanged.
vuint16mf2_t test_vnclipu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
                                      vuint32m1_t op1, size_t shift,
                                      size_t vl) {
  vuint16mf2_t ret = vnclipu_wx_u16mf2_m(mask, maskedoff, op1, shift, vl);
  return ret;
}
1149 
1150 //
1151 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1_m(
1152 // CHECK-RV64-NEXT:  entry:
1153 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1154 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
1155 //
// IR-lowering check wrapper: forwards to the masked vector-shift vnclipu
// intrinsic (u32m2 -> u16m1) and returns its result unchanged.
vuint16m1_t test_vnclipu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
                                    vuint32m2_t op1, vuint16m1_t shift,
                                    size_t vl) {
  vuint16m1_t ret = vnclipu_wv_u16m1_m(mask, maskedoff, op1, shift, vl);
  return ret;
}
1161 
1162 //
1163 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1_m(
1164 // CHECK-RV64-NEXT:  entry:
1165 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1166 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
1167 //
// IR-lowering check wrapper: forwards to the masked scalar-shift vnclipu
// intrinsic (u32m2 -> u16m1) and returns its result unchanged.
vuint16m1_t test_vnclipu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
                                    vuint32m2_t op1, size_t shift, size_t vl) {
  vuint16m1_t ret = vnclipu_wx_u16m1_m(mask, maskedoff, op1, shift, vl);
  return ret;
}
1172 
1173 //
1174 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2_m(
1175 // CHECK-RV64-NEXT:  entry:
1176 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1177 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
1178 //
// IR-lowering check wrapper: forwards to the masked vector-shift vnclipu
// intrinsic (u32m4 -> u16m2) and returns its result unchanged.
vuint16m2_t test_vnclipu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
                                    vuint32m4_t op1, vuint16m2_t shift,
                                    size_t vl) {
  vuint16m2_t ret = vnclipu_wv_u16m2_m(mask, maskedoff, op1, shift, vl);
  return ret;
}
1184 
1185 //
1186 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2_m(
1187 // CHECK-RV64-NEXT:  entry:
1188 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1189 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
1190 //
// IR-lowering check wrapper: forwards to the masked scalar-shift vnclipu
// intrinsic (u32m4 -> u16m2) and returns its result unchanged.
vuint16m2_t test_vnclipu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
                                    vuint32m4_t op1, size_t shift, size_t vl) {
  vuint16m2_t ret = vnclipu_wx_u16m2_m(mask, maskedoff, op1, shift, vl);
  return ret;
}
1195 
1196 //
1197 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4_m(
1198 // CHECK-RV64-NEXT:  entry:
1199 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1200 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
1201 //
// IR-lowering check wrapper: forwards to the masked vector-shift vnclipu
// intrinsic (u32m8 -> u16m4) and returns its result unchanged.
vuint16m4_t test_vnclipu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
                                    vuint32m8_t op1, vuint16m4_t shift,
                                    size_t vl) {
  vuint16m4_t ret = vnclipu_wv_u16m4_m(mask, maskedoff, op1, shift, vl);
  return ret;
}
1207 
1208 //
1209 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4_m(
1210 // CHECK-RV64-NEXT:  entry:
1211 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1212 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
1213 //
// IR-lowering check wrapper: forwards to the masked scalar-shift vnclipu
// intrinsic (u32m8 -> u16m4) and returns its result unchanged.
vuint16m4_t test_vnclipu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
                                    vuint32m8_t op1, size_t shift, size_t vl) {
  vuint16m4_t ret = vnclipu_wx_u16m4_m(mask, maskedoff, op1, shift, vl);
  return ret;
}
1218 
1219 //
1220 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_m(
1221 // CHECK-RV64-NEXT:  entry:
1222 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1223 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
1224 //
// IR-lowering check wrapper: forwards to the masked vector-shift vnclipu
// intrinsic (u64m1 -> u32mf2) and returns its result unchanged.
vuint32mf2_t test_vnclipu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
                                      vuint64m1_t op1, vuint32mf2_t shift,
                                      size_t vl) {
  vuint32mf2_t ret = vnclipu_wv_u32mf2_m(mask, maskedoff, op1, shift, vl);
  return ret;
}
1230 
1231 //
1232 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_m(
1233 // CHECK-RV64-NEXT:  entry:
1234 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1235 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
1236 //
// IR-lowering check wrapper: forwards to the masked scalar-shift vnclipu
// intrinsic (u64m1 -> u32mf2) and returns its result unchanged.
vuint32mf2_t test_vnclipu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
                                      vuint64m1_t op1, size_t shift,
                                      size_t vl) {
  vuint32mf2_t ret = vnclipu_wx_u32mf2_m(mask, maskedoff, op1, shift, vl);
  return ret;
}
1242 
1243 //
1244 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1_m(
1245 // CHECK-RV64-NEXT:  entry:
1246 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1247 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
1248 //
// IR-lowering check wrapper: forwards to the masked vector-shift vnclipu
// intrinsic (u64m2 -> u32m1) and returns its result unchanged.
vuint32m1_t test_vnclipu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
                                    vuint64m2_t op1, vuint32m1_t shift,
                                    size_t vl) {
  vuint32m1_t ret = vnclipu_wv_u32m1_m(mask, maskedoff, op1, shift, vl);
  return ret;
}
1254 
1255 //
1256 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1_m(
1257 // CHECK-RV64-NEXT:  entry:
1258 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1259 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
1260 //
// IR-lowering check wrapper: forwards to the masked scalar-shift vnclipu
// intrinsic (u64m2 -> u32m1) and returns its result unchanged.
vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
                                    vuint64m2_t op1, size_t shift, size_t vl) {
  vuint32m1_t ret = vnclipu_wx_u32m1_m(mask, maskedoff, op1, shift, vl);
  return ret;
}
1265 
1266 //
1267 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2_m(
1268 // CHECK-RV64-NEXT:  entry:
1269 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1270 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
1271 //
// IR-lowering check wrapper: forwards to the masked vector-shift vnclipu
// intrinsic (u64m4 -> u32m2) and returns its result unchanged.
vuint32m2_t test_vnclipu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
                                    vuint64m4_t op1, vuint32m2_t shift,
                                    size_t vl) {
  vuint32m2_t ret = vnclipu_wv_u32m2_m(mask, maskedoff, op1, shift, vl);
  return ret;
}
1277 
1278 //
1279 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2_m(
1280 // CHECK-RV64-NEXT:  entry:
1281 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1282 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
1283 //
// IR-lowering check wrapper: forwards to the masked scalar-shift vnclipu
// intrinsic (u64m4 -> u32m2) and returns its result unchanged.
vuint32m2_t test_vnclipu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
                                    vuint64m4_t op1, size_t shift, size_t vl) {
  vuint32m2_t ret = vnclipu_wx_u32m2_m(mask, maskedoff, op1, shift, vl);
  return ret;
}
1288 
1289 //
1290 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4_m(
1291 // CHECK-RV64-NEXT:  entry:
1292 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1293 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
1294 //
// IR-lowering check wrapper: forwards to the masked vector-shift vnclipu
// intrinsic (u64m8 -> u32m4) and returns its result unchanged.
vuint32m4_t test_vnclipu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
                                    vuint64m8_t op1, vuint32m4_t shift,
                                    size_t vl) {
  vuint32m4_t ret = vnclipu_wv_u32m4_m(mask, maskedoff, op1, shift, vl);
  return ret;
}
1300 
1301 //
1302 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4_m(
1303 // CHECK-RV64-NEXT:  entry:
1304 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1305 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
1306 //
// IR-lowering check wrapper: forwards to the masked scalar-shift vnclipu
// intrinsic (u64m8 -> u32m4) and returns its result unchanged.
vuint32m4_t test_vnclipu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
                                    vuint64m8_t op1, size_t shift, size_t vl) {
  vuint32m4_t ret = vnclipu_wx_u32m4_m(mask, maskedoff, op1, shift, vl);
  return ret;
}
1311