; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s

;;; Test vector multiply intrinsic instructions
;;;
;;; Note:
;;;   We test VMUL*vvl, VMUL*vvl_v, VMUL*rvl, VMUL*rvl_v, VMUL*ivl, VMUL*ivl_v,
;;;   VMUL*vvml_v, VMUL*rvml_v, and VMUL*ivml_v instructions.
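;;;
;;;   Each test sets the vector length with lea/lvl before the vector
;;;   instruction; the pass-through and masked variants run at VL 128 and
;;;   then copy the result into %v0 with vor under the full VL of 256.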

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulul_vvvl(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vmulul_vvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmulu.l %v0, %v0, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmulul.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulul.vvvl(<256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulul_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vmulul_vvvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmulu.l %v2, %v0, %v1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmulul.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulul.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulul_vsvl(i64 %0, <256 x double> %1) {
; CHECK-LABEL: vmulul_vsvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vmulu.l %v0, %s0, %v0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmulul.vsvl(i64 %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulul.vsvl(i64, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulul_vsvvl(i64 %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vmulul_vsvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vmulu.l %v1, %s0, %v0
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmulul.vsvvl(i64 %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulul.vsvvl(i64, <256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulul_vsvl_imm(<256 x double> %0) {
; CHECK-LABEL: vmulul_vsvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmulu.l %v0, 8, %v0
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vmulul.vsvl(i64 8, <256 x double> %0, i32 256)
  ret <256 x double> %2
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulul_vsvvl_imm(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vmulul_vsvvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmulu.l %v1, 8, %v0
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmulul.vsvvl(i64 8, <256 x double> %0, <256 x double> %1, i32 128)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulul_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vmulul_vvvmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmulu.l %v2, %v0, %v1, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vmulul.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulul.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulul_vsvmvl(i64 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vmulul_vsvmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vmulu.l %v1, %s0, %v0, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vmulul.vsvmvl(i64 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulul.vsvmvl(i64, <256 x double>, <256 x i1>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulul_vsvmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
; CHECK-LABEL: vmulul_vsvmvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmulu.l %v1, 8, %v0, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmulul.vsvmvl(i64 8, <256 x double> %0, <256 x i1> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmuluw_vvvl(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vmuluw_vvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmulu.w %v0, %v0, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmuluw.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmuluw.vvvl(<256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmuluw_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vmuluw_vvvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmulu.w %v2, %v0, %v1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmuluw.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmuluw.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmuluw_vsvl(i32 signext %0, <256 x double> %1) {
; CHECK-LABEL: vmuluw_vsvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    and %s0, %s0, (32)0
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vmulu.w %v0, %s0, %v0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmuluw.vsvl(i32 %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmuluw.vsvl(i32, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmuluw_vsvvl(i32 signext %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vmuluw_vsvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    and %s0, %s0, (32)0
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vmulu.w %v1, %s0, %v0
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmuluw.vsvvl(i32 %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmuluw.vsvvl(i32, <256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmuluw_vsvl_imm(<256 x double> %0) {
; CHECK-LABEL: vmuluw_vsvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmulu.w %v0, 8, %v0
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vmuluw.vsvl(i32 8, <256 x double> %0, i32 256)
  ret <256 x double> %2
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmuluw_vsvvl_imm(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vmuluw_vsvvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmulu.w %v1, 8, %v0
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmuluw.vsvvl(i32 8, <256 x double> %0, <256 x double> %1, i32 128)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmuluw_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vmuluw_vvvmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmulu.w %v2, %v0, %v1, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vmuluw.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmuluw.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmuluw_vsvmvl(i32 signext %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vmuluw_vsvmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    and %s0, %s0, (32)0
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vmulu.w %v1, %s0, %v0, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vmuluw.vsvmvl(i32 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmuluw.vsvmvl(i32, <256 x double>, <256 x i1>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmuluw_vsvmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
; CHECK-LABEL: vmuluw_vsvmvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmulu.w %v1, 8, %v0, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmuluw.vsvmvl(i32 8, <256 x double> %0, <256 x i1> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswsx_vvvl(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vmulswsx_vvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmuls.w.sx %v0, %v0, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmulswsx.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulswsx.vvvl(<256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswsx_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vmulswsx_vvvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmuls.w.sx %v2, %v0, %v1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmulswsx.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulswsx.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswsx_vsvl(i32 signext %0, <256 x double> %1) {
; CHECK-LABEL: vmulswsx_vsvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    and %s0, %s0, (32)0
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vmuls.w.sx %v0, %s0, %v0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmulswsx.vsvl(i32 %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulswsx.vsvl(i32, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswsx_vsvvl(i32 signext %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vmulswsx_vsvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    and %s0, %s0, (32)0
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vmuls.w.sx %v1, %s0, %v0
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmulswsx.vsvvl(i32 %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulswsx.vsvvl(i32, <256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswsx_vsvl_imm(<256 x double> %0) {
; CHECK-LABEL: vmulswsx_vsvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmuls.w.sx %v0, 8, %v0
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vmulswsx.vsvl(i32 8, <256 x double> %0, i32 256)
  ret <256 x double> %2
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswsx_vsvvl_imm(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vmulswsx_vsvvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmuls.w.sx %v1, 8, %v0
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmulswsx.vsvvl(i32 8, <256 x double> %0, <256 x double> %1, i32 128)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswsx_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vmulswsx_vvvmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmuls.w.sx %v2, %v0, %v1, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vmulswsx.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulswsx.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswsx_vsvmvl(i32 signext %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vmulswsx_vsvmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    and %s0, %s0, (32)0
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vmuls.w.sx %v1, %s0, %v0, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vmulswsx.vsvmvl(i32 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulswsx.vsvmvl(i32, <256 x double>, <256 x i1>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswsx_vsvmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
; CHECK-LABEL: vmulswsx_vsvmvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmuls.w.sx %v1, 8, %v0, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmulswsx.vsvmvl(i32 8, <256 x double> %0, <256 x i1> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswzx_vvvl(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vmulswzx_vvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmuls.w.zx %v0, %v0, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmulswzx.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulswzx.vvvl(<256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswzx_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vmulswzx_vvvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmuls.w.zx %v2, %v0, %v1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmulswzx.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulswzx.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswzx_vsvl(i32 signext %0, <256 x double> %1) {
; CHECK-LABEL: vmulswzx_vsvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    and %s0, %s0, (32)0
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vmuls.w.zx %v0, %s0, %v0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmulswzx.vsvl(i32 %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulswzx.vsvl(i32, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswzx_vsvvl(i32 signext %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vmulswzx_vsvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    and %s0, %s0, (32)0
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vmuls.w.zx %v1, %s0, %v0
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmulswzx.vsvvl(i32 %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulswzx.vsvvl(i32, <256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswzx_vsvl_imm(<256 x double> %0) {
; CHECK-LABEL: vmulswzx_vsvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmuls.w.zx %v0, 8, %v0
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vmulswzx.vsvl(i32 8, <256 x double> %0, i32 256)
  ret <256 x double> %2
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswzx_vsvvl_imm(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vmulswzx_vsvvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmuls.w.zx %v1, 8, %v0
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmulswzx.vsvvl(i32 8, <256 x double> %0, <256 x double> %1, i32 128)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswzx_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vmulswzx_vvvmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmuls.w.zx %v2, %v0, %v1, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vmulswzx.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulswzx.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswzx_vsvmvl(i32 signext %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vmulswzx_vsvmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    and %s0, %s0, (32)0
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vmuls.w.zx %v1, %s0, %v0, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vmulswzx.vsvmvl(i32 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulswzx.vsvmvl(i32, <256 x double>, <256 x i1>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulswzx_vsvmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
; CHECK-LABEL: vmulswzx_vsvmvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmuls.w.zx %v1, 8, %v0, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmulswzx.vsvmvl(i32 8, <256 x double> %0, <256 x i1> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulsl_vvvl(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vmulsl_vvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmuls.l %v0, %v0, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmulsl.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulsl.vvvl(<256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulsl_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vmulsl_vvvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmuls.l %v2, %v0, %v1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmulsl.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulsl.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulsl_vsvl(i64 %0, <256 x double> %1) {
; CHECK-LABEL: vmulsl_vsvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vmuls.l %v0, %s0, %v0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmulsl.vsvl(i64 %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulsl.vsvl(i64, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulsl_vsvvl(i64 %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vmulsl_vsvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vmuls.l %v1, %s0, %v0
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmulsl.vsvvl(i64 %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulsl.vsvvl(i64, <256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulsl_vsvl_imm(<256 x double> %0) {
; CHECK-LABEL: vmulsl_vsvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmuls.l %v0, 8, %v0
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vmulsl.vsvl(i64 8, <256 x double> %0, i32 256)
  ret <256 x double> %2
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulsl_vsvvl_imm(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vmulsl_vsvvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmuls.l %v1, 8, %v0
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmulsl.vsvvl(i64 8, <256 x double> %0, <256 x double> %1, i32 128)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulsl_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vmulsl_vvvmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmuls.l %v2, %v0, %v1, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vmulsl.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulsl.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulsl_vsvmvl(i64 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vmulsl_vsvmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vmuls.l %v1, %s0, %v0, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vmulsl.vsvmvl(i64 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulsl.vsvmvl(i64, <256 x double>, <256 x i1>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulsl_vsvmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
; CHECK-LABEL: vmulsl_vsvmvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmuls.l %v1, 8, %v0, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmulsl.vsvmvl(i64 8, <256 x double> %0, <256 x i1> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulslw_vvvl(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vmulslw_vvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmuls.l.w %v0, %v0, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmulslw.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulslw.vvvl(<256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulslw_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vmulslw_vvvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmuls.l.w %v2, %v0, %v1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmulslw.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulslw.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulslw_vsvl(i32 signext %0, <256 x double> %1) {
; CHECK-LABEL: vmulslw_vsvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    and %s0, %s0, (32)0
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vmuls.l.w %v0, %s0, %v0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmulslw.vsvl(i32 %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulslw.vsvl(i32, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulslw_vsvvl(i32 signext %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vmulslw_vsvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    and %s0, %s0, (32)0
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vmuls.l.w %v1, %s0, %v0
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmulslw.vsvvl(i32 %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmulslw.vsvvl(i32, <256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulslw_vsvl_imm(<256 x double> %0) {
; CHECK-LABEL: vmulslw_vsvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmuls.l.w %v0, 8, %v0
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vmulslw.vsvl(i32 8, <256 x double> %0, i32 256)
  ret <256 x double> %2
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vmulslw_vsvvl_imm(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vmulslw_vsvvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vmuls.l.w %v1, 8, %v0
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmulslw.vsvvl(i32 8, <256 x double> %0, <256 x double> %1, i32 128)
  ret <256 x double> %3
}