// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
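// Note: as the autogenerated assertions below show, each vfneg_v_* intrinsic
// is expected to lower to the @llvm.riscv.vfsgnjn intrinsic with op1 passed
// as both source operands, i.e. the vfsgnjn.vv vd, vs, vs idiom that
// implements vfneg.v.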

//
// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfneg_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
  return vfneg_v_f32mf2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfneg_v_f32m1 (vfloat32m1_t op1, size_t vl) {
  return vfneg_v_f32m1(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfneg_v_f32m2 (vfloat32m2_t op1, size_t vl) {
  return vfneg_v_f32m2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfneg_v_f32m4 (vfloat32m4_t op1, size_t vl) {
  return vfneg_v_f32m4(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfneg_v_f32m8 (vfloat32m8_t op1, size_t vl) {
  return vfneg_v_f32m8(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfneg_v_f64m1 (vfloat64m1_t op1, size_t vl) {
  return vfneg_v_f64m1(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfneg_v_f64m2 (vfloat64m2_t op1, size_t vl) {
  return vfneg_v_f64m2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfneg_v_f64m4 (vfloat64m4_t op1, size_t vl) {
  return vfneg_v_f64m4(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfneg_v_f64m8 (vfloat64m8_t op1, size_t vl) {
  return vfneg_v_f64m8(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfneg_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
  return vfneg_v_f32mf2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfneg_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
  return vfneg_v_f32m1_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfneg_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
  return vfneg_v_f32m2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfneg_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
  return vfneg_v_f32m4_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfneg_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
  return vfneg_v_f32m8_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfneg_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
  return vfneg_v_f64m1_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfneg_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
  return vfneg_v_f64m2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfneg_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
  return vfneg_v_f64m4_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfneg_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
  return vfneg_v_f64m8_m(mask, maskedoff, op1, vl);
}