; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
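; llvm.abs has no single RVV instruction, so it is expanded to a negate plus a
; max: vrsub.vi computes 0 - x and vmax.vv then selects max(x, 0 - x). The i1
; operand of llvm.abs (is_int_min_poison) is false in every call below, so
; abs(INT_MIN) must wrap to INT_MIN; this expansion satisfies that, since
; 0 - INT_MIN wraps back to INT_MIN.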

declare <vscale x 1 x i16> @llvm.abs.nxv1i16(<vscale x 1 x i16>, i1)

define <vscale x 1 x i16> @vabs_nxv1i16(<vscale x 1 x i16> %v) {
; CHECK-LABEL: vabs_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT:    vrsub.vi v25, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v25
; CHECK-NEXT:    ret
  %r = call <vscale x 1 x i16> @llvm.abs.nxv1i16(<vscale x 1 x i16> %v, i1 false)
  ret <vscale x 1 x i16> %r
}

declare <vscale x 2 x i16> @llvm.abs.nxv2i16(<vscale x 2 x i16>, i1)

define <vscale x 2 x i16> @vabs_nxv2i16(<vscale x 2 x i16> %v) {
; CHECK-LABEL: vabs_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vrsub.vi v25, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v25
; CHECK-NEXT:    ret
  %r = call <vscale x 2 x i16> @llvm.abs.nxv2i16(<vscale x 2 x i16> %v, i1 false)
  ret <vscale x 2 x i16> %r
}

declare <vscale x 4 x i16> @llvm.abs.nxv4i16(<vscale x 4 x i16>, i1)

define <vscale x 4 x i16> @vabs_nxv4i16(<vscale x 4 x i16> %v) {
; CHECK-LABEL: vabs_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT:    vrsub.vi v25, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v25
; CHECK-NEXT:    ret
  %r = call <vscale x 4 x i16> @llvm.abs.nxv4i16(<vscale x 4 x i16> %v, i1 false)
  ret <vscale x 4 x i16> %r
}

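; At LMUL >= 2, a vector register group must start at a register number that
; is a multiple of the LMUL, so the scratch register in the expansions below
; is v26 under m2, v28 under m4, and v16 under m8; the fractional and m1
; cases keep using v25.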
declare <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16>, i1)

define <vscale x 8 x i16> @vabs_nxv8i16(<vscale x 8 x i16> %v) {
; CHECK-LABEL: vabs_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT:    vrsub.vi v26, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v26
; CHECK-NEXT:    ret
  %r = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %v, i1 false)
  ret <vscale x 8 x i16> %r
}

declare <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16>, i1)

define <vscale x 16 x i16> @vabs_nxv16i16(<vscale x 16 x i16> %v) {
; CHECK-LABEL: vabs_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT:    vrsub.vi v28, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v28
; CHECK-NEXT:    ret
  %r = call <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16> %v, i1 false)
  ret <vscale x 16 x i16> %r
}

declare <vscale x 32 x i16> @llvm.abs.nxv32i16(<vscale x 32 x i16>, i1)

define <vscale x 32 x i16> @vabs_nxv32i16(<vscale x 32 x i16> %v) {
; CHECK-LABEL: vabs_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT:    vrsub.vi v16, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v16
; CHECK-NEXT:    ret
  %r = call <vscale x 32 x i16> @llvm.abs.nxv32i16(<vscale x 32 x i16> %v, i1 false)
  ret <vscale x 32 x i16> %r
}

declare <vscale x 1 x i32> @llvm.abs.nxv1i32(<vscale x 1 x i32>, i1)

define <vscale x 1 x i32> @vabs_nxv1i32(<vscale x 1 x i32> %v) {
; CHECK-LABEL: vabs_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT:    vrsub.vi v25, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v25
; CHECK-NEXT:    ret
  %r = call <vscale x 1 x i32> @llvm.abs.nxv1i32(<vscale x 1 x i32> %v, i1 false)
  ret <vscale x 1 x i32> %r
}

declare <vscale x 2 x i32> @llvm.abs.nxv2i32(<vscale x 2 x i32>, i1)

define <vscale x 2 x i32> @vabs_nxv2i32(<vscale x 2 x i32> %v) {
; CHECK-LABEL: vabs_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT:    vrsub.vi v25, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v25
; CHECK-NEXT:    ret
  %r = call <vscale x 2 x i32> @llvm.abs.nxv2i32(<vscale x 2 x i32> %v, i1 false)
  ret <vscale x 2 x i32> %r
}

declare <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32>, i1)

define <vscale x 4 x i32> @vabs_nxv4i32(<vscale x 4 x i32> %v) {
; CHECK-LABEL: vabs_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT:    vrsub.vi v26, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v26
; CHECK-NEXT:    ret
  %r = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %v, i1 false)
  ret <vscale x 4 x i32> %r
}

declare <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32>, i1)

define <vscale x 8 x i32> @vabs_nxv8i32(<vscale x 8 x i32> %v) {
; CHECK-LABEL: vabs_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT:    vrsub.vi v28, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v28
; CHECK-NEXT:    ret
  %r = call <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32> %v, i1 false)
  ret <vscale x 8 x i32> %r
}

declare <vscale x 16 x i32> @llvm.abs.nxv16i32(<vscale x 16 x i32>, i1)

define <vscale x 16 x i32> @vabs_nxv16i32(<vscale x 16 x i32> %v) {
; CHECK-LABEL: vabs_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT:    vrsub.vi v16, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v16
; CHECK-NEXT:    ret
  %r = call <vscale x 16 x i32> @llvm.abs.nxv16i32(<vscale x 16 x i32> %v, i1 false)
  ret <vscale x 16 x i32> %r
}

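; The i64 cases below lower the same way on riscv32 and riscv64: vrsub.vi
; encodes its scalar operand as a 5-bit immediate, so computing 0 - x never
; requires materializing a 64-bit constant in GPRs, even on RV32.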
declare <vscale x 1 x i64> @llvm.abs.nxv1i64(<vscale x 1 x i64>, i1)

define <vscale x 1 x i64> @vabs_nxv1i64(<vscale x 1 x i64> %v) {
; CHECK-LABEL: vabs_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT:    vrsub.vi v25, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v25
; CHECK-NEXT:    ret
  %r = call <vscale x 1 x i64> @llvm.abs.nxv1i64(<vscale x 1 x i64> %v, i1 false)
  ret <vscale x 1 x i64> %r
}

declare <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64>, i1)

define <vscale x 2 x i64> @vabs_nxv2i64(<vscale x 2 x i64> %v) {
; CHECK-LABEL: vabs_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT:    vrsub.vi v26, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v26
; CHECK-NEXT:    ret
  %r = call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %v, i1 false)
  ret <vscale x 2 x i64> %r
}

declare <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64>, i1)

define <vscale x 4 x i64> @vabs_nxv4i64(<vscale x 4 x i64> %v) {
; CHECK-LABEL: vabs_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT:    vrsub.vi v28, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v28
; CHECK-NEXT:    ret
  %r = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %v, i1 false)
  ret <vscale x 4 x i64> %r
}

declare <vscale x 8 x i64> @llvm.abs.nxv8i64(<vscale x 8 x i64>, i1)

define <vscale x 8 x i64> @vabs_nxv8i64(<vscale x 8 x i64> %v) {
; CHECK-LABEL: vabs_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT:    vrsub.vi v16, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v16
; CHECK-NEXT:    ret
  %r = call <vscale x 8 x i64> @llvm.abs.nxv8i64(<vscale x 8 x i64> %v, i1 false)
  ret <vscale x 8 x i64> %r
}