1; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI,SICIVI,FUNC %s
2; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI,SICIVI,FUNC %s
3; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9,FUNC %s
4
5
6; FUNC-LABEL: {{^}}s_usubo_i64_zext:
7; GCN: s_sub_u32
8; GCN: s_subb_u32
9; GCN: v_cmp_gt_u64_e32 vcc
10
11; EG: SUBB_UINT
12; EG: ADDC_UINT
; Scalar i64 usub.with.overflow whose carry is zero-extended and folded back
; into the result (val + zext(carry)), exercising SGPR sub/subb selection.
define amdgpu_kernel void @s_usubo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) #0 {
  %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b) #0
  %val = extractvalue { i64, i1 } %usub, 0      ; difference a - b
  %carry = extractvalue { i64, i1 } %usub, 1    ; unsigned borrow bit
  %ext = zext i1 %carry to i64
  %add2 = add i64 %val, %ext                    ; re-use the carry so it is not dead
  store i64 %add2, i64 addrspace(1)* %out, align 8
  ret void
}
22
23; FIXME: Could do scalar
24
25; FUNC-LABEL: {{^}}s_usubo_i32:
26; SI: v_sub_i32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}
27; VI: v_sub_u32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}
28; GFX9: v_sub_co_u32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}
29
30; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
31
32; EG-DAG: SUBB_UINT
33; EG-DAG: SUB_INT
; i32 usub.with.overflow with uniform (kernel-argument) operands; both the
; difference and the i1 carry are stored separately.
define amdgpu_kernel void @s_usubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) #0 {
  %usub = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue { i32, i1 } %usub, 0      ; difference a - b
  %carry = extractvalue { i32, i1 } %usub, 1    ; unsigned borrow bit
  store i32 %val, i32 addrspace(1)* %out, align 4
  store i1 %carry, i1 addrspace(1)* %carryout
  ret void
}
42
43; FUNC-LABEL: {{^}}v_usubo_i32:
44; SI: v_sub_i32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
45; VI: v_sub_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
46; GFX9: v_sub_co_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
47
48; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
49
50; EG-DAG: SUBB_UINT
51; EG-DAG: SUB_INT
; i32 usub.with.overflow with divergent (loaded) operands.
; NOTE(review): %tid.ext is computed but never used — the GEPs below take no
; index, so %a.gep/%b.gep alias %a.ptr/%b.ptr directly. Possibly an intended
; ", i64 %tid.ext" index was dropped; verify before changing, since altering
; the addressing would change the checked codegen.
define amdgpu_kernel void @v_usubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %a.ptr, i32 addrspace(1)* %b.ptr) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i32, i32 addrspace(1)* %a.ptr
  %b.gep = getelementptr inbounds i32, i32 addrspace(1)* %b.ptr
  %a = load i32, i32 addrspace(1)* %a.gep, align 4
  %b = load i32, i32 addrspace(1)* %b.gep, align 4
  %usub = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue { i32, i1 } %usub, 0      ; difference a - b
  %carry = extractvalue { i32, i1 } %usub, 1    ; unsigned borrow bit
  store i32 %val, i32 addrspace(1)* %out, align 4
  store i1 %carry, i1 addrspace(1)* %carryout
  ret void
}
66
67; FUNC-LABEL: {{^}}v_usubo_i32_novcc:
68; SI: v_sub_i32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
69; VI: v_sub_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
70; GFX9: v_sub_co_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
71
72; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
73
74; EG-DAG: SUBB_UINT
75; EG-DAG: SUB_INT
; Same as @v_usubo_i32, but the inline asm clobbers vcc between the two
; (volatile, hence ordered) stores, forcing the carry bit to survive outside
; vcc across the asm.
define amdgpu_kernel void @v_usubo_i32_novcc(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %a.ptr, i32 addrspace(1)* %b.ptr) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64               ; unused; see note on @v_usubo_i32
  %a.gep = getelementptr inbounds i32, i32 addrspace(1)* %a.ptr
  %b.gep = getelementptr inbounds i32, i32 addrspace(1)* %b.ptr
  %a = load i32, i32 addrspace(1)* %a.gep, align 4
  %b = load i32, i32 addrspace(1)* %b.gep, align 4
  %uadd = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue { i32, i1 } %uadd, 0      ; difference a - b
  %carry = extractvalue { i32, i1 } %uadd, 1    ; unsigned borrow bit
  store volatile i32 %val, i32 addrspace(1)* %out, align 4
  call void asm sideeffect "", "~{vcc}"() #0    ; kill vcc while %carry is live
  store volatile i1 %carry, i1 addrspace(1)* %carryout
  ret void
}
91
92; FUNC-LABEL: {{^}}s_usubo_i64:
93; GCN: s_sub_u32
94; GCN: s_subb_u32
95
96; EG-DAG: SUBB_UINT
97; EG-DAG: SUB_INT
98; EG-DAG: SUB_INT
99; EG: SUB_INT
; i64 usub.with.overflow with uniform operands; difference and carry stored
; separately (expects the s_sub_u32/s_subb_u32 pair).
define amdgpu_kernel void @s_usubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 %a, i64 %b) #0 {
  %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
  %val = extractvalue { i64, i1 } %usub, 0      ; difference a - b
  %carry = extractvalue { i64, i1 } %usub, 1    ; unsigned borrow bit
  store i64 %val, i64 addrspace(1)* %out, align 8
  store i1 %carry, i1 addrspace(1)* %carryout
  ret void
}
108
109; FUNC-LABEL: {{^}}v_usubo_i64:
110; SI: v_sub_i32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
111; SI: v_subb_u32
112; VI: v_sub_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
113; VI: v_subb_u32
114
115; GFX9: v_sub_co_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
116; GFX9: v_subb_co_u32
117
118; EG-DAG: SUBB_UINT
119; EG-DAG: SUB_INT
120; EG-DAG: SUB_INT
121; EG: SUB_INT
; i64 usub.with.overflow with divergent (loaded) operands; expects the
; v_sub/v_subb borrow-chain pair.
define amdgpu_kernel void @v_usubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %a.ptr, i64 addrspace(1)* %b.ptr) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64               ; unused; see note on @v_usubo_i32
  %a.gep = getelementptr inbounds i64, i64 addrspace(1)* %a.ptr
  %b.gep = getelementptr inbounds i64, i64 addrspace(1)* %b.ptr
  %a = load i64, i64 addrspace(1)* %a.gep
  %b = load i64, i64 addrspace(1)* %b.gep
  %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
  %val = extractvalue { i64, i1 } %usub, 0      ; difference a - b
  %carry = extractvalue { i64, i1 } %usub, 1    ; unsigned borrow bit
  store i64 %val, i64 addrspace(1)* %out, align 8
  store i1 %carry, i1 addrspace(1)* %carryout
  ret void
}
136
137; FUNC-LABEL: {{^}}v_usubo_i16:
138; SI: v_subrev_i32_e32
139; SI: v_and_b32
140; SI: v_cmp_ne_u32_e32
141
142; VI: v_sub_u16_e32
143; VI: v_cmp_gt_u16_e32
144
145; GFX9: v_sub_u16_e32
146; GFX9: v_cmp_gt_u16_e32
; i16 usub.with.overflow with divergent operands: SI has no 16-bit sub so the
; checks expect a 32-bit sub + mask + compare; VI/GFX9 use native 16-bit ops.
define amdgpu_kernel void @v_usubo_i16(i16 addrspace(1)* %out, i1 addrspace(1)* %carryout, i16 addrspace(1)* %a.ptr, i16 addrspace(1)* %b.ptr) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64               ; unused; see note on @v_usubo_i32
  %a.gep = getelementptr inbounds i16, i16 addrspace(1)* %a.ptr
  %b.gep = getelementptr inbounds i16, i16 addrspace(1)* %b.ptr
  %a = load i16, i16 addrspace(1)* %a.gep
  %b = load i16, i16 addrspace(1)* %b.gep
  %usub = call { i16, i1 } @llvm.usub.with.overflow.i16(i16 %a, i16 %b)
  %val = extractvalue { i16, i1 } %usub, 0      ; difference a - b
  %carry = extractvalue { i16, i1 } %usub, 1    ; unsigned borrow bit
  store i16 %val, i16 addrspace(1)* %out
  store i1 %carry, i1 addrspace(1)* %carryout
  ret void
}
161
162; FUNC-LABEL: {{^}}v_usubo_v2i32:
163; SICIVI: v_sub_{{[iu]}}32
164; SICIVI: v_cndmask_b32
165; SICIVI: v_sub_{{[iu]}}32
166; SICIVI: v_cndmask_b32
; Vector (<2 x i32>) usub.with.overflow: expects the op to be scalarized into
; two sub/cndmask pairs; the <2 x i1> carry is zext'd to <2 x i32> for storage.
define amdgpu_kernel void @v_usubo_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %carryout, <2 x i32> addrspace(1)* %aptr, <2 x i32> addrspace(1)* %bptr) nounwind {
  %a = load <2 x i32>, <2 x i32> addrspace(1)* %aptr, align 4
  %b = load <2 x i32>, <2 x i32> addrspace(1)* %bptr, align 4
  %sadd = call { <2 x i32>, <2 x i1> } @llvm.usub.with.overflow.v2i32(<2 x i32> %a, <2 x i32> %b) nounwind
  %val = extractvalue { <2 x i32>, <2 x i1> } %sadd, 0   ; per-lane differences
  %carry = extractvalue { <2 x i32>, <2 x i1> } %sadd, 1 ; per-lane borrow bits
  store <2 x i32> %val, <2 x i32> addrspace(1)* %out, align 4
  %carry.ext = zext <2 x i1> %carry to <2 x i32>
  store <2 x i32> %carry.ext, <2 x i32> addrspace(1)* %carryout
  ret void
}
178
179; FUNC-LABEL: {{^}}s_usubo_clamp_bit:
180; GCN: v_sub_{{i|u|co_u}}32_e32
181; GCN: s_endpgm
; Uniform operands where the carry bit is only consumed on one branch (%if);
; on the fall-through path the phi supplies false. Exercises keeping the
; overflow bit live across control flow.
define amdgpu_kernel void @s_usubo_clamp_bit(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) #0 {
entry:
  %usub = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue { i32, i1 } %usub, 0
  %carry = extractvalue { i32, i1 } %usub, 1
  %c2 = icmp eq i1 %carry, false                ; inverted carry
  %cc = icmp eq i32 %a, %b
  br i1 %cc, label %exit, label %if

if:                                             ; pass-through block for the phi
  br label %exit

exit:
  %cout = phi i1 [false, %entry], [%c2, %if]    ; carry only used via %if path
  store i32 %val, i32 addrspace(1)* %out, align 4
  store i1 %cout, i1 addrspace(1)* %carryout
  ret void
}
200
201
202; FUNC-LABEL: {{^}}v_usubo_clamp_bit:
203; GCN: v_sub_{{i|u|co_u}}32_e64
204; GCN: s_endpgm
; Divergent-operand version of @s_usubo_clamp_bit: the carry is live across a
; divergent branch, so the sub is expected in its _e64 (explicit carry-out) form.
define amdgpu_kernel void @v_usubo_clamp_bit(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %a.ptr, i32 addrspace(1)* %b.ptr) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64               ; unused; see note on @v_usubo_i32
  %a.gep = getelementptr inbounds i32, i32 addrspace(1)* %a.ptr
  %b.gep = getelementptr inbounds i32, i32 addrspace(1)* %b.ptr
  %a = load i32, i32 addrspace(1)* %a.gep, align 4
  %b = load i32, i32 addrspace(1)* %b.gep, align 4
  %usub = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue { i32, i1 } %usub, 0
  %carry = extractvalue { i32, i1 } %usub, 1
  %c2 = icmp eq i1 %carry, false                ; inverted carry
  %cc = icmp eq i32 %a, %b
  br i1 %cc, label %exit, label %if

if:                                             ; pass-through block for the phi
  br label %exit

exit:
  %cout = phi i1 [false, %entry], [%c2, %if]    ; carry only used via %if path
  store i32 %val, i32 addrspace(1)* %out, align 4
  store i1 %cout, i1 addrspace(1)* %carryout
  ret void
}
229
; Intrinsic declarations used by the kernels above.
declare i32 @llvm.amdgcn.workitem.id.x() #1
declare { i16, i1 } @llvm.usub.with.overflow.i16(i16, i16) #1
declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32) #1
declare { i64, i1 } @llvm.usub.with.overflow.i64(i64, i64) #1
declare { <2 x i32>, <2 x i1> } @llvm.usub.with.overflow.v2i32(<2 x i32>, <2 x i32>) nounwind readnone

; #0: kernels/calls (nounwind); #1: intrinsics (nounwind readnone).
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
238