; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -verify-machineinstrs -o - %s | FileCheck %s

; Natural mapping
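; All operands already match the register banks the selected instruction expects (VGPR value/voffset, SGPR rsrc/soffset), so no cross-bank copies or waterfall loop are needed.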
define amdgpu_ps float @raw_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(i32 %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[BUFFER_ATOMIC_ADD_OFFEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_OFFEN_RTN [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 4)
  ; CHECK:   $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_OFFEN_RTN]]
  ; CHECK:   SI_RETURN_TO_EPILOG implicit $vgpr0
  %ret = call i32 @llvm.amdgcn.raw.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  %cast = bitcast i32 %ret to float
  ret float %cast
}

define amdgpu_ps void @raw_buffer_atomic_add_i32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(i32 %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_atomic_add_i32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[BUFFER_ATOMIC_ADD_OFFEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_OFFEN_RTN [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  %ret = call i32 @llvm.amdgcn.raw.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  ret void
}

define amdgpu_ps <2 x float> @raw_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(i64 %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
  ; CHECK:   [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK:   [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[BUFFER_ATOMIC_ADD_X2_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_ADD_X2_OFFEN_RTN [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 4)
  ; CHECK:   [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_OFFEN_RTN]].sub0
  ; CHECK:   [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_OFFEN_RTN]].sub1
  ; CHECK:   $vgpr0 = COPY [[COPY8]]
  ; CHECK:   $vgpr1 = COPY [[COPY9]]
  ; CHECK:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
  %ret = call i64 @llvm.amdgcn.raw.buffer.atomic.add.i64(i64 %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  %cast = bitcast i64 %ret to <2 x float>
  ret <2 x float> %cast
}

define amdgpu_ps void @raw_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(i64 %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
  ; CHECK:   [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK:   [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[BUFFER_ATOMIC_ADD_X2_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_ADD_X2_OFFEN_RTN [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  %ret = call i64 @llvm.amdgcn.raw.buffer.atomic.add.i64(i64 %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  ret void
}

; All operands need regbank legalization
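; The rsrc and soffset arrive in VGPRs, so they are copied back to scalar registers with V_READFIRSTLANE inside a waterfall loop that repeats until every active lane has executed the atomic.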
define amdgpu_ps float @raw_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset(i32 inreg %val, <4 x i32> %rsrc, i32 inreg %voffset, i32 %soffset) {
  ; CHECK-LABEL: name: raw_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   successors: %bb.2(0x80000000)
  ; CHECK:   liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
  ; CHECK:   [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK:   [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4
  ; CHECK:   [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
  ; CHECK:   [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
  ; CHECK:   [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
  ; CHECK:   [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
  ; CHECK:   [[S_MOV_B64_term:%[0-9]+]]:sreg_64_xexec = S_MOV_B64_term $exec
  ; CHECK: bb.2:
  ; CHECK:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
  ; CHECK:   [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]].sub0, implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]].sub1, implicit $exec
  ; CHECK:   [[REG_SEQUENCE1:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1
  ; CHECK:   [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE1]], [[COPY9]], implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]].sub0, implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]].sub1, implicit $exec
  ; CHECK:   [[REG_SEQUENCE2:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_2]], %subreg.sub0, [[V_READFIRSTLANE_B32_3]], %subreg.sub1
  ; CHECK:   [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY10]], implicit $exec
  ; CHECK:   [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc
  ; CHECK:   [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
  ; CHECK:   [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
  ; CHECK:   [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
  ; CHECK:   [[S_AND_B64_1:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U32_e64_]], [[S_AND_B64_]], implicit-def $scc
  ; CHECK:   [[BUFFER_ATOMIC_ADD_OFFEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_OFFEN_RTN [[COPY7]], [[COPY8]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 4)
  ; CHECK:   [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_1]], implicit-def $exec, implicit-def $scc, implicit $exec
  ; CHECK:   $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc
  ; CHECK:   S_CBRANCH_EXECNZ %bb.2, implicit $exec
  ; CHECK: bb.3:
  ; CHECK:   successors: %bb.4(0x80000000)
  ; CHECK:   $exec = S_MOV_B64_term [[S_MOV_B64_term]]
  ; CHECK: bb.4:
  ; CHECK:   $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_OFFEN_RTN]]
  ; CHECK:   SI_RETURN_TO_EPILOG implicit $vgpr0
  %ret = call i32 @llvm.amdgcn.raw.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  %cast = bitcast i32 %ret to float
  ret float %cast
}

; All operands need regbank legalization
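; Same waterfall legalization as above, but the atomic's result is unused, so the function ends in S_ENDPGM rather than a return.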
define amdgpu_ps void @raw_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset(i32 inreg %val, <4 x i32> %rsrc, i32 inreg %voffset, i32 %soffset) {
  ; CHECK-LABEL: name: raw_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   successors: %bb.2(0x80000000)
  ; CHECK:   liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
  ; CHECK:   [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK:   [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4
  ; CHECK:   [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
  ; CHECK:   [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
  ; CHECK:   [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
  ; CHECK:   [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
  ; CHECK:   [[S_MOV_B64_term:%[0-9]+]]:sreg_64_xexec = S_MOV_B64_term $exec
  ; CHECK: bb.2:
  ; CHECK:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
  ; CHECK:   [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]].sub0, implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]].sub1, implicit $exec
  ; CHECK:   [[REG_SEQUENCE1:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1
  ; CHECK:   [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE1]], [[COPY9]], implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]].sub0, implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]].sub1, implicit $exec
  ; CHECK:   [[REG_SEQUENCE2:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_2]], %subreg.sub0, [[V_READFIRSTLANE_B32_3]], %subreg.sub1
  ; CHECK:   [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY10]], implicit $exec
  ; CHECK:   [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc
  ; CHECK:   [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
  ; CHECK:   [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
  ; CHECK:   [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
  ; CHECK:   [[S_AND_B64_1:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U32_e64_]], [[S_AND_B64_]], implicit-def $scc
  ; CHECK:   [[BUFFER_ATOMIC_ADD_OFFEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_OFFEN_RTN [[COPY7]], [[COPY8]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 4)
  ; CHECK:   [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_1]], implicit-def $exec, implicit-def $scc, implicit $exec
  ; CHECK:   $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc
  ; CHECK:   S_CBRANCH_EXECNZ %bb.2, implicit $exec
  ; CHECK: bb.3:
  ; CHECK:   successors: %bb.4(0x80000000)
  ; CHECK:   $exec = S_MOV_B64_term [[S_MOV_B64_term]]
  ; CHECK: bb.4:
  ; CHECK:   S_ENDPGM 0
  %ret = call i32 @llvm.amdgcn.raw.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  ret void
}

define amdgpu_ps float @raw_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095(i32 %val, <4 x i32> inreg %rsrc, i32 %voffset.base, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[BUFFER_ATOMIC_ADD_OFFEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_OFFEN_RTN [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 4)
  ; CHECK:   $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_OFFEN_RTN]]
  ; CHECK:   SI_RETURN_TO_EPILOG implicit $vgpr0
  %voffset = add i32 %voffset.base, 4095
  %ret = call i32 @llvm.amdgcn.raw.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  %cast = bitcast i32 %ret to float
  ret float %cast
}

; Natural mapping + slc
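; cachepolicy=2 requests slc; the returning form also sets glc, so the selected CPol immediate is 3 rather than 1.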
define amdgpu_ps float @raw_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc(i32 %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[BUFFER_ATOMIC_ADD_OFFEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_OFFEN_RTN [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 0, 3, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 4)
  ; CHECK:   $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_OFFEN_RTN]]
  ; CHECK:   SI_RETURN_TO_EPILOG implicit $vgpr0
  %ret = call i32 @llvm.amdgcn.raw.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 2)
  %cast = bitcast i32 %ret to float
  ret float %cast
}

declare i32 @llvm.amdgcn.raw.buffer.atomic.add.i32(i32, <4 x i32>, i32, i32, i32 immarg) #0
declare i64 @llvm.amdgcn.raw.buffer.atomic.add.i64(i64, <4 x i32>, i32, i32, i32 immarg) #0

attributes #0 = { nounwind }