; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx908 -stop-after=instruction-select -verify-machineinstrs -o - %s | FileCheck %s

; Natural mapping
define amdgpu_ps void @raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   BUFFER_ATOMIC_ADD_F32_OFFEN [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  %ret = call float @llvm.amdgcn.raw.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  ret void
}

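; The add of 4095 to the VGPR voffset is folded into the instruction's immediate offset.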
define amdgpu_ps void @raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_plus4095__sgpr_soffset(float %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_plus4095__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   BUFFER_ATOMIC_ADD_F32_OFFEN [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom "TargetCustom7" + 4095, align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  %voffset.add = add i32 %voffset, 4095
  %ret = call float @llvm.amdgcn.raw.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0)
  ret void
}

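; A constant 4095 voffset fits in the 12-bit immediate offset field, so the _OFFSET form is selected with no VGPR offset operand.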
define amdgpu_ps void @raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_4095__sgpr_soffset(float %val, <4 x i32> inreg %rsrc, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_4095__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   BUFFER_ATOMIC_ADD_F32_OFFSET [[COPY]], [[REG_SEQUENCE]], [[COPY5]], 4095, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom "TargetCustom7" + 4095, align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  %ret = call float @llvm.amdgcn.raw.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 4095, i32 %soffset, i32 0)
  ret void
}

; Natural mapping, no voffset
define amdgpu_ps void @raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset(float %val, <4 x i32> inreg %rsrc, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   BUFFER_ATOMIC_ADD_F32_OFFSET [[COPY]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  %ret = call float @llvm.amdgcn.raw.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 0, i32 %soffset, i32 0)
  ret void
}

; All operands need regbank legalization
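; A waterfall loop readfirstlanes the VGPR rsrc and soffset and runs the atomic for the lanes whose values match, repeating until exec is exhausted.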
define amdgpu_ps void @raw_buffer_atomic_add_f32_noret__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset(float inreg %val, <4 x i32> %rsrc, i32 inreg %voffset, i32 %soffset) {
  ; CHECK-LABEL: name: raw_buffer_atomic_add_f32_noret__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   successors: %bb.2(0x80000000)
  ; CHECK:   liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
  ; CHECK:   [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK:   [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3
  ; CHECK:   [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
  ; CHECK:   [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
  ; CHECK:   [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
  ; CHECK:   [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
  ; CHECK:   [[S_MOV_B64_term:%[0-9]+]]:sreg_64_xexec = S_MOV_B64_term $exec
  ; CHECK: bb.2:
  ; CHECK:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
  ; CHECK:   [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]].sub0, implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]].sub1, implicit $exec
  ; CHECK:   [[REG_SEQUENCE1:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1
  ; CHECK:   [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE1]], [[COPY9]], implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]].sub0, implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]].sub1, implicit $exec
  ; CHECK:   [[REG_SEQUENCE2:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_2]], %subreg.sub0, [[V_READFIRSTLANE_B32_3]], %subreg.sub1
  ; CHECK:   [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY10]], implicit $exec
  ; CHECK:   [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc
  ; CHECK:   [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
  ; CHECK:   [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
  ; CHECK:   [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
  ; CHECK:   [[S_AND_B64_1:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U32_e64_]], [[S_AND_B64_]], implicit-def $scc
  ; CHECK:   [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_1]], implicit-def $exec, implicit-def $scc, implicit $exec
  ; CHECK:   BUFFER_ATOMIC_ADD_F32_OFFEN [[COPY7]], [[COPY8]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK:   $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc
  ; CHECK:   S_CBRANCH_EXECNZ %bb.2, implicit $exec
  ; CHECK: bb.3:
  ; CHECK:   successors: %bb.4(0x80000000)
  ; CHECK:   $exec = S_MOV_B64_term [[S_MOV_B64_term]]
  ; CHECK: bb.4:
  ; CHECK:   S_ENDPGM 0
  %ret = call float @llvm.amdgcn.raw.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  ret void
}

; All operands need regbank legalization, no voffset
define amdgpu_ps void @raw_buffer_atomic_add_f32_noret__sgpr_val__vgpr_rsrc__0_voffset__vgpr_soffset(float inreg %val, <4 x i32> %rsrc, i32 %soffset) {
  ; CHECK-LABEL: name: raw_buffer_atomic_add_f32_noret__sgpr_val__vgpr_rsrc__0_voffset__vgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   successors: %bb.2(0x80000000)
  ; CHECK:   liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
  ; CHECK:   [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK:   [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr4
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
  ; CHECK:   [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
  ; CHECK:   [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
  ; CHECK:   [[S_MOV_B64_term:%[0-9]+]]:sreg_64_xexec = S_MOV_B64_term $exec
  ; CHECK: bb.2:
  ; CHECK:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
  ; CHECK:   [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]].sub0, implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]].sub1, implicit $exec
  ; CHECK:   [[REG_SEQUENCE1:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1
  ; CHECK:   [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE1]], [[COPY7]], implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub0, implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub1, implicit $exec
  ; CHECK:   [[REG_SEQUENCE2:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_2]], %subreg.sub0, [[V_READFIRSTLANE_B32_3]], %subreg.sub1
  ; CHECK:   [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY8]], implicit $exec
  ; CHECK:   [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc
  ; CHECK:   [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
  ; CHECK:   [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
  ; CHECK:   [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY5]], implicit $exec
  ; CHECK:   [[S_AND_B64_1:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U32_e64_]], [[S_AND_B64_]], implicit-def $scc
  ; CHECK:   [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_1]], implicit-def $exec, implicit-def $scc, implicit $exec
  ; CHECK:   BUFFER_ATOMIC_ADD_F32_OFFSET [[COPY6]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK:   $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc
  ; CHECK:   S_CBRANCH_EXECNZ %bb.2, implicit $exec
  ; CHECK: bb.3:
  ; CHECK:   successors: %bb.4(0x80000000)
  ; CHECK:   $exec = S_MOV_B64_term [[S_MOV_B64_term]]
  ; CHECK: bb.4:
  ; CHECK:   S_ENDPGM 0
  %ret = call float @llvm.amdgcn.raw.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 0, i32 %soffset, i32 0)
  ret void
}

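; The voffset add is again folded into the immediate offset.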
define amdgpu_ps void @raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095(float %val, <4 x i32> inreg %rsrc, i32 %voffset.base, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   BUFFER_ATOMIC_ADD_F32_OFFEN [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom "TargetCustom7" + 4095, align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  %voffset = add i32 %voffset.base, 4095
  %ret = call float @llvm.amdgcn.raw.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  ret void
}

; Natural mapping + slc
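; cachepolicy=2 sets bit 1 (slc), selected as the trailing slc immediate on the MUBUF instruction.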
define amdgpu_ps void @raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc(float %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   BUFFER_ATOMIC_ADD_F32_OFFEN [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 0, 1, implicit $exec :: (volatile dereferenceable load store 4 on custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  %ret = call float @llvm.amdgcn.raw.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 2)
  ret void
}

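; Packed f16 add uses BUFFER_ATOMIC_PK_ADD_F16.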
define amdgpu_ps void @raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(<2 x half> %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   BUFFER_ATOMIC_PK_ADD_F16_OFFEN [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  %ret = call <2 x half> @llvm.amdgcn.raw.buffer.atomic.fadd.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  ret void
}

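; Packed f16 add with zero voffset selects the _OFFSET form.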
define amdgpu_ps void @raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset(<2 x half> %val, <4 x i32> inreg %rsrc, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   BUFFER_ATOMIC_PK_ADD_F16_OFFSET [[COPY]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  %ret = call <2 x half> @llvm.amdgcn.raw.buffer.atomic.fadd.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 0, i32 %soffset, i32 0)
  ret void
}

declare float @llvm.amdgcn.raw.buffer.atomic.fadd.f32(float, <4 x i32>, i32, i32, i32 immarg) #0
declare <2 x half> @llvm.amdgcn.raw.buffer.atomic.fadd.v2f16(<2 x half>, <4 x i32>, i32, i32, i32 immarg) #0

attributes #0 = { nounwind }