; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -stop-after=instruction-select -verify-machineinstrs -o - %s | FileCheck %s

; Natural mapping
define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 0, 78, 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 78, i32 0)
  ret void
}

; Natural mapping
define amdgpu_ps void @raw_tbuffer_store_v2f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset(<2 x float> %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_tbuffer_store_v2f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
  ; CHECK:   [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK:   [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   TBUFFER_STORE_FORMAT_XY_OFFEN_exact [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 78, 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  call void @llvm.amdgcn.raw.tbuffer.store.v2f32(<2 x float> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 78, i32 0)
  ret void
}

; Natural mapping
define amdgpu_ps void @raw_tbuffer_store_v3f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset(<3 x float> %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_tbuffer_store_v3f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY4]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY6]], %subreg.sub3
  ; CHECK:   [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
  ; CHECK:   [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   TBUFFER_STORE_FORMAT_XYZ_OFFEN_exact [[REG_SEQUENCE]], [[COPY7]], [[REG_SEQUENCE1]], [[COPY8]], 0, 78, 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  call void @llvm.amdgcn.raw.tbuffer.store.v3f32(<3 x float> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 78, i32 0)
  ret void
}

; Natural mapping
define amdgpu_ps void @raw_tbuffer_store_v4f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset(<4 x float> %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_tbuffer_store_v4f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK:   [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
  ; CHECK:   [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
  ; CHECK:   [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   TBUFFER_STORE_FORMAT_XYZW_OFFEN_exact [[REG_SEQUENCE]], [[COPY8]], [[REG_SEQUENCE1]], [[COPY9]], 0, 78, 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  call void @llvm.amdgcn.raw.tbuffer.store.v4f32(<4 x float> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 78, i32 0)
  ret void
}

; Copies for VGPR arguments
define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__sgpr_voffset__sgpr_soffset(float %val, <4 x i32> inreg %rsrc, i32 inreg %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__sgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr7
  ; CHECK:   [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
  ; CHECK:   TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY7]], [[REG_SEQUENCE]], [[COPY6]], 0, 94, 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 94, i32 0)
  ret void
}

; Waterfall for rsrc
define amdgpu_ps void @raw_tbuffer_store_f32__vgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, <4 x i32> %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_tbuffer_store_f32__vgpr_rsrc__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   successors: %bb.2(0x80000000)
  ; CHECK:   liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK:   [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
  ; CHECK:   [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
  ; CHECK:   [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
  ; CHECK:   [[S_MOV_B32_term:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32_term $exec_lo
  ; CHECK: bb.2:
  ; CHECK:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
  ; CHECK:   [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]].sub0, implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]].sub1, implicit $exec
  ; CHECK:   [[REG_SEQUENCE1:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1
  ; CHECK:   [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE1]], [[COPY7]], implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub0, implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub1, implicit $exec
  ; CHECK:   [[REG_SEQUENCE2:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_2]], %subreg.sub0, [[V_READFIRSTLANE_B32_3]], %subreg.sub1
  ; CHECK:   [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY8]], implicit $exec
  ; CHECK:   [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc
  ; CHECK:   [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
  ; CHECK:   TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY5]], [[REG_SEQUENCE3]], [[COPY6]], 0, 94, 1, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
  ; CHECK:   [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
  ; CHECK:   $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
  ; CHECK:   S_CBRANCH_EXECNZ %bb.2, implicit $exec
  ; CHECK: bb.3:
  ; CHECK:   successors: %bb.4(0x80000000)
  ; CHECK:   $exec_lo = S_MOV_B32_term [[S_MOV_B32_term]]
  ; CHECK: bb.4:
  ; CHECK:   S_ENDPGM 0
  call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 94, i32 1)
  ret void
}

; Waterfall for rsrc and soffset
define amdgpu_ps void @raw_tbuffer_store_f32__vgpr_rsrc__vgpr_voffset__vgpr_soffset(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset) {
  ; CHECK-LABEL: name: raw_tbuffer_store_f32__vgpr_rsrc__vgpr_voffset__vgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   successors: %bb.2(0x80000000)
  ; CHECK:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK:   [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
  ; CHECK:   [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
  ; CHECK:   [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr6
  ; CHECK:   [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
  ; CHECK:   [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
  ; CHECK:   [[S_MOV_B32_term:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32_term $exec_lo
  ; CHECK: bb.2:
  ; CHECK:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
  ; CHECK:   [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]].sub0, implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]].sub1, implicit $exec
  ; CHECK:   [[REG_SEQUENCE1:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1
  ; CHECK:   [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE1]], [[COPY7]], implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub0, implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub1, implicit $exec
  ; CHECK:   [[REG_SEQUENCE2:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_2]], %subreg.sub0, [[V_READFIRSTLANE_B32_3]], %subreg.sub1
  ; CHECK:   [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY8]], implicit $exec
  ; CHECK:   [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc
  ; CHECK:   [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
  ; CHECK:   [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
  ; CHECK:   [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
  ; CHECK:   [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U32_e64_]], [[S_AND_B32_]], implicit-def $scc
  ; CHECK:   TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY5]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 94, 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
  ; CHECK:   [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
  ; CHECK:   $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
  ; CHECK:   S_CBRANCH_EXECNZ %bb.2, implicit $exec
  ; CHECK: bb.3:
  ; CHECK:   successors: %bb.4(0x80000000)
  ; CHECK:   $exec_lo = S_MOV_B32_term [[S_MOV_B32_term]]
  ; CHECK: bb.4:
  ; CHECK:   S_ENDPGM 0
  call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 94, i32 0)
  ret void
}

; Waterfall for rsrc and soffset, copy for voffset
define amdgpu_ps void @raw_tbuffer_store_f32__vgpr_rsrc__sgpr_voffset__vgpr_soffset(float %val, <4 x i32> %rsrc, i32 inreg %voffset, i32 %soffset) {
  ; CHECK-LABEL: name: raw_tbuffer_store_f32__vgpr_rsrc__sgpr_voffset__vgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   successors: %bb.2(0x80000000)
  ; CHECK:   liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK:   [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
  ; CHECK:   [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr5
  ; CHECK:   [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
  ; CHECK:   [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
  ; CHECK:   [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
  ; CHECK:   [[S_MOV_B32_term:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32_term $exec_lo
  ; CHECK: bb.2:
  ; CHECK:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
  ; CHECK:   [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub0, implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub1, implicit $exec
  ; CHECK:   [[REG_SEQUENCE1:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1
  ; CHECK:   [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE1]], [[COPY8]], implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]].sub0, implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]].sub1, implicit $exec
  ; CHECK:   [[REG_SEQUENCE2:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_2]], %subreg.sub0, [[V_READFIRSTLANE_B32_3]], %subreg.sub1
  ; CHECK:   [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY9]], implicit $exec
  ; CHECK:   [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc
  ; CHECK:   [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
  ; CHECK:   [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
  ; CHECK:   [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
  ; CHECK:   [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U32_e64_]], [[S_AND_B32_]], implicit-def $scc
  ; CHECK:   TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY7]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 78, 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
  ; CHECK:   [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
  ; CHECK:   $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
  ; CHECK:   S_CBRANCH_EXECNZ %bb.2, implicit $exec
  ; CHECK: bb.3:
  ; CHECK:   successors: %bb.4(0x80000000)
  ; CHECK:   $exec_lo = S_MOV_B32_term [[S_MOV_B32_term]]
  ; CHECK: bb.4:
  ; CHECK:   S_ENDPGM 0
  call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 78, i32 0)
  ret void
}

; Natural mapping + glc
define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_glc(float %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_glc
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 0, 78, 1, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 78, i32 1)
  ret void
}

; Natural mapping + slc
define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc(float %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 0, 78, 2, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 78, i32 2)
  ret void
}

; Natural mapping + glc + slc
define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc_glc(float %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc_glc
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 0, 78, 3, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 78, i32 3)
  ret void
}

; Natural mapping + dlc
define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_dlc(float %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_dlc
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 0, 78, 4, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 78, i32 4)
  ret void
}
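; A zero voffset selects the OFFSET form, so no voffset register operand is emitted.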
define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset0(float %val, <4 x i32> inreg %rsrc, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset0
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   TBUFFER_STORE_FORMAT_X_OFFSET_exact [[COPY]], [[REG_SEQUENCE]], [[COPY5]], 0, 94, 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 0, i32 %soffset, i32 94, i32 0)
  ret void
}

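; An immediate voffset of 4095 fits in the 12-bit offset field and folds into the instruction.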
define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset4095(float %val, <4 x i32> inreg %rsrc, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset4095
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   TBUFFER_STORE_FORMAT_X_OFFSET_exact [[COPY]], [[REG_SEQUENCE]], [[COPY5]], 4095, 94, 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 4095, i32 %soffset, i32 94, i32 0)
  ret void
}

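; 4096 does not fit in the 12-bit offset field, so the constant is materialized in a VGPR and the OFFEN form is used.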
define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset4096(float %val, <4 x i32> inreg %rsrc, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset4096
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096
  ; CHECK:   [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
  ; CHECK:   TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY6]], [[REG_SEQUENCE]], [[COPY5]], 0, 94, 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 4096, i32 %soffset, i32 94, i32 0)
  ret void
}

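; A small constant added to voffset folds into the immediate offset field.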
define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add16(float %val, <4 x i32> inreg %rsrc, i32 %voffset.base, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add16
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 16, 94, 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  %voffset = add i32 %voffset.base, 16
  call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 94, i32 0)
  ret void
}

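; 4095 is the largest addend that still folds into the immediate offset field.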
define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095(float %val, <4 x i32> inreg %rsrc, i32 %voffset.base, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 94, 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  %voffset = add i32 %voffset.base, 4095
  call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 94, i32 0)
  ret void
}

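; An addend of 4096 does not fold, so the add is selected as V_ADD_U32.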
define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4096(float %val, <4 x i32> inreg %rsrc, i32 %voffset.base, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4096
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096
  ; CHECK:   [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
  ; CHECK:   [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec
  ; CHECK:   TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 94, 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  %voffset = add i32 %voffset.base, 4096
  call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 94, i32 0)
  ret void
}

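; A constant soffset is materialized with S_MOV_B32 rather than folded into the immediate offset.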
define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset4095(float %val, <4 x i32> inreg %rsrc, i32 %voffset) {
  ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset4095
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095
  ; CHECK:   TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 94, 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 4095, i32 94, i32 0)
  ret void
}

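; Same for 4096: the constant soffset is materialized with S_MOV_B32.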
define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset4096(float %val, <4 x i32> inreg %rsrc, i32 %voffset) {
  ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset4096
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096
  ; CHECK:   TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 94, 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 4096, i32 94, i32 0)
  ret void
}

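; An add of a constant to an SGPR soffset is selected as S_ADD_I32.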
define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add16(float %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset.base) {
  ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add16
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 16
  ; CHECK:   [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY6]], [[S_MOV_B32_]], implicit-def $scc
  ; CHECK:   TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[S_ADD_I32_]], 0, 94, 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  %soffset = add i32 %soffset.base, 16
  call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 94, i32 0)
  ret void
}

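; The 4095 addend is also done with S_ADD_I32; it is not folded into the immediate offset.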
define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add4095(float %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset.base) {
  ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add4095
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095
  ; CHECK:   [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY6]], [[S_MOV_B32_]], implicit-def $scc
  ; CHECK:   TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[S_ADD_I32_]], 0, 94, 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  %soffset = add i32 %soffset.base, 4095
  call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 94, i32 0)
  ret void
}

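; Likewise for 4096: the soffset add stays an S_ADD_I32.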
define amdgpu_ps void @raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add4096(float %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset.base) {
  ; CHECK-LABEL: name: raw_tbuffer_store_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add4096
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096
  ; CHECK:   [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY6]], [[S_MOV_B32_]], implicit-def $scc
  ; CHECK:   TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[S_ADD_I32_]], 0, 94, 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  %soffset = add i32 %soffset.base, 4096
  call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 94, i32 0)
  ret void
}

; An add of the offset is necessary, with a waterfall loop. Make sure the add is done outside of the waterfall loop.
define amdgpu_ps void @raw_tbuffer_store_f32__vgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add5000(float %val, <4 x i32> %rsrc, i32 %voffset, i32 inreg %soffset.base) {
  ; CHECK-LABEL: name: raw_tbuffer_store_f32__vgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add5000
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   successors: %bb.2(0x80000000)
  ; CHECK:   liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK:   [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
  ; CHECK:   [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 5000
  ; CHECK:   [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY6]], [[S_MOV_B32_]], implicit-def $scc
  ; CHECK:   [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
  ; CHECK:   [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
  ; CHECK:   [[S_MOV_B32_term:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32_term $exec_lo
  ; CHECK: bb.2:
  ; CHECK:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
  ; CHECK:   [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]].sub0, implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]].sub1, implicit $exec
  ; CHECK:   [[REG_SEQUENCE1:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1
  ; CHECK:   [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE1]], [[COPY7]], implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub0, implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub1, implicit $exec
  ; CHECK:   [[REG_SEQUENCE2:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_2]], %subreg.sub0, [[V_READFIRSTLANE_B32_3]], %subreg.sub1
  ; CHECK:   [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY8]], implicit $exec
  ; CHECK:   [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc
  ; CHECK:   [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
  ; CHECK:   TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[COPY5]], [[REG_SEQUENCE3]], [[S_ADD_I32_]], 0, 94, 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
  ; CHECK:   [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
  ; CHECK:   $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
  ; CHECK:   S_CBRANCH_EXECNZ %bb.2, implicit $exec
  ; CHECK: bb.3:
  ; CHECK:   successors: %bb.4(0x80000000)
  ; CHECK:   $exec_lo = S_MOV_B32_term [[S_MOV_B32_term]]
  ; CHECK: bb.4:
  ; CHECK:   S_ENDPGM 0
  %soffset = add i32 %soffset.base, 5000
  call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 94, i32 0)
  ret void
}

; An add of the offset is necessary, with a waterfall loop. Make sure the add is done outside of the waterfall loop.
define amdgpu_ps void @raw_tbuffer_store_f32__vgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add5000(float %val, <4 x i32> %rsrc, i32 %voffset.base, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_tbuffer_store_f32__vgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add5000
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   successors: %bb.2(0x80000000)
  ; CHECK:   liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK:   [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
  ; CHECK:   [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096
  ; CHECK:   [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
  ; CHECK:   [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec
  ; CHECK:   [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
  ; CHECK:   [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
  ; CHECK:   [[S_MOV_B32_term:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32_term $exec_lo
  ; CHECK: bb.2:
  ; CHECK:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
  ; CHECK:   [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub0, implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub1, implicit $exec
  ; CHECK:   [[REG_SEQUENCE1:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1
  ; CHECK:   [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE1]], [[COPY8]], implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]].sub0, implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]].sub1, implicit $exec
  ; CHECK:   [[REG_SEQUENCE2:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_2]], %subreg.sub0, [[V_READFIRSTLANE_B32_3]], %subreg.sub1
  ; CHECK:   [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY9]], implicit $exec
  ; CHECK:   [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc
  ; CHECK:   [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
  ; CHECK:   TBUFFER_STORE_FORMAT_X_OFFEN_exact [[COPY]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE3]], [[COPY6]], 904, 94, 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
  ; CHECK:   [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
  ; CHECK:   $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
  ; CHECK:   S_CBRANCH_EXECNZ %bb.2, implicit $exec
  ; CHECK: bb.3:
  ; CHECK:   successors: %bb.4(0x80000000)
  ; CHECK:   $exec_lo = S_MOV_B32_term [[S_MOV_B32_term]]
  ; CHECK: bb.4:
  ; CHECK:   S_ENDPGM 0
  %voffset = add i32 %voffset.base, 5000
  call void @llvm.amdgcn.raw.tbuffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 94, i32 0)
  ret void
}

declare void @llvm.amdgcn.raw.tbuffer.store.f32(float, <4 x i32>, i32, i32, i32 immarg, i32 immarg)
declare void @llvm.amdgcn.raw.tbuffer.store.v2f32(<2 x float>, <4 x i32>, i32, i32, i32 immarg, i32 immarg)
declare void @llvm.amdgcn.raw.tbuffer.store.v3f32(<3 x float>, <4 x i32>, i32, i32, i32 immarg, i32 immarg)
declare void @llvm.amdgcn.raw.tbuffer.store.v4f32(<4 x float>, <4 x i32>, i32, i32, i32 immarg, i32 immarg)