1; RUN: llc -march=amdgcn -disable-promote-alloca-to-vector -verify-machineinstrs < %s | FileCheck %s
2
3declare i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr() #1
4
5declare i32 @llvm.amdgcn.workitem.id.x() #1
6
7; CI+ intrinsic
8declare void @llvm.amdgcn.s.dcache.inv.vol() #0
9
10; VI+ intrinsic
11declare void @llvm.amdgcn.s.dcache.wb() #0
12
; CHECK-LABEL: {{^}}target_none:
; CHECK: s_movk_i32 [[OFFSETREG:s[0-9]+]], 0x400
; CHECK: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, [[OFFSETREG]]
; CHECK: buffer_store_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64
define amdgpu_kernel void @target_none() #0 {
  ; No "target-cpu" attribute: codegen uses the default subtarget for -march=amdgcn.
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  ; Read a global pointer out of the kernarg segment at byte offset 1024 (0x400).
  %args = call i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr()
  %args.gep = getelementptr inbounds i8, i8 addrspace(4)* %args, i64 1024
  %args.cast = bitcast i8 addrspace(4)* %args.gep to i32 addrspace(1)* addrspace(4)*
  %out = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(4)* %args.cast
  ; Store per-lane zero through the loaded pointer, indexed by workitem id.
  %out.gep = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 %tid.ext
  store i32 0, i32 addrspace(1)* %out.gep
  ret void
}
28
; CHECK-LABEL: {{^}}target_tahiti:
; CHECK: s_movk_i32 [[OFFSETREG:s[0-9]+]], 0x400
; CHECK: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, [[OFFSETREG]]
; CHECK: buffer_store_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64
; Uses attribute group #2 ("target-cpu"="tahiti") so this actually exercises the
; SI subtarget selection; it was previously #1 (plain nounwind readnone), which
; left #2 unused and made this kernel indistinguishable from @target_none.
define amdgpu_kernel void @target_tahiti() #2 {
  %kernargs = call i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr()
  ; Global pointer lives at kernarg byte offset 1024 (0x400).
  %kernargs.gep = getelementptr inbounds i8, i8 addrspace(4)* %kernargs, i64 1024
  %kernargs.gep.cast = bitcast i8 addrspace(4)* %kernargs.gep to i32 addrspace(1)* addrspace(4)*
  %ptr = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(4)* %kernargs.gep.cast
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %id.ext = sext i32 %id to i64
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %ptr, i64 %id.ext
  store i32 0, i32 addrspace(1)* %gep
  ret void
}
44
; CHECK-LABEL: {{^}}target_bonaire:
; CHECK: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x100
; CHECK: buffer_store_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64
; CHECK: s_dcache_inv_vol
define amdgpu_kernel void @target_bonaire() #3 {
  ; "target-cpu"="bonaire" (CI): SMRD takes the 1024-byte offset as an
  ; immediate in dwords (0x100), and the CI-only dcache intrinsic is legal.
  %lane = call i32 @llvm.amdgcn.workitem.id.x()
  %lane.ext = sext i32 %lane to i64
  %kern.base = call i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr()
  %kern.off = getelementptr inbounds i8, i8 addrspace(4)* %kern.base, i64 1024
  %kern.ptr = bitcast i8 addrspace(4)* %kern.off to i32 addrspace(1)* addrspace(4)*
  %dst = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(4)* %kern.ptr
  %dst.elt = getelementptr inbounds i32, i32 addrspace(1)* %dst, i64 %lane.ext
  store i32 0, i32 addrspace(1)* %dst.elt
  call void @llvm.amdgcn.s.dcache.inv.vol()
  ret void
}
61
; CHECK-LABEL: {{^}}target_fiji:
; CHECK: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x400
; CHECK: flat_store_dword
; CHECK: s_dcache_wb{{$}}
define amdgpu_kernel void @target_fiji() #4 {
  ; "target-cpu"="fiji" (VI): byte-granular SMEM immediate offset (0x400),
  ; flat addressing for the global store, and the VI-only s_dcache_wb.
  %lane = call i32 @llvm.amdgcn.workitem.id.x()
  %lane.ext = sext i32 %lane to i64
  %kern.base = call i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr()
  %kern.off = getelementptr inbounds i8, i8 addrspace(4)* %kern.base, i64 1024
  %kern.ptr = bitcast i8 addrspace(4)* %kern.off to i32 addrspace(1)* addrspace(4)*
  %dst = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(4)* %kern.ptr
  %dst.elt = getelementptr inbounds i32, i32 addrspace(1)* %dst, i64 %lane.ext
  store i32 0, i32 addrspace(1)* %dst.elt
  call void @llvm.amdgcn.s.dcache.wb()
  ret void
}
78
; CHECK-LABEL: {{^}}promote_alloca_enabled:
; CHECK: ds_read_b32
define amdgpu_kernel void @promote_alloca_enabled(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #5 {
entry:
  ; With +promote-alloca the private array is promoted to LDS, so the
  ; dynamic index turns into a ds_read instead of scratch traffic.
  %buf = alloca [5 x i32], align 4, addrspace(5)
  %idx = load i32, i32 addrspace(1)* %in, align 4
  %elt = getelementptr inbounds [5 x i32], [5 x i32] addrspace(5)* %buf, i32 0, i32 %idx
  %val = load i32, i32 addrspace(5)* %elt
  store i32 %val, i32 addrspace(1)* %out
  ret void
}
90
; CHECK-LABEL: {{^}}promote_alloca_disabled:
; CHECK: SCRATCH_RSRC_DWORD0
; CHECK: SCRATCH_RSRC_DWORD1
; CHECK: ScratchSize: 24
define amdgpu_kernel void @promote_alloca_disabled(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #6 {
entry:
  ; With -promote-alloca the array stays in private memory, forcing a
  ; scratch buffer resource and a nonzero ScratchSize.
  %buf = alloca [5 x i32], align 4, addrspace(5)
  %idx = load i32, i32 addrspace(1)* %in, align 4
  %elt = getelementptr inbounds [5 x i32], [5 x i32] addrspace(5)* %buf, i32 0, i32 %idx
  %val = load i32, i32 addrspace(5)* %elt
  store i32 %val, i32 addrspace(1)* %out
  ret void
}
104
; Attribute groups referenced by the kernels above: #2-#4 pin a specific CPU
; ("target-cpu"), while #5/#6 toggle the promote-alloca subtarget feature.
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind "target-cpu"="tahiti" }
attributes #3 = { nounwind "target-cpu"="bonaire" }
attributes #4 = { nounwind "target-cpu"="fiji" }
attributes #5 = { nounwind "target-features"="+promote-alloca" "amdgpu-waves-per-eu"="1,3" }
attributes #6 = { nounwind "target-features"="-promote-alloca" "amdgpu-waves-per-eu"="1,3" }
112