1; RUN: opt -S -mtriple=amdgcn-- -amdgpu-lower-module-lds < %s | FileCheck %s
2; RUN: opt -S -mtriple=amdgcn-- -passes=amdgpu-lower-module-lds < %s | FileCheck %s
3
4; Properly aligned, same size as alignment.
5; CHECK: %llvm.amdgcn.kernel.k0.lds.t = type { [16 x i8], [8 x i8], [4 x i8], [2 x i8], [1 x i8] }
6
; Different proper alignments, but all with the same size of 1 byte.
8; CHECK: %llvm.amdgcn.kernel.k1.lds.t = type { [1 x i8], [1 x i8], [1 x i8], [1 x i8], [1 x i8], [3 x i8], [1 x i8] }
9
; All are under-aligned; each one must be fixed up to a different alignment boundary.
11; CHECK: %llvm.amdgcn.kernel.k2.lds.t = type { [9 x i8], [1 x i8], [2 x i8], [3 x i8], [1 x i8], [5 x i8] }
12
; All LDS are under-aligned; allocation must be on an 8-byte boundary.
14; CHECK: %llvm.amdgcn.kernel.k3.lds.t = type { [7 x i8], [1 x i8], [7 x i8], [1 x i8], [6 x i8], [2 x i8], [5 x i8] }
15
; All LDS are under-aligned; allocation must be on a 16-byte boundary.
17; CHECK: %llvm.amdgcn.kernel.k4.lds.t = type { [12 x i8], [4 x i8], [11 x i8], [5 x i8], [10 x i8], [6 x i8], [9 x i8] }
18
; All LDS are properly aligned on a 16-byte boundary, but they are of different sizes.
20; CHECK: %llvm.amdgcn.kernel.k5.lds.t = type { [20 x i8], [12 x i8], [19 x i8], [13 x i8], [18 x i8], [14 x i8], [17 x i8] }
21
22; CHECK: @llvm.amdgcn.kernel.k0.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k0.lds.t undef, align 16
23; CHECK: @llvm.amdgcn.kernel.k1.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k1.lds.t undef, align 16
24; CHECK: @llvm.amdgcn.kernel.k2.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k2.lds.t undef, align 16
25; CHECK: @llvm.amdgcn.kernel.k3.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k3.lds.t undef, align 8
26; CHECK: @llvm.amdgcn.kernel.k4.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k4.lds.t undef, align 16
27; CHECK: @llvm.amdgcn.kernel.k5.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k5.lds.t undef, align 16
28
29
; Properly aligned, same size as alignment. The pass packs all five globals
; into one per-kernel struct and removes the originals.
; CHECK-NOT: @k0.lds.size.1.align.1
; CHECK-NOT: @k0.lds.size.2.align.2
; CHECK-NOT: @k0.lds.size.4.align.4
; CHECK-NOT: @k0.lds.size.8.align.8
; CHECK-NOT: @k0.lds.size.16.align.16
@k0.lds.size.1.align.1 = internal unnamed_addr addrspace(3) global [1 x i8] undef, align 1
@k0.lds.size.2.align.2 = internal unnamed_addr addrspace(3) global [2 x i8] undef, align 2
@k0.lds.size.4.align.4 = internal unnamed_addr addrspace(3) global [4 x i8] undef, align 4
@k0.lds.size.8.align.8 = internal unnamed_addr addrspace(3) global [8 x i8] undef, align 8
@k0.lds.size.16.align.16 = internal unnamed_addr addrspace(3) global [16 x i8] undef, align 16

define amdgpu_kernel void @k0() {
  %k0.lds.size.1.align.1.bc = bitcast [1 x i8] addrspace(3)* @k0.lds.size.1.align.1 to i8 addrspace(3)*
  store i8 1, i8 addrspace(3)* %k0.lds.size.1.align.1.bc, align 1

  %k0.lds.size.2.align.2.bc = bitcast [2 x i8] addrspace(3)* @k0.lds.size.2.align.2 to i8 addrspace(3)*
  store i8 2, i8 addrspace(3)* %k0.lds.size.2.align.2.bc, align 2

  %k0.lds.size.4.align.4.bc = bitcast [4 x i8] addrspace(3)* @k0.lds.size.4.align.4 to i8 addrspace(3)*
  store i8 3, i8 addrspace(3)* %k0.lds.size.4.align.4.bc, align 4

  %k0.lds.size.8.align.8.bc = bitcast [8 x i8] addrspace(3)* @k0.lds.size.8.align.8 to i8 addrspace(3)*
  store i8 4, i8 addrspace(3)* %k0.lds.size.8.align.8.bc, align 8

  %k0.lds.size.16.align.16.bc = bitcast [16 x i8] addrspace(3)* @k0.lds.size.16.align.16 to i8 addrspace(3)*
  store i8 5, i8 addrspace(3)* %k0.lds.size.16.align.16.bc, align 16

  ret void
}
60
; Different proper alignments, but all with the same size of 1 byte. Layout is
; by decreasing alignment, with padding inserted where needed.
; CHECK-NOT: @k1.lds.size.1.align.1
; CHECK-NOT: @k1.lds.size.1.align.2
; CHECK-NOT: @k1.lds.size.1.align.4
; CHECK-NOT: @k1.lds.size.1.align.8
; CHECK-NOT: @k1.lds.size.1.align.16
@k1.lds.size.1.align.1 = internal unnamed_addr addrspace(3) global [1 x i8] undef, align 1
@k1.lds.size.1.align.2 = internal unnamed_addr addrspace(3) global [1 x i8] undef, align 2
@k1.lds.size.1.align.4 = internal unnamed_addr addrspace(3) global [1 x i8] undef, align 4
@k1.lds.size.1.align.8 = internal unnamed_addr addrspace(3) global [1 x i8] undef, align 8
@k1.lds.size.1.align.16 = internal unnamed_addr addrspace(3) global [1 x i8] undef, align 16

define amdgpu_kernel void @k1() {
  %k1.lds.size.1.align.1.bc = bitcast [1 x i8] addrspace(3)* @k1.lds.size.1.align.1 to i8 addrspace(3)*
  store i8 1, i8 addrspace(3)* %k1.lds.size.1.align.1.bc, align 1

  %k1.lds.size.1.align.2.bc = bitcast [1 x i8] addrspace(3)* @k1.lds.size.1.align.2 to i8 addrspace(3)*
  store i8 2, i8 addrspace(3)* %k1.lds.size.1.align.2.bc, align 2

  %k1.lds.size.1.align.4.bc = bitcast [1 x i8] addrspace(3)* @k1.lds.size.1.align.4 to i8 addrspace(3)*
  store i8 3, i8 addrspace(3)* %k1.lds.size.1.align.4.bc, align 4

  %k1.lds.size.1.align.8.bc = bitcast [1 x i8] addrspace(3)* @k1.lds.size.1.align.8 to i8 addrspace(3)*
  store i8 4, i8 addrspace(3)* %k1.lds.size.1.align.8.bc, align 8

  %k1.lds.size.1.align.16.bc = bitcast [1 x i8] addrspace(3)* @k1.lds.size.1.align.16 to i8 addrspace(3)*
  store i8 5, i8 addrspace(3)* %k1.lds.size.1.align.16.bc, align 16

  ret void
}
91
; All are under-aligned; each one must be fixed up to a different alignment
; boundary (size > declared alignment in every case).
; CHECK-NOT: @k2.lds.size.2.align.1
; CHECK-NOT: @k2.lds.size.3.align.2
; CHECK-NOT: @k2.lds.size.5.align.4
; CHECK-NOT: @k2.lds.size.9.align.8
@k2.lds.size.2.align.1 = internal unnamed_addr addrspace(3) global [2 x i8] undef, align 1
@k2.lds.size.3.align.2 = internal unnamed_addr addrspace(3) global [3 x i8] undef, align 2
@k2.lds.size.5.align.4 = internal unnamed_addr addrspace(3) global [5 x i8] undef, align 4
@k2.lds.size.9.align.8 = internal unnamed_addr addrspace(3) global [9 x i8] undef, align 8

define amdgpu_kernel void @k2() {
  %k2.lds.size.2.align.1.bc = bitcast [2 x i8] addrspace(3)* @k2.lds.size.2.align.1 to i8 addrspace(3)*
  store i8 1, i8 addrspace(3)* %k2.lds.size.2.align.1.bc, align 1

  %k2.lds.size.3.align.2.bc = bitcast [3 x i8] addrspace(3)* @k2.lds.size.3.align.2 to i8 addrspace(3)*
  store i8 2, i8 addrspace(3)* %k2.lds.size.3.align.2.bc, align 2

  %k2.lds.size.5.align.4.bc = bitcast [5 x i8] addrspace(3)* @k2.lds.size.5.align.4 to i8 addrspace(3)*
  store i8 3, i8 addrspace(3)* %k2.lds.size.5.align.4.bc, align 4

  %k2.lds.size.9.align.8.bc = bitcast [9 x i8] addrspace(3)* @k2.lds.size.9.align.8 to i8 addrspace(3)*
  store i8 4, i8 addrspace(3)* %k2.lds.size.9.align.8.bc, align 8

  ret void
}
117
; All LDS are under-aligned; allocation must be on an 8-byte boundary.
; CHECK-NOT: @k3.lds.size.5.align.2
; CHECK-NOT: @k3.lds.size.6.align.2
; CHECK-NOT: @k3.lds.size.7.align.2
; CHECK-NOT: @k3.lds.size.7.align.4
@k3.lds.size.5.align.2 = internal unnamed_addr addrspace(3) global [5 x i8] undef, align 2
@k3.lds.size.6.align.2 = internal unnamed_addr addrspace(3) global [6 x i8] undef, align 2
@k3.lds.size.7.align.2 = internal unnamed_addr addrspace(3) global [7 x i8] undef, align 2
@k3.lds.size.7.align.4 = internal unnamed_addr addrspace(3) global [7 x i8] undef, align 4

define amdgpu_kernel void @k3() {
  %k3.lds.size.5.align.2.bc = bitcast [5 x i8] addrspace(3)* @k3.lds.size.5.align.2 to i8 addrspace(3)*
  store i8 1, i8 addrspace(3)* %k3.lds.size.5.align.2.bc, align 2

  %k3.lds.size.6.align.2.bc = bitcast [6 x i8] addrspace(3)* @k3.lds.size.6.align.2 to i8 addrspace(3)*
  store i8 2, i8 addrspace(3)* %k3.lds.size.6.align.2.bc, align 2

  %k3.lds.size.7.align.2.bc = bitcast [7 x i8] addrspace(3)* @k3.lds.size.7.align.2 to i8 addrspace(3)*
  store i8 3, i8 addrspace(3)* %k3.lds.size.7.align.2.bc, align 2

  %k3.lds.size.7.align.4.bc = bitcast [7 x i8] addrspace(3)* @k3.lds.size.7.align.4 to i8 addrspace(3)*
  store i8 4, i8 addrspace(3)* %k3.lds.size.7.align.4.bc, align 4

  ret void
}
143
; All LDS are under-aligned; allocation must be on a 16-byte boundary.
; CHECK-NOT: @k4.lds.size.9.align.1
; CHECK-NOT: @k4.lds.size.10.align.2
; CHECK-NOT: @k4.lds.size.11.align.4
; CHECK-NOT: @k4.lds.size.12.align.8
@k4.lds.size.9.align.1 = internal unnamed_addr addrspace(3) global [9 x i8] undef, align 1
@k4.lds.size.10.align.2 = internal unnamed_addr addrspace(3) global [10 x i8] undef, align 2
@k4.lds.size.11.align.4 = internal unnamed_addr addrspace(3) global [11 x i8] undef, align 4
@k4.lds.size.12.align.8 = internal unnamed_addr addrspace(3) global [12 x i8] undef, align 8

define amdgpu_kernel void @k4() {
  %k4.lds.size.9.align.1.bc = bitcast [9 x i8] addrspace(3)* @k4.lds.size.9.align.1 to i8 addrspace(3)*
  store i8 1, i8 addrspace(3)* %k4.lds.size.9.align.1.bc, align 1

  %k4.lds.size.10.align.2.bc = bitcast [10 x i8] addrspace(3)* @k4.lds.size.10.align.2 to i8 addrspace(3)*
  store i8 2, i8 addrspace(3)* %k4.lds.size.10.align.2.bc, align 2

  %k4.lds.size.11.align.4.bc = bitcast [11 x i8] addrspace(3)* @k4.lds.size.11.align.4 to i8 addrspace(3)*
  store i8 3, i8 addrspace(3)* %k4.lds.size.11.align.4.bc, align 4

  %k4.lds.size.12.align.8.bc = bitcast [12 x i8] addrspace(3)* @k4.lds.size.12.align.8 to i8 addrspace(3)*
  store i8 4, i8 addrspace(3)* %k4.lds.size.12.align.8.bc, align 8

  ret void
}
169
; All LDS are properly aligned on a 16-byte boundary, but they are of different sizes.
; CHECK-NOT: @k5.lds.size.17.align.16
; CHECK-NOT: @k5.lds.size.18.align.16
; CHECK-NOT: @k5.lds.size.19.align.16
; CHECK-NOT: @k5.lds.size.20.align.16
@k5.lds.size.17.align.16 = internal unnamed_addr addrspace(3) global [17 x i8] undef, align 16
@k5.lds.size.18.align.16 = internal unnamed_addr addrspace(3) global [18 x i8] undef, align 16
@k5.lds.size.19.align.16 = internal unnamed_addr addrspace(3) global [19 x i8] undef, align 16
@k5.lds.size.20.align.16 = internal unnamed_addr addrspace(3) global [20 x i8] undef, align 16

define amdgpu_kernel void @k5() {
  %k5.lds.size.17.align.16.bc = bitcast [17 x i8] addrspace(3)* @k5.lds.size.17.align.16 to i8 addrspace(3)*
  store i8 1, i8 addrspace(3)* %k5.lds.size.17.align.16.bc, align 16

  %k5.lds.size.18.align.16.bc = bitcast [18 x i8] addrspace(3)* @k5.lds.size.18.align.16 to i8 addrspace(3)*
  store i8 2, i8 addrspace(3)* %k5.lds.size.18.align.16.bc, align 16

  %k5.lds.size.19.align.16.bc = bitcast [19 x i8] addrspace(3)* @k5.lds.size.19.align.16 to i8 addrspace(3)*
  store i8 3, i8 addrspace(3)* %k5.lds.size.19.align.16.bc, align 16

  %k5.lds.size.20.align.16.bc = bitcast [20 x i8] addrspace(3)* @k5.lds.size.20.align.16 to i8 addrspace(3)*
  store i8 4, i8 addrspace(3)* %k5.lds.size.20.align.16.bc, align 16

  ret void
}
194