; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -check-prefix=GCN -check-prefix=GFX9 %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -check-prefix=GCN -check-prefix=VI -check-prefix=CIVI %s
; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -check-prefix=GCN -check-prefix=CI -check-prefix=CIVI %s

; FIXME: Should be same on CI/VI
; GCN-LABEL: {{^}}s_ashr_v2i16:
; GFX9: s_load_dword [[LHS:s[0-9]+]]
; GFX9: s_load_dword [[RHS:s[0-9]+]]
; GFX9: v_mov_b32_e32 [[VLHS:v[0-9]+]], [[LHS]]
; GFX9: v_pk_ashrrev_i16 [[RESULT:v[0-9]+]], [[RHS]], [[VLHS]]

; CIVI: s_load_dword [[LHS:s[0-9]+]]
; CIVI: s_load_dword [[RHS:s[0-9]+]]

; CIVI-DAG: s_ashr_i32
; CIVI-DAG: s_ashr_i32
; CIVI-DAG: s_sext_i32_i16
; CIVI-DAG: s_sext_i32_i16
; CIVI-DAG: s_ashr_i32
; CIVI-DAG: s_ashr_i32
; CIVI-DAG: s_lshl_b32
; CIVI: s_and_b32
; CIVI: s_or_b32

; Uniform <2 x i16> ashr: both operands are kernel (SGPR) arguments; the i32
; padding args keep %lhs and %rhs in separate dwords.
define amdgpu_kernel void @s_ashr_v2i16(<2 x i16> addrspace(1)* %out, i32, <2 x i16> %lhs, i32, <2 x i16> %rhs) #0 {
  %result = ashr <2 x i16> %lhs, %rhs
  store <2 x i16> %result, <2 x i16> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}v_ashr_v2i16:
; GCN: {{buffer|flat|global}}_load_dword [[LHS:v[0-9]+]]
; GCN: {{buffer|flat|global}}_load_dword [[RHS:v[0-9]+]]
; GFX9: v_pk_ashrrev_i16 [[RESULT:v[0-9]+]], [[RHS]], [[LHS]]

; VI: v_ashrrev_i16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_ashrrev_i16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}

; CI: s_mov_b32 [[MASK:s[0-9]+]], 0xffff{{$}}
; CI-DAG: v_and_b32_e32 v{{[0-9]+}}, [[MASK]], [[RHS]]
; CI-DAG: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 16
; CI: v_ashrrev_i32_e32 v{{[0-9]+}}, 16, [[LHS]]
; CI: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; CI: v_ashr_i32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; CI: v_ashr_i32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; CI: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; CI: v_and_b32_e32 v{{[0-9]+}}, [[MASK]], v{{[0-9]+}}
; CI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; Divergent <2 x i16> ashr: both operands loaded per-thread from memory.
define amdgpu_kernel void @v_ashr_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
  %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
  %b_ptr = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %in.gep, i32 1
  %a = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
  %b = load <2 x i16>, <2 x i16> addrspace(1)* %b_ptr
  %result = ashr <2 x i16> %a, %b
  store <2 x i16> %result, <2 x i16> addrspace(1)* %out.gep
  ret void
}

; GCN-LABEL: {{^}}ashr_v_s_v2i16:
; GFX9: s_load_dword [[RHS:s[0-9]+]]
; GFX9: {{buffer|flat|global}}_load_dword [[LHS:v[0-9]+]]
; GFX9: v_pk_ashrrev_i16 [[RESULT:v[0-9]+]], [[RHS]], [[LHS]]
; Mixed operands: VGPR value shifted by a uniform (SGPR) shift amount.
define amdgpu_kernel void @ashr_v_s_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in, <2 x i16> %sgpr) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
  %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
  %vgpr = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
  %result = ashr <2 x i16> %vgpr, %sgpr
  store <2 x i16> %result, <2 x i16> addrspace(1)* %out.gep
  ret void
}

; GCN-LABEL: {{^}}ashr_s_v_v2i16:
; GFX9: s_load_dword [[LHS:s[0-9]+]]
; GFX9: {{buffer|flat|global}}_load_dword [[RHS:v[0-9]+]]
; GFX9: v_pk_ashrrev_i16 [[RESULT:v[0-9]+]], [[RHS]], [[LHS]]
; Mixed operands: uniform (SGPR) value shifted by a per-thread VGPR amount.
define amdgpu_kernel void @ashr_s_v_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in, <2 x i16> %sgpr) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
  %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
  %vgpr = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
  %result = ashr <2 x i16> %sgpr, %vgpr
  store <2 x i16> %result, <2 x i16> addrspace(1)* %out.gep
  ret void
}

; GCN-LABEL: {{^}}ashr_imm_v_v2i16:
; GCN: {{buffer|flat|global}}_load_dword [[RHS:v[0-9]+]]
; GFX9: v_pk_ashrrev_i16 [[RESULT:v[0-9]+]], [[RHS]], -4
; Immediate value <-4, -4> shifted by a per-thread VGPR amount.
define amdgpu_kernel void @ashr_imm_v_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
  %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
  %vgpr = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
  %result = ashr <2 x i16> <i16 -4, i16 -4>, %vgpr
  store <2 x i16> %result, <2 x i16> addrspace(1)* %out.gep
  ret void
}

; GCN-LABEL: {{^}}ashr_v_imm_v2i16:
; GCN: {{buffer|flat|global}}_load_dword [[LHS:v[0-9]+]]
; GFX9: v_pk_ashrrev_i16 [[RESULT:v[0-9]+]], 8, [[LHS]]
; Per-thread VGPR value shifted by the immediate splat <8, 8>.
define amdgpu_kernel void @ashr_v_imm_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
  %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
  %vgpr = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
  %result = ashr <2 x i16> %vgpr, <i16 8, i16 8>
  store <2 x i16> %result, <2 x i16> addrspace(1)* %out.gep
  ret void
}

; GCN-LABEL: {{^}}v_ashr_v4i16:
; GCN: {{buffer|flat|global}}_load_dwordx2
; GCN: {{buffer|flat|global}}_load_dwordx2
; GFX9: v_pk_ashrrev_i16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GFX9: v_pk_ashrrev_i16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}

; VI: v_ashrrev_i16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_ashrrev_i16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI: v_ashrrev_i16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_ashrrev_i16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}

; GCN: {{buffer|flat|global}}_store_dwordx2
; Wider vector: <4 x i16> ashr with both operands loaded per-thread; should
; split into two packed 2x16 operations on GFX9.
define amdgpu_kernel void @v_ashr_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %in.gep = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %in, i64 %tid.ext
  %out.gep = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %out, i64 %tid.ext
  %b_ptr = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %in.gep, i32 1
  %a = load <4 x i16>, <4 x i16> addrspace(1)* %in.gep
  %b = load <4 x i16>, <4 x i16> addrspace(1)* %b_ptr
  %result = ashr <4 x i16> %a, %b
  store <4 x i16> %result, <4 x i16> addrspace(1)* %out.gep
  ret void
}

; GCN-LABEL: {{^}}ashr_v_imm_v4i16:
; GCN: {{buffer|flat|global}}_load_dwordx2
; GFX9: v_pk_ashrrev_i16 v{{[0-9]+}}, 8, v{{[0-9]+}}
; GFX9: v_pk_ashrrev_i16 v{{[0-9]+}}, 8, v{{[0-9]+}}
; GCN: {{buffer|flat|global}}_store_dwordx2
; <4 x i16> shifted by the immediate splat <8, 8, 8, 8>.
define amdgpu_kernel void @ashr_v_imm_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %in.gep = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %in, i64 %tid.ext
  %out.gep = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %out, i64 %tid.ext
  %vgpr = load <4 x i16>, <4 x i16> addrspace(1)* %in.gep
  %result = ashr <4 x i16> %vgpr, <i16 8, i16 8, i16 8, i16 8>
  store <4 x i16> %result, <4 x i16> addrspace(1)* %out.gep
  ret void
}
declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }