; RUN: llc -march=amdgcn -mtriple=amdgcn---amdgiz -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
; RUN: llc -march=amdgcn -mtriple=amdgcn---amdgiz -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX89 -check-prefix=VI %s
; RUN: llc -march=amdgcn -mtriple=amdgcn---amdgiz -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX89 -check-prefix=GFX9 %s

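; Uniform <2 x half> load with constant indices: element 1 is expected to be
; unpacked from the loaded dword with a 16-bit scalar shift before each half
; is stored.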
; GCN-LABEL: {{^}}extract_vector_elt_v2f16:
; GCN: s_load_dword [[VEC:s[0-9]+]]
; GCN: s_lshr_b32 [[ELT1:s[0-9]+]], [[VEC]], 16
; GCN-DAG: v_mov_b32_e32 [[VELT0:v[0-9]+]], [[VEC]]
; GCN-DAG: v_mov_b32_e32 [[VELT1:v[0-9]+]], [[ELT1]]
; GCN-DAG: buffer_store_short [[VELT0]]
; GCN-DAG: buffer_store_short [[VELT1]]
define amdgpu_kernel void @extract_vector_elt_v2f16(half addrspace(1)* %out, <2 x half> addrspace(4)* %vec.ptr) #0 {
  %vec = load <2 x half>, <2 x half> addrspace(4)* %vec.ptr
  %p0 = extractelement <2 x half> %vec, i32 0
  %p1 = extractelement <2 x half> %vec, i32 1
  %out1 = getelementptr half, half addrspace(1)* %out, i32 10
  store half %p1, half addrspace(1)* %out, align 2
  store half %p0, half addrspace(1)* %out1, align 2
  ret void
}

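; Dynamic extract with a uniform (SGPR) index: the index is scaled to a bit
; offset and applied with scalar shifts, without spilling to scratch.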
; GCN-LABEL: {{^}}extract_vector_elt_v2f16_dynamic_sgpr:
; GCN: s_load_dword [[IDX:s[0-9]+]]
; GCN: s_load_dword [[VEC:s[0-9]+]]
; GCN: s_lshl_b32 [[IDX_SCALED:s[0-9]+]], [[IDX]], 4
; GCN: s_lshr_b32 [[ELT1:s[0-9]+]], [[VEC]], [[IDX_SCALED]]
; GCN: v_mov_b32_e32 [[VELT1:v[0-9]+]], [[ELT1]]
; GCN: buffer_store_short [[VELT1]]
; GCN: ScratchSize: 0{{$}}
define amdgpu_kernel void @extract_vector_elt_v2f16_dynamic_sgpr(half addrspace(1)* %out, <2 x half> addrspace(4)* %vec.ptr, i32 %idx) #0 {
  %vec = load <2 x half>, <2 x half> addrspace(4)* %vec.ptr
  %elt = extractelement <2 x half> %vec, i32 %idx
  store half %elt, half addrspace(1)* %out, align 2
  ret void
}

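; Dynamic extract with a divergent index loaded per-thread: the shift of the
; uniform vector has to be done with VALU instructions, still without scratch.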
; GCN-LABEL: {{^}}extract_vector_elt_v2f16_dynamic_vgpr:
; GCN-DAG: s_load_dword [[VEC:s[0-9]+]]
; GCN-DAG: {{flat|buffer}}_load_dword [[IDX:v[0-9]+]]
; GCN: v_lshlrev_b32_e32 [[IDX_SCALED:v[0-9]+]], 4, [[IDX]]

; SI: v_lshr_b32_e32 [[ELT:v[0-9]+]], [[VEC]], [[IDX_SCALED]]
; VI: v_lshrrev_b32_e64 [[ELT:v[0-9]+]], [[IDX_SCALED]], [[VEC]]

; SI: buffer_store_short [[ELT]]
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[ELT]]
; GCN: ScratchSize: 0{{$}}
define amdgpu_kernel void @extract_vector_elt_v2f16_dynamic_vgpr(half addrspace(1)* %out, <2 x half> addrspace(4)* %vec.ptr, i32 addrspace(1)* %idx.ptr) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %idx.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds half, half addrspace(1)* %out, i64 %tid.ext
  %vec = load <2 x half>, <2 x half> addrspace(4)* %vec.ptr
  %idx = load i32, i32 addrspace(1)* %gep
  %elt = extractelement <2 x half> %vec, i32 %idx
  store half %elt, half addrspace(1)* %out.gep, align 2
  ret void
}

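; Constant-index extracts from a <3 x half> kernel argument.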
; GCN-LABEL: {{^}}extract_vector_elt_v3f16:
; GCN: s_load_dwordx2
; GCN: s_load_dwordx2

; GCN: buffer_store_short
; GCN: buffer_store_short
define amdgpu_kernel void @extract_vector_elt_v3f16(half addrspace(1)* %out, <3 x half> %foo) #0 {
  %p0 = extractelement <3 x half> %foo, i32 0
  %p1 = extractelement <3 x half> %foo, i32 2
  %out1 = getelementptr half, half addrspace(1)* %out, i32 1
  store half %p1, half addrspace(1)* %out, align 2
  store half %p0, half addrspace(1)* %out1, align 2
  ret void
}

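; Dynamic extract from a <3 x half> kernel argument, lowered to a scalar
; 64-bit shift of the packed argument.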
; FIXME: Why sometimes vector shift?
; GCN-LABEL: {{^}}dynamic_extract_vector_elt_v3f16:
; SI: s_load_dword s
; SI: s_load_dwordx2 s
; SI: s_load_dwordx2 s

; GFX89: s_load_dwordx2 s
; GFX89: s_load_dwordx2 s
; GFX89: s_load_dword s

; GCN-DAG: s_lshl_b32 s{{[0-9]+}}, s{{[0-9]+}}, 4
; GCN: s_lshr_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}

; GCN: {{buffer|global}}_store_short
define amdgpu_kernel void @dynamic_extract_vector_elt_v3f16(half addrspace(1)* %out, <3 x half> %foo, i32 %idx) #0 {
  %p0 = extractelement <3 x half> %foo, i32 %idx
  %out1 = getelementptr half, half addrspace(1)* %out, i32 1
  store half %p0, half addrspace(1)* %out
  ret void
}

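; Constant extract of element 2 from a per-thread <4 x half> load; only the
; dword holding the element needs to be loaded.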
; GCN-LABEL: {{^}}v_extractelement_v4f16_2:
; SI: buffer_load_dword [[LOAD:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI: buffer_store_short [[LOAD]]

; VI: flat_load_dword v
; VI: flat_store_short

; GFX9: global_load_dword [[LOAD:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, off offset:4
; GFX9: global_store_short_d16_hi v{{\[[0-9]+:[0-9]+\]}}, [[LOAD]]
define amdgpu_kernel void @v_extractelement_v4f16_2(half addrspace(1)* %out, <4 x half> addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
  %tid.ext = sext i32 %tid to i64
  %in.gep = getelementptr inbounds <4 x half>, <4 x half> addrspace(1)* %in, i64 %tid.ext
  %out.gep = getelementptr inbounds half, half addrspace(1)* %out, i64 %tid.ext
  %vec = load <4 x half>, <4 x half> addrspace(1)* %in.gep
  %vec.extract = extractelement <4 x half> %vec, i32 2
  store half %vec.extract, half addrspace(1)* %out.gep
  ret void
}

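; Dynamic extract from a per-thread <4 x half> load with a divergent index,
; lowered to a 64-bit VALU shift of the loaded pair of dwords.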
; GCN-LABEL: {{^}}v_extractelement_v4f16_dynamic_vgpr:
; GCN-DAG: {{flat|global|buffer}}_load_dword [[IDX:v[0-9]+]],
; GCN-DAG: {{flat|global|buffer}}_load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
; GCN-DAG: v_lshlrev_b32_e32 [[SCALED_IDX:v[0-9]+]], 4, [[IDX]]

; GFX89: v_lshrrev_b64 v{{\[}}[[SHIFT_LO:[0-9]+]]:[[SHIFT_HI:[0-9]+]]{{\]}}, [[SCALED_IDX]], v{{\[}}[[LO]]:[[HI]]{{\]}}
; GFX89: {{flat|global}}_store_short v{{\[[0-9]+:[0-9]+\]}}, v[[SHIFT_LO]]

; SI: v_lshr_b64 v{{\[}}[[SHIFT_LO:[0-9]+]]:[[SHIFT_HI:[0-9]+]]{{\]}}, v{{\[}}[[LO]]:[[HI]]{{\]}}, [[SCALED_IDX]]
; SI: buffer_store_short v[[SHIFT_LO]]
define amdgpu_kernel void @v_extractelement_v4f16_dynamic_vgpr(half addrspace(1)* %out, <4 x half> addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
  %tid.ext = sext i32 %tid to i64
  %in.gep = getelementptr inbounds <4 x half>, <4 x half> addrspace(1)* %in, i64 %tid.ext
  %out.gep = getelementptr inbounds half, half addrspace(1)* %out, i64 %tid.ext
  %idx.val = load volatile i32, i32 addrspace(1)* undef
  %vec = load <4 x half>, <4 x half> addrspace(1)* %in.gep
  %vec.extract = extractelement <4 x half> %vec, i32 %idx.val
  store half %vec.extract, half addrspace(1)* %out.gep
  ret void
}

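; Only elements 0 and 1 are used, so the wide <16 x half> load should be
; shrunk to a single scalar dword load.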
; GCN-LABEL: {{^}}reduce_load_vector_v8f16_extract_01:
; GCN: s_load_dwordx2 [[PTR:s\[[0-9]+:[0-9]+\]]],
; GCN-NOT: {{s|buffer|flat|global}}_load_
; GCN: s_load_dword s{{[0-9]+}}, [[PTR]], 0x0
; GCN-NOT: {{s|buffer|flat|global}}_load_
; GCN: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 16
define amdgpu_kernel void @reduce_load_vector_v8f16_extract_01(<16 x half> addrspace(4)* %ptr) #0 {
  %load = load <16 x half>, <16 x half> addrspace(4)* %ptr
  %elt0 = extractelement <16 x half> %load, i32 0
  %elt1 = extractelement <16 x half> %load, i32 1
  store volatile half %elt0, half addrspace(1)* undef, align 2
  store volatile half %elt1, half addrspace(1)* undef, align 2
  ret void
}

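; Same as above for elements 2 and 3; the shrunk scalar load reads the second
; dword of the vector.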
; GCN-LABEL: {{^}}reduce_load_vector_v8f16_extract_23:
; GCN: s_load_dwordx2 [[PTR:s\[[0-9]+:[0-9]+\]]],
; GCN-NOT: {{s|buffer|flat|global}}_load_
; GCN: s_load_dword s{{[0-9]+}}, [[PTR]], {{0x1|0x4}}
; GCN-NOT: {{s|buffer|flat|global}}_load_
; GCN: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 16
define amdgpu_kernel void @reduce_load_vector_v8f16_extract_23(<16 x half> addrspace(4)* %ptr) #0 {
  %load = load <16 x half>, <16 x half> addrspace(4)* %ptr
  %elt2 = extractelement <16 x half> %load, i32 2
  %elt3 = extractelement <16 x half> %load, i32 3
  store volatile half %elt2, half addrspace(1)* undef, align 2
  store volatile half %elt3, half addrspace(1)* undef, align 2
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }