; RUN:  llc -amdgpu-scalarize-global-loads=false  -march=amdgcn < %s | FileCheck -check-prefixes=GCN,SI %s
; RUN:  llc -amdgpu-scalarize-global-loads=false  -march=amdgcn -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck -check-prefixes=GCN,VI %s

; Swapping two aliasing <2 x double> global values: both 16-byte loads must be
; emitted before either 16-byte store, i.e. the scheduler may not reorder the
; aliasing memory operations.
; GCN-LABEL: {{^}}no_reorder_v2f64_global_load_store:
; GCN: buffer_load_dwordx4
; GCN: buffer_load_dwordx4
; GCN: buffer_store_dwordx4
; GCN: buffer_store_dwordx4
; GCN: s_endpgm
define amdgpu_kernel void @no_reorder_v2f64_global_load_store(<2 x double> addrspace(1)* nocapture %x, <2 x double> addrspace(1)* nocapture %y) nounwind {
  %tmp1 = load <2 x double>, <2 x double> addrspace(1)* %x, align 16
  %tmp4 = load <2 x double>, <2 x double> addrspace(1)* %y, align 16
  store <2 x double> %tmp4, <2 x double> addrspace(1)* %x, align 16
  store <2 x double> %tmp1, <2 x double> addrspace(1)* %y, align 16
  ret void
}

; Same swap pattern through LDS (addrspace(3)). SI scalarizes to paired 64-bit
; LDS accesses (ds_read2/ds_write2); VI can use 128-bit LDS accesses. In both
; cases the reads must precede the writes.
; GCN-LABEL: {{^}}no_reorder_scalarized_v2f64_local_load_store:
; SI: ds_read2_b64
; SI: ds_write2_b64

; VI: ds_read_b128
; VI: ds_write_b128

; GCN: s_endpgm
define amdgpu_kernel void @no_reorder_scalarized_v2f64_local_load_store(<2 x double> addrspace(3)* nocapture %x, <2 x double> addrspace(3)* nocapture %y) nounwind {
  %tmp1 = load <2 x double>, <2 x double> addrspace(3)* %x, align 16
  %tmp4 = load <2 x double>, <2 x double> addrspace(3)* %y, align 16
  store <2 x double> %tmp4, <2 x double> addrspace(3)* %x, align 16
  store <2 x double> %tmp1, <2 x double> addrspace(3)* %y, align 16
  ret void
}

; <8 x i32> is split into two dwordx4 operations per access; even after
; legalization splits the vectors, all four loads must still come before all
; four stores of the aliasing swap.
; GCN-LABEL: {{^}}no_reorder_split_v8i32_global_load_store:
; GCN: buffer_load_dwordx4
; GCN: buffer_load_dwordx4
; GCN: buffer_load_dwordx4
; GCN: buffer_load_dwordx4


; GCN: buffer_store_dwordx4
; GCN: buffer_store_dwordx4
; GCN: buffer_store_dwordx4
; GCN: buffer_store_dwordx4
; GCN: s_endpgm
define amdgpu_kernel void @no_reorder_split_v8i32_global_load_store(<8 x i32> addrspace(1)* nocapture %x, <8 x i32> addrspace(1)* nocapture %y) nounwind {
  %tmp1 = load <8 x i32>, <8 x i32> addrspace(1)* %x, align 32
  %tmp4 = load <8 x i32>, <8 x i32> addrspace(1)* %y, align 32
  store <8 x i32> %tmp4, <8 x i32> addrspace(1)* %x, align 32
  store <8 x i32> %tmp1, <8 x i32> addrspace(1)* %y, align 32
  ret void
}

; The zext/add/trunc round-trip should fold away so only 64-bit LDS accesses
; remain, and the first store must not be followed by a re-load of the value it
; may have clobbered (GCN-NOT: ds_read between the writes).
; GCN-LABEL: {{^}}no_reorder_extload_64:
; GCN: ds_read_b64
; GCN: ds_read_b64
; GCN: ds_write_b64
; GCN-NOT: ds_read
; GCN: ds_write_b64
; GCN: s_endpgm
define amdgpu_kernel void @no_reorder_extload_64(<2 x i32> addrspace(3)* nocapture %x, <2 x i32> addrspace(3)* nocapture %y) nounwind {
  %tmp1 = load <2 x i32>, <2 x i32> addrspace(3)* %x, align 8
  %tmp4 = load <2 x i32>, <2 x i32> addrspace(3)* %y, align 8
  %tmp1ext = zext <2 x i32> %tmp1 to <2 x i64>
  %tmp4ext = zext <2 x i32> %tmp4 to <2 x i64>
  %tmp7 = add <2 x i64> %tmp1ext, <i64 1, i64 1>
  %tmp9 = add <2 x i64> %tmp4ext, <i64 1, i64 1>
  %trunctmp9 = trunc <2 x i64> %tmp9 to <2 x i32>
  %trunctmp7 = trunc <2 x i64> %tmp7 to <2 x i32>
  store <2 x i32> %trunctmp9, <2 x i32> addrspace(3)* %x, align 8
  store <2 x i32> %trunctmp7, <2 x i32> addrspace(3)* %y, align 8
  ret void
}