; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -o - %s | FileCheck %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -stop-after=finalize-isel -o - %s | FileCheck --check-prefix=MIR %s

; Ensure that the scoped-AA metadata is attached to the loads and stores
; lowered from the memory intrinsics.

; Capture the metadata slot numbers of the scopes with patterns, since the
; numbering may change from run to run.

; MIR-DAG: ![[DOMAIN:[0-9]+]] = distinct !{!{{[0-9]+}}, !"bax"}
; MIR-DAG: ![[SCOPE0:[0-9]+]] = distinct !{!{{[0-9]+}}, ![[DOMAIN]], !"bax: %p"}
; MIR-DAG: ![[SCOPE1:[0-9]+]] = distinct !{!{{[0-9]+}}, ![[DOMAIN]], !"bax: %q"}
; MIR-DAG: ![[SET0:[0-9]+]] = !{![[SCOPE0]]}
; MIR-DAG: ![[SET1:[0-9]+]] = !{![[SCOPE1]]}

; MIR-LABEL: name: test_memcpy
; MIR: %8:vreg_128 = GLOBAL_LOAD_DWORDX4 %9, 16, 0, implicit $exec :: (load (s128) from %ir.p1, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]], addrspace 1)
; MIR: GLOBAL_STORE_DWORDX4 %10, killed %8, 0, 0, implicit $exec :: (store (s128) into %ir.p0, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]], addrspace 1)
define i32 @test_memcpy(i32 addrspace(1)* nocapture %p, i32 addrspace(1)* nocapture readonly %q) {
; Check that the loads of %q are scheduled ahead of the store issued for the memcpy on %p.
; CHECK-LABEL: test_memcpy:
; CHECK-DAG:    global_load_dwordx2 v{{\[}}[[Q0:[0-9]+]]:[[Q1:[0-9]+]]{{\]}}, v[2:3], off
; CHECK-DAG:    global_load_dwordx4 [[PVAL:v\[[0-9]+:[0-9]+\]]], v[0:1], off offset:16
; CHECK-DAG:    v_add_nc_u32_e32 v{{[0-9]+}}, v[[Q0]], v[[Q1]]
; CHECK:        global_store_dwordx4 v[0:1], [[PVAL]], off
; CHECK:        s_setpc_b64 s[30:31]
  %p0 = bitcast i32 addrspace(1)* %p to i8 addrspace(1)*
  %add.ptr = getelementptr inbounds i32, i32 addrspace(1)* %p, i64 4
  %p1 = bitcast i32 addrspace(1)* %add.ptr to i8 addrspace(1)*
  tail call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* noundef nonnull align 4 dereferenceable(16) %p0, i8 addrspace(1)* noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !2, !noalias !4
  %v0 = load i32, i32 addrspace(1)* %q, align 4, !alias.scope !4, !noalias !2
  %q1 = getelementptr inbounds i32, i32 addrspace(1)* %q, i64 1
  %v1 = load i32, i32 addrspace(1)* %q1, align 4, !alias.scope !4, !noalias !2
  %add = add i32 %v0, %v1
  ret i32 %add
}

; MIR-LABEL: name: test_memcpy_inline
; MIR: %8:vreg_128 = GLOBAL_LOAD_DWORDX4 %9, 16, 0, implicit $exec :: (load (s128) from %ir.p1, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]], addrspace 1)
; MIR: GLOBAL_STORE_DWORDX4 %10, killed %8, 0, 0, implicit $exec :: (store (s128) into %ir.p0, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]], addrspace 1)
define i32 @test_memcpy_inline(i32 addrspace(1)* nocapture %p, i32 addrspace(1)* nocapture readonly %q) {
; Check that the loads of %q are scheduled ahead of the store issued for the memcpy on %p.
; CHECK-LABEL: test_memcpy_inline:
; CHECK-DAG:    global_load_dwordx2 v{{\[}}[[Q0:[0-9]+]]:[[Q1:[0-9]+]]{{\]}}, v[2:3], off
; CHECK-DAG:    global_load_dwordx4 [[PVAL:v\[[0-9]+:[0-9]+\]]], v[0:1], off offset:16
; CHECK-DAG:    v_add_nc_u32_e32 v{{[0-9]+}}, v[[Q0]], v[[Q1]]
; CHECK:        global_store_dwordx4 v[0:1], [[PVAL]], off
; CHECK:        s_setpc_b64 s[30:31]
  %p0 = bitcast i32 addrspace(1)* %p to i8 addrspace(1)*
  %add.ptr = getelementptr inbounds i32, i32 addrspace(1)* %p, i64 4
  %p1 = bitcast i32 addrspace(1)* %add.ptr to i8 addrspace(1)*
  tail call void @llvm.memcpy.inline.p1i8.p1i8.i64(i8 addrspace(1)* noundef nonnull align 4 dereferenceable(16) %p0, i8 addrspace(1)* noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !2, !noalias !4
  %v0 = load i32, i32 addrspace(1)* %q, align 4, !alias.scope !4, !noalias !2
  %q1 = getelementptr inbounds i32, i32 addrspace(1)* %q, i64 1
  %v1 = load i32, i32 addrspace(1)* %q1, align 4, !alias.scope !4, !noalias !2
  %add = add i32 %v0, %v1
  ret i32 %add
}

; MIR-LABEL: name: test_memmove
; MIR: %8:vreg_128 = GLOBAL_LOAD_DWORDX4 %9, 16, 0, implicit $exec :: (load (s128) from %ir.p1, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]], addrspace 1)
; MIR: GLOBAL_STORE_DWORDX4 %10, killed %8, 0, 0, implicit $exec :: (store (s128) into %ir.p0, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]], addrspace 1)
define i32 @test_memmove(i32 addrspace(1)* nocapture %p, i32 addrspace(1)* nocapture readonly %q) {
; Check that the loads of %q are scheduled ahead of the store issued for the memmove on %p.
; CHECK-LABEL: test_memmove:
; CHECK-DAG:    global_load_dwordx2 v{{\[}}[[Q0:[0-9]+]]:[[Q1:[0-9]+]]{{\]}}, v[2:3], off
; CHECK-DAG:    global_load_dwordx4 [[PVAL:v\[[0-9]+:[0-9]+\]]], v[0:1], off offset:16
; CHECK-DAG:    v_add_nc_u32_e32 v{{[0-9]+}}, v[[Q0]], v[[Q1]]
; CHECK:        global_store_dwordx4 v[0:1], [[PVAL]]
; CHECK:        s_setpc_b64 s[30:31]
  %p0 = bitcast i32 addrspace(1)* %p to i8 addrspace(1)*
  %add.ptr = getelementptr inbounds i32, i32 addrspace(1)* %p, i64 4
  %p1 = bitcast i32 addrspace(1)* %add.ptr to i8 addrspace(1)*
  tail call void @llvm.memmove.p1i8.p1i8.i64(i8 addrspace(1)* noundef nonnull align 4 dereferenceable(16) %p0, i8 addrspace(1)* noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !2, !noalias !4
  %v0 = load i32, i32 addrspace(1)* %q, align 4, !alias.scope !4, !noalias !2
  %q1 = getelementptr inbounds i32, i32 addrspace(1)* %q, i64 1
  %v1 = load i32, i32 addrspace(1)* %q1, align 4, !alias.scope !4, !noalias !2
  %add = add i32 %v0, %v1
  ret i32 %add
}

; MIR-LABEL: name: test_memset
; MIR: GLOBAL_STORE_DWORDX4 killed %10, killed %11, 0, 0, implicit $exec :: (store (s128) into %ir.p0, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]], addrspace 1)
define i32 @test_memset(i32 addrspace(1)* nocapture %p, i32 addrspace(1)* nocapture readonly %q) {
; Check that the loads of %q are scheduled ahead of the store issued for the
; memset on %p (the i8 value 170 is splatted to 0xaaaaaaaa per dword).
; CHECK-LABEL: test_memset:
; CHECK-DAG:    global_load_dwordx2 v{{\[}}[[Q0:[0-9]+]]:[[Q1:[0-9]+]]{{\]}}, v[2:3], off
; CHECK-DAG:    v_mov_b32_e32 v[[PVAL:[0-9]+]], 0xaaaaaaaa
; CHECK:        global_store_dwordx4 v[0:1], v{{\[}}[[PVAL]]{{:[0-9]+\]}}, off
; CHECK:        v_add_nc_u32_e32 v{{[0-9]+}}, v[[Q0]], v[[Q1]]
; CHECK:        s_setpc_b64 s[30:31]
  %p0 = bitcast i32 addrspace(1)* %p to i8 addrspace(1)*
  tail call void @llvm.memset.p1i8.i64(i8 addrspace(1)* noundef nonnull align 4 dereferenceable(16) %p0, i8 170, i64 16, i1 false), !alias.scope !2, !noalias !4
  %v0 = load i32, i32 addrspace(1)* %q, align 4, !alias.scope !4, !noalias !2
  %q1 = getelementptr inbounds i32, i32 addrspace(1)* %q, i64 1
  %v1 = load i32, i32 addrspace(1)* %q1, align 4, !alias.scope !4, !noalias !2
  %add = add i32 %v0, %v1
  ret i32 %add
}

declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* noalias nocapture writeonly, i8 addrspace(1)* noalias nocapture readonly, i64, i1 immarg)
declare void @llvm.memcpy.inline.p1i8.p1i8.i64(i8 addrspace(1)* noalias nocapture writeonly, i8 addrspace(1)* noalias nocapture readonly, i64, i1 immarg)
declare void @llvm.memmove.p1i8.p1i8.i64(i8 addrspace(1)* nocapture writeonly, i8 addrspace(1)* nocapture readonly, i64, i1 immarg)
declare void @llvm.memset.p1i8.i64(i8 addrspace(1)* nocapture writeonly, i8, i64, i1 immarg)
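; !0 is the alias-scope domain; !1 ("bax: %p") and !3 ("bax: %q") are the
; scopes within it, and !2 and !4 are the scope lists referenced by the
; !alias.scope and !noalias annotations above.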
!0 = distinct !{!0, !"bax"}
!1 = distinct !{!1, !0, !"bax: %p"}
!2 = !{!1}
!3 = distinct !{!3, !0, !"bax: %q"}
!4 = !{!3}