1; REQUIRES: asserts
2; RUN: llc < %s -mtriple=arm64-linux-gnu -mcpu=cortex-a57 -verify-misched -debug-only=machine-scheduler -o - 2>&1 > /dev/null | FileCheck %s
3; RUN: llc < %s -mtriple=arm64-linux-gnu -mcpu=exynos-m3 -verify-misched -debug-only=machine-scheduler -o - 2>&1 > /dev/null | FileCheck %s
4
5; Test ldr clustering.
6; CHECK: ********** MI Scheduling **********
7; CHECK-LABEL: ldr_int:%bb.0
8; CHECK: Cluster ld/st SU(1) - SU(2)
9; CHECK: SU(1):   %{{[0-9]+}}:gpr32 = LDRWui
10; CHECK: SU(2):   %{{[0-9]+}}:gpr32 = LDRWui
; Two 4-byte loads from %a+4 and %a+8: same base register, adjacent scaled
; offsets, so the scheduler should emit the "Cluster ld/st" edge checked above
; (making the pair an LDP candidate later). The deliberate 'align 2' shows
; clustering still fires for under-aligned LDRWui loads.
define i32 @ldr_int(i32* %a) nounwind {
  %p1 = getelementptr inbounds i32, i32* %a, i32 1
  %tmp1 = load i32, i32* %p1, align 2
  %p2 = getelementptr inbounds i32, i32* %a, i32 2
  %tmp2 = load i32, i32* %p2, align 2
  %tmp3 = add i32 %tmp1, %tmp2              ; use both loads so neither is dead
  ret i32 %tmp3
}
19
20; Test ldpsw clustering
21; CHECK: ********** MI Scheduling **********
22; CHECK-LABEL: ldp_sext_int:%bb.0
23; CHECK: Cluster ld/st SU(1) - SU(2)
24; CHECK: SU(1):   %{{[0-9]+}}:gpr64 = LDRSWui
25; CHECK: SU(2):   %{{[0-9]+}}:gpr64 = LDRSWui
; Two adjacent i32 loads whose results are both sign-extended to i64: the
; loads select as LDRSWui (load + sext folded), and the scheduler should
; cluster them as checked above (LDPSW candidate).
define i64 @ldp_sext_int(i32* %p) nounwind {
  %tmp = load i32, i32* %p, align 4
  %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
  %tmp1 = load i32, i32* %add.ptr, align 4
  %sexttmp = sext i32 %tmp to i64           ; folds into the load -> LDRSWui
  %sexttmp1 = sext i32 %tmp1 to i64
  %add = add nsw i64 %sexttmp1, %sexttmp    ; keep both extended values live
  ret i64 %add
}
35
36; Test ldur clustering.
37; CHECK: ********** MI Scheduling **********
38; CHECK-LABEL: ldur_int:%bb.0
39; CHECK: Cluster ld/st SU(1) - SU(2)
40; CHECK: SU(1):   %{{[0-9]+}}:gpr32 = LDURWi
41; CHECK: SU(2):   %{{[0-9]+}}:gpr32 = LDURWi
; Same shape as @ldr_int but with negative offsets (%a-4 and %a-8), which
; select the unscaled LDURWi form; clustering should still fire, as checked
; by the CHECK lines above.
define i32 @ldur_int(i32* %a) nounwind {
  %p1 = getelementptr inbounds i32, i32* %a, i32 -1
  %tmp1 = load i32, i32* %p1, align 2
  %p2 = getelementptr inbounds i32, i32* %a, i32 -2
  %tmp2 = load i32, i32* %p2, align 2
  %tmp3 = add i32 %tmp1, %tmp2              ; use both loads so neither is dead
  ret i32 %tmp3
}
50
51; Test sext + zext clustering.
52; CHECK: ********** MI Scheduling **********
53; CHECK-LABEL: ldp_half_sext_zext_int:%bb.0
54; CHECK: Cluster ld/st SU(3) - SU(4)
55; CHECK: SU(3):   %{{[0-9]+}}:gpr64 = LDRSWui
56; CHECK: SU(4):   undef %{{[0-9]+}}.sub_32:gpr64 = LDRWui
; Adjacent i32 loads where one result is sign-extended and the other
; zero-extended: they select as LDRSWui and LDRWui (zext via implicit sub_32
; write) and should still be clustered. The extra i64 load of %q comes first
; in the DAG, which is why the checked pair is SU(3)/SU(4) rather than
; SU(1)/SU(2).
define i64 @ldp_half_sext_zext_int(i64* %q, i32* %p) nounwind {
  %tmp0 = load i64, i64* %q, align 4        ; filler load to shift SU numbers
  %tmp = load i32, i32* %p, align 4
  %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
  %tmp1 = load i32, i32* %add.ptr, align 4
  %sexttmp = sext i32 %tmp to i64           ; folds into the load -> LDRSWui
  %sexttmp1 = zext i32 %tmp1 to i64         ; zext -> plain LDRWui into sub_32
  %add = add nsw i64 %sexttmp1, %sexttmp
  %add1 = add nsw i64 %add, %tmp0           ; keep the filler load live
  ret i64 %add1
}
68
69; Test zext + sext clustering.
70; CHECK: ********** MI Scheduling **********
71; CHECK-LABEL: ldp_half_zext_sext_int:%bb.0
72; CHECK: Cluster ld/st SU(3) - SU(4)
73; CHECK: SU(3):   undef %{{[0-9]+}}.sub_32:gpr64 = LDRWui
74; CHECK: SU(4):   %{{[0-9]+}}:gpr64 = LDRSWui
; Mirror of @ldp_half_sext_zext_int with the extension kinds swapped
; (zext on the first load, sext on the second): LDRWui followed by LDRSWui
; should still be clustered, as checked above.
define i64 @ldp_half_zext_sext_int(i64* %q, i32* %p) nounwind {
  %tmp0 = load i64, i64* %q, align 4        ; filler load to shift SU numbers
  %tmp = load i32, i32* %p, align 4
  %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
  %tmp1 = load i32, i32* %add.ptr, align 4
  %sexttmp = zext i32 %tmp to i64           ; zext -> plain LDRWui into sub_32
  %sexttmp1 = sext i32 %tmp1 to i64         ; folds into the load -> LDRSWui
  %add = add nsw i64 %sexttmp1, %sexttmp
  %add1 = add nsw i64 %add, %tmp0           ; keep the filler load live
  ret i64 %add1
}
86
87; Verify we don't cluster volatile loads.
88; CHECK: ********** MI Scheduling **********
89; CHECK-LABEL: ldr_int_volatile:%bb.0
90; CHECK-NOT: Cluster ld/st
91; CHECK: SU(1):   %{{[0-9]+}}:gpr32 = LDRWui
92; CHECK: SU(2):   %{{[0-9]+}}:gpr32 = LDRWui
; Identical shape to @ldr_int but with volatile loads: the scheduler must NOT
; add a cluster edge between them (CHECK-NOT above), since clustering could
; enable reordering/pairing of volatile accesses.
define i32 @ldr_int_volatile(i32* %a) nounwind {
  %p1 = getelementptr inbounds i32, i32* %a, i32 1
  %tmp1 = load volatile i32, i32* %p1, align 2
  %p2 = getelementptr inbounds i32, i32* %a, i32 2
  %tmp2 = load volatile i32, i32* %p2, align 2
  %tmp3 = add i32 %tmp1, %tmp2              ; use both loads so neither is dead
  ret i32 %tmp3
}
101
; Test ldq clustering. NOTE(review): the old "(no clustering for Exynos)"
; caveat looks stale — both RUN lines (cortex-a57 and exynos-m3) share the
; same CHECK prefix, which expects "Cluster ld/st SU(1) - SU(3)"; confirm
; against the exynos-m3 ld/st clustering policy.
103; CHECK: ********** MI Scheduling **********
104; CHECK-LABEL: ldq_cluster:%bb.0
105; CHECK: Cluster ld/st SU(1) - SU(3)
106; CHECK: SU(1):   %{{[0-9]+}}:fpr128 = LDRQui
107; CHECK: SU(3):   %{{[0-9]+}}:fpr128 = LDRQui
; Two 16-byte vector loads at %p and %p+16, selected as LDRQui. An add that
; uses the first load is scheduled between them, so the pair appears as
; SU(1) and SU(3) in the DAG and must still get a cluster edge (see CHECK
; above). Fix: normalized the inconsistent '< 2 x i64>' spelling on the first
; load's type to '<2 x i64>' to match the rest of the file (parse-identical).
define <2 x i64> @ldq_cluster(i64* %p) {
  %a1 = bitcast i64* %p to <2 x i64>*
  %tmp1 = load <2 x i64>, <2 x i64>* %a1, align 8
  %add.ptr2 = getelementptr inbounds i64, i64* %p, i64 2
  %a2 = bitcast i64* %add.ptr2 to <2 x i64>*
  %tmp2 = add nsw <2 x i64> %tmp1, %tmp1    ; intervening use of the 1st load
  %tmp3 = load <2 x i64>, <2 x i64>* %a2, align 8
  %res  = mul nsw <2 x i64> %tmp2, %tmp3    ; keep both loads live
  ret <2 x i64> %res
}
118