; RUN: llc < %s -march=arm64 -verify-machineinstrs | FileCheck %s
; RUN: llc < %s -march=arm64 -aarch64-unscaled-mem-op=true\
; RUN:   -verify-machineinstrs | FileCheck -check-prefix=LDUR_CHK %s

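; The second RUN line passes -aarch64-unscaled-mem-op=true, which (as the
; flag name suggests) lets the load/store optimizer also combine unscaled
; (ldur) accesses; those expectations are checked under the LDUR_CHK prefix.
; The first four tests simply check that two adjacent loads of each scalar
; type are merged into a single ldp.
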
; CHECK-LABEL: ldp_int
; CHECK: ldp
define i32 @ldp_int(i32* %p) nounwind {
  %tmp = load i32* %p, align 4
  %add.ptr = getelementptr inbounds i32* %p, i64 1
  %tmp1 = load i32* %add.ptr, align 4
  %add = add nsw i32 %tmp1, %tmp
  ret i32 %add
}

; CHECK-LABEL: ldp_long
; CHECK: ldp
define i64 @ldp_long(i64* %p) nounwind {
  %tmp = load i64* %p, align 8
  %add.ptr = getelementptr inbounds i64* %p, i64 1
  %tmp1 = load i64* %add.ptr, align 8
  %add = add nsw i64 %tmp1, %tmp
  ret i64 %add
}

; CHECK-LABEL: ldp_float
; CHECK: ldp
define float @ldp_float(float* %p) nounwind {
  %tmp = load float* %p, align 4
  %add.ptr = getelementptr inbounds float* %p, i64 1
  %tmp1 = load float* %add.ptr, align 4
  %add = fadd float %tmp, %tmp1
  ret float %add
}

; CHECK-LABEL: ldp_double
; CHECK: ldp
define double @ldp_double(double* %p) nounwind {
  %tmp = load double* %p, align 8
  %add.ptr = getelementptr inbounds double* %p, i64 1
  %tmp1 = load double* %add.ptr, align 8
  %add = fadd double %tmp, %tmp1
  ret double %add
}

; Test the load/store optimizer: combine ldurs into an ldp, if appropriate.
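; An ldur takes an unscaled signed 9-bit byte offset (-256 to 255). In
; ldur_int below, the two loads land at byte offsets -4 and -8 from x0, so
; they should fold into a single ldp anchored at the lower address; with
; illustrative register numbers:
;   ldur w8, [x0, #-4]
;   ldur w9, [x0, #-8]
; becomes
;   ldp  w9, w8, [x0, #-8]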
define i32 @ldur_int(i32* %a) nounwind {
; LDUR_CHK-LABEL: ldur_int
; LDUR_CHK: ldp     [[DST1:w[0-9]+]], [[DST2:w[0-9]+]], [x0, #-8]
; LDUR_CHK-NEXT: add     w{{[0-9]+}}, [[DST2]], [[DST1]]
; LDUR_CHK-NEXT: ret
  %p1 = getelementptr inbounds i32* %a, i32 -1
  %tmp1 = load i32* %p1, align 2
  %p2 = getelementptr inbounds i32* %a, i32 -2
  %tmp2 = load i32* %p2, align 2
  %tmp3 = add i32 %tmp1, %tmp2
  ret i32 %tmp3
}

define i64 @ldur_long(i64* %a) nounwind ssp {
; LDUR_CHK-LABEL: ldur_long
; LDUR_CHK: ldp     [[DST1:x[0-9]+]], [[DST2:x[0-9]+]], [x0, #-16]
; LDUR_CHK-NEXT: add     x{{[0-9]+}}, [[DST2]], [[DST1]]
; LDUR_CHK-NEXT: ret
  %p1 = getelementptr inbounds i64* %a, i64 -1
  %tmp1 = load i64* %p1, align 2
  %p2 = getelementptr inbounds i64* %a, i64 -2
  %tmp2 = load i64* %p2, align 2
  %tmp3 = add i64 %tmp1, %tmp2
  ret i64 %tmp3
}

define float @ldur_float(float* %a) {
; LDUR_CHK-LABEL: ldur_float
; LDUR_CHK: ldp     [[DST1:s[0-9]+]], [[DST2:s[0-9]+]], [x0, #-8]
; LDUR_CHK-NEXT: fadd    s{{[0-9]+}}, [[DST2]], [[DST1]]
; LDUR_CHK-NEXT: ret
  %p1 = getelementptr inbounds float* %a, i64 -1
  %tmp1 = load float* %p1, align 2
  %p2 = getelementptr inbounds float* %a, i64 -2
  %tmp2 = load float* %p2, align 2
  %tmp3 = fadd float %tmp1, %tmp2
  ret float %tmp3
}

define double @ldur_double(double* %a) {
; LDUR_CHK-LABEL: ldur_double
; LDUR_CHK: ldp     [[DST1:d[0-9]+]], [[DST2:d[0-9]+]], [x0, #-16]
; LDUR_CHK-NEXT: fadd    d{{[0-9]+}}, [[DST2]], [[DST1]]
; LDUR_CHK-NEXT: ret
  %p1 = getelementptr inbounds double* %a, i64 -1
  %tmp1 = load double* %p1, align 2
  %p2 = getelementptr inbounds double* %a, i64 -2
  %tmp2 = load double* %p2, align 2
  %tmp3 = fadd double %tmp1, %tmp2
  ret double %tmp3
}

; Now check some boundary conditions of the encodable offset range.
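; An unscaled (ldur) offset must fit in a signed 9-bit immediate, i.e. in
; [-256, 255]. In pairUpBarelyIn the loads sit at byte offsets -31*8 = -248
; and -32*8 = -256, both still encodable, so an ldp anchored at #-256 should
; be formed.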
define i64 @pairUpBarelyIn(i64* %a) nounwind ssp {
; LDUR_CHK-LABEL: pairUpBarelyIn
; LDUR_CHK-NOT: ldur
; LDUR_CHK: ldp     [[DST1:x[0-9]+]], [[DST2:x[0-9]+]], [x0, #-256]
; LDUR_CHK-NEXT: add     x{{[0-9]+}}, [[DST2]], [[DST1]]
; LDUR_CHK-NEXT: ret
  %p1 = getelementptr inbounds i64* %a, i64 -31
  %tmp1 = load i64* %p1, align 2
  %p2 = getelementptr inbounds i64* %a, i64 -32
  %tmp2 = load i64* %p2, align 2
  %tmp3 = add i64 %tmp1, %tmp2
  ret i64 %tmp3
}

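; Here the lower load sits at -33*8 = -264 bytes, just past the -256 limit
; of the unscaled encoding, so no ldp should be formed.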
define i64 @pairUpBarelyOut(i64* %a) nounwind ssp {
; LDUR_CHK-LABEL: pairUpBarelyOut
; LDUR_CHK-NOT: ldp
; Don't be fragile about which loads or manipulations of the base register
; are used; just check that there isn't an ldp before the add.
; LDUR_CHK: add
; LDUR_CHK-NEXT: ret
  %p1 = getelementptr inbounds i64* %a, i64 -32
  %tmp1 = load i64* %p1, align 2
  %p2 = getelementptr inbounds i64* %a, i64 -33
  %tmp2 = load i64* %p2, align 2
  %tmp3 = add i64 %tmp1, %tmp2
  ret i64 %tmp3
}

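; Each load below is nudged one byte off an 8-byte boundary, so its offset
; is not a multiple of the access size and the two ldurs cannot be merged
; into an ldp.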
define i64 @pairUpNotAligned(i64* %a) nounwind ssp {
; LDUR_CHK-LABEL: pairUpNotAligned
; LDUR_CHK-NOT: ldp
; LDUR_CHK: ldur
; LDUR_CHK-NEXT: ldur
; LDUR_CHK-NEXT: add
; LDUR_CHK-NEXT: ret
  %p1 = getelementptr inbounds i64* %a, i64 -18
  %bp1 = bitcast i64* %p1 to i8*
  %bp1p1 = getelementptr inbounds i8* %bp1, i64 1
  %dp1 = bitcast i8* %bp1p1 to i64*
  %tmp1 = load i64* %dp1, align 1

  %p2 = getelementptr inbounds i64* %a, i64 -17
  %bp2 = bitcast i64* %p2 to i8*
  %bp2p1 = getelementptr inbounds i8* %bp2, i64 1
  %dp2 = bitcast i8* %bp2p1 to i64*
  %tmp2 = load i64* %dp2, align 1

  %tmp3 = add i64 %tmp1, %tmp2
  ret i64 %tmp3
}