; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -codegenprepare -S < %s | FileCheck %s
; RUN: opt -enable-debugify -codegenprepare -S < %s 2>&1 | FileCheck %s -check-prefix=DEBUG

; Subset of tests from llvm/test/Transforms/CodeGenPrepare/X86/overflow-intrinsics.ll
; to test shouldFormOverflowOp on AArch64, where it is profitable to create
; a uadd.with.overflow intrinsic even when the math result is unused, but not
; a usub.with.overflow intrinsic.

target triple = "arm64-apple-iphoneos"

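; The add feeds only the overflow compare, yet the uadd.with.overflow
; intrinsic is still expected to be formed.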
define i64 @uaddo1_overflow_used(i64 %a, i64 %b) nounwind ssp {
; CHECK-LABEL: @uaddo1_overflow_used(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[B:%.*]], i64 [[A:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[OV]], i64 [[B]], i64 42
; CHECK-NEXT:    ret i64 [[Q]]
;
  %add = add i64 %b, %a
  %cmp = icmp ult i64 %add, %a
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q
}

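; Same as uaddo1_overflow_used, but the math result is also stored, so both
; extracted values are used.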
define i64 @uaddo1_math_overflow_used(i64 %a, i64 %b, i64* %res) nounwind ssp {
; CHECK-LABEL: @uaddo1_math_overflow_used(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[B:%.*]], i64 [[A:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[OV]], i64 [[B]], i64 42
; CHECK-NEXT:    store i64 [[MATH]], i64* [[RES:%.*]]
; CHECK-NEXT:    ret i64 [[Q]]
;
  %add = add i64 %b, %a
  %cmp = icmp ult i64 %add, %a
  %Q = select i1 %cmp, i64 %b, i64 42
  store i64 %add, i64* %res
  ret i64 %Q
}

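; As uaddo1, but the overflow check compares against the other addend
; (add <u b instead of add <u a).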
define i64 @uaddo2_overflow_used(i64 %a, i64 %b) nounwind ssp {
; CHECK-LABEL: @uaddo2_overflow_used(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[B:%.*]], i64 [[A:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[OV]], i64 [[B]], i64 42
; CHECK-NEXT:    ret i64 [[Q]]
;
  %add = add i64 %b, %a
  %cmp = icmp ult i64 %add, %b
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q
}

define i64 @uaddo2_math_overflow_used(i64 %a, i64 %b, i64* %res) nounwind ssp {
; CHECK-LABEL: @uaddo2_math_overflow_used(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[B:%.*]], i64 [[A:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[OV]], i64 [[B]], i64 42
; CHECK-NEXT:    store i64 [[MATH]], i64* [[RES:%.*]]
; CHECK-NEXT:    ret i64 [[Q]]
;
  %add = add i64 %b, %a
  %cmp = icmp ult i64 %add, %b
  %Q = select i1 %cmp, i64 %b, i64 42
  store i64 %add, i64* %res
  ret i64 %Q
}

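; As uaddo2, but with the compare operands commuted (b >u add).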
define i64 @uaddo3_overflow_used(i64 %a, i64 %b) nounwind ssp {
; CHECK-LABEL: @uaddo3_overflow_used(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[B:%.*]], i64 [[A:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[OV]], i64 [[B]], i64 42
; CHECK-NEXT:    ret i64 [[Q]]
;
  %add = add i64 %b, %a
  %cmp = icmp ugt i64 %b, %add
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q
}

define i64 @uaddo3_math_overflow_used(i64 %a, i64 %b, i64* %res) nounwind ssp {
; CHECK-LABEL: @uaddo3_math_overflow_used(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[B:%.*]], i64 [[A:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[OV]], i64 [[B]], i64 42
; CHECK-NEXT:    store i64 [[MATH]], i64* [[RES:%.*]]
; CHECK-NEXT:    ret i64 [[Q]]
;
  %add = add i64 %b, %a
  %cmp = icmp ugt i64 %b, %add
  %Q = select i1 %cmp, i64 %b, i64 42
  store i64 %add, i64* %res
  ret i64 %Q
}

; InstCombine folds (a + b <u a) to (a ^ -1 <u b). Make sure we match this
; pattern as well.
define i64 @uaddo6_xor(i64 %a, i64 %b) {
; CHECK-LABEL: @uaddo6_xor(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[A:%.*]], i64 [[B:%.*]])
; CHECK-NEXT:    [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[OV]], i64 [[B]], i64 42
; CHECK-NEXT:    ret i64 [[Q]]
;
  %x = xor i64 %a, -1
  %cmp = icmp ult i64 %x, %b
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q
}

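; Commuted variant of the xor pattern (b >u x).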
define i64 @uaddo6_xor_commuted(i64 %a, i64 %b) {
; CHECK-LABEL: @uaddo6_xor_commuted(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[A:%.*]], i64 [[B:%.*]])
; CHECK-NEXT:    [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[OV]], i64 [[B]], i64 42
; CHECK-NEXT:    ret i64 [[Q]]
;
  %x = xor i64 %a, -1
  %cmp = icmp ugt i64 %b, %x
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q
}

declare void @use(i64)

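; Negative test: the xor result has a second use, so the pattern is not
; converted to the intrinsic.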
define i64 @uaddo6_xor_multi_use(i64 %a, i64 %b) {
; CHECK-LABEL: @uaddo6_xor_multi_use(
; CHECK-NEXT:    [[X:%.*]] = xor i64 -1, [[A:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i64 [[X]], [[B:%.*]]
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[CMP]], i64 [[B]], i64 42
; CHECK-NEXT:    call void @use(i64 [[X]])
; CHECK-NEXT:    ret i64 [[Q]]
;
  %x = xor i64 -1, %a
  %cmp = icmp ult i64 %x, %b
  %Q = select i1 %cmp, i64 %b, i64 42
  call void @use(i64 %x)
  ret i64 %Q
}

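; usub.with.overflow is not formed on this target when only the overflow
; result is used; the sub and compare are left as they are.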
define i1 @usubo_ult_i64_overflow_used(i64 %x, i64 %y, i64* %p) {
; CHECK-LABEL: @usubo_ult_i64_overflow_used(
; CHECK-NEXT:    [[S:%.*]] = sub i64 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[OV:%.*]] = icmp ult i64 [[X]], [[Y]]
; CHECK-NEXT:    ret i1 [[OV]]
;
  %s = sub i64 %x, %y
  %ov = icmp ult i64 %x, %y
  ret i1 %ov
}

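; Even with the math result stored, the usub pattern is left alone.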
define i1 @usubo_ult_i64_math_overflow_used(i64 %x, i64 %y, i64* %p) {
; CHECK-LABEL: @usubo_ult_i64_math_overflow_used(
; CHECK-NEXT:    [[S:%.*]] = sub i64 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    store i64 [[S]], i64* [[P:%.*]]
; CHECK-NEXT:    [[OV:%.*]] = icmp ult i64 [[X]], [[Y]]
; CHECK-NEXT:    ret i1 [[OV]]
;
  %s = sub i64 %x, %y
  store i64 %s, i64* %p
  %ov = icmp ult i64 %x, %y
  ret i1 %ov
}

; Check that every instruction inserted by -codegenprepare has a debug location.
; DEBUG: CheckModuleDebugify: PASS