//===- PatternMatchTest.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "GISelMITest.h"
#include "llvm/CodeGen/GlobalISel/ConstantFoldingMIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MIRParser/MIRParser.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "gtest/gtest.h"

using namespace llvm;
using namespace MIPatternMatch;

namespace {

TEST_F(AArch64GISelMITest, MatchIntConstant) {
  setUp();
  if (!TM)
    return;
  auto MIBCst = B.buildConstant(LLT::scalar(64), 42);
  int64_t Cst;
  bool match = mi_match(MIBCst.getReg(0), *MRI, m_ICst(Cst));
  EXPECT_TRUE(match);
  EXPECT_EQ(Cst, 42);
}

TEST_F(AArch64GISelMITest, MatchBinaryOp) {
  setUp();
  if (!TM)
    return;
  LLT s32 = LLT::scalar(32);
  LLT s64 = LLT::scalar(64);
  LLT p0 = LLT::pointer(0, 64);
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
  // Test case for no bind.
  bool match =
      mi_match(MIBAdd.getReg(0), *MRI, m_GAdd(m_Reg(), m_Reg()));
  EXPECT_TRUE(match);
  Register Src0, Src1, Src2;
  match = mi_match(MIBAdd.getReg(0), *MRI,
                   m_GAdd(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);

  // Build MUL(ADD %0, %1), %2
  auto MIBMul = B.buildMul(s64, MIBAdd, Copies[2]);

  // Try to match MUL.
  match = mi_match(MIBMul.getReg(0), *MRI,
                   m_GMul(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, MIBAdd.getReg(0));
  EXPECT_EQ(Src1, Copies[2]);

  // Try to match MUL(ADD).
  match = mi_match(MIBMul.getReg(0), *MRI,
                   m_GMul(m_GAdd(m_Reg(Src0), m_Reg(Src1)), m_Reg(Src2)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);
  EXPECT_EQ(Src2, Copies[2]);

  // Test commutativity.
  auto MIBMul2 = B.buildMul(s64, Copies[0], B.buildConstant(s64, 42));
  // Try to match MUL(Cst, Reg) on src of MUL(Reg, Cst) to validate
  // commutativity.
  int64_t Cst;
  match = mi_match(MIBMul2.getReg(0), *MRI,
                   m_GMul(m_ICst(Cst), m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Cst, 42);
  EXPECT_EQ(Src0, Copies[0]);

  // Make sure commutative matching doesn't apply to something like SUB.
  auto MIBSub = B.buildSub(s64, Copies[0], B.buildConstant(s64, 42));
  match = mi_match(MIBSub.getReg(0), *MRI,
                   m_GSub(m_ICst(Cst), m_Reg(Src0)));
  EXPECT_FALSE(match);

  auto MIBFMul = B.buildInstr(TargetOpcode::G_FMUL, {s64},
                              {Copies[0], B.buildConstant(s64, 42)});
  // Match and test commutativity for FMUL.
  match = mi_match(MIBFMul.getReg(0), *MRI,
                   m_GFMul(m_ICst(Cst), m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Cst, 42);
  EXPECT_EQ(Src0, Copies[0]);

  // FSUB
  auto MIBFSub = B.buildInstr(TargetOpcode::G_FSUB, {s64},
                              {Copies[0], B.buildConstant(s64, 42)});
  match = mi_match(MIBFSub.getReg(0), *MRI,
                   m_GFSub(m_Reg(Src0), m_Reg()));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);

  // Build AND %0, %1
  auto MIBAnd = B.buildAnd(s64, Copies[0], Copies[1]);
  // Try to match AND.
  match = mi_match(MIBAnd.getReg(0), *MRI,
                   m_GAnd(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);

  // Build OR %0, %1
  auto MIBOr = B.buildOr(s64, Copies[0], Copies[1]);
  // Try to match OR.
  match = mi_match(MIBOr.getReg(0), *MRI,
                   m_GOr(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);

  // Match lshr, and make sure a different shift amount type works.
  auto TruncCopy1 = B.buildTrunc(s32, Copies[1]);
  auto LShr = B.buildLShr(s64, Copies[0], TruncCopy1);
  match = mi_match(LShr.getReg(0), *MRI,
                   m_GLShr(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, TruncCopy1.getReg(0));

  // Match shl, and make sure a different shift amount type works.
  auto Shl = B.buildShl(s64, Copies[0], TruncCopy1);
  match = mi_match(Shl.getReg(0), *MRI,
                   m_GShl(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, TruncCopy1.getReg(0));

  // Build a G_PTR_ADD and check that we can match it.
  auto PtrAdd = B.buildPtrAdd(p0, {B.buildUndef(p0)}, Copies[0]);
  match = mi_match(PtrAdd.getReg(0), *MRI, m_GPtrAdd(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, PtrAdd->getOperand(1).getReg());
  EXPECT_EQ(Src1, Copies[0]);
}

TEST_F(AArch64GISelMITest, MatchICmp) {
  setUp();
  if (!TM)
    return;

  const LLT s1 = LLT::scalar(1);
  auto CmpEq = B.buildICmp(CmpInst::ICMP_EQ, s1, Copies[0], Copies[1]);

  // Check that we can match any predicate.
  bool match =
      mi_match(CmpEq.getReg(0), *MRI, m_GICmp(m_Pred(), m_Reg(), m_Reg()));
  EXPECT_TRUE(match);

  // Check that we get the predicate and registers.
  CmpInst::Predicate Pred;
  Register Reg0;
  Register Reg1;
  match = mi_match(CmpEq.getReg(0), *MRI,
                   m_GICmp(m_Pred(Pred), m_Reg(Reg0), m_Reg(Reg1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(CmpInst::ICMP_EQ, Pred);
  EXPECT_EQ(Copies[0], Reg0);
  EXPECT_EQ(Copies[1], Reg1);
}

TEST_F(AArch64GISelMITest, MatchFCmp) {
  setUp();
  if (!TM)
    return;

  const LLT s1 = LLT::scalar(1);
  auto CmpEq = B.buildFCmp(CmpInst::FCMP_OEQ, s1, Copies[0], Copies[1]);

  // Check that we can match any predicate.
  bool match =
      mi_match(CmpEq.getReg(0), *MRI, m_GFCmp(m_Pred(), m_Reg(), m_Reg()));
  EXPECT_TRUE(match);

  // Check that we get the predicate and registers.
  CmpInst::Predicate Pred;
  Register Reg0;
  Register Reg1;
  match = mi_match(CmpEq.getReg(0), *MRI,
                   m_GFCmp(m_Pred(Pred), m_Reg(Reg0), m_Reg(Reg1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(CmpInst::FCMP_OEQ, Pred);
  EXPECT_EQ(Copies[0], Reg0);
  EXPECT_EQ(Copies[1], Reg1);
}

TEST_F(AArch64GISelMITest, MatchFPUnaryOp) {
  setUp();
  if (!TM)
    return;

  // FP-truncate the s64 copy down to s32.
  LLT s32 = LLT::scalar(32);
  auto Copy0s32 = B.buildFPTrunc(s32, Copies[0]);

  // Match G_FABS.
  auto MIBFabs = B.buildInstr(TargetOpcode::G_FABS, {s32}, {Copy0s32});
  bool match =
      mi_match(MIBFabs.getReg(0), *MRI, m_GFabs(m_Reg()));
  EXPECT_TRUE(match);

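  // Build and match G_FNEG, and check that the source register binds.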
  Register Src;
  auto MIBFNeg = B.buildInstr(TargetOpcode::G_FNEG, {s32}, {Copy0s32});
  match = mi_match(MIBFNeg.getReg(0), *MRI, m_GFNeg(m_Reg(Src)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src, Copy0s32.getReg(0));

  match = mi_match(MIBFabs.getReg(0), *MRI, m_GFabs(m_Reg(Src)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src, Copy0s32.getReg(0));

  // Build and match FConstant.
  auto MIBFCst = B.buildFConstant(s32, .5);
  const ConstantFP *TmpFP{};
  match = mi_match(MIBFCst.getReg(0), *MRI, m_GFCst(TmpFP));
  EXPECT_TRUE(match);
  EXPECT_TRUE(TmpFP);
  APFloat APF((float).5);
  auto *CFP = ConstantFP::get(Context, APF);
  EXPECT_EQ(CFP, TmpFP);

  // Build double float.
  LLT s64 = LLT::scalar(64);
  auto MIBFCst64 = B.buildFConstant(s64, .5);
  const ConstantFP *TmpFP64{};
  match = mi_match(MIBFCst64.getReg(0), *MRI, m_GFCst(TmpFP64));
  EXPECT_TRUE(match);
  EXPECT_TRUE(TmpFP64);
  APFloat APF64(.5);
  auto CFP64 = ConstantFP::get(Context, APF64);
  EXPECT_EQ(CFP64, TmpFP64);
  EXPECT_NE(TmpFP64, TmpFP);

  // Build half float.
  LLT s16 = LLT::scalar(16);
  auto MIBFCst16 = B.buildFConstant(s16, .5);
  const ConstantFP *TmpFP16{};
  match = mi_match(MIBFCst16.getReg(0), *MRI, m_GFCst(TmpFP16));
  EXPECT_TRUE(match);
  EXPECT_TRUE(TmpFP16);
  bool Ignored;
  APFloat APF16(.5);
  APF16.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  auto CFP16 = ConstantFP::get(Context, APF16);
  EXPECT_EQ(TmpFP16, CFP16);
  EXPECT_NE(TmpFP16, TmpFP);
}

TEST_F(AArch64GISelMITest, MatchExtendsTrunc) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  LLT s32 = LLT::scalar(32);

  auto MIBTrunc = B.buildTrunc(s32, Copies[0]);
  auto MIBAExt = B.buildAnyExt(s64, MIBTrunc);
  auto MIBZExt = B.buildZExt(s64, MIBTrunc);
  auto MIBSExt = B.buildSExt(s64, MIBTrunc);
  Register Src0;
  bool match =
      mi_match(MIBTrunc.getReg(0), *MRI, m_GTrunc(m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
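  // Match each extend directly and check that its source is the trunc result.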
  match =
      mi_match(MIBAExt.getReg(0), *MRI, m_GAnyExt(m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, MIBTrunc.getReg(0));

  match = mi_match(MIBSExt.getReg(0), *MRI, m_GSExt(m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, MIBTrunc.getReg(0));

  match = mi_match(MIBZExt.getReg(0), *MRI, m_GZExt(m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, MIBTrunc.getReg(0));

  // Match ext(trunc src).
  match = mi_match(MIBAExt.getReg(0), *MRI,
                   m_GAnyExt(m_GTrunc(m_Reg(Src0))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);

  match = mi_match(MIBSExt.getReg(0), *MRI,
                   m_GSExt(m_GTrunc(m_Reg(Src0))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);

  match = mi_match(MIBZExt.getReg(0), *MRI,
                   m_GZExt(m_GTrunc(m_Reg(Src0))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
}

TEST_F(AArch64GISelMITest, MatchSpecificType) {
  setUp();
  if (!TM)
    return;

  // Try to match a 64-bit add.
  LLT s64 = LLT::scalar(64);
  LLT s32 = LLT::scalar(32);
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
  EXPECT_FALSE(mi_match(MIBAdd.getReg(0), *MRI,
                        m_GAdd(m_SpecificType(s32), m_Reg())));
  EXPECT_TRUE(mi_match(MIBAdd.getReg(0), *MRI,
                       m_GAdd(m_SpecificType(s64), m_Reg())));

  // Try to match the destination type of a bitcast.
  LLT v2s32 = LLT::vector(2, 32);
  auto MIBCast = B.buildCast(v2s32, Copies[0]);
  EXPECT_TRUE(
      mi_match(MIBCast.getReg(0), *MRI, m_GBitcast(m_Reg())));
  EXPECT_TRUE(
      mi_match(MIBCast.getReg(0), *MRI, m_SpecificType(v2s32)));
  EXPECT_TRUE(
      mi_match(MIBCast.getReg(1), *MRI, m_SpecificType(s64)));

  // Build a G_INTTOPTR/G_PTRTOINT pair, then match and test them.
  LLT PtrTy = LLT::pointer(0, 64);
  auto MIBIntToPtr = B.buildCast(PtrTy, Copies[0]);
  auto MIBPtrToInt = B.buildCast(s64, MIBIntToPtr);
  Register Src0;

  // Match the ptrtoint(inttoptr reg).
  bool match = mi_match(MIBPtrToInt.getReg(0), *MRI,
                        m_GPtrToInt(m_GIntToPtr(m_Reg(Src0))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
}

TEST_F(AArch64GISelMITest, MatchCombinators) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  LLT s32 = LLT::scalar(32);
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
  Register Src0, Src1;
  bool match =
      mi_match(MIBAdd.getReg(0), *MRI,
               m_all_of(m_SpecificType(s64), m_GAdd(m_Reg(Src0), m_Reg(Src1))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);
  // Check for s32 (which should fail).
  match =
      mi_match(MIBAdd.getReg(0), *MRI,
               m_all_of(m_SpecificType(s32), m_GAdd(m_Reg(Src0), m_Reg(Src1))));
  EXPECT_FALSE(match);
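  // m_any_of should succeed here: the s32 type check fails, but the G_ADD
  // pattern still matches and binds the sources.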
  match =
      mi_match(MIBAdd.getReg(0), *MRI,
               m_any_of(m_SpecificType(s32), m_GAdd(m_Reg(Src0), m_Reg(Src1))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);

  // Match a case where none of the predicates hold true.
  match = mi_match(
      MIBAdd.getReg(0), *MRI,
      m_any_of(m_SpecificType(LLT::scalar(16)), m_GSub(m_Reg(), m_Reg())));
  EXPECT_FALSE(match);
}

TEST_F(AArch64GISelMITest, MatchMiscellaneous) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
  Register Reg = MIBAdd.getReg(0);

  // Only one use of Reg.
  B.buildCast(LLT::pointer(0, 32), MIBAdd);
  EXPECT_TRUE(mi_match(Reg, *MRI, m_OneUse(m_GAdd(m_Reg(), m_Reg()))));
  EXPECT_TRUE(mi_match(Reg, *MRI, m_OneNonDBGUse(m_GAdd(m_Reg(), m_Reg()))));

  // Add multiple debug uses of Reg.
  B.buildInstr(TargetOpcode::DBG_VALUE, {}, {Reg})->getOperand(0).setIsDebug();
  B.buildInstr(TargetOpcode::DBG_VALUE, {}, {Reg})->getOperand(0).setIsDebug();

  EXPECT_FALSE(mi_match(Reg, *MRI, m_OneUse(m_GAdd(m_Reg(), m_Reg()))));
  EXPECT_TRUE(mi_match(Reg, *MRI, m_OneNonDBGUse(m_GAdd(m_Reg(), m_Reg()))));

  // Multiple non-debug uses of Reg.
  B.buildCast(LLT::pointer(1, 32), MIBAdd);
  EXPECT_FALSE(mi_match(Reg, *MRI, m_OneUse(m_GAdd(m_Reg(), m_Reg()))));
  EXPECT_FALSE(mi_match(Reg, *MRI, m_OneNonDBGUse(m_GAdd(m_Reg(), m_Reg()))));
}

TEST_F(AArch64GISelMITest, MatchSpecificConstant) {
  setUp();
  if (!TM)
    return;

  // Basic case: Can we match a G_CONSTANT with a specific value?
  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  EXPECT_TRUE(mi_match(FortyTwo.getReg(0), *MRI, m_SpecificICst(42)));
  EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_SpecificICst(123)));

  // Test that this works inside of a more complex pattern.
  LLT s64 = LLT::scalar(64);
  auto MIBAdd = B.buildAdd(s64, Copies[0], FortyTwo);
  EXPECT_TRUE(mi_match(MIBAdd.getReg(2), *MRI, m_SpecificICst(42)));

  // Wrong constant.
  EXPECT_FALSE(mi_match(MIBAdd.getReg(2), *MRI, m_SpecificICst(123)));

  // No constant on the LHS.
  EXPECT_FALSE(mi_match(MIBAdd.getReg(1), *MRI, m_SpecificICst(42)));
}

TEST_F(AArch64GISelMITest, MatchZeroInt) {
  setUp();
  if (!TM)
    return;
  auto Zero = B.buildConstant(LLT::scalar(64), 0);
  EXPECT_TRUE(mi_match(Zero.getReg(0), *MRI, m_ZeroInt()));

  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_ZeroInt()));
}

TEST_F(AArch64GISelMITest, MatchAllOnesInt) {
  setUp();
  if (!TM)
    return;
  auto AllOnes = B.buildConstant(LLT::scalar(64), -1);
  EXPECT_TRUE(mi_match(AllOnes.getReg(0), *MRI, m_AllOnesInt()));

  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_AllOnesInt()));
}

TEST_F(AArch64GISelMITest, MatchNeg) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  auto Zero = B.buildConstant(LLT::scalar(64), 0);
  auto NegInst = B.buildSub(s64, Zero, Copies[0]);
  Register NegatedReg;

  // Match: %neg = G_SUB 0, %Reg
  EXPECT_TRUE(mi_match(NegInst.getReg(0), *MRI, m_Neg(m_Reg(NegatedReg))));
  EXPECT_EQ(NegatedReg, Copies[0]);

  // Don't match: %sub = G_SUB %Reg, 0
  auto NotNegInst1 = B.buildSub(s64, Copies[0], Zero);
  EXPECT_FALSE(mi_match(NotNegInst1.getReg(0), *MRI, m_Neg(m_Reg(NegatedReg))));

  // Don't match: %sub = G_SUB 42, %Reg
  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  auto NotNegInst2 = B.buildSub(s64, FortyTwo, Copies[0]);
  EXPECT_FALSE(mi_match(NotNegInst2.getReg(0), *MRI, m_Neg(m_Reg(NegatedReg))));

  // Complex testcase:
  // %sub = G_SUB 0, %negated_reg
  // %add = G_ADD %x, %sub
  auto AddInst = B.buildAdd(s64, Copies[1], NegInst);
  NegatedReg = Register();
  EXPECT_TRUE(mi_match(AddInst.getReg(2), *MRI, m_Neg(m_Reg(NegatedReg))));
  EXPECT_EQ(NegatedReg, Copies[0]);
}

TEST_F(AArch64GISelMITest, MatchNot) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  auto AllOnes = B.buildConstant(LLT::scalar(64), -1);
  auto NotInst1 = B.buildXor(s64, Copies[0], AllOnes);
  Register NotReg;

  // Match: G_XOR %NotReg, -1
  EXPECT_TRUE(mi_match(NotInst1.getReg(0), *MRI, m_Not(m_Reg(NotReg))));
  EXPECT_EQ(NotReg, Copies[0]);

  // Match: G_XOR -1, %NotReg
  auto NotInst2 = B.buildXor(s64, AllOnes, Copies[1]);
  EXPECT_TRUE(mi_match(NotInst2.getReg(0), *MRI, m_Not(m_Reg(NotReg))));
  EXPECT_EQ(NotReg, Copies[1]);

  // Don't match: G_XOR %NotReg, 42
  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  auto WrongCst = B.buildXor(s64, Copies[0], FortyTwo);
  EXPECT_FALSE(mi_match(WrongCst.getReg(0), *MRI, m_Not(m_Reg(NotReg))));

  // Complex testcase:
  // %xor = G_XOR %NotReg, -1
  // %add = G_ADD %x, %xor
  auto AddInst = B.buildAdd(s64, Copies[1], NotInst1);
  NotReg = Register();
  EXPECT_TRUE(mi_match(AddInst.getReg(2), *MRI, m_Not(m_Reg(NotReg))));
  EXPECT_EQ(NotReg, Copies[0]);
}
} // namespace

int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  initLLVM();
  return RUN_ALL_TESTS();
}