//===- MachineIRBuilderTest.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "GISelMITest.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

TEST_F(AArch64GISelMITest, TestBuildConstantFConstant) {
  setUp();
  if (!TM)
    return;

  B.buildConstant(LLT::scalar(32), 42);
  B.buildFConstant(LLT::scalar(32), 1.0);

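  // Passing a vector LLT splats the constant: the builder emits one scalar
  // G_CONSTANT/G_FCONSTANT and replicates it with G_BUILD_VECTOR, as the
  // CHECK lines below verify.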
  B.buildConstant(LLT::fixed_vector(2, 32), 99);
  B.buildFConstant(LLT::fixed_vector(2, 32), 2.0);

  // Test APFloat overload.
  APFloat KVal(APFloat::IEEEdouble(), "4.0");
  B.buildFConstant(LLT::scalar(64), KVal);

  auto CheckStr = R"(
  CHECK: [[CONST0:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
  CHECK: [[FCONST0:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
  CHECK: [[CONST1:%[0-9]+]]:_(s32) = G_CONSTANT i32 99
  CHECK: [[VEC0:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[CONST1]]:_(s32), [[CONST1]]:_(s32)
  CHECK: [[FCONST1:%[0-9]+]]:_(s32) = G_FCONSTANT float 2.000000e+00
  CHECK: [[VEC1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FCONST1]]:_(s32), [[FCONST1]]:_(s32)
  CHECK: [[FCONST2:%[0-9]+]]:_(s64) = G_FCONSTANT double 4.000000e+00
  )";

  EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}

#ifdef GTEST_HAS_DEATH_TEST
#ifndef NDEBUG

TEST_F(AArch64GISelMITest, TestBuildConstantFConstantDeath) {
  setUp();
  if (!TM)
    return;

  LLVMContext &Ctx = MF->getFunction().getContext();
  APInt APV32(32, 12345);

  // Test that the APInt overload asserts on a size mismatch.
  EXPECT_DEATH(B.buildConstant(LLT::scalar(16), APV32),
               "creating constant with the wrong size");
  EXPECT_DEATH(B.buildConstant(LLT::fixed_vector(2, 16), APV32),
               "creating constant with the wrong size");

  // Test that the ConstantInt overload asserts as well.
  ConstantInt *CI = ConstantInt::get(Ctx, APV32);
  EXPECT_DEATH(B.buildConstant(LLT::scalar(16), *CI),
               "creating constant with the wrong size");
  EXPECT_DEATH(B.buildConstant(LLT::fixed_vector(2, 16), *CI),
               "creating constant with the wrong size");

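  // The ConstantFP overload should assert the same way for G_FCONSTANT.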
  APFloat DoubleVal(APFloat::IEEEdouble());
  ConstantFP *CF = ConstantFP::get(Ctx, DoubleVal);
  EXPECT_DEATH(B.buildFConstant(LLT::scalar(16), *CF),
               "creating fconstant with the wrong size");
  EXPECT_DEATH(B.buildFConstant(LLT::fixed_vector(2, 16), *CF),
               "creating fconstant with the wrong size");
}

#endif
#endif

TEST_F(AArch64GISelMITest, DstOpSrcOp) {
  setUp();
  if (!TM)
    return;

  SmallVector<Register, 4> Copies;
  collectCopies(Copies, MF);

  LLT s64 = LLT::scalar(64);
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);

  // Test that SrcOp and DstOp can be constructed directly from MachineOperand
  // by copying the operands of an existing instruction.
  B.buildAdd(MIBAdd->getOperand(0), MIBAdd->getOperand(1),
             MIBAdd->getOperand(2));
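  // Note that the second G_ADD redefines the same destination register, which
  // is why the CHECK line below reuses [[ADD]] instead of binding a new name.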

  auto CheckStr = R"(
  ; CHECK: [[COPY0:%[0-9]+]]:_(s64) = COPY $x0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
  ; CHECK: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY0]]:_, [[COPY1]]:_
  ; CHECK: [[ADD]]:_(s64) = G_ADD [[COPY0]]:_, [[COPY1]]:_
  )";

  EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}

TEST_F(AArch64GISelMITest, BuildUnmerge) {
  setUp();
  if (!TM)
    return;

  SmallVector<Register, 4> Copies;
  collectCopies(Copies, MF);
  B.buildUnmerge(LLT::scalar(32), Copies[0]);
  B.buildUnmerge(LLT::scalar(16), Copies[1]);
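  // The number of results is implied by the type sizes: unmerging an s64 into
  // s32 pieces yields two defs, and into s16 pieces yields four, as the CHECK
  // lines below confirm.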

  auto CheckStr = R"(
  ; CHECK: [[COPY0:%[0-9]+]]:_(s64) = COPY $x0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
  ; CHECK: [[UNMERGE32_0:%[0-9]+]]:_(s32), [[UNMERGE32_1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY0]]
  ; CHECK: [[UNMERGE16_0:%[0-9]+]]:_(s16), [[UNMERGE16_1:%[0-9]+]]:_(s16), [[UNMERGE16_2:%[0-9]+]]:_(s16), [[UNMERGE16_3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]]
  )";

  EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}

TEST_F(AArch64GISelMITest, TestBuildFPInsts) {
  setUp();
  if (!TM)
    return;

  SmallVector<Register, 4> Copies;
  collectCopies(Copies, MF);

  LLT S64 = LLT::scalar(64);

  B.buildFAdd(S64, Copies[0], Copies[1]);
  B.buildFSub(S64, Copies[0], Copies[1]);
  B.buildFMA(S64, Copies[0], Copies[1], Copies[2]);
  B.buildFMAD(S64, Copies[0], Copies[1], Copies[2]);
  B.buildFMAD(S64, Copies[0], Copies[1], Copies[2], MachineInstr::FmNoNans);
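  // The optional MIFlags argument attaches fast-math flags to the emitted
  // instruction; FmNoNans prints as "nnan" in the MIR checked below.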
  B.buildFNeg(S64, Copies[0]);
  B.buildFAbs(S64, Copies[0]);
  B.buildFCopysign(S64, Copies[0], Copies[1]);

  auto CheckStr = R"(
  ; CHECK: [[COPY0:%[0-9]+]]:_(s64) = COPY $x0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
  ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
  ; CHECK: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[COPY0]]:_, [[COPY1]]:_
  ; CHECK: [[FSUB:%[0-9]+]]:_(s64) = G_FSUB [[COPY0]]:_, [[COPY1]]:_
  ; CHECK: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[COPY0]]:_, [[COPY1]]:_, [[COPY2]]:_
  ; CHECK: [[FMAD0:%[0-9]+]]:_(s64) = G_FMAD [[COPY0]]:_, [[COPY1]]:_, [[COPY2]]:_
  ; CHECK: [[FMAD1:%[0-9]+]]:_(s64) = nnan G_FMAD [[COPY0]]:_, [[COPY1]]:_, [[COPY2]]:_
  ; CHECK: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[COPY0]]:_
  ; CHECK: [[FABS:%[0-9]+]]:_(s64) = G_FABS [[COPY0]]:_
  ; CHECK: [[FCOPYSIGN:%[0-9]+]]:_(s64) = G_FCOPYSIGN [[COPY0]]:_, [[COPY1]]:_
  )";

  EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}

TEST_F(AArch64GISelMITest, BuildIntrinsic) {
  setUp();
  if (!TM)
    return;

  LLT S64 = LLT::scalar(64);
  SmallVector<Register, 4> Copies;
  collectCopies(Copies, MF);

  // Make sure DstOp version works. sqrt is just a placeholder intrinsic.
  B.buildIntrinsic(Intrinsic::sqrt, {S64}, false)
      .addUse(Copies[0]);
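  // The trailing false is the HasSideEffects flag; a side-effect-free call
  // selects the G_INTRINSIC opcode rather than G_INTRINSIC_W_SIDE_EFFECTS.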

  // Make sure register version works.
  SmallVector<Register, 1> Results;
  Results.push_back(MRI->createGenericVirtualRegister(S64));
  B.buildIntrinsic(Intrinsic::sqrt, Results, false)
      .addUse(Copies[1]);

  auto CheckStr = R"(
  ; CHECK: [[COPY0:%[0-9]+]]:_(s64) = COPY $x0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
  ; CHECK: [[SQRT0:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.sqrt), [[COPY0]]:_
  ; CHECK: [[SQRT1:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.sqrt), [[COPY1]]:_
  )";

  EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}

TEST_F(AArch64GISelMITest, BuildXor) {
  setUp();
  if (!TM)
    return;

  LLT S64 = LLT::scalar(64);
  LLT S128 = LLT::scalar(128);
  SmallVector<Register, 4> Copies;
  collectCopies(Copies, MF);
  B.buildXor(S64, Copies[0], Copies[1]);
  B.buildNot(S64, Copies[0]);
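  // buildNot has no dedicated opcode: it materializes an all-ones G_CONSTANT
  // and emits a G_XOR against it, as the NEGONE CHECK lines below show.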

  // Make sure this works with > 64-bit types.
  auto Merge = B.buildMerge(S128, {Copies[0], Copies[1]});
  B.buildNot(S128, Merge);

  auto CheckStr = R"(
  ; CHECK: [[COPY0:%[0-9]+]]:_(s64) = COPY $x0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
  ; CHECK: [[XOR0:%[0-9]+]]:_(s64) = G_XOR [[COPY0]]:_, [[COPY1]]:_
  ; CHECK: [[NEGONE64:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
  ; CHECK: [[XOR1:%[0-9]+]]:_(s64) = G_XOR [[COPY0]]:_, [[NEGONE64]]:_
  ; CHECK: [[MERGE:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY0]]:_(s64), [[COPY1]]:_(s64)
  ; CHECK: [[NEGONE128:%[0-9]+]]:_(s128) = G_CONSTANT i128 -1
  ; CHECK: [[XOR2:%[0-9]+]]:_(s128) = G_XOR [[MERGE]]:_, [[NEGONE128]]:_
  )";

  EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}

TEST_F(AArch64GISelMITest, BuildBitCounts) {
  setUp();
  if (!TM)
    return;

  LLT S32 = LLT::scalar(32);
  SmallVector<Register, 4> Copies;
  collectCopies(Copies, MF);

  B.buildCTPOP(S32, Copies[0]);
  B.buildCTLZ(S32, Copies[0]);
  B.buildCTLZ_ZERO_UNDEF(S32, Copies[1]);
  B.buildCTTZ(S32, Copies[0]);
  B.buildCTTZ_ZERO_UNDEF(S32, Copies[1]);
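  // The ZERO_UNDEF variants produce an undefined result when the input is
  // zero, while plain G_CTLZ/G_CTTZ remain defined for a zero input.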

  auto CheckStr = R"(
  ; CHECK: [[COPY0:%[0-9]+]]:_(s64) = COPY $x0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
  ; CHECK: [[CTPOP:%[0-9]+]]:_(s32) = G_CTPOP [[COPY0]]:_
  ; CHECK: [[CTLZ0:%[0-9]+]]:_(s32) = G_CTLZ [[COPY0]]:_
  ; CHECK: [[CTLZ_UNDEF0:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[COPY1]]:_
  ; CHECK: [[CTTZ:%[0-9]+]]:_(s32) = G_CTTZ [[COPY0]]:_
  ; CHECK: [[CTTZ_UNDEF0:%[0-9]+]]:_(s32) = G_CTTZ_ZERO_UNDEF [[COPY1]]:_
  )";

  EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}

TEST_F(AArch64GISelMITest, BuildCasts) {
  setUp();
  if (!TM)
    return;

  LLT S32 = LLT::scalar(32);
  SmallVector<Register, 4> Copies;
  collectCopies(Copies, MF);

  B.buildUITOFP(S32, Copies[0]);
  B.buildSITOFP(S32, Copies[0]);
  B.buildFPTOUI(S32, Copies[0]);
  B.buildFPTOSI(S32, Copies[0]);

  auto CheckStr = R"(
  ; CHECK: [[COPY0:%[0-9]+]]:_(s64) = COPY $x0
  ; CHECK: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[COPY0]]:_
  ; CHECK: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[COPY0]]:_
  ; CHECK: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[COPY0]]:_
  ; CHECK: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY0]]:_
  )";

  EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}

TEST_F(AArch64GISelMITest, BuildMinMaxAbs) {
  setUp();
  if (!TM)
    return;

  LLT S64 = LLT::scalar(64);
  SmallVector<Register, 4> Copies;
  collectCopies(Copies, MF);

  B.buildSMin(S64, Copies[0], Copies[1]);
  B.buildSMax(S64, Copies[0], Copies[1]);
  B.buildUMin(S64, Copies[0], Copies[1]);
  B.buildUMax(S64, Copies[0], Copies[1]);
  B.buildAbs(S64, Copies[0]);

  auto CheckStr = R"(
  ; CHECK: [[COPY0:%[0-9]+]]:_(s64) = COPY $x0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
  ; CHECK: [[SMIN0:%[0-9]+]]:_(s64) = G_SMIN [[COPY0]]:_, [[COPY1]]:_
  ; CHECK: [[SMAX0:%[0-9]+]]:_(s64) = G_SMAX [[COPY0]]:_, [[COPY1]]:_
  ; CHECK: [[UMIN0:%[0-9]+]]:_(s64) = G_UMIN [[COPY0]]:_, [[COPY1]]:_
  ; CHECK: [[UMAX0:%[0-9]+]]:_(s64) = G_UMAX [[COPY0]]:_, [[COPY1]]:_
  ; CHECK: [[UABS0:%[0-9]+]]:_(s64) = G_ABS [[COPY0]]:_
  )";

  EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}

TEST_F(AArch64GISelMITest, BuildAtomicRMW) {
  setUp();
  if (!TM)
    return;

  LLT S64 = LLT::scalar(64);
  LLT P0 = LLT::pointer(0, 64);
  SmallVector<Register, 4> Copies;
  collectCopies(Copies, MF);

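  // An atomicrmw both reads and writes memory, so the MMO carries MOLoad and
  // MOStore; its Unordered ordering prints as "load store unordered" in the
  // CHECK lines below.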
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo(),
      MachineMemOperand::MOLoad | MachineMemOperand::MOStore, 8, Align(8),
      AAMDNodes(), nullptr, SyncScope::System, AtomicOrdering::Unordered);

  auto Ptr = B.buildUndef(P0);
  B.buildAtomicRMWFAdd(S64, Ptr, Copies[0], *MMO);
  B.buildAtomicRMWFSub(S64, Ptr, Copies[0], *MMO);

  auto CheckStr = R"(
  ; CHECK: [[COPY0:%[0-9]+]]:_(s64) = COPY $x0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
  ; CHECK: [[PTR:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
  ; CHECK: [[FADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_FADD [[PTR]]:_(p0), [[COPY0]]:_ :: (load store unordered (s64))
  ; CHECK: [[FSUB:%[0-9]+]]:_(s64) = G_ATOMICRMW_FSUB [[PTR]]:_(p0), [[COPY0]]:_ :: (load store unordered (s64))
  )";

  EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}

TEST_F(AArch64GISelMITest, BuildMerge) {
  setUp();
  if (!TM)
    return;

  LLT S32 = LLT::scalar(32);
  Register RegC0 = B.buildConstant(S32, 0).getReg(0);
  Register RegC1 = B.buildConstant(S32, 1).getReg(0);
  Register RegC2 = B.buildConstant(S32, 2).getReg(0);
  Register RegC3 = B.buildConstant(S32, 3).getReg(0);

  // Merging plain constants into one big blob of bits should produce a
  // G_MERGE_VALUES.
  B.buildMerge(LLT::scalar(128), {RegC0, RegC1, RegC2, RegC3});
  // Merging plain constants into a vector should produce a G_BUILD_VECTOR.
  LLT V2x32 = LLT::fixed_vector(2, 32);
  Register RegC0C1 = B.buildMerge(V2x32, {RegC0, RegC1}).getReg(0);
  Register RegC2C3 = B.buildMerge(V2x32, {RegC2, RegC3}).getReg(0);
  // Merging vector constants into a wider vector should produce a
  // G_CONCAT_VECTORS.
  B.buildMerge(LLT::fixed_vector(4, 32), {RegC0C1, RegC2C3});
  // Merging vector constants into a plain scalar type is not allowed.
  // Nothing else to test.

  auto CheckStr = R"(
  ; CHECK: [[C0:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
  ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
  ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
  ; CHECK: {{%[0-9]+}}:_(s128) = G_MERGE_VALUES [[C0]]:_(s32), [[C1]]:_(s32), [[C2]]:_(s32), [[C3]]:_(s32)
  ; CHECK: [[LOW2x32:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C0]]:_(s32), [[C1]]:_(s32)
  ; CHECK: [[HIGH2x32:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C2]]:_(s32), [[C3]]:_(s32)
  ; CHECK: {{%[0-9]+}}:_(<4 x s32>) = G_CONCAT_VECTORS [[LOW2x32]]:_(<2 x s32>), [[HIGH2x32]]:_(<2 x s32>)
  )";

  EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}

TEST_F(AArch64GISelMITest, BuildAddoSubo) {
  setUp();
  if (!TM)
    return;

  LLT S1 = LLT::scalar(1);
  LLT S64 = LLT::scalar(64);
  SmallVector<Register, 4> Copies;
  collectCopies(Copies, MF);

  auto UAddo = B.buildUAddo(S64, S1, Copies[0], Copies[1]);
  auto USubo = B.buildUSubo(S64, S1, Copies[0], Copies[1]);
  auto SAddo = B.buildSAddo(S64, S1, Copies[0], Copies[1]);
  auto SSubo = B.buildSSubo(S64, S1, Copies[0], Copies[1]);

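  // The carry-using variants take an extra s1 operand; here each one consumes
  // the overflow flag (result 1) of the corresponding overflow op above.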
  B.buildUAdde(S64, S1, Copies[0], Copies[1], UAddo.getReg(1));
  B.buildUSube(S64, S1, Copies[0], Copies[1], USubo.getReg(1));
  B.buildSAdde(S64, S1, Copies[0], Copies[1], SAddo.getReg(1));
  B.buildSSube(S64, S1, Copies[0], Copies[1], SSubo.getReg(1));

  auto CheckStr = R"(
  ; CHECK: [[COPY0:%[0-9]+]]:_(s64) = COPY $x0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
  ; CHECK: [[UADDO:%[0-9]+]]:_(s64), [[UADDO_FLAG:%[0-9]+]]:_(s1) = G_UADDO [[COPY0]]:_, [[COPY1]]:_
  ; CHECK: [[USUBO:%[0-9]+]]:_(s64), [[USUBO_FLAG:%[0-9]+]]:_(s1) = G_USUBO [[COPY0]]:_, [[COPY1]]:_
  ; CHECK: [[SADDO:%[0-9]+]]:_(s64), [[SADDO_FLAG:%[0-9]+]]:_(s1) = G_SADDO [[COPY0]]:_, [[COPY1]]:_
  ; CHECK: [[SSUBO:%[0-9]+]]:_(s64), [[SSUBO_FLAG:%[0-9]+]]:_(s1) = G_SSUBO [[COPY0]]:_, [[COPY1]]:_
  ; CHECK: [[UADDE:%[0-9]+]]:_(s64), [[UADDE_FLAG:%[0-9]+]]:_(s1) = G_UADDE [[COPY0]]:_, [[COPY1]]:_, [[UADDO_FLAG]]
  ; CHECK: [[USUBE:%[0-9]+]]:_(s64), [[USUBE_FLAG:%[0-9]+]]:_(s1) = G_USUBE [[COPY0]]:_, [[COPY1]]:_, [[USUBO_FLAG]]
  ; CHECK: [[SADDE:%[0-9]+]]:_(s64), [[SADDE_FLAG:%[0-9]+]]:_(s1) = G_SADDE [[COPY0]]:_, [[COPY1]]:_, [[SADDO_FLAG]]
  ; CHECK: [[SSUBE:%[0-9]+]]:_(s64), [[SSUBE_FLAG:%[0-9]+]]:_(s1) = G_SSUBE [[COPY0]]:_, [[COPY1]]:_, [[SSUBO_FLAG]]
  )";

  EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}

TEST_F(AArch64GISelMITest, BuildBitfieldExtract) {
  setUp();
  if (!TM)
    return;
  LLT S64 = LLT::scalar(64);
  SmallVector<Register, 4> Copies;
  collectCopies(Copies, MF);

  auto Ubfx = B.buildUbfx(S64, Copies[0], Copies[1], Copies[2]);
  B.buildSbfx(S64, Ubfx, Copies[0], Copies[2]);
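  // Both builders take (source, offset, width) operands; G_UBFX zero-extends
  // the extracted field while G_SBFX sign-extends it.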

  const auto *CheckStr = R"(
  ; CHECK: [[COPY0:%[0-9]+]]:_(s64) = COPY $x0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
  ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
  ; CHECK: [[UBFX:%[0-9]+]]:_(s64) = G_UBFX [[COPY0]]:_, [[COPY1]]:_(s64), [[COPY2]]:_
  ; CHECK: [[SBFX:%[0-9]+]]:_(s64) = G_SBFX [[UBFX]]:_, [[COPY0]]:_(s64), [[COPY2]]:_
  )";

  EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}
