//===-- RISCVExpandPseudoInsts.cpp - Expand pseudo instructions -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that expands pseudo instructions into target
// instructions. This pass should be run after register allocation but before
// the post-regalloc scheduling pass.
//
//===----------------------------------------------------------------------===//

#include "RISCV.h"
#include "RISCVInstrInfo.h"
#include "RISCVTargetMachine.h"

#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"

using namespace llvm;

#define RISCV_EXPAND_PSEUDO_NAME "RISCV pseudo instruction expansion pass"
namespace {

class RISCVExpandPseudo : public MachineFunctionPass {
public:
  const RISCVInstrInfo *TII;
  static char ID;

  RISCVExpandPseudo() : MachineFunctionPass(ID) {
    initializeRISCVExpandPseudoPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return RISCV_EXPAND_PSEUDO_NAME; }

private:
  bool expandMBB(MachineBasicBlock &MBB);
  bool expandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                MachineBasicBlock::iterator &NextMBBI);
  bool expandAtomicBinOp(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MBBI, AtomicRMWInst::BinOp,
                         bool IsMasked, int Width,
                         MachineBasicBlock::iterator &NextMBBI);
  bool expandAtomicMinMaxOp(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI,
                            AtomicRMWInst::BinOp, bool IsMasked, int Width,
                            MachineBasicBlock::iterator &NextMBBI);
  bool expandAtomicCmpXchg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, bool IsMasked,
                           int Width, MachineBasicBlock::iterator &NextMBBI);
  bool expandAuipcInstPair(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI,
                           MachineBasicBlock::iterator &NextMBBI,
                           unsigned FlagsHi, unsigned SecondOpcode);
  bool expandLoadLocalAddress(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MBBI,
                              MachineBasicBlock::iterator &NextMBBI);
  bool expandLoadAddress(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MBBI,
                         MachineBasicBlock::iterator &NextMBBI);
  bool expandLoadTLSIEAddress(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MBBI,
                              MachineBasicBlock::iterator &NextMBBI);
  bool expandLoadTLSGDAddress(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MBBI,
                              MachineBasicBlock::iterator &NextMBBI);
};

char RISCVExpandPseudo::ID = 0;

bool RISCVExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
  TII = static_cast<const RISCVInstrInfo *>(MF.getSubtarget().getInstrInfo());
  bool Modified = false;
  for (auto &MBB : MF)
    Modified |= expandMBB(MBB);
  return Modified;
}

bool RISCVExpandPseudo::expandMBB(MachineBasicBlock &MBB) {
  bool Modified = false;

  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
  while (MBBI != E) {
    MachineBasicBlock::iterator NMBBI = std::next(MBBI);
    Modified |= expandMI(MBB, MBBI, NMBBI);
    MBBI = NMBBI;
  }

  return Modified;
}

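// If MBBI references a pseudo instruction that should be expanded here, do
// the expansion and return true. Otherwise return false.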
bool RISCVExpandPseudo::expandMI(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MBBI,
                                 MachineBasicBlock::iterator &NextMBBI) {
  switch (MBBI->getOpcode()) {
  case RISCV::PseudoAtomicLoadNand32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 32,
                             NextMBBI);
  case RISCV::PseudoAtomicLoadNand64:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 64,
                             NextMBBI);
  case RISCV::PseudoMaskedAtomicSwap32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Xchg, true, 32,
                             NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadAdd32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Add, true, 32, NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadSub32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Sub, true, 32, NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadNand32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, true, 32,
                             NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadMax32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Max, true, 32,
                                NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadMin32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Min, true, 32,
                                NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadUMax32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMax, true, 32,
                                NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadUMin32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMin, true, 32,
                                NextMBBI);
  case RISCV::PseudoCmpXchg32:
    return expandAtomicCmpXchg(MBB, MBBI, false, 32, NextMBBI);
  case RISCV::PseudoCmpXchg64:
    return expandAtomicCmpXchg(MBB, MBBI, false, 64, NextMBBI);
  case RISCV::PseudoMaskedCmpXchg32:
    return expandAtomicCmpXchg(MBB, MBBI, true, 32, NextMBBI);
  case RISCV::PseudoLLA:
    return expandLoadLocalAddress(MBB, MBBI, NextMBBI);
  case RISCV::PseudoLA:
    return expandLoadAddress(MBB, MBBI, NextMBBI);
  case RISCV::PseudoLA_TLS_IE:
    return expandLoadTLSIEAddress(MBB, MBBI, NextMBBI);
  case RISCV::PseudoLA_TLS_GD:
    return expandLoadTLSGDAddress(MBB, MBBI, NextMBBI);
  }

  return false;
}

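// The following helpers pick the LR/SC variant whose aq/rl bits implement the
// requested atomic ordering: roughly, acquire semantics go on the LR, release
// semantics on the SC, and both bits are set on each for seq_cst.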
static unsigned getLRForRMW32(AtomicOrdering Ordering) {
  switch (Ordering) {
  default:
    llvm_unreachable("Unexpected AtomicOrdering");
  case AtomicOrdering::Monotonic:
    return RISCV::LR_W;
  case AtomicOrdering::Acquire:
    return RISCV::LR_W_AQ;
  case AtomicOrdering::Release:
    return RISCV::LR_W;
  case AtomicOrdering::AcquireRelease:
    return RISCV::LR_W_AQ;
  case AtomicOrdering::SequentiallyConsistent:
    return RISCV::LR_W_AQ_RL;
  }
}

static unsigned getSCForRMW32(AtomicOrdering Ordering) {
  switch (Ordering) {
  default:
    llvm_unreachable("Unexpected AtomicOrdering");
  case AtomicOrdering::Monotonic:
    return RISCV::SC_W;
  case AtomicOrdering::Acquire:
    return RISCV::SC_W;
  case AtomicOrdering::Release:
    return RISCV::SC_W_RL;
  case AtomicOrdering::AcquireRelease:
    return RISCV::SC_W_RL;
  case AtomicOrdering::SequentiallyConsistent:
    return RISCV::SC_W_AQ_RL;
  }
}

static unsigned getLRForRMW64(AtomicOrdering Ordering) {
  switch (Ordering) {
  default:
    llvm_unreachable("Unexpected AtomicOrdering");
  case AtomicOrdering::Monotonic:
    return RISCV::LR_D;
  case AtomicOrdering::Acquire:
    return RISCV::LR_D_AQ;
  case AtomicOrdering::Release:
    return RISCV::LR_D;
  case AtomicOrdering::AcquireRelease:
    return RISCV::LR_D_AQ;
  case AtomicOrdering::SequentiallyConsistent:
    return RISCV::LR_D_AQ_RL;
  }
}

static unsigned getSCForRMW64(AtomicOrdering Ordering) {
  switch (Ordering) {
  default:
    llvm_unreachable("Unexpected AtomicOrdering");
  case AtomicOrdering::Monotonic:
    return RISCV::SC_D;
  case AtomicOrdering::Acquire:
    return RISCV::SC_D;
  case AtomicOrdering::Release:
    return RISCV::SC_D_RL;
  case AtomicOrdering::AcquireRelease:
    return RISCV::SC_D_RL;
  case AtomicOrdering::SequentiallyConsistent:
    return RISCV::SC_D_AQ_RL;
  }
}

static unsigned getLRForRMW(AtomicOrdering Ordering, int Width) {
  if (Width == 32)
    return getLRForRMW32(Ordering);
  if (Width == 64)
    return getLRForRMW64(Ordering);
  llvm_unreachable("Unexpected LR width\n");
}

static unsigned getSCForRMW(AtomicOrdering Ordering, int Width) {
  if (Width == 32)
    return getSCForRMW32(Ordering);
  if (Width == 64)
    return getSCForRMW64(Ordering);
  llvm_unreachable("Unexpected SC width\n");
}

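// Emit the LR/SC retry loop for an unmasked atomicrmw into LoopMBB. The
// pseudo supplies the destination, scratch, address and increment registers,
// plus the atomic ordering as an immediate operand.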
static void doAtomicBinOpExpansion(const RISCVInstrInfo *TII, MachineInstr &MI,
                                   DebugLoc DL, MachineBasicBlock *ThisMBB,
                                   MachineBasicBlock *LoopMBB,
                                   MachineBasicBlock *DoneMBB,
                                   AtomicRMWInst::BinOp BinOp, int Width) {
  Register DestReg = MI.getOperand(0).getReg();
  Register ScratchReg = MI.getOperand(1).getReg();
  Register AddrReg = MI.getOperand(2).getReg();
  Register IncrReg = MI.getOperand(3).getReg();
  AtomicOrdering Ordering =
      static_cast<AtomicOrdering>(MI.getOperand(4).getImm());

  // .loop:
  //   lr.[w|d] dest, (addr)
  //   binop scratch, dest, val
  //   sc.[w|d] scratch, scratch, (addr)
  //   bnez scratch, loop
  BuildMI(LoopMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
      .addReg(AddrReg);
  switch (BinOp) {
  default:
    llvm_unreachable("Unexpected AtomicRMW BinOp");
  case AtomicRMWInst::Nand:
    BuildMI(LoopMBB, DL, TII->get(RISCV::AND), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    BuildMI(LoopMBB, DL, TII->get(RISCV::XORI), ScratchReg)
        .addReg(ScratchReg)
        .addImm(-1);
    break;
  }
  BuildMI(LoopMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
      .addReg(AddrReg)
      .addReg(ScratchReg);
  BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
      .addReg(ScratchReg)
      .addReg(RISCV::X0)
      .addMBB(LoopMBB);
}

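// Emit code that merges the bits of NewValReg selected by MaskReg into
// OldValReg, writing the result to DestReg and clobbering ScratchReg.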
static void insertMaskedMerge(const RISCVInstrInfo *TII, DebugLoc DL,
                              MachineBasicBlock *MBB, Register DestReg,
                              Register OldValReg, Register NewValReg,
                              Register MaskReg, Register ScratchReg) {
  assert(OldValReg != ScratchReg && "OldValReg and ScratchReg must be unique");
  assert(OldValReg != MaskReg && "OldValReg and MaskReg must be unique");
  assert(ScratchReg != MaskReg && "ScratchReg and MaskReg must be unique");

  // We select bits from newval and oldval using:
  // https://graphics.stanford.edu/~seander/bithacks.html#MaskedMerge
  // r = oldval ^ ((oldval ^ newval) & masktargetdata);
  BuildMI(MBB, DL, TII->get(RISCV::XOR), ScratchReg)
      .addReg(OldValReg)
      .addReg(NewValReg);
  BuildMI(MBB, DL, TII->get(RISCV::AND), ScratchReg)
      .addReg(ScratchReg)
      .addReg(MaskReg);
  BuildMI(MBB, DL, TII->get(RISCV::XOR), DestReg)
      .addReg(OldValReg)
      .addReg(ScratchReg);
}

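// Emit the LR/SC retry loop for a masked (sub-word) atomicrmw. i8/i16 atomics
// are performed on the aligned 32-bit word containing the target data, with
// MaskReg selecting the bits actually being operated on.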
static void doMaskedAtomicBinOpExpansion(
    const RISCVInstrInfo *TII, MachineInstr &MI, DebugLoc DL,
    MachineBasicBlock *ThisMBB, MachineBasicBlock *LoopMBB,
    MachineBasicBlock *DoneMBB, AtomicRMWInst::BinOp BinOp, int Width) {
  assert(Width == 32 && "Should never need to expand masked 64-bit operations");
  Register DestReg = MI.getOperand(0).getReg();
  Register ScratchReg = MI.getOperand(1).getReg();
  Register AddrReg = MI.getOperand(2).getReg();
  Register IncrReg = MI.getOperand(3).getReg();
  Register MaskReg = MI.getOperand(4).getReg();
  AtomicOrdering Ordering =
      static_cast<AtomicOrdering>(MI.getOperand(5).getImm());

  // .loop:
  //   lr.w destreg, (alignedaddr)
  //   binop scratch, destreg, incr
  //   xor scratch, destreg, scratch
  //   and scratch, scratch, masktargetdata
  //   xor scratch, destreg, scratch
  //   sc.w scratch, scratch, (alignedaddr)
  //   bnez scratch, loop
  BuildMI(LoopMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
      .addReg(AddrReg);
  switch (BinOp) {
  default:
    llvm_unreachable("Unexpected AtomicRMW BinOp");
  case AtomicRMWInst::Xchg:
    BuildMI(LoopMBB, DL, TII->get(RISCV::ADDI), ScratchReg)
        .addReg(IncrReg)
        .addImm(0);
    break;
  case AtomicRMWInst::Add:
    BuildMI(LoopMBB, DL, TII->get(RISCV::ADD), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    break;
  case AtomicRMWInst::Sub:
    BuildMI(LoopMBB, DL, TII->get(RISCV::SUB), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    break;
  case AtomicRMWInst::Nand:
    BuildMI(LoopMBB, DL, TII->get(RISCV::AND), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    BuildMI(LoopMBB, DL, TII->get(RISCV::XORI), ScratchReg)
        .addReg(ScratchReg)
        .addImm(-1);
    break;
  }

  insertMaskedMerge(TII, DL, LoopMBB, ScratchReg, DestReg, ScratchReg, MaskReg,
                    ScratchReg);

  BuildMI(LoopMBB, DL, TII->get(getSCForRMW32(Ordering)), ScratchReg)
      .addReg(AddrReg)
      .addReg(ScratchReg);
  BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
      .addReg(ScratchReg)
      .addReg(RISCV::X0)
      .addMBB(LoopMBB);
}

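// Expand a Pseudo[Masked]AtomicLoad* instruction: create the loop and done
// blocks, splice the instructions following the pseudo into DoneMBB, emit the
// expansion into LoopMBB, and recompute live-ins for the new blocks.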
bool RISCVExpandPseudo::expandAtomicBinOp(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    AtomicRMWInst::BinOp BinOp, bool IsMasked, int Width,
    MachineBasicBlock::iterator &NextMBBI) {
  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  auto LoopMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());

  // Insert new MBBs.
  MF->insert(++MBB.getIterator(), LoopMBB);
  MF->insert(++LoopMBB->getIterator(), DoneMBB);

  // Set up successors and transfer remaining instructions to DoneMBB.
  LoopMBB->addSuccessor(LoopMBB);
  LoopMBB->addSuccessor(DoneMBB);
  DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
  DoneMBB->transferSuccessors(&MBB);
  MBB.addSuccessor(LoopMBB);

  if (!IsMasked)
    doAtomicBinOpExpansion(TII, MI, DL, &MBB, LoopMBB, DoneMBB, BinOp, Width);
  else
    doMaskedAtomicBinOpExpansion(TII, MI, DL, &MBB, LoopMBB, DoneMBB, BinOp,
                                 Width);

  NextMBBI = MBB.end();
  MI.eraseFromParent();

  LivePhysRegs LiveRegs;
  computeAndAddLiveIns(LiveRegs, *LoopMBB);
  computeAndAddLiveIns(LiveRegs, *DoneMBB);

  return true;
}

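// Sign-extend the value in ValReg in place by shifting it left and then
// arithmetic-right by ShamtReg (XLEN minus the width of the field).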
static void insertSext(const RISCVInstrInfo *TII, DebugLoc DL,
                       MachineBasicBlock *MBB, Register ValReg,
                       Register ShamtReg) {
  BuildMI(MBB, DL, TII->get(RISCV::SLL), ValReg)
      .addReg(ValReg)
      .addReg(ShamtReg);
  BuildMI(MBB, DL, TII->get(RISCV::SRA), ValReg)
      .addReg(ValReg)
      .addReg(ShamtReg);
}

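// Masked min/max need a conditional branch inside the LR/SC loop (the store
// is skipped when no change is needed), so the loop is split into head,
// if-body and tail blocks rather than emitted as a single block.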
bool RISCVExpandPseudo::expandAtomicMinMaxOp(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    AtomicRMWInst::BinOp BinOp, bool IsMasked, int Width,
    MachineBasicBlock::iterator &NextMBBI) {
  assert(IsMasked == true &&
         "Should only need to expand masked atomic max/min");
  assert(Width == 32 && "Should never need to expand masked 64-bit operations");

  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = MBB.getParent();
  auto LoopHeadMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto LoopIfBodyMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto LoopTailMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());

  // Insert new MBBs.
  MF->insert(++MBB.getIterator(), LoopHeadMBB);
  MF->insert(++LoopHeadMBB->getIterator(), LoopIfBodyMBB);
  MF->insert(++LoopIfBodyMBB->getIterator(), LoopTailMBB);
  MF->insert(++LoopTailMBB->getIterator(), DoneMBB);

  // Set up successors and transfer remaining instructions to DoneMBB.
  LoopHeadMBB->addSuccessor(LoopIfBodyMBB);
  LoopHeadMBB->addSuccessor(LoopTailMBB);
  LoopIfBodyMBB->addSuccessor(LoopTailMBB);
  LoopTailMBB->addSuccessor(LoopHeadMBB);
  LoopTailMBB->addSuccessor(DoneMBB);
  DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
  DoneMBB->transferSuccessors(&MBB);
  MBB.addSuccessor(LoopHeadMBB);

  Register DestReg = MI.getOperand(0).getReg();
  Register Scratch1Reg = MI.getOperand(1).getReg();
  Register Scratch2Reg = MI.getOperand(2).getReg();
  Register AddrReg = MI.getOperand(3).getReg();
  Register IncrReg = MI.getOperand(4).getReg();
  Register MaskReg = MI.getOperand(5).getReg();
  bool IsSigned = BinOp == AtomicRMWInst::Min || BinOp == AtomicRMWInst::Max;
  AtomicOrdering Ordering =
      static_cast<AtomicOrdering>(MI.getOperand(IsSigned ? 7 : 6).getImm());

  //
  // .loophead:
  //   lr.w destreg, (alignedaddr)
  //   and scratch2, destreg, mask
  //   mv scratch1, destreg
  //   [sext scratch2 if signed min/max]
  //   ifnochangeneeded scratch2, incr, .looptail
  BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
      .addReg(AddrReg);
  BuildMI(LoopHeadMBB, DL, TII->get(RISCV::AND), Scratch2Reg)
      .addReg(DestReg)
      .addReg(MaskReg);
  BuildMI(LoopHeadMBB, DL, TII->get(RISCV::ADDI), Scratch1Reg)
      .addReg(DestReg)
      .addImm(0);

  switch (BinOp) {
  default:
    llvm_unreachable("Unexpected AtomicRMW BinOp");
  case AtomicRMWInst::Max: {
    insertSext(TII, DL, LoopHeadMBB, Scratch2Reg, MI.getOperand(6).getReg());
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGE))
        .addReg(Scratch2Reg)
        .addReg(IncrReg)
        .addMBB(LoopTailMBB);
    break;
  }
  case AtomicRMWInst::Min: {
    insertSext(TII, DL, LoopHeadMBB, Scratch2Reg, MI.getOperand(6).getReg());
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGE))
        .addReg(IncrReg)
        .addReg(Scratch2Reg)
        .addMBB(LoopTailMBB);
    break;
  }
  case AtomicRMWInst::UMax:
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGEU))
        .addReg(Scratch2Reg)
        .addReg(IncrReg)
        .addMBB(LoopTailMBB);
    break;
  case AtomicRMWInst::UMin:
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGEU))
        .addReg(IncrReg)
        .addReg(Scratch2Reg)
        .addMBB(LoopTailMBB);
    break;
  }

  // .loopifbody:
  //   xor scratch1, destreg, incr
  //   and scratch1, scratch1, mask
  //   xor scratch1, destreg, scratch1
  insertMaskedMerge(TII, DL, LoopIfBodyMBB, Scratch1Reg, DestReg, IncrReg,
                    MaskReg, Scratch1Reg);

  // .looptail:
  //   sc.w scratch1, scratch1, (addr)
  //   bnez scratch1, loop
  BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW32(Ordering)), Scratch1Reg)
      .addReg(AddrReg)
      .addReg(Scratch1Reg);
  BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
      .addReg(Scratch1Reg)
      .addReg(RISCV::X0)
      .addMBB(LoopHeadMBB);

  NextMBBI = MBB.end();
  MI.eraseFromParent();

  LivePhysRegs LiveRegs;
  computeAndAddLiveIns(LiveRegs, *LoopHeadMBB);
  computeAndAddLiveIns(LiveRegs, *LoopIfBodyMBB);
  computeAndAddLiveIns(LiveRegs, *LoopTailMBB);
  computeAndAddLiveIns(LiveRegs, *DoneMBB);

  return true;
}

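// Expand a Pseudo[Masked]CmpXchg* instruction into an LR/SC loop that stores
// the new value only when the loaded (and, if masked, extracted) value
// matches the expected one, and otherwise branches straight to DoneMBB.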
bool RISCVExpandPseudo::expandAtomicCmpXchg(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, bool IsMasked,
    int Width, MachineBasicBlock::iterator &NextMBBI) {
  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = MBB.getParent();
  auto LoopHeadMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto LoopTailMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());

  // Insert new MBBs.
  MF->insert(++MBB.getIterator(), LoopHeadMBB);
  MF->insert(++LoopHeadMBB->getIterator(), LoopTailMBB);
  MF->insert(++LoopTailMBB->getIterator(), DoneMBB);

  // Set up successors and transfer remaining instructions to DoneMBB.
  LoopHeadMBB->addSuccessor(LoopTailMBB);
  LoopHeadMBB->addSuccessor(DoneMBB);
  LoopTailMBB->addSuccessor(DoneMBB);
  LoopTailMBB->addSuccessor(LoopHeadMBB);
  DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
  DoneMBB->transferSuccessors(&MBB);
  MBB.addSuccessor(LoopHeadMBB);

  Register DestReg = MI.getOperand(0).getReg();
  Register ScratchReg = MI.getOperand(1).getReg();
  Register AddrReg = MI.getOperand(2).getReg();
  Register CmpValReg = MI.getOperand(3).getReg();
  Register NewValReg = MI.getOperand(4).getReg();
  AtomicOrdering Ordering =
      static_cast<AtomicOrdering>(MI.getOperand(IsMasked ? 6 : 5).getImm());

  if (!IsMasked) {
    // .loophead:
    //   lr.[w|d] dest, (addr)
    //   bne dest, cmpval, done
    BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
        .addReg(AddrReg);
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BNE))
        .addReg(DestReg)
        .addReg(CmpValReg)
        .addMBB(DoneMBB);
    // .looptail:
    //   sc.[w|d] scratch, newval, (addr)
    //   bnez scratch, loophead
    BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
        .addReg(AddrReg)
        .addReg(NewValReg);
    BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
        .addReg(ScratchReg)
        .addReg(RISCV::X0)
        .addMBB(LoopHeadMBB);
  } else {
    // .loophead:
    //   lr.w dest, (addr)
    //   and scratch, dest, mask
    //   bne scratch, cmpval, done
    Register MaskReg = MI.getOperand(5).getReg();
    BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
        .addReg(AddrReg);
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::AND), ScratchReg)
        .addReg(DestReg)
        .addReg(MaskReg);
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BNE))
        .addReg(ScratchReg)
        .addReg(CmpValReg)
        .addMBB(DoneMBB);

    // .looptail:
    //   xor scratch, dest, newval
    //   and scratch, scratch, mask
    //   xor scratch, dest, scratch
    //   sc.w scratch, scratch, (addr)
    //   bnez scratch, loophead
    insertMaskedMerge(TII, DL, LoopTailMBB, ScratchReg, DestReg, NewValReg,
                      MaskReg, ScratchReg);
    BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
        .addReg(AddrReg)
        .addReg(ScratchReg);
    BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
        .addReg(ScratchReg)
        .addReg(RISCV::X0)
        .addMBB(LoopHeadMBB);
  }

  NextMBBI = MBB.end();
  MI.eraseFromParent();

  LivePhysRegs LiveRegs;
  computeAndAddLiveIns(LiveRegs, *LoopHeadMBB);
  computeAndAddLiveIns(LiveRegs, *LoopTailMBB);
  computeAndAddLiveIns(LiveRegs, *DoneMBB);

  return true;
}

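// Expand a pseudo that pairs AUIPC with a second instruction taking the
// corresponding %pcrel_lo operand. The AUIPC is placed at the start of a new
// basic block so that it carries a label the %pcrel_lo can refer back to.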
bool RISCVExpandPseudo::expandAuipcInstPair(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    MachineBasicBlock::iterator &NextMBBI, unsigned FlagsHi,
    unsigned SecondOpcode) {
  MachineFunction *MF = MBB.getParent();
  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();

  Register DestReg = MI.getOperand(0).getReg();
  const MachineOperand &Symbol = MI.getOperand(1);

  MachineBasicBlock *NewMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());

  // Tell AsmPrinter that we unconditionally want the symbol of this label to
  // be emitted.
  NewMBB->setLabelMustBeEmitted();

  MF->insert(++MBB.getIterator(), NewMBB);

  BuildMI(NewMBB, DL, TII->get(RISCV::AUIPC), DestReg)
      .addDisp(Symbol, 0, FlagsHi);
  BuildMI(NewMBB, DL, TII->get(SecondOpcode), DestReg)
      .addReg(DestReg)
      .addMBB(NewMBB, RISCVII::MO_PCREL_LO);

  // Move all the rest of the instructions to NewMBB.
  NewMBB->splice(NewMBB->end(), &MBB, std::next(MBBI), MBB.end());
  // Update machine-CFG edges.
  NewMBB->transferSuccessorsAndUpdatePHIs(&MBB);
  // Make the original basic block fall-through to the new one.
  MBB.addSuccessor(NewMBB);

  // Make sure live-ins are correctly attached to this new basic block.
  LivePhysRegs LiveRegs;
  computeAndAddLiveIns(LiveRegs, *NewMBB);

  NextMBBI = MBB.end();
  MI.eraseFromParent();
  return true;
}

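// PseudoLLA loads a local address: AUIPC with %pcrel_hi plus an ADDI with the
// matching %pcrel_lo.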
bool RISCVExpandPseudo::expandLoadLocalAddress(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    MachineBasicBlock::iterator &NextMBBI) {
  return expandAuipcInstPair(MBB, MBBI, NextMBBI, RISCVII::MO_PCREL_HI,
                             RISCV::ADDI);
}

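// PseudoLA loads a global address. For position-independent code this is a
// load from the GOT (AUIPC with %got_pcrel_hi plus LD/LW); otherwise it
// degenerates to the PC-relative PseudoLLA sequence.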
bool RISCVExpandPseudo::expandLoadAddress(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    MachineBasicBlock::iterator &NextMBBI) {
  MachineFunction *MF = MBB.getParent();

  unsigned SecondOpcode;
  unsigned FlagsHi;
  if (MF->getTarget().isPositionIndependent()) {
    const auto &STI = MF->getSubtarget<RISCVSubtarget>();
    SecondOpcode = STI.is64Bit() ? RISCV::LD : RISCV::LW;
    FlagsHi = RISCVII::MO_GOT_HI;
  } else {
    SecondOpcode = RISCV::ADDI;
    FlagsHi = RISCVII::MO_PCREL_HI;
  }
  return expandAuipcInstPair(MBB, MBBI, NextMBBI, FlagsHi, SecondOpcode);
}

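// PseudoLA_TLS_IE implements the initial-exec TLS model: load the symbol's
// thread-pointer offset from the GOT via %tls_ie_pcrel_hi.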
bool RISCVExpandPseudo::expandLoadTLSIEAddress(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    MachineBasicBlock::iterator &NextMBBI) {
  MachineFunction *MF = MBB.getParent();

  const auto &STI = MF->getSubtarget<RISCVSubtarget>();
  unsigned SecondOpcode = STI.is64Bit() ? RISCV::LD : RISCV::LW;
  return expandAuipcInstPair(MBB, MBBI, NextMBBI, RISCVII::MO_TLS_GOT_HI,
                             SecondOpcode);
}

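// PseudoLA_TLS_GD implements the general-dynamic TLS model: compute the
// address of the symbol's TLS descriptor via %tls_gd_pcrel_hi for a
// subsequent call to __tls_get_addr.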
bool RISCVExpandPseudo::expandLoadTLSGDAddress(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    MachineBasicBlock::iterator &NextMBBI) {
  return expandAuipcInstPair(MBB, MBBI, NextMBBI, RISCVII::MO_TLS_GD_HI,
                             RISCV::ADDI);
}

} // end of anonymous namespace

INITIALIZE_PASS(RISCVExpandPseudo, "riscv-expand-pseudo",
                RISCV_EXPAND_PSEUDO_NAME, false, false)
namespace llvm {

FunctionPass *createRISCVExpandPseudoPass() { return new RISCVExpandPseudo(); }

} // end of namespace llvm
717