//=- AArch64ConditionOptimizer.cpp - Remove useless comparisons for AArch64 -=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass tries to make consecutive compares of values use the same
// operands so that a later CSE pass can remove the duplicated instructions.
// For this it analyzes branches and adjusts comparisons with immediate
// values by converting:
//  * GE -> GT
//  * GT -> GE
//  * LT -> LE
//  * LE -> LT
// and adjusting the immediate values appropriately.  In effect it nudges two
// immediate values towards each other until they become equal.
//
// Consider the following example in C:
//
//   if ((a < 5 && ...) || (a > 5 && ...)) {
//        ~~~~~             ~~~~~
//          ^                 ^
//          x                 y
//
// Here both "x" and "y" expressions compare "a" with "5".  When "x" evaluates
// to "false", "y" can just check the flags set by the first comparison.  As a
// result of the canonicalization employed by
// SelectionDAGBuilder::visitSwitchCase, DAGCombine, and other target-specific
// code, the assembly ends up in a form that is not CSE friendly:
//
//     ...
//     cmp      w8, #4
//     b.gt     .LBB0_3
//     ...
//   .LBB0_3:
//     cmp      w8, #6
//     b.lt     .LBB0_6
//     ...
//
// The same assembly after the pass:
//
//     ...
//     cmp      w8, #5
//     b.ge     .LBB0_3
//     ...
//   .LBB0_3:
//     cmp      w8, #5     // <-- CSE pass removes this instruction
//     b.le     .LBB0_6
//     ...
//
// Currently only SUBS and ADDS followed by b.?? are supported.
//
// TODO: maybe handle TBNZ/TBZ the same way as CMP when used instead for "a < 0"
// TODO: handle other conditional instructions (e.g. CSET)
// TODO: allow the second branch to be anything if it doesn't require adjusting
//
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdlib>
#include <tuple>

using namespace llvm;

#define DEBUG_TYPE "aarch64-condopt"

STATISTIC(NumConditionsAdjusted, "Number of conditions adjusted");

namespace {

class AArch64ConditionOptimizer : public MachineFunctionPass {
  const TargetInstrInfo *TII;
  MachineDominatorTree *DomTree;
  const MachineRegisterInfo *MRI;

public:
  // Stores the immediate, compare instruction opcode, and branch condition
  // (in this order) of the adjusted comparison.
  using CmpInfo = std::tuple<int, unsigned, AArch64CC::CondCode>;

  static char ID;

  AArch64ConditionOptimizer() : MachineFunctionPass(ID) {
    initializeAArch64ConditionOptimizerPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;
  MachineInstr *findSuitableCompare(MachineBasicBlock *MBB);
  CmpInfo adjustCmp(MachineInstr *CmpMI, AArch64CC::CondCode Cmp);
  void modifyCmp(MachineInstr *CmpMI, const CmpInfo &Info);
  bool adjustTo(MachineInstr *CmpMI, AArch64CC::CondCode Cmp, MachineInstr *To,
                int ToImm);
  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "AArch64 Condition Optimizer";
  }
};

} // end anonymous namespace

char AArch64ConditionOptimizer::ID = 0;

INITIALIZE_PASS_BEGIN(AArch64ConditionOptimizer, "aarch64-condopt",
                      "AArch64 CondOpt Pass", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(AArch64ConditionOptimizer, "aarch64-condopt",
                    "AArch64 CondOpt Pass", false, false)

FunctionPass *llvm::createAArch64ConditionOptimizerPass() {
  return new AArch64ConditionOptimizer();
}

void AArch64ConditionOptimizer::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

// Finds the compare instruction that controls the supported kinds of
// branching.  Returns the instruction, or nullptr on failure or when an
// unsupported instruction is detected.
MachineInstr *AArch64ConditionOptimizer::findSuitableCompare(
    MachineBasicBlock *MBB) {
  MachineBasicBlock::iterator I = MBB->getFirstTerminator();
  if (I == MBB->end())
    return nullptr;

  if (I->getOpcode() != AArch64::Bcc)
    return nullptr;

  // Since we may modify the cmp of this MBB, make sure NZCV does not live out.
  for (auto SuccBB : MBB->successors())
    if (SuccBB->isLiveIn(AArch64::NZCV))
      return nullptr;

  // Now find the instruction controlling the terminator.
  for (MachineBasicBlock::iterator B = MBB->begin(); I != B;) {
    --I;
    assert(!I->isTerminator() && "Spurious terminator");
    // Check if there is any use of NZCV between CMP and Bcc.
    if (I->readsRegister(AArch64::NZCV))
      return nullptr;
    switch (I->getOpcode()) {
    // cmp is an alias for subs with a dead destination register.
    case AArch64::SUBSWri:
    case AArch64::SUBSXri:
    // cmn is an alias for adds with a dead destination register.
    case AArch64::ADDSWri:
    case AArch64::ADDSXri: {
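      // ADDS/SUBS immediates are unsigned 12-bit values, optionally shifted
      // left by 12 (the shift is held in operand 3).  The checks below bail
      // out when the immediate is symbolic, when adjusting it by one could
      // leave the encodable range, or when the ALU result register is still
      // used (i.e. the instruction is not really a cmp/cmn).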
      unsigned ShiftAmt = AArch64_AM::getShiftValue(I->getOperand(3).getImm());
      if (!I->getOperand(2).isImm()) {
        LLVM_DEBUG(dbgs() << "Immediate of cmp is symbolic, " << *I << '\n');
        return nullptr;
      } else if (I->getOperand(2).getImm() << ShiftAmt >= 0xfff) {
        LLVM_DEBUG(dbgs() << "Immediate of cmp may be out of range, " << *I
                          << '\n');
        return nullptr;
      } else if (!MRI->use_empty(I->getOperand(0).getReg())) {
        LLVM_DEBUG(dbgs() << "Destination of cmp is not dead, " << *I << '\n');
        return nullptr;
      }
      return &*I;
    }
    // Prevent false positives in cases like:
    // cmp      w19, #0
    // cinc     w0, w19, gt
    // ...
    // fcmp     d8, #0.0
    // b.gt     .LBB0_5
    case AArch64::FCMPDri:
    case AArch64::FCMPSri:
    case AArch64::FCMPESri:
    case AArch64::FCMPEDri:

    case AArch64::SUBSWrr:
    case AArch64::SUBSXrr:
    case AArch64::ADDSWrr:
    case AArch64::ADDSXrr:
    case AArch64::FCMPSrr:
    case AArch64::FCMPDrr:
    case AArch64::FCMPESrr:
    case AArch64::FCMPEDrr:
      // Skip comparison instructions without immediate operands.
      return nullptr;
    }
  }
  LLVM_DEBUG(dbgs() << "Flags not defined in " << printMBBReference(*MBB)
                    << '\n');
  return nullptr;
}

// Changes opcode adds <-> subs, preserving the register operand width.
static int getComplementOpc(int Opc) {
  switch (Opc) {
  case AArch64::ADDSWri: return AArch64::SUBSWri;
  case AArch64::ADDSXri: return AArch64::SUBSXri;
  case AArch64::SUBSWri: return AArch64::ADDSWri;
  case AArch64::SUBSXri: return AArch64::ADDSXri;
  default:
    llvm_unreachable("Unexpected opcode");
  }
}

// Changes the form of a comparison: inclusive <-> exclusive.
static AArch64CC::CondCode getAdjustedCmp(AArch64CC::CondCode Cmp) {
  switch (Cmp) {
  case AArch64CC::GT: return AArch64CC::GE;
  case AArch64CC::GE: return AArch64CC::GT;
  case AArch64CC::LT: return AArch64CC::LE;
  case AArch64CC::LE: return AArch64CC::LT;
  default:
    llvm_unreachable("Unexpected condition code");
  }
}

// Transforms GT -> GE, GE -> GT, LT -> LE, LE -> LT by updating the comparison
// immediate (and opcode if needed) and the condition code.
AArch64ConditionOptimizer::CmpInfo AArch64ConditionOptimizer::adjustCmp(
    MachineInstr *CmpMI, AArch64CC::CondCode Cmp) {
  unsigned Opc = CmpMI->getOpcode();

  // CMN (compare with negative immediate) is an alias for ADDS (as
  // "operand - negative" == "operand + positive").
  bool Negative = (Opc == AArch64::ADDSWri || Opc == AArch64::ADDSXri);

  int Correction = (Cmp == AArch64CC::GT) ? 1 : -1;
  // Negate the correction value for comparison with a negative immediate (CMN).
  if (Negative) {
    Correction = -Correction;
  }

  const int OldImm = (int)CmpMI->getOperand(2).getImm();
  const int NewImm = std::abs(OldImm + Correction);

  // Handle +0 -> -1 and -0 -> +1 (CMN with 0 immediate) transitions by
  // adjusting the compare instruction opcode.
  if (OldImm == 0 && ((Negative && Correction == 1) ||
                      (!Negative && Correction == -1))) {
    Opc = getComplementOpc(Opc);
  }
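  // For example, "cmp w0, #0" checked with LT ("a < 0") becomes
  // "cmn w0, #1" checked with LE ("a <= -1").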

  return CmpInfo(NewImm, Opc, getAdjustedCmp(Cmp));
}

// Applies the changes suggested by adjustCmp() to the comparison instruction
// and updates the branch condition accordingly.
void AArch64ConditionOptimizer::modifyCmp(MachineInstr *CmpMI,
    const CmpInfo &Info) {
  int Imm;
  unsigned Opc;
  AArch64CC::CondCode Cmp;
  std::tie(Imm, Opc, Cmp) = Info;

  MachineBasicBlock *const MBB = CmpMI->getParent();

  // Change the immediate in the comparison instruction (ADDS or SUBS).
  BuildMI(*MBB, CmpMI, CmpMI->getDebugLoc(), TII->get(Opc))
      .add(CmpMI->getOperand(0))
      .add(CmpMI->getOperand(1))
      .addImm(Imm)
      .add(CmpMI->getOperand(3));
  CmpMI->eraseFromParent();

  // The fact that this comparison was picked ensures that it's related to the
  // first terminator instruction.
  MachineInstr &BrMI = *MBB->getFirstTerminator();

  // Change the condition in the branch instruction.
  BuildMI(*MBB, BrMI, BrMI.getDebugLoc(), TII->get(AArch64::Bcc))
      .addImm(Cmp)
      .add(BrMI.getOperand(1));
  BrMI.eraseFromParent();

  MBB->updateTerminator();

  ++NumConditionsAdjusted;
}

// Parses a condition code returned by analyzeBranch and computes the CondCode
// corresponding to TBB.
// Returns true if parsing was successful, false otherwise.
static bool parseCond(ArrayRef<MachineOperand> Cond, AArch64CC::CondCode &CC) {
  // A normal br.cond simply has the condition code.
  if (Cond[0].getImm() != -1) {
    assert(Cond.size() == 1 && "Unknown Cond array format");
    CC = (AArch64CC::CondCode)(int)Cond[0].getImm();
    return true;
  }
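  // Otherwise Cond describes a CBZ/CBNZ/TBZ/TBNZ branch (marked by the -1),
  // which carries a register (and bit number) instead of a condition code and
  // is not handled by this pass.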
  return false;
}

// Adjusts one cmp instruction to another one if the result of the adjustment
// will allow CSE.  Returns true if the compare instruction was changed,
// false otherwise.
bool AArch64ConditionOptimizer::adjustTo(MachineInstr *CmpMI,
                                         AArch64CC::CondCode Cmp,
                                         MachineInstr *To, int ToImm) {
  CmpInfo Info = adjustCmp(CmpMI, Cmp);
  if (std::get<0>(Info) == ToImm && std::get<1>(Info) == To->getOpcode()) {
    modifyCmp(CmpMI, Info);
    return true;
  }
  return false;
}

bool AArch64ConditionOptimizer::runOnMachineFunction(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << "********** AArch64 Condition Optimizer **********\n"
                    << "********** Function: " << MF.getName() << '\n');
  if (skipFunction(MF.getFunction()))
    return false;

  TII = MF.getSubtarget().getInstrInfo();
  DomTree = &getAnalysis<MachineDominatorTree>();
  MRI = &MF.getRegInfo();

  bool Changed = false;

  // Visit blocks in dominator tree pre-order. The pre-order enables multiple
  // cmp-conversions from the same head block.
  // Note that the df_iterator tolerates changes made to the children of the
  // node currently being visited; it doesn't look at child_begin() /
  // child_end() until after a node has been visited.
  for (MachineDomTreeNode *I : depth_first(DomTree)) {
    MachineBasicBlock *HBB = I->getBlock();

    SmallVector<MachineOperand, 4> HeadCond;
    MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
    if (TII->analyzeBranch(*HBB, TBB, FBB, HeadCond)) {
      continue;
    }

    // The equivalence check skips self-loops.
    if (!TBB || TBB == HBB) {
      continue;
    }

    SmallVector<MachineOperand, 4> TrueCond;
    MachineBasicBlock *TBB_TBB = nullptr, *TBB_FBB = nullptr;
    if (TII->analyzeBranch(*TBB, TBB_TBB, TBB_FBB, TrueCond)) {
      continue;
    }

    MachineInstr *HeadCmpMI = findSuitableCompare(HBB);
    if (!HeadCmpMI) {
      continue;
    }

    MachineInstr *TrueCmpMI = findSuitableCompare(TBB);
    if (!TrueCmpMI) {
      continue;
    }

    AArch64CC::CondCode HeadCmp;
    if (HeadCond.empty() || !parseCond(HeadCond, HeadCmp)) {
      continue;
    }

    AArch64CC::CondCode TrueCmp;
    if (TrueCond.empty() || !parseCond(TrueCond, TrueCmp)) {
      continue;
    }

    const int HeadImm = (int)HeadCmpMI->getOperand(2).getImm();
    const int TrueImm = (int)TrueCmpMI->getOperand(2).getImm();

    LLVM_DEBUG(dbgs() << "Head branch:\n");
    LLVM_DEBUG(dbgs() << "\tcondition: " << AArch64CC::getCondCodeName(HeadCmp)
                      << '\n');
    LLVM_DEBUG(dbgs() << "\timmediate: " << HeadImm << '\n');

    LLVM_DEBUG(dbgs() << "True branch:\n");
    LLVM_DEBUG(dbgs() << "\tcondition: " << AArch64CC::getCondCodeName(TrueCmp)
                      << '\n');
    LLVM_DEBUG(dbgs() << "\timmediate: " << TrueImm << '\n');

    if (((HeadCmp == AArch64CC::GT && TrueCmp == AArch64CC::LT) ||
         (HeadCmp == AArch64CC::LT && TrueCmp == AArch64CC::GT)) &&
        std::abs(TrueImm - HeadImm) == 2) {
      // This branch transforms machine instructions that correspond to
      //
      // 1) (a > {TrueImm} && ...) || (a < {HeadImm} && ...)
      // 2) (a < {TrueImm} && ...) || (a > {HeadImm} && ...)
      //
      // into
      //
      // 1) (a >= {NewImm} && ...) || (a <= {NewImm} && ...)
      // 2) (a <= {NewImm} && ...) || (a >= {NewImm} && ...)
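      //
      // E.g. for the example in the file header: HeadImm = 4 with GT and
      // TrueImm = 6 with LT both become compares against 5 (GE and LE),
      // letting the later CSE pass drop the second cmp.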

      CmpInfo HeadCmpInfo = adjustCmp(HeadCmpMI, HeadCmp);
      CmpInfo TrueCmpInfo = adjustCmp(TrueCmpMI, TrueCmp);
      if (std::get<0>(HeadCmpInfo) == std::get<0>(TrueCmpInfo) &&
          std::get<1>(HeadCmpInfo) == std::get<1>(TrueCmpInfo)) {
        modifyCmp(HeadCmpMI, HeadCmpInfo);
        modifyCmp(TrueCmpMI, TrueCmpInfo);
        Changed = true;
      }
    } else if (((HeadCmp == AArch64CC::GT && TrueCmp == AArch64CC::GT) ||
                (HeadCmp == AArch64CC::LT && TrueCmp == AArch64CC::LT)) &&
                std::abs(TrueImm - HeadImm) == 1) {
      // This branch transforms machine instructions that correspond to
      //
      // 1) (a > {TrueImm} && ...) || (a > {HeadImm} && ...)
      // 2) (a < {TrueImm} && ...) || (a < {HeadImm} && ...)
      //
      // into
      //
      // 1) (a >  {NewImm} && ...) || (a >= {NewImm} && ...)
      // 2) (a <  {NewImm} && ...) || (a <= {NewImm} && ...)
      //
      // where whichever of the two compares gets adjusted ends up in its
      // inclusive form with the shared immediate {NewImm}.

      // The GT -> GE transformation increases the immediate value, so pick the
      // compare with the smaller immediate; LT -> LE decreases the immediate,
      // so invert the choice.
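      // E.g. with "a > 4" in the head block and "a > 5" in the true block,
      // the head compare is rewritten to "a >= 5", and the later CSE pass can
      // then remove the duplicated "cmp #5".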
      bool adjustHeadCond = (HeadImm < TrueImm);
      if (HeadCmp == AArch64CC::LT) {
        adjustHeadCond = !adjustHeadCond;
      }

      if (adjustHeadCond) {
        Changed |= adjustTo(HeadCmpMI, HeadCmp, TrueCmpMI, TrueImm);
      } else {
        Changed |= adjustTo(TrueCmpMI, TrueCmp, HeadCmpMI, HeadImm);
      }
    }
    // Other transformation cases almost never occur because codegen generally
    // emits < or > comparisons rather than <= and >=.
  }

  return Changed;
}