//===- ARMConstantIslandPass.cpp - ARM constant islands -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that splits the constant pool up into 'islands'
// which are scattered throughout the function. This is required due to the
// limited pc-relative displacements that ARM has.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMBasicBlockInfo.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "Thumb2InstrInfo.h"
#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "arm-cp-islands"

#define ARM_CP_ISLANDS_OPT_NAME                                                \
  "ARM constant island placement and branch shortening pass"

STATISTIC(NumCPEs, "Number of constpool entries");
STATISTIC(NumSplit, "Number of uncond branches inserted");
STATISTIC(NumCBrFixed, "Number of cond branches fixed");
STATISTIC(NumUBrFixed, "Number of uncond branches fixed");
STATISTIC(NumTBs, "Number of table branches generated");
STATISTIC(NumT2CPShrunk, "Number of Thumb2 constantpool instructions shrunk");
STATISTIC(NumT2BrShrunk, "Number of Thumb2 immediate branches shrunk");
STATISTIC(NumCBZ, "Number of CBZ / CBNZ formed");
STATISTIC(NumJTMoved, "Number of jump table destination blocks moved");
STATISTIC(NumJTInserted, "Number of jump table intermediate blocks inserted");
STATISTIC(NumLEInserted, "Number of LE backwards branches inserted");

static cl::opt<bool>
AdjustJumpTableBlocks("arm-adjust-jump-tables", cl::Hidden, cl::init(true),
          cl::desc("Adjust basic block layout to better use TB[BH]"));

static cl::opt<unsigned>
CPMaxIteration("arm-constant-island-max-iteration", cl::Hidden, cl::init(30),
               cl::desc("The max number of iterations for convergence"));

static cl::opt<bool> SynthesizeThumb1TBB(
    "arm-synthesize-thumb-1-tbb", cl::Hidden, cl::init(true),
    cl::desc("Use compressed jump tables in Thumb-1 by synthesizing an "
             "equivalent to the TBB/TBH instructions"));

namespace {

  /// ARMConstantIslands - Due to limited PC-relative displacements, ARM
  /// requires constant pool entries to be scattered among the instructions
  /// inside a function. To do this, it completely ignores the normal LLVM
  /// constant pool; instead, it places constants wherever it feels like with
  /// special instructions.
  ///
  /// The terminology used in this pass includes:
  ///   Islands - Clumps of constants placed in the function.
  ///   Water   - Potential places where an island could be formed.
  ///   CPE     - A constant pool entry that has been placed somewhere, which
  ///             tracks a list of users.
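  ///
  /// A rough sketch of the layout this pass produces (illustrative only):
  ///
  ///   BB0:    ... ldr r0, [pc, #offset-to-CPE0] ...
  ///           b BB1                 ; end of BB0 is "water"
  ///   island: CPE0, CPE1            ; constants placed in the water
  ///   BB1:    ...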
  class ARMConstantIslands : public MachineFunctionPass {
    std::unique_ptr<ARMBasicBlockUtils> BBUtils = nullptr;

    /// WaterList - A sorted list of basic blocks where islands could be placed
    /// (i.e. blocks that don't fall through to the following block, due
    /// to a return, unreachable, or unconditional branch).
    std::vector<MachineBasicBlock*> WaterList;

    /// NewWaterList - The subset of WaterList that was created since the
    /// previous iteration by inserting unconditional branches.
    SmallSet<MachineBasicBlock*, 4> NewWaterList;

    using water_iterator = std::vector<MachineBasicBlock *>::iterator;

    /// CPUser - One user of a constant pool, keeping the machine instruction
    /// pointer, the constant pool being referenced, and the max displacement
    /// allowed from the instruction to the CP. The HighWaterMark records the
    /// highest basic block where a new CPEntry can be placed. To ensure this
    /// pass terminates, the CP entries are initially placed at the end of the
    /// function and then move monotonically to lower addresses. The
    /// exception to this rule is when the current CP entry for a particular
    /// CPUser is out of range, but there is another CP entry for the same
    /// constant value in range. We want to use the existing in-range CP
    /// entry, but if it later moves out of range, the search for new water
    /// should resume where it left off. The HighWaterMark is used to record
    /// that point.
    struct CPUser {
      MachineInstr *MI;
      MachineInstr *CPEMI;
      MachineBasicBlock *HighWaterMark;
      unsigned MaxDisp;
      bool NegOk;
      bool IsSoImm;
      bool KnownAlignment = false;

      CPUser(MachineInstr *mi, MachineInstr *cpemi, unsigned maxdisp,
             bool neg, bool soimm)
        : MI(mi), CPEMI(cpemi), MaxDisp(maxdisp), NegOk(neg), IsSoImm(soimm) {
        HighWaterMark = CPEMI->getParent();
      }

      /// getMaxDisp - Returns the maximum displacement supported by MI.
      /// Correct for unknown alignment.
      /// Conservatively subtract 2 bytes to handle weird alignment effects.
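      /// For illustration, using values set up in initializeFunctionInfo
      /// below: a tLDRpci user has MaxDisp = ((1 << 8) - 1) * 4 = 1020, so
      /// getMaxDisp() returns 1018 with known alignment and 1016 without.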
      unsigned getMaxDisp() const {
        return (KnownAlignment ? MaxDisp : MaxDisp - 2) - 2;
      }
    };

    /// CPUsers - Keep track of all of the machine instructions that use various
    /// constant pools and their max displacement.
    std::vector<CPUser> CPUsers;

    /// CPEntry - One per constant pool entry, keeping the machine instruction
    /// pointer, the constpool index, and the number of CPUser's which
    /// reference this entry.
    struct CPEntry {
      MachineInstr *CPEMI;
      unsigned CPI;
      unsigned RefCount;

      CPEntry(MachineInstr *cpemi, unsigned cpi, unsigned rc = 0)
        : CPEMI(cpemi), CPI(cpi), RefCount(rc) {}
    };

    /// CPEntries - Keep track of all of the constant pool entry machine
    /// instructions. For each original constpool index (i.e. those that existed
    /// upon entry to this pass), it keeps a vector of entries. Original
    /// elements are cloned as we go along; the clones are put in the vector of
    /// the original element, but have distinct CPIs.
    ///
    /// The first half of CPEntries contains generic constants, the second half
    /// contains jump tables. Use getCombinedIndex on a generic CPEMI to look up
    /// which vector it will be in here.
    std::vector<std::vector<CPEntry>> CPEntries;

    /// Maps a JT index to the offset in CPEntries containing copies of that
    /// table. The equivalent map for a CONSTPOOL_ENTRY is the identity.
    DenseMap<int, int> JumpTableEntryIndices;

    /// Maps a JT index to the LEA that actually uses the index to calculate its
    /// base address.
    DenseMap<int, int> JumpTableUserIndices;

    /// ImmBranch - One per immediate branch, keeping the machine instruction
    /// pointer, conditional or unconditional, the max displacement,
    /// and (if isCond is true) the corresponding unconditional branch
    /// opcode.
    struct ImmBranch {
      MachineInstr *MI;
      unsigned MaxDisp : 31;
      bool isCond : 1;
      unsigned UncondBr;

      ImmBranch(MachineInstr *mi, unsigned maxdisp, bool cond, unsigned ubr)
        : MI(mi), MaxDisp(maxdisp), isCond(cond), UncondBr(ubr) {}
    };

    /// ImmBranches - Keep track of all the immediate branch instructions.
    std::vector<ImmBranch> ImmBranches;

    /// PushPopMIs - Keep track of all the Thumb push / pop instructions.
    SmallVector<MachineInstr*, 4> PushPopMIs;

    /// T2JumpTables - Keep track of all the Thumb2 jumptable instructions.
    SmallVector<MachineInstr*, 4> T2JumpTables;

    MachineFunction *MF;
    MachineConstantPool *MCP;
    const ARMBaseInstrInfo *TII;
    const ARMSubtarget *STI;
    ARMFunctionInfo *AFI;
    MachineDominatorTree *DT = nullptr;
    bool isThumb;
    bool isThumb1;
    bool isThumb2;
    bool isPositionIndependentOrROPI;

  public:
    static char ID;

    ARMConstantIslands() : MachineFunctionPass(ID) {}

    bool runOnMachineFunction(MachineFunction &MF) override;

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addRequired<MachineDominatorTree>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    MachineFunctionProperties getRequiredProperties() const override {
      return MachineFunctionProperties().set(
          MachineFunctionProperties::Property::NoVRegs);
    }

    StringRef getPassName() const override {
      return ARM_CP_ISLANDS_OPT_NAME;
    }

  private:
    void doInitialConstPlacement(std::vector<MachineInstr *> &CPEMIs);
    void doInitialJumpTablePlacement(std::vector<MachineInstr *> &CPEMIs);
    bool BBHasFallthrough(MachineBasicBlock *MBB);
    CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
    Align getCPEAlign(const MachineInstr *CPEMI);
    void scanFunctionJumpTables();
    void initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs);
    MachineBasicBlock *splitBlockBeforeInstr(MachineInstr *MI);
    void updateForInsertedWaterBlock(MachineBasicBlock *NewBB);
    bool decrementCPEReferenceCount(unsigned CPI, MachineInstr* CPEMI);
    unsigned getCombinedIndex(const MachineInstr *CPEMI);
    int findInRangeCPEntry(CPUser& U, unsigned UserOffset);
    bool findAvailableWater(CPUser&U, unsigned UserOffset,
                            water_iterator &WaterIter, bool CloserWater);
    void createNewWater(unsigned CPUserIndex, unsigned UserOffset,
                        MachineBasicBlock *&NewMBB);
    bool handleConstantPoolUser(unsigned CPUserIndex, bool CloserWater);
    void removeDeadCPEMI(MachineInstr *CPEMI);
    bool removeUnusedCPEntries();
    bool isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
                          MachineInstr *CPEMI, unsigned Disp, bool NegOk,
                          bool DoDump = false);
    bool isWaterInRange(unsigned UserOffset, MachineBasicBlock *Water,
                        CPUser &U, unsigned &Growth);
    bool fixupImmediateBr(ImmBranch &Br);
    bool fixupConditionalBr(ImmBranch &Br);
    bool fixupUnconditionalBr(ImmBranch &Br);
    bool optimizeThumb2Instructions();
    bool optimizeThumb2Branches();
    bool reorderThumb2JumpTables();
    bool preserveBaseRegister(MachineInstr *JumpMI, MachineInstr *LEAMI,
                              unsigned &DeadSize, bool &CanDeleteLEA,
                              bool &BaseRegKill);
    bool optimizeThumb2JumpTables();
    MachineBasicBlock *adjustJTTargetBlockForward(MachineBasicBlock *BB,
                                                  MachineBasicBlock *JTBB);

    unsigned getUserOffset(CPUser&) const;
    void dumpBBs();
    void verify();

    bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
                         unsigned Disp, bool NegativeOK, bool IsSoImm = false);
    bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
                         const CPUser &U) {
      return isOffsetInRange(UserOffset, TrialOffset,
                             U.getMaxDisp(), U.NegOk, U.IsSoImm);
    }
  };

} // end anonymous namespace

char ARMConstantIslands::ID = 0;

/// verify - check BBOffsets, BBSizes, alignment of islands
void ARMConstantIslands::verify() {
#ifndef NDEBUG
  BBInfoVector &BBInfo = BBUtils->getBBInfo();
  assert(std::is_sorted(MF->begin(), MF->end(),
                        [&BBInfo](const MachineBasicBlock &LHS,
                                  const MachineBasicBlock &RHS) {
                          return BBInfo[LHS.getNumber()].postOffset() <
                                 BBInfo[RHS.getNumber()].postOffset();
                        }));
  LLVM_DEBUG(dbgs() << "Verifying " << CPUsers.size() << " CP users.\n");
  for (unsigned i = 0, e = CPUsers.size(); i != e; ++i) {
    CPUser &U = CPUsers[i];
    unsigned UserOffset = getUserOffset(U);
    // Verify offset using the real max displacement without the safety
    // adjustment.
    if (isCPEntryInRange(U.MI, UserOffset, U.CPEMI, U.getMaxDisp()+2, U.NegOk,
                         /* DoDump = */ true)) {
      LLVM_DEBUG(dbgs() << "OK\n");
      continue;
    }
    LLVM_DEBUG(dbgs() << "Out of range.\n");
    dumpBBs();
    LLVM_DEBUG(MF->dump());
    llvm_unreachable("Constant pool entry out of range!");
  }
#endif
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// print block size and offset information - debugging
LLVM_DUMP_METHOD void ARMConstantIslands::dumpBBs() {
  LLVM_DEBUG({
    BBInfoVector &BBInfo = BBUtils->getBBInfo();
    for (unsigned J = 0, E = BBInfo.size(); J != E; ++J) {
      const BasicBlockInfo &BBI = BBInfo[J];
      dbgs() << format("%08x %bb.%u\t", BBI.Offset, J)
             << " kb=" << unsigned(BBI.KnownBits)
             << " ua=" << unsigned(BBI.Unalign) << " pa=" << Log2(BBI.PostAlign)
             << format(" size=%#x\n", BBInfo[J].Size);
    }
  });
}
#endif

// Align blocks where the previous block does not fall through. This may add
// extra NOPs but they will not be executed. It uses the PrefLoopAlignment as a
// measure of how much to align, and only runs at CodeGenOpt::Aggressive.
static bool AlignBlocks(MachineFunction *MF) {
  if (MF->getTarget().getOptLevel() != CodeGenOpt::Aggressive ||
      MF->getFunction().hasOptSize())
    return false;

  auto *TLI = MF->getSubtarget().getTargetLowering();
  const Align Alignment = TLI->getPrefLoopAlignment();
  if (Alignment < 4)
    return false;

  bool Changed = false;
  bool PrevCanFallthough = true;
  for (auto &MBB : *MF) {
    if (!PrevCanFallthough) {
      Changed = true;
      MBB.setAlignment(Alignment);
    }
    PrevCanFallthough = MBB.canFallThrough();
  }

  return Changed;
}

bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
  MF = &mf;
  MCP = mf.getConstantPool();
  BBUtils = std::unique_ptr<ARMBasicBlockUtils>(new ARMBasicBlockUtils(mf));

  LLVM_DEBUG(dbgs() << "***** ARMConstantIslands: "
                    << MCP->getConstants().size() << " CP entries, aligned to "
                    << MCP->getConstantPoolAlign().value() << " bytes *****\n");

  STI = &static_cast<const ARMSubtarget &>(MF->getSubtarget());
  TII = STI->getInstrInfo();
  isPositionIndependentOrROPI =
      STI->getTargetLowering()->isPositionIndependent() || STI->isROPI();
  AFI = MF->getInfo<ARMFunctionInfo>();
  DT = &getAnalysis<MachineDominatorTree>();

  isThumb = AFI->isThumbFunction();
  isThumb1 = AFI->isThumb1OnlyFunction();
  isThumb2 = AFI->isThumb2Function();

  bool GenerateTBB = isThumb2 || (isThumb1 && SynthesizeThumb1TBB);
  // TBB generation code in this constant island pass has not been adapted to
  // deal with speculation barriers.
  if (STI->hardenSlsRetBr())
    GenerateTBB = false;

  // Renumber all of the machine basic blocks in the function, guaranteeing
  // that the numbers agree with the position of the block in the function.
  MF->RenumberBlocks();

  // Try to reorder and otherwise adjust the block layout to make good use
  // of the TB[BH] instructions.
  bool MadeChange = false;
  if (GenerateTBB && AdjustJumpTableBlocks) {
    scanFunctionJumpTables();
    MadeChange |= reorderThumb2JumpTables();
    // Data is out of date, so clear it. It'll be re-computed later.
    T2JumpTables.clear();
    // Blocks may have shifted around. Keep the numbering up to date.
    MF->RenumberBlocks();
  }

  // Align any non-fallthrough blocks.
  MadeChange |= AlignBlocks(MF);

  // Perform the initial placement of the constant pool entries. To start with,
  // we put them all at the end of the function.
  std::vector<MachineInstr*> CPEMIs;
  if (!MCP->isEmpty())
    doInitialConstPlacement(CPEMIs);

  if (MF->getJumpTableInfo())
    doInitialJumpTablePlacement(CPEMIs);

  /// The next UID to take is the first unused one.
  AFI->initPICLabelUId(CPEMIs.size());

  // Do the initial scan of the function, building up information about the
  // sizes of each block, the location of all the water, and finding all of the
  // constant pool users.
  initializeFunctionInfo(CPEMIs);
  CPEMIs.clear();
  LLVM_DEBUG(dumpBBs());

  // Functions with jump tables need an alignment of 4 because they use the ADR
  // instruction, which aligns the PC to 4 bytes before adding an offset.
  if (!T2JumpTables.empty())
    MF->ensureAlignment(Align(4));

  /// Remove dead constant pool entries.
  MadeChange |= removeUnusedCPEntries();

  // Iteratively place constant pool entries and fix up branches until there
  // is no change.
  unsigned NoCPIters = 0, NoBRIters = 0;
  while (true) {
    LLVM_DEBUG(dbgs() << "Beginning CP iteration #" << NoCPIters << '\n');
    bool CPChange = false;
    for (unsigned i = 0, e = CPUsers.size(); i != e; ++i)
      // For most inputs, it converges in no more than 5 iterations.
      // If it hasn't converged after 10, the input may have a huge BB or many
      // CPEs; in that case, we try different heuristics.
      CPChange |= handleConstantPoolUser(i, NoCPIters >= CPMaxIteration / 2);
    if (CPChange && ++NoCPIters > CPMaxIteration)
      report_fatal_error("Constant Island pass failed to converge!");
    LLVM_DEBUG(dumpBBs());

    // Clear NewWaterList now. If we split a block for branches, it should
    // appear as "new water" for the next iteration of constant pool placement.
    NewWaterList.clear();

    LLVM_DEBUG(dbgs() << "Beginning BR iteration #" << NoBRIters << '\n');
    bool BRChange = false;
    for (unsigned i = 0, e = ImmBranches.size(); i != e; ++i)
      BRChange |= fixupImmediateBr(ImmBranches[i]);
    if (BRChange && ++NoBRIters > 30)
      report_fatal_error("Branch Fix Up pass failed to converge!");
    LLVM_DEBUG(dumpBBs());

    if (!CPChange && !BRChange)
      break;
    MadeChange = true;
  }

  // Shrink 32-bit Thumb2 load and store instructions.
  if (isThumb2 && !STI->prefers32BitThumb())
    MadeChange |= optimizeThumb2Instructions();

  // Shrink 32-bit branch instructions.
  if (isThumb && STI->hasV8MBaselineOps())
    MadeChange |= optimizeThumb2Branches();

  // Optimize jump tables using TBB / TBH.
  if (GenerateTBB && !STI->genExecuteOnly())
    MadeChange |= optimizeThumb2JumpTables();

  // After a while, this might be made debug-only, but it is not expensive.
  verify();

  // Save the mapping between original and cloned constpool entries.
  for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) {
    for (unsigned j = 0, je = CPEntries[i].size(); j != je; ++j) {
      const CPEntry & CPE = CPEntries[i][j];
      if (CPE.CPEMI && CPE.CPEMI->getOperand(1).isCPI())
        AFI->recordCPEClone(i, CPE.CPI);
    }
  }

  LLVM_DEBUG(dbgs() << '\n'; dumpBBs());

  BBUtils->clear();
  WaterList.clear();
  CPUsers.clear();
  CPEntries.clear();
  JumpTableEntryIndices.clear();
  JumpTableUserIndices.clear();
  ImmBranches.clear();
  PushPopMIs.clear();
  T2JumpTables.clear();

  return MadeChange;
}

/// Perform the initial placement of the regular constant pool entries.
/// To start with, we put them all at the end of the function.
void
ARMConstantIslands::doInitialConstPlacement(std::vector<MachineInstr*> &CPEMIs) {
  // Create the basic block to hold the CPE's.
  MachineBasicBlock *BB = MF->CreateMachineBasicBlock();
  MF->push_back(BB);

  // MachineConstantPool measures alignment in bytes.
  const Align MaxAlign = MCP->getConstantPoolAlign();
  const unsigned MaxLogAlign = Log2(MaxAlign);

  // Mark the basic block as required by the const-pool.
  BB->setAlignment(MaxAlign);

  // The function needs to be as aligned as the basic blocks. The linker may
  // move functions around based on their alignment.
  // Special case: halfword literals still need word alignment on the function.
  Align FuncAlign = MaxAlign;
  if (MaxAlign == 2)
    FuncAlign = Align(4);
  MF->ensureAlignment(FuncAlign);

  // Order the entries in BB by descending alignment. That ensures correct
  // alignment of all entries as long as BB is sufficiently aligned. Keep
  // track of the insertion point for each alignment. We are going to bucket
  // sort the entries as they are created.
  SmallVector<MachineBasicBlock::iterator, 8> InsPoint(MaxLogAlign + 1,
                                                       BB->end());
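  // Illustration: with MaxLogAlign == 3 the block ends up laid out as
  //   [align-8 entries][align-4 entries][align-2 entries][align-1 entries]
  // where each InsPoint[a] tracks the position before which the next entry
  // of alignment 2^a must be inserted.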

  // Add all of the constants from the constant pool to the end block, use an
  // identity mapping of CPI's to CPE's.
  const std::vector<MachineConstantPoolEntry> &CPs = MCP->getConstants();

  const DataLayout &TD = MF->getDataLayout();
  for (unsigned i = 0, e = CPs.size(); i != e; ++i) {
    unsigned Size = CPs[i].getSizeInBytes(TD);
    Align Alignment = CPs[i].getAlign();
    // Verify that all constant pool entries are a multiple of their alignment.
    // If not, we would have to pad them out so that instructions stay aligned.
    assert(isAligned(Alignment, Size) && "CP Entry not multiple of 4 bytes!");

    // Insert CONSTPOOL_ENTRY before entries with a smaller alignment.
    unsigned LogAlign = Log2(Alignment);
    MachineBasicBlock::iterator InsAt = InsPoint[LogAlign];
    MachineInstr *CPEMI =
      BuildMI(*BB, InsAt, DebugLoc(), TII->get(ARM::CONSTPOOL_ENTRY))
        .addImm(i).addConstantPoolIndex(i).addImm(Size);
    CPEMIs.push_back(CPEMI);

    // Ensure that future entries with higher alignment get inserted before
    // CPEMI. This is bucket sort with iterators.
    for (unsigned a = LogAlign + 1; a <= MaxLogAlign; ++a)
      if (InsPoint[a] == InsAt)
        InsPoint[a] = CPEMI;

    // Add a new CPEntry, but no corresponding CPUser yet.
    CPEntries.emplace_back(1, CPEntry(CPEMI, i));
    ++NumCPEs;
    LLVM_DEBUG(dbgs() << "Moved CPI#" << i << " to end of function, size = "
                      << Size << ", align = " << Alignment.value() << '\n');
  }
  LLVM_DEBUG(BB->dump());
}

/// Do initial placement of the jump tables. Because Thumb2's TBB and TBH
/// instructions can be made more efficient if the jump table immediately
/// follows the instruction, it's best to place them immediately next to their
/// jumps to begin with. In almost all cases they'll never be moved from that
/// position.
void ARMConstantIslands::doInitialJumpTablePlacement(
    std::vector<MachineInstr *> &CPEMIs) {
  unsigned i = CPEntries.size();
  auto MJTI = MF->getJumpTableInfo();
  const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();

  MachineBasicBlock *LastCorrectlyNumberedBB = nullptr;
  for (MachineBasicBlock &MBB : *MF) {
    auto MI = MBB.getLastNonDebugInstr();
    // Look past potential SpeculationBarriers at end of BB.
    while (MI != MBB.end() &&
           (isSpeculationBarrierEndBBOpcode(MI->getOpcode()) ||
            MI->isDebugInstr()))
      --MI;

    if (MI == MBB.end())
      continue;

    unsigned JTOpcode;
    switch (MI->getOpcode()) {
    default:
      continue;
    case ARM::BR_JTadd:
    case ARM::BR_JTr:
    case ARM::tBR_JTr:
    case ARM::BR_JTm_i12:
    case ARM::BR_JTm_rs:
      JTOpcode = ARM::JUMPTABLE_ADDRS;
      break;
    case ARM::t2BR_JT:
      JTOpcode = ARM::JUMPTABLE_INSTS;
      break;
    case ARM::tTBB_JT:
    case ARM::t2TBB_JT:
      JTOpcode = ARM::JUMPTABLE_TBB;
      break;
    case ARM::tTBH_JT:
    case ARM::t2TBH_JT:
      JTOpcode = ARM::JUMPTABLE_TBH;
      break;
    }

    unsigned NumOps = MI->getDesc().getNumOperands();
    MachineOperand JTOp =
      MI->getOperand(NumOps - (MI->isPredicable() ? 2 : 1));
    unsigned JTI = JTOp.getIndex();
    unsigned Size = JT[JTI].MBBs.size() * sizeof(uint32_t);
    MachineBasicBlock *JumpTableBB = MF->CreateMachineBasicBlock();
    MF->insert(std::next(MachineFunction::iterator(MBB)), JumpTableBB);
    MachineInstr *CPEMI = BuildMI(*JumpTableBB, JumpTableBB->begin(),
                                  DebugLoc(), TII->get(JTOpcode))
                              .addImm(i++)
                              .addJumpTableIndex(JTI)
                              .addImm(Size);
    CPEMIs.push_back(CPEMI);
    CPEntries.emplace_back(1, CPEntry(CPEMI, JTI));
    JumpTableEntryIndices.insert(std::make_pair(JTI, CPEntries.size() - 1));
    if (!LastCorrectlyNumberedBB)
      LastCorrectlyNumberedBB = &MBB;
  }

  // If we did anything then we need to renumber the subsequent blocks.
  if (LastCorrectlyNumberedBB)
    MF->RenumberBlocks(LastCorrectlyNumberedBB);
}

/// BBHasFallthrough - Return true if the specified basic block can fallthrough
/// into the block immediately after it.
bool ARMConstantIslands::BBHasFallthrough(MachineBasicBlock *MBB) {
  // Get the next machine basic block in the function.
  MachineFunction::iterator MBBI = MBB->getIterator();
  // Can't fall off end of function.
  if (std::next(MBBI) == MBB->getParent()->end())
    return false;

  MachineBasicBlock *NextBB = &*std::next(MBBI);
  if (!MBB->isSuccessor(NextBB))
    return false;

  // Try to analyze the end of the block. A potential fallthrough may already
  // have an unconditional branch for whatever reason.
  MachineBasicBlock *TBB, *FBB;
  SmallVector<MachineOperand, 4> Cond;
  bool TooDifficult = TII->analyzeBranch(*MBB, TBB, FBB, Cond);
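  // If analyzeBranch could not decode the terminators (TooDifficult), be
  // conservative and assume the block may fall through. Otherwise,
  // FBB == nullptr means there is no branch around the layout successor,
  // so the fallthrough edge is real.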
  return TooDifficult || FBB == nullptr;
}

/// findConstPoolEntry - Given the constpool index and CONSTPOOL_ENTRY MI,
/// look up the corresponding CPEntry.
ARMConstantIslands::CPEntry *
ARMConstantIslands::findConstPoolEntry(unsigned CPI,
                                       const MachineInstr *CPEMI) {
  std::vector<CPEntry> &CPEs = CPEntries[CPI];
  // Number of entries per constpool index should be small, just do a
  // linear search.
  for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
    if (CPEs[i].CPEMI == CPEMI)
      return &CPEs[i];
  }
  return nullptr;
}

/// getCPEAlign - Returns the required alignment of the constant pool entry
/// represented by CPEMI.
Align ARMConstantIslands::getCPEAlign(const MachineInstr *CPEMI) {
  switch (CPEMI->getOpcode()) {
  case ARM::CONSTPOOL_ENTRY:
    break;
  case ARM::JUMPTABLE_TBB:
    return isThumb1 ? Align(4) : Align(1);
  case ARM::JUMPTABLE_TBH:
    return isThumb1 ? Align(4) : Align(2);
  case ARM::JUMPTABLE_INSTS:
    return Align(2);
  case ARM::JUMPTABLE_ADDRS:
    return Align(4);
  default:
    llvm_unreachable("unknown constpool entry kind");
  }

  unsigned CPI = getCombinedIndex(CPEMI);
  assert(CPI < MCP->getConstants().size() && "Invalid constant pool index.");
  return MCP->getConstants()[CPI].getAlign();
}

/// scanFunctionJumpTables - Do a scan of the function, building up
/// information about the sizes of each block and the locations of all
/// the jump tables.
void ARMConstantIslands::scanFunctionJumpTables() {
  for (MachineBasicBlock &MBB : *MF) {
    for (MachineInstr &I : MBB)
      if (I.isBranch() &&
          (I.getOpcode() == ARM::t2BR_JT || I.getOpcode() == ARM::tBR_JTr))
        T2JumpTables.push_back(&I);
  }
}

/// initializeFunctionInfo - Do the initial scan of the function, building up
/// information about the sizes of each block, the location of all the water,
/// and finding all of the constant pool users.
void ARMConstantIslands::
initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {

  BBUtils->computeAllBlockSizes();
  BBInfoVector &BBInfo = BBUtils->getBBInfo();
  // The known bits of the entry block offset are determined by the function
  // alignment.
  BBInfo.front().KnownBits = Log2(MF->getAlignment());

  // Compute block offsets and known bits.
  BBUtils->adjustBBOffsetsAfter(&MF->front());

  // Now go back through the instructions and build up our data structures.
  for (MachineBasicBlock &MBB : *MF) {
    // If this block doesn't fall through into the next MBB, then this is
    // 'water' where a constant pool island could be placed.
    if (!BBHasFallthrough(&MBB))
      WaterList.push_back(&MBB);

    for (MachineInstr &I : MBB) {
      if (I.isDebugInstr())
        continue;

      unsigned Opc = I.getOpcode();
      if (I.isBranch()) {
        bool isCond = false;
        unsigned Bits = 0;
        unsigned Scale = 1;
        int UOpc = Opc;
        switch (Opc) {
        default:
          continue; // Ignore other JT branches
        case ARM::t2BR_JT:
        case ARM::tBR_JTr:
          T2JumpTables.push_back(&I);
          continue; // Does not get an entry in ImmBranches
        case ARM::Bcc:
          isCond = true;
          UOpc = ARM::B;
          LLVM_FALLTHROUGH;
        case ARM::B:
          Bits = 24;
          Scale = 4;
          break;
        case ARM::tBcc:
          isCond = true;
          UOpc = ARM::tB;
          Bits = 8;
          Scale = 2;
          break;
        case ARM::tB:
          Bits = 11;
          Scale = 2;
          break;
        case ARM::t2Bcc:
          isCond = true;
          UOpc = ARM::t2B;
          Bits = 20;
          Scale = 2;
          break;
        case ARM::t2B:
          Bits = 24;
          Scale = 2;
          break;
        }
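        // For illustration: the branch offset field is signed, so ARM::B
        // (Bits=24, Scale=4) gives MaxOffs = ((1 << 23) - 1) * 4 = 33554428
        // (~32 MiB each way), while tB (Bits=11, Scale=2) gives only
        // ((1 << 10) - 1) * 2 = 2046 bytes.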

        // Record this immediate branch.
        unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
        ImmBranches.push_back(ImmBranch(&I, MaxOffs, isCond, UOpc));
      }

      if (Opc == ARM::tPUSH || Opc == ARM::tPOP_RET)
        PushPopMIs.push_back(&I);

      if (Opc == ARM::CONSTPOOL_ENTRY || Opc == ARM::JUMPTABLE_ADDRS ||
          Opc == ARM::JUMPTABLE_INSTS || Opc == ARM::JUMPTABLE_TBB ||
          Opc == ARM::JUMPTABLE_TBH)
        continue;

      // Scan the instructions for constant pool operands.
      for (unsigned op = 0, e = I.getNumOperands(); op != e; ++op)
        if (I.getOperand(op).isCPI() || I.getOperand(op).isJTI()) {
          // We found one. The addressing mode tells us the max displacement
          // from the PC that this instruction permits.

          // Basic size info comes from the TSFlags field.
          unsigned Bits = 0;
          unsigned Scale = 1;
          bool NegOk = false;
          bool IsSoImm = false;

          switch (Opc) {
          default:
            llvm_unreachable("Unknown addressing mode for CP reference!");

          // Taking the address of a CP entry.
          case ARM::LEApcrel:
          case ARM::LEApcrelJT: {
            // This takes a SoImm, which is an 8-bit immediate rotated. We'll
            // pretend the maximum offset is 255 * 4. Since each instruction
            // is 4 bytes wide, this is always correct. We'll check for other
            // displacements that fit in a SoImm as well.
            Bits = 8;
            NegOk = true;
            IsSoImm = true;
            unsigned CPI = I.getOperand(op).getIndex();
            assert(CPI < CPEMIs.size());
            MachineInstr *CPEMI = CPEMIs[CPI];
            const Align CPEAlign = getCPEAlign(CPEMI);
            const unsigned LogCPEAlign = Log2(CPEAlign);
            if (LogCPEAlign >= 2)
              Scale = 4;
            else
              // For constants with less than 4-byte alignment,
              // we'll pretend the maximum offset is 255 * 1.
              Scale = 1;
          }
          break;
          case ARM::t2LEApcrel:
          case ARM::t2LEApcrelJT:
            Bits = 12;
            NegOk = true;
            break;
          case ARM::tLEApcrel:
          case ARM::tLEApcrelJT:
            Bits = 8;
            Scale = 4;
            break;

          case ARM::LDRBi12:
          case ARM::LDRi12:
          case ARM::LDRcp:
          case ARM::t2LDRpci:
          case ARM::t2LDRHpci:
          case ARM::t2LDRBpci:
            Bits = 12;  // +-offset_12
            NegOk = true;
            break;

          case ARM::tLDRpci:
            Bits = 8;
            Scale = 4;  // +(offset_8*4)
            break;

          case ARM::VLDRD:
          case ARM::VLDRS:
            Bits = 8;
            Scale = 4;  // +-(offset_8*4)
            NegOk = true;
            break;
          case ARM::VLDRH:
            Bits = 8;
            Scale = 2;  // +-(offset_8*2)
            NegOk = true;
            break;
          }

          // Remember that this is a user of a CP entry.
          unsigned CPI = I.getOperand(op).getIndex();
          if (I.getOperand(op).isJTI()) {
            JumpTableUserIndices.insert(std::make_pair(CPI, CPUsers.size()));
            CPI = JumpTableEntryIndices[CPI];
          }

          MachineInstr *CPEMI = CPEMIs[CPI];
          unsigned MaxOffs = ((1 << Bits)-1) * Scale;
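          // For illustration: tLDRpci gives ((1 << 8) - 1) * 4 = 1020 and
          // t2LDRpci gives ((1 << 12) - 1) * 1 = 4095. Unlike the branch
          // offsets above, these are unsigned fields, so there is no sign
          // bit to subtract; negative reach is modeled by NegOk instead.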
          CPUsers.push_back(CPUser(&I, CPEMI, MaxOffs, NegOk, IsSoImm));

          // Increment corresponding CPEntry reference count.
          CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
          assert(CPE && "Cannot find a corresponding CPEntry!");
          CPE->RefCount++;

          // Instructions can only use one CP entry, don't bother scanning the
          // rest of the operands.
          break;
        }
    }
  }
}

/// CompareMBBNumbers - Little predicate function to sort the WaterList by MBB
/// ID.
static bool CompareMBBNumbers(const MachineBasicBlock *LHS,
                              const MachineBasicBlock *RHS) {
  return LHS->getNumber() < RHS->getNumber();
}

/// updateForInsertedWaterBlock - When a block is newly inserted into the
/// machine function, it upsets all of the block numbers. Renumber the blocks
/// and update the arrays that parallel this numbering.
void ARMConstantIslands::updateForInsertedWaterBlock(MachineBasicBlock *NewBB) {
  // Renumber the MBB's to keep them consecutive.
  NewBB->getParent()->RenumberBlocks(NewBB);

  // Insert an entry into BBInfo to align it properly with the (newly
  // renumbered) block numbers.
  BBUtils->insert(NewBB->getNumber(), BasicBlockInfo());

  // Next, update WaterList. Specifically, we need to add NewBB as having
  // available water after it.
  water_iterator IP = llvm::lower_bound(WaterList, NewBB, CompareMBBNumbers);
  WaterList.insert(IP, NewBB);
}

/// Split the basic block containing MI into two blocks, which are joined by
/// an unconditional branch. Update data structures and renumber blocks to
/// account for this change and return the newly created block.
MachineBasicBlock *ARMConstantIslands::splitBlockBeforeInstr(MachineInstr *MI) {
  MachineBasicBlock *OrigBB = MI->getParent();

  // Collect liveness information at MI.
  LivePhysRegs LRs(*MF->getSubtarget().getRegisterInfo());
  LRs.addLiveOuts(*OrigBB);
  auto LivenessEnd = ++MachineBasicBlock::iterator(MI).getReverse();
  for (MachineInstr &LiveMI : make_range(OrigBB->rbegin(), LivenessEnd))
    LRs.stepBackward(LiveMI);

  // Create a new MBB for the code after the OrigBB.
  MachineBasicBlock *NewBB =
      MF->CreateMachineBasicBlock(OrigBB->getBasicBlock());
  MachineFunction::iterator MBBI = ++OrigBB->getIterator();
  MF->insert(MBBI, NewBB);

  // Splice the instructions starting with MI over to NewBB.
  NewBB->splice(NewBB->end(), OrigBB, MI, OrigBB->end());

  // Add an unconditional branch from OrigBB to NewBB.
  // Note the new unconditional branch is not being recorded.
  // There doesn't seem to be meaningful DebugInfo available; this doesn't
  // correspond to anything in the source.
  unsigned Opc = isThumb ? (isThumb2 ? ARM::t2B : ARM::tB) : ARM::B;
  if (!isThumb)
    BuildMI(OrigBB, DebugLoc(), TII->get(Opc)).addMBB(NewBB);
  else
    BuildMI(OrigBB, DebugLoc(), TII->get(Opc))
        .addMBB(NewBB)
        .add(predOps(ARMCC::AL));
  ++NumSplit;

  // Update the CFG. All succs of OrigBB are now succs of NewBB.
  NewBB->transferSuccessors(OrigBB);

  // OrigBB branches to NewBB.
  OrigBB->addSuccessor(NewBB);

  // Update live-in information in the new block.
  MachineRegisterInfo &MRI = MF->getRegInfo();
  for (MCPhysReg L : LRs)
    if (!MRI.isReserved(L))
      NewBB->addLiveIn(L);

  // Update internal data structures to account for the newly inserted MBB.
  // This is almost the same as updateForInsertedWaterBlock, except that
  // the Water goes after OrigBB, not NewBB.
  MF->RenumberBlocks(NewBB);

  // Insert an entry into BBInfo to align it properly with the (newly
  // renumbered) block numbers.
  BBUtils->insert(NewBB->getNumber(), BasicBlockInfo());

  // Next, update WaterList. Specifically, we need to add OrigBB as having
  // available water after it (but not if it's already there, which happens
  // when splitting before a conditional branch that is followed by an
  // unconditional branch - in that case we want to insert NewBB).
  water_iterator IP = llvm::lower_bound(WaterList, OrigBB, CompareMBBNumbers);
  MachineBasicBlock* WaterBB = *IP;
  if (WaterBB == OrigBB)
    WaterList.insert(std::next(IP), NewBB);
  else
    WaterList.insert(IP, OrigBB);
  NewWaterList.insert(OrigBB);

  // Figure out how large the OrigBB is. As the first half of the original
  // block, it cannot contain a tablejump. The size includes
  // the new jump we added. (It should be possible to do this without
  // recounting everything, but it's very confusing, and this is rarely
  // executed.)
  BBUtils->computeBlockSize(OrigBB);

  // Figure out how large the NewBB is. As the second half of the original
  // block, it may contain a tablejump.
  BBUtils->computeBlockSize(NewBB);

  // All BBOffsets following these blocks must be modified.
  BBUtils->adjustBBOffsetsAfter(OrigBB);

  return NewBB;
}

/// getUserOffset - Compute the offset of U.MI as seen by the hardware
/// displacement computation. Update U.KnownAlignment to match its current
/// basic block location.
unsigned ARMConstantIslands::getUserOffset(CPUser &U) const {
  unsigned UserOffset = BBUtils->getOffsetOf(U.MI);

  SmallVectorImpl<BasicBlockInfo> &BBInfo = BBUtils->getBBInfo();
  const BasicBlockInfo &BBI = BBInfo[U.MI->getParent()->getNumber()];
  unsigned KnownBits = BBI.internalKnownBits();

  // The value read from PC is offset from the actual instruction address.
  UserOffset += (isThumb ? 4 : 8);
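  // For illustration: a Thumb instruction at address 0x100 reads PC as 0x104,
  // while an ARM instruction at 0x100 reads 0x108, so displacements are
  // measured from there rather than from the instruction itself.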

  // Because of inline assembly, we may not know the alignment (mod 4) of U.MI.
  // Make sure U.getMaxDisp() returns a constrained range.
  U.KnownAlignment = (KnownBits >= 2);

  // On Thumb, offsets==2 mod 4 are rounded down by the hardware for
  // purposes of the displacement computation; compensate for that here.
  // For unknown alignments, getMaxDisp() constrains the range instead.
  if (isThumb && U.KnownAlignment)
    UserOffset &= ~3u;

  return UserOffset;
}

/// isOffsetInRange - Checks whether UserOffset (the location of a constant pool
/// reference) is within MaxDisp of TrialOffset (a proposed location of a
/// constant pool entry).
/// UserOffset is computed by getUserOffset above to include PC adjustments. If
/// the mod 4 alignment of UserOffset is not known, the uncertainty must be
/// subtracted from MaxDisp instead. CPUser::getMaxDisp() does that.
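/// For example, a user at UserOffset 0x1000 with MaxDisp 1018 reaches trial
/// offsets up to 0x1000 + 1018 = 0x13FA, and with NegOk also down to
/// 0x1000 - 1018 = 0xC06.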
bool ARMConstantIslands::isOffsetInRange(unsigned UserOffset,
                                         unsigned TrialOffset, unsigned MaxDisp,
                                         bool NegativeOK, bool IsSoImm) {
  if (UserOffset <= TrialOffset) {
    // User before the Trial.
    if (TrialOffset - UserOffset <= MaxDisp)
      return true;
    // FIXME: Make use of the full range of soimm values.
  } else if (NegativeOK) {
    if (UserOffset - TrialOffset <= MaxDisp)
      return true;
    // FIXME: Make use of the full range of soimm values.
  }
  return false;
}

/// isWaterInRange - Returns true if a CPE placed after the specified
/// Water (a basic block) will be in range for the specific MI.
///
/// Compute how much the function will grow by inserting a CPE after Water.
bool ARMConstantIslands::isWaterInRange(unsigned UserOffset,
                                        MachineBasicBlock* Water, CPUser &U,
                                        unsigned &Growth) {
  BBInfoVector &BBInfo = BBUtils->getBBInfo();
  const Align CPEAlign = getCPEAlign(U.CPEMI);
  const unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPEAlign);
  unsigned NextBlockOffset;
  Align NextBlockAlignment;
  MachineFunction::const_iterator NextBlock = Water->getIterator();
  if (++NextBlock == MF->end()) {
    NextBlockOffset = BBInfo[Water->getNumber()].postOffset();
  } else {
    NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset;
    NextBlockAlignment = NextBlock->getAlignment();
  }
  unsigned Size = U.CPEMI->getOperand(2).getImm();
  unsigned CPEEnd = CPEOffset + Size;

  // The CPE may be able to hide in the alignment padding before the next
  // block. It may also cause more padding to be required if it is more aligned
  // than the next block.
  if (CPEEnd > NextBlockOffset) {
    Growth = CPEEnd - NextBlockOffset;
    // Compute the padding that would go at the end of the CPE to align the
    // next block.
    Growth += offsetToAlignment(CPEEnd, NextBlockAlignment);

    // If the CPE is to be inserted before the instruction, that will raise
    // the offset of the instruction. Also account for unknown alignment padding
    // in blocks between CPE and the user.
    if (CPEOffset < UserOffset)
      UserOffset += Growth + UnknownPadding(MF->getAlignment(), Log2(CPEAlign));
  } else
    // CPE fits in existing padding.
    Growth = 0;

  return isOffsetInRange(UserOffset, CPEOffset, U);
}

/// isCPEntryInRange - Returns true if the distance between specific MI and
/// specific ConstPool entry instruction can fit in MI's displacement field.
bool ARMConstantIslands::isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
                                          MachineInstr *CPEMI, unsigned MaxDisp,
                                          bool NegOk, bool DoDump) {
  unsigned CPEOffset = BBUtils->getOffsetOf(CPEMI);

  if (DoDump) {
    LLVM_DEBUG({
      BBInfoVector &BBInfo = BBUtils->getBBInfo();
      unsigned Block = MI->getParent()->getNumber();
      const BasicBlockInfo &BBI = BBInfo[Block];
      dbgs() << "User of CPE#" << CPEMI->getOperand(0).getImm()
             << " max delta=" << MaxDisp
             << format(" insn address=%#x", UserOffset) << " in "
             << printMBBReference(*MI->getParent()) << ": "
             << format("%#x-%x\t", BBI.Offset, BBI.postOffset()) << *MI
             << format("CPE address=%#x offset=%+d: ", CPEOffset,
                       int(CPEOffset - UserOffset));
    });
  }

  return isOffsetInRange(UserOffset, CPEOffset, MaxDisp, NegOk);
}

#ifndef NDEBUG
/// BBIsJumpedOver - Return true if the specified basic block's only predecessor
/// unconditionally branches to its only successor.
static bool BBIsJumpedOver(MachineBasicBlock *MBB) {
  if (MBB->pred_size() != 1 || MBB->succ_size() != 1)
    return false;

  MachineBasicBlock *Succ = *MBB->succ_begin();
  MachineBasicBlock *Pred = *MBB->pred_begin();
  MachineInstr *PredMI = &Pred->back();
  if (PredMI->getOpcode() == ARM::B || PredMI->getOpcode() == ARM::tB
      || PredMI->getOpcode() == ARM::t2B)
    return PredMI->getOperand(0).getMBB() == Succ;
  return false;
}
#endif // NDEBUG

/// decrementCPEReferenceCount - find the constant pool entry with index CPI
/// and instruction CPEMI, and decrement its refcount. If the refcount
/// becomes 0 remove the entry and instruction. Returns true if we removed
/// the entry, false if we didn't.
bool ARMConstantIslands::decrementCPEReferenceCount(unsigned CPI,
                                                    MachineInstr *CPEMI) {
  // Find the old entry. Eliminate it if it is no longer used.
  CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
  assert(CPE && "Unexpected!");
  if (--CPE->RefCount == 0) {
    removeDeadCPEMI(CPEMI);
    CPE->CPEMI = nullptr;
    --NumCPEs;
    return true;
  }
  return false;
}

unsigned ARMConstantIslands::getCombinedIndex(const MachineInstr *CPEMI) {
  if (CPEMI->getOperand(1).isCPI())
    return CPEMI->getOperand(1).getIndex();

  return JumpTableEntryIndices[CPEMI->getOperand(1).getIndex()];
}

/// findInRangeCPEntry - see if the currently referenced CPE is in range;
/// if not, see if a previously created clone of the CPE is in range, and if
/// so, change the data structures so the user references the clone. Returns:
/// 0 = no existing entry found
/// 1 = entry found, and there were no code insertions or deletions
/// 2 = entry found, and there were code insertions or deletions
int ARMConstantIslands::findInRangeCPEntry(CPUser& U, unsigned UserOffset) {
  MachineInstr *UserMI = U.MI;
  MachineInstr *CPEMI = U.CPEMI;

  // Check to see if the CPE is already in-range.
  if (isCPEntryInRange(UserMI, UserOffset, CPEMI, U.getMaxDisp(), U.NegOk,
                       true)) {
    LLVM_DEBUG(dbgs() << "In range\n");
    return 1;
  }

  // No. Look for previously created clones of the CPE that are in range.
  unsigned CPI = getCombinedIndex(CPEMI);
  std::vector<CPEntry> &CPEs = CPEntries[CPI];
  for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
    // We already tried this one
    if (CPEs[i].CPEMI == CPEMI)
      continue;
    // Removing CPEs can leave empty entries, skip
    if (CPEs[i].CPEMI == nullptr)
      continue;
    if (isCPEntryInRange(UserMI, UserOffset, CPEs[i].CPEMI, U.getMaxDisp(),
                         U.NegOk)) {
      LLVM_DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#"
                        << CPEs[i].CPI << "\n");
      // Point the CPUser node to the replacement
      U.CPEMI = CPEs[i].CPEMI;
      // Change the CPI in the instruction operand to refer to the clone.
      for (unsigned j = 0, e = UserMI->getNumOperands(); j != e; ++j)
        if (UserMI->getOperand(j).isCPI()) {
          UserMI->getOperand(j).setIndex(CPEs[i].CPI);
          break;
        }
      // Adjust the refcount of the clone...
      CPEs[i].RefCount++;
      // ...and the original. If we didn't remove the old entry, none of the
      // addresses changed, so we don't need another pass.
      return decrementCPEReferenceCount(CPI, CPEMI) ? 2 : 1;
    }
  }
  return 0;
}

/// getUnconditionalBrDisp - Returns the maximum displacement that can fit in
/// the specific unconditional branch instruction.
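/// For illustration: tB gives ((1 << 10) - 1) * 2 = 2046, t2B gives
/// ((1 << 23) - 1) * 2 = 16777214, and the default case (ARM::B) gives
/// ((1 << 23) - 1) * 4 = 33554428 bytes.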
static inline unsigned getUnconditionalBrDisp(int Opc) {
  switch (Opc) {
  case ARM::tB:
    return ((1<<10)-1)*2;
  case ARM::t2B:
    return ((1<<23)-1)*2;
  default:
    break;
  }

  return ((1<<23)-1)*4;
}

/// findAvailableWater - Look for an existing entry in the WaterList in which
/// we can place the CPE referenced from U so it's within range of U's MI.
/// Returns true if found, false if not. If it returns true, WaterIter
/// is set to the WaterList entry. For Thumb, prefer water that will not
/// introduce padding to water that will. To ensure that this pass
/// terminates, the CPE location for a particular CPUser is only allowed to
/// move to a lower address, so search backward from the end of the list and
/// prefer the first water that is in range.
bool ARMConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset,
                                            water_iterator &WaterIter,
                                            bool CloserWater) {
  if (WaterList.empty())
    return false;

  unsigned BestGrowth = ~0u;
  // The nearest water without splitting the UserBB is right after it.
  // If the distance is still large (we have a big BB), then we need to split it
  // if we don't converge after certain iterations. This helps the following
  // situation to converge:
  //   BB0:
  //     Big BB
  //   BB1:
  //     Constant Pool
  // When a CP access is out of range, BB0 may be used as water. However,
  // inserting islands between BB0 and BB1 makes other accesses out of range.
  MachineBasicBlock *UserBB = U.MI->getParent();
  BBInfoVector &BBInfo = BBUtils->getBBInfo();
  const Align CPEAlign = getCPEAlign(U.CPEMI);
  unsigned MinNoSplitDisp = BBInfo[UserBB->getNumber()].postOffset(CPEAlign);
  if (CloserWater && MinNoSplitDisp > U.getMaxDisp() / 2)
    return false;
  for (water_iterator IP = std::prev(WaterList.end()), B = WaterList.begin();;
       --IP) {
    MachineBasicBlock* WaterBB = *IP;
    // Check if water is in range and is either at a lower address than the
    // current "high water mark" or a new water block that was created since
    // the previous iteration by inserting an unconditional branch. In the
    // latter case, we want to allow resetting the high water mark back to
    // this new water since we haven't seen it before. Inserting branches
    // should be relatively uncommon and when it does happen, we want to be
    // sure to take advantage of it for all the CPEs near that block, so that
    // we don't insert more branches than necessary.
    // When CloserWater is true, we try to find the lowest address after (or
    // equal to) user MI's BB, regardless of padding growth.
    unsigned Growth;
    if (isWaterInRange(UserOffset, WaterBB, U, Growth) &&
        (WaterBB->getNumber() < U.HighWaterMark->getNumber() ||
         NewWaterList.count(WaterBB) || WaterBB == U.MI->getParent()) &&
        Growth < BestGrowth) {
      // This is the least amount of required padding seen so far.
      BestGrowth = Growth;
      WaterIter = IP;
      LLVM_DEBUG(dbgs() << "Found water after " << printMBBReference(*WaterBB)
                        << " Growth=" << Growth << '\n');

      if (CloserWater && WaterBB == U.MI->getParent())
        return true;
      // Keep looking unless it is perfect and we're not looking for the lowest
      // possible address.
      if (!CloserWater && BestGrowth == 0)
        return true;
    }
    if (IP == B)
      break;
  }
  return BestGrowth != ~0u;
}

/// createNewWater - No existing WaterList entry will work for
/// CPUsers[CPUserIndex], so create a place to put the CPE. The end of the
/// block is used if in range, and the conditional branch munged so control
/// flow is correct. Otherwise the block is split to create a hole with an
/// unconditional branch around it. In either case NewMBB is set to a
/// block following which the new island can be inserted (the WaterList
/// is not adjusted).
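/// A rough sketch of the two outcomes (illustrative only):
///
///   end-of-block water:             mid-block split:
///     UserMBB: ... user ...           UserMBB: ... user ... b NewMBB
///              b NewMBB               (island goes here)
///     (island goes here)              NewMBB: rest of UserMBB
///     NewMBB:  next block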
void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
                                        unsigned UserOffset,
                                        MachineBasicBlock *&NewMBB) {
  CPUser &U = CPUsers[CPUserIndex];
  MachineInstr *UserMI = U.MI;
  MachineInstr *CPEMI = U.CPEMI;
  const Align CPEAlign = getCPEAlign(CPEMI);
  MachineBasicBlock *UserMBB = UserMI->getParent();
  BBInfoVector &BBInfo = BBUtils->getBBInfo();
  const BasicBlockInfo &UserBBI = BBInfo[UserMBB->getNumber()];

  // If the block does not end in an unconditional branch already, and if the
  // end of the block is within range, make new water there. (The addition
  // below is for the unconditional branch we will be adding: 4 bytes on ARM +
  // Thumb2, 2 on Thumb1.)
  if (BBHasFallthrough(UserMBB)) {
    // Size of branch to insert.
    unsigned Delta = isThumb1 ? 2 : 4;
    // Compute the offset where the CPE will begin.
    unsigned CPEOffset = UserBBI.postOffset(CPEAlign) + Delta;

    if (isOffsetInRange(UserOffset, CPEOffset, U)) {
      LLVM_DEBUG(dbgs() << "Split at end of " << printMBBReference(*UserMBB)
                        << format(", expected CPE offset %#x\n", CPEOffset));
      NewMBB = &*++UserMBB->getIterator();
      // Add an unconditional branch from UserMBB to fallthrough block. Record
      // it for branch lengthening; this new branch will not get out of range,
      // but if the preceding conditional branch is out of range, the targets
      // will be exchanged, and the altered branch may be out of range, so the
      // machinery has to know about it.
      int UncondBr = isThumb ? ((isThumb2) ? ARM::t2B : ARM::tB) : ARM::B;
      if (!isThumb)
        BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr)).addMBB(NewMBB);
      else
        BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr))
            .addMBB(NewMBB)
            .add(predOps(ARMCC::AL));
      unsigned MaxDisp = getUnconditionalBrDisp(UncondBr);
      ImmBranches.push_back(ImmBranch(&UserMBB->back(),
                                      MaxDisp, false, UncondBr));
      BBUtils->computeBlockSize(UserMBB);
      BBUtils->adjustBBOffsetsAfter(UserMBB);
      return;
    }
  }

  // What a big block. Find a place within the block to split it. This is a
  // little tricky on Thumb1 since instructions are 2 bytes and constant pool
  // entries are 4 bytes: if instruction I references island CPE, and
  // instruction I+1 references CPE', it will not work well to put CPE as far
  // forward as possible, since then CPE' cannot immediately follow it (that
  // location is 2 bytes farther away from I+1 than CPE was from I) and we'd
  // need to create a new island. So, we make a first guess, then walk through
  // the instructions between the one currently being looked at and the
  // possible insertion point, and make sure any other instructions that
  // reference CPEs will be able to use the same island area; if not, we back
  // up the insertion point.

  // Try to split the block so it's fully aligned. Compute the latest split
  // point where we can add a 4-byte branch instruction, and then align to
  // Align which is the largest possible alignment in the function.
  const Align Align = MF->getAlignment();
  assert(Align >= CPEAlign && "Over-aligned constant pool entry");
  unsigned KnownBits = UserBBI.internalKnownBits();
  unsigned UPad = UnknownPadding(Align, KnownBits);
  unsigned BaseInsertOffset = UserOffset + U.getMaxDisp() - UPad;
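  // For illustration: with UserOffset = 0x400, U.getMaxDisp() = 4092 and
  // UPad = 2, the first guess for the split point is
  // 0x400 + 4092 - 2 = 0x13FA.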
  LLVM_DEBUG(dbgs() << format("Split in middle of big block before %#x",
                              BaseInsertOffset));

  // The 4 in the following is for the unconditional branch we'll be inserting
  // (allows for long branch on Thumb1). Alignment of the island is handled
  // inside isOffsetInRange.
  BaseInsertOffset -= 4;

  LLVM_DEBUG(dbgs() << format(", adjusted to %#x", BaseInsertOffset)
                    << " la=" << Log2(Align) << " kb=" << KnownBits
                    << " up=" << UPad << '\n');

  // This could point off the end of the block if we've already got constant
  // pool entries following this block; only the last one is in the water list.
  // Back past any possible branches (allow for a conditional and a maximally
  // long unconditional).
  if (BaseInsertOffset + 8 >= UserBBI.postOffset()) {
    // Ensure BaseInsertOffset is larger than the offset of the instruction
    // following UserMI so that the loop which searches for the split point
    // iterates at least once.
    BaseInsertOffset =
        std::max(UserBBI.postOffset() - UPad - 8,
                 UserOffset + TII->getInstSizeInBytes(*UserMI) + 1);
1396 // If the CP reference (i.e., UserOffset) falls within the first four
1397 // instructions after an IT, this recalculated BaseInsertOffset could land in
1398 // the middle of an IT block. If it does, move BaseInsertOffset to just after
1399 // the IT block. The CP entry still remains in range for the
1400 // following reasons:
1401 // 1. The initial BaseInsertOffset computed is (UserOffset +
1402 // U.getMaxDisp() - UPad).
1403 // 2. An IT block contains at most 4 instructions plus the "it" itself (18
1404 // bytes).
1405 // 3. All the relevant instructions support much larger maximum
1406 // displacements.
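// Sketch of the case being avoided (hypothetical instructions):
//   ittt  eq              ; IT block covering the next three instructions
//   ldreq r0, CPE         ; UserMI, within four instructions of the "it"
//   addeq r1, r1, #1      ; a recalculated split point landing here would
//   subeq r2, r2, #1      ; tear the block apart, so the split is moved
//                         ; past the last predicated instruction instead.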
1407 MachineBasicBlock::iterator I = UserMI;
1408 ++I;
1409 Register PredReg;
1410 for (unsigned Offset = UserOffset + TII->getInstSizeInBytes(*UserMI);
1411 I->getOpcode() != ARM::t2IT &&
1412 getITInstrPredicate(*I, PredReg) != ARMCC::AL;
1413 Offset += TII->getInstSizeInBytes(*I), I = std::next(I)) {
1414 BaseInsertOffset =
1415 std::max(BaseInsertOffset, Offset + TII->getInstSizeInBytes(*I) + 1);
1416 assert(I != UserMBB->end() && "Fell off end of block");
1417 }
1418 LLVM_DEBUG(dbgs() << format("Move inside block: %#x\n", BaseInsertOffset));
1419 }
1420 unsigned EndInsertOffset = BaseInsertOffset + 4 + UPad +
1421 CPEMI->getOperand(2).getImm();
1422 MachineBasicBlock::iterator MI = UserMI;
1423 ++MI;
1424 unsigned CPUIndex = CPUserIndex+1;
1425 unsigned NumCPUsers = CPUsers.size();
1426 MachineInstr *LastIT = nullptr;
1427 for (unsigned Offset = UserOffset + TII->getInstSizeInBytes(*UserMI);
1428 Offset < BaseInsertOffset;
1429 Offset += TII->getInstSizeInBytes(*MI), MI = std::next(MI)) {
1430 assert(MI != UserMBB->end() && "Fell off end of block");
1431 if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == &*MI) {
1432 CPUser &U = CPUsers[CPUIndex];
1433 if (!isOffsetInRange(Offset, EndInsertOffset, U)) {
1434 // Shift the insertion point by one unit of alignment so it is within reach.
1435 BaseInsertOffset -= Align.value();
1436 EndInsertOffset -= Align.value();
1437 }
1438 // This is overly conservative, as we don't account for CPEMIs being
1439 // reused within the block, but it doesn't matter much. Also assume CPEs
1440 // are added in order with alignment padding. We may eventually be able
1441 // to pack the aligned CPEs better.
1442 EndInsertOffset += U.CPEMI->getOperand(2).getImm();
1443 CPUIndex++;
1444 }
1445
1446 // Remember the last IT instruction.
1447 if (MI->getOpcode() == ARM::t2IT)
1448 LastIT = &*MI;
1449 }
1450
1451 --MI;
1452
1453 // Avoid splitting an IT block.
1454 if (LastIT) {
1455 Register PredReg;
1456 ARMCC::CondCodes CC = getITInstrPredicate(*MI, PredReg);
1457 if (CC != ARMCC::AL)
1458 MI = LastIT;
1459 }
1460
1461 // Avoid splitting a MOVW+MOVT pair with a relocation on Windows.
1462 // On Windows, this instruction pair is covered by a single
1463 // IMAGE_REL_ARM_MOV32T relocation that spans both instructions. If a
1464 // constant island is injected in between them, the relocation will clobber
1465 // whatever follows the MOVW and fail to update the MOVT instruction.
1466 // (These instructions are bundled up until right before the ConstantIslands
1467 // pass.)
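// For example (sketch):
//   movw r0, :lower16:sym   ; a single IMAGE_REL_ARM_MOV32T relocation
//   movt r0, :upper16:sym   ; spans both; an island between the two
//                           ; instructions would break the fixup.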
1468 if (STI->isTargetWindows() && isThumb && MI->getOpcode() == ARM::t2MOVTi16 &&
1469 (MI->getOperand(2).getTargetFlags() & ARMII::MO_OPTION_MASK) ==
1470 ARMII::MO_HI16) {
1471 --MI;
1472 assert(MI->getOpcode() == ARM::t2MOVi16 &&
1473 (MI->getOperand(1).getTargetFlags() & ARMII::MO_OPTION_MASK) ==
1474 ARMII::MO_LO16);
1475 }
1476
1477 // We really must not split an IT block.
1478 #ifndef NDEBUG
1479 Register PredReg;
1480 assert(!isThumb || getITInstrPredicate(*MI, PredReg) == ARMCC::AL);
1481 #endif
1482 NewMBB = splitBlockBeforeInstr(&*MI);
1483 }
1484
1485 /// handleConstantPoolUser - Analyze the specified user, checking to see if it
1486 /// is out-of-range. If so, pick up the constant pool value and move it some
1487 /// place in-range. Return true if we changed any addresses (thus must run
1488 /// another pass of branch lengthening), false otherwise.
1489 bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex,
1490 bool CloserWater) {
1491 CPUser &U = CPUsers[CPUserIndex];
1492 MachineInstr *UserMI = U.MI;
1493 MachineInstr *CPEMI = U.CPEMI;
1494 unsigned CPI = getCombinedIndex(CPEMI);
1495 unsigned Size = CPEMI->getOperand(2).getImm();
1496 // Compute this only once; it's expensive.
1497 unsigned UserOffset = getUserOffset(U);
1498
1499 // See if the current entry is within range, or there is a clone of it
1500 // in range.
1501 int result = findInRangeCPEntry(U, UserOffset);
1502 if (result == 1) return false;
1503 else if (result == 2) return true;
1504
1505 // No existing clone of this CPE is within range.
1506 // We will be generating a new clone. Get a UID for it.
1507 unsigned ID = AFI->createPICLabelUId();
1508
1509 // Look for water where we can place this CPE.
1510 MachineBasicBlock *NewIsland = MF->CreateMachineBasicBlock();
1511 MachineBasicBlock *NewMBB;
1512 water_iterator IP;
1513 if (findAvailableWater(U, UserOffset, IP, CloserWater)) {
1514 LLVM_DEBUG(dbgs() << "Found water in range\n");
1515 MachineBasicBlock *WaterBB = *IP;
1516
1517 // If the original WaterList entry was "new water" on this iteration,
1518 // propagate that to the new island. This is just keeping NewWaterList
1519 // updated to match the WaterList, which will be updated below.
1520 if (NewWaterList.erase(WaterBB))
1521 NewWaterList.insert(NewIsland);
1522
1523 // The new CPE goes before the following block (NewMBB).
1524 NewMBB = &*++WaterBB->getIterator();
1525 } else {
1526 // No water found.
1527 LLVM_DEBUG(dbgs() << "No water found\n");
1528 createNewWater(CPUserIndex, UserOffset, NewMBB);
1529
1530 // splitBlockBeforeInstr adds to WaterList, which is important when it is
1531 // called while handling branches so that the water will be seen on the
1532 // next iteration for constant pools, but in this context, we don't want
1533 // it. Check for this so it will be removed from the WaterList.
1534 // Also remove any entry from NewWaterList.
1535 MachineBasicBlock *WaterBB = &*--NewMBB->getIterator();
1536 IP = find(WaterList, WaterBB);
1537 if (IP != WaterList.end())
1538 NewWaterList.erase(WaterBB);
1539
1540 // We are adding new water. Update NewWaterList.
1541 NewWaterList.insert(NewIsland);
1542 }
1543 // Always align the new block because CP entries can be smaller than 4
1544 // bytes. Be careful not to decrease the existing alignment, e.g. NewMBB may
1545 // be an already aligned constant pool block.
1546 const Align Alignment = isThumb ? Align(2) : Align(4);
1547 if (NewMBB->getAlignment() < Alignment)
1548 NewMBB->setAlignment(Alignment);
1549
1550 // Remove the original WaterList entry; we want subsequent insertions in
1551 // this vicinity to go after the one we're about to insert. This
1552 // considerably reduces the number of times we have to move the same CPE
1553 // more than once and is also important to ensure the algorithm terminates.
1554 if (IP != WaterList.end())
1555 WaterList.erase(IP);
1556
1557 // Okay, we know we can put an island before NewMBB now, do it!
1558 MF->insert(NewMBB->getIterator(), NewIsland);
1559
1560 // Update internal data structures to account for the newly inserted MBB.
1561 updateForInsertedWaterBlock(NewIsland);
1562
1563 // Now that we have an island to add the CPE to, clone the original CPE and
1564 // add it to the island.
1565 U.HighWaterMark = NewIsland;
1566 U.CPEMI = BuildMI(NewIsland, DebugLoc(), CPEMI->getDesc())
1567 .addImm(ID)
1568 .add(CPEMI->getOperand(1))
1569 .addImm(Size);
1570 CPEntries[CPI].push_back(CPEntry(U.CPEMI, ID, 1));
1571 ++NumCPEs;
1572
1573 // Decrement the old entry, and remove it if refcount becomes 0.
1574 decrementCPEReferenceCount(CPI, CPEMI);
1575
1576 // Mark the basic block as aligned as required by the const-pool entry.
1577 NewIsland->setAlignment(getCPEAlign(U.CPEMI));
1578
1579 // Increase the size of the island block to account for the new entry.
1580 BBUtils->adjustBBSize(NewIsland, Size);
1581 BBUtils->adjustBBOffsetsAfter(&*--NewIsland->getIterator());
1582
1583 // Finally, change the CPI in the instruction operand to be ID.
1584 for (unsigned i = 0, e = UserMI->getNumOperands(); i != e; ++i)
1585 if (UserMI->getOperand(i).isCPI()) {
1586 UserMI->getOperand(i).setIndex(ID);
1587 break;
1588 }
1589
1590 LLVM_DEBUG(
1591 dbgs() << " Moved CPE to #" << ID << " CPI=" << CPI
1592 << format(" offset=%#x\n",
1593 BBUtils->getBBInfo()[NewIsland->getNumber()].Offset));
1594
1595 return true;
1596 }
1597
1598 /// removeDeadCPEMI - Remove a dead constant pool entry instruction. Update
1599 /// sizes and offsets of impacted basic blocks.
1600 void ARMConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) {
1601 MachineBasicBlock *CPEBB = CPEMI->getParent();
1602 unsigned Size = CPEMI->getOperand(2).getImm();
1603 CPEMI->eraseFromParent();
1604 BBInfoVector &BBInfo = BBUtils->getBBInfo();
1605 BBUtils->adjustBBSize(CPEBB, -Size);
1606 // All succeeding offsets have the current size value added in; fix this.
1607 if (CPEBB->empty()) {
1608 BBInfo[CPEBB->getNumber()].Size = 0;
1609
1610 // This block no longer needs to be aligned.
1611 CPEBB->setAlignment(Align(1));
1612 } else {
1613 // Entries are sorted by descending alignment, so realign from the front.
1614 CPEBB->setAlignment(getCPEAlign(&*CPEBB->begin()));
1615 }
1616
1617 BBUtils->adjustBBOffsetsAfter(CPEBB);
1618 // An island has only one predecessor BB and one successor BB. Check if
1619 // this BB's predecessor jumps directly to this BB's successor. This
1620 // shouldn't happen currently.
1621 assert(!BBIsJumpedOver(CPEBB) && "How did this happen?");
1622 // FIXME: remove the empty blocks after all the work is done?
1623 }
1624
1625 /// removeUnusedCPEntries - Remove constant pool entries whose refcounts
1626 /// are zero.
1627 bool ARMConstantIslands::removeUnusedCPEntries() {
1628 bool MadeChange = false;
1629 for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) {
1630 std::vector<CPEntry> &CPEs = CPEntries[i];
1631 for (unsigned j = 0, ee = CPEs.size(); j != ee; ++j) {
1632 if (CPEs[j].RefCount == 0 && CPEs[j].CPEMI) {
1633 removeDeadCPEMI(CPEs[j].CPEMI);
1634 CPEs[j].CPEMI = nullptr;
1635 MadeChange = true;
1636 }
1637 }
1638 }
1639 return MadeChange;
1640 }
1641
1642
1643 /// fixupImmediateBr - Fix up an immediate branch whose destination is too far
1644 /// away to fit in its displacement field.
1645 bool ARMConstantIslands::fixupImmediateBr(ImmBranch &Br) {
1646 MachineInstr *MI = Br.MI;
1647 MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();
1648
1649 // Check to see if the DestBB is already in-range.
1650 if (BBUtils->isBBInRange(MI, DestBB, Br.MaxDisp))
1651 return false;
1652
1653 if (!Br.isCond)
1654 return fixupUnconditionalBr(Br);
1655 return fixupConditionalBr(Br);
1656 }
1657
1658 /// fixupUnconditionalBr - Fix up an unconditional branch whose destination is
1659 /// too far away to fit in its displacement field. If the LR register has been
1660 /// spilled in the epilogue, then we can use BL to implement a far jump; if it
1661 /// has not, the function size was underestimated and we report a fatal error.
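/// For example (sketch, with a hypothetical label): an out-of-range
///   b   .LBB_far          ; tB reaches only about +/-2KB
/// becomes
///   bl  .LBB_far          ; tBfar, roughly +/-4MB of range
/// which is safe only because LR has already been spilled.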
1662 bool
1663 ARMConstantIslands::fixupUnconditionalBr(ImmBranch &Br) {
1664 MachineInstr *MI = Br.MI;
1665 MachineBasicBlock *MBB = MI->getParent();
1666 if (!isThumb1)
1667 llvm_unreachable("fixupUnconditionalBr is Thumb1 only!");
1668
1669 if (!AFI->isLRSpilled())
1670 report_fatal_error("underestimated function size");
1671
1672 // Use BL to implement far jump.
1673 Br.MaxDisp = (1 << 21) * 2;
1674 MI->setDesc(TII->get(ARM::tBfar));
1675 BBInfoVector &BBInfo = BBUtils->getBBInfo();
1676 BBInfo[MBB->getNumber()].Size += 2;
1677 BBUtils->adjustBBOffsetsAfter(MBB);
1678 ++NumUBrFixed;
1679
1680 LLVM_DEBUG(dbgs() << " Changed B to long jump " << *MI);
1681
1682 return true;
1683 }
1684
1685 /// fixupConditionalBr - Fix up a conditional branch whose destination is too
1686 /// far away to fit in its displacement field. It is converted to an inverse
1687 /// conditional branch + an unconditional branch to the destination.
1688 bool
1689 ARMConstantIslands::fixupConditionalBr(ImmBranch &Br) {
1690 MachineInstr *MI = Br.MI;
1691 MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();
1692
1693 // Add an unconditional branch to the destination and invert the branch
1694 // condition to jump over it:
1695 // blt L1
1696 // =>
1697 // bge L2
1698 // b L1
1699 // L2:
1700 ARMCC::CondCodes CC = (ARMCC::CondCodes)MI->getOperand(1).getImm();
1701 CC = ARMCC::getOppositeCondition(CC);
1702 Register CCReg = MI->getOperand(2).getReg();
1703
1704 // If the branch is at the end of its MBB and that has a fall-through block,
1705 // direct the updated conditional branch to the fall-through block. Otherwise,
1706 // split the MBB before the next instruction.
1707 MachineBasicBlock *MBB = MI->getParent();
1708 MachineInstr *BMI = &MBB->back();
1709 bool NeedSplit = (BMI != MI) || !BBHasFallthrough(MBB);
1710
1711 ++NumCBrFixed;
1712 if (BMI != MI) {
1713 if (std::next(MachineBasicBlock::iterator(MI)) == std::prev(MBB->end()) &&
1714 BMI->getOpcode() == Br.UncondBr) {
1715 // Last MI in the BB is an unconditional branch. Can we simply invert the
1716 // condition and swap destinations:
1717 // beq L1
1718 // b L2
1719 // =>
1720 // bne L2
1721 // b L1
1722 MachineBasicBlock *NewDest = BMI->getOperand(0).getMBB();
1723 if (BBUtils->isBBInRange(MI, NewDest, Br.MaxDisp)) {
1724 LLVM_DEBUG(
1725 dbgs() << " Invert Bcc condition and swap its destination with "
1726 << *BMI);
1727 BMI->getOperand(0).setMBB(DestBB);
1728 MI->getOperand(0).setMBB(NewDest);
1729 MI->getOperand(1).setImm(CC);
1730 return true;
1731 }
1732 }
1733 }
1734
1735 if (NeedSplit) {
1736 splitBlockBeforeInstr(MI);
1737 // No need for the branch to the next block. We're adding an unconditional
1738 // branch to the destination.
1739 int delta = TII->getInstSizeInBytes(MBB->back());
1740 BBUtils->adjustBBSize(MBB, -delta);
1741 MBB->back().eraseFromParent();
1742
1743 // The conditional successor will be swapped between the BBs after this, so
1744 // update CFG.
1745 MBB->addSuccessor(DestBB);
1746 std::next(MBB->getIterator())->removeSuccessor(DestBB);
1747
1748 // BBInfo[SplitBB].Offset is wrong temporarily, fixed below
1749 }
1750 MachineBasicBlock *NextBB = &*++MBB->getIterator();
1751
1752 LLVM_DEBUG(dbgs() << " Insert B to " << printMBBReference(*DestBB)
1753 << " also invert condition and change dest. to "
1754 << printMBBReference(*NextBB) << "\n");
1755
1756 // Insert a new conditional branch and a new unconditional branch.
1757 // Also update the ImmBranch and add a new entry for the new branch.
1758 BuildMI(MBB, DebugLoc(), TII->get(MI->getOpcode()))
1759 .addMBB(NextBB).addImm(CC).addReg(CCReg);
1760 Br.MI = &MBB->back();
1761 BBUtils->adjustBBSize(MBB, TII->getInstSizeInBytes(MBB->back()));
1762 if (isThumb)
1763 BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr))
1764 .addMBB(DestBB)
1765 .add(predOps(ARMCC::AL));
1766 else
1767 BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB);
1768 BBUtils->adjustBBSize(MBB, TII->getInstSizeInBytes(MBB->back()));
1769 unsigned MaxDisp = getUnconditionalBrDisp(Br.UncondBr);
1770 ImmBranches.push_back(ImmBranch(&MBB->back(), MaxDisp, false, Br.UncondBr));
1771
1772 // Remove the old conditional branch. It may or may not still be in MBB.
1773 BBUtils->adjustBBSize(MI->getParent(), -TII->getInstSizeInBytes(*MI));
1774 MI->eraseFromParent();
1775 BBUtils->adjustBBOffsetsAfter(MBB);
1776 return true;
1777 }
1778
1779 bool ARMConstantIslands::optimizeThumb2Instructions() {
1780 bool MadeChange = false;
1781
1782 // Shrink ADR and LDR from constantpool.
1783 for (unsigned i = 0, e = CPUsers.size(); i != e; ++i) {
1784 CPUser &U = CPUsers[i];
1785 unsigned Opcode = U.MI->getOpcode();
1786 unsigned NewOpc = 0;
1787 unsigned Scale = 1;
1788 unsigned Bits = 0;
1789 switch (Opcode) {
1790 default: break;
1791 case ARM::t2LEApcrel:
1792 if (isARMLowRegister(U.MI->getOperand(0).getReg())) {
1793 NewOpc = ARM::tLEApcrel;
1794 Bits = 8;
1795 Scale = 4;
1796 }
1797 break;
1798 case ARM::t2LDRpci:
1799 if (isARMLowRegister(U.MI->getOperand(0).getReg())) {
1800 NewOpc = ARM::tLDRpci;
1801 Bits = 8;
1802 Scale = 4;
1803 }
1804 break;
1805 }
1806
1807 if (!NewOpc)
1808 continue;
1809
1810 unsigned UserOffset = getUserOffset(U);
1811 unsigned MaxOffs = ((1 << Bits) - 1) * Scale;
1812
1813 // Be conservative with inline asm.
1814 if (!U.KnownAlignment)
1815 MaxOffs -= 2;
1816
1817 // FIXME: Check if offset is multiple of scale if scale is not 4.
1818 if (isCPEntryInRange(U.MI, UserOffset, U.CPEMI, MaxOffs, false, true)) {
1819 LLVM_DEBUG(dbgs() << "Shrink: " << *U.MI);
1820 U.MI->setDesc(TII->get(NewOpc));
1821 MachineBasicBlock *MBB = U.MI->getParent();
1822 BBUtils->adjustBBSize(MBB, -2);
1823 BBUtils->adjustBBOffsetsAfter(MBB);
1824 ++NumT2CPShrunk;
1825 MadeChange = true;
1826 }
1827 }
1828
1829 return MadeChange;
1830 }
1831
1832
1833 bool ARMConstantIslands::optimizeThumb2Branches() {
1834
1835 auto TryShrinkBranch = [this](ImmBranch &Br) {
1836 unsigned Opcode = Br.MI->getOpcode();
1837 unsigned NewOpc = 0;
1838 unsigned Scale = 1;
1839 unsigned Bits = 0;
1840 switch (Opcode) {
1841 default: break;
1842 case ARM::t2B:
1843 NewOpc = ARM::tB;
1844 Bits = 11;
1845 Scale = 2;
1846 break;
1847 case ARM::t2Bcc:
1848 NewOpc = ARM::tBcc;
1849 Bits = 8;
1850 Scale = 2;
1851 break;
1852 }
1853 if (NewOpc) {
1854 unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
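// E.g. tB: ((1 << 10) - 1) * 2 = 2046 bytes; tBcc: ((1 << 7) - 1) * 2 = 254.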
1855 MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
1856 if (BBUtils->isBBInRange(Br.MI, DestBB, MaxOffs)) {
1857 LLVM_DEBUG(dbgs() << "Shrink branch: " << *Br.MI);
1858 Br.MI->setDesc(TII->get(NewOpc));
1859 MachineBasicBlock *MBB = Br.MI->getParent();
1860 BBUtils->adjustBBSize(MBB, -2);
1861 BBUtils->adjustBBOffsetsAfter(MBB);
1862 ++NumT2BrShrunk;
1863 return true;
1864 }
1865 }
1866 return false;
1867 };
1868
1869 struct ImmCompare {
1870 MachineInstr* MI = nullptr;
1871 unsigned NewOpc = 0;
1872 };
1873
1874 auto FindCmpForCBZ = [this](ImmBranch &Br, ImmCompare &ImmCmp,
1875 MachineBasicBlock *DestBB) {
1876 ImmCmp.MI = nullptr;
1877 ImmCmp.NewOpc = 0;
1878
1879 // If the conditional branch doesn't kill CPSR, then CPSR can be live out,
1880 // so this transformation is not safe.
1881 if (!Br.MI->killsRegister(ARM::CPSR))
1882 return false;
1883
1884 Register PredReg;
1885 unsigned NewOpc = 0;
1886 ARMCC::CondCodes Pred = getInstrPredicate(*Br.MI, PredReg);
1887 if (Pred == ARMCC::EQ)
1888 NewOpc = ARM::tCBZ;
1889 else if (Pred == ARMCC::NE)
1890 NewOpc = ARM::tCBNZ;
1891 else
1892 return false;
1893
1894 // Check if the distance is within 126 bytes. Subtract 2 from the starting
1895 // offset because the cmp will be eliminated.
1896 unsigned BrOffset = BBUtils->getOffsetOf(Br.MI) + 4 - 2;
1897 BBInfoVector &BBInfo = BBUtils->getBBInfo();
1898 unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset;
1899 if (BrOffset >= DestOffset || (DestOffset - BrOffset) > 126)
1900 return false;
1901
1902 // Search backwards to find a tCMPi8
1903 auto *TRI = STI->getRegisterInfo();
1904 MachineInstr *CmpMI = findCMPToFoldIntoCBZ(Br.MI, TRI);
1905 if (!CmpMI || CmpMI->getOpcode() != ARM::tCMPi8)
1906 return false;
1907
1908 ImmCmp.MI = CmpMI;
1909 ImmCmp.NewOpc = NewOpc;
1910 return true;
1911 };
1912
1913 auto TryConvertToLE = [this](ImmBranch &Br, ImmCompare &Cmp) {
1914 if (Br.MI->getOpcode() != ARM::t2Bcc || !STI->hasLOB() ||
1915 STI->hasMinSize())
1916 return false;
1917
1918 MachineBasicBlock *MBB = Br.MI->getParent();
1919 MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
1920 if (BBUtils->getOffsetOf(MBB) < BBUtils->getOffsetOf(DestBB) ||
1921 !BBUtils->isBBInRange(Br.MI, DestBB, 4094))
1922 return false;
1923
1924 if (!DT->dominates(DestBB, MBB))
1925 return false;
1926
1927 // We queried for the CBN?Z opcode based upon the 'ExitBB', the opposite
1928 // target of Br. So now we need to reverse the condition.
1929 Cmp.NewOpc = Cmp.NewOpc == ARM::tCBZ ? ARM::tCBNZ : ARM::tCBZ;
1930
1931 MachineInstrBuilder MIB = BuildMI(*MBB, Br.MI, Br.MI->getDebugLoc(),
1932 TII->get(ARM::t2LE));
1933 // Swapped a t2Bcc for a t2LE, so no need to update the size of the block.
1934 MIB.add(Br.MI->getOperand(0));
1935 Br.MI->eraseFromParent();
1936 Br.MI = MIB;
1937 ++NumLEInserted;
1938 return true;
1939 };
1940
1941 bool MadeChange = false;
1942
1943 // The order in which branches appear in ImmBranches is approximately their
1944 // order within the function body. By visiting later branches first, we reduce
1945 // the distance between earlier forward branches and their targets, making it
1946 // more likely that the cbn?z optimization, which can only apply to forward
1947 // branches, will succeed.
1948 for (ImmBranch &Br : reverse(ImmBranches)) {
1949 MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
1950 MachineBasicBlock *MBB = Br.MI->getParent();
1951 MachineBasicBlock *ExitBB = &MBB->back() == Br.MI ?
1952 MBB->getFallThrough() :
1953 MBB->back().getOperand(0).getMBB();
1954
1955 ImmCompare Cmp;
1956 if (FindCmpForCBZ(Br, Cmp, ExitBB) && TryConvertToLE(Br, Cmp)) {
1957 DestBB = ExitBB;
1958 MadeChange = true;
1959 } else {
1960 FindCmpForCBZ(Br, Cmp, DestBB);
1961 MadeChange |= TryShrinkBranch(Br);
1962 }
1963
1964 unsigned Opcode = Br.MI->getOpcode();
1965 if ((Opcode != ARM::tBcc && Opcode != ARM::t2LE) || !Cmp.NewOpc)
1966 continue;
1967
1968 Register Reg = Cmp.MI->getOperand(0).getReg();
1969
1970 // Check for Kill flags on Reg. If they are present, remove them and set kill
1971 // on the new CBZ.
1972 auto *TRI = STI->getRegisterInfo();
1973 MachineBasicBlock::iterator KillMI = Br.MI;
1974 bool RegKilled = false;
1975 do {
1976 --KillMI;
1977 if (KillMI->killsRegister(Reg, TRI)) {
1978 KillMI->clearRegisterKills(Reg, TRI);
1979 RegKilled = true;
1980 break;
1981 }
1982 } while (KillMI != Cmp.MI);
1983
1984 // Create the new CBZ/CBNZ
1985 LLVM_DEBUG(dbgs() << "Fold: " << *Cmp.MI << " and: " << *Br.MI);
1986 MachineInstr *NewBR =
1987 BuildMI(*MBB, Br.MI, Br.MI->getDebugLoc(), TII->get(Cmp.NewOpc))
1988 .addReg(Reg, getKillRegState(RegKilled))
1989 .addMBB(DestBB, Br.MI->getOperand(0).getTargetFlags());
1990
1991 Cmp.MI->eraseFromParent();
1992
1993 if (Br.MI->getOpcode() == ARM::tBcc) {
1994 Br.MI->eraseFromParent();
1995 Br.MI = NewBR;
1996 BBUtils->adjustBBSize(MBB, -2);
1997 } else if (MBB->back().getOpcode() != ARM::t2LE) {
1998 // An LE has been generated, but it's not the terminator - that is an
1999 // unconditional branch. However, the logic has now been reversed, with the
2000 // CBN?Z being the conditional branch and the LE being the unconditional
2001 // one, so we can remove the redundant unconditional branch at the end of
2002 // the block.
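// Sketch of the overall rewrite (hypothetical operands):
//   cmp r0, #0            cbz r0, %exit
//   bne %loophead    =>   le  %loophead
//   b   %exit             (the trailing "b" is the branch removed here)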
2003 MachineInstr *LastMI = &MBB->back();
2004 BBUtils->adjustBBSize(MBB, -LastMI->getDesc().getSize());
2005 LastMI->eraseFromParent();
2006 }
2007 BBUtils->adjustBBOffsetsAfter(MBB);
2008 ++NumCBZ;
2009 MadeChange = true;
2010 }
2011
2012 return MadeChange;
2013 }
2014
2015 static bool isSimpleIndexCalc(MachineInstr &I, unsigned EntryReg,
2016 unsigned BaseReg) {
2017 if (I.getOpcode() != ARM::t2ADDrs)
2018 return false;
2019
2020 if (I.getOperand(0).getReg() != EntryReg)
2021 return false;
2022
2023 if (I.getOperand(1).getReg() != BaseReg)
2024 return false;
2025
2026 // FIXME: what about CC and IdxReg?
2027 return true;
2028 }
2029
2030 /// While trying to form a TBB/TBH instruction, we may (if the table
2031 /// doesn't immediately follow the BR_JT) need access to the start of the
2032 /// jump-table. We know one instruction that produces such a register; this
2033 /// function works out whether that definition can be preserved to the BR_JT,
2034 /// possibly by removing an intervening addition (which is usually needed to
2035 /// calculate the actual entry to jump to).
2036 bool ARMConstantIslands::preserveBaseRegister(MachineInstr *JumpMI,
2037 MachineInstr *LEAMI,
2038 unsigned &DeadSize,
2039 bool &CanDeleteLEA,
2040 bool &BaseRegKill) {
2041 if (JumpMI->getParent() != LEAMI->getParent())
2042 return false;
2043
2044 // Now we hope that we have at least these instructions in the basic block:
2045 // BaseReg = t2LEA ...
2046 // [...]
2047 // EntryReg = t2ADDrs BaseReg, ...
2048 // [...]
2049 // t2BR_JT EntryReg
2050 //
2051 // We have to be very conservative about what we recognise here though. The
2052 // main perturbing factors to watch out for are:
2053 // + Spills at any point in the chain: not direct problems but we would
2054 // expect a blocking Def of the spilled register so in practice what we
2055 // can do is limited.
2056 // + EntryReg == BaseReg: this is the one situation we should allow a Def
2057 // of BaseReg, but only if the t2ADDrs can be removed.
2058 // + Some instruction other than t2ADDrs computing the entry. Not seen in
2059 // the wild, but we should be careful.
2060 Register EntryReg = JumpMI->getOperand(0).getReg();
2061 Register BaseReg = LEAMI->getOperand(0).getReg();
2062
2063 CanDeleteLEA = true;
2064 BaseRegKill = false;
2065 MachineInstr *RemovableAdd = nullptr;
2066 MachineBasicBlock::iterator I(LEAMI);
2067 for (++I; &*I != JumpMI; ++I) {
2068 if (isSimpleIndexCalc(*I, EntryReg, BaseReg)) {
2069 RemovableAdd = &*I;
2070 break;
2071 }
2072
2073 for (unsigned K = 0, E = I->getNumOperands(); K != E; ++K) {
2074 const MachineOperand &MO = I->getOperand(K);
2075 if (!MO.isReg() || !MO.getReg())
2076 continue;
2077 if (MO.isDef() && MO.getReg() == BaseReg)
2078 return false;
2079 if (MO.isUse() && MO.getReg() == BaseReg) {
2080 BaseRegKill = BaseRegKill || MO.isKill();
2081 CanDeleteLEA = false;
2082 }
2083 }
2084 }
2085
2086 if (!RemovableAdd)
2087 return true;
2088
2089 // Check the add really is removable, and that nothing else in the block
2090 // clobbers BaseReg.
2091 for (++I; &*I != JumpMI; ++I) {
2092 for (unsigned K = 0, E = I->getNumOperands(); K != E; ++K) {
2093 const MachineOperand &MO = I->getOperand(K);
2094 if (!MO.isReg() || !MO.getReg())
2095 continue;
2096 if (MO.isDef() && MO.getReg() == BaseReg)
2097 return false;
2098 if (MO.isUse() && MO.getReg() == EntryReg)
2099 RemovableAdd = nullptr;
2100 }
2101 }
2102
2103 if (RemovableAdd) {
2104 RemovableAdd->eraseFromParent();
2105 DeadSize += isThumb2 ? 4 : 2;
2106 } else if (BaseReg == EntryReg) {
2107 // The add wasn't removable, but clobbered the base for the TBB. So we can't
2108 // preserve it.
2109 return false;
2110 }
2111
2112 // We reached the end of the block without seeing another definition of
2113 // BaseReg (except, possibly the t2ADDrs, which was removed). BaseReg can be
2114 // used in the TBB/TBH if necessary.
2115 return true;
2116 }
2117
2118 /// Returns whether CPEMI is the first instruction in the block
2119 /// immediately following JTMI (assumed to be a TBB or TBH terminator). If so,
2120 /// we can switch the first register to PC and usually remove the address
2121 /// calculation that preceded it.
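/// For example (sketch):
///   tbb [pc, r0]          ; terminator of the current block
/// .LJTI0_0:               ; CPEMI: the table itself starts the very next
///   .byte ...             ; block, so pc can serve as the table base.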
2122 static bool jumpTableFollowsTB(MachineInstr *JTMI, MachineInstr *CPEMI) {
2123 MachineFunction::iterator MBB = JTMI->getParent()->getIterator();
2124 MachineFunction *MF = MBB->getParent();
2125 ++MBB;
2126
2127 return MBB != MF->end() && !MBB->empty() && &*MBB->begin() == CPEMI;
2128 }
2129
2130 static void RemoveDeadAddBetweenLEAAndJT(MachineInstr *LEAMI,
2131 MachineInstr *JumpMI,
2132 unsigned &DeadSize) {
2133 // Remove a dead add between the LEA and JT, which used to compute EntryReg,
2134 // but the JT now uses PC. Finds the last ADD (if any) that def's EntryReg
2135 // and is not clobbered / used.
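// Sketch (hypothetical vregs):
//   %base  = t2LEApcrelJT ...
//   %entry = t2ADDrs %base, %idx, ...    ; dead once the TB[BH] uses pc
//   t2TBB_JT pc, %idx, ...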
2136 MachineInstr *RemovableAdd = nullptr;
2137 Register EntryReg = JumpMI->getOperand(0).getReg();
2138
2139 // Find the last ADD to set EntryReg
2140 MachineBasicBlock::iterator I(LEAMI);
2141 for (++I; &*I != JumpMI; ++I) {
2142 if (I->getOpcode() == ARM::t2ADDrs && I->getOperand(0).getReg() == EntryReg)
2143 RemovableAdd = &*I;
2144 }
2145
2146 if (!RemovableAdd)
2147 return;
2148
2149 // Ensure EntryReg is not clobbered or used.
2150 MachineBasicBlock::iterator J(RemovableAdd);
2151 for (++J; &*J != JumpMI; ++J) {
2152 for (unsigned K = 0, E = J->getNumOperands(); K != E; ++K) {
2153 const MachineOperand &MO = J->getOperand(K);
2154 if (!MO.isReg() || !MO.getReg())
2155 continue;
2156 if (MO.isDef() && MO.getReg() == EntryReg)
2157 return;
2158 if (MO.isUse() && MO.getReg() == EntryReg)
2159 return;
2160 }
2161 }
2162
2163 LLVM_DEBUG(dbgs() << "Removing Dead Add: " << *RemovableAdd);
2164 RemovableAdd->eraseFromParent();
2165 DeadSize += 4;
2166 }
2167
2168 /// optimizeThumb2JumpTables - Use tbb / tbh instructions to generate smaller
2169 /// jumptables when it's possible.
2170 bool ARMConstantIslands::optimizeThumb2JumpTables() {
2171 bool MadeChange = false;
2172
2173 // FIXME: After the tables are shrunk, can we get rid of some of the
2174 // constantpool tables?
2175 MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
2176 if (!MJTI) return false;
2177
2178 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
2179 for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
2180 MachineInstr *MI = T2JumpTables[i];
2181 const MCInstrDesc &MCID = MI->getDesc();
2182 unsigned NumOps = MCID.getNumOperands();
2183 unsigned JTOpIdx = NumOps - (MI->isPredicable() ? 2 : 1);
2184 MachineOperand JTOP = MI->getOperand(JTOpIdx);
2185 unsigned JTI = JTOP.getIndex();
2186 assert(JTI < JT.size());
2187
2188 bool ByteOk = true;
2189 bool HalfWordOk = true;
2190 unsigned JTOffset = BBUtils->getOffsetOf(MI) + 4;
2191 const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
2192 BBInfoVector &BBInfo = BBUtils->getBBInfo();
2193 for (unsigned j = 0, ee = JTBBs.size(); j != ee; ++j) {
2194 MachineBasicBlock *MBB = JTBBs[j];
2195 unsigned DstOffset = BBInfo[MBB->getNumber()].Offset;
2196 // Negative offset is not ok. FIXME: We should change BB layout to make
2197 // sure all the branches are forward.
2198 if (ByteOk && (DstOffset - JTOffset) > ((1<<8)-1)*2)
2199 ByteOk = false;
2200 unsigned TBHLimit = ((1<<16)-1)*2;
2201 if (HalfWordOk && (DstOffset - JTOffset) > TBHLimit)
2202 HalfWordOk = false;
2203 if (!ByteOk && !HalfWordOk)
2204 break;
2205 }
2206
2207 if (!ByteOk && !HalfWordOk)
2208 continue;
2209
2210 CPUser &User = CPUsers[JumpTableUserIndices[JTI]];
2211 MachineBasicBlock *MBB = MI->getParent();
2212 if (!MI->getOperand(0).isKill()) // FIXME: needed now?
2213 continue;
2214
2215 unsigned DeadSize = 0;
2216 bool CanDeleteLEA = false;
2217 bool BaseRegKill = false;
2218
2219 unsigned IdxReg = ~0U;
2220 bool IdxRegKill = true;
2221 if (isThumb2) {
2222 IdxReg = MI->getOperand(1).getReg();
2223 IdxRegKill = MI->getOperand(1).isKill();
2224
2225 bool PreservedBaseReg =
2226 preserveBaseRegister(MI, User.MI, DeadSize, CanDeleteLEA, BaseRegKill);
2227 if (!jumpTableFollowsTB(MI, User.CPEMI) && !PreservedBaseReg)
2228 continue;
2229 } else {
2230 // We're in thumb-1 mode, so we must have something like:
2231 // %idx = tLSLri %idx, 2
2232 // %base = tLEApcrelJT
2233 // %t = tLDRr %base, %idx
2234 Register BaseReg = User.MI->getOperand(0).getReg();
2235
2236 if (User.MI->getIterator() == User.MI->getParent()->begin())
2237 continue;
2238 MachineInstr *Shift = User.MI->getPrevNode();
2239 if (Shift->getOpcode() != ARM::tLSLri ||
2240 Shift->getOperand(3).getImm() != 2 ||
2241 !Shift->getOperand(2).isKill())
2242 continue;
2243 IdxReg = Shift->getOperand(2).getReg();
2244 Register ShiftedIdxReg = Shift->getOperand(0).getReg();
2245
2246 // It's important that IdxReg is live until the actual TBB/TBH. Most of
2247 // the range is checked later, but the LEA might still clobber it and not
2248 // actually get removed.
2249 if (BaseReg == IdxReg && !jumpTableFollowsTB(MI, User.CPEMI))
2250 continue;
2251
2252 MachineInstr *Load = User.MI->getNextNode();
2253 if (Load->getOpcode() != ARM::tLDRr)
2254 continue;
2255 if (Load->getOperand(1).getReg() != BaseReg ||
2256 Load->getOperand(2).getReg() != ShiftedIdxReg ||
2257 !Load->getOperand(2).isKill())
2258 continue;
2259
2260 // If we're in PIC mode, there should be another ADD following.
2261 auto *TRI = STI->getRegisterInfo();
2262
2263 // %base cannot be redefined after the load as it will appear before
2264 // TBB/TBH like:
2265 // %base =
2266 // %base =
2267 // tBB %base, %idx
2268 if (registerDefinedBetween(BaseReg, Load->getNextNode(), MBB->end(), TRI))
2269 continue;
2270
2271 if (isPositionIndependentOrROPI) {
2272 MachineInstr *Add = Load->getNextNode();
2273 if (Add->getOpcode() != ARM::tADDrr ||
2274 Add->getOperand(2).getReg() != BaseReg ||
2275 Add->getOperand(3).getReg() != Load->getOperand(0).getReg() ||
2276 !Add->getOperand(3).isKill())
2277 continue;
2278 if (Add->getOperand(0).getReg() != MI->getOperand(0).getReg())
2279 continue;
2280 if (registerDefinedBetween(IdxReg, Add->getNextNode(), MI, TRI))
2281 // IdxReg gets redefined in the middle of the sequence.
2282 continue;
2283 Add->eraseFromParent();
2284 DeadSize += 2;
2285 } else {
2286 if (Load->getOperand(0).getReg() != MI->getOperand(0).getReg())
2287 continue;
2288 if (registerDefinedBetween(IdxReg, Load->getNextNode(), MI, TRI))
2289 // IdxReg gets redefined in the middle of the sequence.
2290 continue;
2291 }
2292
2293 // Now safe to delete the load and lsl. The LEA will be removed later.
2294 CanDeleteLEA = true;
2295 Shift->eraseFromParent();
2296 Load->eraseFromParent();
2297 DeadSize += 4;
2298 }
2299
2300 LLVM_DEBUG(dbgs() << "Shrink JT: " << *MI);
2301 MachineInstr *CPEMI = User.CPEMI;
2302 unsigned Opc = ByteOk ? ARM::t2TBB_JT : ARM::t2TBH_JT;
2303 if (!isThumb2)
2304 Opc = ByteOk ? ARM::tTBB_JT : ARM::tTBH_JT;
2305
2306 MachineBasicBlock::iterator MI_JT = MI;
2307 MachineInstr *NewJTMI =
2308 BuildMI(*MBB, MI_JT, MI->getDebugLoc(), TII->get(Opc))
2309 .addReg(User.MI->getOperand(0).getReg(),
2310 getKillRegState(BaseRegKill))
2311 .addReg(IdxReg, getKillRegState(IdxRegKill))
2312 .addJumpTableIndex(JTI, JTOP.getTargetFlags())
2313 .addImm(CPEMI->getOperand(0).getImm());
2314 LLVM_DEBUG(dbgs() << printMBBReference(*MBB) << ": " << *NewJTMI);
2315
2316 unsigned JTOpc = ByteOk ? ARM::JUMPTABLE_TBB : ARM::JUMPTABLE_TBH;
2317 CPEMI->setDesc(TII->get(JTOpc));
2318
2319 if (jumpTableFollowsTB(MI, User.CPEMI)) {
2320 NewJTMI->getOperand(0).setReg(ARM::PC);
2321 NewJTMI->getOperand(0).setIsKill(false);
2322
2323 if (CanDeleteLEA) {
2324 if (isThumb2)
2325 RemoveDeadAddBetweenLEAAndJT(User.MI, MI, DeadSize);
2326
2327 User.MI->eraseFromParent();
2328 DeadSize += isThumb2 ? 4 : 2;
2329
2330 // The LEA was eliminated, the TBB instruction becomes the only new user
2331 // of the jump table.
2332 User.MI = NewJTMI;
2333 User.MaxDisp = 4;
2334 User.NegOk = false;
2335 User.IsSoImm = false;
2336 User.KnownAlignment = false;
2337 } else {
2338 // The LEA couldn't be eliminated, so we must add another CPUser to
2339 // record the TBB or TBH use.
2340 int CPEntryIdx = JumpTableEntryIndices[JTI];
2341 auto &CPEs = CPEntries[CPEntryIdx];
2342 auto Entry =
2343 find_if(CPEs, [&](CPEntry &E) { return E.CPEMI == User.CPEMI; });
2344 ++Entry->RefCount;
2345 CPUsers.emplace_back(CPUser(NewJTMI, User.CPEMI, 4, false, false));
2346 }
2347 }
2348
2349 unsigned NewSize = TII->getInstSizeInBytes(*NewJTMI);
2350 unsigned OrigSize = TII->getInstSizeInBytes(*MI);
2351 MI->eraseFromParent();
2352
2353 int Delta = OrigSize - NewSize + DeadSize;
2354 BBInfo[MBB->getNumber()].Size -= Delta;
2355 BBUtils->adjustBBOffsetsAfter(MBB);
2356
2357 ++NumTBs;
2358 MadeChange = true;
2359 }
2360
2361 return MadeChange;
2362 }
2363
2364 /// reorderThumb2JumpTables - Adjust the function's block layout to ensure that
2365 /// jump tables always branch forwards, since that's what tbb and tbh need.
2366 bool ARMConstantIslands::reorderThumb2JumpTables() {
2367 bool MadeChange = false;
2368
2369 MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
2370 if (!MJTI) return false;
2371
2372 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
2373 for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
2374 MachineInstr *MI = T2JumpTables[i];
2375 const MCInstrDesc &MCID = MI->getDesc();
2376 unsigned NumOps = MCID.getNumOperands();
2377 unsigned JTOpIdx = NumOps - (MI->isPredicable() ? 2 : 1);
2378 MachineOperand JTOP = MI->getOperand(JTOpIdx);
2379 unsigned JTI = JTOP.getIndex();
2380 assert(JTI < JT.size());
2381
2382 // We prefer that target blocks for the jump table come after the jump
2383 // instruction so we can use TB[BH]. Loop through the target blocks and try
2384 // to adjust them so that this holds.
2385 int JTNumber = MI->getParent()->getNumber();
2386 const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
2387 for (unsigned j = 0, ee = JTBBs.size(); j != ee; ++j) {
2388 MachineBasicBlock *MBB = JTBBs[j];
2389 int DTNumber = MBB->getNumber();
2390
2391 if (DTNumber < JTNumber) {
2392 // The destination precedes the switch. Try to move the block forward
2393 // so we have a positive offset.
2394 MachineBasicBlock *NewBB =
2395 adjustJTTargetBlockForward(MBB, MI->getParent());
2396 if (NewBB)
2397 MJTI->ReplaceMBBInJumpTable(JTI, JTBBs[j], NewBB);
2398 MadeChange = true;
2399 }
2400 }
2401 }
2402
2403 return MadeChange;
2404 }
2405
2406 MachineBasicBlock *ARMConstantIslands::
2407 adjustJTTargetBlockForward(MachineBasicBlock *BB, MachineBasicBlock *JTBB) {
2408 // If the destination block is terminated by an unconditional branch,
2409 // try to move it; otherwise, create a new block following the jump
2410 // table that branches back to the actual target. This is a very simple
2411 // heuristic. FIXME: We can definitely improve it.
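// Sketch: for a table target %target that precedes the jump block %jtbb,
// either %target is moved to just after %jtbb, or a trampoline block
//   %new:  b %target
// is inserted right after %jtbb so the table entry's offset stays positive.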
2412 MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
2413 SmallVector<MachineOperand, 4> Cond;
2414 SmallVector<MachineOperand, 4> CondPrior;
2415 MachineFunction::iterator BBi = BB->getIterator();
2416 MachineFunction::iterator OldPrior = std::prev(BBi);
2417 MachineFunction::iterator OldNext = std::next(BBi);
2418
2419 // If the block terminator isn't analyzable, don't try to move the block
2420 bool B = TII->analyzeBranch(*BB, TBB, FBB, Cond);
2421
2422 // If the block ends in an unconditional branch, move it. The prior block
2423 // has to have an analyzable terminator for us to move this one. Be paranoid
2424 // and make sure we're not trying to move the entry block of the function.
2425 if (!B && Cond.empty() && BB != &MF->front() &&
2426 !TII->analyzeBranch(*OldPrior, TBB, FBB, CondPrior)) {
2427 BB->moveAfter(JTBB);
2428 OldPrior->updateTerminator(BB);
2429 BB->updateTerminator(OldNext != MF->end() ? &*OldNext : nullptr);
2430 // Update numbering to account for the block being moved.
2431 MF->RenumberBlocks();
2432 ++NumJTMoved;
2433 return nullptr;
2434 }
2435
2436 // Create a new MBB for the code after the jump BB.
2437 MachineBasicBlock *NewBB =
2438 MF->CreateMachineBasicBlock(JTBB->getBasicBlock());
2439 MachineFunction::iterator MBBI = ++JTBB->getIterator();
2440 MF->insert(MBBI, NewBB);
2441
2442 // Copy live-in information to new block.
2443 for (const MachineBasicBlock::RegisterMaskPair &RegMaskPair : BB->liveins())
2444 NewBB->addLiveIn(RegMaskPair);
2445
2446 // Add an unconditional branch from NewBB to BB.
2447 // There doesn't seem to be meaningful DebugInfo available; this doesn't
2448 // correspond directly to anything in the source.
2449 if (isThumb2)
2450 BuildMI(NewBB, DebugLoc(), TII->get(ARM::t2B))
2451 .addMBB(BB)
2452 .add(predOps(ARMCC::AL));
2453 else
2454 BuildMI(NewBB, DebugLoc(), TII->get(ARM::tB))
2455 .addMBB(BB)
2456 .add(predOps(ARMCC::AL));
2457
2458 // Update internal data structures to account for the newly inserted MBB.
2459 MF->RenumberBlocks(NewBB);
2460
2461 // Update the CFG.
2462 NewBB->addSuccessor(BB);
2463 JTBB->replaceSuccessor(BB, NewBB);
2464
2465 ++NumJTInserted;
2466 return NewBB;
2467 }
2468
2469 /// createARMConstantIslandPass - returns an instance of the constpool
2470 /// island pass.
2471 FunctionPass *llvm::createARMConstantIslandPass() {
2472 return new ARMConstantIslands();
2473 }
2474
2475 INITIALIZE_PASS(ARMConstantIslands, "arm-cp-islands", ARM_CP_ISLANDS_OPT_NAME,
2476 false, false)
2477