1 //===-- ARMConstantIslandPass.cpp - ARM constant islands ------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains a pass that splits the constant pool up into 'islands'
11 // which are scattered throughout the function. This is required due to the
12 // limited pc-relative displacements that ARM has.
13 //
14 //===----------------------------------------------------------------------===//
15
16 #include "ARM.h"
17 #include "ARMMachineFunctionInfo.h"
18 #include "MCTargetDesc/ARMAddressingModes.h"
19 #include "Thumb2InstrInfo.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/CodeGen/MachineConstantPool.h"
25 #include "llvm/CodeGen/MachineFunctionPass.h"
26 #include "llvm/CodeGen/MachineJumpTableInfo.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/IR/DataLayout.h"
29 #include "llvm/Support/CommandLine.h"
30 #include "llvm/Support/Debug.h"
31 #include "llvm/Support/ErrorHandling.h"
32 #include "llvm/Support/Format.h"
33 #include "llvm/Support/raw_ostream.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include <algorithm>
36 using namespace llvm;
37
38 #define DEBUG_TYPE "arm-cp-islands"
39
40 STATISTIC(NumCPEs, "Number of constpool entries");
41 STATISTIC(NumSplit, "Number of uncond branches inserted");
42 STATISTIC(NumCBrFixed, "Number of cond branches fixed");
43 STATISTIC(NumUBrFixed, "Number of uncond branches fixed");
44 STATISTIC(NumTBs, "Number of table branches generated");
45 STATISTIC(NumT2CPShrunk, "Number of Thumb2 constantpool instructions shrunk");
46 STATISTIC(NumT2BrShrunk, "Number of Thumb2 immediate branches shrunk");
47 STATISTIC(NumCBZ, "Number of CBZ / CBNZ formed");
48 STATISTIC(NumJTMoved, "Number of jump table destination blocks moved");
49 STATISTIC(NumJTInserted, "Number of jump table intermediate blocks inserted");
50
51
52 static cl::opt<bool>
53 AdjustJumpTableBlocks("arm-adjust-jump-tables", cl::Hidden, cl::init(true),
54 cl::desc("Adjust basic block layout to better use TB[BH]"));
55
56 // FIXME: This option should be removed once it has received sufficient testing.
57 static cl::opt<bool>
58 AlignConstantIslands("arm-align-constant-islands", cl::Hidden, cl::init(true),
59 cl::desc("Align constant islands in code"));
60
61 /// UnknownPadding - Return the worst case padding that could result from
62 /// unknown offset bits. This does not include alignment padding caused by
63 /// known offset bits.
64 ///
65 /// @param LogAlign log2(alignment)
66 /// @param KnownBits Number of known low offset bits.
67 static inline unsigned UnknownPadding(unsigned LogAlign, unsigned KnownBits) {
68 if (KnownBits < LogAlign)
69 return (1u << LogAlign) - (1u << KnownBits);
70 return 0;
71 }
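// Worked example (illustrative): with LogAlign = 2 (4-byte alignment) and
// KnownBits = 1, the worst-case padding is (1u << 2) - (1u << 1) = 2 bytes.
// Once KnownBits >= LogAlign, no padding can be required and 0 is returned.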
72
73 namespace {
74 /// ARMConstantIslands - Due to limited PC-relative displacements, ARM
75 /// requires constant pool entries to be scattered among the instructions
76 /// inside a function. To do this, it completely ignores the normal LLVM
77 /// constant pool; instead, it places constants wherever it feels like with
78 /// special instructions.
79 ///
80 /// The terminology used in this pass includes:
81 /// Islands - Clumps of constants placed in the function.
82 /// Water - Potential places where an island could be formed.
83 /// CPE - A constant pool entry that has been placed somewhere, which
84 /// tracks a list of users.
85 class ARMConstantIslands : public MachineFunctionPass {
86 /// BasicBlockInfo - Information about the offset and size of a single
87 /// basic block.
88 struct BasicBlockInfo {
89 /// Offset - Distance from the beginning of the function to the beginning
90 /// of this basic block.
91 ///
92 /// Offsets are computed assuming worst case padding before an aligned
93 /// block. This means that subtracting basic block offsets always gives a
94 /// conservative estimate of the real distance which may be smaller.
95 ///
96 /// Because worst case padding is used, the computed offset of an aligned
97 /// block may not actually be aligned.
98 unsigned Offset;
99
100 /// Size - Size of the basic block in bytes. If the block contains
101 /// inline assembly, this is a worst case estimate.
102 ///
103 /// The size does not include any alignment padding whether from the
104 /// beginning of the block, or from an aligned jump table at the end.
105 unsigned Size;
106
107 /// KnownBits - The number of low bits in Offset that are known to be
108 /// exact. The remaining bits of Offset are an upper bound.
109 uint8_t KnownBits;
110
111 /// Unalign - When non-zero, the block contains instructions (inline asm)
112 /// of unknown size. The real size may be smaller than Size bytes by a
113 /// multiple of 1 << Unalign.
114 uint8_t Unalign;
115
116 /// PostAlign - When non-zero, the block terminator contains a .align
117 /// directive, so the end of the block is aligned to 1 << PostAlign
118 /// bytes.
119 uint8_t PostAlign;
120
121 BasicBlockInfo() : Offset(0), Size(0), KnownBits(0), Unalign(0),
122 PostAlign(0) {}
123
124 /// Compute the number of known offset bits internally to this block.
125 /// This number should be used to predict worst case padding when
126 /// splitting the block.
127 unsigned internalKnownBits() const {
128 unsigned Bits = Unalign ? Unalign : KnownBits;
129 // If the block size isn't a multiple of the known bits, assume the
130 // worst case padding.
131 if (Size & ((1u << Bits) - 1))
132 Bits = countTrailingZeros(Size);
133 return Bits;
134 }
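// Illustrative example: with Unalign = 0, KnownBits = 3, and Size = 52,
// 52 & 7 != 0, so the block size is not a multiple of 8 and the result
// drops to countTrailingZeros(52) = 2.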
135
136 /// Compute the offset immediately following this block. If LogAlign is
137 /// specified, return the offset the successor block will get if it has
138 /// this alignment.
139 unsigned postOffset(unsigned LogAlign = 0) const {
140 unsigned PO = Offset + Size;
141 unsigned LA = std::max(unsigned(PostAlign), LogAlign);
142 if (!LA)
143 return PO;
144 // Add alignment padding from the terminator.
145 return PO + UnknownPadding(LA, internalKnownBits());
146 }
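// Illustrative example: Offset = 0x100, Size = 0x32, PostAlign = 0, and a
// successor requesting LogAlign = 2 gives PO = 0x132; with only one known
// offset bit, UnknownPadding(2, 1) = 2 is added, yielding 0x134.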
147
148 /// Compute the number of known low bits of postOffset. If this block
149 /// contains inline asm, the number of known bits drops to the
150 /// instruction alignment. An aligned terminator may increase the number
151 /// of known bits.
152 /// If LogAlign is given, also consider the alignment of the next block.
153 unsigned postKnownBits(unsigned LogAlign = 0) const {
154 return std::max(std::max(unsigned(PostAlign), LogAlign),
155 internalKnownBits());
156 }
157 };
158
159 std::vector<BasicBlockInfo> BBInfo;
160
161 /// WaterList - A sorted list of basic blocks where islands could be placed
162 /// (i.e. blocks that don't fall through to the following block, due
163 /// to a return, unreachable, or unconditional branch).
164 std::vector<MachineBasicBlock*> WaterList;
165
166 /// NewWaterList - The subset of WaterList that was created since the
167 /// previous iteration by inserting unconditional branches.
168 SmallSet<MachineBasicBlock*, 4> NewWaterList;
169
170 typedef std::vector<MachineBasicBlock*>::iterator water_iterator;
171
172 /// CPUser - One user of a constant pool, keeping the machine instruction
173 /// pointer, the constant pool being referenced, and the max displacement
174 /// allowed from the instruction to the CP. The HighWaterMark records the
175 /// highest basic block where a new CPEntry can be placed. To ensure this
176 /// pass terminates, the CP entries are initially placed at the end of the
177 /// function and then move monotonically to lower addresses. The
178 /// exception to this rule is when the current CP entry for a particular
179 /// CPUser is out of range, but there is another CP entry for the same
180 /// constant value in range. We want to use the existing in-range CP
181 /// entry, but if it later moves out of range, the search for new water
182 /// should resume where it left off. The HighWaterMark is used to record
183 /// that point.
184 struct CPUser {
185 MachineInstr *MI;
186 MachineInstr *CPEMI;
187 MachineBasicBlock *HighWaterMark;
188 private:
189 unsigned MaxDisp;
190 public:
191 bool NegOk;
192 bool IsSoImm;
193 bool KnownAlignment;
194 CPUser(MachineInstr *mi, MachineInstr *cpemi, unsigned maxdisp,
195 bool neg, bool soimm)
196 : MI(mi), CPEMI(cpemi), MaxDisp(maxdisp), NegOk(neg), IsSoImm(soimm),
197 KnownAlignment(false) {
198 HighWaterMark = CPEMI->getParent();
199 }
200 /// getMaxDisp - Returns the maximum displacement supported by MI.
201 /// Correct for unknown alignment.
202 /// Conservatively subtract 2 bytes to handle weird alignment effects.
203 unsigned getMaxDisp() const {
204 return (KnownAlignment ? MaxDisp : MaxDisp - 2) - 2;
205 }
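// Illustrative example: a user with MaxDisp = 4095 is treated as reaching
// 4093 bytes when its alignment is known and 4091 bytes when it is not.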
206 };
207
208 /// CPUsers - Keep track of all of the machine instructions that use various
209 /// constant pools and their max displacement.
210 std::vector<CPUser> CPUsers;
211
212 /// CPEntry - One per constant pool entry, keeping the machine instruction
213 /// pointer, the constpool index, and the number of CPUser's which
214 /// reference this entry.
215 struct CPEntry {
216 MachineInstr *CPEMI;
217 unsigned CPI;
218 unsigned RefCount;
219 CPEntry(MachineInstr *cpemi, unsigned cpi, unsigned rc = 0)
220 : CPEMI(cpemi), CPI(cpi), RefCount(rc) {}
221 };
222
223 /// CPEntries - Keep track of all of the constant pool entry machine
224 /// instructions. For each original constpool index (i.e. those that
225 /// existed upon entry to this pass), it keeps a vector of entries.
226 /// Original elements are cloned as we go along; the clones are
227 /// put in the vector of the original element, but have distinct CPIs.
228 std::vector<std::vector<CPEntry> > CPEntries;
229
230 /// ImmBranch - One per immediate branch, keeping the machine instruction
231 /// pointer, conditional or unconditional, the max displacement,
232 /// and (if isCond is true) the corresponding unconditional branch
233 /// opcode.
234 struct ImmBranch {
235 MachineInstr *MI;
236 unsigned MaxDisp : 31;
237 bool isCond : 1;
238 int UncondBr;
239 ImmBranch(MachineInstr *mi, unsigned maxdisp, bool cond, int ubr)
240 : MI(mi), MaxDisp(maxdisp), isCond(cond), UncondBr(ubr) {}
241 };
242
243 /// ImmBranches - Keep track of all the immediate branch instructions.
244 ///
245 std::vector<ImmBranch> ImmBranches;
246
247 /// PushPopMIs - Keep track of all the Thumb push / pop instructions.
248 ///
249 SmallVector<MachineInstr*, 4> PushPopMIs;
250
251 /// T2JumpTables - Keep track of all the Thumb2 jumptable instructions.
252 SmallVector<MachineInstr*, 4> T2JumpTables;
253
254 /// HasFarJump - True if any far jump instruction has been emitted during
255 /// the branch fix up pass.
256 bool HasFarJump;
257
258 MachineFunction *MF;
259 MachineConstantPool *MCP;
260 const ARMBaseInstrInfo *TII;
261 const ARMSubtarget *STI;
262 ARMFunctionInfo *AFI;
263 bool isThumb;
264 bool isThumb1;
265 bool isThumb2;
266 public:
267 static char ID;
268 ARMConstantIslands() : MachineFunctionPass(ID) {}
269
270 bool runOnMachineFunction(MachineFunction &MF) override;
271
272 const char *getPassName() const override {
273 return "ARM constant island placement and branch shortening pass";
274 }
275
276 private:
277 void doInitialPlacement(std::vector<MachineInstr*> &CPEMIs);
278 bool BBHasFallthrough(MachineBasicBlock *MBB);
279 CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
280 unsigned getCPELogAlign(const MachineInstr *CPEMI);
281 void scanFunctionJumpTables();
282 void initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs);
283 MachineBasicBlock *splitBlockBeforeInstr(MachineInstr *MI);
284 void updateForInsertedWaterBlock(MachineBasicBlock *NewBB);
285 void adjustBBOffsetsAfter(MachineBasicBlock *BB);
286 bool decrementCPEReferenceCount(unsigned CPI, MachineInstr* CPEMI);
287 int findInRangeCPEntry(CPUser& U, unsigned UserOffset);
288 bool findAvailableWater(CPUser&U, unsigned UserOffset,
289 water_iterator &WaterIter);
290 void createNewWater(unsigned CPUserIndex, unsigned UserOffset,
291 MachineBasicBlock *&NewMBB);
292 bool handleConstantPoolUser(unsigned CPUserIndex);
293 void removeDeadCPEMI(MachineInstr *CPEMI);
294 bool removeUnusedCPEntries();
295 bool isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
296 MachineInstr *CPEMI, unsigned Disp, bool NegOk,
297 bool DoDump = false);
298 bool isWaterInRange(unsigned UserOffset, MachineBasicBlock *Water,
299 CPUser &U, unsigned &Growth);
300 bool isBBInRange(MachineInstr *MI, MachineBasicBlock *BB, unsigned Disp);
301 bool fixupImmediateBr(ImmBranch &Br);
302 bool fixupConditionalBr(ImmBranch &Br);
303 bool fixupUnconditionalBr(ImmBranch &Br);
304 bool undoLRSpillRestore();
305 bool mayOptimizeThumb2Instruction(const MachineInstr *MI) const;
306 bool optimizeThumb2Instructions();
307 bool optimizeThumb2Branches();
308 bool reorderThumb2JumpTables();
309 bool optimizeThumb2JumpTables();
310 MachineBasicBlock *adjustJTTargetBlockForward(MachineBasicBlock *BB,
311 MachineBasicBlock *JTBB);
312
313 void computeBlockSize(MachineBasicBlock *MBB);
314 unsigned getOffsetOf(MachineInstr *MI) const;
315 unsigned getUserOffset(CPUser&) const;
316 void dumpBBs();
317 void verify();
318
319 bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
320 unsigned Disp, bool NegativeOK, bool IsSoImm = false);
321 bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
322 const CPUser &U) {
323 return isOffsetInRange(UserOffset, TrialOffset,
324 U.getMaxDisp(), U.NegOk, U.IsSoImm);
325 }
326 };
327 char ARMConstantIslands::ID = 0;
328 }
329
330 /// verify - check BBOffsets, BBSizes, alignment of islands
331 void ARMConstantIslands::verify() {
332 #ifndef NDEBUG
333 for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
334 MBBI != E; ++MBBI) {
335 MachineBasicBlock *MBB = MBBI;
336 unsigned MBBId = MBB->getNumber();
337 assert(!MBBId || BBInfo[MBBId - 1].postOffset() <= BBInfo[MBBId].Offset);
338 }
339 DEBUG(dbgs() << "Verifying " << CPUsers.size() << " CP users.\n");
340 for (unsigned i = 0, e = CPUsers.size(); i != e; ++i) {
341 CPUser &U = CPUsers[i];
342 unsigned UserOffset = getUserOffset(U);
343 // Verify offset using the real max displacement without the safety
344 // adjustment.
345 if (isCPEntryInRange(U.MI, UserOffset, U.CPEMI, U.getMaxDisp()+2, U.NegOk,
346 /* DoDump = */ true)) {
347 DEBUG(dbgs() << "OK\n");
348 continue;
349 }
350 DEBUG(dbgs() << "Out of range.\n");
351 dumpBBs();
352 DEBUG(MF->dump());
353 llvm_unreachable("Constant pool entry out of range!");
354 }
355 #endif
356 }
357
358 /// print block size and offset information - debugging
359 void ARMConstantIslands::dumpBBs() {
360 DEBUG({
361 for (unsigned J = 0, E = BBInfo.size(); J !=E; ++J) {
362 const BasicBlockInfo &BBI = BBInfo[J];
363 dbgs() << format("%08x BB#%u\t", BBI.Offset, J)
364 << " kb=" << unsigned(BBI.KnownBits)
365 << " ua=" << unsigned(BBI.Unalign)
366 << " pa=" << unsigned(BBI.PostAlign)
367 << format(" size=%#x\n", BBInfo[J].Size);
368 }
369 });
370 }
371
372 /// createARMConstantIslandPass - returns an instance of the constpool
373 /// island pass.
374 FunctionPass *llvm::createARMConstantIslandPass() {
375 return new ARMConstantIslands();
376 }
377
378 bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
379 MF = &mf;
380 MCP = mf.getConstantPool();
381
382 DEBUG(dbgs() << "***** ARMConstantIslands: "
383 << MCP->getConstants().size() << " CP entries, aligned to "
384 << MCP->getConstantPoolAlignment() << " bytes *****\n");
385
386 TII = (const ARMBaseInstrInfo *)MF->getTarget()
387 .getSubtargetImpl()
388 ->getInstrInfo();
389 AFI = MF->getInfo<ARMFunctionInfo>();
390 STI = &MF->getTarget().getSubtarget<ARMSubtarget>();
391
392 isThumb = AFI->isThumbFunction();
393 isThumb1 = AFI->isThumb1OnlyFunction();
394 isThumb2 = AFI->isThumb2Function();
395
396 HasFarJump = false;
397
398 // This pass invalidates liveness information when it splits basic blocks.
399 MF->getRegInfo().invalidateLiveness();
400
401 // Renumber all of the machine basic blocks in the function, guaranteeing that
402 // the numbers agree with the position of the block in the function.
403 MF->RenumberBlocks();
404
405 // Try to reorder and otherwise adjust the block layout to make good use
406 // of the TB[BH] instructions.
407 bool MadeChange = false;
408 if (isThumb2 && AdjustJumpTableBlocks) {
409 scanFunctionJumpTables();
410 MadeChange |= reorderThumb2JumpTables();
411 // Data is out of date, so clear it. It'll be re-computed later.
412 T2JumpTables.clear();
413 // Blocks may have shifted around. Keep the numbering up to date.
414 MF->RenumberBlocks();
415 }
416
417 // Thumb1 functions containing constant pools get 4-byte alignment.
418 // This is so we can keep exact track of where the alignment padding goes.
419
420 // ARM and Thumb2 functions need to be 4-byte aligned.
421 if (!isThumb1)
422 MF->ensureAlignment(2); // 2 = log2(4)
423
424 // Perform the initial placement of the constant pool entries. To start with,
425 // we put them all at the end of the function.
426 std::vector<MachineInstr*> CPEMIs;
427 if (!MCP->isEmpty())
428 doInitialPlacement(CPEMIs);
429
430 /// The next UID to take is the first unused one.
431 AFI->initPICLabelUId(CPEMIs.size());
432
433 // Do the initial scan of the function, building up information about the
434 // sizes of each block, the location of all the water, and finding all of the
435 // constant pool users.
436 initializeFunctionInfo(CPEMIs);
437 CPEMIs.clear();
438 DEBUG(dumpBBs());
439
440
441 /// Remove dead constant pool entries.
442 MadeChange |= removeUnusedCPEntries();
443
444 // Iteratively place constant pool entries and fix up branches until there
445 // is no change.
446 unsigned NoCPIters = 0, NoBRIters = 0;
447 while (true) {
448 DEBUG(dbgs() << "Beginning CP iteration #" << NoCPIters << '\n');
449 bool CPChange = false;
450 for (unsigned i = 0, e = CPUsers.size(); i != e; ++i)
451 CPChange |= handleConstantPoolUser(i);
452 if (CPChange && ++NoCPIters > 30)
453 report_fatal_error("Constant Island pass failed to converge!");
454 DEBUG(dumpBBs());
455
456 // Clear NewWaterList now. If we split a block for branches, it should
457 // appear as "new water" for the next iteration of constant pool placement.
458 NewWaterList.clear();
459
460 DEBUG(dbgs() << "Beginning BR iteration #" << NoBRIters << '\n');
461 bool BRChange = false;
462 for (unsigned i = 0, e = ImmBranches.size(); i != e; ++i)
463 BRChange |= fixupImmediateBr(ImmBranches[i]);
464 if (BRChange && ++NoBRIters > 30)
465 report_fatal_error("Branch Fix Up pass failed to converge!");
466 DEBUG(dumpBBs());
467
468 if (!CPChange && !BRChange)
469 break;
470 MadeChange = true;
471 }
472
473 // Shrink 32-bit Thumb2 branch, load, and store instructions.
474 if (isThumb2 && !STI->prefers32BitThumb())
475 MadeChange |= optimizeThumb2Instructions();
476
477 // After a while, this might be made debug-only, but it is not expensive.
478 verify();
479
480 // If LR has been forced spilled and no far jump (i.e. BL) has been issued,
481 // undo the spill / restore of LR if possible.
482 if (isThumb && !HasFarJump && AFI->isLRSpilledForFarJump())
483 MadeChange |= undoLRSpillRestore();
484
485 // Save the mapping between original and cloned constpool entries.
486 for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) {
487 for (unsigned j = 0, je = CPEntries[i].size(); j != je; ++j) {
488 const CPEntry & CPE = CPEntries[i][j];
489 AFI->recordCPEClone(i, CPE.CPI);
490 }
491 }
492
493 DEBUG(dbgs() << '\n'; dumpBBs());
494
495 BBInfo.clear();
496 WaterList.clear();
497 CPUsers.clear();
498 CPEntries.clear();
499 ImmBranches.clear();
500 PushPopMIs.clear();
501 T2JumpTables.clear();
502
503 return MadeChange;
504 }
505
506 /// doInitialPlacement - Perform the initial placement of the constant pool
507 /// entries. To start with, we put them all at the end of the function.
508 void
509 ARMConstantIslands::doInitialPlacement(std::vector<MachineInstr*> &CPEMIs) {
510 // Create the basic block to hold the CPE's.
511 MachineBasicBlock *BB = MF->CreateMachineBasicBlock();
512 MF->push_back(BB);
513
514 // MachineConstantPool measures alignment in bytes. We measure in log2(bytes).
515 unsigned MaxAlign = Log2_32(MCP->getConstantPoolAlignment());
516
517 // Mark the basic block as required by the const-pool.
518 // If AlignConstantIslands isn't set, use 4-byte alignment for everything.
519 BB->setAlignment(AlignConstantIslands ? MaxAlign : 2);
520
521 // The function needs to be as aligned as the basic blocks. The linker may
522 // move functions around based on their alignment.
523 MF->ensureAlignment(BB->getAlignment());
524
525 // Order the entries in BB by descending alignment. That ensures correct
526 // alignment of all entries as long as BB is sufficiently aligned. Keep
527 // track of the insertion point for each alignment. We are going to bucket
528 // sort the entries as they are created.
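// Illustrative example: if entries arrive with alignments 4, 8, and 4 bytes,
// the 8-byte entry is inserted before the first 4-byte entry and the island
// ends up ordered 8, 4, 4, so every entry stays naturally aligned.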
529 SmallVector<MachineBasicBlock::iterator, 8> InsPoint(MaxAlign + 1, BB->end());
530
531 // Add all of the constants from the constant pool to the end block, use an
532 // identity mapping of CPI's to CPE's.
533 const std::vector<MachineConstantPoolEntry> &CPs = MCP->getConstants();
534
535 const DataLayout &TD = *MF->getSubtarget().getDataLayout();
536 for (unsigned i = 0, e = CPs.size(); i != e; ++i) {
537 unsigned Size = TD.getTypeAllocSize(CPs[i].getType());
538 assert(Size >= 4 && "Too small constant pool entry");
539 unsigned Align = CPs[i].getAlignment();
540 assert(isPowerOf2_32(Align) && "Invalid alignment");
541 // Verify that all constant pool entries are a multiple of their alignment.
542 // If not, we would have to pad them out so that instructions stay aligned.
543 assert((Size % Align) == 0 && "CP Entry not multiple of 4 bytes!");
544
545 // Insert CONSTPOOL_ENTRY before entries with a smaller alignment.
546 unsigned LogAlign = Log2_32(Align);
547 MachineBasicBlock::iterator InsAt = InsPoint[LogAlign];
548 MachineInstr *CPEMI =
549 BuildMI(*BB, InsAt, DebugLoc(), TII->get(ARM::CONSTPOOL_ENTRY))
550 .addImm(i).addConstantPoolIndex(i).addImm(Size);
551 CPEMIs.push_back(CPEMI);
552
553 // Ensure that future entries with higher alignment get inserted before
554 // CPEMI. This is bucket sort with iterators.
555 for (unsigned a = LogAlign + 1; a <= MaxAlign; ++a)
556 if (InsPoint[a] == InsAt)
557 InsPoint[a] = CPEMI;
558
559 // Add a new CPEntry, but no corresponding CPUser yet.
560 CPEntries.emplace_back(1, CPEntry(CPEMI, i));
561 ++NumCPEs;
562 DEBUG(dbgs() << "Moved CPI#" << i << " to end of function, size = "
563 << Size << ", align = " << Align <<'\n');
564 }
565 DEBUG(BB->dump());
566 }
567
568 /// BBHasFallthrough - Return true if the specified basic block can fallthrough
569 /// into the block immediately after it.
570 bool ARMConstantIslands::BBHasFallthrough(MachineBasicBlock *MBB) {
571 // Get the next machine basic block in the function.
572 MachineFunction::iterator MBBI = MBB;
573 // Can't fall off end of function.
574 if (std::next(MBBI) == MBB->getParent()->end())
575 return false;
576
577 MachineBasicBlock *NextBB = std::next(MBBI);
578 if (std::find(MBB->succ_begin(), MBB->succ_end(), NextBB) == MBB->succ_end())
579 return false;
580
581 // Try to analyze the end of the block. A potential fallthrough may already
582 // have an unconditional branch for whatever reason.
583 MachineBasicBlock *TBB, *FBB;
584 SmallVector<MachineOperand, 4> Cond;
585 bool TooDifficult = TII->AnalyzeBranch(*MBB, TBB, FBB, Cond);
586 return TooDifficult || FBB == nullptr;
587 }
588
589 /// findConstPoolEntry - Given the constpool index and CONSTPOOL_ENTRY MI,
590 /// look up the corresponding CPEntry.
591 ARMConstantIslands::CPEntry
592 *ARMConstantIslands::findConstPoolEntry(unsigned CPI,
593 const MachineInstr *CPEMI) {
594 std::vector<CPEntry> &CPEs = CPEntries[CPI];
595 // Number of entries per constpool index should be small, just do a
596 // linear search.
597 for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
598 if (CPEs[i].CPEMI == CPEMI)
599 return &CPEs[i];
600 }
601 return nullptr;
602 }
603
604 /// getCPELogAlign - Returns the required alignment of the constant pool entry
605 /// represented by CPEMI. Alignment is measured in log2(bytes) units.
606 unsigned ARMConstantIslands::getCPELogAlign(const MachineInstr *CPEMI) {
607 assert(CPEMI && CPEMI->getOpcode() == ARM::CONSTPOOL_ENTRY);
608
609 // Everything is 4-byte aligned unless AlignConstantIslands is set.
610 if (!AlignConstantIslands)
611 return 2;
612
613 unsigned CPI = CPEMI->getOperand(1).getIndex();
614 assert(CPI < MCP->getConstants().size() && "Invalid constant pool index.");
615 unsigned Align = MCP->getConstants()[CPI].getAlignment();
616 assert(isPowerOf2_32(Align) && "Invalid CPE alignment");
617 return Log2_32(Align);
618 }
619
620 /// scanFunctionJumpTables - Do a scan of the function, building up
621 /// information about the sizes of each block and the locations of all
622 /// the jump tables.
623 void ARMConstantIslands::scanFunctionJumpTables() {
624 for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
625 MBBI != E; ++MBBI) {
626 MachineBasicBlock &MBB = *MBBI;
627
628 for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
629 I != E; ++I)
630 if (I->isBranch() && I->getOpcode() == ARM::t2BR_JT)
631 T2JumpTables.push_back(I);
632 }
633 }
634
635 /// initializeFunctionInfo - Do the initial scan of the function, building up
636 /// information about the sizes of each block, the location of all the water,
637 /// and finding all of the constant pool users.
638 void ARMConstantIslands::
639 initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
640 BBInfo.clear();
641 BBInfo.resize(MF->getNumBlockIDs());
642
643 // First thing, compute the size of all basic blocks, and see if the function
644 // has any inline assembly in it. If so, we have to be conservative about
645 // alignment assumptions, as we don't know for sure the size of any
646 // instructions in the inline assembly.
647 for (MachineFunction::iterator I = MF->begin(), E = MF->end(); I != E; ++I)
648 computeBlockSize(I);
649
650 // The known bits of the entry block offset are determined by the function
651 // alignment.
652 BBInfo.front().KnownBits = MF->getAlignment();
653
654 // Compute block offsets and known bits.
655 adjustBBOffsetsAfter(MF->begin());
656
657 // Now go back through the instructions and build up our data structures.
658 for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
659 MBBI != E; ++MBBI) {
660 MachineBasicBlock &MBB = *MBBI;
661
662 // If this block doesn't fall through into the next MBB, then this is
663 // 'water' where a constant pool island could be placed.
664 if (!BBHasFallthrough(&MBB))
665 WaterList.push_back(&MBB);
666
667 for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
668 I != E; ++I) {
669 if (I->isDebugValue())
670 continue;
671
672 int Opc = I->getOpcode();
673 if (I->isBranch()) {
674 bool isCond = false;
675 unsigned Bits = 0;
676 unsigned Scale = 1;
677 int UOpc = Opc;
678 switch (Opc) {
679 default:
680 continue; // Ignore other JT branches
681 case ARM::t2BR_JT:
682 T2JumpTables.push_back(I);
683 continue; // Does not get an entry in ImmBranches
684 case ARM::Bcc:
685 isCond = true;
686 UOpc = ARM::B;
687 // Fallthrough
688 case ARM::B:
689 Bits = 24;
690 Scale = 4;
691 break;
692 case ARM::tBcc:
693 isCond = true;
694 UOpc = ARM::tB;
695 Bits = 8;
696 Scale = 2;
697 break;
698 case ARM::tB:
699 Bits = 11;
700 Scale = 2;
701 break;
702 case ARM::t2Bcc:
703 isCond = true;
704 UOpc = ARM::t2B;
705 Bits = 20;
706 Scale = 2;
707 break;
708 case ARM::t2B:
709 Bits = 24;
710 Scale = 2;
711 break;
712 }
713
714 // Record this immediate branch.
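// For example, ARM::B (Bits = 24, Scale = 4) yields ((1 << 23) - 1) * 4,
// roughly +/-32MB, while ARM::tB (Bits = 11, Scale = 2) yields only
// ((1 << 10) - 1) * 2 = 2046 bytes.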
715 unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
716 ImmBranches.push_back(ImmBranch(I, MaxOffs, isCond, UOpc));
717 }
718
719 if (Opc == ARM::tPUSH || Opc == ARM::tPOP_RET)
720 PushPopMIs.push_back(I);
721
722 if (Opc == ARM::CONSTPOOL_ENTRY)
723 continue;
724
725 // Scan the instructions for constant pool operands.
726 for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op)
727 if (I->getOperand(op).isCPI()) {
728 // We found one. The addressing mode tells us the max displacement
729 // from the PC that this instruction permits.
730
731 // Basic size info comes from the TSFlags field.
732 unsigned Bits = 0;
733 unsigned Scale = 1;
734 bool NegOk = false;
735 bool IsSoImm = false;
736
737 switch (Opc) {
738 default:
739 llvm_unreachable("Unknown addressing mode for CP reference!");
740
741 // Taking the address of a CP entry.
742 case ARM::LEApcrel:
743 // This takes a SoImm, which is 8 bit immediate rotated. We'll
744 // pretend the maximum offset is 255 * 4. Since each instruction is
745 // 4 bytes wide, this is always correct. We'll check for other
746 // displacements that fit in a SoImm as well.
747 Bits = 8;
748 Scale = 4;
749 NegOk = true;
750 IsSoImm = true;
751 break;
752 case ARM::t2LEApcrel:
753 Bits = 12;
754 NegOk = true;
755 break;
756 case ARM::tLEApcrel:
757 Bits = 8;
758 Scale = 4;
759 break;
760
761 case ARM::LDRBi12:
762 case ARM::LDRi12:
763 case ARM::LDRcp:
764 case ARM::t2LDRpci:
765 Bits = 12; // +-offset_12
766 NegOk = true;
767 break;
768
769 case ARM::tLDRpci:
770 Bits = 8;
771 Scale = 4; // +(offset_8*4)
772 break;
773
774 case ARM::VLDRD:
775 case ARM::VLDRS:
776 Bits = 8;
777 Scale = 4; // +-(offset_8*4)
778 NegOk = true;
779 break;
780 }
781
782 // Remember that this is a user of a CP entry.
783 unsigned CPI = I->getOperand(op).getIndex();
784 MachineInstr *CPEMI = CPEMIs[CPI];
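// For example, tLDRpci (Bits = 8, Scale = 4) allows ((1 << 8) - 1) * 4 =
// 1020 bytes, while t2LDRpci (Bits = 12, Scale = 1) allows 4095 bytes.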
785 unsigned MaxOffs = ((1 << Bits)-1) * Scale;
786 CPUsers.push_back(CPUser(I, CPEMI, MaxOffs, NegOk, IsSoImm));
787
788 // Increment corresponding CPEntry reference count.
789 CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
790 assert(CPE && "Cannot find a corresponding CPEntry!");
791 CPE->RefCount++;
792
793 // Instructions can only use one CP entry, don't bother scanning the
794 // rest of the operands.
795 break;
796 }
797 }
798 }
799 }
800
801 /// computeBlockSize - Compute the size and some alignment information for MBB.
802 /// This function updates BBInfo directly.
803 void ARMConstantIslands::computeBlockSize(MachineBasicBlock *MBB) {
804 BasicBlockInfo &BBI = BBInfo[MBB->getNumber()];
805 BBI.Size = 0;
806 BBI.Unalign = 0;
807 BBI.PostAlign = 0;
808
809 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
810 ++I) {
811 BBI.Size += TII->GetInstSizeInBytes(I);
812 // For inline asm, GetInstSizeInBytes returns a conservative estimate.
813 // The actual size may be smaller, but still a multiple of the instr size.
814 if (I->isInlineAsm())
815 BBI.Unalign = isThumb ? 1 : 2;
816 // Also consider instructions that may be shrunk later.
817 else if (isThumb && mayOptimizeThumb2Instruction(I))
818 BBI.Unalign = 1;
819 }
820
821 // tBR_JTr contains a .align 2 directive.
822 if (!MBB->empty() && MBB->back().getOpcode() == ARM::tBR_JTr) {
823 BBI.PostAlign = 2;
824 MBB->getParent()->ensureAlignment(2);
825 }
826 }
827
828 /// getOffsetOf - Return the current offset of the specified machine instruction
829 /// from the start of the function. This offset changes as stuff is moved
830 /// around inside the function.
831 unsigned ARMConstantIslands::getOffsetOf(MachineInstr *MI) const {
832 MachineBasicBlock *MBB = MI->getParent();
833
834 // The offset is composed of two things: the sum of the sizes of all MBB's
835 // before this instruction's block, and the offset from the start of the block
836 // it is in.
837 unsigned Offset = BBInfo[MBB->getNumber()].Offset;
838
839 // Sum instructions before MI in MBB.
840 for (MachineBasicBlock::iterator I = MBB->begin(); &*I != MI; ++I) {
841 assert(I != MBB->end() && "Didn't find MI in its own basic block?");
842 Offset += TII->GetInstSizeInBytes(I);
843 }
844 return Offset;
845 }
846
847 /// CompareMBBNumbers - Little predicate function to sort the WaterList by MBB
848 /// ID.
849 static bool CompareMBBNumbers(const MachineBasicBlock *LHS,
850 const MachineBasicBlock *RHS) {
851 return LHS->getNumber() < RHS->getNumber();
852 }
853
854 /// updateForInsertedWaterBlock - When a block is newly inserted into the
855 /// machine function, it upsets all of the block numbers. Renumber the blocks
856 /// and update the arrays that parallel this numbering.
857 void ARMConstantIslands::updateForInsertedWaterBlock(MachineBasicBlock *NewBB) {
858 // Renumber the MBB's to keep them consecutive.
859 NewBB->getParent()->RenumberBlocks(NewBB);
860
861 // Insert an entry into BBInfo to align it properly with the (newly
862 // renumbered) block numbers.
863 BBInfo.insert(BBInfo.begin() + NewBB->getNumber(), BasicBlockInfo());
864
865 // Next, update WaterList. Specifically, we need to add NewMBB as having
866 // available water after it.
867 water_iterator IP =
868 std::lower_bound(WaterList.begin(), WaterList.end(), NewBB,
869 CompareMBBNumbers);
870 WaterList.insert(IP, NewBB);
871 }
872
873
874 /// Split the basic block containing MI into two blocks, which are joined by
875 /// an unconditional branch. Update data structures and renumber blocks to
876 /// account for this change and returns the newly created block.
877 MachineBasicBlock *ARMConstantIslands::splitBlockBeforeInstr(MachineInstr *MI) {
878 MachineBasicBlock *OrigBB = MI->getParent();
879
880 // Create a new MBB for the code after the OrigBB.
881 MachineBasicBlock *NewBB =
882 MF->CreateMachineBasicBlock(OrigBB->getBasicBlock());
883 MachineFunction::iterator MBBI = OrigBB; ++MBBI;
884 MF->insert(MBBI, NewBB);
885
886 // Splice the instructions starting with MI over to NewBB.
887 NewBB->splice(NewBB->end(), OrigBB, MI, OrigBB->end());
888
889 // Add an unconditional branch from OrigBB to NewBB.
890 // Note the new unconditional branch is not being recorded.
891 // There doesn't seem to be meaningful DebugInfo available; this doesn't
892 // correspond to anything in the source.
893 unsigned Opc = isThumb ? (isThumb2 ? ARM::t2B : ARM::tB) : ARM::B;
894 if (!isThumb)
895 BuildMI(OrigBB, DebugLoc(), TII->get(Opc)).addMBB(NewBB);
896 else
897 BuildMI(OrigBB, DebugLoc(), TII->get(Opc)).addMBB(NewBB)
898 .addImm(ARMCC::AL).addReg(0);
899 ++NumSplit;
900
901 // Update the CFG. All succs of OrigBB are now succs of NewBB.
902 NewBB->transferSuccessors(OrigBB);
903
904 // OrigBB branches to NewBB.
905 OrigBB->addSuccessor(NewBB);
906
907 // Update internal data structures to account for the newly inserted MBB.
908 // This is almost the same as updateForInsertedWaterBlock, except that
909 // the Water goes after OrigBB, not NewBB.
910 MF->RenumberBlocks(NewBB);
911
912 // Insert an entry into BBInfo to align it properly with the (newly
913 // renumbered) block numbers.
914 BBInfo.insert(BBInfo.begin() + NewBB->getNumber(), BasicBlockInfo());
915
916 // Next, update WaterList. Specifically, we need to add OrigMBB as having
917 // available water after it (but not if it's already there, which happens
918 // when splitting before a conditional branch that is followed by an
919 // unconditional branch - in that case we want to insert NewBB).
920 water_iterator IP =
921 std::lower_bound(WaterList.begin(), WaterList.end(), OrigBB,
922 CompareMBBNumbers);
923 MachineBasicBlock* WaterBB = *IP;
924 if (WaterBB == OrigBB)
925 WaterList.insert(std::next(IP), NewBB);
926 else
927 WaterList.insert(IP, OrigBB);
928 NewWaterList.insert(OrigBB);
929
930 // Figure out how large the OrigBB is. As the first half of the original
931 // block, it cannot contain a tablejump. The size includes
932 // the new jump we added. (It should be possible to do this without
933 // recounting everything, but it's very confusing, and this is rarely
934 // executed.)
935 computeBlockSize(OrigBB);
936
937 // Figure out how large the NewMBB is. As the second half of the original
938 // block, it may contain a tablejump.
939 computeBlockSize(NewBB);
940
941 // All BBOffsets following these blocks must be modified.
942 adjustBBOffsetsAfter(OrigBB);
943
944 return NewBB;
945 }
946
947 /// getUserOffset - Compute the offset of U.MI as seen by the hardware
948 /// displacement computation. Update U.KnownAlignment to match its current
949 /// basic block location.
950 unsigned ARMConstantIslands::getUserOffset(CPUser &U) const {
951 unsigned UserOffset = getOffsetOf(U.MI);
952 const BasicBlockInfo &BBI = BBInfo[U.MI->getParent()->getNumber()];
953 unsigned KnownBits = BBI.internalKnownBits();
954
955 // The value read from PC is offset from the actual instruction address.
956 UserOffset += (isThumb ? 4 : 8);
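// For example, a Thumb load at offset 0x100 computes its base from
// PC = 0x104; the same instruction in ARM mode would read PC as 0x108.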
957
958 // Because of inline assembly, we may not know the alignment (mod 4) of U.MI.
959 // Make sure U.getMaxDisp() returns a constrained range.
960 U.KnownAlignment = (KnownBits >= 2);
961
962 // On Thumb, offsets==2 mod 4 are rounded down by the hardware for
963 // purposes of the displacement computation; compensate for that here.
964 // For unknown alignments, getMaxDisp() constrains the range instead.
965 if (isThumb && U.KnownAlignment)
966 UserOffset &= ~3u;
967
968 return UserOffset;
969 }
970
971 /// isOffsetInRange - Checks whether UserOffset (the location of a constant pool
972 /// reference) is within MaxDisp of TrialOffset (a proposed location of a
973 /// constant pool entry).
974 /// UserOffset is computed by getUserOffset above to include PC adjustments. If
975 /// the mod 4 alignment of UserOffset is not known, the uncertainty must be
976 /// subtracted from MaxDisp instead. CPUser::getMaxDisp() does that.
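/// For example (illustrative): UserOffset = 0x200 and TrialOffset = 0x2f0 are
/// in range for MaxDisp = 0x100, while TrialOffset = 0x180 is only reachable
/// when NegativeOK is true.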
977 bool ARMConstantIslands::isOffsetInRange(unsigned UserOffset,
978 unsigned TrialOffset, unsigned MaxDisp,
979 bool NegativeOK, bool IsSoImm) {
980 if (UserOffset <= TrialOffset) {
981 // User before the Trial.
982 if (TrialOffset - UserOffset <= MaxDisp)
983 return true;
984 // FIXME: Make use of the full range of soimm values.
985 } else if (NegativeOK) {
986 if (UserOffset - TrialOffset <= MaxDisp)
987 return true;
988 // FIXME: Make use of the full range of soimm values.
989 }
990 return false;
991 }
992
993 /// isWaterInRange - Returns true if a CPE placed after the specified
994 /// Water (a basic block) will be in range for the specific MI.
995 ///
996 /// Compute how much the function will grow by inserting a CPE after Water.
997 bool ARMConstantIslands::isWaterInRange(unsigned UserOffset,
998 MachineBasicBlock* Water, CPUser &U,
999 unsigned &Growth) {
1000 unsigned CPELogAlign = getCPELogAlign(U.CPEMI);
1001 unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPELogAlign);
1002 unsigned NextBlockOffset, NextBlockAlignment;
1003 MachineFunction::const_iterator NextBlock = Water;
1004 if (++NextBlock == MF->end()) {
1005 NextBlockOffset = BBInfo[Water->getNumber()].postOffset();
1006 NextBlockAlignment = 0;
1007 } else {
1008 NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset;
1009 NextBlockAlignment = NextBlock->getAlignment();
1010 }
1011 unsigned Size = U.CPEMI->getOperand(2).getImm();
1012 unsigned CPEEnd = CPEOffset + Size;
1013
1014 // The CPE may be able to hide in the alignment padding before the next
1015 // block. It may also cause more padding to be required if it is more aligned
1016 // than the next block.
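// Illustrative example: if CPEEnd = 0x118 and the next block starts at 0x110
// with 4-byte alignment, Growth is 8 bytes plus any padding needed to
// realign 0x118 (none here).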
1017 if (CPEEnd > NextBlockOffset) {
1018 Growth = CPEEnd - NextBlockOffset;
1019 // Compute the padding that would go at the end of the CPE to align the next
1020 // block.
1021 Growth += OffsetToAlignment(CPEEnd, 1u << NextBlockAlignment);
1022
1023 // If the CPE is to be inserted before the instruction, that will raise
1024 // the offset of the instruction. Also account for unknown alignment padding
1025 // in blocks between CPE and the user.
1026 if (CPEOffset < UserOffset)
1027 UserOffset += Growth + UnknownPadding(MF->getAlignment(), CPELogAlign);
1028 } else
1029 // CPE fits in existing padding.
1030 Growth = 0;
1031
1032 return isOffsetInRange(UserOffset, CPEOffset, U);
1033 }
1034
1035 /// isCPEntryInRange - Returns true if the distance between specific MI and
1036 /// specific ConstPool entry instruction can fit in MI's displacement field.
1037 bool ARMConstantIslands::isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
1038 MachineInstr *CPEMI, unsigned MaxDisp,
1039 bool NegOk, bool DoDump) {
1040 unsigned CPEOffset = getOffsetOf(CPEMI);
1041
1042 if (DoDump) {
1043 DEBUG({
1044 unsigned Block = MI->getParent()->getNumber();
1045 const BasicBlockInfo &BBI = BBInfo[Block];
1046 dbgs() << "User of CPE#" << CPEMI->getOperand(0).getImm()
1047 << " max delta=" << MaxDisp
1048 << format(" insn address=%#x", UserOffset)
1049 << " in BB#" << Block << ": "
1050 << format("%#x-%x\t", BBI.Offset, BBI.postOffset()) << *MI
1051 << format("CPE address=%#x offset=%+d: ", CPEOffset,
1052 int(CPEOffset-UserOffset));
1053 });
1054 }
1055
1056 return isOffsetInRange(UserOffset, CPEOffset, MaxDisp, NegOk);
1057 }
1058
1059 #ifndef NDEBUG
1060 /// BBIsJumpedOver - Return true if the specified basic block's only predecessor
1061 /// unconditionally branches to its only successor.
1062 static bool BBIsJumpedOver(MachineBasicBlock *MBB) {
1063 if (MBB->pred_size() != 1 || MBB->succ_size() != 1)
1064 return false;
1065
1066 MachineBasicBlock *Succ = *MBB->succ_begin();
1067 MachineBasicBlock *Pred = *MBB->pred_begin();
1068 MachineInstr *PredMI = &Pred->back();
1069 if (PredMI->getOpcode() == ARM::B || PredMI->getOpcode() == ARM::tB
1070 || PredMI->getOpcode() == ARM::t2B)
1071 return PredMI->getOperand(0).getMBB() == Succ;
1072 return false;
1073 }
1074 #endif // NDEBUG
1075
1076 void ARMConstantIslands::adjustBBOffsetsAfter(MachineBasicBlock *BB) {
1077 unsigned BBNum = BB->getNumber();
1078 for(unsigned i = BBNum + 1, e = MF->getNumBlockIDs(); i < e; ++i) {
1079 // Get the offset and known bits at the end of the layout predecessor.
1080 // Include the alignment of the current block.
1081 unsigned LogAlign = MF->getBlockNumbered(i)->getAlignment();
1082 unsigned Offset = BBInfo[i - 1].postOffset(LogAlign);
1083 unsigned KnownBits = BBInfo[i - 1].postKnownBits(LogAlign);
1084
1085 // This is where block i begins. Stop if the offset is already correct,
1086 // and we have updated 2 blocks. This is the maximum number of blocks
1087 // changed before calling this function.
1088 if (i > BBNum + 2 &&
1089 BBInfo[i].Offset == Offset &&
1090 BBInfo[i].KnownBits == KnownBits)
1091 break;
1092
1093 BBInfo[i].Offset = Offset;
1094 BBInfo[i].KnownBits = KnownBits;
1095 }
1096 }
1097
1098 /// decrementCPEReferenceCount - find the constant pool entry with index CPI
1099 /// and instruction CPEMI, and decrement its refcount. If the refcount
1100 /// becomes 0 remove the entry and instruction. Returns true if we removed
1101 /// the entry, false if we didn't.
1102
1103 bool ARMConstantIslands::decrementCPEReferenceCount(unsigned CPI,
1104 MachineInstr *CPEMI) {
1105 // Find the old entry. Eliminate it if it is no longer used.
1106 CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
1107 assert(CPE && "Unexpected!");
1108 if (--CPE->RefCount == 0) {
1109 removeDeadCPEMI(CPEMI);
1110 CPE->CPEMI = nullptr;
1111 --NumCPEs;
1112 return true;
1113 }
1114 return false;
1115 }
1116
1117 /// findInRangeCPEntry - see if the currently referenced CPE is in range;
1118 /// if not, see if an in-range clone of the CPE is in range, and if so,
1119 /// change the data structures so the user references the clone. Returns:
1120 /// 0 = no existing entry found
1121 /// 1 = entry found, and there were no code insertions or deletions
1122 /// 2 = entry found, and there were code insertions or deletions
1123 int ARMConstantIslands::findInRangeCPEntry(CPUser& U, unsigned UserOffset)
1124 {
1125 MachineInstr *UserMI = U.MI;
1126 MachineInstr *CPEMI = U.CPEMI;
1127
1128 // Check to see if the CPE is already in-range.
1129 if (isCPEntryInRange(UserMI, UserOffset, CPEMI, U.getMaxDisp(), U.NegOk,
1130 true)) {
1131 DEBUG(dbgs() << "In range\n");
1132 return 1;
1133 }
1134
1135 // No. Look for previously created clones of the CPE that are in range.
1136 unsigned CPI = CPEMI->getOperand(1).getIndex();
1137 std::vector<CPEntry> &CPEs = CPEntries[CPI];
1138 for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
1139 // We already tried this one
1140 if (CPEs[i].CPEMI == CPEMI)
1141 continue;
1142 // Removing CPEs can leave empty entries, skip
1143 if (CPEs[i].CPEMI == nullptr)
1144 continue;
1145 if (isCPEntryInRange(UserMI, UserOffset, CPEs[i].CPEMI, U.getMaxDisp(),
1146 U.NegOk)) {
1147 DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#"
1148 << CPEs[i].CPI << "\n");
1149 // Point the CPUser node to the replacement
1150 U.CPEMI = CPEs[i].CPEMI;
1151 // Change the CPI in the instruction operand to refer to the clone.
1152 for (unsigned j = 0, e = UserMI->getNumOperands(); j != e; ++j)
1153 if (UserMI->getOperand(j).isCPI()) {
1154 UserMI->getOperand(j).setIndex(CPEs[i].CPI);
1155 break;
1156 }
1157 // Adjust the refcount of the clone...
1158 CPEs[i].RefCount++;
1159 // ...and the original. If we didn't remove the old entry, none of the
1160 // addresses changed, so we don't need another pass.
1161 return decrementCPEReferenceCount(CPI, CPEMI) ? 2 : 1;
1162 }
1163 }
1164 return 0;
1165 }
1166
1167 /// getUnconditionalBrDisp - Returns the maximum displacement that can fit in
1168 /// the specific unconditional branch instruction.
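/// For example, tB reaches ((1 << 10) - 1) * 2 = 2046 bytes, t2B roughly
/// +/-16MB, and the ARM B default case roughly +/-32MB.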
1169 static inline unsigned getUnconditionalBrDisp(int Opc) {
1170 switch (Opc) {
1171 case ARM::tB:
1172 return ((1<<10)-1)*2;
1173 case ARM::t2B:
1174 return ((1<<23)-1)*2;
1175 default:
1176 break;
1177 }
1178
1179 return ((1<<23)-1)*4;
1180 }
1181
1182 /// findAvailableWater - Look for an existing entry in the WaterList in which
1183 /// we can place the CPE referenced from U so it's within range of U's MI.
1184 /// Returns true if found, false if not. If it returns true, WaterIter
1185 /// is set to the WaterList entry. For Thumb, prefer water that will not
1186 /// introduce padding to water that will. To ensure that this pass
1187 /// terminates, the CPE location for a particular CPUser is only allowed to
1188 /// move to a lower address, so search backward from the end of the list and
1189 /// prefer the first water that is in range.
1190 bool ARMConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset,
1191 water_iterator &WaterIter) {
1192 if (WaterList.empty())
1193 return false;
1194
1195 unsigned BestGrowth = ~0u;
1196 for (water_iterator IP = std::prev(WaterList.end()), B = WaterList.begin();;
1197 --IP) {
1198 MachineBasicBlock* WaterBB = *IP;
1199 // Check if water is in range and is either at a lower address than the
1200 // current "high water mark" or a new water block that was created since
1201 // the previous iteration by inserting an unconditional branch. In the
1202 // latter case, we want to allow resetting the high water mark back to
1203 // this new water since we haven't seen it before. Inserting branches
1204 // should be relatively uncommon and when it does happen, we want to be
1205 // sure to take advantage of it for all the CPEs near that block, so that
1206 // we don't insert more branches than necessary.
1207 unsigned Growth;
1208 if (isWaterInRange(UserOffset, WaterBB, U, Growth) &&
1209 (WaterBB->getNumber() < U.HighWaterMark->getNumber() ||
1210 NewWaterList.count(WaterBB) || WaterBB == U.MI->getParent()) &&
1211 Growth < BestGrowth) {
1212 // This is the least amount of required padding seen so far.
1213 BestGrowth = Growth;
1214 WaterIter = IP;
1215 DEBUG(dbgs() << "Found water after BB#" << WaterBB->getNumber()
1216 << " Growth=" << Growth << '\n');
1217
1218 // Keep looking unless it is perfect.
1219 if (BestGrowth == 0)
1220 return true;
1221 }
1222 if (IP == B)
1223 break;
1224 }
1225 return BestGrowth != ~0u;
1226 }
1227
1228 /// createNewWater - No existing WaterList entry will work for
1229 /// CPUsers[CPUserIndex], so create a place to put the CPE. The end of the
1230 /// block is used if in range, and the conditional branch munged so control
1231 /// flow is correct. Otherwise the block is split to create a hole with an
1232 /// unconditional branch around it. In either case NewMBB is set to a
1233 /// block following which the new island can be inserted (the WaterList
1234 /// is not adjusted).
1235 void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
1236 unsigned UserOffset,
1237 MachineBasicBlock *&NewMBB) {
1238 CPUser &U = CPUsers[CPUserIndex];
1239 MachineInstr *UserMI = U.MI;
1240 MachineInstr *CPEMI = U.CPEMI;
1241 unsigned CPELogAlign = getCPELogAlign(CPEMI);
1242 MachineBasicBlock *UserMBB = UserMI->getParent();
1243 const BasicBlockInfo &UserBBI = BBInfo[UserMBB->getNumber()];
1244
1245 // If the block does not end in an unconditional branch already, and if the
1246 // end of the block is within range, make new water there. (The addition
1247 // below is for the unconditional branch we will be adding: 4 bytes on ARM +
1248 // Thumb2, 2 on Thumb1.)
1249 if (BBHasFallthrough(UserMBB)) {
1250 // Size of branch to insert.
1251 unsigned Delta = isThumb1 ? 2 : 4;
1252 // Compute the offset where the CPE will begin.
1253 unsigned CPEOffset = UserBBI.postOffset(CPELogAlign) + Delta;
1254
1255 if (isOffsetInRange(UserOffset, CPEOffset, U)) {
1256 DEBUG(dbgs() << "Split at end of BB#" << UserMBB->getNumber()
1257 << format(", expected CPE offset %#x\n", CPEOffset));
1258 NewMBB = std::next(MachineFunction::iterator(UserMBB));
1259 // Add an unconditional branch from UserMBB to fallthrough block. Record
1260 // it for branch lengthening; this new branch will not get out of range,
1261 // but if the preceding conditional branch is out of range, the targets
1262 // will be exchanged, and the altered branch may be out of range, so the
1263 // machinery has to know about it.
1264 int UncondBr = isThumb ? ((isThumb2) ? ARM::t2B : ARM::tB) : ARM::B;
1265 if (!isThumb)
1266 BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr)).addMBB(NewMBB);
1267 else
1268 BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr)).addMBB(NewMBB)
1269 .addImm(ARMCC::AL).addReg(0);
1270 unsigned MaxDisp = getUnconditionalBrDisp(UncondBr);
1271 ImmBranches.push_back(ImmBranch(&UserMBB->back(),
1272 MaxDisp, false, UncondBr));
1273 computeBlockSize(UserMBB);
1274 adjustBBOffsetsAfter(UserMBB);
1275 return;
1276 }
1277 }
1278
1279 // What a big block. Find a place within the block to split it. This is a
1280 // little tricky on Thumb1 since instructions are 2 bytes and constant pool
1281 // entries are 4 bytes: if instruction I references island CPE, and
1282 // instruction I+1 references CPE', it will not work well to put CPE as far
1283 // forward as possible, since then CPE' cannot immediately follow it (that
1284 // location is 2 bytes farther away from I+1 than CPE was from I) and we'd
1285 // need to create a new island. So, we make a first guess, then walk through
1286 // the instructions between the one currently being looked at and the
1287 // possible insertion point, and make sure any other instructions that
1288 // reference CPEs will be able to use the same island area; if not, we back
1289 // up the insertion point.
1290
1291 // Try to split the block so it's fully aligned. Compute the latest split
1292 // point where we can add a 4-byte branch instruction, and then align to
1293 // LogAlign which is the largest possible alignment in the function.
1294 unsigned LogAlign = MF->getAlignment();
1295 assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry");
1296 unsigned KnownBits = UserBBI.internalKnownBits();
1297 unsigned UPad = UnknownPadding(LogAlign, KnownBits);
1298 unsigned BaseInsertOffset = UserOffset + U.getMaxDisp() - UPad;
1299 DEBUG(dbgs() << format("Split in middle of big block before %#x",
1300 BaseInsertOffset));
1301
1302 // The 4 in the following is for the unconditional branch we'll be inserting
1303 // (allows for long branch on Thumb1). Alignment of the island is handled
1304 // inside isOffsetInRange.
1305 BaseInsertOffset -= 4;
1306
1307 DEBUG(dbgs() << format(", adjusted to %#x", BaseInsertOffset)
1308 << " la=" << LogAlign
1309 << " kb=" << KnownBits
1310 << " up=" << UPad << '\n');
1311
1312 // This could point off the end of the block if we've already got constant
1313 // pool entries following this block; only the last one is in the water list.
1314 // Back past any possible branches (allow for a conditional and a maximally
1315 // long unconditional).
1316 if (BaseInsertOffset + 8 >= UserBBI.postOffset()) {
1317 // Ensure BaseInsertOffset is larger than the offset of the instruction
1318 // following UserMI so that the loop which searches for the split point
1319 // iterates at least once.
1320 BaseInsertOffset =
1321 std::max(UserBBI.postOffset() - UPad - 8,
1322 UserOffset + TII->GetInstSizeInBytes(UserMI) + 1);
1323 DEBUG(dbgs() << format("Move inside block: %#x\n", BaseInsertOffset));
1324 }
1325 unsigned EndInsertOffset = BaseInsertOffset + 4 + UPad +
1326 CPEMI->getOperand(2).getImm();
1327 MachineBasicBlock::iterator MI = UserMI;
1328 ++MI;
1329 unsigned CPUIndex = CPUserIndex+1;
1330 unsigned NumCPUsers = CPUsers.size();
1331 MachineInstr *LastIT = nullptr;
1332 for (unsigned Offset = UserOffset+TII->GetInstSizeInBytes(UserMI);
1333 Offset < BaseInsertOffset;
1334 Offset += TII->GetInstSizeInBytes(MI), MI = std::next(MI)) {
1335 assert(MI != UserMBB->end() && "Fell off end of block");
1336 if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == MI) {
1337 CPUser &U = CPUsers[CPUIndex];
1338 if (!isOffsetInRange(Offset, EndInsertOffset, U)) {
1339 // Shift insertion point by one unit of alignment so it is within reach.
1340 BaseInsertOffset -= 1u << LogAlign;
1341 EndInsertOffset -= 1u << LogAlign;
1342 }
1343 // This is overly conservative, as we don't account for CPEMIs being
1344 // reused within the block, but it doesn't matter much. Also assume CPEs
1345 // are added in order with alignment padding. We may eventually be able
1346 // to pack the aligned CPEs better.
1347 EndInsertOffset += U.CPEMI->getOperand(2).getImm();
1348 CPUIndex++;
1349 }
1350
1351 // Remember the last IT instruction.
1352 if (MI->getOpcode() == ARM::t2IT)
1353 LastIT = MI;
1354 }
1355
1356 --MI;
1357
1358 // Avoid splitting an IT block.
1359 if (LastIT) {
1360 unsigned PredReg = 0;
1361 ARMCC::CondCodes CC = getITInstrPredicate(MI, PredReg);
1362 if (CC != ARMCC::AL)
1363 MI = LastIT;
1364 }
1365
1366 // We really must not split an IT block.
1367 DEBUG(unsigned PredReg;
1368 assert(!isThumb || getITInstrPredicate(MI, PredReg) == ARMCC::AL));
1369
1370 NewMBB = splitBlockBeforeInstr(MI);
1371 }
1372
1373 /// handleConstantPoolUser - Analyze the specified user, checking to see if it
1374 /// is out-of-range. If so, pick up the constant pool value and move it some
1375 /// place in-range. Return true if we changed any addresses (thus must run
1376 /// another pass of branch lengthening), false otherwise.
1377 bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
1378 CPUser &U = CPUsers[CPUserIndex];
1379 MachineInstr *UserMI = U.MI;
1380 MachineInstr *CPEMI = U.CPEMI;
1381 unsigned CPI = CPEMI->getOperand(1).getIndex();
1382 unsigned Size = CPEMI->getOperand(2).getImm();
1383 // Compute this only once, it's expensive.
1384 unsigned UserOffset = getUserOffset(U);
1385
1386 // See if the current entry is within range, or there is a clone of it
1387 // in range.
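  // findInRangeCPEntry returns 1 if the current entry can be used unchanged,
  // and 2 if the user was retargeted to an existing in-range clone (which
  // changes offsets, so another pass is needed).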
1388 int result = findInRangeCPEntry(U, UserOffset);
1389 if (result==1) return false;
1390 else if (result==2) return true;
1391
1392 // No existing clone of this CPE is within range.
1393 // We will be generating a new clone. Get a UID for it.
1394 unsigned ID = AFI->createPICLabelUId();
1395
1396 // Look for water where we can place this CPE.
1397 MachineBasicBlock *NewIsland = MF->CreateMachineBasicBlock();
1398 MachineBasicBlock *NewMBB;
1399 water_iterator IP;
1400 if (findAvailableWater(U, UserOffset, IP)) {
1401 DEBUG(dbgs() << "Found water in range\n");
1402 MachineBasicBlock *WaterBB = *IP;
1403
1404 // If the original WaterList entry was "new water" on this iteration,
1405 // propagate that to the new island. This is just keeping NewWaterList
1406 // updated to match the WaterList, which will be updated below.
1407 if (NewWaterList.erase(WaterBB))
1408 NewWaterList.insert(NewIsland);
1409
1410 // The new CPE goes before the following block (NewMBB).
1411 NewMBB = std::next(MachineFunction::iterator(WaterBB));
1412
1413 } else {
1414 // No water found.
1415 DEBUG(dbgs() << "No water found\n");
1416 createNewWater(CPUserIndex, UserOffset, NewMBB);
1417
1418 // splitBlockBeforeInstr adds to WaterList, which is important when it is
1419 // called while handling branches so that the water will be seen on the
1420 // next iteration for constant pools, but in this context, we don't want
1421 // it. Check for this so it will be removed from the WaterList.
1422 // Also remove any entry from NewWaterList.
1423 MachineBasicBlock *WaterBB = std::prev(MachineFunction::iterator(NewMBB));
1424 IP = std::find(WaterList.begin(), WaterList.end(), WaterBB);
1425 if (IP != WaterList.end())
1426 NewWaterList.erase(WaterBB);
1427
1428 // We are adding new water. Update NewWaterList.
1429 NewWaterList.insert(NewIsland);
1430 }
1431
1432 // Remove the original WaterList entry; we want subsequent insertions in
1433 // this vicinity to go after the one we're about to insert. This
1434 // considerably reduces the number of times we have to move the same CPE
1435 // more than once and is also important to ensure the algorithm terminates.
1436 if (IP != WaterList.end())
1437 WaterList.erase(IP);
1438
1439 // Okay, we know we can put an island before NewMBB now, do it!
1440 MF->insert(NewMBB, NewIsland);
1441
1442 // Update internal data structures to account for the newly inserted MBB.
1443 updateForInsertedWaterBlock(NewIsland);
1444
1445 // Decrement the old entry, and remove it if refcount becomes 0.
1446 decrementCPEReferenceCount(CPI, CPEMI);
1447
1448 // Now that we have an island to add the CPE to, clone the original CPE and
1449 // add it to the island.
1450 U.HighWaterMark = NewIsland;
1451 U.CPEMI = BuildMI(NewIsland, DebugLoc(), TII->get(ARM::CONSTPOOL_ENTRY))
1452 .addImm(ID).addConstantPoolIndex(CPI).addImm(Size);
1453 CPEntries[CPI].push_back(CPEntry(U.CPEMI, ID, 1));
1454 ++NumCPEs;
1455
1456 // Mark the basic block as aligned as required by the const-pool entry.
1457 NewIsland->setAlignment(getCPELogAlign(U.CPEMI));
1458
1459 // Increase the size of the island block to account for the new entry.
1460 BBInfo[NewIsland->getNumber()].Size += Size;
1461 adjustBBOffsetsAfter(std::prev(MachineFunction::iterator(NewIsland)));
1462
1463 // Finally, change the CPI in the instruction operand to be ID.
1464 for (unsigned i = 0, e = UserMI->getNumOperands(); i != e; ++i)
1465 if (UserMI->getOperand(i).isCPI()) {
1466 UserMI->getOperand(i).setIndex(ID);
1467 break;
1468 }
1469
1470 DEBUG(dbgs() << " Moved CPE to #" << ID << " CPI=" << CPI
1471 << format(" offset=%#x\n", BBInfo[NewIsland->getNumber()].Offset));
1472
1473 return true;
1474 }
1475
1476 /// removeDeadCPEMI - Remove a dead constant pool entry instruction. Update
1477 /// sizes and offsets of impacted basic blocks.
1478 void ARMConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) {
1479 MachineBasicBlock *CPEBB = CPEMI->getParent();
1480 unsigned Size = CPEMI->getOperand(2).getImm();
1481 CPEMI->eraseFromParent();
1482 BBInfo[CPEBB->getNumber()].Size -= Size;
1483 // All succeeding offsets have the current size value added in, fix this.
1484 if (CPEBB->empty()) {
1485 BBInfo[CPEBB->getNumber()].Size = 0;
1486
1487 // This block no longer needs to be aligned.
1488 CPEBB->setAlignment(0);
1489 } else
1490 // Entries are sorted by descending alignment, so realign from the front.
1491 CPEBB->setAlignment(getCPELogAlign(CPEBB->begin()));
1492
1493 adjustBBOffsetsAfter(CPEBB);
1494 // An island has only one predecessor BB and one successor BB. Check if
1495 // this BB's predecessor jumps directly to this BB's successor. This
1496 // shouldn't happen currently.
1497 assert(!BBIsJumpedOver(CPEBB) && "How did this happen?");
1498 // FIXME: remove the empty blocks after all the work is done?
1499 }
1500
1501 /// removeUnusedCPEntries - Remove constant pool entries whose refcounts
1502 /// are zero.
1503 bool ARMConstantIslands::removeUnusedCPEntries() {
1504   bool MadeChange = false;
1505 for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) {
1506 std::vector<CPEntry> &CPEs = CPEntries[i];
1507 for (unsigned j = 0, ee = CPEs.size(); j != ee; ++j) {
1508 if (CPEs[j].RefCount == 0 && CPEs[j].CPEMI) {
1509 removeDeadCPEMI(CPEs[j].CPEMI);
1510 CPEs[j].CPEMI = nullptr;
1511 MadeChange = true;
1512 }
1513 }
1514 }
1515 return MadeChange;
1516 }
1517
1518 /// isBBInRange - Returns true if the distance between the specified MI and
1519 /// the specified BB fits in MI's displacement field.
1520 bool ARMConstantIslands::isBBInRange(MachineInstr *MI, MachineBasicBlock *DestBB,
1521 unsigned MaxDisp) {
1522 unsigned PCAdj = isThumb ? 4 : 8;
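  // The PC reads as the branch address plus 4 in Thumb mode and plus 8 in ARM
  // mode, so displacements are measured from that adjusted offset.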
1523 unsigned BrOffset = getOffsetOf(MI) + PCAdj;
1524 unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset;
1525
1526   DEBUG(dbgs() << "Branch to destination BB#" << DestBB->getNumber()
1527 << " from BB#" << MI->getParent()->getNumber()
1528 << " max delta=" << MaxDisp
1529 << " from " << getOffsetOf(MI) << " to " << DestOffset
1530 << " offset " << int(DestOffset-BrOffset) << "\t" << *MI);
1531
1532 if (BrOffset <= DestOffset) {
1533 // Branch before the Dest.
1534 if (DestOffset-BrOffset <= MaxDisp)
1535 return true;
1536 } else {
1537 if (BrOffset-DestOffset <= MaxDisp)
1538 return true;
1539 }
1540 return false;
1541 }
1542
1543 /// fixupImmediateBr - Fix up an immediate branch whose destination is too far
1544 /// away to fit in its displacement field.
1545 bool ARMConstantIslands::fixupImmediateBr(ImmBranch &Br) {
1546 MachineInstr *MI = Br.MI;
1547 MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();
1548
1549 // Check to see if the DestBB is already in-range.
1550 if (isBBInRange(MI, DestBB, Br.MaxDisp))
1551 return false;
1552
1553 if (!Br.isCond)
1554 return fixupUnconditionalBr(Br);
1555 return fixupConditionalBr(Br);
1556 }
1557
1558 /// fixupUnconditionalBr - Fix up an unconditional branch whose destination is
1559 /// too far away to fit in its displacement field. If the LR register has been
1560 /// spilled in the epilogue, then we can use BL to implement a far jump.
1561 /// Otherwise, add an intermediate branch instruction to reach the destination.
1562 bool
1563 ARMConstantIslands::fixupUnconditionalBr(ImmBranch &Br) {
1564 MachineInstr *MI = Br.MI;
1565 MachineBasicBlock *MBB = MI->getParent();
1566 if (!isThumb1)
1567 llvm_unreachable("fixupUnconditionalBr is Thumb1 only!");
1568
1569 // Use BL to implement far jump.
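  // tBfar is emitted as a BL, extending the range to (1 << 21) * 2 bytes (+-4MB).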
1570 Br.MaxDisp = (1 << 21) * 2;
1571 MI->setDesc(TII->get(ARM::tBfar));
1572 BBInfo[MBB->getNumber()].Size += 2;
1573 adjustBBOffsetsAfter(MBB);
1574 HasFarJump = true;
1575 ++NumUBrFixed;
1576
1577 DEBUG(dbgs() << " Changed B to long jump " << *MI);
1578
1579 return true;
1580 }
1581
1582 /// fixupConditionalBr - Fix up a conditional branch whose destination is too
1583 /// far away to fit in its displacement field. It is converted to an inverse
1584 /// conditional branch + an unconditional branch to the destination.
1585 bool
1586 ARMConstantIslands::fixupConditionalBr(ImmBranch &Br) {
1587 MachineInstr *MI = Br.MI;
1588 MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();
1589
1590 // Add an unconditional branch to the destination and invert the branch
1591 // condition to jump over it:
1592 // blt L1
1593 // =>
1594 // bge L2
1595 // b L1
1596 // L2:
1597 ARMCC::CondCodes CC = (ARMCC::CondCodes)MI->getOperand(1).getImm();
1598 CC = ARMCC::getOppositeCondition(CC);
1599 unsigned CCReg = MI->getOperand(2).getReg();
1600
1601 // If the branch is at the end of its MBB and that has a fall-through block,
1602 // direct the updated conditional branch to the fall-through block. Otherwise,
1603 // split the MBB before the next instruction.
1604 MachineBasicBlock *MBB = MI->getParent();
1605 MachineInstr *BMI = &MBB->back();
1606 bool NeedSplit = (BMI != MI) || !BBHasFallthrough(MBB);
1607
1608 ++NumCBrFixed;
1609 if (BMI != MI) {
1610 if (std::next(MachineBasicBlock::iterator(MI)) == std::prev(MBB->end()) &&
1611 BMI->getOpcode() == Br.UncondBr) {
1612 // Last MI in the BB is an unconditional branch. Can we simply invert the
1613 // condition and swap destinations:
1614 // beq L1
1615 // b L2
1616 // =>
1617 // bne L2
1618 // b L1
1619 MachineBasicBlock *NewDest = BMI->getOperand(0).getMBB();
1620 if (isBBInRange(MI, NewDest, Br.MaxDisp)) {
1621 DEBUG(dbgs() << " Invert Bcc condition and swap its destination with "
1622 << *BMI);
1623 BMI->getOperand(0).setMBB(DestBB);
1624 MI->getOperand(0).setMBB(NewDest);
1625 MI->getOperand(1).setImm(CC);
1626 return true;
1627 }
1628 }
1629 }
1630
1631 if (NeedSplit) {
1632 splitBlockBeforeInstr(MI);
1633 // No need for the branch to the next block. We're adding an unconditional
1634 // branch to the destination.
1635 int delta = TII->GetInstSizeInBytes(&MBB->back());
1636 BBInfo[MBB->getNumber()].Size -= delta;
1637 MBB->back().eraseFromParent();
1638 // BBInfo[SplitBB].Offset is wrong temporarily, fixed below
1639 }
1640 MachineBasicBlock *NextBB = std::next(MachineFunction::iterator(MBB));
1641
1642 DEBUG(dbgs() << " Insert B to BB#" << DestBB->getNumber()
1643 << " also invert condition and change dest. to BB#"
1644 << NextBB->getNumber() << "\n");
1645
1646 // Insert a new conditional branch and a new unconditional branch.
1647   // Also update the ImmBranch and add a new entry for the new branch.
1648 BuildMI(MBB, DebugLoc(), TII->get(MI->getOpcode()))
1649 .addMBB(NextBB).addImm(CC).addReg(CCReg);
1650 Br.MI = &MBB->back();
1651 BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
1652 if (isThumb)
1653 BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB)
1654 .addImm(ARMCC::AL).addReg(0);
1655 else
1656 BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB);
1657 BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
1658 unsigned MaxDisp = getUnconditionalBrDisp(Br.UncondBr);
1659 ImmBranches.push_back(ImmBranch(&MBB->back(), MaxDisp, false, Br.UncondBr));
1660
1661 // Remove the old conditional branch. It may or may not still be in MBB.
1662 BBInfo[MI->getParent()->getNumber()].Size -= TII->GetInstSizeInBytes(MI);
1663 MI->eraseFromParent();
1664 adjustBBOffsetsAfter(MBB);
1665 return true;
1666 }
1667
1668 /// undoLRSpillRestore - Remove Thumb push / pop instructions that only spill
1669 /// LR / restore LR to pc. FIXME: This is done here because it's only possible
1670 /// to do this if tBfar is not used.
1671 bool ARMConstantIslands::undoLRSpillRestore() {
1672 bool MadeChange = false;
1673 for (unsigned i = 0, e = PushPopMIs.size(); i != e; ++i) {
1674 MachineInstr *MI = PushPopMIs[i];
1675 // First two operands are predicates.
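    // e.g. 'pop {pc}' restoring only the return address becomes 'bx lr'.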
1676 if (MI->getOpcode() == ARM::tPOP_RET &&
1677 MI->getOperand(2).getReg() == ARM::PC &&
1678 MI->getNumExplicitOperands() == 3) {
1679 // Create the new insn and copy the predicate from the old.
1680 BuildMI(MI->getParent(), MI->getDebugLoc(), TII->get(ARM::tBX_RET))
1681 .addOperand(MI->getOperand(0))
1682 .addOperand(MI->getOperand(1));
1683 MI->eraseFromParent();
1684 MadeChange = true;
1685 }
1686 }
1687 return MadeChange;
1688 }
1689
1690 // mayOptimizeThumb2Instruction - Returns true if optimizeThumb2Instructions
1691 // below may shrink MI.
1692 bool
1693 ARMConstantIslands::mayOptimizeThumb2Instruction(const MachineInstr *MI) const {
1694 switch(MI->getOpcode()) {
1695 // optimizeThumb2Instructions.
1696 case ARM::t2LEApcrel:
1697 case ARM::t2LDRpci:
1698 // optimizeThumb2Branches.
1699 case ARM::t2B:
1700 case ARM::t2Bcc:
1701 case ARM::tBcc:
1702 // optimizeThumb2JumpTables.
1703 case ARM::t2BR_JT:
1704 return true;
1705 }
1706 return false;
1707 }
1708
1709 bool ARMConstantIslands::optimizeThumb2Instructions() {
1710 bool MadeChange = false;
1711
1712 // Shrink ADR and LDR from constantpool.
1713 for (unsigned i = 0, e = CPUsers.size(); i != e; ++i) {
1714 CPUser &U = CPUsers[i];
1715 unsigned Opcode = U.MI->getOpcode();
1716 unsigned NewOpc = 0;
1717 unsigned Scale = 1;
1718 unsigned Bits = 0;
1719 switch (Opcode) {
1720 default: break;
1721 case ARM::t2LEApcrel:
1722 if (isARMLowRegister(U.MI->getOperand(0).getReg())) {
1723 NewOpc = ARM::tLEApcrel;
1724 Bits = 8;
1725 Scale = 4;
1726 }
1727 break;
1728 case ARM::t2LDRpci:
1729 if (isARMLowRegister(U.MI->getOperand(0).getReg())) {
1730 NewOpc = ARM::tLDRpci;
1731 Bits = 8;
1732 Scale = 4;
1733 }
1734 break;
1735 }
1736
1737 if (!NewOpc)
1738 continue;
1739
1740 unsigned UserOffset = getUserOffset(U);
1741 unsigned MaxOffs = ((1 << Bits) - 1) * Scale;
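    // With Bits == 8 and Scale == 4 this is 1020 bytes, the limit for the
    // Thumb1 tLDRpci / tLEApcrel encodings.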
1742
1743 // Be conservative with inline asm.
1744 if (!U.KnownAlignment)
1745 MaxOffs -= 2;
1746
1747 // FIXME: Check if offset is multiple of scale if scale is not 4.
1748 if (isCPEntryInRange(U.MI, UserOffset, U.CPEMI, MaxOffs, false, true)) {
1749 DEBUG(dbgs() << "Shrink: " << *U.MI);
1750 U.MI->setDesc(TII->get(NewOpc));
1751 MachineBasicBlock *MBB = U.MI->getParent();
1752 BBInfo[MBB->getNumber()].Size -= 2;
1753 adjustBBOffsetsAfter(MBB);
1754 ++NumT2CPShrunk;
1755 MadeChange = true;
1756 }
1757 }
1758
1759 MadeChange |= optimizeThumb2Branches();
1760 MadeChange |= optimizeThumb2JumpTables();
1761 return MadeChange;
1762 }
1763
1764 bool ARMConstantIslands::optimizeThumb2Branches() {
1765 bool MadeChange = false;
1766
1767 for (unsigned i = 0, e = ImmBranches.size(); i != e; ++i) {
1768 ImmBranch &Br = ImmBranches[i];
1769 unsigned Opcode = Br.MI->getOpcode();
1770 unsigned NewOpc = 0;
1771 unsigned Scale = 1;
1772 unsigned Bits = 0;
1773 switch (Opcode) {
1774 default: break;
1775 case ARM::t2B:
1776 NewOpc = ARM::tB;
1777 Bits = 11;
1778 Scale = 2;
1779 break;
1780 case ARM::t2Bcc: {
1781 NewOpc = ARM::tBcc;
1782 Bits = 8;
1783 Scale = 2;
1784 break;
1785 }
1786 }
1787 if (NewOpc) {
1788 unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
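      // tB has an 11-bit offset scaled by 2 (2046 bytes here); tBcc has an
      // 8-bit offset scaled by 2 (254 bytes).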
1789 MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
1790 if (isBBInRange(Br.MI, DestBB, MaxOffs)) {
1791 DEBUG(dbgs() << "Shrink branch: " << *Br.MI);
1792 Br.MI->setDesc(TII->get(NewOpc));
1793 MachineBasicBlock *MBB = Br.MI->getParent();
1794 BBInfo[MBB->getNumber()].Size -= 2;
1795 adjustBBOffsetsAfter(MBB);
1796 ++NumT2BrShrunk;
1797 MadeChange = true;
1798 }
1799 }
1800
1801 Opcode = Br.MI->getOpcode();
1802 if (Opcode != ARM::tBcc)
1803 continue;
1804
1805     // If the conditional branch doesn't kill CPSR, then CPSR can be live out
1806 // so this transformation is not safe.
1807 if (!Br.MI->killsRegister(ARM::CPSR))
1808 continue;
1809
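    // Try to fold a preceding 'cmp rN, #0' into the branch to form a
    // compare-and-branch, e.g.:
    //   cmp r0, #0
    //   beq L1
    // =>
    //   cbz r0, L1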
1810 NewOpc = 0;
1811 unsigned PredReg = 0;
1812 ARMCC::CondCodes Pred = getInstrPredicate(Br.MI, PredReg);
1813 if (Pred == ARMCC::EQ)
1814 NewOpc = ARM::tCBZ;
1815 else if (Pred == ARMCC::NE)
1816 NewOpc = ARM::tCBNZ;
1817 if (!NewOpc)
1818 continue;
1819 MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
1820     // Check if the distance is within 126. Subtract 2 from the starting offset
1821 // because the cmp will be eliminated.
1822 unsigned BrOffset = getOffsetOf(Br.MI) + 4 - 2;
1823 unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset;
1824 if (BrOffset < DestOffset && (DestOffset - BrOffset) <= 126) {
1825 MachineBasicBlock::iterator CmpMI = Br.MI;
1826 if (CmpMI != Br.MI->getParent()->begin()) {
1827 --CmpMI;
1828 if (CmpMI->getOpcode() == ARM::tCMPi8) {
1829 unsigned Reg = CmpMI->getOperand(0).getReg();
1830 Pred = getInstrPredicate(CmpMI, PredReg);
1831 if (Pred == ARMCC::AL &&
1832 CmpMI->getOperand(1).getImm() == 0 &&
1833 isARMLowRegister(Reg)) {
1834 MachineBasicBlock *MBB = Br.MI->getParent();
1835 DEBUG(dbgs() << "Fold: " << *CmpMI << " and: " << *Br.MI);
1836 MachineInstr *NewBR =
1837 BuildMI(*MBB, CmpMI, Br.MI->getDebugLoc(), TII->get(NewOpc))
1838 .addReg(Reg).addMBB(DestBB,Br.MI->getOperand(0).getTargetFlags());
1839 CmpMI->eraseFromParent();
1840 Br.MI->eraseFromParent();
1841 Br.MI = NewBR;
1842 BBInfo[MBB->getNumber()].Size -= 2;
1843 adjustBBOffsetsAfter(MBB);
1844 ++NumCBZ;
1845 MadeChange = true;
1846 }
1847 }
1848 }
1849 }
1850 }
1851
1852 return MadeChange;
1853 }
1854
1855 /// optimizeThumb2JumpTables - Use tbb / tbh instructions to generate smaller
1856 /// jumptables when it's possible.
1857 bool ARMConstantIslands::optimizeThumb2JumpTables() {
1858 bool MadeChange = false;
1859
1860   // FIXME: After the tables are shrunk, can we get rid of some of the
1861 // constantpool tables?
1862 MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
1863 if (!MJTI) return false;
1864
1865 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
1866 for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
1867 MachineInstr *MI = T2JumpTables[i];
1868 const MCInstrDesc &MCID = MI->getDesc();
1869 unsigned NumOps = MCID.getNumOperands();
1870 unsigned JTOpIdx = NumOps - (MI->isPredicable() ? 3 : 2);
1871 MachineOperand JTOP = MI->getOperand(JTOpIdx);
1872 unsigned JTI = JTOP.getIndex();
1873 assert(JTI < JT.size());
1874
1875 bool ByteOk = true;
1876 bool HalfWordOk = true;
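    // TBB holds byte offsets (up to 510 bytes forward); TBH holds halfword
    // offsets (up to 131070 bytes). Both can only branch forward.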
1877 unsigned JTOffset = getOffsetOf(MI) + 4;
1878 const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
1879 for (unsigned j = 0, ee = JTBBs.size(); j != ee; ++j) {
1880 MachineBasicBlock *MBB = JTBBs[j];
1881 unsigned DstOffset = BBInfo[MBB->getNumber()].Offset;
1882 // Negative offset is not ok. FIXME: We should change BB layout to make
1883 // sure all the branches are forward.
1884 if (ByteOk && (DstOffset - JTOffset) > ((1<<8)-1)*2)
1885 ByteOk = false;
1886 unsigned TBHLimit = ((1<<16)-1)*2;
1887 if (HalfWordOk && (DstOffset - JTOffset) > TBHLimit)
1888 HalfWordOk = false;
1889 if (!ByteOk && !HalfWordOk)
1890 break;
1891 }
1892
1893 if (ByteOk || HalfWordOk) {
1894 MachineBasicBlock *MBB = MI->getParent();
1895 unsigned BaseReg = MI->getOperand(0).getReg();
1896 bool BaseRegKill = MI->getOperand(0).isKill();
1897 if (!BaseRegKill)
1898 continue;
1899 unsigned IdxReg = MI->getOperand(1).getReg();
1900 bool IdxRegKill = MI->getOperand(1).isKill();
1901
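      // The dispatch sequence is expected to be a tLEApcrelJT/t2LEApcrelJT
      // materializing the table address, an add that folds in the scaled index
      // (AddrMI), and the t2BR_JT itself. Find the first two so that all three
      // can be removed when the TB[BH] takes their place.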
1902 // Scan backwards to find the instruction that defines the base
1903 // register. Due to post-RA scheduling, we can't count on it
1904 // immediately preceding the branch instruction.
1905 MachineBasicBlock::iterator PrevI = MI;
1906 MachineBasicBlock::iterator B = MBB->begin();
1907 while (PrevI != B && !PrevI->definesRegister(BaseReg))
1908 --PrevI;
1909
1910 // If for some reason we didn't find it, we can't do anything, so
1911 // just skip this one.
1912 if (!PrevI->definesRegister(BaseReg))
1913 continue;
1914
1915 MachineInstr *AddrMI = PrevI;
1916 bool OptOk = true;
1917 // Examine the instruction that calculates the jumptable entry address.
1918       // Make sure it only defines the base register and that every register
1919       // it uses, other than the index register, is killed.
1920 for (unsigned k = 0, eee = AddrMI->getNumOperands(); k != eee; ++k) {
1921 const MachineOperand &MO = AddrMI->getOperand(k);
1922 if (!MO.isReg() || !MO.getReg())
1923 continue;
1924 if (MO.isDef() && MO.getReg() != BaseReg) {
1925 OptOk = false;
1926 break;
1927 }
1928 if (MO.isUse() && !MO.isKill() && MO.getReg() != IdxReg) {
1929 OptOk = false;
1930 break;
1931 }
1932 }
1933 if (!OptOk)
1934 continue;
1935
1936 // Now scan back again to find the tLEApcrel or t2LEApcrelJT instruction
1937 // that gave us the initial base register definition.
1938 for (--PrevI; PrevI != B && !PrevI->definesRegister(BaseReg); --PrevI)
1939 ;
1940
1941 // The instruction should be a tLEApcrel or t2LEApcrelJT; we want
1942 // to delete it as well.
1943 MachineInstr *LeaMI = PrevI;
1944 if ((LeaMI->getOpcode() != ARM::tLEApcrelJT &&
1945 LeaMI->getOpcode() != ARM::t2LEApcrelJT) ||
1946 LeaMI->getOperand(0).getReg() != BaseReg)
1947 OptOk = false;
1948
1949 if (!OptOk)
1950 continue;
1951
1952 DEBUG(dbgs() << "Shrink JT: " << *MI << " addr: " << *AddrMI
1953 << " lea: " << *LeaMI);
1954 unsigned Opc = ByteOk ? ARM::t2TBB_JT : ARM::t2TBH_JT;
1955 MachineBasicBlock::iterator MI_JT = MI;
1956 MachineInstr *NewJTMI =
1957 BuildMI(*MBB, MI_JT, MI->getDebugLoc(), TII->get(Opc))
1958 .addReg(IdxReg, getKillRegState(IdxRegKill))
1959 .addJumpTableIndex(JTI, JTOP.getTargetFlags())
1960 .addImm(MI->getOperand(JTOpIdx+1).getImm());
1961 DEBUG(dbgs() << "BB#" << MBB->getNumber() << ": " << *NewJTMI);
1962 // FIXME: Insert an "ALIGN" instruction to ensure the next instruction
1963 // is 2-byte aligned. For now, asm printer will fix it up.
1964 unsigned NewSize = TII->GetInstSizeInBytes(NewJTMI);
1965 unsigned OrigSize = TII->GetInstSizeInBytes(AddrMI);
1966 OrigSize += TII->GetInstSizeInBytes(LeaMI);
1967 OrigSize += TII->GetInstSizeInBytes(MI);
1968
1969 AddrMI->eraseFromParent();
1970 LeaMI->eraseFromParent();
1971 MI->eraseFromParent();
1972
1973 int delta = OrigSize - NewSize;
1974 BBInfo[MBB->getNumber()].Size -= delta;
1975 adjustBBOffsetsAfter(MBB);
1976
1977 ++NumTBs;
1978 MadeChange = true;
1979 }
1980 }
1981
1982 return MadeChange;
1983 }
1984
1985 /// reorderThumb2JumpTables - Adjust the function's block layout to ensure that
1986 /// jump tables always branch forwards, since that's what tbb and tbh need.
1987 bool ARMConstantIslands::reorderThumb2JumpTables() {
1988 bool MadeChange = false;
1989
1990 MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
1991 if (!MJTI) return false;
1992
1993 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
1994 for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
1995 MachineInstr *MI = T2JumpTables[i];
1996 const MCInstrDesc &MCID = MI->getDesc();
1997 unsigned NumOps = MCID.getNumOperands();
1998 unsigned JTOpIdx = NumOps - (MI->isPredicable() ? 3 : 2);
1999 MachineOperand JTOP = MI->getOperand(JTOpIdx);
2000 unsigned JTI = JTOP.getIndex();
2001 assert(JTI < JT.size());
2002
2003 // We prefer if target blocks for the jump table come after the jump
2004 // instruction so we can use TB[BH]. Loop through the target blocks
2005     // and try to adjust them so that is the case.
2006 int JTNumber = MI->getParent()->getNumber();
2007 const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
2008 for (unsigned j = 0, ee = JTBBs.size(); j != ee; ++j) {
2009 MachineBasicBlock *MBB = JTBBs[j];
2010 int DTNumber = MBB->getNumber();
2011
2012 if (DTNumber < JTNumber) {
2013 // The destination precedes the switch. Try to move the block forward
2014 // so we have a positive offset.
2015 MachineBasicBlock *NewBB =
2016 adjustJTTargetBlockForward(MBB, MI->getParent());
2017 if (NewBB)
2018 MJTI->ReplaceMBBInJumpTable(JTI, JTBBs[j], NewBB);
2019 MadeChange = true;
2020 }
2021 }
2022 }
2023
2024 return MadeChange;
2025 }
2026
2027 MachineBasicBlock *ARMConstantIslands::
2028 adjustJTTargetBlockForward(MachineBasicBlock *BB, MachineBasicBlock *JTBB) {
2029 // If the destination block is terminated by an unconditional branch,
2030 // try to move it; otherwise, create a new block following the jump
2031 // table that branches back to the actual target. This is a very simple
2032 // heuristic. FIXME: We can definitely improve it.
2033 MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
2034 SmallVector<MachineOperand, 4> Cond;
2035 SmallVector<MachineOperand, 4> CondPrior;
2036 MachineFunction::iterator BBi = BB;
2037 MachineFunction::iterator OldPrior = std::prev(BBi);
2038
2039 // If the block terminator isn't analyzable, don't try to move the block
2040 bool B = TII->AnalyzeBranch(*BB, TBB, FBB, Cond);
2041
2042 // If the block ends in an unconditional branch, move it. The prior block
2043 // has to have an analyzable terminator for us to move this one. Be paranoid
2044 // and make sure we're not trying to move the entry block of the function.
2045 if (!B && Cond.empty() && BB != MF->begin() &&
2046 !TII->AnalyzeBranch(*OldPrior, TBB, FBB, CondPrior)) {
2047 BB->moveAfter(JTBB);
2048 OldPrior->updateTerminator();
2049 BB->updateTerminator();
2050 // Update numbering to account for the block being moved.
2051 MF->RenumberBlocks();
2052 ++NumJTMoved;
2053 return nullptr;
2054 }
2055
2056 // Create a new MBB for the code after the jump BB.
2057 MachineBasicBlock *NewBB =
2058 MF->CreateMachineBasicBlock(JTBB->getBasicBlock());
2059 MachineFunction::iterator MBBI = JTBB; ++MBBI;
2060 MF->insert(MBBI, NewBB);
2061
2062 // Add an unconditional branch from NewBB to BB.
2063 // There doesn't seem to be meaningful DebugInfo available; this doesn't
2064 // correspond directly to anything in the source.
2065 assert (isThumb2 && "Adjusting for TB[BH] but not in Thumb2?");
2066 BuildMI(NewBB, DebugLoc(), TII->get(ARM::t2B)).addMBB(BB)
2067 .addImm(ARMCC::AL).addReg(0);
2068
2069 // Update internal data structures to account for the newly inserted MBB.
2070 MF->RenumberBlocks(NewBB);
2071
2072 // Update the CFG.
2073 NewBB->addSuccessor(BB);
2074 JTBB->removeSuccessor(BB);
2075 JTBB->addSuccessor(NewBB);
2076
2077 ++NumJTInserted;
2078 return NewBB;
2079 }
2080