//===- AddrModeMatcher.cpp - Addressing mode matching facility --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the target addressing mode matcher class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/AddrModeMatcher.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instruction.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::PatternMatch;

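// Example output (illustrative): with all components present, an ExtAddrMode
// prints as "[GV:@g + 16 + Base:%base + 4*%idx]"; the " + " separator only
// appears between components that are actually set.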
void ExtAddrMode::print(raw_ostream &OS) const {
  bool NeedPlus = false;
  OS << "[";
  if (BaseGV) {
    OS << (NeedPlus ? " + " : "")
       << "GV:";
    WriteAsOperand(OS, BaseGV, /*PrintType=*/false);
    NeedPlus = true;
  }

  if (BaseOffs) {
    OS << (NeedPlus ? " + " : "") << BaseOffs;
    NeedPlus = true;
  }

  if (BaseReg) {
    OS << (NeedPlus ? " + " : "")
       << "Base:";
    WriteAsOperand(OS, BaseReg, /*PrintType=*/false);
    NeedPlus = true;
  }
  if (Scale) {
    OS << (NeedPlus ? " + " : "")
       << Scale << "*";
    WriteAsOperand(OS, ScaledReg, /*PrintType=*/false);
    NeedPlus = true;
  }

  OS << ']';
}

void ExtAddrMode::dump() const {
  print(dbgs());
  dbgs() << '\n';
}


/// MatchScaledValue - Try adding ScaleReg*Scale to the current addressing mode.
/// Return true and update AddrMode if this addr mode is legal for the target,
/// false if not.
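/// For example (illustrative), x86-style addressing supports scales of 1, 2,
/// 4, and 8, so a combined scale like 6 would typically be rejected by
/// isLegalAddressingMode on such a target.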
bool AddressingModeMatcher::MatchScaledValue(Value *ScaleReg, int64_t Scale,
                                             unsigned Depth) {
  // If Scale is 1, then this is the same as adding ScaleReg to the addressing
  // mode.  Just process that directly.
  if (Scale == 1)
    return MatchAddr(ScaleReg, Depth);

  // If the scale is 0, it takes nothing to add this.
  if (Scale == 0)
    return true;

  // If we already have a scale of this value, we can add to it, otherwise, we
  // need an available scale field.
  if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
    return false;

  ExtAddrMode TestAddrMode = AddrMode;

  // Add scale to turn X*4+X*3 -> X*7.  This could also do things like
  // [A+B + A*7] -> [B+A*8].
  TestAddrMode.Scale += Scale;
  TestAddrMode.ScaledReg = ScaleReg;

  // If the new address isn't legal, bail out.
  if (!TLI.isLegalAddressingMode(TestAddrMode, AccessTy))
    return false;

  // It was legal, so commit it.
  AddrMode = TestAddrMode;

  // Okay, we decided that we can add ScaleReg+Scale to AddrMode.  Check now
  // to see if ScaleReg is actually X+C.  If so, we can turn this into adding
  // X*Scale + C*Scale to addr mode.
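  // For example (illustrative): with Scale = 4 and ScaleReg = "add i32 %x, 3",
  // we can instead scale %x by 4 and fold 3*4 = 12 into the constant offset.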
  ConstantInt *CI = 0; Value *AddLHS = 0;
  if (isa<Instruction>(ScaleReg) &&  // not a constant expr.
      match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) {
    TestAddrMode.ScaledReg = AddLHS;
    TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale;

    // If this addressing mode is legal, commit it and remember that we folded
    // this instruction.
    if (TLI.isLegalAddressingMode(TestAddrMode, AccessTy)) {
      AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
      AddrMode = TestAddrMode;
      return true;
    }
  }

  // Otherwise, not (x+c)*scale, just return what we have.
  return true;
}

/// MightBeFoldableInst - This is a little filter, which returns true if an
/// addressing computation involving I might be folded into a load/store
/// accessing it.  This doesn't need to be perfect, but needs to accept at least
/// the set of instructions that MatchOperationAddr can.
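/// For example (illustrative), an "add" or a "mul" by a constant feeding a
/// load's pointer operand might fold into the load's addressing mode, while a
/// "udiv" never will, so the filter rejects it up front.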
static bool MightBeFoldableInst(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::BitCast:
    // Don't touch identity bitcasts.
    if (I->getType() == I->getOperand(0)->getType())
      return false;
    return I->getType()->isPointerTy() || I->getType()->isIntegerTy();
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer sized.
    return true;
  case Instruction::IntToPtr:
    // We know the input is intptr_t, so this is foldable.
    return true;
  case Instruction::Add:
    return true;
  case Instruction::Mul:
  case Instruction::Shl:
    // Can only handle X*C and X << C.
    return isa<ConstantInt>(I->getOperand(1));
  case Instruction::GetElementPtr:
    return true;
  default:
    return false;
  }
}


/// MatchOperationAddr - Given an instruction or constant expr, see if we can
/// fold the operation into the addressing mode.  If so, update the addressing
/// mode and return true, otherwise return false without modifying AddrMode.
bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
                                               unsigned Depth) {
  // Avoid exponential behavior on extremely deep expression trees.
  if (Depth >= 5) return false;

  switch (Opcode) {
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer sized.
    return MatchAddr(AddrInst->getOperand(0), Depth);
  case Instruction::IntToPtr:
    // This inttoptr is a no-op if the integer type is pointer sized.
    if (TLI.getValueType(AddrInst->getOperand(0)->getType()) ==
        TLI.getPointerTy())
      return MatchAddr(AddrInst->getOperand(0), Depth);
    return false;
  case Instruction::BitCast:
    // BitCast is always a noop, and we can handle it as long as it is
    // int->int or pointer->pointer (we don't want int<->fp or something).
    if ((AddrInst->getOperand(0)->getType()->isPointerTy() ||
         AddrInst->getOperand(0)->getType()->isIntegerTy()) &&
        // Don't touch identity bitcasts.  These were probably put here by LSR,
        // and we don't want to mess around with them.  Assume it knows what it
        // is doing.
        AddrInst->getOperand(0)->getType() != AddrInst->getType())
      return MatchAddr(AddrInst->getOperand(0), Depth);
    return false;
  case Instruction::Add: {
    // Check to see if we can merge in the RHS then the LHS.  If so, we win.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();
    if (MatchAddr(AddrInst->getOperand(1), Depth+1) &&
        MatchAddr(AddrInst->getOperand(0), Depth+1))
      return true;

    // Restore the old addr mode info.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);

    // Otherwise this was over-aggressive.  Try merging in the LHS then the RHS.
    if (MatchAddr(AddrInst->getOperand(0), Depth+1) &&
        MatchAddr(AddrInst->getOperand(1), Depth+1))
      return true;

    // Otherwise we definitely can't merge the ADD in.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);
    break;
  }
  //case Instruction::Or:
  //  TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
  //break;
  case Instruction::Mul:
  case Instruction::Shl: {
    // Can only handle X*C and X << C.
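    // e.g. (illustrative) "mul i32 %x, 5" or "shl i32 %x, 3"; a shift left by
    // 3 is converted below into the equivalent multiply scale of 1<<3 = 8.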
    ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
    if (!RHS) return false;
    int64_t Scale = RHS->getSExtValue();
    if (Opcode == Instruction::Shl)
      Scale = 1LL << Scale;

    return MatchScaledValue(AddrInst->getOperand(0), Scale, Depth);
  }
  case Instruction::GetElementPtr: {
    // Scan the GEP, checking that it contains only constant offsets and at
    // most one variable offset.
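    // For example (illustrative), "getelementptr %struct.S* %p, i64 %i, i32 1"
    // decomposes into %i scaled by sizeof(%struct.S) plus the constant byte
    // offset of field 1 taken from the struct layout.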
    int VariableOperand = -1;
    unsigned VariableScale = 0;

    int64_t ConstantOffset = 0;
    const TargetData *TD = TLI.getTargetData();
    gep_type_iterator GTI = gep_type_begin(AddrInst);
    for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
      if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = TD->getStructLayout(STy);
        unsigned Idx =
          cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
        ConstantOffset += SL->getElementOffset(Idx);
      } else {
        uint64_t TypeSize = TD->getTypeAllocSize(GTI.getIndexedType());
        if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
          ConstantOffset += CI->getSExtValue()*TypeSize;
        } else if (TypeSize) {  // Scales of zero don't do anything.
          // We only allow one variable index at the moment.
          if (VariableOperand != -1)
            return false;

          // Remember the variable index.
          VariableOperand = i;
          VariableScale = TypeSize;
        }
      }
    }

    // A common case is for the GEP to only do a constant offset.  In this case,
    // just add it to the disp field and check validity.
    if (VariableOperand == -1) {
      AddrMode.BaseOffs += ConstantOffset;
      if (ConstantOffset == 0 ||
          TLI.isLegalAddressingMode(AddrMode, AccessTy)) {
        // Check to see if we can fold the base pointer in too.
        if (MatchAddr(AddrInst->getOperand(0), Depth+1))
          return true;
      }
      AddrMode.BaseOffs -= ConstantOffset;
      return false;
    }

    // Save the valid addressing mode in case we can't match.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // See if the scale and offset amounts are valid for this target.
    AddrMode.BaseOffs += ConstantOffset;

    // Match the base operand of the GEP.
    if (!MatchAddr(AddrInst->getOperand(0), Depth+1)) {
      // If it couldn't be matched, just stuff the value in a register.
      if (AddrMode.HasBaseReg) {
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
    }

    // Match the remaining variable portion of the GEP.
    if (!MatchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
                          Depth)) {
      // If it couldn't be matched, try stuffing the base into a register
      // instead of matching it, and retrying the match of the scale.
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      if (AddrMode.HasBaseReg)
        return false;
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
      AddrMode.BaseOffs += ConstantOffset;
      if (!MatchScaledValue(AddrInst->getOperand(VariableOperand),
                            VariableScale, Depth)) {
        // If even that didn't work, bail.
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
    }

    return true;
  }
  }
  return false;
}

/// MatchAddr - If we can, try to add the value of 'Addr' into the current
/// addressing mode.  If Addr can't be added to AddrMode this returns false and
/// leaves AddrMode unmodified.  This assumes that Addr is either a pointer type
/// or intptr_t for the target.
///
bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
    // Fold in immediates if legal for the target.
    AddrMode.BaseOffs += CI->getSExtValue();
    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
      return true;
    AddrMode.BaseOffs -= CI->getSExtValue();
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
    // If this is a global variable, try to fold it into the addressing mode.
    if (AddrMode.BaseGV == 0) {
      AddrMode.BaseGV = GV;
      if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
        return true;
      AddrMode.BaseGV = 0;
    }
  } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // Check to see if it is possible to fold this operation.
    if (MatchOperationAddr(I, I->getOpcode(), Depth)) {
      // Okay, it's possible to fold this.  Check to see if it is actually
      // *profitable* to do so.  We use a simple cost model to avoid increasing
      // register pressure too much.
      if (I->hasOneUse() ||
          IsProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
        AddrModeInsts.push_back(I);
        return true;
      }

      // It isn't profitable to do this, roll back.
      //cerr << "NOT FOLDING: " << *I;
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
    }
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
    if (MatchOperationAddr(CE, CE->getOpcode(), Depth))
      return true;
  } else if (isa<ConstantPointerNull>(Addr)) {
    // Null pointer gets folded without affecting the addressing mode.
    return true;
  }

  // Worst case, the target should support [reg] addressing modes. :)
  if (!AddrMode.HasBaseReg) {
    AddrMode.HasBaseReg = true;
    AddrMode.BaseReg = Addr;
    // Still check for legality in case the target supports [imm] but not [i+r].
    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
      return true;
    AddrMode.HasBaseReg = false;
    AddrMode.BaseReg = 0;
  }

  // If the base register is already taken, see if we can do [r+r].
  if (AddrMode.Scale == 0) {
    AddrMode.Scale = 1;
    AddrMode.ScaledReg = Addr;
    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
      return true;
    AddrMode.Scale = 0;
    AddrMode.ScaledReg = 0;
  }
  // Couldn't match.
  return false;
}


/// IsOperandAMemoryOperand - Check to see if all uses of OpVal by the specified
/// inline asm call are due to memory operands.  If so, return true, otherwise
/// return false.
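/// For example (illustrative), an operand bound to an indirect memory
/// constraint such as "=*m" keeps OpVal foldable; a register constraint like
/// "r" does not.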
static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
                                    const TargetLowering &TLI) {
  std::vector<InlineAsm::ConstraintInfo>
    Constraints = IA->ParseConstraints();

  unsigned ArgNo = 0;   // The argument of the CallInst.
  for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
    TargetLowering::AsmOperandInfo OpInfo(Constraints[i]);

    // Compute the value type for each operand.
    switch (OpInfo.Type) {
    case InlineAsm::isOutput:
      if (OpInfo.isIndirect)
        OpInfo.CallOperandVal = CI->getArgOperand(ArgNo++);
      break;
    case InlineAsm::isInput:
      OpInfo.CallOperandVal = CI->getArgOperand(ArgNo++);
      break;
    case InlineAsm::isClobber:
      // Nothing to do.
      break;
    }

    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(OpInfo, SDValue());

    // If this asm operand is our Value*, and if it isn't an indirect memory
    // operand, we can't fold it!
    if (OpInfo.CallOperandVal == OpVal &&
        (OpInfo.ConstraintType != TargetLowering::C_Memory ||
         !OpInfo.isIndirect))
      return false;
  }

  return true;
}


/// FindAllMemoryUses - Recursively walk all the uses of I until we find a
/// memory use.  If we find an obviously non-foldable instruction, return true.
/// Add the ultimately found memory instructions to MemoryUses.
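/// For example (illustrative), given "%p = inttoptr i32 %a to i32*" followed
/// by "load i32* %p", walking the uses of %a's defining instruction records
/// the load (paired with its pointer operand number) in MemoryUses.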
static bool FindAllMemoryUses(Instruction *I,
                SmallVectorImpl<std::pair<Instruction*,unsigned> > &MemoryUses,
                              SmallPtrSet<Instruction*, 16> &ConsideredInsts,
                              const TargetLowering &TLI) {
  // If we already considered this instruction, we're done.
  if (!ConsideredInsts.insert(I))
    return false;

  // If this is an obviously unfoldable instruction, bail out.
  if (!MightBeFoldableInst(I))
    return true;

  // Loop over all the uses, recursively processing them.
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
       UI != E; ++UI) {
    User *U = *UI;

    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      MemoryUses.push_back(std::make_pair(LI, UI.getOperandNo()));
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      unsigned opNo = UI.getOperandNo();
      if (opNo == 0) return true;  // Storing addr, not into addr.
      MemoryUses.push_back(std::make_pair(SI, opNo));
      continue;
    }

    if (CallInst *CI = dyn_cast<CallInst>(U)) {
      InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
      if (!IA) return true;

      // If this is a memory operand, we're cool, otherwise bail out.
      if (!IsOperandAMemoryOperand(CI, IA, I, TLI))
        return true;
      continue;
    }

    if (FindAllMemoryUses(cast<Instruction>(U), MemoryUses, ConsideredInsts,
                          TLI))
      return true;
  }

  return false;
}


/// ValueAlreadyLiveAtInst - Return true if Val is already known to be live at
/// the use site that we're folding it into.  If so, there is no cost to
/// include it in the addressing mode.  KnownLive1 and KnownLive2 are two values
/// that we know are live at the instruction already.
bool AddressingModeMatcher::ValueAlreadyLiveAtInst(Value *Val,Value *KnownLive1,
                                                   Value *KnownLive2) {
  // If Val is either of the known-live values, we know it is live!
  if (Val == 0 || Val == KnownLive1 || Val == KnownLive2)
    return true;

  // All values other than instructions and arguments (e.g. constants) are live.
  if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true;

  // If Val is a constant sized alloca in the entry block, it is live; this is
  // because it is just a reference to the stack/frame pointer, which is live
  // for the whole function.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
    if (AI->isStaticAlloca())
      return true;

  // Check to see if this value is already used in the memory instruction's
  // block.  If so, it's already live into the block at the very least, so we
  // can reasonably fold it.
  BasicBlock *MemBB = MemoryInst->getParent();
  for (Value::use_iterator UI = Val->use_begin(), E = Val->use_end();
       UI != E; ++UI)
    // We know that uses of arguments and instructions have to be instructions.
    if (cast<Instruction>(*UI)->getParent() == MemBB)
      return true;

  return false;
}


/// IsProfitableToFoldIntoAddressingMode - It is possible for the addressing
/// mode of the machine to fold the specified instruction into a load or store
/// that ultimately uses it.  However, the specified instruction has multiple
/// uses.  Given this, it may actually increase register pressure to fold it
/// into the load.  For example, consider this code:
///
///     X = ...
///     Y = X+1
///     use(Y)   -> nonload/store
///     Z = Y+1
///     load Z
///
/// In this case, Y has multiple uses, and can be folded into the load of Z
/// (yielding load [X+2]).  However, doing this will cause both "X" and "X+1" to
/// be live at the use(Y) line.  If we don't fold Y into load Z, we use one
/// fewer register.  Since Y can't be folded into "use(Y)" we don't increase the
/// number of computations either.
///
/// Note that this (like most of CodeGenPrepare) is just a rough heuristic.  If
/// X was live across 'load Z' for other reasons, we actually *would* want to
/// fold the addressing mode in the Z case.  This would make Y die earlier.
bool AddressingModeMatcher::
IsProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
                                     ExtAddrMode &AMAfter) {
  if (IgnoreProfitability) return true;

  // AMBefore is the addressing mode before this instruction was folded into it,
  // and AMAfter is the addressing mode after the instruction was folded.  Get
  // the set of registers referenced by AMAfter and subtract out those
  // referenced by AMBefore: this is the set of values which folding in this
  // address extends the lifetime of.
  //
  // Note that there are only two potential values being referenced here,
  // BaseReg and ScaleReg (global addresses are always available, as are any
  // folded immediates).
  Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;

  // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
  // lifetime wasn't extended by adding this instruction.
  if (ValueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    BaseReg = 0;
  if (ValueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    ScaledReg = 0;

  // If folding this instruction (and its subexpressions) didn't extend any live
  // ranges, we're ok with it.
  if (BaseReg == 0 && ScaledReg == 0)
    return true;

  // If all uses of this instruction are ultimately load/store/inlineasm's,
  // check to see if their addressing modes will include this instruction.  If
  // so, we can fold it into all uses, so it doesn't matter if it has multiple
  // uses.
  SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses;
  SmallPtrSet<Instruction*, 16> ConsideredInsts;
  if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI))
    return false;  // Has a non-memory, non-foldable use!

  // Now that we know that all uses of this instruction are part of a chain of
  // computation involving only operations that could theoretically be folded
  // into a memory use, loop over each of these uses and see if they could
  // *actually* fold the instruction.
  SmallVector<Instruction*, 32> MatchedAddrModeInsts;
  for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
    Instruction *User = MemoryUses[i].first;
    unsigned OpNo = MemoryUses[i].second;

    // Get the access type of this use.  If the use isn't a pointer, we don't
    // know what it accesses.
    Value *Address = User->getOperand(OpNo);
    if (!Address->getType()->isPointerTy())
      return false;
    const Type *AddressAccessTy =
      cast<PointerType>(Address->getType())->getElementType();

    // Do a match against the root of this address, ignoring profitability. This
    // will tell us if the addressing mode for the memory operation will
    // *actually* cover the shared instruction.
    ExtAddrMode Result;
    AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, AddressAccessTy,
                                  MemoryInst, Result);
    Matcher.IgnoreProfitability = true;
    bool Success = Matcher.MatchAddr(Address, 0);
    (void)Success; assert(Success && "Couldn't select *anything*?");

    // If the match didn't cover I, then it won't be shared by it.
    if (std::find(MatchedAddrModeInsts.begin(), MatchedAddrModeInsts.end(),
                  I) == MatchedAddrModeInsts.end())
      return false;

    MatchedAddrModeInsts.clear();
  }

  return true;
}