//===-- PPCTargetTransformInfo.cpp - PPC specific TTI ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "PPCTargetTransformInfo.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;

#define DEBUG_TYPE "ppctti"
static cl::opt<bool> DisablePPCConstHoist("disable-ppc-constant-hoisting",
    cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);

// This is currently only used for the data prefetch pass.
static cl::opt<unsigned>
CacheLineSize("ppc-loop-prefetch-cache-line", cl::Hidden, cl::init(64),
              cl::desc("The loop prefetch cache line size"));

static cl::opt<bool>
EnablePPCColdCC("ppc-enable-coldcc", cl::Hidden, cl::init(false),
                cl::desc("Enable using coldcc calling conv for cold "
                         "internal functions"));

static cl::opt<bool>
LsrNoInsnsCost("ppc-lsr-no-insns-cost", cl::Hidden, cl::init(false),
               cl::desc("Do not add instruction count to lsr cost model"));

// The latency of mtctr is only justified if there are more than 4
// comparisons that will be removed as a result.
static cl::opt<unsigned>
SmallCTRLoopThreshold("min-ctr-loop-threshold", cl::init(4), cl::Hidden,
                      cl::desc("Loops with a constant trip count smaller than "
                               "this value will not use the count register."));
//===----------------------------------------------------------------------===//
//
// PPC cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
PPCTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (ST->hasPOPCNTD() != PPCSubtarget::POPCNTD_Unavailable && TyWidth <= 64)
    return ST->hasPOPCNTD() == PPCSubtarget::POPCNTD_Slow ?
             TTI::PSK_SlowHardware : TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}
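  // popcntd is an optional instruction; on some cores it is present but
  // slow, in which case a software expansion may still be preferable.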

Optional<Instruction *>
PPCTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
  Intrinsic::ID IID = II.getIntrinsicID();
  switch (IID) {
  default:
    break;
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
    // Turn PPC lvx -> load if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(
            II.getArgOperand(0), Align(16), IC.getDataLayout(), &II,
            &IC.getAssumptionCache(), &IC.getDominatorTree()) >= 16) {
      Value *Ptr = IC.Builder.CreateBitCast(
          II.getArgOperand(0), PointerType::getUnqual(II.getType()));
      return new LoadInst(II.getType(), Ptr, "", false, Align(16));
    }
    break;
  case Intrinsic::ppc_vsx_lxvw4x:
  case Intrinsic::ppc_vsx_lxvd2x: {
    // Turn PPC VSX loads into normal loads.
    Value *Ptr = IC.Builder.CreateBitCast(
        II.getArgOperand(0), PointerType::getUnqual(II.getType()));
    return new LoadInst(II.getType(), Ptr, Twine(""), false, Align(1));
  }
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(
            II.getArgOperand(1), Align(16), IC.getDataLayout(), &II,
            &IC.getAssumptionCache(), &IC.getDominatorTree()) >= 16) {
      Type *OpPtrTy = PointerType::getUnqual(II.getArgOperand(0)->getType());
      Value *Ptr = IC.Builder.CreateBitCast(II.getArgOperand(1), OpPtrTy);
      return new StoreInst(II.getArgOperand(0), Ptr, false, Align(16));
    }
    break;
  case Intrinsic::ppc_vsx_stxvw4x:
  case Intrinsic::ppc_vsx_stxvd2x: {
    // Turn PPC VSX stores into normal stores.
    Type *OpPtrTy = PointerType::getUnqual(II.getArgOperand(0)->getType());
    Value *Ptr = IC.Builder.CreateBitCast(II.getArgOperand(1), OpPtrTy);
    return new StoreInst(II.getArgOperand(0), Ptr, false, Align(1));
  }
  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    // Note that ppc_altivec_vperm has a big-endian bias, so when creating
    // a vector shuffle for little endian, we must undo the transformation
    // performed on vec_perm in altivec.h. That is, we must complement
    // the permutation mask with respect to 31 and reverse the order of
    // V1 and V2.
    if (Constant *Mask = dyn_cast<Constant>(II.getArgOperand(2))) {
      assert(cast<FixedVectorType>(Mask->getType())->getNumElements() == 16 &&
             "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        Constant *Elt = Mask->getAggregateElement(i);
        if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 =
            IC.Builder.CreateBitCast(II.getArgOperand(0), Mask->getType());
        Value *Op1 =
            IC.Builder.CreateBitCast(II.getArgOperand(1), Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getAggregateElement(i)))
            continue;
          unsigned Idx =
              cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
          Idx &= 31; // Match the hardware behavior.
          if (DL.isLittleEndian())
            Idx = 31 - Idx;

          if (!ExtractedElts[Idx]) {
            Value *Op0ToUse = (DL.isLittleEndian()) ? Op1 : Op0;
            Value *Op1ToUse = (DL.isLittleEndian()) ? Op0 : Op1;
            ExtractedElts[Idx] = IC.Builder.CreateExtractElement(
                Idx < 16 ? Op0ToUse : Op1ToUse, IC.Builder.getInt32(Idx & 15));
          }

          // Insert this value into the result vector.
          Result = IC.Builder.CreateInsertElement(Result, ExtractedElts[Idx],
                                                  IC.Builder.getInt32(i));
        }
        return CastInst::Create(Instruction::BitCast, Result, II.getType());
      }
    }
    break;
  }
  return None;
}
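    // VSX loads have no alignment requirement, so give the replacement load
    // the weakest (byte) alignment.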
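    // For example, on little endian a mask byte of 0 is complemented to 31,
    // which, with the operands swapped, extracts byte 15 of the first source
    // vector.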

int PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
                              TTI::TargetCostKind CostKind) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Imm, Ty, CostKind);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Basic;

    if (isInt<32>(Imm.getSExtValue())) {
      // A constant that can be materialized using lis.
      if ((Imm.getZExtValue() & 0xFFFF) == 0)
        return TTI::TCC_Basic;

      return 2 * TTI::TCC_Basic;
    }
  }

  return 4 * TTI::TCC_Basic;
}
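  // Anything wider requires a longer materialization sequence (up to five
  // instructions on 64-bit targets, e.g. lis, ori, rldicr, oris, ori).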

int PPCTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TTI::TargetCostKind CostKind) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return PPCTTIImpl::getIntImmCost(Imm, Ty, CostKind);
}
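  // Immediates on most intrinsics never need to be materialized, so they are
  // free; the cases below can only fold particular operands.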

int PPCTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                  const APInt &Imm, Type *Ty,
                                  TTI::TargetCostKind CostKind,
                                  Instruction *Inst) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind, Inst);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  unsigned ImmIdx = ~0U;
  bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
       ZeroFree = false;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::And:
    RunFree = true; // (for the rotate-and-mask instructions)
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::Or:
  case Instruction::Xor:
    ShiftedFree = true;
    LLVM_FALLTHROUGH;
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    ImmIdx = 1;
    break;
  case Instruction::ICmp:
    UnsignedFree = true;
    ImmIdx = 1;
    // Zero comparisons can use record-form instructions.
    LLVM_FALLTHROUGH;
  case Instruction::Select:
    ZeroFree = true;
    break;
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Ret:
  case Instruction::Load:
  case Instruction::Store:
    break;
  }

  if (ZeroFree && Imm == 0)
    return TTI::TCC_Free;

  if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;

    if (RunFree) {
      if (Imm.getBitWidth() <= 32 &&
          (isShiftedMask_32(Imm.getZExtValue()) ||
           isShiftedMask_32(~Imm.getZExtValue())))
        return TTI::TCC_Free;

      if (ST->isPPC64() &&
          (isShiftedMask_64(Imm.getZExtValue()) ||
           isShiftedMask_64(~Imm.getZExtValue())))
        return TTI::TCC_Free;
    }

    if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
      return TTI::TCC_Free;

    if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
      return TTI::TCC_Free;
  }

  return PPCTTIImpl::getIntImmCost(Imm, Ty, CostKind);
}
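    // A mask that is a contiguous run of ones (or the complement of one) can
    // be applied with a single rotate-and-mask instruction such as rlwinm or
    // rldicl.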

unsigned
PPCTTIImpl::getUserCost(const User *U, ArrayRef<const Value *> Operands,
                        TTI::TargetCostKind CostKind) {
  // getCastInstrCost and getMemoryOpCost already perform the vector
  // adjustment, so don't apply it again here.
  if (isa<CastInst>(U) || isa<LoadInst>(U) || isa<StoreInst>(U))
    return BaseT::getUserCost(U, Operands, CostKind);

  if (U->getType()->isVectorTy()) {
    // Instructions that need to be split should cost more.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, U->getType());
    return LT.first * BaseT::getUserCost(U, Operands, CostKind);
  }

  return BaseT::getUserCost(U, Operands, CostKind);
}

// Determining the address of a TLS variable results in a function call in
// certain TLS models.
static bool memAddrUsesCTR(const Value *MemAddr, const PPCTargetMachine &TM,
                           SmallPtrSetImpl<const Value *> &Visited) {
  // No need to traverse again if we already checked this operand.
  if (!Visited.insert(MemAddr).second)
    return false;
  const auto *GV = dyn_cast<GlobalValue>(MemAddr);
  if (!GV) {
    // Recurse to check for constants that refer to TLS global variables.
    if (const auto *CV = dyn_cast<Constant>(MemAddr))
      for (const auto &CO : CV->operands())
        if (memAddrUsesCTR(CO, TM, Visited))
          return true;
    return false;
  }

  if (!GV->isThreadLocal())
    return false;
  TLSModel::Model Model = TM.getTLSModel(GV);
  return Model == TLSModel::GeneralDynamic || Model == TLSModel::LocalDynamic;
}
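  // General-dynamic and local-dynamic TLS accesses are lowered to a call to
  // __tls_get_addr, and a call clobbers the count register.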

bool PPCTTIImpl::mightUseCTR(BasicBlock *BB, TargetLibraryInfo *LibInfo,
                             SmallPtrSetImpl<const Value *> &Visited) {
  const PPCTargetMachine &TM = ST->getTargetMachine();

  // Loop through the inline asm constraints and look for something that
  // clobbers ctr.
  auto asmClobbersCTR = [](InlineAsm *IA) {
    InlineAsm::ConstraintInfoVector CIV = IA->ParseConstraints();
    for (unsigned i = 0, ie = CIV.size(); i < ie; ++i) {
      InlineAsm::ConstraintInfo &C = CIV[i];
      if (C.Type != InlineAsm::isInput)
        for (unsigned j = 0, je = C.Codes.size(); j < je; ++j)
          if (StringRef(C.Codes[j]).equals_lower("{ctr}"))
            return true;
    }
    return false;
  };

  auto isLargeIntegerTy = [](bool Is32Bit, Type *Ty) {
    if (IntegerType *ITy = dyn_cast<IntegerType>(Ty))
      return ITy->getBitWidth() > (Is32Bit ? 32U : 64U);

    return false;
  };

  auto supportedHalfPrecisionOp = [](Instruction *Inst) {
    switch (Inst->getOpcode()) {
    default:
      return false;
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::Load:
    case Instruction::Store:
    case Instruction::FPToUI:
    case Instruction::UIToFP:
    case Instruction::FPToSI:
    case Instruction::SIToFP:
      return true;
    }
  };
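  // Operations on integers wider than a GPR (e.g. division or variable
  // shifts) may be lowered to runtime library calls, which cannot appear
  // inside a CTR loop.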

  for (BasicBlock::iterator J = BB->begin(), JE = BB->end();
       J != JE; ++J) {
    // There are no direct operations on half precision so assume that
    // anything with that type requires a call except for a few select
    // operations with Power9.
    if (Instruction *CurrInst = dyn_cast<Instruction>(J)) {
      for (const auto &Op : CurrInst->operands()) {
        if (Op->getType()->getScalarType()->isHalfTy() ||
            CurrInst->getType()->getScalarType()->isHalfTy())
          return !(ST->isISA3_0() && supportedHalfPrecisionOp(CurrInst));
      }
    }
    if (CallInst *CI = dyn_cast<CallInst>(J)) {
      // Inline ASM is okay, unless it clobbers the ctr register.
      if (InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand())) {
        if (asmClobbersCTR(IA))
          return true;
        continue;
      }

      if (Function *F = CI->getCalledFunction()) {
        // Most intrinsics don't become function calls, but some might.
        // sin, cos, exp and log are always calls.
        unsigned Opcode = 0;
        if (F->getIntrinsicID() != Intrinsic::not_intrinsic) {
          switch (F->getIntrinsicID()) {
          default: continue;
          // If we have a call to loop_decrement or set_loop_iterations,
          // we're definitely using CTR.
          case Intrinsic::set_loop_iterations:
          case Intrinsic::loop_decrement:
            return true;

          // Binary operations on 128-bit floating-point values become
          // library calls and therefore clobber the CTR.
          case Intrinsic::experimental_constrained_fadd:
          case Intrinsic::experimental_constrained_fsub:
          case Intrinsic::experimental_constrained_fmul:
          case Intrinsic::experimental_constrained_fdiv:
          case Intrinsic::experimental_constrained_frem:
            if (F->getType()->getScalarType()->isFP128Ty() ||
                F->getType()->getScalarType()->isPPC_FP128Ty())
              return true;
            break;

          case Intrinsic::experimental_constrained_fptosi:
          case Intrinsic::experimental_constrained_fptoui:
          case Intrinsic::experimental_constrained_sitofp:
          case Intrinsic::experimental_constrained_uitofp: {
            Type *SrcType = CI->getArgOperand(0)->getType()->getScalarType();
            Type *DstType = CI->getType()->getScalarType();
            if (SrcType->isPPC_FP128Ty() || DstType->isPPC_FP128Ty() ||
                isLargeIntegerTy(!TM.isPPC64(), SrcType) ||
                isLargeIntegerTy(!TM.isPPC64(), DstType))
              return true;
            break;
          }

          // Exclude eh_sjlj_setjmp; we don't need to exclude eh_sjlj_longjmp
          // because, although it does clobber the counter register, the
          // control can't then return to inside the loop unless there is also
          // an eh_sjlj_setjmp.
          case Intrinsic::eh_sjlj_setjmp:

          case Intrinsic::memcpy:
          case Intrinsic::memmove:
          case Intrinsic::memset:
          case Intrinsic::powi:
          case Intrinsic::log:
          case Intrinsic::log2:
          case Intrinsic::log10:
          case Intrinsic::exp:
          case Intrinsic::exp2:
          case Intrinsic::pow:
          case Intrinsic::sin:
          case Intrinsic::cos:
          case Intrinsic::experimental_constrained_powi:
          case Intrinsic::experimental_constrained_log:
          case Intrinsic::experimental_constrained_log2:
          case Intrinsic::experimental_constrained_log10:
          case Intrinsic::experimental_constrained_exp:
          case Intrinsic::experimental_constrained_exp2:
          case Intrinsic::experimental_constrained_pow:
          case Intrinsic::experimental_constrained_sin:
          case Intrinsic::experimental_constrained_cos:
            return true;
          case Intrinsic::copysign:
            if (CI->getArgOperand(0)->getType()->getScalarType()->
                isPPC_FP128Ty())
              return true;
            else
              continue; // ISD::FCOPYSIGN is never a library call.
          case Intrinsic::fma:                Opcode = ISD::FMA;        break;
          case Intrinsic::sqrt:               Opcode = ISD::FSQRT;      break;
          case Intrinsic::floor:              Opcode = ISD::FFLOOR;     break;
          case Intrinsic::ceil:               Opcode = ISD::FCEIL;      break;
          case Intrinsic::trunc:              Opcode = ISD::FTRUNC;     break;
          case Intrinsic::rint:               Opcode = ISD::FRINT;      break;
          case Intrinsic::lrint:              Opcode = ISD::LRINT;      break;
          case Intrinsic::llrint:             Opcode = ISD::LLRINT;     break;
          case Intrinsic::nearbyint:          Opcode = ISD::FNEARBYINT; break;
          case Intrinsic::round:              Opcode = ISD::FROUND;     break;
          case Intrinsic::lround:             Opcode = ISD::LROUND;     break;
          case Intrinsic::llround:            Opcode = ISD::LLROUND;    break;
          case Intrinsic::minnum:             Opcode = ISD::FMINNUM;    break;
          case Intrinsic::maxnum:             Opcode = ISD::FMAXNUM;    break;
          case Intrinsic::experimental_constrained_fcmp:
            Opcode = ISD::STRICT_FSETCC;
            break;
          case Intrinsic::experimental_constrained_fcmps:
            Opcode = ISD::STRICT_FSETCCS;
            break;
          case Intrinsic::experimental_constrained_fma:
            Opcode = ISD::STRICT_FMA;
            break;
          case Intrinsic::experimental_constrained_sqrt:
            Opcode = ISD::STRICT_FSQRT;
            break;
          case Intrinsic::experimental_constrained_floor:
            Opcode = ISD::STRICT_FFLOOR;
            break;
          case Intrinsic::experimental_constrained_ceil:
            Opcode = ISD::STRICT_FCEIL;
            break;
          case Intrinsic::experimental_constrained_trunc:
            Opcode = ISD::STRICT_FTRUNC;
            break;
          case Intrinsic::experimental_constrained_rint:
            Opcode = ISD::STRICT_FRINT;
            break;
          case Intrinsic::experimental_constrained_lrint:
            Opcode = ISD::STRICT_LRINT;
            break;
          case Intrinsic::experimental_constrained_llrint:
            Opcode = ISD::STRICT_LLRINT;
            break;
          case Intrinsic::experimental_constrained_nearbyint:
            Opcode = ISD::STRICT_FNEARBYINT;
            break;
          case Intrinsic::experimental_constrained_round:
            Opcode = ISD::STRICT_FROUND;
            break;
          case Intrinsic::experimental_constrained_lround:
            Opcode = ISD::STRICT_LROUND;
            break;
          case Intrinsic::experimental_constrained_llround:
            Opcode = ISD::STRICT_LLROUND;
            break;
          case Intrinsic::experimental_constrained_minnum:
            Opcode = ISD::STRICT_FMINNUM;
            break;
          case Intrinsic::experimental_constrained_maxnum:
            Opcode = ISD::STRICT_FMAXNUM;
            break;
          case Intrinsic::umul_with_overflow: Opcode = ISD::UMULO;      break;
          case Intrinsic::smul_with_overflow: Opcode = ISD::SMULO;      break;
          }
        }

        // PowerPC does not use [US]DIVREM or other library calls for
        // operations on regular types which are not otherwise library calls
        // (i.e. soft float or atomics). If adapting for targets that do,
        // additional care is required here.

        LibFunc Func;
        if (!F->hasLocalLinkage() && F->hasName() && LibInfo &&
            LibInfo->getLibFunc(F->getName(), Func) &&
            LibInfo->hasOptimizedCodeGen(Func)) {
          // Non-read-only functions are never treated as intrinsics.
          if (!CI->onlyReadsMemory())
            return true;

          // Conversion happens only for FP calls.
          if (!CI->getArgOperand(0)->getType()->isFloatingPointTy())
            return true;

          switch (Func) {
          default: return true;
          case LibFunc_copysign:
          case LibFunc_copysignf:
            continue; // ISD::FCOPYSIGN is never a library call.
          case LibFunc_copysignl:
            return true;
          case LibFunc_fabs:
          case LibFunc_fabsf:
          case LibFunc_fabsl:
            continue; // ISD::FABS is never a library call.
          case LibFunc_sqrt:
          case LibFunc_sqrtf:
          case LibFunc_sqrtl:
            Opcode = ISD::FSQRT; break;
          case LibFunc_floor:
          case LibFunc_floorf:
          case LibFunc_floorl:
            Opcode = ISD::FFLOOR; break;
          case LibFunc_nearbyint:
          case LibFunc_nearbyintf:
          case LibFunc_nearbyintl:
            Opcode = ISD::FNEARBYINT; break;
          case LibFunc_ceil:
          case LibFunc_ceilf:
          case LibFunc_ceill:
            Opcode = ISD::FCEIL; break;
          case LibFunc_rint:
          case LibFunc_rintf:
          case LibFunc_rintl:
            Opcode = ISD::FRINT; break;
          case LibFunc_round:
          case LibFunc_roundf:
          case LibFunc_roundl:
            Opcode = ISD::FROUND; break;
          case LibFunc_trunc:
          case LibFunc_truncf:
          case LibFunc_truncl:
            Opcode = ISD::FTRUNC; break;
          case LibFunc_fmin:
          case LibFunc_fminf:
          case LibFunc_fminl:
            Opcode = ISD::FMINNUM; break;
          case LibFunc_fmax:
          case LibFunc_fmaxf:
          case LibFunc_fmaxl:
            Opcode = ISD::FMAXNUM; break;
          }
        }

        if (Opcode) {
          EVT EVTy =
              TLI->getValueType(DL, CI->getArgOperand(0)->getType(), true);

          if (EVTy == MVT::Other)
            return true;

          if (TLI->isOperationLegalOrCustom(Opcode, EVTy))
            continue;
          else if (EVTy.isVector() &&
                   TLI->isOperationLegalOrCustom(Opcode, EVTy.getScalarType()))
            continue;

          return true;
        }
      }

      return true;
    } else if (isa<BinaryOperator>(J) &&
               (J->getType()->getScalarType()->isFP128Ty() ||
                J->getType()->getScalarType()->isPPC_FP128Ty())) {
      // Most operations on f128 or ppc_f128 values become calls.
      return true;
    } else if (isa<UIToFPInst>(J) || isa<SIToFPInst>(J) ||
               isa<FPToUIInst>(J) || isa<FPToSIInst>(J)) {
      CastInst *CI = cast<CastInst>(J);
      if (CI->getSrcTy()->getScalarType()->isPPC_FP128Ty() ||
          CI->getDestTy()->getScalarType()->isPPC_FP128Ty() ||
          isLargeIntegerTy(!TM.isPPC64(), CI->getSrcTy()->getScalarType()) ||
          isLargeIntegerTy(!TM.isPPC64(), CI->getDestTy()->getScalarType()))
        return true;
    } else if (isLargeIntegerTy(!TM.isPPC64(),
                                J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::UDiv ||
                J->getOpcode() == Instruction::SDiv ||
                J->getOpcode() == Instruction::URem ||
                J->getOpcode() == Instruction::SRem)) {
      return true;
    } else if (!TM.isPPC64() &&
               isLargeIntegerTy(false, J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::Shl ||
                J->getOpcode() == Instruction::AShr ||
                J->getOpcode() == Instruction::LShr)) {
      // Only on PPC32, for 128-bit integers (specifically not 64-bit
      // integers), these might be runtime calls.
      return true;
    } else if (isa<IndirectBrInst>(J) || isa<InvokeInst>(J)) {
      // On PowerPC, indirect jumps use the counter register.
      return true;
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(J)) {
      if (SI->getNumCases() + 1 >= (unsigned)TLI->getMinimumJumpTableEntries())
        return true;
    }

    // FREM is always a call.
    if (J->getOpcode() == Instruction::FRem)
      return true;

    if (ST->useSoftFloat()) {
      switch (J->getOpcode()) {
      case Instruction::FAdd:
      case Instruction::FSub:
      case Instruction::FMul:
      case Instruction::FDiv:
      case Instruction::FPTrunc:
      case Instruction::FPExt:
      case Instruction::FPToUI:
      case Instruction::FPToSI:
      case Instruction::UIToFP:
      case Instruction::SIToFP:
      case Instruction::FCmp:
        return true;
      }
    }

    for (Value *Operand : J->operands())
      if (memAddrUsesCTR(Operand, TM, Visited))
        return true;
  }

  return false;
}
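      // A switch large enough to become a jump table is lowered to an
      // indirect branch, which on PowerPC uses the counter register.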

bool PPCTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                          AssumptionCache &AC,
                                          TargetLibraryInfo *LibInfo,
                                          HardwareLoopInfo &HWLoopInfo) {
  const PPCTargetMachine &TM = ST->getTargetMachine();
  TargetSchedModel SchedModel;
  SchedModel.init(ST);

  // Do not convert short loops with a small constant trip count to CTR loops.
  unsigned ConstTripCount = SE.getSmallConstantTripCount(L);
  if (ConstTripCount && ConstTripCount < SmallCTRLoopThreshold) {
    SmallPtrSet<const Value *, 32> EphValues;
    CodeMetrics::collectEphemeralValues(L, &AC, EphValues);
    CodeMetrics Metrics;
    for (BasicBlock *BB : L->blocks())
      Metrics.analyzeBasicBlock(BB, *this, EphValues);
    // 6 is an approximate latency for the mtctr instruction.
    if (Metrics.NumInsts <= (6 * SchedModel.getIssueWidth()))
      return false;
  }

  // We don't want to spill/restore the counter register, and so we don't
  // want to use the counter register if the loop contains calls.
  SmallPtrSet<const Value *, 4> Visited;
  for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
       I != IE; ++I)
    if (mightUseCTR(*I, LibInfo, Visited))
      return false;

  SmallVector<BasicBlock*, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // If there is an exit edge known to be frequently taken,
  // we should not transform this loop.
  for (auto &BB : ExitingBlocks) {
    Instruction *TI = BB->getTerminator();
    if (!TI) continue;

    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      uint64_t TrueWeight = 0, FalseWeight = 0;
      if (!BI->isConditional() ||
          !BI->extractProfMetadata(TrueWeight, FalseWeight))
        continue;

      // If the exit path is more frequent than the loop path,
      // we return here without further analysis for this loop.
      bool TrueIsExit = !L->contains(BI->getSuccessor(0));
      if (( TrueIsExit && FalseWeight < TrueWeight) ||
          (!TrueIsExit && FalseWeight > TrueWeight))
        return false;
    }
  }

  // If an exit block has a PHI that accesses a TLS variable as one of the
  // incoming values from the loop, we cannot produce a CTR loop because the
  // address for that value will be computed in the loop.
  SmallVector<BasicBlock *, 4> ExitBlocks;
  L->getExitBlocks(ExitBlocks);
  for (auto &BB : ExitBlocks) {
    for (auto &PHI : BB->phis()) {
      for (int Idx = 0, EndIdx = PHI.getNumIncomingValues(); Idx < EndIdx;
           Idx++) {
        const BasicBlock *IncomingBB = PHI.getIncomingBlock(Idx);
        const Value *IncomingValue = PHI.getIncomingValue(Idx);
        if (L->contains(IncomingBB) &&
            memAddrUsesCTR(IncomingValue, TM, Visited))
          return false;
      }
    }
  }

  LLVMContext &C = L->getHeader()->getContext();
  HWLoopInfo.CountType = TM.isPPC64() ?
    Type::getInt64Ty(C) : Type::getInt32Ty(C);
  HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
  return true;
}

void PPCTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  if (ST->getCPUDirective() == PPC::DIR_A2) {
    // The A2 is in-order with a deep pipeline, and concatenation unrolling
    // helps expose latency-hiding opportunities to the instruction scheduler.
    UP.Partial = UP.Runtime = true;

    // We unroll a lot on the A2 (hundreds of instructions), and the benefits
    // often outweigh the cost of a division to compute the trip count.
    UP.AllowExpensiveTripCount = true;
  }

  BaseT::getUnrollingPreferences(L, SE, UP);
}

void PPCTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                       TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
}

// This function returns true to allow using coldcc calling convention.
// Returning true results in coldcc being used for functions which are cold at
// all call sites when the callers of the functions are not calling any other
// non coldcc functions.
bool PPCTTIImpl::useColdCCForColdCall(Function &F) {
  return EnablePPCColdCC;
}

bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) {
  // On the A2, always unroll aggressively.
  if (ST->getCPUDirective() == PPC::DIR_A2)
    return true;

  return LoopHasReductions;
}

PPCTTIImpl::TTI::MemCmpExpansionOptions
PPCTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TTI::MemCmpExpansionOptions Options;
  Options.LoadSizes = {8, 4, 2, 1};
  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
  return Options;
}

bool PPCTTIImpl::enableInterleavedAccessVectorization() {
  return true;
}
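  // Prefer the widest legal scalar loads first when expanding memcmp inline.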

unsigned PPCTTIImpl::getNumberOfRegisters(unsigned ClassID) const {
  assert(ClassID == GPRRC || ClassID == FPRRC ||
         ClassID == VRRC || ClassID == VSXRC);
  if (ST->hasVSX()) {
    assert(ClassID == GPRRC || ClassID == VSXRC || ClassID == VRRC);
    return ClassID == VSXRC ? 64 : 32;
  }
  assert(ClassID == GPRRC || ClassID == FPRRC || ClassID == VRRC);
  return 32;
}

unsigned PPCTTIImpl::getRegisterClassForType(bool Vector, Type *Ty) const {
  if (Vector)
    return ST->hasVSX() ? VSXRC : VRRC;
  else if (Ty && (Ty->getScalarType()->isFloatTy() ||
                  Ty->getScalarType()->isDoubleTy()))
    return ST->hasVSX() ? VSXRC : FPRRC;
  else if (Ty && (Ty->getScalarType()->isFP128Ty() ||
                  Ty->getScalarType()->isPPC_FP128Ty()))
    return VRRC;
  else if (Ty && Ty->getScalarType()->isHalfTy())
    return VSXRC;
  else
    return GPRRC;
}

const char* PPCTTIImpl::getRegisterClassName(unsigned ClassID) const {
  switch (ClassID) {
  default:
    llvm_unreachable("unknown register class");
    return "PPC::unknown register class";
  case GPRRC: return "PPC::GPRRC";
  case FPRRC: return "PPC::FPRRC";
  case VRRC:  return "PPC::VRRC";
  case VSXRC: return "PPC::VSXRC";
  }
}

unsigned PPCTTIImpl::getRegisterBitWidth(bool Vector) const {
  if (Vector) {
    if (ST->hasAltivec()) return 128;
    return 0;
  }

  if (ST->isPPC64())
    return 64;
  return 32;
}
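    // With VSX, the 64 VSX registers overlap the 32 FPRs and the 32 Altivec
    // registers, so FPRRC is subsumed by VSXRC.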
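    // Altivec/VSX vector registers are 128 bits wide.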

unsigned PPCTTIImpl::getCacheLineSize() const {
  // Check first if the user specified a custom line size.
  if (CacheLineSize.getNumOccurrences() > 0)
    return CacheLineSize;

  // Starting with P7 we have a cache line size of 128.
  unsigned Directive = ST->getCPUDirective();
  // Assume that Future CPU has the same cache line size as the others.
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9 || Directive == PPC::DIR_PWR10 ||
      Directive == PPC::DIR_PWR_FUTURE)
    return 128;

  // On other processors return a default of 64 bytes.
  return 64;
}

unsigned PPCTTIImpl::getPrefetchDistance() const {
  return 300;
}

unsigned PPCTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  unsigned Directive = ST->getCPUDirective();
  // The 440 has no SIMD support, but floating-point instructions
  // have a 5-cycle latency, so unroll by 5x for latency hiding.
  if (Directive == PPC::DIR_440)
    return 5;

  // The A2 has no SIMD support, but floating-point instructions
  // have a 6-cycle latency, so unroll by 6x for latency hiding.
  if (Directive == PPC::DIR_A2)
    return 6;

  // FIXME: For lack of any better information, do no harm...
  if (Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500)
    return 1;

  // For P7 and P8, floating-point instructions have a 6-cycle latency and
  // there are two execution units, so unroll by 12x for latency hiding.
  // FIXME: the same for P9 as previous gen until POWER9 scheduling is ready
  // FIXME: the same for P10 as previous gen until POWER10 scheduling is ready
  // Assume that future is the same as the others.
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9 || Directive == PPC::DIR_PWR10 ||
      Directive == PPC::DIR_PWR_FUTURE)
    return 12;

  // For most things, modern systems have two execution units (and
  // out-of-order execution).
  return 2;
}
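  // The default prefetch distance, in instructions, used by the loop data
  // prefetch pass.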

// Adjust the cost of vector instructions on targets in which there is overlap
// between the vector and scalar units, thereby reducing the overall throughput
// of vector code wrt. scalar code.
int PPCTTIImpl::vectorCostAdjustment(int Cost, unsigned Opcode, Type *Ty1,
                                     Type *Ty2) {
  if (!ST->vectorsUseTwoUnits() || !Ty1->isVectorTy())
    return Cost;

  std::pair<int, MVT> LT1 = TLI->getTypeLegalizationCost(DL, Ty1);
  // If type legalization involves splitting the vector, we don't want to
  // double the cost at every step - only the last step.
  if (LT1.first != 1 || !LT1.second.isVector())
    return Cost;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  if (TLI->isOperationExpand(ISD, LT1.second))
    return Cost;

  if (Ty2) {
    std::pair<int, MVT> LT2 = TLI->getTypeLegalizationCost(DL, Ty2);
    if (LT2.first != 1 || !LT2.second.isVector())
      return Cost;
  }

  return Cost * 2;
}

int PPCTTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                       TTI::TargetCostKind CostKind,
                                       TTI::OperandValueKind Op1Info,
                                       TTI::OperandValueKind Op2Info,
                                       TTI::OperandValueProperties Opd1PropInfo,
                                       TTI::OperandValueProperties Opd2PropInfo,
                                       ArrayRef<const Value *> Args,
                                       const Instruction *CxtI) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Opd1PropInfo,
                                         Opd2PropInfo, Args, CxtI);

  // Fallback to the default implementation.
  int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                           Op2Info,
                                           Opd1PropInfo, Opd2PropInfo);
  return vectorCostAdjustment(Cost, Opcode, Ty, nullptr);
}

int PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // PPC, for both Altivec/VSX, support cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). We need one such shuffle instruction for each actual
  // register (this is not true for arbitrary shuffles, but is true for the
  // structured types of shuffles covered by TTI::ShuffleKind).
  return vectorCostAdjustment(LT.first, Instruction::ShuffleVector, Tp,
                              nullptr);
}

int PPCTTIImpl::getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind) {
  if (CostKind != TTI::TCK_RecipThroughput)
    return Opcode == Instruction::PHI ? 0 : 1;
  // Branches are assumed to be predicted.
  return 0;
}

int PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 TTI::CastContextHint CCH,
                                 TTI::TargetCostKind CostKind,
                                 const Instruction *I) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  int Cost = BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
  Cost = vectorCostAdjustment(Cost, Opcode, Dst, Src);
  // TODO: Allow non-throughput costs that aren't binary.
  if (CostKind != TTI::TCK_RecipThroughput)
    return Cost == 0 ? 0 : 1;
  return Cost;
}

int PPCTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                   CmpInst::Predicate VecPred,
                                   TTI::TargetCostKind CostKind,
                                   const Instruction *I) {
  int Cost =
      BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return Cost;
  return vectorCostAdjustment(Cost, Opcode, ValTy, nullptr);
}

int PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  int Cost = BaseT::getVectorInstrCost(Opcode, Val, Index);
  Cost = vectorCostAdjustment(Cost, Opcode, Val, nullptr);

  if (ST->hasVSX() && Val->getScalarType()->isDoubleTy()) {
    // Double-precision scalars are already located in index #0 (or #1 if LE).
    if (ISD == ISD::EXTRACT_VECTOR_ELT &&
        Index == (ST->isLittleEndian() ? 1 : 0))
      return 0;

    return Cost;

  } else if (Val->getScalarType()->isIntegerTy() && Index != -1U) {
    if (ST->hasP9Altivec()) {
      if (ISD == ISD::INSERT_VECTOR_ELT)
        // A move-to VSR and a permute/insert. Assume vector operation cost
        // for both (cost will be 2x on P9).
        return vectorCostAdjustment(2, Opcode, Val, nullptr);

      // It's an extract. Maybe we can do a cheap move-from VSR.
      unsigned EltSize = Val->getScalarSizeInBits();
      if (EltSize == 64) {
        unsigned MfvsrdIndex = ST->isLittleEndian() ? 1 : 0;
        if (Index == MfvsrdIndex)
          return 1;
      } else if (EltSize == 32) {
        unsigned MfvsrwzIndex = ST->isLittleEndian() ? 2 : 1;
        if (Index == MfvsrwzIndex)
          return 1;
      }

      // We need a vector extract (or mfvsrld). Assume vector operation cost.
      // The cost of the load constant for a vector extract is disregarded
      // (invariant, easily schedulable).
      return vectorCostAdjustment(1, Opcode, Val, nullptr);

    } else if (ST->hasDirectMove())
      // Assume permute has standard cost.
      // Assume move-to/move-from VSR have 2x standard cost.
      return 3;
  }

  // Estimated cost of a load-hit-store delay. This was obtained
  // experimentally as a minimum needed to prevent unprofitable
  // vectorization for the paq8p benchmark. It may need to be
  // raised further if other unprofitable cases remain.
  unsigned LHSPenalty = 2;
  if (ISD == ISD::INSERT_VECTOR_ELT)
    LHSPenalty += 7;

  // Vector element insert/extract with Altivec is very expensive,
  // because they require store and reload with the attendant
  // processor stall for load-hit-store. Until VSX is available,
  // these need to be estimated as very costly.
  if (ISD == ISD::EXTRACT_VECTOR_ELT ||
      ISD == ISD::INSERT_VECTOR_ELT)
    return LHSPenalty + Cost;

  return Cost;
}
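      // mfvsrd (64-bit) and mfvsrwz (32-bit) can only move the element that
      // already sits in the right lane of the VSR directly to a GPR.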

int PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                MaybeAlign Alignment, unsigned AddressSpace,
                                TTI::TargetCostKind CostKind,
                                const Instruction *I) {
  if (TLI->getValueType(DL, Src, true) == MVT::Other)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind);
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  int Cost = BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                    CostKind);
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return Cost;

  Cost = vectorCostAdjustment(Cost, Opcode, Src, nullptr);

  bool IsAltivecType = ST->hasAltivec() &&
                       (LT.second == MVT::v16i8 || LT.second == MVT::v8i16 ||
                        LT.second == MVT::v4i32 || LT.second == MVT::v4f32);
  bool IsVSXType = ST->hasVSX() &&
                   (LT.second == MVT::v2f64 || LT.second == MVT::v2i64);

  // VSX has 32b/64b load instructions. Legalization can handle loading of
  // 32b/64b to VSR correctly and cheaply. But BaseT::getMemoryOpCost and
  // PPCTargetLowering can't compute the cost appropriately. So here we
  // explicitly check this case.
  unsigned MemBits = Src->getPrimitiveSizeInBits();
  if (Opcode == Instruction::Load && ST->hasVSX() && IsAltivecType &&
      (MemBits == 64 || (ST->hasP8Vector() && MemBits == 32)))
    return 1;

  // Aligned loads and stores are easy.
  unsigned SrcBytes = LT.second.getStoreSize();
  if (!SrcBytes || !Alignment || *Alignment >= SrcBytes)
    return Cost;

  // If we can use the permutation-based load sequence, then this is also
  // relatively cheap (not counting loop-invariant instructions): one load plus
  // one permute (the last load in a series has extra cost, but we're
  // neglecting that here). Note that on the P7, we could do unaligned loads
  // for Altivec types using the VSX instructions, but that's more expensive
  // than using the permutation-based load sequence. On the P8, that's no
  // longer true.
  if (Opcode == Instruction::Load && (!ST->hasP8Vector() && IsAltivecType) &&
      *Alignment >= LT.second.getScalarType().getStoreSize())
    return Cost + LT.first; // Add the cost of the permutations.

  // For VSX, we can do unaligned loads and stores on Altivec/VSX types. On the
  // P7, unaligned vector loads are more expensive than the permutation-based
  // load sequence, so that might be used instead, but regardless, the net cost
  // is about the same (not counting loop-invariant instructions).
  if (IsVSXType || (ST->hasVSX() && IsAltivecType))
    return Cost;

  // Newer PPC supports unaligned memory access.
  if (TLI->allowsMisalignedMemoryAccesses(LT.second, 0))
    return Cost;

  // PPC in general does not support unaligned loads and stores. They'll need
  // to be decomposed based on the alignment factor.

  // Add the cost of each scalar load or store.
  assert(Alignment);
  Cost += LT.first * ((SrcBytes / Alignment->value()) - 1);

  // For a vector type, there is also scalarization overhead (only for
  // stores, loads are expanded using the vector-load + permutation sequence,
  // which is much less expensive).
  if (Src->isVectorTy() && Opcode == Instruction::Store)
    for (int i = 0, e = cast<FixedVectorType>(Src)->getNumElements(); i < e;
         ++i)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Src, i);

  return Cost;
}
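  // An SrcBytes-wide access at alignment A is split into SrcBytes / A pieces;
  // the first piece is already accounted for in Cost.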

int PPCTTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind,
                                             UseMaskForCond, UseMaskForGaps);

  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, VecTy);

  // Firstly, the cost of load/store operation.
  int Cost =
      getMemoryOpCost(Opcode, VecTy, MaybeAlign(Alignment), AddressSpace,
                      CostKind);

  // PPC, for both Altivec/VSX, support cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). For each result vector, we need one shuffle per incoming
  // vector (except that the first shuffle can take two incoming vectors
  // because it does not need to take itself).
  Cost += Factor * (LT.first - 1);

  return Cost;
}

unsigned PPCTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                           TTI::TargetCostKind CostKind) {
  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}

bool PPCTTIImpl::areFunctionArgsABICompatible(
    const Function *Caller, const Function *Callee,
    SmallPtrSetImpl<Argument *> &Args) const {

  // We need to ensure that argument promotion does not
  // attempt to promote pointers to MMA types (__vector_pair
  // and __vector_quad) since these types explicitly cannot be
  // passed as arguments. Both of these types are larger than
  // the 128-bit Altivec vectors and have a scalar size of 1 bit.
  if (!BaseT::areFunctionArgsABICompatible(Caller, Callee, Args))
    return false;

  return llvm::none_of(Args, [](Argument *A) {
    auto *EltTy = cast<PointerType>(A->getType())->getElementType();
    if (EltTy->isSized())
      return (EltTy->isIntOrIntVectorTy(1) &&
              EltTy->getPrimitiveSizeInBits() > 128);
    return false;
  });
}

bool PPCTTIImpl::canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
                            LoopInfo *LI, DominatorTree *DT,
                            AssumptionCache *AC, TargetLibraryInfo *LibInfo) {
  // Process nested loops first.
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
    if (canSaveCmp(*I, BI, SE, LI, DT, AC, LibInfo))
      return false; // Stop search.

  HardwareLoopInfo HWLoopInfo(L);

  if (!HWLoopInfo.canAnalyze(*LI))
    return false;

  if (!isHardwareLoopProfitable(L, *SE, *AC, LibInfo, HWLoopInfo))
    return false;

  if (!HWLoopInfo.isHardwareLoopCandidate(*SE, *LI, *DT))
    return false;

  *BI = HWLoopInfo.ExitBranch;
  return true;
}

bool PPCTTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                               TargetTransformInfo::LSRCost &C2) {
  // By default, PowerPC gives instruction count first priority in the LSR
  // cost comparison; if LsrNoInsnsCost is set, fall back to the generic
  // implementation instead.
  if (!LsrNoInsnsCost)
    return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost, C1.NumIVMuls,
                    C1.NumBaseAdds, C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
           std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost, C2.NumIVMuls,
                    C2.NumBaseAdds, C2.ScaleCost, C2.ImmCost, C2.SetupCost);
  else
    return TargetTransformInfoImplBase::isLSRCostLess(C1, C2);
}

bool PPCTTIImpl::isNumRegsMajorCostOfLSR() {
  return false;
}

bool PPCTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                    MemIntrinsicInfo &Info) {
  switch (Inst->getIntrinsicID()) {
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::ppc_altivec_lvebx:
  case Intrinsic::ppc_altivec_lvehx:
  case Intrinsic::ppc_altivec_lvewx:
  case Intrinsic::ppc_vsx_lxvd2x:
  case Intrinsic::ppc_vsx_lxvw4x:
  case Intrinsic::ppc_vsx_lxvd2x_be:
  case Intrinsic::ppc_vsx_lxvw4x_be:
  case Intrinsic::ppc_vsx_lxvl:
  case Intrinsic::ppc_vsx_lxvll:
  case Intrinsic::ppc_vsx_lxvp: {
    Info.PtrVal = Inst->getArgOperand(0);
    Info.ReadMem = true;
    Info.WriteMem = false;
    return true;
  }
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
  case Intrinsic::ppc_altivec_stvebx:
  case Intrinsic::ppc_altivec_stvehx:
  case Intrinsic::ppc_altivec_stvewx:
  case Intrinsic::ppc_vsx_stxvd2x:
  case Intrinsic::ppc_vsx_stxvw4x:
  case Intrinsic::ppc_vsx_stxvd2x_be:
  case Intrinsic::ppc_vsx_stxvw4x_be:
  case Intrinsic::ppc_vsx_stxvl:
  case Intrinsic::ppc_vsx_stxvll:
  case Intrinsic::ppc_vsx_stxvp: {
    Info.PtrVal = Inst->getArgOperand(1);
    Info.ReadMem = false;
    Info.WriteMem = true;
    return true;
  }
  default:
    break;
  }

  return false;
}
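  // Describe the Altivec/VSX load and store intrinsics as plain memory
  // accesses (pointer operand plus read/write direction) so that passes such
  // as EarlyCSE can reason about them.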