//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
/// A note about the cost model numbers used below: the numbers correspond to
/// some "generic" X86 CPU rather than a concrete CPU model. Usually the
/// numbers correspond to the CPU where the feature first appeared. For
/// example, if we check Subtarget.hasSSE42() in the lookups below, the cost
/// is based on Nehalem as that was the first CPU to support that feature
/// level, and thus most likely has the worst-case cost.
/// Some examples of other technologies/CPUs:
///   SSE 3   - Pentium4 / Athlon64
///   SSE 4.1 - Penryn
///   SSE 4.2 - Nehalem
///   AVX     - Sandy Bridge
///   AVX2    - Haswell
///   AVX-512 - Xeon Phi / Skylake
/// And some examples of instruction target dependent costs (latency):
///                   divss     sqrtss     rsqrtss
///   AMD K7          11-16     19         3
///   Piledriver      9-24      13-15      5
///   Jaguar          14        16         2
///   Pentium II,III  18        30         2
///   Nehalem         7-14      7-18       3
///   Haswell         10-13     11         5
/// TODO: Develop and implement the target dependent cost model and
/// specialize cost numbers for different Cost Model Targets such as throughput,
/// code size, latency and uop count.
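///
/// For example (an illustrative reading of the tables below, not a promise
/// for any particular CPU): on a target reporting SSE4.2, a scalar FDIV of
/// f32 costs 14, Nehalem's divss throughput, even though later SSE4.2-capable
/// CPUs are faster; the model deliberately picks the conservative number.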
//===----------------------------------------------------------------------===//

#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  // instructions is inefficient. Once the problem is fixed, we should
  // call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}

llvm::Optional<unsigned> X86TTIImpl::getCacheSize(
    TargetTransformInfo::CacheLevel Level) const {
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    //   - Penryn
    //   - Nehalem
    //   - Westmere
    //   - Sandy Bridge
    //   - Ivy Bridge
    //   - Haswell
    //   - Broadwell
    //   - Skylake
    //   - Kabylake
    return 32 * 1024; //  32 KByte
  case TargetTransformInfo::CacheLevel::L2D:
    //   - Penryn
    //   - Nehalem
    //   - Westmere
    //   - Sandy Bridge
    //   - Ivy Bridge
    //   - Haswell
    //   - Broadwell
    //   - Skylake
    //   - Kabylake
    return 256 * 1024; // 256 KByte
  }

  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}

llvm::Optional<unsigned> X86TTIImpl::getCacheAssociativity(
    TargetTransformInfo::CacheLevel Level) const {
  //   - Penryn
  //   - Nehalem
  //   - Westmere
  //   - Sandy Bridge
  //   - Ivy Bridge
  //   - Haswell
  //   - Broadwell
  //   - Skylake
  //   - Kabylake
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    LLVM_FALLTHROUGH;
  case TargetTransformInfo::CacheLevel::L2D:
    return 8;
  }

  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}

unsigned X86TTIImpl::getNumberOfRegisters(unsigned ClassID) const {
  bool Vector = (ClassID == 1);
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) const {
  unsigned PreferVectorWidth = ST->getPreferVectorWidth();
  if (Vector) {
    if (ST->hasAVX512() && PreferVectorWidth >= 512)
      return 512;
    if (ST->hasAVX() && PreferVectorWidth >= 256)
      return 256;
    if (ST->hasSSE1() && PreferVectorWidth >= 128)
      return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;

  return 32;
}
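
// For example (illustrative): an AVX-512 subtarget built with
// -mprefer-vector-width=256 reports 256 from getRegisterBitWidth(true) above,
// steering the vectorizers away from 512-bit types even though ZMM registers
// exist.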

unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const {
  return getRegisterBitWidth(true);
}

unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller handle it instead, which saves the overflow
  // check and memory check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}

int X86TTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                       TTI::TargetCostKind CostKind,
                                       TTI::OperandValueKind Op1Info,
                                       TTI::OperandValueKind Op2Info,
                                       TTI::OperandValueProperties Opd1PropInfo,
                                       TTI::OperandValueProperties Opd2PropInfo,
                                       ArrayRef<const Value *> Args,
                                       const Instruction *CxtI) {
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Opd1PropInfo,
                                         Opd2PropInfo, Args, CxtI);
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry GLMCostTable[] = {
    { ISD::FDIV,  MVT::f32,   18 }, // divss
    { ISD::FDIV,  MVT::v4f32, 35 }, // divps
    { ISD::FDIV,  MVT::f64,   33 }, // divsd
    { ISD::FDIV,  MVT::v2f64, 65 }, // divpd
  };

  if (ST->useGLMDivSqrtCosts())
    if (const auto *Entry = CostTableLookup(GLMCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SLMCostTable[] = {
    { ISD::MUL,   MVT::v4i32, 11 }, // pmulld
    { ISD::MUL,   MVT::v8i16,  2 }, // pmullw
    { ISD::MUL,   MVT::v16i8, 14 }, // extend/pmullw/trunc sequence.
    { ISD::FMUL,  MVT::f64,    2 }, // mulsd
    { ISD::FMUL,  MVT::v2f64,  4 }, // mulpd
    { ISD::FMUL,  MVT::v4f32,  2 }, // mulps
    { ISD::FDIV,  MVT::f32,   17 }, // divss
    { ISD::FDIV,  MVT::v4f32, 39 }, // divps
    { ISD::FDIV,  MVT::f64,   32 }, // divsd
    { ISD::FDIV,  MVT::v2f64, 69 }, // divpd
    { ISD::FADD,  MVT::v2f64,  2 }, // addpd
    { ISD::FSUB,  MVT::v2f64,  2 }, // subpd
    // v2i64/v4i64 mul is custom lowered as a series of long:
    // multiplies(3), shifts(3) and adds(2).
    // slm muldq throughput is 2 and addq throughput is 4,
    // thus: 3X2 (muldq throughput) + 3X1 (shift throughput) +
    // 2X4 (addq throughput) = 17
    { ISD::MUL,   MVT::v2i64, 17 },
    // slm addq/subq throughput is 4
    { ISD::ADD,   MVT::v2i64,  4 },
    { ISD::SUB,   MVT::v2i64,  4 },
  };

  if (ST->isSLM()) {
    if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
      // Check if the operands can be shrunk into a smaller datatype.
      bool Op1Signed = false;
      unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
      bool Op2Signed = false;
      unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);

      bool signedMode = Op1Signed | Op2Signed;
      unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);

      if (OpMinSize <= 7)
        return LT.first * 3; // pmullw/sext
      if (!signedMode && OpMinSize <= 8)
        return LT.first * 3; // pmullw/zext
      if (OpMinSize <= 15)
        return LT.first * 5; // pmullw/pmulhw/pshuf
      if (!signedMode && OpMinSize <= 16)
        return LT.first * 5; // pmullw/pmulhw/pshuf
    }

    if (const auto *Entry = CostTableLookup(SLMCostTable, ISD,
                                            LT.second)) {
      return LT.first * Entry->Cost;
    }
  }
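
  // Worked example (illustrative): on SLM, a v4i32 multiply whose operands
  // provably fit in 16 signed bits takes the pmullw/pmulhw/pshuf path above
  // and costs LT.first * 5, rather than the pmulld cost of 11 from the SLM
  // table.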

  if ((ISD == ISD::SDIV || ISD == ISD::SREM || ISD == ISD::UDIV ||
       ISD == ISD::UREM) &&
      (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    if (ISD == ISD::SDIV || ISD == ISD::SREM) {
      // On X86, a vector signed division by a power-of-two constant is
      // normally expanded to the sequence SRA + SRL + ADD + SRA.
      // The OperandValue properties may not be the same as those of the
      // previous operation; conservatively assume OP_None.
      int Cost =
          2 * getArithmeticInstrCost(Instruction::AShr, Ty, CostKind, Op1Info,
                                     Op2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::LShr, Ty, CostKind, Op1Info,
                                     Op2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind, Op1Info,
                                     Op2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);

      if (ISD == ISD::SREM) {
        // For SREM: (X % C) is the equivalent of (X - (X/C)*C)
        Cost += getArithmeticInstrCost(Instruction::Mul, Ty, CostKind, Op1Info,
                                       Op2Info);
        Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind, Op1Info,
                                       Op2Info);
      }

      return Cost;
    }

    // Vector unsigned division/remainder will be simplified to shifts/masks.
    if (ISD == ISD::UDIV)
      return getArithmeticInstrCost(Instruction::LShr, Ty, CostKind,
                                    Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);

    else // UREM
      return getArithmeticInstrCost(Instruction::And, Ty, CostKind,
                                    Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);
  }
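
  // Worked example (illustrative): for a legal vector type, SDIV by a
  // power-of-two splat therefore composes as
  //   2 * Cost(AShr) + Cost(LShr) + Cost(Add),
  // and SREM additionally pays Cost(Mul) + Cost(Sub) for the
  // (X - (X/C)*C) expansion.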

  static const CostTblEntry AVX512BWUniformConstCostTable[] = {
    { ISD::SHL,  MVT::v64i8,  2 }, // psllw + pand.
    { ISD::SRL,  MVT::v64i8,  2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v64i8,  4 }, // psrlw, pand, pxor, psubb.
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasBWI()) {
    if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512UniformConstCostTable[] = {
    { ISD::SRA,  MVT::v2i64,  1 },
    { ISD::SRA,  MVT::v4i64,  1 },
    { ISD::SRA,  MVT::v8i64,  1 },

    { ISD::SHL,  MVT::v64i8,  4 }, // psllw + pand.
    { ISD::SRL,  MVT::v64i8,  4 }, // psrlw + pand.
    { ISD::SRA,  MVT::v64i8,  8 }, // psrlw, pand, pxor, psubb.

    { ISD::SDIV, MVT::v16i32, 6 }, // pmuludq sequence
    { ISD::SREM, MVT::v16i32, 8 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v16i32, 5 }, // pmuludq sequence
    { ISD::UREM, MVT::v16i32, 7 }, // pmuludq+mul+sub sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX512()) {
    if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v32i8, 2 }, // psllw + pand.
    { ISD::SRL,  MVT::v32i8, 2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v32i8, 4 }, // psrlw, pand, pxor, psubb.

    { ISD::SRA,  MVT::v4i64, 4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v8i32, 6 }, // pmuludq sequence
    { ISD::SREM, MVT::v8i32, 8 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32, 5 }, // pmuludq sequence
    { ISD::UREM, MVT::v8i32, 7 }, // pmuludq+mul+sub sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v16i8,    2 }, // psllw + pand.
    { ISD::SRL,  MVT::v16i8,    2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v16i8,    4 }, // psrlw, pand, pxor, psubb.

    { ISD::SHL,  MVT::v32i8,  4+2 }, // 2*(psllw + pand) + split.
    { ISD::SRL,  MVT::v32i8,  4+2 }, // 2*(psrlw + pand) + split.
    { ISD::SRA,  MVT::v32i8,  8+2 }, // 2*(psrlw, pand, pxor, psubb) + split.

    { ISD::SDIV, MVT::v8i32, 12+2 }, // 2*pmuludq sequence + split.
    { ISD::SREM, MVT::v8i32, 16+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::SDIV, MVT::v4i32,    6 }, // pmuludq sequence
    { ISD::SREM, MVT::v4i32,    8 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32, 10+2 }, // 2*pmuludq sequence + split.
    { ISD::UREM, MVT::v8i32, 14+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::UDIV, MVT::v4i32,    5 }, // pmuludq sequence
    { ISD::UREM, MVT::v4i32,    7 }, // pmuludq+mul+sub sequence
  };

  // XOP has faster vXi8 shifts.
  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2() && !ST->hasXOP()) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512BWConstCostTable[] = {
    { ISD::SDIV, MVT::v64i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v64i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v64i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v64i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v32i16,  6 }, // vpmulhw sequence
    { ISD::SREM, MVT::v32i16,  8 }, // vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i16,  6 }, // vpmulhuw sequence
    { ISD::UREM, MVT::v32i16,  8 }, // vpmulhuw+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasBWI()) {
    if (const auto *Entry =
            CostTableLookup(AVX512BWConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512ConstCostTable[] = {
    { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
    { ISD::SREM, MVT::v16i32, 17 }, // vpmuldq+mul+sub sequence
    { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
    { ISD::UREM, MVT::v16i32, 17 }, // vpmuludq+mul+sub sequence
    { ISD::SDIV, MVT::v64i8,  28 }, // 4*ext+4*pmulhw sequence
    { ISD::SREM, MVT::v64i8,  32 }, // 4*ext+4*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v64i8,  28 }, // 4*ext+4*pmulhw sequence
    { ISD::UREM, MVT::v64i8,  32 }, // 4*ext+4*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v32i16, 12 }, // 2*vpmulhw sequence
    { ISD::SREM, MVT::v32i16, 16 }, // 2*vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i16, 12 }, // 2*vpmulhuw sequence
    { ISD::UREM, MVT::v32i16, 16 }, // 2*vpmulhuw+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasAVX512()) {
    if (const auto *Entry =
            CostTableLookup(AVX512ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2ConstCostTable[] = {
    { ISD::SDIV, MVT::v32i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v32i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v32i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::SREM, MVT::v16i16,  8 }, // vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::UREM, MVT::v16i16,  8 }, // vpmulhuw+mul+sub sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::SREM, MVT::v8i32,  19 }, // vpmuldq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
    { ISD::UREM, MVT::v8i32,  19 }, // vpmuludq+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2ConstCostTable[] = {
    { ISD::SDIV, MVT::v32i8,  28+2 }, // 4*ext+4*pmulhw sequence + split.
    { ISD::SREM, MVT::v32i8,  32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
    { ISD::SDIV, MVT::v16i8,    14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v16i8,    16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i8,  28+2 }, // 4*ext+4*pmulhw sequence + split.
    { ISD::UREM, MVT::v32i8,  32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
    { ISD::UDIV, MVT::v16i8,    14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v16i8,    16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v16i16, 12+2 }, // 2*pmulhw sequence + split.
    { ISD::SREM, MVT::v16i16, 16+2 }, // 2*pmulhw+mul+sub sequence + split.
    { ISD::SDIV, MVT::v8i16,     6 }, // pmulhw sequence
    { ISD::SREM, MVT::v8i16,     8 }, // pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v16i16, 12+2 }, // 2*pmulhuw sequence + split.
    { ISD::UREM, MVT::v16i16, 16+2 }, // 2*pmulhuw+mul+sub sequence + split.
    { ISD::UDIV, MVT::v8i16,     6 }, // pmulhuw sequence
    { ISD::UREM, MVT::v8i16,     8 }, // pmulhuw+mul+sub sequence
    { ISD::SDIV, MVT::v8i32,  38+2 }, // 2*pmuludq sequence + split.
    { ISD::SREM, MVT::v8i32,  48+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::SDIV, MVT::v4i32,    19 }, // pmuludq sequence
    { ISD::SREM, MVT::v4i32,    24 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,  30+2 }, // 2*pmuludq sequence + split.
    { ISD::UREM, MVT::v8i32,  40+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::UDIV, MVT::v4i32,    15 }, // pmuludq sequence
    { ISD::UREM, MVT::v4i32,    20 }, // pmuludq+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX())
      return LT.first * 32;
    if (ISD == ISD::SREM && LT.second == MVT::v8i32 && ST->hasAVX())
      return LT.first * 38;
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;
    if (ISD == ISD::SREM && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 20;

    if (const auto *Entry = CostTableLookup(SSE2ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }
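
  // Worked example (illustrative): a v4i32 SDIV by a non-power-of-two
  // constant returns LT.first * 15 on SSE4.1 via the special case above,
  // while plain SSE2 falls through to the table's pmuludq-sequence cost
  // of 19.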

  static const CostTblEntry AVX512BWShiftCostTable[] = {
    { ISD::SHL,  MVT::v8i16,  1 }, // vpsllvw
    { ISD::SRL,  MVT::v8i16,  1 }, // vpsrlvw
    { ISD::SRA,  MVT::v8i16,  1 }, // vpsravw

    { ISD::SHL,  MVT::v16i16, 1 }, // vpsllvw
    { ISD::SRL,  MVT::v16i16, 1 }, // vpsrlvw
    { ISD::SRA,  MVT::v16i16, 1 }, // vpsravw

    { ISD::SHL,  MVT::v32i16, 1 }, // vpsllvw
    { ISD::SRL,  MVT::v32i16, 1 }, // vpsrlvw
    { ISD::SRA,  MVT::v32i16, 1 }, // vpsravw
  };

  if (ST->hasBWI())
    if (const auto *Entry =
            CostTableLookup(AVX512BWShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i16, 1 }, // psllw.
    { ISD::SRL,  MVT::v16i16, 1 }, // psrlw.
    { ISD::SRA,  MVT::v16i16, 1 }, // psraw.
    { ISD::SHL,  MVT::v32i16, 2 }, // 2*psllw.
    { ISD::SRL,  MVT::v32i16, 2 }, // 2*psrlw.
    { ISD::SRA,  MVT::v32i16, 2 }, // 2*psraw.
  };

  if (ST->hasAVX2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v8i16, 1 }, // psllw.
    { ISD::SHL,  MVT::v4i32, 1 }, // pslld.
    { ISD::SHL,  MVT::v2i64, 1 }, // psllq.

    { ISD::SRL,  MVT::v8i16, 1 }, // psrlw.
    { ISD::SRL,  MVT::v4i32, 1 }, // psrld.
    { ISD::SRL,  MVT::v2i64, 1 }, // psrlq.

    { ISD::SRA,  MVT::v8i16, 1 }, // psraw.
    { ISD::SRA,  MVT::v4i32, 1 }, // psrad.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512DQCostTable[] = {
    { ISD::MUL,  MVT::v2i64, 1 },
    { ISD::MUL,  MVT::v4i64, 1 },
    { ISD::MUL,  MVT::v8i64, 1 }
  };

  // Look for AVX512DQ lowering tricks for custom cases.
  if (ST->hasDQI())
    if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
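
  // Worked example (illustrative): with AVX512DQ, a v16i64 multiply
  // legalizes by splitting into two v8i64 halves (LT.first == 2), so the
  // returned cost models two single-uop vpmullq operations: 2 * 1 == 2.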

  static const CostTblEntry AVX512BWCostTable[] = {
    { ISD::SHL,  MVT::v64i8,  11 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v64i8,  11 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v64i8,  24 }, // vpblendvb sequence.

    { ISD::MUL,  MVT::v64i8,  11 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v32i8,   4 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i8,   4 }, // extend/pmullw/trunc sequence.
  };

  // Look for AVX512BW lowering tricks for custom cases.
  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512CostTable[] = {
    { ISD::SHL,  MVT::v16i32,  1 },
    { ISD::SRL,  MVT::v16i32,  1 },
    { ISD::SRA,  MVT::v16i32,  1 },

    { ISD::SHL,  MVT::v8i64,   1 },
    { ISD::SRL,  MVT::v8i64,   1 },

    { ISD::SRA,  MVT::v2i64,   1 },
    { ISD::SRA,  MVT::v4i64,   1 },
    { ISD::SRA,  MVT::v8i64,   1 },

    { ISD::MUL,  MVT::v64i8,  26 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v32i8,  13 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i8,   5 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i32,  1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,  MVT::v8i32,   1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,  MVT::v4i32,   1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,  MVT::v8i64,   8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FADD, MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FSUB, MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FMUL, MVT::v8f64,   1 }, // Skylake from http://www.agner.org/

    { ISD::FADD, MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FSUB, MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FMUL, MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2ShiftCostTable[] = {
    // Shifts on v4i64/v8i32 on AVX2 are legal even though we declare them
    // custom so that we can detect cases where the shift amount is a scalar.
    { ISD::SHL,  MVT::v4i32, 1 },
    { ISD::SRL,  MVT::v4i32, 1 },
    { ISD::SRA,  MVT::v4i32, 1 },
    { ISD::SHL,  MVT::v8i32, 1 },
    { ISD::SRL,  MVT::v8i32, 1 },
    { ISD::SRA,  MVT::v8i32, 1 },
    { ISD::SHL,  MVT::v2i64, 1 },
    { ISD::SRL,  MVT::v2i64, 1 },
    { ISD::SHL,  MVT::v4i64, 1 },
    { ISD::SRL,  MVT::v4i64, 1 },
  };

  if (ST->hasAVX512()) {
    if (ISD == ISD::SHL && LT.second == MVT::v32i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX512, a packed v32i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
                                    Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);
  }

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
                                    Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);

    if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry XOPShiftCostTable[] = {
    // 128bit shifts take 1cy, but right shifts require negation beforehand.
    { ISD::SHL,  MVT::v16i8,    1 },
    { ISD::SRL,  MVT::v16i8,    2 },
    { ISD::SRA,  MVT::v16i8,    2 },
    { ISD::SHL,  MVT::v8i16,    1 },
    { ISD::SRL,  MVT::v8i16,    2 },
    { ISD::SRA,  MVT::v8i16,    2 },
    { ISD::SHL,  MVT::v4i32,    1 },
    { ISD::SRL,  MVT::v4i32,    2 },
    { ISD::SRA,  MVT::v4i32,    2 },
    { ISD::SHL,  MVT::v2i64,    1 },
    { ISD::SRL,  MVT::v2i64,    2 },
    { ISD::SRA,  MVT::v2i64,    2 },
    // 256bit shifts require splitting if AVX2 didn't catch them above.
    { ISD::SHL,  MVT::v32i8,  2+2 },
    { ISD::SRL,  MVT::v32i8,  4+2 },
    { ISD::SRA,  MVT::v32i8,  4+2 },
    { ISD::SHL,  MVT::v16i16, 2+2 },
    { ISD::SRL,  MVT::v16i16, 4+2 },
    { ISD::SRA,  MVT::v16i16, 4+2 },
    { ISD::SHL,  MVT::v8i32,  2+2 },
    { ISD::SRL,  MVT::v8i32,  4+2 },
    { ISD::SRA,  MVT::v8i32,  4+2 },
    { ISD::SHL,  MVT::v4i64,  2+2 },
    { ISD::SRL,  MVT::v4i64,  4+2 },
    { ISD::SRA,  MVT::v4i64,  4+2 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP()) {
    // If the right shift is constant then we'll fold the negation so
    // it's as cheap as a left shift.
    int ShiftISD = ISD;
    if ((ShiftISD == ISD::SRL || ShiftISD == ISD::SRA) &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      ShiftISD = ISD::SHL;
    if (const auto *Entry =
            CostTableLookup(XOPShiftCostTable, ShiftISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformShiftCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i16, 2+2 }, // 2*psllw + split.
    { ISD::SHL,  MVT::v8i32,  2+2 }, // 2*pslld + split.
    { ISD::SHL,  MVT::v4i64,  2+2 }, // 2*psllq + split.

    { ISD::SRL,  MVT::v16i16, 2+2 }, // 2*psrlw + split.
    { ISD::SRL,  MVT::v8i32,  2+2 }, // 2*psrld + split.
    { ISD::SRL,  MVT::v4i64,  2+2 }, // 2*psrlq + split.

    { ISD::SRA,  MVT::v16i16, 2+2 }, // 2*psraw + split.
    { ISD::SRA,  MVT::v8i32,  2+2 }, // 2*psrad + split.
    { ISD::SRA,  MVT::v2i64,    4 }, // 2*psrad + shuffle.
    { ISD::SRA,  MVT::v4i64,  8+2 }, // 2*(2*psrad + shuffle) + split.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {

    // Handle AVX2 uniform v4i64 ISD::SRA, it's not worth a table.
    if (ISD == ISD::SRA && LT.second == MVT::v4i64 && ST->hasAVX2())
      return LT.first * 4; // 2*psrad + shuffle.

    if (const auto *Entry =
            CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    MVT VT = LT.second;
    // A vector shift left by a non-uniform constant can be lowered
    // into a vector multiply.
    if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||
        ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX()))
      ISD = ISD::MUL;
  }
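
  // Worked example (illustrative): a v8i16 shift-left by the constant
  // vector <0,1,2,...,7> is costed as a multiply here, so on SSE2 it picks
  // up the pmullw cost of 1 from the table below instead of a generic
  // variable-shift sequence.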

  static const CostTblEntry AVX2CostTable[] = {
    { ISD::SHL,  MVT::v32i8,  11 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v64i8,  22 }, // 2*vpblendvb sequence.
    { ISD::SHL,  MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.
    { ISD::SHL,  MVT::v32i16, 20 }, // 2*extend/vpsrlvd/pack sequence.

    { ISD::SRL,  MVT::v32i8,  11 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v64i8,  22 }, // 2*vpblendvb sequence.
    { ISD::SRL,  MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.
    { ISD::SRL,  MVT::v32i16, 20 }, // 2*extend/vpsrlvd/pack sequence.

    { ISD::SRA,  MVT::v32i8,  24 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v64i8,  48 }, // 2*vpblendvb sequence.
    { ISD::SRA,  MVT::v16i16, 10 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v32i16, 20 }, // 2*extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v2i64,   4 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,   4 }, // srl/xor/sub sequence.

    { ISD::SUB,  MVT::v32i8,   1 }, // psubb
    { ISD::ADD,  MVT::v32i8,   1 }, // paddb
    { ISD::SUB,  MVT::v16i16,  1 }, // psubw
    { ISD::ADD,  MVT::v16i16,  1 }, // paddw
    { ISD::SUB,  MVT::v8i32,   1 }, // psubd
    { ISD::ADD,  MVT::v8i32,   1 }, // paddd
    { ISD::SUB,  MVT::v4i64,   1 }, // psubq
    { ISD::ADD,  MVT::v4i64,   1 }, // paddq

    { ISD::MUL,  MVT::v32i8,  17 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i8,   7 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i16,  1 }, // pmullw
    { ISD::MUL,  MVT::v8i32,   2 }, // pmulld (Haswell from agner.org)
    { ISD::MUL,  MVT::v4i64,   8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FADD, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FADD, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/
    { ISD::FSUB, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FSUB, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/

    { ISD::FDIV, MVT::f32,     7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,   7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,  28 }, // Haswell from http://www.agner.org/
  };

  // Look for AVX2 lowering tricks for custom cases.
  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,  MVT::v16i16,  4 },
    { ISD::MUL,  MVT::v8i32,   4 },
    { ISD::SUB,  MVT::v32i8,   4 },
    { ISD::ADD,  MVT::v32i8,   4 },
    { ISD::SUB,  MVT::v16i16,  4 },
    { ISD::ADD,  MVT::v16i16,  4 },
    { ISD::SUB,  MVT::v8i32,   4 },
    { ISD::ADD,  MVT::v8i32,   4 },
    { ISD::SUB,  MVT::v4i64,   4 },
    { ISD::ADD,  MVT::v4i64,   4 },

    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies(3), shifts(3) and adds(2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // extract+insert in the cost table. Therefore, the cost here is 18
    // instead of 8.
    { ISD::MUL,  MVT::v4i64,  18 },

    { ISD::MUL,  MVT::v32i8,  26 }, // extend/pmullw/trunc sequence.

    { ISD::FDIV, MVT::f32,    14 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,  14 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,  28 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    22 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  22 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,  44 }, // SNB from http://www.agner.org/
  };

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE42CostTable[] = {
    { ISD::FADD, MVT::f64,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::f32,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::v2f64,  1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::v4f32,  1 }, // Nehalem from http://www.agner.org/

    { ISD::FSUB, MVT::f64,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::f32,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::v2f64,  1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::v4f32,  1 }, // Nehalem from http://www.agner.org/

    { ISD::FMUL, MVT::f64,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::f32,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::v2f64,  1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::v4f32,  1 }, // Nehalem from http://www.agner.org/

    { ISD::FDIV, MVT::f32,   14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::f64,   22 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/
  };

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE41CostTable[] = {
    { ISD::SHL,  MVT::v16i8,      11 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v32i8,  2*11+2 }, // pblendvb sequence + split.
    { ISD::SHL,  MVT::v8i16,      14 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
    { ISD::SHL,  MVT::v4i32,       4 }, // pslld/paddd/cvttps2dq/pmulld
    { ISD::SHL,  MVT::v8i32,   2*4+2 }, // pslld/paddd/cvttps2dq/pmulld + split

    { ISD::SRL,  MVT::v16i8,      12 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v32i8,  2*12+2 }, // pblendvb sequence + split.
    { ISD::SRL,  MVT::v8i16,      14 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
    { ISD::SRL,  MVT::v4i32,      11 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v8i32,  2*11+2 }, // Shift each lane + blend + split.

    { ISD::SRA,  MVT::v16i8,      24 }, // pblendvb sequence.
    { ISD::SRA,  MVT::v32i8,  2*24+2 }, // pblendvb sequence + split.
    { ISD::SRA,  MVT::v8i16,      14 }, // pblendvb sequence.
    { ISD::SRA,  MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
    { ISD::SRA,  MVT::v4i32,      12 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v8i32,  2*12+2 }, // Shift each lane + blend + split.

    { ISD::MUL,  MVT::v4i32,       2 }  // pmulld (Nehalem from agner.org)
  };

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    { ISD::SHL,  MVT::v16i8,      26 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v8i16,      32 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v4i32,     2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,       4 }, // splat+shuffle sequence.
    { ISD::SHL,  MVT::v4i64,   2*4+2 }, // splat+shuffle sequence + split.

    { ISD::SRL,  MVT::v16i8,      26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16,      32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v4i32,      16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v2i64,       4 }, // splat+shuffle sequence.
    { ISD::SRL,  MVT::v4i64,   2*4+2 }, // splat+shuffle sequence + split.

    { ISD::SRA,  MVT::v16i8,      54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16,      32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v4i32,      16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v2i64,      12 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,  2*12+2 }, // srl/xor/sub sequence+split.

    { ISD::MUL,  MVT::v16i8,      12 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v8i16,       1 }, // pmullw
    { ISD::MUL,  MVT::v4i32,       6 }, // 3*pmuludq/4*shuffle
    { ISD::MUL,  MVT::v2i64,       8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FDIV, MVT::f32,        23 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,      39 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::f64,        38 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,      69 }, // Pentium IV from http://www.agner.org/

    { ISD::FADD, MVT::f32,         2 }, // Pentium IV from http://www.agner.org/
    { ISD::FADD, MVT::f64,         2 }, // Pentium IV from http://www.agner.org/

    { ISD::FSUB, MVT::f32,         2 }, // Pentium IV from http://www.agner.org/
    { ISD::FSUB, MVT::f64,         2 }, // Pentium IV from http://www.agner.org/
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE1CostTable[] = {
    { ISD::FDIV, MVT::f32,   17 }, // Pentium III from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/

    { ISD::FADD, MVT::f32,    1 }, // Pentium III from http://www.agner.org/
    { ISD::FADD, MVT::v4f32,  2 }, // Pentium III from http://www.agner.org/

    { ISD::FSUB, MVT::f32,    1 }, // Pentium III from http://www.agner.org/
    { ISD::FSUB, MVT::v4f32,  2 }, // Pentium III from http://www.agner.org/

    { ISD::ADD,  MVT::i8,     1 }, // Pentium III from http://www.agner.org/
    { ISD::ADD,  MVT::i16,    1 }, // Pentium III from http://www.agner.org/
    { ISD::ADD,  MVT::i32,    1 }, // Pentium III from http://www.agner.org/

    { ISD::SUB,  MVT::i8,     1 }, // Pentium III from http://www.agner.org/
    { ISD::SUB,  MVT::i16,    1 }, // Pentium III from http://www.agner.org/
    { ISD::SUB,  MVT::i32,    1 }, // Pentium III from http://www.agner.org/
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  // It is not a good idea to vectorize division. We have to scalarize it and
  // in the process we will often end up having to spill regular registers.
  // The overhead of division is going to dominate most kernels anyway, so try
  // hard to prevent vectorization of division - it is generally a bad idea.
  // Assume somewhat arbitrarily that we have to be able to hide "20 cycles"
  // for each lane.
  if (LT.second.isVector() && (ISD == ISD::SDIV || ISD == ISD::SREM ||
                               ISD == ISD::UDIV || ISD == ISD::UREM)) {
    int ScalarCost = getArithmeticInstrCost(
        Opcode, Ty->getScalarType(), CostKind, Op1Info, Op2Info,
        TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
    return 20 * LT.first * LT.second.getVectorNumElements() * ScalarCost;
  }
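
  // Worked example (illustrative): a v4i32 UDIV by a non-constant divisor
  // reaches here and costs 20 * 1 * 4 * ScalarCost, large enough that the
  // vectorizers will almost always leave the division scalar.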

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info);
}

int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, VectorType *BaseTp,
                               int Index, VectorType *SubTp) {
  // 64-bit packed float vectors (v2f32) are widened to type v4f32.
  // 64-bit packed integer vectors (v2i32) are widened to type v4i32.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, BaseTp);

  // Treat Transpose as 2-op shuffles - there's no difference in lowering.
  if (Kind == TTI::SK_Transpose)
    Kind = TTI::SK_PermuteTwoSrc;

  // For broadcasts we are splatting the first element from the first input
  // register, so we only need to reference that input; all the output
  // registers are the same.
  if (Kind == TTI::SK_Broadcast)
    LT.first = 1;

  // Subvector extractions are free if they start at the beginning of a
  // vector and cheap if the subvectors are aligned.
  if (Kind == TTI::SK_ExtractSubvector && LT.second.isVector()) {
    int NumElts = LT.second.getVectorNumElements();
    if ((Index % NumElts) == 0)
      return 0;
    std::pair<int, MVT> SubLT = TLI->getTypeLegalizationCost(DL, SubTp);
    if (SubLT.second.isVector()) {
      int NumSubElts = SubLT.second.getVectorNumElements();
      if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
        return SubLT.first;
      // Handle some cases for widening legalization. For now we only handle
      // cases where the original subvector was naturally aligned and evenly
      // fit in its legalized subvector type.
      // FIXME: Remove some of the alignment restrictions.
      // FIXME: We can use permq for 64-bit or larger extracts from 256-bit
      // vectors.
      int OrigSubElts = cast<FixedVectorType>(SubTp)->getNumElements();
      if (NumSubElts > OrigSubElts && (Index % OrigSubElts) == 0 &&
          (NumSubElts % OrigSubElts) == 0 &&
          LT.second.getVectorElementType() ==
              SubLT.second.getVectorElementType() &&
          LT.second.getVectorElementType().getSizeInBits() ==
              BaseTp->getElementType()->getPrimitiveSizeInBits()) {
        assert(NumElts >= NumSubElts && NumElts > OrigSubElts &&
               "Unexpected number of elements!");
        auto *VecTy = FixedVectorType::get(BaseTp->getElementType(),
                                           LT.second.getVectorNumElements());
        auto *SubTy = FixedVectorType::get(BaseTp->getElementType(),
                                           SubLT.second.getVectorNumElements());
        int ExtractIndex = alignDown((Index % NumElts), NumSubElts);
        int ExtractCost = getShuffleCost(TTI::SK_ExtractSubvector, VecTy,
                                         ExtractIndex, SubTy);

        // If the original size is 32 bits or more, we can use pshufd.
        // Otherwise, if we have SSSE3, we can use pshufb.
        if (SubTp->getPrimitiveSizeInBits() >= 32 || ST->hasSSSE3())
          return ExtractCost + 1; // pshufd or pshufb

        assert(SubTp->getPrimitiveSizeInBits() == 16 &&
               "Unexpected vector size");

        return ExtractCost + 2; // worst case pshufhw + pshufd
      }
    }
  }
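
  // Worked example (illustrative): extracting the low v4f32 of a v8f32
  // (Index == 0) is free, and extracting the high half (Index == 4) is
  // subvector-aligned, costing only the legalized subvector count
  // SubLT.first.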

  // Handle some common (illegal) sub-vector types as they are often very cheap
  // to shuffle even on targets without PSHUFB.
  EVT VT = TLI->getValueType(DL, BaseTp);
  if (VT.isSimple() && VT.isVector() && VT.getSizeInBits() < 128 &&
      !ST->hasSSSE3()) {
    static const CostTblEntry SSE2SubVectorShuffleTbl[] = {
      {TTI::SK_Broadcast,        MVT::v4i16, 1}, // pshuflw
      {TTI::SK_Broadcast,        MVT::v2i16, 1}, // pshuflw
      {TTI::SK_Broadcast,        MVT::v8i8,  2}, // punpck/pshuflw
      {TTI::SK_Broadcast,        MVT::v4i8,  2}, // punpck/pshuflw
      {TTI::SK_Broadcast,        MVT::v2i8,  1}, // punpck

      {TTI::SK_Reverse,          MVT::v4i16, 1}, // pshuflw
      {TTI::SK_Reverse,          MVT::v2i16, 1}, // pshuflw
      {TTI::SK_Reverse,          MVT::v4i8,  3}, // punpck/pshuflw/packus
      {TTI::SK_Reverse,          MVT::v2i8,  1}, // punpck

      {TTI::SK_PermuteTwoSrc,    MVT::v4i16, 2}, // punpck/pshuflw
      {TTI::SK_PermuteTwoSrc,    MVT::v2i16, 2}, // punpck/pshuflw
      {TTI::SK_PermuteTwoSrc,    MVT::v8i8,  7}, // punpck/pshuflw
      {TTI::SK_PermuteTwoSrc,    MVT::v4i8,  4}, // punpck/pshuflw
      {TTI::SK_PermuteTwoSrc,    MVT::v2i8,  2}, // punpck

      {TTI::SK_PermuteSingleSrc, MVT::v4i16, 1}, // pshuflw
      {TTI::SK_PermuteSingleSrc, MVT::v2i16, 1}, // pshuflw
      {TTI::SK_PermuteSingleSrc, MVT::v8i8,  5}, // punpck/pshuflw
      {TTI::SK_PermuteSingleSrc, MVT::v4i8,  3}, // punpck/pshuflw
      {TTI::SK_PermuteSingleSrc, MVT::v2i8,  1}, // punpck
    };

    if (ST->hasSSE2())
      if (const auto *Entry =
              CostTableLookup(SSE2SubVectorShuffleTbl, Kind, VT.getSimpleVT()))
        return Entry->Cost;
  }

  // We are going to permute multiple sources and the result will be in
  // multiple destinations. We provide an accurate cost only for splits where
  // the element type remains the same.
  if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
    MVT LegalVT = LT.second;
    if (LegalVT.isVector() &&
        LegalVT.getVectorElementType().getSizeInBits() ==
            BaseTp->getElementType()->getPrimitiveSizeInBits() &&
        LegalVT.getVectorNumElements() <
            cast<FixedVectorType>(BaseTp)->getNumElements()) {

      unsigned VecTySize = DL.getTypeStoreSize(BaseTp);
      unsigned LegalVTSize = LegalVT.getStoreSize();
      // Number of source vectors after legalization:
      unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
      // Number of destination vectors after legalization:
      unsigned NumOfDests = LT.first;

      auto *SingleOpTy = FixedVectorType::get(BaseTp->getElementType(),
                                              LegalVT.getVectorNumElements());

      unsigned NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
      return NumOfShuffles *
             getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy, 0, nullptr);
    }

    return BaseT::getShuffleCost(Kind, BaseTp, Index, SubTp);
  }

  // For 2-input shuffles, we must account for splitting the 2 inputs into many.
  if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
    // We assume that source and destination have the same vector type.
    int NumOfDests = LT.first;
    int NumOfShufflesPerDest = LT.first * 2 - 1;
    LT.first = NumOfDests * NumOfShufflesPerDest;
  }
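
  // Worked example (illustrative): a two-source v16i32 shuffle on AVX2
  // legalizes into two v8i32 halves (LT.first == 2), so this models
  // 2 destinations * (2 * 2 - 1) == 6 per-register shuffles, each paying
  // the per-type table cost looked up below.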
1097
1098 static const CostTblEntry AVX512VBMIShuffleTbl[] = {
1099 {TTI::SK_Reverse, MVT::v64i8, 1}, // vpermb
1100 {TTI::SK_Reverse, MVT::v32i8, 1}, // vpermb
1101
1102 {TTI::SK_PermuteSingleSrc, MVT::v64i8, 1}, // vpermb
1103 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 1}, // vpermb
1104
1105 {TTI::SK_PermuteTwoSrc, MVT::v64i8, 2}, // vpermt2b
1106 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 2}, // vpermt2b
1107 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 2} // vpermt2b
1108 };
1109
1110 if (ST->hasVBMI())
1111 if (const auto *Entry =
1112 CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
1113 return LT.first * Entry->Cost;
1114
1115 static const CostTblEntry AVX512BWShuffleTbl[] = {
1116 {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
1117 {TTI::SK_Broadcast, MVT::v64i8, 1}, // vpbroadcastb
1118
1119 {TTI::SK_Reverse, MVT::v32i16, 2}, // vpermw
1120 {TTI::SK_Reverse, MVT::v16i16, 2}, // vpermw
1121 {TTI::SK_Reverse, MVT::v64i8, 2}, // pshufb + vshufi64x2
1122
1123 {TTI::SK_PermuteSingleSrc, MVT::v32i16, 2}, // vpermw
1124 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 2}, // vpermw
1125 {TTI::SK_PermuteSingleSrc, MVT::v64i8, 8}, // extend to v32i16
1126
1127 {TTI::SK_PermuteTwoSrc, MVT::v32i16, 2}, // vpermt2w
1128 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 2}, // vpermt2w
1129 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 2}, // vpermt2w
1130 {TTI::SK_PermuteTwoSrc, MVT::v64i8, 19}, // 6 * v32i8 + 1
1131
1132 {TTI::SK_Select, MVT::v32i16, 1}, // vblendmw
1133 {TTI::SK_Select, MVT::v64i8, 1}, // vblendmb
1134 };
1135
1136 if (ST->hasBWI())
1137 if (const auto *Entry =
1138 CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second))
1139 return LT.first * Entry->Cost;
1140
1141 static const CostTblEntry AVX512ShuffleTbl[] = {
1142 {TTI::SK_Broadcast, MVT::v8f64, 1}, // vbroadcastpd
1143 {TTI::SK_Broadcast, MVT::v16f32, 1}, // vbroadcastps
1144 {TTI::SK_Broadcast, MVT::v8i64, 1}, // vpbroadcastq
1145 {TTI::SK_Broadcast, MVT::v16i32, 1}, // vpbroadcastd
1146 {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
1147 {TTI::SK_Broadcast, MVT::v64i8, 1}, // vpbroadcastb
1148
1149 {TTI::SK_Reverse, MVT::v8f64, 1}, // vpermpd
1150 {TTI::SK_Reverse, MVT::v16f32, 1}, // vpermps
1151 {TTI::SK_Reverse, MVT::v8i64, 1}, // vpermq
1152 {TTI::SK_Reverse, MVT::v16i32, 1}, // vpermd
1153
1154 {TTI::SK_PermuteSingleSrc, MVT::v8f64, 1}, // vpermpd
1155 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1}, // vpermpd
1156 {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // vpermpd
1157 {TTI::SK_PermuteSingleSrc, MVT::v16f32, 1}, // vpermps
1158 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1}, // vpermps
1159 {TTI::SK_PermuteSingleSrc, MVT::v4f32, 1}, // vpermps
1160 {TTI::SK_PermuteSingleSrc, MVT::v8i64, 1}, // vpermq
1161 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1}, // vpermq
1162 {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // vpermq
1163 {TTI::SK_PermuteSingleSrc, MVT::v16i32, 1}, // vpermd
1164 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1}, // vpermd
1165 {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // vpermd
1166 {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb
1167
1168 {TTI::SK_PermuteTwoSrc, MVT::v8f64, 1}, // vpermt2pd
1169 {TTI::SK_PermuteTwoSrc, MVT::v16f32, 1}, // vpermt2ps
1170 {TTI::SK_PermuteTwoSrc, MVT::v8i64, 1}, // vpermt2q
1171 {TTI::SK_PermuteTwoSrc, MVT::v16i32, 1}, // vpermt2d
1172 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 1}, // vpermt2pd
1173 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 1}, // vpermt2ps
1174 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 1}, // vpermt2q
1175 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 1}, // vpermt2d
1176 {TTI::SK_PermuteTwoSrc, MVT::v2f64, 1}, // vpermt2pd
1177 {TTI::SK_PermuteTwoSrc, MVT::v4f32, 1}, // vpermt2ps
1178 {TTI::SK_PermuteTwoSrc, MVT::v2i64, 1}, // vpermt2q
1179 {TTI::SK_PermuteTwoSrc, MVT::v4i32, 1}, // vpermt2d
1180
1181 // FIXME: This just applies the type legalization cost rules above
1182 // assuming these completely split.
1183 {TTI::SK_PermuteSingleSrc, MVT::v32i16, 14},
1184 {TTI::SK_PermuteSingleSrc, MVT::v64i8, 14},
1185 {TTI::SK_PermuteTwoSrc, MVT::v32i16, 42},
1186 {TTI::SK_PermuteTwoSrc, MVT::v64i8, 42},
1187
1188 {TTI::SK_Select, MVT::v32i16, 1}, // vpternlogq
1189 {TTI::SK_Select, MVT::v64i8, 1}, // vpternlogq
1190 {TTI::SK_Select, MVT::v8f64, 1}, // vblendmpd
1191 {TTI::SK_Select, MVT::v16f32, 1}, // vblendmps
1192 {TTI::SK_Select, MVT::v8i64, 1}, // vblendmq
1193 {TTI::SK_Select, MVT::v16i32, 1}, // vblendmd
1194 };
1195
1196 if (ST->hasAVX512())
1197 if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second))
1198 return LT.first * Entry->Cost;
1199
1200 static const CostTblEntry AVX2ShuffleTbl[] = {
1201 {TTI::SK_Broadcast, MVT::v4f64, 1}, // vbroadcastpd
1202 {TTI::SK_Broadcast, MVT::v8f32, 1}, // vbroadcastps
1203 {TTI::SK_Broadcast, MVT::v4i64, 1}, // vpbroadcastq
1204 {TTI::SK_Broadcast, MVT::v8i32, 1}, // vpbroadcastd
1205 {TTI::SK_Broadcast, MVT::v16i16, 1}, // vpbroadcastw
1206 {TTI::SK_Broadcast, MVT::v32i8, 1}, // vpbroadcastb
1207
1208 {TTI::SK_Reverse, MVT::v4f64, 1}, // vpermpd
1209 {TTI::SK_Reverse, MVT::v8f32, 1}, // vpermps
1210 {TTI::SK_Reverse, MVT::v4i64, 1}, // vpermq
1211 {TTI::SK_Reverse, MVT::v8i32, 1}, // vpermd
1212 {TTI::SK_Reverse, MVT::v16i16, 2}, // vperm2i128 + pshufb
1213 {TTI::SK_Reverse, MVT::v32i8, 2}, // vperm2i128 + pshufb
1214
1215 {TTI::SK_Select, MVT::v16i16, 1}, // vpblendvb
1216 {TTI::SK_Select, MVT::v32i8, 1}, // vpblendvb
1217
1218 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1}, // vpermpd
1219 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1}, // vpermps
1220 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1}, // vpermq
1221 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1}, // vpermd
1222 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vperm2i128 + 2*vpshufb
1223 // + vpblendvb
1224 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4}, // vperm2i128 + 2*vpshufb
1225 // + vpblendvb
1226
1227 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3}, // 2*vpermpd + vblendpd
1228 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 3}, // 2*vpermps + vblendps
1229 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3}, // 2*vpermq + vpblendd
1230 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 3}, // 2*vpermd + vpblendd
1231 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 7}, // 2*vperm2i128 + 4*vpshufb
1232 // + vpblendvb
1233 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 7}, // 2*vperm2i128 + 4*vpshufb
1234 // + vpblendvb
1235 };
1236
1237 if (ST->hasAVX2())
1238 if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second))
1239 return LT.first * Entry->Cost;
1240
1241 static const CostTblEntry XOPShuffleTbl[] = {
1242 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2}, // vperm2f128 + vpermil2pd
1243 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 2}, // vperm2f128 + vpermil2ps
1244 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2}, // vperm2f128 + vpermil2pd
1245 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 2}, // vperm2f128 + vpermil2ps
1246 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vextractf128 + 2*vpperm
1247 // + vinsertf128
1248 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4}, // vextractf128 + 2*vpperm
1249 // + vinsertf128
1250
1251 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 9}, // 2*vextractf128 + 6*vpperm
                                               // + vinsertf128
      {TTI::SK_PermuteTwoSrc, MVT::v8i16, 1},  // vpperm
      {TTI::SK_PermuteTwoSrc, MVT::v32i8, 9},  // 2*vextractf128 + 6*vpperm
                                               // + vinsertf128
      {TTI::SK_PermuteTwoSrc, MVT::v16i8, 1},  // vpperm
  };

  if (ST->hasXOP())
    if (const auto *Entry = CostTableLookup(XOPShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX1ShuffleTbl[] = {
      {TTI::SK_Broadcast, MVT::v4f64, 2},  // vperm2f128 + vpermilpd
      {TTI::SK_Broadcast, MVT::v8f32, 2},  // vperm2f128 + vpermilps
      {TTI::SK_Broadcast, MVT::v4i64, 2},  // vperm2f128 + vpermilpd
      {TTI::SK_Broadcast, MVT::v8i32, 2},  // vperm2f128 + vpermilps
      {TTI::SK_Broadcast, MVT::v16i16, 3}, // vpshuflw + vpshufd + vinsertf128
      {TTI::SK_Broadcast, MVT::v32i8, 2},  // vpshufb + vinsertf128

      {TTI::SK_Reverse, MVT::v4f64, 2},  // vperm2f128 + vpermilpd
      {TTI::SK_Reverse, MVT::v8f32, 2},  // vperm2f128 + vpermilps
      {TTI::SK_Reverse, MVT::v4i64, 2},  // vperm2f128 + vpermilpd
      {TTI::SK_Reverse, MVT::v8i32, 2},  // vperm2f128 + vpermilps
      {TTI::SK_Reverse, MVT::v16i16, 4}, // vextractf128 + 2*pshufb
                                         // + vinsertf128
      {TTI::SK_Reverse, MVT::v32i8, 4},  // vextractf128 + 2*pshufb
                                         // + vinsertf128

      {TTI::SK_Select, MVT::v4i64, 1},  // vblendpd
      {TTI::SK_Select, MVT::v4f64, 1},  // vblendpd
      {TTI::SK_Select, MVT::v8i32, 1},  // vblendps
      {TTI::SK_Select, MVT::v8f32, 1},  // vblendps
      {TTI::SK_Select, MVT::v16i16, 3}, // vpand + vpandn + vpor
      {TTI::SK_Select, MVT::v32i8, 3},  // vpand + vpandn + vpor

      {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2},  // vperm2f128 + vshufpd
      {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2},  // vperm2f128 + vshufpd
      {TTI::SK_PermuteSingleSrc, MVT::v8f32, 4},  // 2*vperm2f128 + 2*vshufps
      {TTI::SK_PermuteSingleSrc, MVT::v8i32, 4},  // 2*vperm2f128 + 2*vshufps
      {TTI::SK_PermuteSingleSrc, MVT::v16i16, 8}, // vextractf128 + 4*pshufb
                                                  // + 2*por + vinsertf128
      {TTI::SK_PermuteSingleSrc, MVT::v32i8, 8},  // vextractf128 + 4*pshufb
                                                  // + 2*por + vinsertf128

      {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3},   // 2*vperm2f128 + vshufpd
      {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3},   // 2*vperm2f128 + vshufpd
      {TTI::SK_PermuteTwoSrc, MVT::v8f32, 4},   // 2*vperm2f128 + 2*vshufps
      {TTI::SK_PermuteTwoSrc, MVT::v8i32, 4},   // 2*vperm2f128 + 2*vshufps
      {TTI::SK_PermuteTwoSrc, MVT::v16i16, 15}, // 2*vextractf128 + 8*pshufb
                                                // + 4*por + vinsertf128
      {TTI::SK_PermuteTwoSrc, MVT::v32i8, 15},  // 2*vextractf128 + 8*pshufb
                                                // + 4*por + vinsertf128
  };

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE41ShuffleTbl[] = {
      {TTI::SK_Select, MVT::v2i64, 1}, // pblendw
      {TTI::SK_Select, MVT::v2f64, 1}, // movsd
      {TTI::SK_Select, MVT::v4i32, 1}, // pblendw
      {TTI::SK_Select, MVT::v4f32, 1}, // blendps
      {TTI::SK_Select, MVT::v8i16, 1}, // pblendw
      {TTI::SK_Select, MVT::v16i8, 1}  // pblendvb
  };

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSSE3ShuffleTbl[] = {
      {TTI::SK_Broadcast, MVT::v8i16, 1}, // pshufb
      {TTI::SK_Broadcast, MVT::v16i8, 1}, // pshufb

      {TTI::SK_Reverse, MVT::v8i16, 1}, // pshufb
      {TTI::SK_Reverse, MVT::v16i8, 1}, // pshufb

      {TTI::SK_Select, MVT::v8i16, 3}, // 2*pshufb + por
      {TTI::SK_Select, MVT::v16i8, 3}, // 2*pshufb + por

      {TTI::SK_PermuteSingleSrc, MVT::v8i16, 1}, // pshufb
      {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb

      {TTI::SK_PermuteTwoSrc, MVT::v8i16, 3}, // 2*pshufb + por
      {TTI::SK_PermuteTwoSrc, MVT::v16i8, 3}, // 2*pshufb + por
  };

  if (ST->hasSSSE3())
    if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2ShuffleTbl[] = {
      {TTI::SK_Broadcast, MVT::v2f64, 1}, // shufpd
      {TTI::SK_Broadcast, MVT::v2i64, 1}, // pshufd
      {TTI::SK_Broadcast, MVT::v4i32, 1}, // pshufd
      {TTI::SK_Broadcast, MVT::v8i16, 2}, // pshuflw + pshufd
      {TTI::SK_Broadcast, MVT::v16i8, 3}, // unpck + pshuflw + pshufd

      {TTI::SK_Reverse, MVT::v2f64, 1}, // shufpd
      {TTI::SK_Reverse, MVT::v2i64, 1}, // pshufd
      {TTI::SK_Reverse, MVT::v4i32, 1}, // pshufd
      {TTI::SK_Reverse, MVT::v8i16, 3}, // pshuflw + pshufhw + pshufd
      {TTI::SK_Reverse, MVT::v16i8, 9}, // 2*pshuflw + 2*pshufhw
                                        // + 2*pshufd + 2*unpck + packus

      {TTI::SK_Select, MVT::v2i64, 1}, // movsd
      {TTI::SK_Select, MVT::v2f64, 1}, // movsd
      {TTI::SK_Select, MVT::v4i32, 2}, // 2*shufps
      {TTI::SK_Select, MVT::v8i16, 3}, // pand + pandn + por
      {TTI::SK_Select, MVT::v16i8, 3}, // pand + pandn + por

      {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // shufpd
      {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // pshufd
      {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // pshufd
      {TTI::SK_PermuteSingleSrc, MVT::v8i16, 5}, // 2*pshuflw + 2*pshufhw
                                                 // + pshufd/unpck
      {TTI::SK_PermuteSingleSrc, MVT::v16i8, 10}, // 2*pshuflw + 2*pshufhw
                                                  // + 2*pshufd + 2*unpck
                                                  // + 2*packus

      {TTI::SK_PermuteTwoSrc, MVT::v2f64, 1},  // shufpd
      {TTI::SK_PermuteTwoSrc, MVT::v2i64, 1},  // shufpd
      {TTI::SK_PermuteTwoSrc, MVT::v4i32, 2},  // 2*{unpck,movsd,pshufd}
      {TTI::SK_PermuteTwoSrc, MVT::v8i16, 8},  // blend+permute
      {TTI::SK_PermuteTwoSrc, MVT::v16i8, 13}, // blend+permute
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE1ShuffleTbl[] = {
      {TTI::SK_Broadcast, MVT::v4f32, 1},        // shufps
      {TTI::SK_Reverse, MVT::v4f32, 1},          // shufps
      {TTI::SK_Select, MVT::v4f32, 2},           // 2*shufps
      {TTI::SK_PermuteSingleSrc, MVT::v4f32, 1}, // shufps
      {TTI::SK_PermuteTwoSrc, MVT::v4f32, 2},    // 2*shufps
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  return BaseT::getShuffleCost(Kind, BaseTp, Index, SubTp);
}
int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 TTI::CastContextHint CCH,
                                 TTI::TargetCostKind CostKind,
                                 const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // TODO: Allow non-throughput costs that aren't binary.
  auto AdjustCost = [&CostKind](int Cost) {
    if (CostKind != TTI::TCK_RecipThroughput)
      return Cost == 0 ? 0 : 1;
    return Cost;
  };
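  // For illustration: with this adjustment a table entry whose throughput
  // cost is, say, 3 is returned unchanged for TCK_RecipThroughput queries,
  // but collapses to 1 for every other cost kind; only genuinely free casts
  // (cost 0) stay free.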

  // FIXME: The cost tables need a better design to cope with non-simple
  // types, where the number of potential combinations
  // (elem_num x src_type x dst_type) is massive.

  static const TypeConversionCostTblEntry AVX512BWConversionTbl[] {
    { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 1 },

    // Mask sign extend has an instruction.
    { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v64i8, MVT::v64i1, 1 },

    // Mask zero extend is a sext + shift.
    { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v64i8, MVT::v64i1, 2 },

    { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 2 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, // widen to zmm
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 }, // widen to zmm
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // widen to zmm
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // widen to zmm
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 }, // widen to zmm
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 }, // widen to zmm
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 }, // widen to zmm
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 }, // widen to zmm
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 }, // widen to zmm
    { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 }, // widen to zmm
    { ISD::TRUNCATE, MVT::v32i1, MVT::v32i16, 2 },
    { ISD::TRUNCATE, MVT::v64i1, MVT::v64i8, 2 },
  };
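  // An illustrative instance of the mask-extend entries above (a sketch,
  // not exhaustive of the lowering):
  //   %s = sext <8 x i1> %m to <8 x i16>  ; one vpmovm2w          -> cost 1
  //   %z = zext <8 x i1> %m to <8 x i16>  ; vpmovm2w + vpsrlw $15 -> cost 2
  // i.e. the zero extend is the sign extend plus a shift that clears all but
  // the low bit of each element.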

  static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },

    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },

    { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f32, 1 },
    { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f64, 1 },

    { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 },
    { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 },
  };

  // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
  // 256-bit wide vectors.

  static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
    { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 },
    { ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 },
    { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 1 },

    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // sext+vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 }, // sext+vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 }, // sext+vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 3 }, // sext+vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 }, // sext+vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 }, // sext+vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 }, // sext+vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 3 }, // sext+vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 }, // zmm vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, // zmm vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, // zmm vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i32, 2 }, // vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, // zmm vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, // zmm vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 2 }, // vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 2 },
    { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 2 },
    { ISD::TRUNCATE, MVT::v8i8, MVT::v8i64, 2 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 2 },
    { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 1 },
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, // zmm vpmovqd
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i64, 5 }, // 2*vpmovqd+concat+vpmovdb

    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 }, // extend to v16i32
    { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 8 },

    // Sign extend is zmm vpternlogd+vptruncdb.
    // Zero extend is zmm broadcast load+vptruncdw.
    { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 4 },
    { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 4 },
    { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 4 },
    { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 4 },

    // Sign extend is zmm vpternlogd+vptruncdw.
    // Zero extend is zmm vpternlogd+vptruncdw+vpsrlw.
    { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 4 },
    { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 4 },
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 4 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 },

    { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // zmm vpternlogd
    { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // zmm vpternlogd+psrld
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // zmm vpternlogd
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // zmm vpternlogd+psrld
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // zmm vpternlogd
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // zmm vpternlogd+psrld
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // zmm vpternlogq
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // zmm vpternlogq+psrlq
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // zmm vpternlogq
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // zmm vpternlogq+psrlq

    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 1 }, // vpternlogd
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 }, // vpternlogd+psrld
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i1, 1 }, // vpternlogq
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i1, 2 }, // vpternlogq+psrlq

    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i32, 1 },

    { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right
    { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right

    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 },
    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },

    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 26 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 5 },

    { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f64, 3 },
    { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f64, 3 },
    { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f32, 3 },
    { ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f32, 3 },

    { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f64, 1 },
    { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f64, 3 },
    { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f64, 3 },
    { ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f32, 1 },
    { ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f32, 3 },
    { ISD::FP_TO_UINT, MVT::v16i8, MVT::v16f32, 3 },
  };
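  // A sketch of the vpslld+vptestmd pattern annotated above for truncation to
  // i1: vpslld $31 moves bit 0 of each element into the sign bit, and
  // vptestmd then sets the corresponding mask bit iff the shifted element is
  // nonzero, i.e. iff the original low bit was set.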

  static const TypeConversionCostTblEntry AVX512BWVLConversionTbl[] {
    // Mask sign extend has an instruction.
    { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 },

    // Mask zero extend is a sext + shift.
    { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 },

    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 },
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 }, // vpsllw+vptestmb
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // vpsllw+vptestmw
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // vpsllw+vptestmb
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 }, // vpsllw+vptestmw
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 }, // vpsllw+vptestmb
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 }, // vpsllw+vptestmw
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 }, // vpsllw+vptestmb
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 }, // vpsllw+vptestmw
    { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 }, // vpsllw+vptestmb
  };

  static const TypeConversionCostTblEntry AVX512DQVLConversionTbl[] = {
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },

    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },

    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f64, 1 },

    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 },
  };

  static const TypeConversionCostTblEntry AVX512VLConversionTbl[] = {
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // sext+vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 }, // sext+vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 }, // sext+vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 8 }, // split+2*v8i8
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 }, // sext+vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 }, // sext+vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 }, // sext+vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 8 }, // split+2*v8i16
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 }, // vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, // vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, // vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, // vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, // vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, // vpmovqd

    // sign extend is vpcmpeq+maskedmove+vpmovdw+vpacksswb
    // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw+vpackuswb
    { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 5 },
    { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 6 },
    { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 5 },
    { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 6 },
    { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 5 },
    { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 10 },
    { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 12 },

    // sign extend is vpcmpeq+maskedmove+vpmovdw
    // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw
    { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 4 },
    { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 5 },
    { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 4 },
    { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 5 },
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 4 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 5 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 10 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 12 },

    { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // vpternlogd
    { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // vpternlogd+psrld
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // vpternlogd
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // vpternlogd+psrld
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // vpternlogd
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // vpternlogd+psrld
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // vpternlogq
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // vpternlogq+psrlq
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // vpternlogq
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // vpternlogq+psrlq

    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 2 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 5 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 5 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 5 },

    { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 1 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 1 },

    { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 3 },
    { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f32, 3 },

    { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 1 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 1 },

    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 1 },
    { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 1 },
  };

  static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 3 },

    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 },
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 },

    { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 2 },
    { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2 },
    { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 2 },

    { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 3 },
    { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 3 },

    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 8 },
  };

  static const TypeConversionCostTblEntry AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 6 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 4 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 4 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 4 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 4 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 4 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 4 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 4 },

    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 4 },
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 5 },
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 4 },
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 9 },
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i64, 11 },

    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 4 },
    { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
    { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 4 },
    { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 4 },
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 },
    { ISD::TRUNCATE, MVT::v8i8, MVT::v8i64, 11 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 9 },
    { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 3 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i64, 11 },

    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i8, 3 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 8 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 3 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i16, 3 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },

    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 2 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 5 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 6 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 6 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 9 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 6 },
    // The generic code to compute the scalar overhead is currently broken.
    // Work around this limitation by estimating the scalarization overhead
    // here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove this when PR19268 is fixed.
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 13 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 13 },

    { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 4 },
    { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f64, 3 },
    { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f64, 2 },
    { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f32, 3 },

    { ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f64, 3 },
    { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f64, 2 },
    { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f32, 4 },
    { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f32, 3 },
    // This node is expanded into scalarized operations, but BasicTTI is
    // overly optimistic when estimating its cost. It computes 3 per element
    // (one vector-extract, one scalar conversion and one vector-insert).
    // The problem is that the inserts form a read-modify-write chain, so
    // latency should be factored in too. Inflate the cost per element by 1
    // to account for this.
    { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 8*4 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 4*4 },

    { ISD::FP_EXTEND, MVT::v4f64, MVT::v4f32, 1 },
    { ISD::FP_ROUND, MVT::v4f32, MVT::v4f64, 1 },
  };
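  // Worked example of the inflated entries above: v8f32 -> v8i32 is costed as
  // 8 elements * (1 extract + 1 convert + 1 insert + 1 for the chained insert
  // latency) = 8*4 = 32.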

  static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 },

    { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 4 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 4 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 },

    // These truncates end up widening elements.
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 1 }, // PMOVXZBQ
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 1 }, // PMOVXZWQ
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 1 }, // PMOVXZBD

    { ISD::TRUNCATE, MVT::v2i8, MVT::v2i16, 1 },
    { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 1 },
    { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 1 },
    { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 1 },
    { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 },
    { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 3 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 3 },
    { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 6 },
    { ISD::TRUNCATE, MVT::v2i8, MVT::v2i64, 1 }, // PSHUFB

    { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 4 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 4 },

    { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f32, 3 },
    { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f64, 3 },

    { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f32, 3 },
    { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f64, 3 },
    { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
  };

  static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
    // These are somewhat magic numbers, justified by comparing against the
    // output of Intel's IACA on some kernels, and chosen so that the
    // throughput is overestimated rather than underestimated once
    // legalization is taken into account.
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 2*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2*10 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },

    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 6 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },

    { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f32, 4 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 2 },
    { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f64, 4 },

    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 1 },

    { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 6 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 6 },

    { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 4 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 4 },
    { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f32, 4 },
    { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f64, 4 },
    { ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 4 },

    { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 8 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 6 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 9 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 12 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 10 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 8 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 5 },

    // These truncates are really widening elements.
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 1 }, // PSHUFD
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // PUNPCKLWD+DQ
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // PUNPCKLBW+WD+PSHUFD
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 1 }, // PUNPCKLWD
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // PUNPCKLBW+WD
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 1 }, // PUNPCKLBW

    { ISD::TRUNCATE, MVT::v2i8, MVT::v2i16, 2 }, // PAND+PACKUSWB
    { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 2 }, // PAND+PACKUSWB
    { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 }, // PAND+PACKUSWB
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 },
    { ISD::TRUNCATE, MVT::v2i8, MVT::v2i32, 3 }, // PAND+2*PACKUSWB
    { ISD::TRUNCATE, MVT::v2i16, MVT::v2i32, 1 },
    { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 3 },
    { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 3 },
    { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
    { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 10 },
    { ISD::TRUNCATE, MVT::v2i8, MVT::v2i64, 4 }, // PAND+3*PACKUSWB
    { ISD::TRUNCATE, MVT::v2i16, MVT::v2i64, 2 }, // PSHUFD+PSHUFLW
    { ISD::TRUNCATE, MVT::v2i32, MVT::v2i64, 1 }, // PSHUFD
  };

  std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
  std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst);

  if (ST->hasSSE2() && !ST->hasAVX()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(LTSrc.first * Entry->Cost);
  }
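  // For example, on an SSE2-only target:
  //   %r = sitofp <8 x i32> %v to <8 x float>
  // legalizes the source into two v4i32 halves (LTSrc.first == 2), so the
  // v4f32 <- v4i32 entry above (cost 5) gives 2 * 5 = 10.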

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  // The function getSimpleVT only handles simple value types.
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return AdjustCost(BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind));

  MVT SimpleSrcTy = SrcTy.getSimpleVT();
  MVT SimpleDstTy = DstTy.getSimpleVT();

  if (ST->useAVX512Regs()) {
    if (ST->hasBWI())
      if (const auto *Entry = ConvertCostTableLookup(AVX512BWConversionTbl, ISD,
                                                     SimpleDstTy, SimpleSrcTy))
        return AdjustCost(Entry->Cost);

    if (ST->hasDQI())
      if (const auto *Entry = ConvertCostTableLookup(AVX512DQConversionTbl, ISD,
                                                     SimpleDstTy, SimpleSrcTy))
        return AdjustCost(Entry->Cost);

    if (ST->hasAVX512())
      if (const auto *Entry = ConvertCostTableLookup(AVX512FConversionTbl, ISD,
                                                     SimpleDstTy, SimpleSrcTy))
        return AdjustCost(Entry->Cost);
  }

  if (ST->hasBWI())
    if (const auto *Entry = ConvertCostTableLookup(AVX512BWVLConversionTbl, ISD,
                                                   SimpleDstTy, SimpleSrcTy))
      return AdjustCost(Entry->Cost);

  if (ST->hasDQI())
    if (const auto *Entry = ConvertCostTableLookup(AVX512DQVLConversionTbl, ISD,
                                                   SimpleDstTy, SimpleSrcTy))
      return AdjustCost(Entry->Cost);

  if (ST->hasAVX512())
    if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD,
                                                   SimpleDstTy, SimpleSrcTy))
      return AdjustCost(Entry->Cost);

  if (ST->hasAVX2()) {
    if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                                   SimpleDstTy, SimpleSrcTy))
      return AdjustCost(Entry->Cost);
  }

  if (ST->hasAVX()) {
    if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
                                                   SimpleDstTy, SimpleSrcTy))
      return AdjustCost(Entry->Cost);
  }

  if (ST->hasSSE41()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
                                                   SimpleDstTy, SimpleSrcTy))
      return AdjustCost(Entry->Cost);
  }

  if (ST->hasSSE2()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
                                                   SimpleDstTy, SimpleSrcTy))
      return AdjustCost(Entry->Cost);
  }
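  // Note that the lookups above run from the most specific feature set to the
  // least specific one, so a target is charged the cheapest cost its ISA can
  // justify; e.g. with AVX512BW a v16i8 <- v16i16 truncate is answered by a
  // BW table (cost 2) before the SSE2 entry (cost 3) is ever consulted.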

  return AdjustCost(
      BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
}

int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                   CmpInst::Predicate VecPred,
                                   TTI::TargetCostKind CostKind,
                                   const Instruction *I) {
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
                                     I);

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  unsigned ExtraCost = 0;
  if (I && (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)) {
    // Some vector comparison predicates cost extra instructions.
    if (MTy.isVector() &&
        !((ST->hasXOP() && (!ST->hasAVX2() || MTy.is128BitVector())) ||
          (ST->hasAVX512() && 32 <= MTy.getScalarSizeInBits()) ||
          ST->hasBWI())) {
      switch (cast<CmpInst>(I)->getPredicate()) {
      case CmpInst::Predicate::ICMP_NE:
        // xor(cmpeq(x,y),-1)
        ExtraCost = 1;
        break;
      case CmpInst::Predicate::ICMP_SGE:
      case CmpInst::Predicate::ICMP_SLE:
        // xor(cmpgt(x,y),-1)
        ExtraCost = 1;
        break;
      case CmpInst::Predicate::ICMP_ULT:
      case CmpInst::Predicate::ICMP_UGT:
        // cmpgt(xor(x,signbit),xor(y,signbit))
        // xor(cmpeq(pmaxu(x,y),x),-1)
        ExtraCost = 2;
        break;
      case CmpInst::Predicate::ICMP_ULE:
      case CmpInst::Predicate::ICMP_UGE:
        if ((ST->hasSSE41() && MTy.getScalarSizeInBits() == 32) ||
            (ST->hasSSE2() && MTy.getScalarSizeInBits() < 32)) {
          // cmpeq(psubus(x,y),0)
          // cmpeq(pminu(x,y),x)
          ExtraCost = 1;
        } else {
          // xor(cmpgt(xor(x,signbit),xor(y,signbit)),-1)
          ExtraCost = 3;
        }
        break;
      default:
        break;
      }
    }
  }
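  // For example, with plain SSE2:
  //   %c = icmp ugt <16 x i8> %a, %b
  // has no unsigned byte compare to lower to, so it is emitted as a signed
  // pcmpgtb on sign-flipped operands (two pxor plus the compare), which the
  // switch above models as ExtraCost = 2 on top of the base SETCC cost.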

  static const CostTblEntry SLMCostTbl[] = {
    // slm pcmpeq/pcmpgt throughput is 2
    { ISD::SETCC, MVT::v2i64, 2 },
  };

  static const CostTblEntry AVX512BWCostTbl[] = {
    { ISD::SETCC, MVT::v32i16, 1 },
    { ISD::SETCC, MVT::v64i8, 1 },

    { ISD::SELECT, MVT::v32i16, 1 },
    { ISD::SELECT, MVT::v64i8, 1 },
  };

  static const CostTblEntry AVX512CostTbl[] = {
    { ISD::SETCC, MVT::v8i64, 1 },
    { ISD::SETCC, MVT::v16i32, 1 },
    { ISD::SETCC, MVT::v8f64, 1 },
    { ISD::SETCC, MVT::v16f32, 1 },

    { ISD::SELECT, MVT::v8i64, 1 },
    { ISD::SELECT, MVT::v16i32, 1 },
    { ISD::SELECT, MVT::v8f64, 1 },
    { ISD::SELECT, MVT::v16f32, 1 },

    { ISD::SETCC, MVT::v32i16, 2 }, // FIXME: should probably be 4
    { ISD::SETCC, MVT::v64i8, 2 }, // FIXME: should probably be 4

    { ISD::SELECT, MVT::v32i16, 2 }, // FIXME: should be 3
    { ISD::SELECT, MVT::v64i8, 2 }, // FIXME: should be 3
  };

  static const CostTblEntry AVX2CostTbl[] = {
    { ISD::SETCC, MVT::v4i64, 1 },
    { ISD::SETCC, MVT::v8i32, 1 },
    { ISD::SETCC, MVT::v16i16, 1 },
    { ISD::SETCC, MVT::v32i8, 1 },

    { ISD::SELECT, MVT::v4i64, 1 }, // pblendvb
    { ISD::SELECT, MVT::v8i32, 1 }, // pblendvb
    { ISD::SELECT, MVT::v16i16, 1 }, // pblendvb
    { ISD::SELECT, MVT::v32i8, 1 }, // pblendvb
  };

  static const CostTblEntry AVX1CostTbl[] = {
    { ISD::SETCC, MVT::v4f64, 1 },
    { ISD::SETCC, MVT::v8f32, 1 },
    // AVX1 does not support 8-wide integer compare.
    { ISD::SETCC, MVT::v4i64, 4 },
    { ISD::SETCC, MVT::v8i32, 4 },
    { ISD::SETCC, MVT::v16i16, 4 },
    { ISD::SETCC, MVT::v32i8, 4 },

    { ISD::SELECT, MVT::v4f64, 1 }, // vblendvpd
    { ISD::SELECT, MVT::v8f32, 1 }, // vblendvps
    { ISD::SELECT, MVT::v4i64, 1 }, // vblendvpd
    { ISD::SELECT, MVT::v8i32, 1 }, // vblendvps
    { ISD::SELECT, MVT::v16i16, 3 }, // vandps + vandnps + vorps
    { ISD::SELECT, MVT::v32i8, 3 }, // vandps + vandnps + vorps
  };

  static const CostTblEntry SSE42CostTbl[] = {
    { ISD::SETCC, MVT::v2f64, 1 },
    { ISD::SETCC, MVT::v4f32, 1 },
    { ISD::SETCC, MVT::v2i64, 1 },
  };

  static const CostTblEntry SSE41CostTbl[] = {
    { ISD::SELECT, MVT::v2f64, 1 }, // blendvpd
    { ISD::SELECT, MVT::v4f32, 1 }, // blendvps
    { ISD::SELECT, MVT::v2i64, 1 }, // pblendvb
    { ISD::SELECT, MVT::v4i32, 1 }, // pblendvb
    { ISD::SELECT, MVT::v8i16, 1 }, // pblendvb
    { ISD::SELECT, MVT::v16i8, 1 }, // pblendvb
  };

  static const CostTblEntry SSE2CostTbl[] = {
    { ISD::SETCC, MVT::v2f64, 2 },
    { ISD::SETCC, MVT::f64, 1 },
    { ISD::SETCC, MVT::v2i64, 8 },
    { ISD::SETCC, MVT::v4i32, 1 },
    { ISD::SETCC, MVT::v8i16, 1 },
    { ISD::SETCC, MVT::v16i8, 1 },

    { ISD::SELECT, MVT::v2f64, 3 }, // andpd + andnpd + orpd
    { ISD::SELECT, MVT::v2i64, 3 }, // pand + pandn + por
    { ISD::SELECT, MVT::v4i32, 3 }, // pand + pandn + por
    { ISD::SELECT, MVT::v8i16, 3 }, // pand + pandn + por
    { ISD::SELECT, MVT::v16i8, 3 }, // pand + pandn + por
  };

  static const CostTblEntry SSE1CostTbl[] = {
    { ISD::SETCC, MVT::v4f32, 2 },
    { ISD::SETCC, MVT::f32, 1 },

    { ISD::SELECT, MVT::v4f32, 3 }, // andps + andnps + orps
  };

  if (ST->isSLM())
    if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);
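  // As with the cast costs, these lookups are ordered from the most specific
  // feature set to the least: e.g. a select on v16i8 costs 1 (pblendvb) once
  // SSE4.1 is available, but falls through to the SSE2 pand+pandn+por
  // sequence (cost 3) on older targets.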

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
}

unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; }

int X86TTIImpl::getTypeBasedIntrinsicInstrCost(
    const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) {

  // Costs should match the codegen from:
  // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll
  // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll
  // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll
  // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll
  // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll

  // TODO: Overflow intrinsics (*ADDO, *SUBO, *MULO) with vector types are not
  //       specialized in these tables yet.
  static const CostTblEntry AVX512CDCostTbl[] = {
    { ISD::CTLZ, MVT::v8i64, 1 },
    { ISD::CTLZ, MVT::v16i32, 1 },
    { ISD::CTLZ, MVT::v32i16, 8 },
    { ISD::CTLZ, MVT::v64i8, 20 },
    { ISD::CTLZ, MVT::v4i64, 1 },
    { ISD::CTLZ, MVT::v8i32, 1 },
    { ISD::CTLZ, MVT::v16i16, 4 },
    { ISD::CTLZ, MVT::v32i8, 10 },
    { ISD::CTLZ, MVT::v2i64, 1 },
    { ISD::CTLZ, MVT::v4i32, 1 },
    { ISD::CTLZ, MVT::v8i16, 4 },
    { ISD::CTLZ, MVT::v16i8, 4 },
  };
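  // The single-instruction entries above reflect AVX512CD's native
  // leading-zero-count instructions (vplzcntd/vplzcntq); the i16/i8 element
  // costs presumably model emulating the count through the wider instruction
  // plus the fix-up of the result.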
  static const CostTblEntry AVX512BWCostTbl[] = {
    { ISD::ABS, MVT::v32i16, 1 },
    { ISD::ABS, MVT::v64i8, 1 },
    { ISD::BITREVERSE, MVT::v8i64, 5 },
    { ISD::BITREVERSE, MVT::v16i32, 5 },
    { ISD::BITREVERSE, MVT::v32i16, 5 },
    { ISD::BITREVERSE, MVT::v64i8, 5 },
    { ISD::CTLZ, MVT::v8i64, 23 },
    { ISD::CTLZ, MVT::v16i32, 22 },
    { ISD::CTLZ, MVT::v32i16, 18 },
    { ISD::CTLZ, MVT::v64i8, 17 },
    { ISD::CTPOP, MVT::v8i64, 7 },
    { ISD::CTPOP, MVT::v16i32, 11 },
    { ISD::CTPOP, MVT::v32i16, 9 },
    { ISD::CTPOP, MVT::v64i8, 6 },
    { ISD::CTTZ, MVT::v8i64, 10 },
    { ISD::CTTZ, MVT::v16i32, 14 },
    { ISD::CTTZ, MVT::v32i16, 12 },
    { ISD::CTTZ, MVT::v64i8, 9 },
    { ISD::SADDSAT, MVT::v32i16, 1 },
    { ISD::SADDSAT, MVT::v64i8, 1 },
    { ISD::SMAX, MVT::v32i16, 1 },
    { ISD::SMAX, MVT::v64i8, 1 },
    { ISD::SMIN, MVT::v32i16, 1 },
    { ISD::SMIN, MVT::v64i8, 1 },
    { ISD::SSUBSAT, MVT::v32i16, 1 },
    { ISD::SSUBSAT, MVT::v64i8, 1 },
    { ISD::UADDSAT, MVT::v32i16, 1 },
    { ISD::UADDSAT, MVT::v64i8, 1 },
    { ISD::UMAX, MVT::v32i16, 1 },
    { ISD::UMAX, MVT::v64i8, 1 },
    { ISD::UMIN, MVT::v32i16, 1 },
    { ISD::UMIN, MVT::v64i8, 1 },
    { ISD::USUBSAT, MVT::v32i16, 1 },
    { ISD::USUBSAT, MVT::v64i8, 1 },
  };
  static const CostTblEntry AVX512CostTbl[] = {
    { ISD::ABS, MVT::v8i64, 1 },
    { ISD::ABS, MVT::v16i32, 1 },
    { ISD::ABS, MVT::v32i16, 2 }, // FIXME: include split
    { ISD::ABS, MVT::v64i8, 2 }, // FIXME: include split
    { ISD::ABS, MVT::v4i64, 1 },
    { ISD::ABS, MVT::v2i64, 1 },
    { ISD::BITREVERSE, MVT::v8i64, 36 },
    { ISD::BITREVERSE, MVT::v16i32, 24 },
    { ISD::BITREVERSE, MVT::v32i16, 10 },
    { ISD::BITREVERSE, MVT::v64i8, 10 },
    { ISD::CTLZ, MVT::v8i64, 29 },
    { ISD::CTLZ, MVT::v16i32, 35 },
    { ISD::CTLZ, MVT::v32i16, 28 },
    { ISD::CTLZ, MVT::v64i8, 18 },
    { ISD::CTPOP, MVT::v8i64, 16 },
    { ISD::CTPOP, MVT::v16i32, 24 },
    { ISD::CTPOP, MVT::v32i16, 18 },
    { ISD::CTPOP, MVT::v64i8, 12 },
    { ISD::CTTZ, MVT::v8i64, 20 },
    { ISD::CTTZ, MVT::v16i32, 28 },
    { ISD::CTTZ, MVT::v32i16, 24 },
    { ISD::CTTZ, MVT::v64i8, 18 },
    { ISD::SMAX, MVT::v8i64, 1 },
    { ISD::SMAX, MVT::v16i32, 1 },
    { ISD::SMAX, MVT::v32i16, 2 }, // FIXME: include split
    { ISD::SMAX, MVT::v64i8, 2 }, // FIXME: include split
    { ISD::SMAX, MVT::v4i64, 1 },
    { ISD::SMAX, MVT::v2i64, 1 },
    { ISD::SMIN, MVT::v8i64, 1 },
    { ISD::SMIN, MVT::v16i32, 1 },
    { ISD::SMIN, MVT::v32i16, 2 }, // FIXME: include split
    { ISD::SMIN, MVT::v64i8, 2 }, // FIXME: include split
    { ISD::SMIN, MVT::v4i64, 1 },
    { ISD::SMIN, MVT::v2i64, 1 },
    { ISD::UMAX, MVT::v8i64, 1 },
    { ISD::UMAX, MVT::v16i32, 1 },
    { ISD::UMAX, MVT::v32i16, 2 }, // FIXME: include split
    { ISD::UMAX, MVT::v64i8, 2 }, // FIXME: include split
    { ISD::UMAX, MVT::v4i64, 1 },
    { ISD::UMAX, MVT::v2i64, 1 },
    { ISD::UMIN, MVT::v8i64, 1 },
    { ISD::UMIN, MVT::v16i32, 1 },
    { ISD::UMIN, MVT::v32i16, 2 }, // FIXME: include split
    { ISD::UMIN, MVT::v64i8, 2 }, // FIXME: include split
    { ISD::UMIN, MVT::v4i64, 1 },
    { ISD::UMIN, MVT::v2i64, 1 },
    { ISD::USUBSAT, MVT::v16i32, 2 }, // pmaxud + psubd
    { ISD::USUBSAT, MVT::v2i64, 2 }, // pmaxuq + psubq
    { ISD::USUBSAT, MVT::v4i64, 2 }, // pmaxuq + psubq
    { ISD::USUBSAT, MVT::v8i64, 2 }, // pmaxuq + psubq
    { ISD::UADDSAT, MVT::v16i32, 3 }, // not + pminud + paddd
    { ISD::UADDSAT, MVT::v2i64, 3 }, // not + pminuq + paddq
    { ISD::UADDSAT, MVT::v4i64, 3 }, // not + pminuq + paddq
    { ISD::UADDSAT, MVT::v8i64, 3 }, // not + pminuq + paddq
    { ISD::SADDSAT, MVT::v32i16, 2 }, // FIXME: include split
    { ISD::SADDSAT, MVT::v64i8, 2 }, // FIXME: include split
    { ISD::SSUBSAT, MVT::v32i16, 2 }, // FIXME: include split
    { ISD::SSUBSAT, MVT::v64i8, 2 }, // FIXME: include split
    { ISD::UADDSAT, MVT::v32i16, 2 }, // FIXME: include split
    { ISD::UADDSAT, MVT::v64i8, 2 }, // FIXME: include split
    { ISD::USUBSAT, MVT::v32i16, 2 }, // FIXME: include split
    { ISD::USUBSAT, MVT::v64i8, 2 }, // FIXME: include split
    { ISD::FMAXNUM, MVT::f32, 2 },
    { ISD::FMAXNUM, MVT::v4f32, 2 },
    { ISD::FMAXNUM, MVT::v8f32, 2 },
    { ISD::FMAXNUM, MVT::v16f32, 2 },
    { ISD::FMAXNUM, MVT::f64, 2 },
    { ISD::FMAXNUM, MVT::v2f64, 2 },
    { ISD::FMAXNUM, MVT::v4f64, 2 },
    { ISD::FMAXNUM, MVT::v8f64, 2 },
  };
  static const CostTblEntry XOPCostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64, 4 },
    { ISD::BITREVERSE, MVT::v8i32, 4 },
    { ISD::BITREVERSE, MVT::v16i16, 4 },
    { ISD::BITREVERSE, MVT::v32i8, 4 },
    { ISD::BITREVERSE, MVT::v2i64, 1 },
    { ISD::BITREVERSE, MVT::v4i32, 1 },
    { ISD::BITREVERSE, MVT::v8i16, 1 },
    { ISD::BITREVERSE, MVT::v16i8, 1 },
    { ISD::BITREVERSE, MVT::i64, 3 },
    { ISD::BITREVERSE, MVT::i32, 3 },
    { ISD::BITREVERSE, MVT::i16, 3 },
    { ISD::BITREVERSE, MVT::i8, 3 }
  };
  static const CostTblEntry AVX2CostTbl[] = {
    { ISD::ABS, MVT::v4i64, 2 }, // VBLENDVPD(X,VPSUBQ(0,X),X)
    { ISD::ABS, MVT::v8i32, 1 },
    { ISD::ABS, MVT::v16i16, 1 },
    { ISD::ABS, MVT::v32i8, 1 },
    { ISD::BITREVERSE, MVT::v4i64, 5 },
    { ISD::BITREVERSE, MVT::v8i32, 5 },
    { ISD::BITREVERSE, MVT::v16i16, 5 },
    { ISD::BITREVERSE, MVT::v32i8, 5 },
    { ISD::BSWAP, MVT::v4i64, 1 },
    { ISD::BSWAP, MVT::v8i32, 1 },
    { ISD::BSWAP, MVT::v16i16, 1 },
    { ISD::CTLZ, MVT::v4i64, 23 },
    { ISD::CTLZ, MVT::v8i32, 18 },
    { ISD::CTLZ, MVT::v16i16, 14 },
    { ISD::CTLZ, MVT::v32i8, 9 },
    { ISD::CTPOP, MVT::v4i64, 7 },
    { ISD::CTPOP, MVT::v8i32, 11 },
    { ISD::CTPOP, MVT::v16i16, 9 },
    { ISD::CTPOP, MVT::v32i8, 6 },
    { ISD::CTTZ, MVT::v4i64, 10 },
    { ISD::CTTZ, MVT::v8i32, 14 },
    { ISD::CTTZ, MVT::v16i16, 12 },
    { ISD::CTTZ, MVT::v32i8, 9 },
    { ISD::SADDSAT, MVT::v16i16, 1 },
    { ISD::SADDSAT, MVT::v32i8, 1 },
    { ISD::SMAX, MVT::v8i32, 1 },
    { ISD::SMAX, MVT::v16i16, 1 },
    { ISD::SMAX, MVT::v32i8, 1 },
    { ISD::SMIN, MVT::v8i32, 1 },
    { ISD::SMIN, MVT::v16i16, 1 },
    { ISD::SMIN, MVT::v32i8, 1 },
    { ISD::SSUBSAT, MVT::v16i16, 1 },
    { ISD::SSUBSAT, MVT::v32i8, 1 },
    { ISD::UADDSAT, MVT::v16i16, 1 },
    { ISD::UADDSAT, MVT::v32i8, 1 },
    { ISD::UADDSAT, MVT::v8i32, 3 }, // not + pminud + paddd
    { ISD::UMAX, MVT::v8i32, 1 },
    { ISD::UMAX, MVT::v16i16, 1 },
    { ISD::UMAX, MVT::v32i8, 1 },
    { ISD::UMIN, MVT::v8i32, 1 },
    { ISD::UMIN, MVT::v16i16, 1 },
    { ISD::UMIN, MVT::v32i8, 1 },
    { ISD::USUBSAT, MVT::v16i16, 1 },
    { ISD::USUBSAT, MVT::v32i8, 1 },
    { ISD::USUBSAT, MVT::v8i32, 2 }, // pmaxud + psubd
    { ISD::FMAXNUM, MVT::v8f32, 3 }, // MAXPS + CMPUNORDPS + BLENDVPS
    { ISD::FMAXNUM, MVT::v4f64, 3 }, // MAXPD + CMPUNORDPD + BLENDVPD
    { ISD::FSQRT, MVT::f32, 7 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT, MVT::f64, 14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/
  };
  static const CostTblEntry AVX1CostTbl[] = {
    { ISD::ABS, MVT::v4i64, 6 }, // VBLENDVPD(X,VPSUBQ(0,X),X)
    { ISD::ABS, MVT::v8i32, 3 },
    { ISD::ABS, MVT::v16i16, 3 },
    { ISD::ABS, MVT::v32i8, 3 },
    { ISD::BITREVERSE, MVT::v4i64, 12 }, // 2 x 128-bit Op + extract/insert
    { ISD::BITREVERSE, MVT::v8i32, 12 }, // 2 x 128-bit Op + extract/insert
    { ISD::BITREVERSE, MVT::v16i16, 12 }, // 2 x 128-bit Op + extract/insert
    { ISD::BITREVERSE, MVT::v32i8, 12 }, // 2 x 128-bit Op + extract/insert
    { ISD::BSWAP, MVT::v4i64, 4 },
    { ISD::BSWAP, MVT::v8i32, 4 },
    { ISD::BSWAP, MVT::v16i16, 4 },
    { ISD::CTLZ, MVT::v4i64, 48 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTLZ, MVT::v8i32, 38 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTLZ, MVT::v16i16, 30 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTLZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTPOP, MVT::v4i64, 16 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTPOP, MVT::v8i32, 24 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTPOP, MVT::v16i16, 20 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTPOP, MVT::v32i8, 14 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTTZ, MVT::v4i64, 22 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTTZ, MVT::v8i32, 30 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTTZ, MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTTZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert
    { ISD::SADDSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
    { ISD::SADDSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
    { ISD::SMAX, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert
    { ISD::SMAX, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
    { ISD::SMAX, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
    { ISD::SMIN, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert
    { ISD::SMIN, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
    { ISD::SMIN, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
    { ISD::SSUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
    { ISD::SSUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
    { ISD::UADDSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
    { ISD::UADDSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
    { ISD::UADDSAT, MVT::v8i32, 8 }, // 2 x 128-bit Op + extract/insert
    { ISD::UMAX, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert
    { ISD::UMAX, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
    { ISD::UMAX, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
    { ISD::UMIN, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert
    { ISD::UMIN, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
    { ISD::UMIN, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
    { ISD::USUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
    { ISD::USUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
    { ISD::USUBSAT, MVT::v8i32, 6 }, // 2 x 128-bit Op + extract/insert
    { ISD::FMAXNUM, MVT::f32, 3 }, // MAXSS + CMPUNORDSS + BLENDVPS
    { ISD::FMAXNUM, MVT::v4f32, 3 }, // MAXPS + CMPUNORDPS + BLENDVPS
    { ISD::FMAXNUM, MVT::v8f32, 5 }, // MAXPS + CMPUNORDPS + BLENDVPS + ?
    { ISD::FMAXNUM, MVT::f64, 3 }, // MAXSD + CMPUNORDSD + BLENDVPD
    { ISD::FMAXNUM, MVT::v2f64, 3 }, // MAXPD + CMPUNORDPD + BLENDVPD
    { ISD::FMAXNUM, MVT::v4f64, 5 }, // MAXPD + CMPUNORDPD + BLENDVPD + ?
    { ISD::FSQRT, MVT::f32, 14 }, // SNB from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f32, 14 }, // SNB from http://www.agner.org/
    { ISD::FSQRT, MVT::v8f32, 28 }, // SNB from http://www.agner.org/
    { ISD::FSQRT, MVT::f64, 21 }, // SNB from http://www.agner.org/
    { ISD::FSQRT, MVT::v2f64, 21 }, // SNB from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f64, 43 }, // SNB from http://www.agner.org/
  };
  static const CostTblEntry GLMCostTbl[] = {
    { ISD::FSQRT, MVT::f32, 19 }, // sqrtss
    { ISD::FSQRT, MVT::v4f32, 37 }, // sqrtps
    { ISD::FSQRT, MVT::f64, 34 }, // sqrtsd
    { ISD::FSQRT, MVT::v2f64, 67 }, // sqrtpd
  };
  static const CostTblEntry SLMCostTbl[] = {
    { ISD::FSQRT, MVT::f32, 20 }, // sqrtss
    { ISD::FSQRT, MVT::v4f32, 40 }, // sqrtps
    { ISD::FSQRT, MVT::f64, 35 }, // sqrtsd
    { ISD::FSQRT, MVT::v2f64, 70 }, // sqrtpd
  };
  static const CostTblEntry SSE42CostTbl[] = {
    { ISD::ABS, MVT::v2i64, 3 }, // BLENDVPD(X,PSUBQ(0,X),X)
    { ISD::USUBSAT, MVT::v4i32, 2 }, // pmaxud + psubd
    { ISD::UADDSAT, MVT::v4i32, 3 }, // not + pminud + paddd
    { ISD::FSQRT, MVT::f32, 18 }, // Nehalem from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f32, 18 }, // Nehalem from http://www.agner.org/
  };
  static const CostTblEntry SSE41CostTbl[] = {
    { ISD::SMAX, MVT::v4i32, 1 },
    { ISD::SMAX, MVT::v16i8, 1 },
    { ISD::SMIN, MVT::v4i32, 1 },
    { ISD::SMIN, MVT::v16i8, 1 },
    { ISD::UMAX, MVT::v4i32, 1 },
    { ISD::UMAX, MVT::v8i16, 1 },
    { ISD::UMIN, MVT::v4i32, 1 },
    { ISD::UMIN, MVT::v8i16, 1 },
  };
  static const CostTblEntry SSSE3CostTbl[] = {
    { ISD::ABS, MVT::v4i32, 1 },
    { ISD::ABS, MVT::v8i16, 1 },
    { ISD::ABS, MVT::v16i8, 1 },
    { ISD::BITREVERSE, MVT::v2i64, 5 },
    { ISD::BITREVERSE, MVT::v4i32, 5 },
    { ISD::BITREVERSE, MVT::v8i16, 5 },
    { ISD::BITREVERSE, MVT::v16i8, 5 },
    { ISD::BSWAP, MVT::v2i64, 1 },
    { ISD::BSWAP, MVT::v4i32, 1 },
    { ISD::BSWAP, MVT::v8i16, 1 },
    { ISD::CTLZ, MVT::v2i64, 23 },
    { ISD::CTLZ, MVT::v4i32, 18 },
    { ISD::CTLZ, MVT::v8i16, 14 },
    { ISD::CTLZ, MVT::v16i8, 9 },
    { ISD::CTPOP, MVT::v2i64, 7 },
    { ISD::CTPOP, MVT::v4i32, 11 },
    { ISD::CTPOP, MVT::v8i16, 9 },
    { ISD::CTPOP, MVT::v16i8, 6 },
    { ISD::CTTZ, MVT::v2i64, 10 },
    { ISD::CTTZ, MVT::v4i32, 14 },
    { ISD::CTTZ, MVT::v8i16, 12 },
    { ISD::CTTZ, MVT::v16i8, 9 }
  };
  static const CostTblEntry SSE2CostTbl[] = {
    { ISD::ABS, MVT::v2i64, 4 },
    { ISD::ABS, MVT::v4i32, 3 },
    { ISD::ABS, MVT::v8i16, 3 },
    { ISD::ABS, MVT::v16i8, 3 },
    { ISD::BITREVERSE, MVT::v2i64, 29 },
    { ISD::BITREVERSE, MVT::v4i32, 27 },
    { ISD::BITREVERSE, MVT::v8i16, 27 },
    { ISD::BITREVERSE, MVT::v16i8, 20 },
    { ISD::BSWAP, MVT::v2i64, 7 },
    { ISD::BSWAP, MVT::v4i32, 7 },
    { ISD::BSWAP, MVT::v8i16, 7 },
    { ISD::CTLZ, MVT::v2i64, 25 },
    { ISD::CTLZ, MVT::v4i32, 26 },
    { ISD::CTLZ, MVT::v8i16, 20 },
    { ISD::CTLZ, MVT::v16i8, 17 },
    { ISD::CTPOP, MVT::v2i64, 12 },
    { ISD::CTPOP, MVT::v4i32, 15 },
    { ISD::CTPOP, MVT::v8i16, 13 },
    { ISD::CTPOP, MVT::v16i8, 10 },
    { ISD::CTTZ, MVT::v2i64, 14 },
    { ISD::CTTZ, MVT::v4i32, 18 },
    { ISD::CTTZ, MVT::v8i16, 16 },
    { ISD::CTTZ, MVT::v16i8, 13 },
    { ISD::SADDSAT, MVT::v8i16, 1 },
    { ISD::SADDSAT, MVT::v16i8, 1 },
    { ISD::SMAX, MVT::v8i16, 1 },
    { ISD::SMIN, MVT::v8i16, 1 },
    { ISD::SSUBSAT, MVT::v8i16, 1 },
    { ISD::SSUBSAT, MVT::v16i8, 1 },
    { ISD::UADDSAT, MVT::v8i16, 1 },
    { ISD::UADDSAT, MVT::v16i8, 1 },
    { ISD::UMAX, MVT::v8i16, 2 },
    { ISD::UMAX, MVT::v16i8, 1 },
    { ISD::UMIN, MVT::v8i16, 2 },
    { ISD::UMIN, MVT::v16i8, 1 },
    { ISD::USUBSAT, MVT::v8i16, 1 },
    { ISD::USUBSAT, MVT::v16i8, 1 },
    { ISD::FMAXNUM, MVT::f64, 4 },
    { ISD::FMAXNUM, MVT::v2f64, 4 },
    { ISD::FSQRT, MVT::f64, 32 }, // Nehalem from http://www.agner.org/
    { ISD::FSQRT, MVT::v2f64, 32 }, // Nehalem from http://www.agner.org/
  };
  static const CostTblEntry SSE1CostTbl[] = {
    { ISD::FMAXNUM, MVT::f32, 4 },
    { ISD::FMAXNUM, MVT::v4f32, 4 },
    { ISD::FSQRT, MVT::f32, 28 }, // Pentium III from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f32, 56 }, // Pentium III from http://www.agner.org/
  };
  static const CostTblEntry BMI64CostTbl[] = { // 64-bit targets
    { ISD::CTTZ, MVT::i64, 1 },
  };
  static const CostTblEntry BMI32CostTbl[] = { // 32 or 64-bit targets
    { ISD::CTTZ, MVT::i32, 1 },
    { ISD::CTTZ, MVT::i16, 1 },
    { ISD::CTTZ, MVT::i8, 1 },
  };
  static const CostTblEntry LZCNT64CostTbl[] = { // 64-bit targets
    { ISD::CTLZ, MVT::i64, 1 },
  };
  static const CostTblEntry LZCNT32CostTbl[] = { // 32 or 64-bit targets
    { ISD::CTLZ, MVT::i32, 1 },
    { ISD::CTLZ, MVT::i16, 1 },
    { ISD::CTLZ, MVT::i8, 1 },
  };
  static const CostTblEntry POPCNT64CostTbl[] = { // 64-bit targets
    { ISD::CTPOP, MVT::i64, 1 },
  };
  static const CostTblEntry POPCNT32CostTbl[] = { // 32 or 64-bit targets
    { ISD::CTPOP, MVT::i32, 1 },
    { ISD::CTPOP, MVT::i16, 1 },
    { ISD::CTPOP, MVT::i8, 1 },
  };
  static const CostTblEntry X64CostTbl[] = { // 64-bit targets
2671 { ISD::BITREVERSE, MVT::i64, 14 },
2672 { ISD::CTLZ, MVT::i64, 4 }, // BSR+XOR or BSR+XOR+CMOV
2673 { ISD::CTTZ, MVT::i64, 3 }, // TEST+BSF+CMOV/BRANCH
2674 { ISD::CTPOP, MVT::i64, 10 },
2675 { ISD::SADDO, MVT::i64, 1 },
2676 { ISD::UADDO, MVT::i64, 1 },
2677 { ISD::UMULO, MVT::i64, 2 }, // mulq + seto
2678 };
2679 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
2680 { ISD::BITREVERSE, MVT::i32, 14 },
2681 { ISD::BITREVERSE, MVT::i16, 14 },
2682 { ISD::BITREVERSE, MVT::i8, 11 },
2683 { ISD::CTLZ, MVT::i32, 4 }, // BSR+XOR or BSR+XOR+CMOV
2684 { ISD::CTLZ, MVT::i16, 4 }, // BSR+XOR or BSR+XOR+CMOV
2685 { ISD::CTLZ, MVT::i8, 4 }, // BSR+XOR or BSR+XOR+CMOV
2686 { ISD::CTTZ, MVT::i32, 3 }, // TEST+BSF+CMOV/BRANCH
2687 { ISD::CTTZ, MVT::i16, 3 }, // TEST+BSF+CMOV/BRANCH
2688 { ISD::CTTZ, MVT::i8, 3 }, // TEST+BSF+CMOV/BRANCH
2689 { ISD::CTPOP, MVT::i32, 8 },
2690 { ISD::CTPOP, MVT::i16, 9 },
2691 { ISD::CTPOP, MVT::i8, 7 },
2692 { ISD::SADDO, MVT::i32, 1 },
2693 { ISD::SADDO, MVT::i16, 1 },
2694 { ISD::SADDO, MVT::i8, 1 },
2695 { ISD::UADDO, MVT::i32, 1 },
2696 { ISD::UADDO, MVT::i16, 1 },
2697 { ISD::UADDO, MVT::i8, 1 },
2698 { ISD::UMULO, MVT::i32, 2 }, // mul + seto
2699 { ISD::UMULO, MVT::i16, 2 },
2700 { ISD::UMULO, MVT::i8, 2 },
2701 };
2702
2703 Type *RetTy = ICA.getReturnType();
2704 Type *OpTy = RetTy;
2705 Intrinsic::ID IID = ICA.getID();
2706 unsigned ISD = ISD::DELETED_NODE;
2707 switch (IID) {
2708 default:
2709 break;
2710 case Intrinsic::abs:
2711 ISD = ISD::ABS;
2712 break;
2713 case Intrinsic::bitreverse:
2714 ISD = ISD::BITREVERSE;
2715 break;
2716 case Intrinsic::bswap:
2717 ISD = ISD::BSWAP;
2718 break;
2719 case Intrinsic::ctlz:
2720 ISD = ISD::CTLZ;
2721 break;
2722 case Intrinsic::ctpop:
2723 ISD = ISD::CTPOP;
2724 break;
2725 case Intrinsic::cttz:
2726 ISD = ISD::CTTZ;
2727 break;
2728 case Intrinsic::maxnum:
2729 case Intrinsic::minnum:
2730 // FMINNUM has same costs so don't duplicate.
2731 ISD = ISD::FMAXNUM;
2732 break;
2733 case Intrinsic::sadd_sat:
2734 ISD = ISD::SADDSAT;
2735 break;
2736 case Intrinsic::smax:
2737 ISD = ISD::SMAX;
2738 break;
2739 case Intrinsic::smin:
2740 ISD = ISD::SMIN;
2741 break;
2742 case Intrinsic::ssub_sat:
2743 ISD = ISD::SSUBSAT;
2744 break;
2745 case Intrinsic::uadd_sat:
2746 ISD = ISD::UADDSAT;
2747 break;
2748 case Intrinsic::umax:
2749 ISD = ISD::UMAX;
2750 break;
2751 case Intrinsic::umin:
2752 ISD = ISD::UMIN;
2753 break;
2754 case Intrinsic::usub_sat:
2755 ISD = ISD::USUBSAT;
2756 break;
2757 case Intrinsic::sqrt:
2758 ISD = ISD::FSQRT;
2759 break;
2760 case Intrinsic::sadd_with_overflow:
2761 case Intrinsic::ssub_with_overflow:
2762 // SSUBO has same costs so don't duplicate.
2763 ISD = ISD::SADDO;
2764 OpTy = RetTy->getContainedType(0);
2765 break;
2766 case Intrinsic::uadd_with_overflow:
2767 case Intrinsic::usub_with_overflow:
2768 // USUBO has same costs so don't duplicate.
2769 ISD = ISD::UADDO;
2770 OpTy = RetTy->getContainedType(0);
2771 break;
2772 case Intrinsic::umul_with_overflow:
2773 case Intrinsic::smul_with_overflow:
2774 // SMULO has same costs so don't duplicate.
2775 ISD = ISD::UMULO;
2776 OpTy = RetTy->getContainedType(0);
2777 break;
2778 }
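  // Note on the *_with_overflow intrinsics above: their return type is a
  // struct, e.g. %r = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b),
  // so OpTy is taken from the first contained type (i32 here) to key the
  // cost tables on the arithmetic type rather than on the struct.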
2779
2780 if (ISD != ISD::DELETED_NODE) {
2781 // Legalize the type.
2782 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, OpTy);
2783 MVT MTy = LT.second;
2784
2785 // Attempt to lookup cost.
2786 if (ST->useGLMDivSqrtCosts())
2787 if (const auto *Entry = CostTableLookup(GLMCostTbl, ISD, MTy))
2788 return LT.first * Entry->Cost;
2789
2790 if (ST->isSLM())
2791 if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
2792 return LT.first * Entry->Cost;
2793
2794 if (ST->hasCDI())
2795 if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy))
2796 return LT.first * Entry->Cost;
2797
2798 if (ST->hasBWI())
2799 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
2800 return LT.first * Entry->Cost;
2801
2802 if (ST->hasAVX512())
2803 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
2804 return LT.first * Entry->Cost;
2805
2806 if (ST->hasXOP())
2807 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
2808 return LT.first * Entry->Cost;
2809
2810 if (ST->hasAVX2())
2811 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
2812 return LT.first * Entry->Cost;
2813
2814 if (ST->hasAVX())
2815 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
2816 return LT.first * Entry->Cost;
2817
2818 if (ST->hasSSE42())
2819 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
2820 return LT.first * Entry->Cost;
2821
2822 if (ST->hasSSE41())
2823 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
2824 return LT.first * Entry->Cost;
2825
2826 if (ST->hasSSSE3())
2827 if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
2828 return LT.first * Entry->Cost;
2829
2830 if (ST->hasSSE2())
2831 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
2832 return LT.first * Entry->Cost;
2833
2834 if (ST->hasSSE1())
2835 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
2836 return LT.first * Entry->Cost;
2837
2838 if (ST->hasBMI()) {
2839 if (ST->is64Bit())
2840 if (const auto *Entry = CostTableLookup(BMI64CostTbl, ISD, MTy))
2841 return LT.first * Entry->Cost;
2842
2843 if (const auto *Entry = CostTableLookup(BMI32CostTbl, ISD, MTy))
2844 return LT.first * Entry->Cost;
2845 }
2846
2847 if (ST->hasLZCNT()) {
2848 if (ST->is64Bit())
2849 if (const auto *Entry = CostTableLookup(LZCNT64CostTbl, ISD, MTy))
2850 return LT.first * Entry->Cost;
2851
2852 if (const auto *Entry = CostTableLookup(LZCNT32CostTbl, ISD, MTy))
2853 return LT.first * Entry->Cost;
2854 }
2855
2856 if (ST->hasPOPCNT()) {
2857 if (ST->is64Bit())
2858 if (const auto *Entry = CostTableLookup(POPCNT64CostTbl, ISD, MTy))
2859 return LT.first * Entry->Cost;
2860
2861 if (const auto *Entry = CostTableLookup(POPCNT32CostTbl, ISD, MTy))
2862 return LT.first * Entry->Cost;
2863 }
2864
2865 // TODO - add BMI (TZCNT) scalar handling
2866
2867 if (ST->is64Bit())
2868 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
2869 return LT.first * Entry->Cost;
2870
2871 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
2872 return LT.first * Entry->Cost;
2873 }
2874
2875 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
2876 }
2877
2878 int X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
2879 TTI::TargetCostKind CostKind) {
2880 if (ICA.isTypeBasedOnly())
2881 return getTypeBasedIntrinsicInstrCost(ICA, CostKind);
2882
2883 static const CostTblEntry AVX512CostTbl[] = {
2884 { ISD::ROTL, MVT::v8i64, 1 },
2885 { ISD::ROTL, MVT::v4i64, 1 },
2886 { ISD::ROTL, MVT::v2i64, 1 },
2887 { ISD::ROTL, MVT::v16i32, 1 },
2888 { ISD::ROTL, MVT::v8i32, 1 },
2889 { ISD::ROTL, MVT::v4i32, 1 },
2890 { ISD::ROTR, MVT::v8i64, 1 },
2891 { ISD::ROTR, MVT::v4i64, 1 },
2892 { ISD::ROTR, MVT::v2i64, 1 },
2893 { ISD::ROTR, MVT::v16i32, 1 },
2894 { ISD::ROTR, MVT::v8i32, 1 },
2895 { ISD::ROTR, MVT::v4i32, 1 }
2896 };
2897 // XOP: ROTL = VPROT(X,Y), ROTR = VPROT(X,SUB(0,Y))
2898 static const CostTblEntry XOPCostTbl[] = {
2899 { ISD::ROTL, MVT::v4i64, 4 },
2900 { ISD::ROTL, MVT::v8i32, 4 },
2901 { ISD::ROTL, MVT::v16i16, 4 },
2902 { ISD::ROTL, MVT::v32i8, 4 },
2903 { ISD::ROTL, MVT::v2i64, 1 },
2904 { ISD::ROTL, MVT::v4i32, 1 },
2905 { ISD::ROTL, MVT::v8i16, 1 },
2906 { ISD::ROTL, MVT::v16i8, 1 },
2907 { ISD::ROTR, MVT::v4i64, 6 },
2908 { ISD::ROTR, MVT::v8i32, 6 },
2909 { ISD::ROTR, MVT::v16i16, 6 },
2910 { ISD::ROTR, MVT::v32i8, 6 },
2911 { ISD::ROTR, MVT::v2i64, 2 },
2912 { ISD::ROTR, MVT::v4i32, 2 },
2913 { ISD::ROTR, MVT::v8i16, 2 },
2914 { ISD::ROTR, MVT::v16i8, 2 }
2915 };
2916 static const CostTblEntry X64CostTbl[] = { // 64-bit targets
2917 { ISD::ROTL, MVT::i64, 1 },
2918 { ISD::ROTR, MVT::i64, 1 },
2919 { ISD::FSHL, MVT::i64, 4 }
2920 };
2921 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
2922 { ISD::ROTL, MVT::i32, 1 },
2923 { ISD::ROTL, MVT::i16, 1 },
2924 { ISD::ROTL, MVT::i8, 1 },
2925 { ISD::ROTR, MVT::i32, 1 },
2926 { ISD::ROTR, MVT::i16, 1 },
2927 { ISD::ROTR, MVT::i8, 1 },
2928 { ISD::FSHL, MVT::i32, 4 },
2929 { ISD::FSHL, MVT::i16, 4 },
2930 { ISD::FSHL, MVT::i8, 4 }
2931 };
2932
2933 Intrinsic::ID IID = ICA.getID();
2934 Type *RetTy = ICA.getReturnType();
2935 const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
2936 unsigned ISD = ISD::DELETED_NODE;
2937 switch (IID) {
2938 default:
2939 break;
2940 case Intrinsic::fshl:
2941 ISD = ISD::FSHL;
2942 if (Args[0] == Args[1])
2943 ISD = ISD::ROTL;
2944 break;
2945 case Intrinsic::fshr:
2946 // FSHR has same costs so don't duplicate.
2947 ISD = ISD::FSHL;
2948 if (Args[0] == Args[1])
2949 ISD = ISD::ROTR;
2950 break;
2951 }
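  // A funnel shift with equal data operands is a rotate, e.g.
  //   %r = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %s)
  // is a rotate-left of %x by %s, which is why Args[0] == Args[1] selects
  // ISD::ROTL/ROTR above.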
2952
2953 if (ISD != ISD::DELETED_NODE) {
2954 // Legalize the type.
2955 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
2956 MVT MTy = LT.second;
2957
2958 // Attempt to lookup cost.
2959 if (ST->hasAVX512())
2960 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
2961 return LT.first * Entry->Cost;
2962
2963 if (ST->hasXOP())
2964 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
2965 return LT.first * Entry->Cost;
2966
2967 if (ST->is64Bit())
2968 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
2969 return LT.first * Entry->Cost;
2970
2971 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
2972 return LT.first * Entry->Cost;
2973 }
2974
2975 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
2976 }
2977
2978 int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
2979 static const CostTblEntry SLMCostTbl[] = {
2980 { ISD::EXTRACT_VECTOR_ELT, MVT::i8, 4 },
2981 { ISD::EXTRACT_VECTOR_ELT, MVT::i16, 4 },
2982 { ISD::EXTRACT_VECTOR_ELT, MVT::i32, 4 },
2983 { ISD::EXTRACT_VECTOR_ELT, MVT::i64, 7 }
2984 };
2985
2986 assert(Val->isVectorTy() && "This must be a vector type");
2987 Type *ScalarType = Val->getScalarType();
2988 int RegisterFileMoveCost = 0;
2989
2990 if (Index != -1U && (Opcode == Instruction::ExtractElement ||
2991 Opcode == Instruction::InsertElement)) {
2992 // Legalize the type.
2993 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);
2994
2995 // This type is legalized to a scalar type.
2996 if (!LT.second.isVector())
2997 return 0;
2998
2999 // The type may be split. Normalize the index to the new type.
3000 unsigned NumElts = LT.second.getVectorNumElements();
3001 unsigned SubNumElts = NumElts;
3002 Index = Index % NumElts;
3003
3004 // For >128-bit vectors, we need to extract the higher 128-bit subvectors.
3005 // For inserts, we also need to insert the subvector back.
3006 if (LT.second.getSizeInBits() > 128) {
3007 assert((LT.second.getSizeInBits() % 128) == 0 && "Illegal vector");
3008 unsigned NumSubVecs = LT.second.getSizeInBits() / 128;
3009 SubNumElts = NumElts / NumSubVecs;
3010 if (SubNumElts <= Index) {
3011 RegisterFileMoveCost += (Opcode == Instruction::InsertElement ? 2 : 1);
3012 Index %= SubNumElts;
3013 }
3014 }
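    // E.g. extracting element 5 from a legal v8i32: NumSubVecs = 2 and
    // SubNumElts = 4, so the element lives in the upper 128-bit subvector;
    // we pay one extract (two moves for an insert) and normalize the index
    // to 1 within that subvector.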
3015
3016 if (Index == 0) {
3017 // Floating point scalars are already located in index #0.
3018 // Many insertions to #0 can fold away for scalar fp-ops, so assume this
3019 // holds for all of them.
3020 if (ScalarType->isFloatingPointTy())
3021 return RegisterFileMoveCost;
3022
3023 // Assume movd/movq XMM -> GPR is relatively cheap on all targets.
3024 if (ScalarType->isIntegerTy() && Opcode == Instruction::ExtractElement)
3025 return 1 + RegisterFileMoveCost;
3026 }
3027
3028 int ISD = TLI->InstructionOpcodeToISD(Opcode);
3029 assert(ISD && "Unexpected vector opcode");
3030 MVT MScalarTy = LT.second.getScalarType();
3031 if (ST->isSLM())
3032 if (auto *Entry = CostTableLookup(SLMCostTbl, ISD, MScalarTy))
3033 return Entry->Cost + RegisterFileMoveCost;
3034
3035 // Assume pinsr/pextr XMM <-> GPR is relatively cheap on all targets.
3036 if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
3037 (MScalarTy.isInteger() && ST->hasSSE41()))
3038 return 1 + RegisterFileMoveCost;
3039
3040 // Assume insertps is relatively cheap on all targets.
3041 if (MScalarTy == MVT::f32 && ST->hasSSE41() &&
3042 Opcode == Instruction::InsertElement)
3043 return 1 + RegisterFileMoveCost;
3044
3045 // For extractions we just need to shuffle the element to index 0, which
3046 // should be very cheap (assume cost = 1). For insertions we need to shuffle
3047 // the element to its destination. In both cases we must handle the
3048 // subvector move(s).
3049 // If the vector type is already smaller than 128 bits then don't reduce it.
3050 // TODO: Under what circumstances should we shuffle using the full width?
3051 int ShuffleCost = 1;
3052 if (Opcode == Instruction::InsertElement) {
3053 auto *SubTy = cast<VectorType>(Val);
3054 EVT VT = TLI->getValueType(DL, Val);
3055 if (VT.getScalarType() != MScalarTy || VT.getSizeInBits() >= 128)
3056 SubTy = FixedVectorType::get(ScalarType, SubNumElts);
3057 ShuffleCost = getShuffleCost(TTI::SK_PermuteTwoSrc, SubTy, 0, SubTy);
3058 }
3059 int IntOrFpCost = ScalarType->isFloatingPointTy() ? 0 : 1;
3060 return ShuffleCost + IntOrFpCost + RegisterFileMoveCost;
3061 }
3062
3063 // Add to the base cost if we know that the extracted element of a vector is
3064 // destined to be moved to and used in the integer register file.
3065 if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
3066 RegisterFileMoveCost += 1;
3067
3068 return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
3069 }
3070
3071 unsigned X86TTIImpl::getScalarizationOverhead(VectorType *Ty,
3072 const APInt &DemandedElts,
3073 bool Insert, bool Extract) {
3074 unsigned Cost = 0;
3075
3076 // For insertions, an ISD::BUILD_VECTOR style vector initialization can be
3077 // much cheaper than an accumulation of ISD::INSERT_VECTOR_ELT.
3078 if (Insert) {
3079 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
3080 MVT MScalarTy = LT.second.getScalarType();
3081
3082 if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
3083 (MScalarTy.isInteger() && ST->hasSSE41()) ||
3084 (MScalarTy == MVT::f32 && ST->hasSSE41())) {
3085 // For types we can insert directly, insertion into 128-bit subvectors is
3086 // cheap, followed by a cheap chain of concatenations.
3087 if (LT.second.getSizeInBits() <= 128) {
3088 Cost +=
3089 BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, false);
3090 } else {
3091 // For each 128-bit lane: if at least one index is demanded but not all
3092 // indices are demanded, and this lane is not the first 128-bit lane of
3093 // the legalized vector, then this lane needs an extracti128; and if at
3094 // least one index in the lane is demanded at all, the lane needs an
3095 // inserti128.
3096
3097 // The following cases will help you build a better understanding:
3098 // Assume we insert several elements into a v8i32 vector in avx2,
3099 // Case#1: inserting into the 1st index needs vpinsrd + inserti128.
3100 // Case#2: inserting into 5th index needs extracti128 + vpinsrd +
3101 // inserti128.
3102 // Case#3: inserting into indices 4,5,6,7 needs 4*vpinsrd + inserti128.
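          // Walking Case#3 through the loop below: the second 128-bit lane
          // has Population == Scale, so no extracti128 is counted, but
          // Population > 0 adds the inserti128, and countPopulation() adds
          // the four vpinsrd.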
3103 unsigned Num128Lanes = LT.second.getSizeInBits() / 128 * LT.first;
3104 unsigned NumElts = LT.second.getVectorNumElements() * LT.first;
3105 APInt WidenedDemandedElts = DemandedElts.zextOrSelf(NumElts);
3106 unsigned Scale = NumElts / Num128Lanes;
3107 // We iterate each 128-lane, and check if we need a
3108 // extracti128/inserti128 for this 128-lane.
3109 for (unsigned I = 0; I < NumElts; I += Scale) {
3110 APInt Mask = APInt::getBitsSet(NumElts, I, I + Scale);
3111 APInt MaskedDE = Mask & WidenedDemandedElts;
3112 unsigned Population = MaskedDE.countPopulation();
3113 Cost += (Population > 0 && Population != Scale &&
3114 I % LT.second.getVectorNumElements() != 0);
3115 Cost += Population > 0;
3116 }
3117 Cost += DemandedElts.countPopulation();
3118
3119 // For vXf32 cases, insertion into the 0th index in each v4f32
3120 // 128-bit vector is free.
3121 // NOTE: This assumes legalization widens vXf32 vectors.
3122 if (MScalarTy == MVT::f32)
3123 for (unsigned i = 0, e = cast<FixedVectorType>(Ty)->getNumElements();
3124 i < e; i += 4)
3125 if (DemandedElts[i])
3126 Cost--;
3127 }
3128 } else if (LT.second.isVector()) {
3129 // Without fast insertion, we need to use MOVD/MOVQ to pass each demanded
3130 // integer element as a SCALAR_TO_VECTOR, then we build the vector as a
3131 // series of UNPCK followed by CONCAT_VECTORS - all of these can be
3132 // considered cheap.
3133 if (Ty->isIntOrIntVectorTy())
3134 Cost += DemandedElts.countPopulation();
3135
3136 // Get the smaller of the legalized or original pow2-extended number of
3137 // vector elements, which represents the number of unpacks we'll end up
3138 // performing.
3139 unsigned NumElts = LT.second.getVectorNumElements();
3140 unsigned Pow2Elts =
3141 PowerOf2Ceil(cast<FixedVectorType>(Ty)->getNumElements());
3142 Cost += (std::min<unsigned>(NumElts, Pow2Elts) - 1) * LT.first;
3143 }
3144 }
3145
3146 // TODO: Use default extraction for now, but we should investigate extending this
3147 // to handle repeated subvector extraction.
3148 if (Extract)
3149 Cost += BaseT::getScalarizationOverhead(Ty, DemandedElts, false, Extract);
3150
3151 return Cost;
3152 }
3153
3154 int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
3155 MaybeAlign Alignment, unsigned AddressSpace,
3156 TTI::TargetCostKind CostKind,
3157 const Instruction *I) {
3158 // TODO: Handle other cost kinds.
3159 if (CostKind != TTI::TCK_RecipThroughput) {
3160 if (isa_and_nonnull<StoreInst>(I)) {
3161 Value *Ptr = I->getOperand(1);
3162 // A store instruction with index and scale addressing costs 2 uops.
3163 // Check the preceding GEP to identify non-constant indices.
3164 if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
3165 if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); }))
3166 return TTI::TCC_Basic * 2;
3167 }
3168 }
3169 return TTI::TCC_Basic;
3170 }
3171
3172 // Handle non-power-of-two vectors such as <3 x float>
3173 if (auto *VTy = dyn_cast<FixedVectorType>(Src)) {
3174 unsigned NumElem = VTy->getNumElements();
3175
3176 // Handle a few common cases:
3177 // <3 x float>
3178 if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
3179 // Cost = 64 bit store + extract + 32 bit store.
3180 return 3;
3181
3182 // <3 x double>
3183 if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
3184 // Cost = 128 bit store + unpack + 64 bit store.
3185 return 3;
3186
3187 // Assume that all other non-power-of-two numbers are scalarized.
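    // E.g. a <5 x float> store is costed as 5 scalar stores plus the
    // overhead of extracting the 5 lanes from the vector.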
3188 if (!isPowerOf2_32(NumElem)) {
3189 APInt DemandedElts = APInt::getAllOnesValue(NumElem);
3190 int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(), Alignment,
3191 AddressSpace, CostKind);
3192 int SplitCost = getScalarizationOverhead(VTy, DemandedElts,
3193 Opcode == Instruction::Load,
3194 Opcode == Instruction::Store);
3195 return NumElem * Cost + SplitCost;
3196 }
3197 }
3198
3199 // Type legalization can't handle structs
3200 if (TLI->getValueType(DL, Src, true) == MVT::Other)
3201 return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
3202 CostKind);
3203
3204 // Legalize the type.
3205 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
3206 assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
3207 "Invalid Opcode");
3208
3209 // Each load/store unit costs 1.
3210 int Cost = LT.first * 1;
3211
3212 // This isn't exactly right. We're using slow unaligned 32-byte accesses as a
3213 // proxy for a double-pumped AVX memory interface such as on Sandy Bridge.
3214 if (LT.second.getStoreSize() == 32 && ST->isUnalignedMem32Slow())
3215 Cost *= 2;
3216
3217 return Cost;
3218 }
3219
3220 int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
3221 Align Alignment, unsigned AddressSpace,
3222 TTI::TargetCostKind CostKind) {
3223 bool IsLoad = (Instruction::Load == Opcode);
3224 bool IsStore = (Instruction::Store == Opcode);
3225
3226 auto *SrcVTy = dyn_cast<FixedVectorType>(SrcTy);
3227 if (!SrcVTy)
3228 // For a scalar, take the regular cost without the mask.
3229 return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace, CostKind);
3230
3231 unsigned NumElem = SrcVTy->getNumElements();
3232 auto *MaskTy =
3233 FixedVectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
3234 if ((IsLoad && !isLegalMaskedLoad(SrcVTy, Alignment)) ||
3235 (IsStore && !isLegalMaskedStore(SrcVTy, Alignment)) ||
3236 !isPowerOf2_32(NumElem)) {
3237 // Scalarization
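    // E.g. a masked load of <4 x i32> with no legal masked op decomposes
    // into 4 mask-bit extracts, 4 compare+branch pairs, 4 scalar loads, and
    // the cost of rebuilding the result vector, as summed below.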
3238 APInt DemandedElts = APInt::getAllOnesValue(NumElem);
3239 int MaskSplitCost =
3240 getScalarizationOverhead(MaskTy, DemandedElts, false, true);
3241 int ScalarCompareCost = getCmpSelInstrCost(
3242 Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr,
3243 CmpInst::BAD_ICMP_PREDICATE, CostKind);
3244 int BranchCost = getCFInstrCost(Instruction::Br, CostKind);
3245 int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
3246 int ValueSplitCost =
3247 getScalarizationOverhead(SrcVTy, DemandedElts, IsLoad, IsStore);
3248 int MemopCost =
3249 NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
3250 Alignment, AddressSpace, CostKind);
3251 return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
3252 }
3253
3254 // Legalize the type.
3255 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
3256 auto VT = TLI->getValueType(DL, SrcVTy);
3257 int Cost = 0;
3258 if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
3259 LT.second.getVectorNumElements() == NumElem)
3260 // Promotion requires an expand/truncate for the data and a shuffle for the mask.
3261 Cost += getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVTy, 0, nullptr) +
3262 getShuffleCost(TTI::SK_PermuteTwoSrc, MaskTy, 0, nullptr);
3263
3264 else if (LT.second.getVectorNumElements() > NumElem) {
3265 auto *NewMaskTy = FixedVectorType::get(MaskTy->getElementType(),
3266 LT.second.getVectorNumElements());
3267 // Expanding requires filling the mask with zeroes.
3268 Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
3269 }
3270
3271 // Pre-AVX512 - each maskmov load costs 2 + store costs ~8.
3272 if (!ST->hasAVX512())
3273 return Cost + LT.first * (IsLoad ? 2 : 8);
3274
3275 // AVX-512 masked load/store is cheaper.
3276 return Cost + LT.first;
3277 }
3278
3279 int X86TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
3280 const SCEV *Ptr) {
3281 // Address computations in vectorized code with non-consecutive addresses will
3282 // likely result in more instructions compared to scalar code where the
3283 // computation can more often be merged into the index mode. The resulting
3284 // extra micro-ops can significantly decrease throughput.
3285 const unsigned NumVectorInstToHideOverhead = 10;
3286
3287 // The cost of computing a strided access is hidden by the X86 addressing
3288 // modes, regardless of the stride value. We don't believe there is a
3289 // difference between a constant strided access in general and one whose
3290 // constant stride value is less than or equal to 64.
3291 // Even in the case of a (loop-invariant) stride whose value is not known at
3292 // compile time, the address computation will not incur more than one extra
3293 // ADD instruction.
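  // In short: a fully variable (gather-like) address computation is charged
  // NumVectorInstToHideOverhead, a strided access with an unknown stride is
  // charged a single extra ADD, and a constant-stride access falls through
  // to the base implementation.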
3294 if (Ty->isVectorTy() && SE) {
3295 if (!BaseT::isStridedAccess(Ptr))
3296 return NumVectorInstToHideOverhead;
3297 if (!BaseT::getConstantStrideStep(SE, Ptr))
3298 return 1;
3299 }
3300
3301 return BaseT::getAddressComputationCost(Ty, SE, Ptr);
3302 }
3303
3304 int X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
3305 bool IsPairwise,
3306 TTI::TargetCostKind CostKind) {
3307 // Just use the default implementation for pair reductions.
3308 if (IsPairwise)
3309 return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwise, CostKind);
3310
3311 // We use the Intel Architecture Code Analyzer (IACA) to measure the
3312 // throughput and use that as the cost.
3313
3314 static const CostTblEntry SLMCostTblNoPairWise[] = {
3315 { ISD::FADD, MVT::v2f64, 3 },
3316 { ISD::ADD, MVT::v2i64, 5 },
3317 };
3318
3319 static const CostTblEntry SSE2CostTblNoPairWise[] = {
3320 { ISD::FADD, MVT::v2f64, 2 },
3321 { ISD::FADD, MVT::v4f32, 4 },
3322 { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
3323 { ISD::ADD, MVT::v2i32, 2 }, // FIXME: chosen to be less than v4i32
3324 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
3325 { ISD::ADD, MVT::v2i16, 2 }, // The data reported by the IACA tool is "4.3".
3326 { ISD::ADD, MVT::v4i16, 3 }, // The data reported by the IACA tool is "4.3".
3327 { ISD::ADD, MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
3328 { ISD::ADD, MVT::v2i8, 2 },
3329 { ISD::ADD, MVT::v4i8, 2 },
3330 { ISD::ADD, MVT::v8i8, 2 },
3331 { ISD::ADD, MVT::v16i8, 3 },
3332 };
3333
3334 static const CostTblEntry AVX1CostTblNoPairWise[] = {
3335 { ISD::FADD, MVT::v4f64, 3 },
3336 { ISD::FADD, MVT::v4f32, 3 },
3337 { ISD::FADD, MVT::v8f32, 4 },
3338 { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
3339 { ISD::ADD, MVT::v4i64, 3 },
3340 { ISD::ADD, MVT::v8i32, 5 },
3341 { ISD::ADD, MVT::v16i16, 5 },
3342 { ISD::ADD, MVT::v32i8, 4 },
3343 };
3344
3345 int ISD = TLI->InstructionOpcodeToISD(Opcode);
3346 assert(ISD && "Invalid opcode");
3347
3348 // Before legalizing the type, give a chance to look up illegal narrow types
3349 // in the table.
3350 // FIXME: Is there a better way to do this?
3351 EVT VT = TLI->getValueType(DL, ValTy);
3352 if (VT.isSimple()) {
3353 MVT MTy = VT.getSimpleVT();
3354 if (ST->isSLM())
3355 if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
3356 return Entry->Cost;
3357
3358 if (ST->hasAVX())
3359 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
3360 return Entry->Cost;
3361
3362 if (ST->hasSSE2())
3363 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
3364 return Entry->Cost;
3365 }
3366
3367 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
3368
3369 MVT MTy = LT.second;
3370
3371 auto *ValVTy = cast<FixedVectorType>(ValTy);
3372
3373 unsigned ArithmeticCost = 0;
3374 if (LT.first != 1 && MTy.isVector() &&
3375 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
3376 // Type needs to be split. We need LT.first - 1 arithmetic ops.
3377 auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
3378 MTy.getVectorNumElements());
3379 ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
3380 ArithmeticCost *= LT.first - 1;
3381 }
3382
3383 if (ST->isSLM())
3384 if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
3385 return ArithmeticCost + Entry->Cost;
3386
3387 if (ST->hasAVX())
3388 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
3389 return ArithmeticCost + Entry->Cost;
3390
3391 if (ST->hasSSE2())
3392 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
3393 return ArithmeticCost + Entry->Cost;
3394
3395 // FIXME: These assume a naive kshift+binop lowering, which is probably
3396 // conservative in most cases.
3397 static const CostTblEntry AVX512BoolReduction[] = {
3398 { ISD::AND, MVT::v2i1, 3 },
3399 { ISD::AND, MVT::v4i1, 5 },
3400 { ISD::AND, MVT::v8i1, 7 },
3401 { ISD::AND, MVT::v16i1, 9 },
3402 { ISD::AND, MVT::v32i1, 11 },
3403 { ISD::AND, MVT::v64i1, 13 },
3404 { ISD::OR, MVT::v2i1, 3 },
3405 { ISD::OR, MVT::v4i1, 5 },
3406 { ISD::OR, MVT::v8i1, 7 },
3407 { ISD::OR, MVT::v16i1, 9 },
3408 { ISD::OR, MVT::v32i1, 11 },
3409 { ISD::OR, MVT::v64i1, 13 },
3410 };
3411
3412 static const CostTblEntry AVX2BoolReduction[] = {
3413 { ISD::AND, MVT::v16i16, 2 }, // vpmovmskb + cmp
3414 { ISD::AND, MVT::v32i8, 2 }, // vpmovmskb + cmp
3415 { ISD::OR, MVT::v16i16, 2 }, // vpmovmskb + cmp
3416 { ISD::OR, MVT::v32i8, 2 }, // vpmovmskb + cmp
3417 };
3418
3419 static const CostTblEntry AVX1BoolReduction[] = {
3420 { ISD::AND, MVT::v4i64, 2 }, // vmovmskpd + cmp
3421 { ISD::AND, MVT::v8i32, 2 }, // vmovmskps + cmp
3422 { ISD::AND, MVT::v16i16, 4 }, // vextractf128 + vpand + vpmovmskb + cmp
3423 { ISD::AND, MVT::v32i8, 4 }, // vextractf128 + vpand + vpmovmskb + cmp
3424 { ISD::OR, MVT::v4i64, 2 }, // vmovmskpd + cmp
3425 { ISD::OR, MVT::v8i32, 2 }, // vmovmskps + cmp
3426 { ISD::OR, MVT::v16i16, 4 }, // vextractf128 + vpor + vpmovmskb + cmp
3427 { ISD::OR, MVT::v32i8, 4 }, // vextractf128 + vpor + vpmovmskb + cmp
3428 };
3429
3430 static const CostTblEntry SSE2BoolReduction[] = {
3431 { ISD::AND, MVT::v2i64, 2 }, // movmskpd + cmp
3432 { ISD::AND, MVT::v4i32, 2 }, // movmskps + cmp
3433 { ISD::AND, MVT::v8i16, 2 }, // pmovmskb + cmp
3434 { ISD::AND, MVT::v16i8, 2 }, // pmovmskb + cmp
3435 { ISD::OR, MVT::v2i64, 2 }, // movmskpd + cmp
3436 { ISD::OR, MVT::v4i32, 2 }, // movmskps + cmp
3437 { ISD::OR, MVT::v8i16, 2 }, // pmovmskb + cmp
3438 { ISD::OR, MVT::v16i8, 2 }, // pmovmskb + cmp
3439 };
3440
3441 // Handle bool allof/anyof patterns.
3442 if (ValVTy->getElementType()->isIntegerTy(1)) {
3443 unsigned ArithmeticCost = 0;
3444 if (LT.first != 1 && MTy.isVector() &&
3445 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
3446 // Type needs to be split. We need LT.first - 1 arithmetic ops.
3447 auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
3448 MTy.getVectorNumElements());
3449 ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
3450 ArithmeticCost *= LT.first - 1;
3451 }
3452
3453 if (ST->hasAVX512())
3454 if (const auto *Entry = CostTableLookup(AVX512BoolReduction, ISD, MTy))
3455 return ArithmeticCost + Entry->Cost;
3456 if (ST->hasAVX2())
3457 if (const auto *Entry = CostTableLookup(AVX2BoolReduction, ISD, MTy))
3458 return ArithmeticCost + Entry->Cost;
3459 if (ST->hasAVX())
3460 if (const auto *Entry = CostTableLookup(AVX1BoolReduction, ISD, MTy))
3461 return ArithmeticCost + Entry->Cost;
3462 if (ST->hasSSE2())
3463 if (const auto *Entry = CostTableLookup(SSE2BoolReduction, ISD, MTy))
3464 return ArithmeticCost + Entry->Cost;
3465
3466 return BaseT::getArithmeticReductionCost(Opcode, ValVTy, IsPairwise,
3467 CostKind);
3468 }
3469
3470 unsigned NumVecElts = ValVTy->getNumElements();
3471 unsigned ScalarSize = ValVTy->getScalarSizeInBits();
3472
3473 // Special case power of 2 reductions where the scalar type isn't changed
3474 // by type legalization.
3475 if (!isPowerOf2_32(NumVecElts) || ScalarSize != MTy.getScalarSizeInBits())
3476 return BaseT::getArithmeticReductionCost(Opcode, ValVTy, IsPairwise,
3477 CostKind);
3478
3479 unsigned ReductionCost = 0;
3480
3481 auto *Ty = ValVTy;
3482 if (LT.first != 1 && MTy.isVector() &&
3483 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
3484 // Type needs to be split. We need LT.first - 1 arithmetic ops.
3485 Ty = FixedVectorType::get(ValVTy->getElementType(),
3486 MTy.getVectorNumElements());
3487 ReductionCost = getArithmeticInstrCost(Opcode, Ty, CostKind);
3488 ReductionCost *= LT.first - 1;
3489 NumVecElts = MTy.getVectorNumElements();
3490 }
3491
3492 // Now handle reduction with the legal type, taking into account size changes
3493 // at each level.
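  // E.g. for a v8i32 add reduction on AVX2 this ladder is: extract the high
  // 128-bit half and add (256 -> 128 bits), a v2i64-style permute and add
  // (128 -> 64 bits), a v4i32 shuffle and add (64 -> 32 bits), and finally
  // the extract of element 0 below.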
3494 while (NumVecElts > 1) {
3495 // Determine the size of the remaining vector we need to reduce.
3496 unsigned Size = NumVecElts * ScalarSize;
3497 NumVecElts /= 2;
3498 // If we're reducing from 256/512 bits, use an extract_subvector.
3499 if (Size > 128) {
3500 auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
3501 ReductionCost +=
3502 getShuffleCost(TTI::SK_ExtractSubvector, Ty, NumVecElts, SubTy);
3503 Ty = SubTy;
3504 } else if (Size == 128) {
3505 // Reducing from 128 bits is a permute of v2f64/v2i64.
3506 FixedVectorType *ShufTy;
3507 if (ValVTy->getElementType()->isFloatingPointTy())
3508 ShufTy =
3509 FixedVectorType::get(Type::getDoubleTy(ValVTy->getContext()), 2);
3510 else
3511 ShufTy =
3512 FixedVectorType::get(Type::getInt64Ty(ValVTy->getContext()), 2);
3513 ReductionCost +=
3514 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, 0, nullptr);
3515 } else if (Size == 64) {
3516 // Reducing from 64 bits is a shuffle of v4f32/v4i32.
3517 FixedVectorType *ShufTy;
3518 if (ValVTy->getElementType()->isFloatingPointTy())
3519 ShufTy =
3520 FixedVectorType::get(Type::getFloatTy(ValVTy->getContext()), 4);
3521 else
3522 ShufTy =
3523 FixedVectorType::get(Type::getInt32Ty(ValVTy->getContext()), 4);
3524 ReductionCost +=
3525 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, 0, nullptr);
3526 } else {
3527 // Reducing from smaller size is a shift by immediate.
3528 auto *ShiftTy = FixedVectorType::get(
3529 Type::getIntNTy(ValVTy->getContext(), Size), 128 / Size);
3530 ReductionCost += getArithmeticInstrCost(
3531 Instruction::LShr, ShiftTy, CostKind,
3532 TargetTransformInfo::OK_AnyValue,
3533 TargetTransformInfo::OK_UniformConstantValue,
3534 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
3535 }
3536
3537 // Add the arithmetic op for this level.
3538 ReductionCost += getArithmeticInstrCost(Opcode, Ty, CostKind);
3539 }
3540
3541 // Add the final extract element to the cost.
3542 return ReductionCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
3543 }
3544
3545 int X86TTIImpl::getMinMaxCost(Type *Ty, Type *CondTy, bool IsUnsigned) {
3546 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
3547
3548 MVT MTy = LT.second;
3549
3550 int ISD;
3551 if (Ty->isIntOrIntVectorTy()) {
3552 ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
3553 } else {
3554 assert(Ty->isFPOrFPVectorTy() &&
3555 "Expected float point or integer vector type.");
3556 ISD = ISD::FMINNUM;
3557 }
3558
3559 static const CostTblEntry SSE1CostTbl[] = {
3560 {ISD::FMINNUM, MVT::v4f32, 1},
3561 };
3562
3563 static const CostTblEntry SSE2CostTbl[] = {
3564 {ISD::FMINNUM, MVT::v2f64, 1},
3565 {ISD::SMIN, MVT::v8i16, 1},
3566 {ISD::UMIN, MVT::v16i8, 1},
3567 };
3568
3569 static const CostTblEntry SSE41CostTbl[] = {
3570 {ISD::SMIN, MVT::v4i32, 1},
3571 {ISD::UMIN, MVT::v4i32, 1},
3572 {ISD::UMIN, MVT::v8i16, 1},
3573 {ISD::SMIN, MVT::v16i8, 1},
3574 };
3575
3576 static const CostTblEntry SSE42CostTbl[] = {
3577 {ISD::UMIN, MVT::v2i64, 3}, // xor+pcmpgtq+blendvpd
3578 };
3579
3580 static const CostTblEntry AVX1CostTbl[] = {
3581 {ISD::FMINNUM, MVT::v8f32, 1},
3582 {ISD::FMINNUM, MVT::v4f64, 1},
3583 {ISD::SMIN, MVT::v8i32, 3},
3584 {ISD::UMIN, MVT::v8i32, 3},
3585 {ISD::SMIN, MVT::v16i16, 3},
3586 {ISD::UMIN, MVT::v16i16, 3},
3587 {ISD::SMIN, MVT::v32i8, 3},
3588 {ISD::UMIN, MVT::v32i8, 3},
3589 };
3590
3591 static const CostTblEntry AVX2CostTbl[] = {
3592 {ISD::SMIN, MVT::v8i32, 1},
3593 {ISD::UMIN, MVT::v8i32, 1},
3594 {ISD::SMIN, MVT::v16i16, 1},
3595 {ISD::UMIN, MVT::v16i16, 1},
3596 {ISD::SMIN, MVT::v32i8, 1},
3597 {ISD::UMIN, MVT::v32i8, 1},
3598 };
3599
3600 static const CostTblEntry AVX512CostTbl[] = {
3601 {ISD::FMINNUM, MVT::v16f32, 1},
3602 {ISD::FMINNUM, MVT::v8f64, 1},
3603 {ISD::SMIN, MVT::v2i64, 1},
3604 {ISD::UMIN, MVT::v2i64, 1},
3605 {ISD::SMIN, MVT::v4i64, 1},
3606 {ISD::UMIN, MVT::v4i64, 1},
3607 {ISD::SMIN, MVT::v8i64, 1},
3608 {ISD::UMIN, MVT::v8i64, 1},
3609 {ISD::SMIN, MVT::v16i32, 1},
3610 {ISD::UMIN, MVT::v16i32, 1},
3611 };
3612
3613 static const CostTblEntry AVX512BWCostTbl[] = {
3614 {ISD::SMIN, MVT::v32i16, 1},
3615 {ISD::UMIN, MVT::v32i16, 1},
3616 {ISD::SMIN, MVT::v64i8, 1},
3617 {ISD::UMIN, MVT::v64i8, 1},
3618 };
3619
3620 // If we have a native MIN/MAX instruction for this type, use it.
3621 if (ST->hasBWI())
3622 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
3623 return LT.first * Entry->Cost;
3624
3625 if (ST->hasAVX512())
3626 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
3627 return LT.first * Entry->Cost;
3628
3629 if (ST->hasAVX2())
3630 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
3631 return LT.first * Entry->Cost;
3632
3633 if (ST->hasAVX())
3634 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
3635 return LT.first * Entry->Cost;
3636
3637 if (ST->hasSSE42())
3638 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
3639 return LT.first * Entry->Cost;
3640
3641 if (ST->hasSSE41())
3642 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
3643 return LT.first * Entry->Cost;
3644
3645 if (ST->hasSSE2())
3646 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
3647 return LT.first * Entry->Cost;
3648
3649 if (ST->hasSSE1())
3650 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
3651 return LT.first * Entry->Cost;
3652
3653 unsigned CmpOpcode;
3654 if (Ty->isFPOrFPVectorTy()) {
3655 CmpOpcode = Instruction::FCmp;
3656 } else {
3657 assert(Ty->isIntOrIntVectorTy() &&
3658 "expecting floating point or integer type for min/max reduction");
3659 CmpOpcode = Instruction::ICmp;
3660 }
3661
3662 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
3663 // Otherwise fall back to cmp+select.
3664 return getCmpSelInstrCost(CmpOpcode, Ty, CondTy, CmpInst::BAD_ICMP_PREDICATE,
3665 CostKind) +
3666 getCmpSelInstrCost(Instruction::Select, Ty, CondTy,
3667 CmpInst::BAD_ICMP_PREDICATE, CostKind);
3668 }
3669
3670 int X86TTIImpl::getMinMaxReductionCost(VectorType *ValTy, VectorType *CondTy,
3671 bool IsPairwise, bool IsUnsigned,
3672 TTI::TargetCostKind CostKind) {
3673 // Just use the default implementation for pair reductions.
3674 if (IsPairwise)
3675 return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsPairwise, IsUnsigned,
3676 CostKind);
3677
3678 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
3679
3680 MVT MTy = LT.second;
3681
3682 int ISD;
3683 if (ValTy->isIntOrIntVectorTy()) {
3684 ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
3685 } else {
3686 assert(ValTy->isFPOrFPVectorTy() &&
3687 "Expected float point or integer vector type.");
3688 ISD = ISD::FMINNUM;
3689 }
3690
3691 // We use the Intel Architecture Code Analyzer (IACA) to measure the
3692 // throughput and use that as the cost.
3693
3694 static const CostTblEntry SSE2CostTblNoPairWise[] = {
3695 {ISD::UMIN, MVT::v2i16, 5}, // need pxors to use pminsw/pmaxsw
3696 {ISD::UMIN, MVT::v4i16, 7}, // need pxors to use pminsw/pmaxsw
3697 {ISD::UMIN, MVT::v8i16, 9}, // need pxors to use pminsw/pmaxsw
3698 };
3699
3700 static const CostTblEntry SSE41CostTblNoPairWise[] = {
3701 {ISD::SMIN, MVT::v2i16, 3}, // same as sse2
3702 {ISD::SMIN, MVT::v4i16, 5}, // same as sse2
3703 {ISD::UMIN, MVT::v2i16, 5}, // same as sse2
3704 {ISD::UMIN, MVT::v4i16, 7}, // same as sse2
3705 {ISD::SMIN, MVT::v8i16, 4}, // phminposuw+xor
3706 {ISD::UMIN, MVT::v8i16, 4}, // FIXME: umin is cheaper than umax
3707 {ISD::SMIN, MVT::v2i8, 3}, // pminsb
3708 {ISD::SMIN, MVT::v4i8, 5}, // pminsb
3709 {ISD::SMIN, MVT::v8i8, 7}, // pminsb
3710 {ISD::SMIN, MVT::v16i8, 6},
3711 {ISD::UMIN, MVT::v2i8, 3}, // same as sse2
3712 {ISD::UMIN, MVT::v4i8, 5}, // same as sse2
3713 {ISD::UMIN, MVT::v8i8, 7}, // same as sse2
3714 {ISD::UMIN, MVT::v16i8, 6}, // FIXME: umin is cheaper than umax
3715 };
3716
3717 static const CostTblEntry AVX1CostTblNoPairWise[] = {
3718 {ISD::SMIN, MVT::v16i16, 6},
3719 {ISD::UMIN, MVT::v16i16, 6}, // FIXME: umin is cheaper than umax
3720 {ISD::SMIN, MVT::v32i8, 8},
3721 {ISD::UMIN, MVT::v32i8, 8},
3722 };
3723
3724 static const CostTblEntry AVX512BWCostTblNoPairWise[] = {
3725 {ISD::SMIN, MVT::v32i16, 8},
3726 {ISD::UMIN, MVT::v32i16, 8}, // FIXME: umin is cheaper than umax
3727 {ISD::SMIN, MVT::v64i8, 10},
3728 {ISD::UMIN, MVT::v64i8, 10},
3729 };
3730
3731 // Before legalizing the type, give a chance to look up illegal narrow types
3732 // in the table.
3733 // FIXME: Is there a better way to do this?
3734 EVT VT = TLI->getValueType(DL, ValTy);
3735 if (VT.isSimple()) {
3736 MVT MTy = VT.getSimpleVT();
3737 if (ST->hasBWI())
3738 if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
3739 return Entry->Cost;
3740
3741 if (ST->hasAVX())
3742 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
3743 return Entry->Cost;
3744
3745 if (ST->hasSSE41())
3746 if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
3747 return Entry->Cost;
3748
3749 if (ST->hasSSE2())
3750 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
3751 return Entry->Cost;
3752 }
3753
3754 auto *ValVTy = cast<FixedVectorType>(ValTy);
3755 unsigned NumVecElts = ValVTy->getNumElements();
3756
3757 auto *Ty = ValVTy;
3758 unsigned MinMaxCost = 0;
3759 if (LT.first != 1 && MTy.isVector() &&
3760 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
3761 // Type needs to be split. We need LT.first - 1 operations.
3762 Ty = FixedVectorType::get(ValVTy->getElementType(),
3763 MTy.getVectorNumElements());
3764 auto *SubCondTy = FixedVectorType::get(CondTy->getElementType(),
3765 MTy.getVectorNumElements());
3766 MinMaxCost = getMinMaxCost(Ty, SubCondTy, IsUnsigned);
3767 MinMaxCost *= LT.first - 1;
3768 NumVecElts = MTy.getVectorNumElements();
3769 }
3770
3771 if (ST->hasBWI())
3772 if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
3773 return MinMaxCost + Entry->Cost;
3774
3775 if (ST->hasAVX())
3776 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
3777 return MinMaxCost + Entry->Cost;
3778
3779 if (ST->hasSSE41())
3780 if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
3781 return MinMaxCost + Entry->Cost;
3782
3783 if (ST->hasSSE2())
3784 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
3785 return MinMaxCost + Entry->Cost;
3786
3787 unsigned ScalarSize = ValTy->getScalarSizeInBits();
3788
3789 // Special case power of 2 reductions where the scalar type isn't changed
3790 // by type legalization.
3791 if (!isPowerOf2_32(ValVTy->getNumElements()) ||
3792 ScalarSize != MTy.getScalarSizeInBits())
3793 return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsPairwise, IsUnsigned,
3794 CostKind);
3795
3796 // Now handle reduction with the legal type, taking into account size changes
3797 // at each level.
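  // The ladder below mirrors getArithmeticReductionCost above, but applies
  // a min/max (via getMinMaxCost) at each level instead of an arithmetic op.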
3798 while (NumVecElts > 1) {
3799 // Determine the size of the remaining vector we need to reduce.
3800 unsigned Size = NumVecElts * ScalarSize;
3801 NumVecElts /= 2;
3802 // If we're reducing from 256/512 bits, use an extract_subvector.
3803 if (Size > 128) {
3804 auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
3805 MinMaxCost +=
3806 getShuffleCost(TTI::SK_ExtractSubvector, Ty, NumVecElts, SubTy);
3807 Ty = SubTy;
3808 } else if (Size == 128) {
3809 // Reducing from 128 bits is a permute of v2f64/v2i64.
3810 VectorType *ShufTy;
3811 if (ValTy->getElementType()->isFloatingPointTy())
3812 ShufTy =
3813 FixedVectorType::get(Type::getDoubleTy(ValTy->getContext()), 2);
3814 else
3815 ShufTy = FixedVectorType::get(Type::getInt64Ty(ValTy->getContext()), 2);
3816 MinMaxCost +=
3817 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, 0, nullptr);
3818 } else if (Size == 64) {
3819 // Reducing from 64 bits is a shuffle of v4f32/v4i32.
3820 FixedVectorType *ShufTy;
3821 if (ValTy->getElementType()->isFloatingPointTy())
3822 ShufTy = FixedVectorType::get(Type::getFloatTy(ValTy->getContext()), 4);
3823 else
3824 ShufTy = FixedVectorType::get(Type::getInt32Ty(ValTy->getContext()), 4);
3825 MinMaxCost +=
3826 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, 0, nullptr);
3827 } else {
3828 // Reducing from smaller size is a shift by immediate.
3829 auto *ShiftTy = FixedVectorType::get(
3830 Type::getIntNTy(ValTy->getContext(), Size), 128 / Size);
3831 MinMaxCost += getArithmeticInstrCost(
3832 Instruction::LShr, ShiftTy, TTI::TCK_RecipThroughput,
3833 TargetTransformInfo::OK_AnyValue,
3834 TargetTransformInfo::OK_UniformConstantValue,
3835 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
3836 }
3837
3838 // Add the arithmetic op for this level.
3839 auto *SubCondTy =
3840 FixedVectorType::get(CondTy->getElementType(), Ty->getNumElements());
3841 MinMaxCost += getMinMaxCost(Ty, SubCondTy, IsUnsigned);
3842 }
3843
3844 // Add the final extract element to the cost.
3845 return MinMaxCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
3846 }
3847
3848 /// Calculate the cost of materializing a 64-bit value. This helper
3849 /// method might only calculate a fraction of a larger immediate. Therefore it
3850 /// is valid to return a cost of ZERO.
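/// For example, Val == 0 is free, Val == 42 fits in a sign-extended imm32
/// and costs TTI::TCC_Basic, and a value such as 0x123456789A needs a
/// movabs and costs 2 * TTI::TCC_Basic.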
3851 int X86TTIImpl::getIntImmCost(int64_t Val) {
3852 if (Val == 0)
3853 return TTI::TCC_Free;
3854
3855 if (isInt<32>(Val))
3856 return TTI::TCC_Basic;
3857
3858 return 2 * TTI::TCC_Basic;
3859 }
3860
3861 int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
3862 TTI::TargetCostKind CostKind) {
3863 assert(Ty->isIntegerTy());
3864
3865 unsigned BitSize = Ty->getPrimitiveSizeInBits();
3866 if (BitSize == 0)
3867 return ~0U;
3868
3869 // Never hoist constants larger than 128 bits, because this might lead to
3870 // incorrect code generation or assertions in codegen.
3871 // FIXME: Create a cost model for types larger than i128 once the codegen
3872 // issues have been fixed.
3873 if (BitSize > 128)
3874 return TTI::TCC_Free;
3875
3876 if (Imm == 0)
3877 return TTI::TCC_Free;
3878
3879 // Sign-extend all constants to a multiple of 64-bit.
3880 APInt ImmVal = Imm;
3881 if (BitSize % 64 != 0)
3882 ImmVal = Imm.sext(alignTo(BitSize, 64));
3883
3884 // Split the constant into 64-bit chunks and calculate the cost for each
3885 // chunk.
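  // E.g. an i128 constant with value 1 << 64 splits into a zero low chunk
  // (free) and a small high chunk (TCC_Basic), for a total cost of 1.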
3886 int Cost = 0;
3887 for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
3888 APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
3889 int64_t Val = Tmp.getSExtValue();
3890 Cost += getIntImmCost(Val);
3891 }
3892 // We need at least one instruction to materialize the constant.
3893 return std::max(1, Cost);
3894 }
3895
3896 int X86TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
3897 const APInt &Imm, Type *Ty,
3898 TTI::TargetCostKind CostKind,
3899 Instruction *Inst) {
3900 assert(Ty->isIntegerTy());
3901
3902 unsigned BitSize = Ty->getPrimitiveSizeInBits();
3903 // There is no cost model for constants with a bit size of 0. Return TCC_Free
3904 // here, so that constant hoisting will ignore this constant.
3905 if (BitSize == 0)
3906 return TTI::TCC_Free;
3907
3908 unsigned ImmIdx = ~0U;
3909 switch (Opcode) {
3910 default:
3911 return TTI::TCC_Free;
3912 case Instruction::GetElementPtr:
3913 // Always hoist the base address of a GetElementPtr. This prevents the
3914 // creation of new constants for every base constant that gets constant
3915 // folded with the offset.
3916 if (Idx == 0)
3917 return 2 * TTI::TCC_Basic;
3918 return TTI::TCC_Free;
3919 case Instruction::Store:
3920 ImmIdx = 0;
3921 break;
3922 case Instruction::ICmp:
3923 // This is an imperfect hack to prevent constant hoisting of
3924 // compares that might be trying to check if a 64-bit value fits in
3925 // 32 bits. The backend can optimize these cases using a right shift by 32.
3926 // Ideally we would check the compare predicate here. There are also other
3927 // similar immediates the backend can use shifts for.
3928 if (Idx == 1 && Imm.getBitWidth() == 64) {
3929 uint64_t ImmVal = Imm.getZExtValue();
3930 if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
3931 return TTI::TCC_Free;
3932 }
3933 ImmIdx = 1;
3934 break;
3935 case Instruction::And:
3936 // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
3937 // by using a 32-bit operation with implicit zero extension. Detect such
3938 // immediates here as the normal path expects bit 31 to be sign extended.
3939 if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
3940 return TTI::TCC_Free;
3941 ImmIdx = 1;
3942 break;
3943 case Instruction::Add:
3944 case Instruction::Sub:
3945 // For add/sub, we can use the opposite instruction for INT32_MIN.
3946 if (Idx == 1 && Imm.getBitWidth() == 64 && Imm.getZExtValue() == 0x80000000)
3947 return TTI::TCC_Free;
3948 ImmIdx = 1;
3949 break;
3950 case Instruction::UDiv:
3951 case Instruction::SDiv:
3952 case Instruction::URem:
3953 case Instruction::SRem:
3954 // Division by constant is typically expanded later into a different
3955 // instruction sequence. This completely changes the constants.
3956 // Report them as "free" to stop ConstantHoist from marking them as opaque.
3957 return TTI::TCC_Free;
3958 case Instruction::Mul:
3959 case Instruction::Or:
3960 case Instruction::Xor:
3961 ImmIdx = 1;
3962 break;
3963 // Always return TCC_Free for the shift value of a shift instruction.
3964 case Instruction::Shl:
3965 case Instruction::LShr:
3966 case Instruction::AShr:
3967 if (Idx == 1)
3968 return TTI::TCC_Free;
3969 break;
3970 case Instruction::Trunc:
3971 case Instruction::ZExt:
3972 case Instruction::SExt:
3973 case Instruction::IntToPtr:
3974 case Instruction::PtrToInt:
3975 case Instruction::BitCast:
3976 case Instruction::PHI:
3977 case Instruction::Call:
3978 case Instruction::Select:
3979 case Instruction::Ret:
3980 case Instruction::Load:
3981 break;
3982 }
3983
3984 if (Idx == ImmIdx) {
3985 int NumConstants = divideCeil(BitSize, 64);
3986 int Cost = X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
3987 return (Cost <= NumConstants * TTI::TCC_Basic)
3988 ? static_cast<int>(TTI::TCC_Free)
3989 : Cost;
3990 }
3991
3992 return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
3993 }
3994
3995 int X86TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
3996 const APInt &Imm, Type *Ty,
3997 TTI::TargetCostKind CostKind) {
3998 assert(Ty->isIntegerTy());
3999
4000 unsigned BitSize = Ty->getPrimitiveSizeInBits();
4001 // There is no cost model for constants with a bit size of 0. Return TCC_Free
4002 // here, so that constant hoisting will ignore this constant.
4003 if (BitSize == 0)
4004 return TTI::TCC_Free;
4005
4006 switch (IID) {
4007 default:
4008 return TTI::TCC_Free;
4009 case Intrinsic::sadd_with_overflow:
4010 case Intrinsic::uadd_with_overflow:
4011 case Intrinsic::ssub_with_overflow:
4012 case Intrinsic::usub_with_overflow:
4013 case Intrinsic::smul_with_overflow:
4014 case Intrinsic::umul_with_overflow:
4015 if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
4016 return TTI::TCC_Free;
4017 break;
4018 case Intrinsic::experimental_stackmap:
4019 if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
4020 return TTI::TCC_Free;
4021 break;
4022 case Intrinsic::experimental_patchpoint_void:
4023 case Intrinsic::experimental_patchpoint_i64:
4024 if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
4025 return TTI::TCC_Free;
4026 break;
4027 }
4028 return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
4029 }
4030
4031 unsigned
4032 X86TTIImpl::getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind) {
4033 if (CostKind != TTI::TCK_RecipThroughput)
4034 return Opcode == Instruction::PHI ? 0 : 1;
4035 // Branches are assumed to be predicted.
4036 return 0;
4037 }
4038
4039 int X86TTIImpl::getGatherOverhead() const {
4040 // Some CPUs have more overhead for gather. The specified overhead is relative
4041 // to the Load operation. "2" is the number provided by Intel architects. This
4042 // parameter is used for cost estimation of Gather Op and comparison with
4043 // other alternatives.
4044 // TODO: Remove the explicit hasAVX512()? That would mean we would only
4045 // enable gather with an explicit -march.
4046 if (ST->hasAVX512() || (ST->hasAVX2() && ST->hasFastGather()))
4047 return 2;
4048
4049 return 1024;
4050 }
4051
4052 int X86TTIImpl::getScatterOverhead() const {
4053 if (ST->hasAVX512())
4054 return 2;
4055
4056 return 1024;
4057 }
4058
4059 // Return an average cost of a Gather / Scatter instruction; may be improved later.
4060 int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, const Value *Ptr,
4061 Align Alignment, unsigned AddressSpace) {
4062
4063 assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
4064 unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
4065
4066 // Try to reduce index size from 64 bit (default for GEP)
4067 // to 32. It is essential for VF 16. If the index can't be reduced to 32, the
4068 // operation will use 16 x 64 indices which do not fit in a zmm and needs
4069 // to split. Also check that the base pointer is the same for all lanes,
4070 // and that there's at most one variable index.
4071 auto getIndexSizeInBits = [](const Value *Ptr, const DataLayout &DL) {
4072 unsigned IndexSize = DL.getPointerSizeInBits();
4073 const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
4074 if (IndexSize < 64 || !GEP)
4075 return IndexSize;
4076
4077 unsigned NumOfVarIndices = 0;
4078 const Value *Ptrs = GEP->getPointerOperand();
4079 if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
4080 return IndexSize;
4081 for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
4082 if (isa<Constant>(GEP->getOperand(i)))
4083 continue;
4084 Type *IndxTy = GEP->getOperand(i)->getType();
4085 if (auto *IndexVTy = dyn_cast<VectorType>(IndxTy))
4086 IndxTy = IndexVTy->getElementType();
4087 if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
4088 !isa<SExtInst>(GEP->getOperand(i))) ||
4089 ++NumOfVarIndices > 1)
4090 return IndexSize; // 64
4091 }
4092 return (unsigned)32;
4093 };
4094
4095 // Trying to reduce IndexSize to 32 bits for vector 16.
4096 // By default the IndexSize is equal to pointer size.
4097 unsigned IndexSize = (ST->hasAVX512() && VF >= 16)
4098 ? getIndexSizeInBits(Ptr, DL)
4099 : DL.getPointerSizeInBits();
4100
4101 auto *IndexVTy = FixedVectorType::get(
4102 IntegerType::get(SrcVTy->getContext(), IndexSize), VF);
4103 std::pair<int, MVT> IdxsLT = TLI->getTypeLegalizationCost(DL, IndexVTy);
4104 std::pair<int, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, SrcVTy);
4105 int SplitFactor = std::max(IdxsLT.first, SrcLT.first);
4106 if (SplitFactor > 1) {
4107 // Handle splitting of vector of pointers
4108 auto *SplitSrcTy =
4109 FixedVectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
4110 return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
4111 AddressSpace);
4112 }
4113
4114 // The gather / scatter cost is given by Intel architects. It is a rough
4115 // number since we are looking at one instruction in a time.
4116 const int GSOverhead = (Opcode == Instruction::Load)
4117 ? getGatherOverhead()
4118 : getScatterOverhead();
4119 return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
4120 MaybeAlign(Alignment), AddressSpace,
4121 TTI::TCK_RecipThroughput);
4122 }
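
// Worked example for getGSVectorCost (illustrative; the scalar memory-op
// cost is subtarget dependent): an AVX-512 gather of <16 x float> whose GEP
// has a splat base pointer and a single sign-extended variable index gets
// IndexSize reduced to 32, so both the v16i32 indices and the v16f32 data
// legalize without splitting (SplitFactor == 1). Assuming getMemoryOpCost
// returns 1 for a scalar float load, the estimate is
//   getGatherOverhead() + 16 * 1 = 2 + 16 = 18.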

/// Return the cost of full scalarization of a gather / scatter operation.
///
/// Opcode - Load or Store instruction.
/// SrcVTy - The type of the data vector that should be gathered or scattered.
/// VariableMask - The mask is non-constant at compile time.
/// Alignment - Alignment for one element.
/// AddressSpace - pointer[s] address space.
///
int X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
                                bool VariableMask, Align Alignment,
                                unsigned AddressSpace) {
  unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
  APInt DemandedElts = APInt::getAllOnesValue(VF);
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  int MaskUnpackCost = 0;
  if (VariableMask) {
    auto *MaskTy =
        FixedVectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
    MaskUnpackCost =
        getScalarizationOverhead(MaskTy, DemandedElts, false, true);
    int ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), nullptr,
        CmpInst::BAD_ICMP_PREDICATE, CostKind);
    int BranchCost = getCFInstrCost(Instruction::Br, CostKind);
    MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
  }

  // The cost of the scalar loads/stores.
  int MemoryOpCost = VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                          MaybeAlign(Alignment), AddressSpace,
                                          CostKind);

  int InsertExtractCost = 0;
  if (Opcode == Instruction::Load)
    for (unsigned i = 0; i < VF; ++i)
      // Add the cost of inserting each scalar load into the vector.
      InsertExtractCost +=
          getVectorInstrCost(Instruction::InsertElement, SrcVTy, i);
  else
    for (unsigned i = 0; i < VF; ++i)
      // Add the cost of extracting each element out of the data vector.
      InsertExtractCost +=
          getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i);

  return MemoryOpCost + MaskUnpackCost + InsertExtractCost;
}
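
// Worked example for getGSScalarCost (illustrative, assuming unit cost for
// each scalar operation): scalarizing a variable-mask gather of <4 x float>
// pays for extracting the four i1 mask bits plus a compare and a branch per
// lane (MaskUnpackCost), four scalar loads (MemoryOpCost = 4 * 1), and four
// insertelements to rebuild the result vector (InsertExtractCost = 4 * 1);
// the per-lane VF terms dominate the total.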

/// Calculate the cost of a Gather / Scatter operation.
int X86TTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *SrcVTy,
                                       const Value *Ptr, bool VariableMask,
                                       Align Alignment,
                                       TTI::TargetCostKind CostKind,
                                       const Instruction *I = nullptr) {

  if (CostKind != TTI::TCK_RecipThroughput)
    return 1;

  assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
  unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
  PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy && Ptr->getType()->isVectorTy())
    PtrTy = dyn_cast<PointerType>(
        cast<VectorType>(Ptr->getType())->getElementType());
  assert(PtrTy && "Unexpected type for Ptr argument");
  unsigned AddressSpace = PtrTy->getAddressSpace();

  bool Scalarize = false;
  if ((Opcode == Instruction::Load &&
       !isLegalMaskedGather(SrcVTy, Align(Alignment))) ||
      (Opcode == Instruction::Store &&
       !isLegalMaskedScatter(SrcVTy, Align(Alignment))))
    Scalarize = true;
  // Gather / Scatter on 2-element vectors is not profitable on KNL / SKX.
  // A vector-4 form of the gather/scatter instruction does not exist on KNL.
  // We could extend it to 8 elements, but zeroing the upper bits of the mask
  // vector would add more instructions. Right now we give the scalar cost
  // for vector-4 on KNL. TODO: Check whether the gather/scatter instruction
  // is better in the VariableMask case.
  if (ST->hasAVX512() && (VF == 2 || (VF == 4 && !ST->hasVLX())))
    Scalarize = true;

  if (Scalarize)
    return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
                           AddressSpace);

  return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
}
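
// For example (illustrative), on a KNL-like subtarget (AVX-512 without VLX)
// a masked gather of <4 x double> takes the Scalarize path above, while an
// <8 x double> gather maps onto a single hardware gather and is priced by
// getGSVectorCost instead.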

bool X86TTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                               TargetTransformInfo::LSRCost &C2) {
  // The X86-specific tweak here is that instruction count (Insns) gets first
  // priority.
  return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
                  C1.NumIVMuls, C1.NumBaseAdds,
                  C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
         std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
                  C2.NumIVMuls, C2.NumBaseAdds,
                  C2.ScaleCost, C2.ImmCost, C2.SetupCost);
}

bool X86TTIImpl::canMacroFuseCmp() {
  return ST->hasMacroFusion() || ST->hasBranchFusion();
}

bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
  if (!ST->hasAVX())
    return false;

  // The backend can't handle a single element vector.
  if (isa<VectorType>(DataTy) &&
      cast<FixedVectorType>(DataTy)->getNumElements() == 1)
    return false;
  Type *ScalarTy = DataTy->getScalarType();

  if (ScalarTy->isPointerTy())
    return true;

  if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
    return true;

  if (!ScalarTy->isIntegerTy())
    return false;

  unsigned IntWidth = ScalarTy->getIntegerBitWidth();
  return IntWidth == 32 || IntWidth == 64 ||
         ((IntWidth == 8 || IntWidth == 16) && ST->hasBWI());
}
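
// For example (illustrative), a masked load of <8 x i32> or <4 x double> is
// legal on any AVX subtarget, a <16 x i8> masked load additionally requires
// AVX-512 BWI, and a <1 x i32> load is rejected outright because the backend
// can't handle single-element vectors.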

bool X86TTIImpl::isLegalMaskedStore(Type *DataType, Align Alignment) {
  return isLegalMaskedLoad(DataType, Alignment);
}

bool X86TTIImpl::isLegalNTLoad(Type *DataType, Align Alignment) {
  unsigned DataSize = DL.getTypeStoreSize(DataType);
  // The only supported nontemporal loads are for aligned vectors of 16 or 32
  // bytes. Note that 32-byte nontemporal vector loads are supported by AVX2
  // (the equivalent stores only require AVX).
  if (Alignment >= DataSize && (DataSize == 16 || DataSize == 32))
    return DataSize == 16 ? ST->hasSSE1() : ST->hasAVX2();

  return false;
}

bool X86TTIImpl::isLegalNTStore(Type *DataType, Align Alignment) {
  unsigned DataSize = DL.getTypeStoreSize(DataType);

  // SSE4A supports nontemporal stores of float and double at arbitrary
  // alignment.
  if (ST->hasSSE4A() && (DataType->isFloatTy() || DataType->isDoubleTy()))
    return true;

  // Besides the SSE4A subtarget exception above, only aligned stores are
  // available nontemporally on any other subtarget, and only for power-of-2
  // sizes of 4..32 bytes.
  if (Alignment < DataSize || DataSize < 4 || DataSize > 32 ||
      !isPowerOf2_32(DataSize))
    return false;

  // 32-byte vector nontemporal stores are supported by AVX (the equivalent
  // loads require AVX2).
  if (DataSize == 32)
    return ST->hasAVX();
  if (DataSize == 16)
    return ST->hasSSE1();
  return true;
}
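
// For example (illustrative), an aligned <4 x float> nontemporal store
// (16 bytes) is legal from SSE1 onwards, an aligned <8 x float> store
// (32 bytes) needs AVX, and a lone float or double at arbitrary alignment is
// accepted only on SSE4A-capable CPUs.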

bool X86TTIImpl::isLegalMaskedExpandLoad(Type *DataTy) {
  if (!isa<VectorType>(DataTy))
    return false;

  if (!ST->hasAVX512())
    return false;

  // The backend can't handle a single element vector.
  if (cast<FixedVectorType>(DataTy)->getNumElements() == 1)
    return false;

  Type *ScalarTy = cast<VectorType>(DataTy)->getElementType();

  if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
    return true;

  if (!ScalarTy->isIntegerTy())
    return false;

  unsigned IntWidth = ScalarTy->getIntegerBitWidth();
  return IntWidth == 32 || IntWidth == 64 ||
         ((IntWidth == 8 || IntWidth == 16) && ST->hasVBMI2());
}

bool X86TTIImpl::isLegalMaskedCompressStore(Type *DataTy) {
  return isLegalMaskedExpandLoad(DataTy);
}

bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, Align Alignment) {
  // Some CPUs have better gather performance than others.
  // TODO: Remove the explicit ST->hasAVX512()? That would mean we would only
  // enable gather with a -march.
  if (!(ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2())))
    return false;

  // This function is currently called in two cases: from the Loop Vectorizer
  // and from the Scalarizer.
  // When the Loop Vectorizer asks about the legality of the feature, the
  // vectorization factor is not calculated yet, so the Loop Vectorizer sends
  // a scalar type and the decision is based on the width of the scalar
  // element. Later on, the cost model will estimate usage of this intrinsic
  // based on the vector type.
  // The Scalarizer asks again about legality. It sends a vector type.
  // In this case we can reject non-power-of-2 vectors.
  // We also reject single element vectors as the type legalizer can't
  // scalarize them.
  if (auto *DataVTy = dyn_cast<FixedVectorType>(DataTy)) {
    unsigned NumElts = DataVTy->getNumElements();
    if (NumElts == 1)
      return false;
  }
  Type *ScalarTy = DataTy->getScalarType();
  if (ScalarTy->isPointerTy())
    return true;

  if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
    return true;

  if (!ScalarTy->isIntegerTy())
    return false;

  unsigned IntWidth = ScalarTy->getIntegerBitWidth();
  return IntWidth == 32 || IntWidth == 64;
}

bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, Align Alignment) {
  // AVX2 doesn't support scatter.
  if (!ST->hasAVX512())
    return false;
  return isLegalMaskedGather(DataType, Alignment);
}

bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
  EVT VT = TLI->getValueType(DL, DataType);
  return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT);
}

bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
  return false;
}

bool X86TTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  // Treat this as a subset check on the subtarget features: inlining is
  // allowed when the callee's features are a subset of the caller's.
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
  FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
  return (RealCallerBits & RealCalleeBits) == RealCalleeBits;
}
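
// For example (illustrative), a caller built with +avx2 may inline a callee
// built with only +sse4.2, since every feature the callee relies on is also
// available in the caller; the reverse is rejected, because inlining would
// splice AVX2 code into a function that was not compiled assuming AVX2.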

bool X86TTIImpl::areFunctionArgsABICompatible(
    const Function *Caller, const Function *Callee,
    SmallPtrSetImpl<Argument *> &Args) const {
  if (!BaseT::areFunctionArgsABICompatible(Caller, Callee, Args))
    return false;

  // If we get here, we know the target features match. If one function
  // considers 512-bit vectors legal and the other does not, consider them
  // incompatible.
  const TargetMachine &TM = getTLI()->getTargetMachine();

  if (TM.getSubtarget<X86Subtarget>(*Caller).useAVX512Regs() ==
      TM.getSubtarget<X86Subtarget>(*Callee).useAVX512Regs())
    return true;

  // Consider the arguments compatible if they aren't vectors or aggregates.
  // FIXME: Look at the size of vectors.
  // FIXME: Look at the element types of aggregates to see if there are
  // vectors.
  // FIXME: The API of this function seems intended to allow arguments
  // to be removed from the set, but the caller doesn't check if the set
  // becomes empty so that may not work in practice.
  return llvm::none_of(Args, [](Argument *A) {
    auto *EltTy = cast<PointerType>(A->getType())->getElementType();
    return EltTy->isVectorTy() || EltTy->isAggregateType();
  });
}

X86TTIImpl::TTI::MemCmpExpansionOptions
X86TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TTI::MemCmpExpansionOptions Options;
  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
  Options.NumLoadsPerBlock = 2;
  // All GPR and vector loads can be unaligned.
  Options.AllowOverlappingLoads = true;
  if (IsZeroCmp) {
    // Only enable vector loads for equality comparison. Right now the vector
    // version is not as fast for three way compare (see #33329).
    const unsigned PreferredWidth = ST->getPreferVectorWidth();
    if (PreferredWidth >= 512 && ST->hasAVX512())
      Options.LoadSizes.push_back(64);
    if (PreferredWidth >= 256 && ST->hasAVX())
      Options.LoadSizes.push_back(32);
    if (PreferredWidth >= 128 && ST->hasSSE2())
      Options.LoadSizes.push_back(16);
  }
  if (ST->is64Bit()) {
    Options.LoadSizes.push_back(8);
  }
  Options.LoadSizes.push_back(4);
  Options.LoadSizes.push_back(2);
  Options.LoadSizes.push_back(1);
  return Options;
}
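
// For example (illustrative), on a 64-bit AVX2 subtarget an equality-only
// memcmp of 31 bytes can be expanded inline as two overlapping 16-byte
// vector loads per buffer, covering bytes [0,16) and [15,31), thanks to
// AllowOverlappingLoads, rather than falling back to the libc call.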

bool X86TTIImpl::enableInterleavedAccessVectorization() {
  // TODO: We expect this to be beneficial regardless of arch,
  // but there are currently some unexplained performance artifacts on Atom.
  // As a temporary solution, disable on Atom.
  return !(ST->isAtom());
}

// Get estimation for interleaved load/store operations for AVX2.
// \p Factor is the interleaved-access factor (stride) - the number of
// (interleaved) elements in the group.
// \p Indices contains the indices for a strided load: when the
// interleaved load has gaps they indicate which elements are used.
// If Indices is empty (or if the number of indices is equal to the size
// of the interleaved-access as given in \p Factor) the access has no gaps.
//
// As opposed to AVX-512, AVX2 does not have generic shuffles that allow
// computing the cost using a generic formula as a function of generic
// shuffles. We therefore use a lookup table instead, filled according to
// the instruction sequences that codegen currently generates.
int X86TTIImpl::getInterleavedMemoryOpCostAVX2(
    unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
    ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
    TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) {

  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind,
                                             UseMaskForCond, UseMaskForGaps);

  // We currently support only fully-interleaved groups, with no gaps.
  // TODO: Support also strided loads (interleaved-groups with gaps).
  if (Indices.size() && Indices.size() != Factor)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind);

  // VecTy for an interleaved memop is <VF*Factor x Elt>.
  // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
  // VecTy = <12 x i32>.
  MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;

  // This function can be called with VecTy = <6 x i128>, Factor = 3, in which
  // case VF = 2, while v2i128 is an unsupported MVT vector type
  // (see MachineValueType.h::getVectorVT()).
  if (!LegalVT.isVector())
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind);

  unsigned VF = VecTy->getNumElements() / Factor;
  Type *ScalarTy = VecTy->getElementType();

  // Calculate the number of memory operations (NumOfMemOps) required to
  // load/store the VecTy.
  unsigned VecTySize = DL.getTypeStoreSize(VecTy);
  unsigned LegalVTSize = LegalVT.getStoreSize();
  unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;

  // Get the cost of one memory operation.
  auto *SingleMemOpTy = FixedVectorType::get(VecTy->getElementType(),
                                             LegalVT.getVectorNumElements());
  unsigned MemOpCost = getMemoryOpCost(Opcode, SingleMemOpTy,
                                       MaybeAlign(Alignment), AddressSpace,
                                       CostKind);

  auto *VT = FixedVectorType::get(ScalarTy, VF);
  EVT ETy = TLI->getValueType(DL, VT);
  if (!ETy.isSimple())
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind);

  // TODO: Complete for other data-types and strides.
  // Each combination of Stride, ElementTy and VF results in a different
  // sequence; the cost tables are therefore accessed with:
  // Factor (stride) and VectorType=VFxElemType.
  // The Cost accounts only for the shuffle sequence;
  // the cost of the loads/stores is accounted for separately.
  //
  static const CostTblEntry AVX2InterleavedLoadTbl[] = {
      {2, MVT::v4i64, 6},  // (load 8i64 and) deinterleave into 2 x 4i64
      {2, MVT::v4f64, 6},  // (load 8f64 and) deinterleave into 2 x 4f64

      {3, MVT::v2i8, 10},  // (load 6i8 and) deinterleave into 3 x 2i8
      {3, MVT::v4i8, 4},   // (load 12i8 and) deinterleave into 3 x 4i8
      {3, MVT::v8i8, 9},   // (load 24i8 and) deinterleave into 3 x 8i8
      {3, MVT::v16i8, 11}, // (load 48i8 and) deinterleave into 3 x 16i8
      {3, MVT::v32i8, 13}, // (load 96i8 and) deinterleave into 3 x 32i8
      {3, MVT::v8f32, 17}, // (load 24f32 and) deinterleave into 3 x 8f32

      {4, MVT::v2i8, 12},  // (load 8i8 and) deinterleave into 4 x 2i8
      {4, MVT::v4i8, 4},   // (load 16i8 and) deinterleave into 4 x 4i8
      {4, MVT::v8i8, 20},  // (load 32i8 and) deinterleave into 4 x 8i8
      {4, MVT::v16i8, 39}, // (load 64i8 and) deinterleave into 4 x 16i8
      {4, MVT::v32i8, 80}, // (load 128i8 and) deinterleave into 4 x 32i8

      {8, MVT::v8f32, 40}  // (load 64f32 and) deinterleave into 8 x 8f32
  };

  static const CostTblEntry AVX2InterleavedStoreTbl[] = {
      {2, MVT::v4i64, 6},  // interleave 2 x 4i64 into 8i64 (and store)
      {2, MVT::v4f64, 6},  // interleave 2 x 4f64 into 8f64 (and store)

      {3, MVT::v2i8, 7},   // interleave 3 x 2i8 into 6i8 (and store)
      {3, MVT::v4i8, 8},   // interleave 3 x 4i8 into 12i8 (and store)
      {3, MVT::v8i8, 11},  // interleave 3 x 8i8 into 24i8 (and store)
      {3, MVT::v16i8, 11}, // interleave 3 x 16i8 into 48i8 (and store)
      {3, MVT::v32i8, 13}, // interleave 3 x 32i8 into 96i8 (and store)

      {4, MVT::v2i8, 12},  // interleave 4 x 2i8 into 8i8 (and store)
      {4, MVT::v4i8, 9},   // interleave 4 x 4i8 into 16i8 (and store)
      {4, MVT::v8i8, 10},  // interleave 4 x 8i8 into 32i8 (and store)
      {4, MVT::v16i8, 10}, // interleave 4 x 16i8 into 64i8 (and store)
      {4, MVT::v32i8, 12}  // interleave 4 x 32i8 into 128i8 (and store)
  };

  if (Opcode == Instruction::Load) {
    if (const auto *Entry =
            CostTableLookup(AVX2InterleavedLoadTbl, Factor, ETy.getSimpleVT()))
      return NumOfMemOps * MemOpCost + Entry->Cost;
  } else {
    assert(Opcode == Instruction::Store &&
           "Expected Store Instruction at this point");
    if (const auto *Entry =
            CostTableLookup(AVX2InterleavedStoreTbl, Factor, ETy.getSimpleVT()))
      return NumOfMemOps * MemOpCost + Entry->Cost;
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace, CostKind);
}
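
// Worked example for the AVX2 path (illustrative; assumes getMemoryOpCost
// returns 1 for the legalized load): Opcode = Load, VecTy = <24 x i8>,
// Factor = 3, so VF = 8 and ETy = v8i8. <24 x i8> widens to a single v32i8
// memory op (NumOfMemOps = 1), and the table entry {3, MVT::v8i8, 9} prices
// the shuffle sequence, for a total of 1 * 1 + 9 = 10.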

// Get estimation for interleaved load/store operations and strided load.
// \p Indices contains indices for strided load.
// \p Factor - the factor of interleaving.
// AVX-512 provides 3-src shuffles that significantly reduce the cost.
int X86TTIImpl::getInterleavedMemoryOpCostAVX512(
    unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
    ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
    TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) {

  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind,
                                             UseMaskForCond, UseMaskForGaps);

  // VecTy for an interleaved memop is <VF*Factor x Elt>.
  // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
  // VecTy = <12 x i32>.

  // Calculate the number of memory operations (NumOfMemOps) required to
  // load/store the VecTy.
  MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
  unsigned VecTySize = DL.getTypeStoreSize(VecTy);
  unsigned LegalVTSize = LegalVT.getStoreSize();
  unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;

  // Get the cost of one memory operation.
  auto *SingleMemOpTy = FixedVectorType::get(VecTy->getElementType(),
                                             LegalVT.getVectorNumElements());
  unsigned MemOpCost = getMemoryOpCost(Opcode, SingleMemOpTy,
                                       MaybeAlign(Alignment), AddressSpace,
                                       CostKind);

  unsigned VF = VecTy->getNumElements() / Factor;
  MVT VT = MVT::getVectorVT(MVT::getVT(VecTy->getScalarType()), VF);

  if (Opcode == Instruction::Load) {
    // The tables (AVX512InterleavedLoadTbl and AVX512InterleavedStoreTbl)
    // contain the cost of the optimized shuffle sequence that the
    // X86InterleavedAccess pass will generate.
    // The cost of loads and stores is computed separately from the table.

    // X86InterleavedAccess supports only the following interleaved-access
    // groups.
    static const CostTblEntry AVX512InterleavedLoadTbl[] = {
        {3, MVT::v16i8, 12}, // (load 48i8 and) deinterleave into 3 x 16i8
        {3, MVT::v32i8, 14}, // (load 96i8 and) deinterleave into 3 x 32i8
        {3, MVT::v64i8, 22}, // (load 192i8 and) deinterleave into 3 x 64i8
    };

    if (const auto *Entry =
            CostTableLookup(AVX512InterleavedLoadTbl, Factor, VT))
      return NumOfMemOps * MemOpCost + Entry->Cost;
    // If an entry does not exist, fall back to the default implementation.

    // The kind of shuffle depends on the number of loaded values.
    // If we load the entire data in one register, we can use a 1-src shuffle.
    // Otherwise, we'll merge 2 sources in each operation.
    TTI::ShuffleKind ShuffleKind =
        (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;

    unsigned ShuffleCost =
        getShuffleCost(ShuffleKind, SingleMemOpTy, 0, nullptr);

    unsigned NumOfLoadsInInterleaveGrp =
        Indices.size() ? Indices.size() : Factor;
    auto *ResultTy = FixedVectorType::get(VecTy->getElementType(),
                                          VecTy->getNumElements() / Factor);
    unsigned NumOfResults =
        getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
        NumOfLoadsInInterleaveGrp;

    // About half of the loads may be folded into shuffles when we have only
    // one result. If we have more than one result, we do not fold loads at
    // all.
    unsigned NumOfUnfoldedLoads =
        NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;

    // Get the number of shuffle operations per result.
    unsigned NumOfShufflesPerResult =
        std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));

    // The SK_PermuteTwoSrc shuffle clobbers one of its source operands.
    // When we have more than one destination, we need additional instructions
    // to keep the sources intact.
    unsigned NumOfMoves = 0;
    if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
      NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;

    int Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
               NumOfUnfoldedLoads * MemOpCost + NumOfMoves;

    return Cost;
  }

  // Store.
  assert(Opcode == Instruction::Store &&
         "Expected Store Instruction at this point");
  // X86InterleavedAccess supports only the following interleaved-access
  // groups.
  static const CostTblEntry AVX512InterleavedStoreTbl[] = {
      {3, MVT::v16i8, 12}, // interleave 3 x 16i8 into 48i8 (and store)
      {3, MVT::v32i8, 14}, // interleave 3 x 32i8 into 96i8 (and store)
      {3, MVT::v64i8, 26}, // interleave 3 x 64i8 into 192i8 (and store)

      {4, MVT::v8i8, 10},  // interleave 4 x 8i8 into 32i8 (and store)
      {4, MVT::v16i8, 11}, // interleave 4 x 16i8 into 64i8 (and store)
      {4, MVT::v32i8, 14}, // interleave 4 x 32i8 into 128i8 (and store)
      {4, MVT::v64i8, 24}  // interleave 4 x 64i8 into 256i8 (and store)
  };

  if (const auto *Entry =
          CostTableLookup(AVX512InterleavedStoreTbl, Factor, VT))
    return NumOfMemOps * MemOpCost + Entry->Cost;
  // If an entry does not exist, fall back to the default implementation.

  // There are no strided stores at the moment, and a store can't be folded
  // into a shuffle.
  unsigned NumOfSources = Factor; // The number of values to be merged.
  unsigned ShuffleCost =
      getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, 0, nullptr);
  unsigned NumOfShufflesPerStore = NumOfSources - 1;

  // The SK_PermuteTwoSrc shuffle clobbers one of its source operands.
  // We need additional instructions to keep the sources intact.
  unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
  int Cost = NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
             NumOfMoves;
  return Cost;
}
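
// Worked example for the AVX-512 path (illustrative; assumes a BWI-capable
// subtarget and that getMemoryOpCost returns 1 for the legalized store):
// Opcode = Store, VecTy = <48 x i8>, Factor = 3, so VF = 16 and VT = v16i8.
// <48 x i8> widens to a single v64i8 memory op (NumOfMemOps = 1) and the
// table entry {3, MVT::v16i8, 12} applies, for a total of 1 * 1 + 12 = 13.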

int X86TTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  auto isSupportedOnAVX512 = [](Type *VecTy, bool HasBW) {
    Type *EltTy = cast<VectorType>(VecTy)->getElementType();
    if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
        EltTy->isIntegerTy(32) || EltTy->isPointerTy())
      return true;
    if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8))
      return HasBW;
    return false;
  };
  if (ST->hasAVX512() && isSupportedOnAVX512(VecTy, ST->hasBWI()))
    return getInterleavedMemoryOpCostAVX512(
        Opcode, cast<FixedVectorType>(VecTy), Factor, Indices, Alignment,
        AddressSpace, CostKind, UseMaskForCond, UseMaskForGaps);
  if (ST->hasAVX2())
    return getInterleavedMemoryOpCostAVX2(
        Opcode, cast<FixedVectorType>(VecTy), Factor, Indices, Alignment,
        AddressSpace, CostKind, UseMaskForCond, UseMaskForGaps);

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace, CostKind,
                                           UseMaskForCond, UseMaskForGaps);
}